diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index 8994d8e6..2d9778e1 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -18,6 +18,7 @@ jobs: - "fork11-rollup" - "fork12-validium" - "fork12-rollup" + # - "fork12-pessimistic" runs-on: ubuntu-latest steps: - name: Checkout code @@ -32,26 +33,17 @@ jobs: - name: Build Docker run: make build-docker - - # this is better to get the action in - - name: Install kurtosis - shell: bash - run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list - sudo apt update - sudo apt install kurtosis-cli=1.4.1 - kurtosis version - - - name: Disable kurtosis analytics - shell: bash - run: kurtosis analytics disable - - - name: Install yq - shell: bash - run: | - pip3 install yq - yq --version - + + - name: Checkout kurtosis-cdk + uses: actions/checkout@v4 + with: + repository: 0xPolygon/kurtosis-cdk + path: kurtosis-cdk + ref: v0.2.24 + + - name: Install Kurtosis CDK tools + uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk + - name: Install polycli run: | POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" @@ -62,16 +54,6 @@ jobs: sudo chmod +x /usr/local/bin/polycli /usr/local/bin/polycli version - - name: Install foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: checkout kurtosis-cdk - uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - path: "kurtosis-cdk" - ref: "v0.2.19" - - name: Setup Bats and bats libs uses: bats-core/bats-action@2.0.0 @@ -81,6 +63,7 @@ jobs: env: KURTOSIS_FOLDER: ${{ github.workspace }}/kurtosis-cdk BATS_LIB_PATH: /usr/lib/ + agglayer_prover_sp1_key: ${{ secrets.SP1_PRIVATE_KEY }} - name: Dump enclave logs if: failure() diff --git a/.github/workflows/test-resequence.yml b/.github/workflows/test-resequence.yml index 66bc437a..2625017c 100644 --- a/.github/workflows/test-resequence.yml +++ b/.github/workflows/test-resequence.yml @@ -23,7 +23,7 @@ jobs: with: path: cdk - - name: Checkout kurtosis-cdk + - name: Checkout cdk-erigon uses: actions/checkout@v4 with: repository: 0xPolygonHermez/cdk-erigon @@ -34,21 +34,12 @@ jobs: uses: actions/checkout@v4 with: repository: 0xPolygon/kurtosis-cdk - ref: a7a80b7b5d98a69a23415ab0018e556257a6dfb6 path: kurtosis-cdk + ref: v0.2.24 - name: Install Kurtosis CDK tools uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Install yq - run: | - sudo curl -L https://github.com/mikefarah/yq/releases/download/v4.44.2/yq_linux_amd64 -o /usr/local/bin/yq - sudo chmod +x /usr/local/bin/yq - /usr/local/bin/yq --version - - name: Install polycli run: | POLYCLI_VERSION="${{ vars.POLYCLI_VERSION }}" diff --git a/Dockerfile b/Dockerfile index 694cff7d..bb272bc2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,7 @@ RUN make build-go # CONTAINER FOR RUNNING BINARY FROM --platform=${BUILDPLATFORM} debian:bookworm-slim -RUN apt-get update && apt-get install -y ca-certificates postgresql-client libssl-dev && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y ca-certificates sqlite3 procps libssl-dev && rm -rf /var/lib/apt/lists/* COPY --from=build /go/src/github.com/0xPolygon/cdk/target/cdk-node /usr/local/bin/ CMD ["/bin/sh", "-c", "cdk"] diff --git a/agglayer/client.go b/agglayer/client.go index 8396fc9e..8a186be4 100644 --- a/agglayer/client.go +++ b/agglayer/client.go @@ -30,6 +30,7 @@ type AgglayerClientInterface interface { WaitTxToBeMined(hash 
common.Hash, ctx context.Context) error SendCertificate(certificate *SignedCertificate) (common.Hash, error) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) + GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) AggLayerClientGetEpochConfiguration } @@ -158,3 +159,24 @@ func (c *AggLayerClient) GetEpochConfiguration() (*ClockConfiguration, error) { return result, nil } + +// GetLatestKnownCertificateHeader returns the latest known certificate header submitted by networkID +func (c *AggLayerClient) GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) { + response, err := jSONRPCCall(c.url, "interop_getLatestKnownCertificateHeader", networkID) + if err != nil { + return nil, fmt.Errorf("GetLatestKnownCertificateHeader error jSONRPCCall. Err: %w", err) + } + + if response.Error != nil { + return nil, fmt.Errorf("GetLatestKnownCertificateHeader rpc returns an error: code=%d msg=%s", + response.Error.Code, response.Error.Message) + } + + var result *CertificateHeader + err = json.Unmarshal(response.Result, &result) + if err != nil { + return nil, fmt.Errorf("GetLatestKnownCertificateHeader error Unmarshal. Err: %w", err) + } + + return result, nil +} diff --git a/agglayer/client_test.go b/agglayer/client_test.go index 82baea85..c4117eb8 100644 --- a/agglayer/client_test.go +++ b/agglayer/client_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/0xPolygon/cdk-rpc/rpc" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -14,11 +15,40 @@ const ( func TestExploratoryClient(t *testing.T) { t.Skip("This test is for exploratory purposes only") - sut := NewAggLayerClient("http://127.0.0.1:32853") + sut := NewAggLayerClient("http://127.0.0.1:32781") config, err := sut.GetEpochConfiguration() require.NoError(t, err) require.NotNil(t, config) fmt.Printf("Config: %s", config.String()) + + lastCert, err := sut.GetLatestKnownCertificateHeader(1) + require.NoError(t, err) + require.NotNil(t, lastCert) + fmt.Printf("LastCert: %s", lastCert.String()) +} + +func TestExploratoryGetCertificateHeader(t *testing.T) { + t.Skip("This test is exploratory and should be skipped") + aggLayerClient := NewAggLayerClient("http://localhost:32796") + certificateID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369") + certificateHeader, err := aggLayerClient.GetCertificateHeader(certificateID) + require.NoError(t, err) + fmt.Print(certificateHeader) +} +func TestExploratoryGetEpochConfiguration(t *testing.T) { + t.Skip("This test is exploratory and should be skipped") + aggLayerClient := NewAggLayerClient("http://localhost:32796") + clockConfig, err := aggLayerClient.GetEpochConfiguration() + require.NoError(t, err) + fmt.Print(clockConfig) +} + +func TestExploratoryGetLatestKnownCertificateHeader(t *testing.T) { + t.Skip("This test is exploratory and should be skipped") + aggLayerClient := NewAggLayerClient("http://localhost:32843") + cert, err := aggLayerClient.GetLatestKnownCertificateHeader(1) + require.NoError(t, err) + fmt.Print(cert) } func TestGetEpochConfigurationResponseWithError(t *testing.T) { @@ -74,3 +104,60 @@ func TestGetEpochConfigurationOkResponse(t *testing.T) { GenesisBlock: 1, }, *clockConfig) } + +func TestGetLatestKnownCertificateHeaderOkResponse(t *testing.T) { + sut := NewAggLayerClient(testURL) + response := rpc.Response{ + Result: 
[]byte(`{"network_id":1,"height":0,"epoch_number":223,"certificate_index":0,"certificate_id":"0xf9179d2fbe535814b5a14496e2eed474f49c6131227a9dfc5d2d8caf9e212054","new_local_exit_root":"0x7ae06f4a5d0b6da7dd4973fb6ef40d82c9f2680899b3baaf9e564413b59cc160","metadata":"0x00000000000000000000000000000000000000000000000000000000000001a7","status":"Settled"}`), + } + jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { + return response, nil + } + cert, err := sut.GetLatestKnownCertificateHeader(1) + require.NotNil(t, cert) + require.NoError(t, err) + require.Nil(t, cert.PreviousLocalExitRoot) +} + +func TestGetLatestKnownCertificateHeaderErrorResponse(t *testing.T) { + sut := NewAggLayerClient(testURL) + jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { + return rpc.Response{}, fmt.Errorf("unittest error") + } + + cert, err := sut.GetLatestKnownCertificateHeader(1) + + require.Nil(t, cert) + require.Error(t, err) +} + +func TestGetLatestKnownCertificateHeaderResponseBadJson(t *testing.T) { + sut := NewAggLayerClient(testURL) + response := rpc.Response{ + Result: []byte(`{`), + } + jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { + return response, nil + } + + cert, err := sut.GetLatestKnownCertificateHeader(1) + + require.Nil(t, cert) + require.Error(t, err) +} + +func TestGetLatestKnownCertificateHeaderWithPrevLERResponse(t *testing.T) { + sut := NewAggLayerClient(testURL) + response := rpc.Response{ + Result: []byte(`{"network_id":1,"height":0,"epoch_number":223,"certificate_index":0,"certificate_id":"0xf9179d2fbe535814b5a14496e2eed474f49c6131227a9dfc5d2d8caf9e212054","prev_local_exit_root":"0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757","new_local_exit_root":"0x7ae06f4a5d0b6da7dd4973fb6ef40d82c9f2680899b3baaf9e564413b59cc160","metadata":"0x00000000000000000000000000000000000000000000000000000000000001a7","status":"Settled"}`), + } + jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { + return response, nil + } + cert, err := sut.GetLatestKnownCertificateHeader(1) + + require.NoError(t, err) + require.NotNil(t, cert) + + require.Equal(t, "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757", cert.PreviousLocalExitRoot.String()) +} diff --git a/agglayer/mock_agglayer_client.go b/agglayer/mock_agglayer_client.go index b7f70ee8..6c5a3fbf 100644 --- a/agglayer/mock_agglayer_client.go +++ b/agglayer/mock_agglayer_client.go @@ -138,6 +138,64 @@ func (_c *AgglayerClientMock_GetEpochConfiguration_Call) RunAndReturn(run func() return _c } +// GetLatestKnownCertificateHeader provides a mock function with given fields: networkID +func (_m *AgglayerClientMock) GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) { + ret := _m.Called(networkID) + + if len(ret) == 0 { + panic("no return value specified for GetLatestKnownCertificateHeader") + } + + var r0 *CertificateHeader + var r1 error + if rf, ok := ret.Get(0).(func(uint32) (*CertificateHeader, error)); ok { + return rf(networkID) + } + if rf, ok := ret.Get(0).(func(uint32) *CertificateHeader); ok { + r0 = rf(networkID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*CertificateHeader) + } + } + + if rf, ok := ret.Get(1).(func(uint32) error); ok { + r1 = rf(networkID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AgglayerClientMock_GetLatestKnownCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'GetLatestKnownCertificateHeader' +type AgglayerClientMock_GetLatestKnownCertificateHeader_Call struct { + *mock.Call +} + +// GetLatestKnownCertificateHeader is a helper method to define mock.On call +// - networkID uint32 +func (_e *AgglayerClientMock_Expecter) GetLatestKnownCertificateHeader(networkID interface{}) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { + return &AgglayerClientMock_GetLatestKnownCertificateHeader_Call{Call: _e.mock.On("GetLatestKnownCertificateHeader", networkID)} +} + +func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) Run(run func(networkID uint32)) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(uint32)) + }) + return _c +} + +func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) Return(_a0 *CertificateHeader, _a1 error) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) RunAndReturn(run func(uint32) (*CertificateHeader, error)) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { + _c.Call.Return(run) + return _c +} + // SendCertificate provides a mock function with given fields: certificate func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { ret := _m.Called(certificate) diff --git a/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json b/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json index dc74e325..fe3c420a 100644 --- a/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json +++ b/agglayer/testdata/type_conversion_errors/l1_info_root_incorrect_error.json @@ -8,4 +8,4 @@ "expected_error": "value of key leaf_count is not of type uint32", "certificate_header": "{\"network_id\":1,\"height\":6,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\",\"new_local_exit_root\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"metadata\":\"0x000000000000000000000000000000000000000000000000000000000000001f\",\"status\":{\"InError\":{\"error\":{\"TypeConversionError\":{\"L1InfoRootIncorrect\":{\"leaf_count\":\"invalid\",\"declared\":\"0x566244fbf813b6926f6895142979f61ed6706184909cb8c819cd0216202a8aa9\",\"retrieved\":\"0xa80cd4abb016bbb3c3058f923a88be1ad49d68277366c55554d6f13d62428a1f\"}}}}}}" } -] \ No newline at end of file +] diff --git a/agglayer/types.go b/agglayer/types.go index aece93f0..c4341591 100644 --- a/agglayer/types.go +++ b/agglayer/types.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math/big" + "slices" "strings" "github.com/0xPolygon/cdk/bridgesync" @@ -22,6 +23,13 @@ const ( Candidate InError Settled + + nilStr = "nil" +) + +var ( + NonSettledStatuses = []CertificateStatus{Pending, Candidate, Proven} + ClosedStatuses = []CertificateStatus{Settled, InError} ) // String representation of the enum @@ -29,6 +37,26 @@ func (c CertificateStatus) String() string { return [...]string{"Pending", "Proven", "Candidate", "InError", "Settled"}[c] } +// IsClosed returns true if the certificate is closed (settled or inError) +func (c CertificateStatus) IsClosed() bool { + return !c.IsOpen() +} + +// IsSettled returns true if the certificate is settled +func (c CertificateStatus) IsSettled() bool { + return c == Settled +} + +// IsInError returns true if the certificate is in error +func (c 
CertificateStatus) IsInError() bool { + return c == InError +} + +// IsOpen returns true if the certificate is open (pending, candidate or proven) +func (c CertificateStatus) IsOpen() bool { + return slices.Contains(NonSettledStatuses, c) +} + // UnmarshalJSON is the implementation of the json.Unmarshaler interface func (c *CertificateStatus) UnmarshalJSON(data []byte) error { dataStr := string(data) @@ -84,27 +112,14 @@ type Certificate struct { Metadata common.Hash `json:"metadata"` } -func (c *Certificate) String() string { - res := fmt.Sprintf("NetworkID: %d, Height: %d, PrevLocalExitRoot: %s, NewLocalExitRoot: %s, Metadata: %s\n", - c.NetworkID, c.Height, common.Bytes2Hex(c.PrevLocalExitRoot[:]), - common.Bytes2Hex(c.NewLocalExitRoot[:]), common.Bytes2Hex(c.Metadata[:])) - - if c.BridgeExits == nil { - res += " BridgeExits: nil\n" - } else { - for i, bridgeExit := range c.BridgeExits { - res += fmt.Sprintf(", BridgeExit[%d]: %s\n", i, bridgeExit.String()) - } - } - - if c.ImportedBridgeExits == nil { - res += " ImportedBridgeExits: nil\n" - } else { - for i, importedBridgeExit := range c.ImportedBridgeExits { - res += fmt.Sprintf(" ImportedBridgeExit[%d]: %s\n", i, importedBridgeExit.String()) - } +// Brief returns a brief string representation of the certificate +func (c *Certificate) Brief() string { + if c == nil { + return nilStr } - + res := fmt.Sprintf("agglayer.Cert {height: %d prevLER: %s newLER: %s exits: %d imported_exits: %d}", c.Height, + common.Bytes2Hex(c.PrevLocalExitRoot[:]), common.Bytes2Hex(c.NewLocalExitRoot[:]), + len(c.BridgeExits), len(c.ImportedBridgeExits)) return res } @@ -153,8 +168,8 @@ type SignedCertificate struct { Signature *Signature `json:"signature"` } -func (s *SignedCertificate) String() string { - return fmt.Sprintf("Certificate:%s,\nSignature: %s", s.Certificate.String(), s.Signature.String()) +func (s *SignedCertificate) Brief() string { + return fmt.Sprintf("Certificate:%s,\nSignature: %s", s.Certificate.Brief(), s.Signature.String()) } // CopyWithDefaulting returns a shallow copy of the signed certificate @@ -537,27 +552,52 @@ func (c *ImportedBridgeExit) Hash() common.Hash { ) } +type GenericPPError struct { + Key string + Value string +} + +func (p *GenericPPError) String() string { + return fmt.Sprintf("Generic error: %s: %s", p.Key, p.Value) +} + // CertificateHeader is the structure returned by the interop_getCertificateHeader RPC call type CertificateHeader struct { - NetworkID uint32 `json:"network_id"` - Height uint64 `json:"height"` - EpochNumber *uint64 `json:"epoch_number"` - CertificateIndex *uint64 `json:"certificate_index"` - CertificateID common.Hash `json:"certificate_id"` - NewLocalExitRoot common.Hash `json:"new_local_exit_root"` - Status CertificateStatus `json:"status"` - Metadata common.Hash `json:"metadata"` - Error PPError `json:"-"` -} - -func (c CertificateHeader) String() string { + NetworkID uint32 `json:"network_id"` + Height uint64 `json:"height"` + EpochNumber *uint64 `json:"epoch_number"` + CertificateIndex *uint64 `json:"certificate_index"` + CertificateID common.Hash `json:"certificate_id"` + PreviousLocalExitRoot *common.Hash `json:"prev_local_exit_root,omitempty"` + NewLocalExitRoot common.Hash `json:"new_local_exit_root"` + Status CertificateStatus `json:"status"` + Metadata common.Hash `json:"metadata"` + Error PPError `json:"-"` +} + +// ID returns a string identifier of this certificate header (height/certID) +func (c *CertificateHeader) ID() string { + if c == nil { + return nilStr + } + return fmt.Sprintf("%d/%s", c.Height, 
c.CertificateID.String()) +} + +func (c *CertificateHeader) String() string { + if c == nil { + return nilStr + } errors := "" if c.Error != nil { errors = c.Error.String() } - - return fmt.Sprintf("Height: %d, CertificateID: %s, NewLocalExitRoot: %s. Status: %s. Errors: [%s]", - c.Height, c.CertificateID.String(), c.NewLocalExitRoot.String(), c.Status.String(), errors) + previousLocalExitRoot := nilStr + if c.PreviousLocalExitRoot != nil { + previousLocalExitRoot = c.PreviousLocalExitRoot.String() + } + return fmt.Sprintf("Height: %d, CertificateID: %s, PreviousLocalExitRoot: %s, NewLocalExitRoot: %s. Status: %s."+ + " Errors: [%s]", + c.Height, c.CertificateID.String(), previousLocalExitRoot, c.NewLocalExitRoot.String(), c.Status.String(), errors) } func (c *CertificateHeader) UnmarshalJSON(data []byte) error { @@ -617,7 +657,12 @@ func (c *CertificateHeader) UnmarshalJSON(data []byte) error { ppError = p default: - return fmt.Errorf("invalid error type: %s", key) + valueStr, err := json.Marshal(value) + if err != nil { + ppError = &GenericPPError{Key: key, Value: "error marshalling value"} + } else { + ppError = &GenericPPError{Key: key, Value: string(valueStr)} + } } } diff --git a/agglayer/types_test.go b/agglayer/types_test.go index f2133923..648fc73c 100644 --- a/agglayer/types_test.go +++ b/agglayer/types_test.go @@ -15,6 +15,34 @@ const ( expectedSignedCertificateyMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"new_local_exit_root":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[]},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` ) +func TestMGenericPPError(t *testing.T) { + err := GenericPPError{"test", "value"} + require.Equal(t, "Generic error: test: value", err.String()) +} + +func TestCertificateHeaderID(t *testing.T) { + certificate := CertificateHeader{ + Height: 1, + CertificateID: common.HexToHash("0x123"), + } + require.Equal(t, "1/0x0000000000000000000000000000000000000000000000000000000000000123", certificate.ID()) + + var certNil *CertificateHeader + require.Equal(t, "nil", certNil.ID()) +} + +func TestCertificateHeaderString(t *testing.T) { + certificate := CertificateHeader{ + Height: 1, + CertificateID: common.HexToHash("0x123"), + } + require.Equal(t, "Height: 1, CertificateID: 0x0000000000000000000000000000000000000000000000000000000000000123, PreviousLocalExitRoot: nil, NewLocalExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000000. Status: Pending. 
Errors: []", + certificate.String()) + + var certNil *CertificateHeader + require.Equal(t, "nil", certNil.String()) +} + func TestMarshalJSON(t *testing.T) { cert := SignedCertificate{ Certificate: &Certificate{ @@ -251,3 +279,14 @@ func TestGlobalIndex_UnmarshalFromMap(t *testing.T) { }) } } + +func TestUnmarshalCertificateHeaderUnknownError(t *testing.T) { + str := "{\"network_id\":14,\"height\":0,\"epoch_number\":null,\"certificate_index\":null,\"certificate_id\":\"0x3af88c9ca106822bd141fdc680dcb888f4e9d4997fad1645ba3d5d747059eb32\",\"new_local_exit_root\":\"0x625e889ced3c31277c6653229096374d396a2fd3564a8894aaad2ff935d2fc8c\",\"metadata\":\"0x0000000000000000000000000000000000000000000000000000000000002f3d\",\"status\":{\"InError\":{\"error\":{\"ProofVerificationFailed\":{\"Plonk\":\"the verifying key does not match the inner plonk bn254 proof's committed verifying key\"}}}}}" + data := []byte(str) + var result *CertificateHeader + err := json.Unmarshal(data, &result) + require.NoError(t, err) + require.NotNil(t, result) + ppError := result.Error.String() + require.Equal(t, `Generic error: ProofVerificationFailed: {"Plonk":"the verifying key does not match the inner plonk bn254 proof's committed verifying key"}`, ppError) +} diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 58e97402..9b89d557 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -17,6 +17,7 @@ import ( cdkTypes "github.com/0xPolygon/cdk-rpc/types" "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggregator/db/dbstorage" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" cdkcommon "github.com/0xPolygon/cdk/common" @@ -59,7 +60,7 @@ type Aggregator struct { cfg Config logger *log.Logger - state StateInterface + storage StorageInterface etherman Etherman ethTxManager EthTxManagerClient l1Syncr synchronizer.Synchronizer @@ -67,10 +68,9 @@ type Aggregator struct { accInputHashes map[uint64]common.Hash accInputHashesMutex *sync.Mutex - profitabilityChecker aggregatorTxProfitabilityChecker timeSendFinalProof time.Time timeCleanupLockedProofs types.Duration - stateDBMutex *sync.Mutex + storageMutex *sync.Mutex timeSendFinalProofMutex *sync.RWMutex finalProof chan finalProofMsg @@ -93,21 +93,7 @@ func New( ctx context.Context, cfg Config, logger *log.Logger, - stateInterface StateInterface, etherman Etherman) (*Aggregator, error) { - var profitabilityChecker aggregatorTxProfitabilityChecker - - switch cfg.TxProfitabilityCheckerType { - case ProfitabilityBase: - profitabilityChecker = NewTxProfitabilityCheckerBase( - stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration, cfg.TxProfitabilityMinReward.Int, - ) - case ProfitabilityAcceptAll: - profitabilityChecker = NewTxProfitabilityCheckerAcceptAll( - stateInterface, cfg.IntervalAfterWhichBatchConsolidateAnyway.Duration, - ) - } - // Create ethtxmanager client cfg.EthTxManager.Log = ethtxlog.Config{ Environment: ethtxlog.LogEnvironment(cfg.Log.Environment), @@ -150,18 +136,22 @@ func New( } } + storage, err := dbstorage.NewDBStorage(cfg.DBPath) + if err != nil { + return nil, err + } + a := &Aggregator{ ctx: ctx, cfg: cfg, logger: logger, - state: stateInterface, + storage: storage, etherman: etherman, ethTxManager: ethTxManager, l1Syncr: l1Syncr, accInputHashes: make(map[uint64]common.Hash), accInputHashesMutex: &sync.Mutex{}, - profitabilityChecker: profitabilityChecker, - stateDBMutex: &sync.Mutex{}, + storageMutex: &sync.Mutex{}, 
timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), @@ -213,7 +203,7 @@ func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { a.logger.Errorf("Error getting last virtual batch number: %v", err) } else { // Delete wip proofs - err = a.state.DeleteUngeneratedProofs(a.ctx, nil) + err = a.storage.DeleteUngeneratedProofs(a.ctx, nil) if err != nil { a.logger.Errorf("Error deleting ungenerated proofs: %v", err) } else { @@ -221,7 +211,7 @@ func (a *Aggregator) handleReorg(reorgData synchronizer.ReorgExecutionResult) { } // Delete any proof for the batches that have been rolled back - err = a.state.DeleteGeneratedProofs(a.ctx, lastVBatchNumber+1, maxDBBigIntValue, nil) + err = a.storage.DeleteGeneratedProofs(a.ctx, lastVBatchNumber+1, maxDBBigIntValue, nil) if err != nil { a.logger.Errorf("Error deleting generated proofs: %v", err) } else { @@ -275,7 +265,7 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat // Delete wip proofs if err == nil { - err = a.state.DeleteUngeneratedProofs(a.ctx, nil) + err = a.storage.DeleteUngeneratedProofs(a.ctx, nil) if err != nil { a.logger.Errorf("Error deleting ungenerated proofs: %v", err) } else { @@ -285,7 +275,7 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat // Delete any proof for the batches that have been rolled back if err == nil { - err = a.state.DeleteGeneratedProofs(a.ctx, rollbackData.LastBatchNumber+1, maxDBBigIntValue, nil) + err = a.storage.DeleteGeneratedProofs(a.ctx, rollbackData.LastBatchNumber+1, maxDBBigIntValue, nil) if err != nil { a.logger.Errorf("Error deleting generated proofs: %v", err) } else { @@ -335,12 +325,6 @@ func (a *Aggregator) Start() error { healthService := newHealthChecker() grpchealth.RegisterHealthServer(a.srv, healthService) - // Delete ungenerated recursive proofs - err = a.state.DeleteUngeneratedProofs(a.ctx, nil) - if err != nil { - return fmt.Errorf("failed to initialize proofs cache %w", err) - } - // Get last verified batch number to set the starting point for verifications lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() if err != nil { @@ -357,6 +341,12 @@ func (a *Aggregator) Start() error { a.logger.Infof("Starting AccInputHash:%v", accInputHash.String()) a.setAccInputHash(lastVerifiedBatchNumber, *accInputHash) + // Delete existing proofs + err = a.storage.DeleteGeneratedProofs(a.ctx, lastVerifiedBatchNumber, maxDBBigIntValue, nil) + if err != nil { + return fmt.Errorf("failed to delete proofs table %w", err) + } + a.resetVerifyProofTime() go a.cleanupLockedProofs() @@ -608,7 +598,7 @@ func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Cont "batches", fmt.Sprintf("%d-%d", proof.BatchNumber, proof.BatchNumberFinal), ) proof.GeneratingSince = nil - err := a.state.UpdateGeneratedProof(ctx, proof, nil) + err := a.storage.UpdateGeneratedProof(ctx, proof, nil) if err != nil { tmpLogger.Errorf("Failed updating proof state (false): %v", err) } @@ -703,7 +693,7 @@ func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover ProverInterf if err != nil { // Set the generating state to false for the proof ("unlock" it) proof.GeneratingSince = nil - err2 := a.state.UpdateGeneratedProof(a.ctx, proof, nil) + err2 := a.storage.UpdateGeneratedProof(a.ctx, proof, nil) if err2 != nil { tmpLogger.Errorf("Failed to unlock proof: %v", err2) } @@ -766,7 +756,7 @@ func (a *Aggregator) 
validateEligibleFinalProof( // We have a proof that contains batches below that the last batch verified, we need to delete this proof a.logger.Warnf("Proof %d-%d lower than next batch to verify %d. Deleting it", proof.BatchNumber, proof.BatchNumberFinal, batchNumberToVerify) - err := a.state.DeleteGeneratedProofs(ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) + err := a.storage.DeleteGeneratedProofs(ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) if err != nil { return false, fmt.Errorf("failed to delete discarded proof, err: %w", err) } @@ -779,7 +769,7 @@ func (a *Aggregator) validateEligibleFinalProof( } } - bComplete, err := a.state.CheckProofContainsCompleteSequences(ctx, proof, nil) + bComplete, err := a.storage.CheckProofContainsCompleteSequences(ctx, proof, nil) if err != nil { return false, fmt.Errorf("failed to check if proof contains complete sequences, %w", err) } @@ -795,11 +785,11 @@ func (a *Aggregator) validateEligibleFinalProof( func (a *Aggregator) getAndLockProofReadyToVerify( ctx context.Context, lastVerifiedBatchNum uint64, ) (*state.Proof, error) { - a.stateDBMutex.Lock() - defer a.stateDBMutex.Unlock() + a.storageMutex.Lock() + defer a.storageMutex.Unlock() // Get proof ready to be verified - proofToVerify, err := a.state.GetProofReadyToVerify(ctx, lastVerifiedBatchNum, nil) + proofToVerify, err := a.storage.GetProofReadyToVerify(ctx, lastVerifiedBatchNum, nil) if err != nil { return nil, err } @@ -807,7 +797,7 @@ func (a *Aggregator) getAndLockProofReadyToVerify( now := time.Now().Round(time.Microsecond) proofToVerify.GeneratingSince = &now - err = a.state.UpdateGeneratedProof(ctx, proofToVerify, nil) + err = a.storage.UpdateGeneratedProof(ctx, proofToVerify, nil) if err != nil { return nil, err } @@ -817,21 +807,21 @@ func (a *Aggregator) getAndLockProofReadyToVerify( func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state.Proof, proof2 *state.Proof) error { // Release proofs from generating state in a single transaction - dbTx, err := a.state.BeginStateTransaction(ctx) + dbTx, err := a.storage.BeginTx(ctx, nil) if err != nil { a.logger.Warnf("Failed to begin transaction to release proof aggregation state, err: %v", err) return err } proof1.GeneratingSince = nil - err = a.state.UpdateGeneratedProof(ctx, proof1, dbTx) + err = a.storage.UpdateGeneratedProof(ctx, proof1, dbTx) if err == nil { proof2.GeneratingSince = nil - err = a.state.UpdateGeneratedProof(ctx, proof2, dbTx) + err = a.storage.UpdateGeneratedProof(ctx, proof2, dbTx) } if err != nil { - if err := dbTx.Rollback(ctx); err != nil { + if err := dbTx.Rollback(); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state: %w", err) a.logger.Error(FirstToUpper(err.Error())) return err @@ -840,7 +830,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. 
return fmt.Errorf("failed to release proof aggregation state: %w", err) } - err = dbTx.Commit(ctx) + err = dbTx.Commit() if err != nil { return fmt.Errorf("failed to release proof aggregation state %w", err) } @@ -856,16 +846,16 @@ func (a *Aggregator) getAndLockProofsToAggregate( "proverAddr", prover.Addr(), ) - a.stateDBMutex.Lock() - defer a.stateDBMutex.Unlock() + a.storageMutex.Lock() + defer a.storageMutex.Unlock() - proof1, proof2, err := a.state.GetProofsToAggregate(ctx, nil) + proof1, proof2, err := a.storage.GetProofsToAggregate(ctx, nil) if err != nil { return nil, nil, err } // Set proofs in generating state in a single transaction - dbTx, err := a.state.BeginStateTransaction(ctx) + dbTx, err := a.storage.BeginTx(ctx, nil) if err != nil { tmpLogger.Errorf("Failed to begin transaction to set proof aggregation state, err: %v", err) return nil, nil, err @@ -873,14 +863,14 @@ func (a *Aggregator) getAndLockProofsToAggregate( now := time.Now().Round(time.Microsecond) proof1.GeneratingSince = &now - err = a.state.UpdateGeneratedProof(ctx, proof1, dbTx) + err = a.storage.UpdateGeneratedProof(ctx, proof1, dbTx) if err == nil { proof2.GeneratingSince = &now - err = a.state.UpdateGeneratedProof(ctx, proof2, dbTx) + err = a.storage.UpdateGeneratedProof(ctx, proof2, dbTx) } if err != nil { - if err := dbTx.Rollback(ctx); err != nil { + if err := dbTx.Rollback(); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state %w", err) tmpLogger.Error(FirstToUpper(err.Error())) return nil, nil, err @@ -889,7 +879,7 @@ func (a *Aggregator) getAndLockProofsToAggregate( return nil, nil, fmt.Errorf("failed to set proof aggregation state %w", err) } - err = dbTx.Commit(ctx) + err = dbTx.Commit() if err != nil { return nil, nil, fmt.Errorf("failed to set proof aggregation state %w", err) } @@ -983,16 +973,16 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf // update the state by removing the 2 aggregated proofs and storing the // newly generated recursive proof - dbTx, err := a.state.BeginStateTransaction(ctx) + dbTx, err := a.storage.BeginTx(ctx, nil) if err != nil { err = fmt.Errorf("failed to begin transaction to update proof aggregation state, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) return false, err } - err = a.state.DeleteGeneratedProofs(ctx, proof1.BatchNumber, proof2.BatchNumberFinal, dbTx) + err = a.storage.DeleteGeneratedProofs(ctx, proof1.BatchNumber, proof2.BatchNumberFinal, dbTx) if err != nil { - if err := dbTx.Rollback(ctx); err != nil { + if err := dbTx.Rollback(); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) return false, err @@ -1005,9 +995,9 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf now := time.Now().Round(time.Microsecond) proof.GeneratingSince = &now - err = a.state.AddGeneratedProof(ctx, proof, dbTx) + err = a.storage.AddGeneratedProof(ctx, proof, dbTx) if err != nil { - if err := dbTx.Rollback(ctx); err != nil { + if err := dbTx.Rollback(); err != nil { err := fmt.Errorf("failed to rollback proof aggregation state, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) return false, err @@ -1017,7 +1007,7 @@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf return false, err } - err = dbTx.Commit(ctx) + err = dbTx.Commit() if err != nil { err = fmt.Errorf("failed to store the recursive proof, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) @@ -1041,7 +1031,7 
@@ func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterf proof.GeneratingSince = nil // final proof has not been generated, update the recursive proof - err := a.state.UpdateGeneratedProof(a.ctx, proof, nil) + err := a.storage.UpdateGeneratedProof(a.ctx, proof, nil) if err != nil { err = fmt.Errorf("failed to store batch proof result, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) @@ -1073,8 +1063,8 @@ func (a *Aggregator) getAndLockBatchToProve( "proverAddr", prover.Addr(), ) - a.stateDBMutex.Lock() - defer a.stateDBMutex.Unlock() + a.storageMutex.Lock() + defer a.storageMutex.Unlock() // Get last virtual batch number from L1 lastVerifiedBatchNumber, err := a.etherman.GetLatestVerifiedBatchNum() @@ -1088,7 +1078,7 @@ func (a *Aggregator) getAndLockBatchToProve( // Look for the batch number to verify for proofExists { batchNumberToVerify++ - proofExists, err = a.state.CheckProofExistsForBatch(ctx, batchNumberToVerify, nil) + proofExists, err = a.storage.CheckProofExistsForBatch(ctx, batchNumberToVerify, nil) if err != nil { tmpLogger.Infof("Error checking proof exists for batch %d", batchNumberToVerify) @@ -1101,7 +1091,7 @@ func (a *Aggregator) getAndLockBatchToProve( tmpLogger.Warnf("AccInputHash for batch %d is not in memory, "+ "deleting proofs to regenerate acc input hash chain in memory", batchNumberToVerify) - err := a.state.CleanupGeneratedProofs(ctx, math.MaxInt, nil) + err := a.storage.CleanupGeneratedProofs(ctx, math.MaxInt, nil) if err != nil { tmpLogger.Infof("Error cleaning up generated proofs for batch %d", batchNumberToVerify) return nil, nil, nil, err @@ -1201,7 +1191,6 @@ func (a *Aggregator) getAndLockBatchToProve( a.logger.Debugf("Calculated acc input hash for batch %d: %v", batchNumberToVerify, accInputHash) a.logger.Debugf("OldAccInputHash: %v", oldAccInputHash) a.logger.Debugf("L1InfoRoot: %v", virtualBatch.L1InfoRoot) - // a.logger.Debugf("LastL2BLockTimestamp: %v", rpcBatch.LastL2BLockTimestamp()) a.logger.Debugf("TimestampLimit: %v", uint64(sequence.Timestamp.Unix())) a.logger.Debugf("LastCoinbase: %v", rpcBatch.LastCoinbase()) a.logger.Debugf("ForcedBlockHashL1: %v", rpcBatch.ForcedBlockHashL1()) @@ -1242,7 +1231,7 @@ func (a *Aggregator) getAndLockBatchToProve( a.logger.Debugf("Time to get witness for batch %d: %v", batchNumberToVerify, end.Sub(start)) // Store the sequence in aggregator DB - err = a.state.AddSequence(ctx, stateSequence, nil) + err = a.storage.AddSequence(ctx, stateSequence, nil) if err != nil { tmpLogger.Infof("Error storing sequence for batch %d", batchNumberToVerify) @@ -1250,25 +1239,9 @@ func (a *Aggregator) getAndLockBatchToProve( } // All the data required to generate a proof is ready - tmpLogger.Infof("Found virtual batch %d pending to generate proof", virtualBatch.BatchNumber) + tmpLogger.Infof("All information to generate proof for batch %d is ready", virtualBatch.BatchNumber) tmpLogger = tmpLogger.WithFields("batch", virtualBatch.BatchNumber) - tmpLogger.Info("Checking profitability to aggregate batch") - - // pass pol collateral as zero here, bcs in smart contract fee for aggregator is not defined yet - isProfitable, err := a.profitabilityChecker.IsProfitable(ctx, big.NewInt(0)) - if err != nil { - tmpLogger.Errorf("Failed to check aggregator profitability, err: %v", err) - - return nil, nil, nil, err - } - - if !isProfitable { - tmpLogger.Infof("Batch is not profitable, pol collateral %d", big.NewInt(0)) - - return nil, nil, nil, err - } - now := time.Now().Round(time.Microsecond) proof := 
&state.Proof{ BatchNumber: virtualBatch.BatchNumber, @@ -1279,9 +1252,9 @@ func (a *Aggregator) getAndLockBatchToProve( } // Avoid other prover to process the same batch - err = a.state.AddGeneratedProof(ctx, proof, nil) + err = a.storage.AddGeneratedProof(ctx, proof, nil) if err != nil { - tmpLogger.Errorf("Failed to add batch proof, err: %v", err) + tmpLogger.Errorf("Failed to add batch proof to DB for batch %d, err: %v", virtualBatch.BatchNumber, err) return nil, nil, nil, err } @@ -1317,7 +1290,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInt defer func() { if err != nil { tmpLogger.Debug("Deleting proof in progress") - err2 := a.state.DeleteGeneratedProofs(a.ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) + err2 := a.storage.DeleteGeneratedProofs(a.ctx, proof.BatchNumber, proof.BatchNumberFinal, nil) if err2 != nil { tmpLogger.Errorf("Failed to delete proof in progress, err: %v", err2) } @@ -1356,33 +1329,9 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInt tmpLogger.Info("Batch proof generated") - // Sanity Check: state root from the proof must match the one from the batch - if a.cfg.BatchProofSanityCheckEnabled && (stateRoot != common.Hash{}) && (stateRoot != batchToProve.StateRoot) { - for { - tmpLogger.Errorf("HALTING: "+ - "State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", - batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String(), - ) - time.Sleep(a.cfg.RetryTime.Duration) - } - } else { - tmpLogger.Infof("State root sanity check for batch %d passed", batchToProve.BatchNumber) + if a.cfg.BatchProofSanityCheckEnabled { + a.performSanityChecks(tmpLogger, stateRoot, accInputHash, batchToProve) } - - // Sanity Check: acc input hash from the proof must match the one from the batch - if a.cfg.BatchProofSanityCheckEnabled && (accInputHash != common.Hash{}) && - (accInputHash != batchToProve.AccInputHash) { - for { - tmpLogger.Errorf("HALTING: Acc input hash from the proof does not match the expected for "+ - "batch %d: Proof = [%s] Expected = [%s]", - batchToProve.BatchNumber, accInputHash.String(), batchToProve.AccInputHash.String(), - ) - time.Sleep(a.cfg.RetryTime.Duration) - } - } else { - tmpLogger.Infof("Acc input hash sanity check for batch %d passed", batchToProve.BatchNumber) - } - proof.Proof = resGetProof // NOTE(pg): the defer func is useless from now on, use a different variable @@ -1400,7 +1349,7 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInt proof.GeneratingSince = nil // final proof has not been generated, update the batch proof - err := a.state.UpdateGeneratedProof(a.ctx, proof, nil) + err := a.storage.UpdateGeneratedProof(a.ctx, proof, nil) if err != nil { err = fmt.Errorf("failed to store batch proof result, %w", err) tmpLogger.Error(FirstToUpper(err.Error())) @@ -1411,6 +1360,35 @@ func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInt return true, nil } +func (a *Aggregator) performSanityChecks(tmpLogger *log.Logger, stateRoot, accInputHash common.Hash, + batchToProve *state.Batch) { + // Sanity Check: state root from the proof must match the one from the batch + if (stateRoot != common.Hash{}) && (stateRoot != batchToProve.StateRoot) { + for { + tmpLogger.Errorf("HALTING: "+ + "State root from the proof does not match the expected for batch %d: Proof = [%s] Expected = [%s]", + batchToProve.BatchNumber, stateRoot.String(), batchToProve.StateRoot.String(), + ) + 
time.Sleep(a.cfg.RetryTime.Duration) + } + } else { + tmpLogger.Infof("State root sanity check for batch %d passed", batchToProve.BatchNumber) + } + + // Sanity Check: acc input hash from the proof must match the one from the batch + if (accInputHash != common.Hash{}) && (accInputHash != batchToProve.AccInputHash) { + for { + tmpLogger.Errorf("HALTING: Acc input hash from the proof does not match the expected for "+ + "batch %d: Proof = [%s] Expected = [%s]", + batchToProve.BatchNumber, accInputHash.String(), batchToProve.AccInputHash.String(), + ) + time.Sleep(a.cfg.RetryTime.Duration) + } + } else { + tmpLogger.Infof("Acc input hash sanity check for batch %d passed", batchToProve.BatchNumber) + } +} + // canVerifyProof returns true if we have reached the timeout to verify a proof // and no other prover is verifying a proof (verifyingProof = false). func (a *Aggregator) canVerifyProof() bool { @@ -1577,7 +1555,6 @@ func printInputProver(logger *log.Logger, inputProver *prover.StatelessInputProv logger.Debugf("Witness length: %v", len(inputProver.PublicInputs.Witness)) logger.Debugf("BatchL2Data length: %v", len(inputProver.PublicInputs.BatchL2Data)) - // logger.Debugf("Full DataStream: %v", common.Bytes2Hex(inputProver.PublicInputs.DataStream)) logger.Debugf("OldAccInputHash: %v", common.BytesToHash(inputProver.PublicInputs.OldAccInputHash)) logger.Debugf("L1InfoRoot: %v", common.BytesToHash(inputProver.PublicInputs.L1InfoRoot)) logger.Debugf("TimestampLimit: %v", inputProver.PublicInputs.TimestampLimit) @@ -1646,7 +1623,7 @@ func (a *Aggregator) handleMonitoredTxResult(result ethtxtypes.MonitoredTxResult } } - err = a.state.DeleteGeneratedProofs(a.ctx, firstBatch, lastBatch, nil) + err = a.storage.DeleteGeneratedProofs(a.ctx, firstBatch, lastBatch, nil) if err != nil { mTxResultLogger.Errorf("failed to delete generated proofs from %d to %d: %v", firstBatch, lastBatch, err) } @@ -1664,7 +1641,7 @@ func (a *Aggregator) cleanupLockedProofs() { case <-a.ctx.Done(): return case <-time.After(a.timeCleanupLockedProofs.Duration): - n, err := a.state.CleanupLockedProofs(a.ctx, a.cfg.GeneratingProofCleanupThreshold, nil) + n, err := a.storage.CleanupLockedProofs(a.ctx, a.cfg.GeneratingProofCleanupThreshold, nil) if err != nil { a.logger.Errorf("Failed to cleanup locked proofs: %v", err) } diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index ff788190..652b4a62 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -6,6 +6,8 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "database/sql" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -49,13 +51,14 @@ const ( ) type mox struct { - stateMock *mocks.StateInterfaceMock + storageMock *mocks.StorageInterfaceMock ethTxManager *mocks.EthTxManagerClientMock etherman *mocks.EthermanMock proverMock *mocks.ProverInterfaceMock aggLayerClientMock *agglayer.AgglayerClientMock synchronizerMock *mocks.SynchronizerInterfaceMock rpcMock *mocks.RPCInterfaceMock + txerMock *mocks.TxerMock } func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { @@ -75,7 +78,7 @@ func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { } func Test_Start(t *testing.T) { - mockState := new(mocks.StateInterfaceMock) + mockStorage := new(mocks.StorageInterfaceMock) mockL1Syncr := new(mocks.SynchronizerInterfaceMock) mockEtherman := new(mocks.EthermanMock) mockEthTxManager := new(mocks.EthTxManagerClientMock) @@ -83,21 +86,21 @@ func Test_Start(t *testing.T) { mockL1Syncr.On("Sync", 
mock.Anything).Return(nil) mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() mockEtherman.On("GetBatchAccInputHash", mock.Anything, uint64(90)).Return(common.Hash{}, nil).Once() - mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() - mockState.On("CleanupLockedProofs", mock.Anything, "", nil).Return(int64(0), nil) + mockStorage.On("DeleteGeneratedProofs", mock.Anything, uint64(90), mock.Anything, nil).Return(nil).Once() + mockStorage.On("CleanupLockedProofs", mock.Anything, "", nil).Return(int64(0), nil) mockEthTxManager.On("Start").Return(nil) ctx := context.Background() a := &Aggregator{ - state: mockState, + storage: mockStorage, logger: log.GetDefaultLogger(), halted: atomic.Bool{}, l1Syncr: mockL1Syncr, etherman: mockEtherman, ethTxManager: mockEthTxManager, ctx: ctx, - stateDBMutex: &sync.Mutex{}, + storageMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: types.Duration{Duration: 5 * time.Second}, accInputHashes: make(map[uint64]common.Hash), @@ -116,26 +119,26 @@ func Test_handleReorg(t *testing.T) { t.Parallel() mockL1Syncr := new(mocks.SynchronizerInterfaceMock) - mockState := new(mocks.StateInterfaceMock) + mockStorage := new(mocks.StorageInterfaceMock) reorgData := synchronizer.ReorgExecutionResult{} a := &Aggregator{ l1Syncr: mockL1Syncr, - state: mockState, + storage: mockStorage, logger: log.GetDefaultLogger(), halted: atomic.Bool{}, ctx: context.Background(), } mockL1Syncr.On("GetLastestVirtualBatchNumber", mock.Anything).Return(uint64(100), nil).Once() - mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, nil).Return(nil).Once() - mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() + mockStorage.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, nil).Return(nil).Once() + mockStorage.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() go a.handleReorg(reorgData) time.Sleep(3 * time.Second) assert.True(t, a.halted.Load()) - mockState.AssertExpectations(t) + mockStorage.AssertExpectations(t) mockL1Syncr.AssertExpectations(t) } @@ -143,7 +146,7 @@ func Test_handleRollbackBatches(t *testing.T) { t.Parallel() mockEtherman := new(mocks.EthermanMock) - mockState := new(mocks.StateInterfaceMock) + mockStorage := new(mocks.StorageInterfaceMock) // Test data rollbackData := synchronizer.RollbackBatchesData{ @@ -152,13 +155,13 @@ func Test_handleRollbackBatches(t *testing.T) { mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() mockEtherman.On("GetBatchAccInputHash", mock.Anything, uint64(90)).Return(common.Hash{}, nil).Once() - mockState.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() - mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + mockStorage.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() + mockStorage.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() a := Aggregator{ ctx: context.Background(), etherman: mockEtherman, - state: mockState, + storage: mockStorage, logger: log.GetDefaultLogger(), halted: atomic.Bool{}, accInputHashes: make(map[uint64]common.Hash), @@ -170,18 +173,18 @@ func Test_handleRollbackBatches(t *testing.T) { assert.False(t, a.halted.Load()) mockEtherman.AssertExpectations(t) - mockState.AssertExpectations(t) + mockStorage.AssertExpectations(t) } func 
Test_handleRollbackBatchesHalt(t *testing.T) { t.Parallel() mockEtherman := new(mocks.EthermanMock) - mockState := new(mocks.StateInterfaceMock) + mockStorage := new(mocks.StorageInterfaceMock) mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(110), nil).Once() - mockState.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() - mockState.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + mockStorage.On("DeleteUngeneratedProofs", mock.Anything, mock.Anything).Return(nil).Once() + mockStorage.On("DeleteGeneratedProofs", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() // Test data rollbackData := synchronizer.RollbackBatchesData{ @@ -191,7 +194,7 @@ func Test_handleRollbackBatchesHalt(t *testing.T) { a := Aggregator{ ctx: context.Background(), etherman: mockEtherman, - state: mockState, + storage: mockStorage, logger: log.GetDefaultLogger(), halted: atomic.Bool{}, accInputHashes: make(map[uint64]common.Hash), @@ -210,7 +213,7 @@ func Test_handleRollbackBatchesError(t *testing.T) { t.Parallel() mockEtherman := new(mocks.EthermanMock) - mockState := new(mocks.StateInterfaceMock) + mockStorage := new(mocks.StorageInterfaceMock) mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(110), fmt.Errorf("error")).Once() @@ -222,7 +225,7 @@ func Test_handleRollbackBatchesError(t *testing.T) { a := Aggregator{ ctx: context.Background(), etherman: mockEtherman, - state: mockState, + storage: mockStorage, logger: log.GetDefaultLogger(), halted: atomic.Bool{}, accInputHashes: make(map[uint64]common.Hash), @@ -307,7 +310,7 @@ func Test_sendFinalProofSuccess(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - stateMock := mocks.NewStateInterfaceMock(t) + storageMock := mocks.NewStorageInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) aggLayerClient := agglayer.NewAgglayerClientMock(t) @@ -318,14 +321,14 @@ func Test_sendFinalProofSuccess(t *testing.T) { require.NoError(err, "error generating key") a := Aggregator{ - state: stateMock, + storage: storageMock, etherman: etherman, ethTxManager: ethTxManager, aggLayerClient: aggLayerClient, finalProof: make(chan finalProofMsg), logger: log.GetDefaultLogger(), verifyingProof: false, - stateDBMutex: &sync.Mutex{}, + storageMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, rpcClient: rpcMock, @@ -335,7 +338,7 @@ func Test_sendFinalProofSuccess(t *testing.T) { a.ctx, a.exit = context.WithCancel(context.Background()) m := mox{ - stateMock: stateMock, + storageMock: storageMock, ethTxManager: ethTxManager, etherman: etherman, aggLayerClientMock: aggLayerClient, @@ -417,7 +420,7 @@ func Test_sendFinalProofError(t *testing.T) { fmt.Println("Stopping sendFinalProof") a.exit() }).Return(nil, errTest).Once() - m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + m.storageMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() }, asserts: func(a *Aggregator) { assert.False(a.verifyingProof) @@ -441,7 +444,7 @@ func Test_sendFinalProofError(t *testing.T) { fmt.Println("Stopping sendFinalProof") a.exit() }).Return(errTest) - m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + m.storageMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() }, asserts: func(a 
*Aggregator) { assert.False(a.verifyingProof) @@ -463,7 +466,7 @@ func Test_sendFinalProofError(t *testing.T) { fmt.Println("Stopping sendFinalProof") a.exit() }).Return(nil, nil, errTest) - m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + m.storageMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() }, asserts: func(a *Aggregator) { assert.False(a.verifyingProof) @@ -487,7 +490,7 @@ func Test_sendFinalProofError(t *testing.T) { fmt.Println("Stopping sendFinalProof") a.exit() }).Return(nil, errTest).Once() - m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + m.storageMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() }, asserts: func(a *Aggregator) { assert.False(a.verifyingProof) @@ -498,7 +501,7 @@ func Test_sendFinalProofError(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - stateMock := mocks.NewStateInterfaceMock(t) + storageMock := mocks.NewStorageInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) aggLayerClient := agglayer.NewAgglayerClientMock(t) @@ -509,14 +512,14 @@ func Test_sendFinalProofError(t *testing.T) { require.NoError(err, "error generating key") a := Aggregator{ - state: stateMock, + storage: storageMock, etherman: etherman, ethTxManager: ethTxManager, aggLayerClient: aggLayerClient, finalProof: make(chan finalProofMsg), logger: log.GetDefaultLogger(), verifyingProof: false, - stateDBMutex: &sync.Mutex{}, + storageMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, sequencerPrivateKey: privateKey, rpcClient: rpcMock, @@ -526,7 +529,7 @@ func Test_sendFinalProofError(t *testing.T) { a.ctx, a.exit = context.WithCancel(context.Background()) m := mox{ - stateMock: stateMock, + storageMock: storageMock, ethTxManager: ethTxManager, etherman: etherman, aggLayerClientMock: aggLayerClient, @@ -625,16 +628,16 @@ func Test_buildFinalProof(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { proverMock := mocks.NewProverInterfaceMock(t) - stateMock := mocks.NewStateInterfaceMock(t) + storageMock := mocks.NewStorageInterfaceMock(t) rpcMock := mocks.NewRPCInterfaceMock(t) m := mox{ - proverMock: proverMock, - stateMock: stateMock, - rpcMock: rpcMock, + proverMock: proverMock, + storageMock: storageMock, + rpcMock: rpcMock, } a := Aggregator{ - state: stateMock, - logger: log.GetDefaultLogger(), + storage: storageMock, + logger: log.GetDefaultLogger(), cfg: Config{ SenderAddress: common.BytesToAddress([]byte("from")).Hex(), }, @@ -655,9 +658,8 @@ func Test_tryBuildFinalProof(t *testing.T) { errTest := errors.New("test error") from := common.BytesToAddress([]byte("from")) cfg := Config{ - VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, - TxProfitabilityCheckerType: ProfitabilityAcceptAll, - SenderAddress: from.Hex(), + VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, + SenderAddress: from.Hex(), } latestVerifiedBatchNum := uint64(22) batchNum := uint64(23) @@ -727,10 +729,10 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr").Twice() m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() - proofGeneratingTrueCall := 
m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.storageMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.storageMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(nil, errTest).Once() - m.stateMock. + m.storageMock. On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). Return(nil). Once(). @@ -748,11 +750,11 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr").Twice() m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() - proofGeneratingTrueCall := m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.storageMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.storageMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(nil, errTest).Once() - m.stateMock. + m.storageMock. On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). Return(nil). Once(). 
@@ -770,7 +772,7 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Once() m.proverMock.On("Addr").Return(proverID).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, errTest).Once() + m.storageMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, errTest).Once() }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -784,7 +786,7 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Once() m.proverMock.On("Addr").Return(proverID).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, state.ErrNotFound).Once() + m.storageMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, state.ErrNotFound).Once() }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -798,8 +800,8 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return(proverID).Twice() m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() - m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.storageMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + m.storageMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() }, @@ -821,7 +823,7 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Once() m.proverMock.On("Addr").Return(proverID).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, errTest).Once() + m.storageMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, errTest).Once() }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -850,7 +852,7 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Once() m.proverMock.On("Addr").Return(proverID).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, nil).Once() + m.storageMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, nil).Once() }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -865,7 +867,7 @@ func Test_tryBuildFinalProof(t *testing.T) { m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return(proverID).Twice() 
m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() - m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(true, nil).Once() + m.storageMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(true, nil).Once() m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() }, @@ -884,18 +886,18 @@ func Test_tryBuildFinalProof(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - stateMock := mocks.NewStateInterfaceMock(t) + storageMock := mocks.NewStorageInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) proverMock := mocks.NewProverInterfaceMock(t) a := Aggregator{ cfg: cfg, - state: stateMock, + storage: storageMock, etherman: etherman, ethTxManager: ethTxManager, logger: log.GetDefaultLogger(), - stateDBMutex: &sync.Mutex{}, + storageMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), @@ -906,7 +908,7 @@ func Test_tryBuildFinalProof(t *testing.T) { aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) m := mox{ - stateMock: stateMock, + storageMock: storageMock, ethTxManager: ethTxManager, etherman: etherman, proverMock: proverMock, @@ -973,7 +975,7 @@ func Test_tryAggregateProofs(t *testing.T) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, errTest).Once() + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, errTest).Once() }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -986,7 +988,7 @@ func Test_tryAggregateProofs(t *testing.T) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, state.ErrNotFound).Once() + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, state.ErrNotFound).Once() }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -999,12 +1001,12 @@ func Test_tryAggregateProofs(t *testing.T) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") - dbTx := &mocks.DbTxMock{} - dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). 
+ m.txerMock.On("Rollback").Return(nil).Once() + m.storageMock.On("BeginTx", mock.Anything, (*sql.TxOptions)(nil)).Return(m.txerMock, nil).Once() + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1018,18 +1020,19 @@ func Test_tryAggregateProofs(t *testing.T) { assert.ErrorIs(err, errTest) }, }, + { name: "AggregatedProof error", setup: func(m mox, a *Aggregator) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") - dbTx := &mocks.DbTxMock{} - lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() - lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() - proof1GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + + lockProofsTxBegin := m.storageMock.On("BeginTx", mock.MatchedBy(matchProverCtxFn), (*sql.TxOptions)(nil)).Return(m.txerMock, nil).Once() + // lockProofsTxCommit := m.proverMock.On("Commit").Return(nil).Once() + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1037,8 +1040,8 @@ func Test_tryAggregateProofs(t *testing.T) { }). Return(nil). Once() - proof2GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + proof2GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, mock.Anything). Run(func(args mock.Arguments) { // Use a type assertion with a check proofArg, ok := args[1].(*state.Proof) @@ -1050,9 +1053,9 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(nil, errTest).Once() - m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + m.storageMock.On("BeginTx", mock.Anything, (*sql.TxOptions)(nil)).Return(m.txerMock, nil).Once().NotBefore(lockProofsTxBegin) + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) if !ok { @@ -1063,8 +1066,8 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof1GeneratingTrueCall) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, mock.Anything). 
Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) if !ok { @@ -1075,25 +1078,25 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof2GeneratingTrueCall) - dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + m.txerMock.On("Commit").Return(nil) }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) assert.ErrorIs(err, errTest) }, }, + { name: "WaitRecursiveProof prover error", setup: func(m mox, a *Aggregator) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") - dbTx := &mocks.DbTxMock{} - lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() - lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() - proof1GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + lockProofsTxBegin := m.storageMock.On("BeginTx", mock.MatchedBy(matchProverCtxFn), (*sql.TxOptions)(nil)).Return(m.txerMock, nil).Once() + // lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) if !ok { @@ -1103,8 +1106,8 @@ func Test_tryAggregateProofs(t *testing.T) { }). Return(nil). Once() - proof2GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + proof2GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1114,9 +1117,9 @@ func Test_tryAggregateProofs(t *testing.T) { Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, common.Hash{}, errTest).Once() - m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + m.storageMock.On("BeginTx", mock.MatchedBy(matchAggregatorCtxFn), (*sql.TxOptions)(nil)).Return(m.txerMock, nil).Once().NotBefore(lockProofsTxBegin) + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1125,8 +1128,8 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof1GeneratingTrueCall) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, mock.Anything). 
Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1135,25 +1138,25 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof2GeneratingTrueCall) - dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + m.txerMock.On("Commit").Return(nil) }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) assert.ErrorIs(err, errTest) }, }, + { name: "unlockProofsToAggregate error after WaitRecursiveProof prover error", setup: func(m mox, a *Aggregator) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return(proverID) - dbTx := &mocks.DbTxMock{} - lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() - dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() - proof1GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + // lockProofsTxBegin := m.storageMock.On("BeginTx", mock.MatchedBy(matchProverCtxFn)).Return(m.txerMock, nil).Once() + m.txerMock.On("Commit").Return(nil) + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1161,8 +1164,8 @@ func Test_tryAggregateProofs(t *testing.T) { }). Return(nil). Once() - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1172,9 +1175,9 @@ func Test_tryAggregateProofs(t *testing.T) { Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, common.Hash{}, errTest).Once() - m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + m.storageMock.On("BeginTx", mock.Anything, (*sql.TxOptions)(nil)).Return(m.txerMock, nil) + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1183,7 +1186,7 @@ func Test_tryAggregateProofs(t *testing.T) { Return(errTest). Once(). 
NotBefore(proof1GeneratingTrueCall) - dbTx.On("Rollback", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once() + m.txerMock.On("Rollback").Return(nil).Once() }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -1196,12 +1199,11 @@ func Test_tryAggregateProofs(t *testing.T) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") - dbTx := &mocks.DbTxMock{} - lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() - lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() - proof1GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + // lockProofsTxBegin := m.storageMock.On("BeginTx", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + // lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1209,8 +1211,8 @@ func Test_tryAggregateProofs(t *testing.T) { }). Return(nil). Once() - proof2GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + proof2GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1220,11 +1222,11 @@ func Test_tryAggregateProofs(t *testing.T) { Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, common.Hash{}, nil).Once() - m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(errTest).Once() - dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + m.storageMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, mock.Anything).Return(errTest).Once() + m.txerMock.On("Rollback").Return(nil).Once() + m.storageMock.On("BeginTx", mock.Anything, (*sql.TxOptions)(nil)).Return(m.txerMock, nil) + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1233,8 +1235,8 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof1GeneratingTrueCall) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + m.storageMock. 
+ On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1243,25 +1245,25 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof2GeneratingTrueCall) - dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + m.txerMock.On("Commit").Return(nil) }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) assert.ErrorIs(err, errTest) }, }, + { name: "rollback after AddGeneratedProof error in db transaction", setup: func(m mox, a *Aggregator) { m.proverMock.On("Name").Return(proverName).Twice() m.proverMock.On("ID").Return(proverID).Twice() m.proverMock.On("Addr").Return("addr") - dbTx := &mocks.DbTxMock{} - lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() - lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() - proof1GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + // lockProofsTxBegin := m.storageMock.On("BeginTx", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + // lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1269,8 +1271,8 @@ func Test_tryAggregateProofs(t *testing.T) { }). Return(nil). Once() - proof2GeneratingTrueCall := m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + proof2GeneratingTrueCall := m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1280,12 +1282,12 @@ func Test_tryAggregateProofs(t *testing.T) { Once() m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, common.Hash{}, nil).Once() - m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() - m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Return(errTest).Once() - dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() - m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). 
+ m.storageMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, mock.Anything).Return(nil).Once() + m.storageMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, mock.Anything).Return(errTest).Once() + m.txerMock.On("Rollback").Return(nil).Once() + m.storageMock.On("BeginTx", mock.Anything, (*sql.TxOptions)(nil)).Return(m.txerMock, nil) + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1294,8 +1296,8 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof1GeneratingTrueCall) - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1304,7 +1306,7 @@ func Test_tryAggregateProofs(t *testing.T) { Return(nil). Once(). NotBefore(proof2GeneratingTrueCall) - dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + m.txerMock.On("Commit").Return(nil) }, asserts: func(result bool, a *Aggregator, err error) { assert.False(result) @@ -1314,16 +1316,16 @@ func Test_tryAggregateProofs(t *testing.T) { { name: "time to send final, state error", setup: func(m mox, a *Aggregator) { + a.accInputHashes = make(map[uint64]common.Hash) a.cfg.VerifyProofInterval = types.Duration{Duration: time.Nanosecond} m.proverMock.On("Name").Return(proverName).Times(3) m.proverMock.On("ID").Return(proverID).Times(3) m.proverMock.On("Addr").Return("addr") - dbTx := &mocks.DbTxMock{} - m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() - dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Twice() - m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + m.storageMock.On("BeginTx", mock.Anything, (*sql.TxOptions)(nil)).Return(m.txerMock, nil) + m.txerMock.On("Commit").Return(nil) + m.storageMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, mock.Anything). Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1331,8 +1333,8 @@ func Test_tryAggregateProofs(t *testing.T) { }). Return(nil). Once() - m.stateMock. - On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + m.storageMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, mock.Anything). 
Run(func(args mock.Arguments) { proofArg, ok := args[1].(*state.Proof) assert.True(ok, "Expected argument of type *state.Proof") @@ -1343,14 +1345,14 @@ func Test_tryAggregateProofs(t *testing.T) { m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, common.Hash{}, nil).Once() - m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() + m.storageMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, m.txerMock).Return(nil).Once() expectedInputProver := map[string]interface{}{ "recursive_proof_1": proof1.Proof, "recursive_proof_2": proof2.Proof, } b, err := json.Marshal(expectedInputProver) require.NoError(err) - m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Run( + m.storageMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, mock.Anything).Run( func(args mock.Arguments) { proof, ok := args[1].(*state.Proof) if !ok { @@ -1367,7 +1369,7 @@ func Test_tryAggregateProofs(t *testing.T) { ).Return(nil).Once() m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(42), errTest).Once() - m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( + m.storageMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( func(args mock.Arguments) { proof, ok := args[1].(*state.Proof) if !ok { @@ -1392,17 +1394,18 @@ func Test_tryAggregateProofs(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - stateMock := mocks.NewStateInterfaceMock(t) + storageMock := mocks.NewStorageInterfaceMock(t) ethTxManager := mocks.NewEthTxManagerClientMock(t) etherman := mocks.NewEthermanMock(t) proverMock := mocks.NewProverInterfaceMock(t) + txerMock := mocks.NewTxerMock(t) a := Aggregator{ cfg: cfg, - state: stateMock, + storage: storageMock, etherman: etherman, ethTxManager: ethTxManager, logger: log.GetDefaultLogger(), - stateDBMutex: &sync.Mutex{}, + storageMutex: &sync.Mutex{}, timeSendFinalProofMutex: &sync.RWMutex{}, timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, finalProof: make(chan finalProofMsg), @@ -1412,10 +1415,11 @@ func Test_tryAggregateProofs(t *testing.T) { aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck a.ctx, a.exit = context.WithCancel(aggregatorCtx) m := mox{ - stateMock: stateMock, + storageMock: storageMock, ethTxManager: ethTxManager, etherman: etherman, proverMock: proverMock, + txerMock: txerMock, } if tc.setup != nil { tc.setup(m, &a) @@ -1431,6 +1435,440 @@ func Test_tryAggregateProofs(t *testing.T) { } } +func Test_tryGenerateBatchProof(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + from := common.BytesToAddress([]byte("from")) + cfg := Config{ + VerifyProofInterval: types.Duration{Duration: time.Duration(10000000)}, + SenderAddress: from.Hex(), + IntervalAfterWhichBatchConsolidateAnyway: types.Duration{Duration: time.Second * 1}, + ChainID: uint64(1), + ForkId: uint64(12), + BatchProofSanityCheckEnabled: true, + } + lastVerifiedBatchNum := uint64(22) + + batchNum := uint64(23) + + batchToProve := state.Batch{ + BatchNumber: batchNum, + } + + proofID := "proofId" + + proverName := "proverName" + proverID := "proverID" + errTest 
:= errors.New("test error") + errAIH := fmt.Errorf("failed to build input prover, acc input hash for previous batch (22) is not in memory") + proverCtx := context.WithValue(context.Background(), "owner", ownerProver) //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerProver } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == ownerAggregator } + fixedTimestamp := time.Date(2023, 10, 13, 15, 0, 0, 0, time.UTC) + + l1InfoTreeLeaf := []synchronizer.L1InfoTreeLeaf{ + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + { + GlobalExitRoot: common.Hash{}, + PreviousBlockHash: common.Hash{}, + Timestamp: fixedTimestamp, + }, + } + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + }{ + { + name: "getAndLockBatchToProve returns AIH error", + setup: func(m mox, a *Aggregator) { + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(1), + ToBatchNumber: uint64(2), + } + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: 1, + BatchL2Data: []byte{ + 0xb, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x1, 0xc8, 0xb, 0x0, 0x0, 0x3, 0x15, 0x0, 0x1, 0x8a, 0xf8, + }, + L1InfoRoot: &l1InfoRoot, + } + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, []byte("batchL2Data"), common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + + m.proverMock.On("Name").Return(proverName) + m.proverMock.On("ID").Return(proverID) + m.proverMock.On("Addr").Return("addr") + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(0), nil) + m.storageMock.On("CheckProofExistsForBatch", mock.Anything, uint64(1), nil).Return(false, nil) + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.Anything, mock.Anything).Return(&sequence, nil) + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, mock.Anything).Return(&virtualBatch, nil) + m.synchronizerMock.On("GetL1BlockByNumber", mock.Anything, mock.Anything).Return(&synchronizer.L1Block{ParentHash: common.Hash{}}, nil) + m.rpcMock.On("GetBatch", mock.Anything).Return(rpcBatch, nil) + m.rpcMock.On("GetWitness", mock.Anything, false).Return([]byte("witness"), nil) + m.storageMock.On("AddGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil) + m.storageMock.On("AddSequence", mock.Anything, mock.Anything, nil).Return(nil) + m.storageMock.On("DeleteGeneratedProofs", mock.Anything, uint64(1), uint64(1), nil).Return(nil) + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil) + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(1), + }, + }, nil) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorContains(err, errAIH.Error()) + }, + }, + { + name: "getAndLockBatchToProve returns generic error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + 
m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(0), errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "getAndLockBatchToProve returns ErrNotFound", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(0), state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "BatchProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, + } + + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, mock.Anything).Return(&virtualBatch, nil).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.storageMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(true, nil) + m.storageMock.On("CleanupGeneratedProofs", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.Anything, mock.Anything).Return(&sequence, nil) + + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetWitness", mock.Anything, false).Return([]byte("witness"), nil) + m.rpcMock.On("GetBatch", mock.Anything).Return(rpcBatch, nil) + m.storageMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.storageMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil) + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil) + + m.proverMock.On("BatchProof", mock.Anything).Return(nil, errTest).Once() + m.storageMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, 
batchToProve.BatchNumber, nil).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, + } + + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.storageMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + m.storageMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.storageMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil) + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil) + + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) + m.proverMock.On("BatchProof", mock.Anything).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, common.Hash{}, errTest).Once() + m.storageMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(nil) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "WaitRecursiveProof no error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName) + m.proverMock.On("ID").Return(proverID) + m.proverMock.On("Addr").Return("addr") + + batchL2Data, err := hex.DecodeString(codedL2Block1) + 
require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, + } + + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil) + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil) + m.storageMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil) + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), lastVerifiedBatchNum+1).Return(&sequence, nil) + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + m.storageMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil) + m.storageMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil) + + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil) + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil) + + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) + m.proverMock.On("BatchProof", mock.Anything).Return(&proofID, nil) + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, common.Hash{}, nil) + m.storageMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + }, + { + name: "DeleteBatchProofs error after WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + + batchL2Data, err := hex.DecodeString(codedL2Block1) + require.NoError(err) + l1InfoRoot := common.HexToHash("0x057e9950fbd39b002e323f37c2330d0c096e66919e24cc96fb4b2dfa8f4af782") + + m.etherman.On("GetLatestVerifiedBatchNum").Return(lastVerifiedBatchNum, nil).Once() + m.storageMock.On("CheckProofExistsForBatch", mock.MatchedBy(matchProverCtxFn), mock.AnythingOfType("uint64"), nil).Return(false, nil).Once() + sequence := synchronizer.SequencedBatches{ + FromBatchNumber: uint64(10), + ToBatchNumber: uint64(20), + } + m.synchronizerMock.On("GetSequenceByBatchNumber", mock.MatchedBy(matchProverCtxFn), 
lastVerifiedBatchNum+1).Return(&sequence, nil).Once() + rpcBatch := rpctypes.NewRPCBatch(lastVerifiedBatchNum+1, common.Hash{}, []string{}, batchL2Data, common.Hash{}, common.BytesToHash([]byte("mock LocalExitRoot")), common.BytesToHash([]byte("mock StateRoot")), common.Address{}, false) + rpcBatch.SetLastL2BLockTimestamp(uint64(time.Now().Unix())) + m.rpcMock.On("GetBatch", lastVerifiedBatchNum+1).Return(rpcBatch, nil) + m.storageMock.On("AddSequence", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Return(nil).Once() + m.storageMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(batchToProve.BatchNumber, proof.BatchNumber) + assert.Equal(batchToProve.BatchNumber, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.synchronizerMock.On("GetLeafsByL1InfoRoot", mock.Anything, l1InfoRoot).Return(l1InfoTreeLeaf, nil) + m.synchronizerMock.On("GetL1InfoTreeLeaves", mock.Anything, mock.Anything).Return(map[uint32]synchronizer.L1InfoTreeLeaf{ + 1: { + BlockNumber: uint64(35), + }, + }, nil) + + m.rpcMock.On("GetWitness", lastVerifiedBatchNum+1, false).Return([]byte("witness"), nil) + + virtualBatch := synchronizer.VirtualBatch{ + BatchNumber: lastVerifiedBatchNum + 1, + BatchL2Data: batchL2Data, + L1InfoRoot: &l1InfoRoot, + } + + m.synchronizerMock.On("GetVirtualBatchByBatchNumber", mock.Anything, lastVerifiedBatchNum+1).Return(&virtualBatch, nil).Once() + + m.proverMock.On("BatchProof", mock.Anything).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, common.Hash{}, errTest).Once() + m.storageMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchAggregatorCtxFn), batchToProve.BatchNumber, batchToProve.BatchNumber, nil).Return(errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + } + + for x, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + storageMock := mocks.NewStorageInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + synchronizerMock := mocks.NewSynchronizerInterfaceMock(t) + mockRPC := mocks.NewRPCInterfaceMock(t) + + a := Aggregator{ + cfg: cfg, + storage: storageMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + storageMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan finalProofMsg), + l1Syncr: synchronizerMock, + rpcClient: mockRPC, + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, + } + if x > 0 { + a.accInputHashes = populateAccInputHashes() + } + aggregatorCtx := context.WithValue(context.Background(), "owner", ownerAggregator) //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + + m := mox{ + storageMock: storageMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + synchronizerMock: synchronizerMock, + rpcMock: mockRPC, + } + if tc.setup != nil { + tc.setup(m, &a) + } + 
a.resetVerifyProofTime() + + result, err := a.tryGenerateBatchProof(proverCtx, proverMock) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + }) + } +} + +func populateAccInputHashes() map[uint64]common.Hash { + accInputHashes := make(map[uint64]common.Hash) + for i := 10; i < 200; i++ { + accInputHashes[uint64(i)] = common.BytesToHash([]byte(fmt.Sprintf("hash%d", i))) + } + return accInputHashes +} + func Test_accInputHashFunctions(t *testing.T) { aggregator := Aggregator{ accInputHashes: make(map[uint64]common.Hash), @@ -1451,3 +1889,32 @@ func Test_accInputHashFunctions(t *testing.T) { aggregator.removeAccInputHashes(1, 2) assert.Equal(t, 0, len(aggregator.accInputHashes)) } + +func Test_sanityChecks(t *testing.T) { + batchToProve := state.Batch{ + BatchNumber: 1, + StateRoot: common.HexToHash("0x01"), + AccInputHash: common.HexToHash("0x02"), + } + + aggregator := Aggregator{ + accInputHashes: make(map[uint64]common.Hash), + accInputHashesMutex: &sync.Mutex{}, + } + + aggregator.performSanityChecks(log.GetDefaultLogger(), batchToProve.StateRoot, batchToProve.AccInputHash, &batchToProve) + + // Halt by SR sanity check + go func() { + aggregator.performSanityChecks(log.GetDefaultLogger(), common.HexToHash("0x03"), batchToProve.AccInputHash, &batchToProve) + time.Sleep(5 * time.Second) + return + }() + + // Halt by AIH sanity check + go func() { + aggregator.performSanityChecks(log.GetDefaultLogger(), batchToProve.StateRoot, common.HexToHash("0x04"), &batchToProve) + time.Sleep(5 * time.Second) + return + }() +} diff --git a/aggregator/config.go b/aggregator/config.go index 2d7178f7..e17d68af 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -4,7 +4,6 @@ import ( "fmt" "math/big" - "github.com/0xPolygon/cdk/aggregator/db" "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" @@ -62,14 +61,6 @@ type Config struct { // ProofStatePollingInterval is the interval time to polling the prover about the generation state of a proof ProofStatePollingInterval types.Duration `mapstructure:"ProofStatePollingInterval"` - // TxProfitabilityCheckerType type for checking is it profitable for aggregator to validate batch - // possible values: base/acceptall - TxProfitabilityCheckerType TxProfitabilityCheckerType `mapstructure:"TxProfitabilityCheckerType"` - - // TxProfitabilityMinReward min reward for base tx profitability checker when aggregator will validate batch - // this parameter is used for the base tx profitability checker - TxProfitabilityMinReward TokenAmountWithDecimals `mapstructure:"TxProfitabilityMinReward"` - // IntervalAfterWhichBatchConsolidateAnyway is the interval duration for the main sequencer to check // if there are no transactions. If there are no transactions in this interval, the sequencer will // consolidate the batch anyway. 
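The config hunk above removes the TxProfitability settings, and the one just below swaps the Postgres DB block for a single DBPath pointing at the SQLite file used by the new dbstorage package introduced later in this diff. A hypothetical wiring sketch (the path and the surrounding main are assumptions; only the DBPath field and the dbstorage.NewDBStorage signature come from this diff):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/0xPolygon/cdk/aggregator/db/dbstorage"
    )

    func main() {
    	// One file path replaces the former Name/User/Password/Host/Port/MaxConns settings.
    	dbPath := "/tmp/aggregator.sqlite" // hypothetical location

    	storage, err := dbstorage.NewDBStorage(dbPath)
    	if err != nil {
    		log.Fatalf("opening aggregator storage: %v", err)
    	}
    	fmt.Printf("aggregator storage ready: %T\n", storage)
    }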
@@ -117,8 +108,8 @@ type Config struct { // UseFullWitness is a flag to enable the use of full witness in the aggregator UseFullWitness bool `mapstructure:"UseFullWitness"` - // DB is the database configuration - DB db.Config `mapstructure:"DB"` + // DBPath is the path to the database + DBPath string `mapstructure:"DBPath"` // EthTxManager is the config for the ethtxmanager EthTxManager ethtxmanager.Config `mapstructure:"EthTxManager"` diff --git a/aggregator/db/config.go b/aggregator/db/config.go deleted file mode 100644 index ad56155f..00000000 --- a/aggregator/db/config.go +++ /dev/null @@ -1,25 +0,0 @@ -package db - -// Config provide fields to configure the pool -type Config struct { - // Database name - Name string `mapstructure:"Name"` - - // Database User name - User string `mapstructure:"User"` - - // Database Password of the user - Password string `mapstructure:"Password"` - - // Host address of database - Host string `mapstructure:"Host"` - - // Port Number of database - Port string `mapstructure:"Port"` - - // EnableLog - EnableLog bool `mapstructure:"EnableLog"` - - // MaxConns is the maximum number of connections in the pool. - MaxConns int `mapstructure:"MaxConns"` -} diff --git a/aggregator/db/db.go b/aggregator/db/db.go deleted file mode 100644 index ecfffc11..00000000 --- a/aggregator/db/db.go +++ /dev/null @@ -1,31 +0,0 @@ -package db - -import ( - "context" - "fmt" - - "github.com/0xPolygon/cdk/log" - "github.com/jackc/pgx/v4/pgxpool" -) - -// NewSQLDB creates a new SQL DB -func NewSQLDB(logger *log.Logger, cfg Config) (*pgxpool.Pool, error) { - config, err := pgxpool.ParseConfig(fmt.Sprintf("postgres://%s:%s@%s:%s/%s?pool_max_conns=%d", - cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, cfg.MaxConns)) - if err != nil { - logger.Errorf("Unable to parse DB config: %v\n", err) - return nil, err - } - - if cfg.EnableLog { - config.ConnConfig.Logger = dbLoggerImpl{} - } - - conn, err := pgxpool.ConnectConfig(context.Background(), config) - if err != nil { - logger.Errorf("Unable to connect to database: %v\n", err) - return nil, err - } - - return conn, nil -} diff --git a/aggregator/db/dbstorage/dbstorage.go b/aggregator/db/dbstorage/dbstorage.go new file mode 100644 index 00000000..b20a1c71 --- /dev/null +++ b/aggregator/db/dbstorage/dbstorage.go @@ -0,0 +1,35 @@ +package dbstorage + +import ( + "context" + "database/sql" + + "github.com/0xPolygon/cdk/db" +) + +// DBStorage implements the Storage interface +type DBStorage struct { + DB *sql.DB +} + +// NewDBStorage creates a new DBStorage instance +func NewDBStorage(dbPath string) (*DBStorage, error) { + db, err := db.NewSQLiteDB(dbPath) + if err != nil { + return nil, err + } + + return &DBStorage{DB: db}, nil +} + +func (d *DBStorage) BeginTx(ctx context.Context, options *sql.TxOptions) (db.Txer, error) { + return db.NewTx(ctx, d.DB) +} + +func (d *DBStorage) getExecQuerier(dbTx db.Txer) db.Querier { + if dbTx == nil { + return d.DB + } + + return dbTx +} diff --git a/aggregator/db/dbstorage/proof.go b/aggregator/db/dbstorage/proof.go new file mode 100644 index 00000000..d3065c7e --- /dev/null +++ b/aggregator/db/dbstorage/proof.go @@ -0,0 +1,356 @@ +package dbstorage + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/state" +) + +// CheckProofExistsForBatch checks if the batch is already included in any proof +func (d *DBStorage) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx db.Txer) (bool, error) { + const 
checkProofExistsForBatchSQL = `
+		SELECT EXISTS (SELECT 1 FROM proof p WHERE $1 >= p.batch_num AND $1 <= p.batch_num_final)
+	`
+	e := d.getExecQuerier(dbTx)
+	var exists bool
+	err := e.QueryRow(checkProofExistsForBatchSQL, batchNumber).Scan(&exists)
+	if err != nil && !errors.Is(err, sql.ErrNoRows) {
+		return exists, err
+	}
+	return exists, nil
+}
+
+// CheckProofContainsCompleteSequences checks if a recursive proof contains complete sequences
+func (d *DBStorage) CheckProofContainsCompleteSequences(
+	ctx context.Context, proof *state.Proof, dbTx db.Txer,
+) (bool, error) {
+	const getProofContainsCompleteSequencesSQL = `
+		SELECT EXISTS (SELECT 1 FROM sequence s1 WHERE s1.from_batch_num = $1) AND
+			EXISTS (SELECT 1 FROM sequence s2 WHERE s2.to_batch_num = $2)
+	`
+	e := d.getExecQuerier(dbTx)
+	var exists bool
+	err := e.QueryRow(getProofContainsCompleteSequencesSQL, proof.BatchNumber, proof.BatchNumberFinal).Scan(&exists)
+	if err != nil && !errors.Is(err, sql.ErrNoRows) {
+		return exists, err
+	}
+	return exists, nil
+}
+
+// GetProofReadyToVerify returns the proof that is ready to verify
+func (d *DBStorage) GetProofReadyToVerify(
+	ctx context.Context, lastVerifiedBatchNumber uint64, dbTx db.Txer,
+) (*state.Proof, error) {
+	const getProofReadyToVerifySQL = `
+		SELECT
+			p.batch_num,
+			p.batch_num_final,
+			p.proof,
+			p.proof_id,
+			p.input_prover,
+			p.prover,
+			p.prover_id,
+			p.generating_since,
+			p.created_at,
+			p.updated_at
+		FROM proof p
+		WHERE batch_num = $1 AND generating_since IS NULL AND
+			EXISTS (SELECT 1 FROM sequence s1 WHERE s1.from_batch_num = p.batch_num) AND
+			EXISTS (SELECT 1 FROM sequence s2 WHERE s2.to_batch_num = p.batch_num_final)
+	`
+
+	var proof = &state.Proof{}
+
+	e := d.getExecQuerier(dbTx)
+	row := e.QueryRow(getProofReadyToVerifySQL, lastVerifiedBatchNumber+1)
+
+	var (
+		generatingSince *uint64
+		createdAt       *uint64
+		updatedAt       *uint64
+	)
+	err := row.Scan(
+		&proof.BatchNumber, &proof.BatchNumberFinal, &proof.Proof, &proof.ProofID,
+		&proof.InputProver, &proof.Prover, &proof.ProverID, &generatingSince,
+		&createdAt, &updatedAt,
+	)
+
+	if generatingSince != nil {
+		timeSince := time.Unix(int64(*generatingSince), 0)
+		proof.GeneratingSince = &timeSince
+	}
+
+	if createdAt != nil {
+		proof.CreatedAt = time.Unix(int64(*createdAt), 0)
+	}
+
+	if updatedAt != nil {
+		proof.UpdatedAt = time.Unix(int64(*updatedAt), 0)
+	}
+
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, state.ErrNotFound
+	} else if err != nil {
+		return nil, err
+	}
+
+	return proof, err
+}
+
+// GetProofsToAggregate returns the next two proofs that can be aggregated
+func (d *DBStorage) GetProofsToAggregate(ctx context.Context, dbTx db.Txer) (*state.Proof, *state.Proof, error) {
+	var (
+		proof1 = &state.Proof{}
+		proof2 = &state.Proof{}
+	)
+
+	// TODO: add comments to explain the query
+	const getProofsToAggregateSQL = `
+		SELECT
+			p1.batch_num as p1_batch_num,
+			p1.batch_num_final as p1_batch_num_final,
+			p1.proof as p1_proof,
+			p1.proof_id as p1_proof_id,
+			p1.input_prover as p1_input_prover,
+			p1.prover as p1_prover,
+			p1.prover_id as p1_prover_id,
+			p1.generating_since as p1_generating_since,
+			p1.created_at as p1_created_at,
+			p1.updated_at as p1_updated_at,
+			p2.batch_num as p2_batch_num,
+			p2.batch_num_final as p2_batch_num_final,
+			p2.proof as p2_proof,
+			p2.proof_id as p2_proof_id,
+			p2.input_prover as p2_input_prover,
+			p2.prover as p2_prover,
+			p2.prover_id as p2_prover_id,
+			p2.generating_since as p2_generating_since,
+			p2.created_at as p2_created_at,
+			p2.updated_at as p2_updated_at
+		FROM proof p1 INNER JOIN proof p2 ON p1.batch_num_final = p2.batch_num - 1
+		WHERE p1.generating_since IS NULL AND p2.generating_since IS NULL AND
+			p1.proof IS NOT NULL AND p2.proof IS NOT NULL AND
+			(
+				EXISTS (
+					SELECT 1 FROM sequence s
+					WHERE p1.batch_num >= s.from_batch_num AND p1.batch_num <= s.to_batch_num AND
+						p1.batch_num_final >= s.from_batch_num AND p1.batch_num_final <= s.to_batch_num AND
+						p2.batch_num >= s.from_batch_num AND p2.batch_num <= s.to_batch_num AND
+						p2.batch_num_final >= s.from_batch_num AND p2.batch_num_final <= s.to_batch_num
+				)
+				OR
+				(
+					EXISTS ( SELECT 1 FROM sequence s WHERE p1.batch_num = s.from_batch_num) AND
+					EXISTS ( SELECT 1 FROM sequence s WHERE p1.batch_num_final = s.to_batch_num) AND
+					EXISTS ( SELECT 1 FROM sequence s WHERE p2.batch_num = s.from_batch_num) AND
+					EXISTS ( SELECT 1 FROM sequence s WHERE p2.batch_num_final = s.to_batch_num)
+				)
+			)
+		ORDER BY p1.batch_num ASC
+		LIMIT 1
+	`
+
+	e := d.getExecQuerier(dbTx)
+	row := e.QueryRow(getProofsToAggregateSQL)
+
+	var (
+		generatingSince1, generatingSince2 *uint64
+		createdAt1, createdAt2             *uint64
+		updatedAt1, updatedAt2             *uint64
+	)
+
+	err := row.Scan(
+		&proof1.BatchNumber, &proof1.BatchNumberFinal, &proof1.Proof, &proof1.ProofID,
+		&proof1.InputProver, &proof1.Prover, &proof1.ProverID, &generatingSince1,
+		&createdAt1, &updatedAt1,
+		&proof2.BatchNumber, &proof2.BatchNumberFinal, &proof2.Proof, &proof2.ProofID,
+		&proof2.InputProver, &proof2.Prover, &proof2.ProverID, &generatingSince2,
+		&createdAt2, &updatedAt2,
+	)
+
+	if generatingSince1 != nil {
+		timeSince1 := time.Unix(int64(*generatingSince1), 0)
+		proof1.GeneratingSince = &timeSince1
+	}
+
+	if generatingSince2 != nil {
+		timeSince2 := time.Unix(int64(*generatingSince2), 0)
+		proof2.GeneratingSince = &timeSince2
+	}
+
+	if createdAt1 != nil {
+		proof1.CreatedAt = time.Unix(int64(*createdAt1), 0)
+	}
+
+	if createdAt2 != nil {
+		proof2.CreatedAt = time.Unix(int64(*createdAt2), 0)
+	}
+
+	if updatedAt1 != nil {
+		proof1.UpdatedAt = time.Unix(int64(*updatedAt1), 0)
+	}
+
+	if updatedAt2 != nil {
+		proof2.UpdatedAt = time.Unix(int64(*updatedAt2), 0)
+	}
+
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil, state.ErrNotFound
+	} else if err != nil {
+		return nil, nil, err
+	}
+
+	return proof1, proof2, err
+}
+
+// AddGeneratedProof adds a generated proof to the storage
+func (d *DBStorage) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx db.Txer) error {
+	const addGeneratedProofSQL = `
+		INSERT INTO proof (
+			batch_num, batch_num_final, proof, proof_id, input_prover, prover,
+			prover_id, generating_since, created_at, updated_at
+		) VALUES (
+			$1, $2, $3, $4, $5, $6, $7, $8, $9, $10
+		)
+	`
+	e := d.getExecQuerier(dbTx)
+	now := time.Now().UTC().Round(time.Microsecond)
+
+	var (
+		generatingSince *uint64
+		createdAt       *uint64
+		updatedAt       *uint64
+	)
+
+	if proof.GeneratingSince != nil {
+		generatingSince = new(uint64)
+		*generatingSince = uint64(proof.GeneratingSince.Unix())
+	}
+
+	if !proof.CreatedAt.IsZero() {
+		createdAt = new(uint64)
+		*createdAt = uint64(proof.CreatedAt.Unix())
+	} else {
+		createdAt = new(uint64)
+		*createdAt = uint64(now.Unix())
+	}
+
+	if !proof.UpdatedAt.IsZero() {
+		updatedAt = new(uint64)
+		*updatedAt = uint64(proof.UpdatedAt.Unix())
+	} else {
+		updatedAt = new(uint64)
+		*updatedAt = uint64(now.Unix())
+	}
+
+	_, err := e.Exec(
+		addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID,
+		proof.InputProver, proof.Prover, proof.ProverID,
generatingSince, createdAt, updatedAt, + ) + return err +} + +// UpdateGeneratedProof updates a generated proof in the storage +func (d *DBStorage) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx db.Txer) error { + const updateGeneratedProofSQL = ` + UPDATE proof + SET proof = $3, + proof_id = $4, + input_prover = $5, + prover = $6, + prover_id = $7, + generating_since = $8, + updated_at = $9 + WHERE batch_num = $1 + AND batch_num_final = $2 + ` + e := d.getExecQuerier(dbTx) + now := time.Now().UTC().Round(time.Microsecond) + + var ( + generatingSince *uint64 + updatedAt *uint64 + ) + + if proof.GeneratingSince != nil { + generatingSince = new(uint64) + *generatingSince = uint64(proof.GeneratingSince.Unix()) + } + + if !proof.UpdatedAt.IsZero() { + updatedAt = new(uint64) + *updatedAt = uint64(proof.UpdatedAt.Unix()) + } else { + updatedAt = new(uint64) + *updatedAt = uint64(now.Unix()) + } + _, err := e.Exec( + updateGeneratedProofSQL, proof.Proof, proof.ProofID, proof.InputProver, + proof.Prover, proof.ProverID, generatingSince, updatedAt, proof.BatchNumber, proof.BatchNumberFinal, + ) + return err +} + +// DeleteGeneratedProofs deletes from the storage the generated proofs falling +// inside the batch numbers range. +func (d *DBStorage) DeleteGeneratedProofs( + ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx db.Txer, +) error { + const deleteGeneratedProofSQL = "DELETE FROM proof WHERE batch_num >= $1 AND batch_num_final <= $2" + e := d.getExecQuerier(dbTx) + _, err := e.Exec(deleteGeneratedProofSQL, batchNumber, batchNumberFinal) + return err +} + +// CleanupGeneratedProofs deletes from the storage the generated proofs up to +// the specified batch number included. +func (d *DBStorage) CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx db.Txer) error { + const deleteGeneratedProofSQL = "DELETE FROM proof WHERE batch_num_final <= $1" + e := d.getExecQuerier(dbTx) + _, err := e.Exec(deleteGeneratedProofSQL, batchNumber) + return err +} + +// CleanupLockedProofs deletes from the storage the proofs locked in generating +// state for more than the provided threshold. +func (d *DBStorage) CleanupLockedProofs(ctx context.Context, duration string, dbTx db.Txer) (int64, error) { + seconds, err := convertDurationToSeconds(duration) + if err != nil { + return 0, err + } + + difference := time.Now().Unix() - seconds + + sql := fmt.Sprintf("DELETE FROM proof WHERE generating_since is not null and generating_since < %d", difference) + e := d.getExecQuerier(dbTx) + ct, err := e.Exec(sql) + if err != nil { + return 0, err + } + return ct.RowsAffected() +} + +// DeleteUngeneratedProofs deletes ungenerated proofs. 
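+// It removes every proof row whose generating_since is still set, i.e. proofs a prover was generating when the previous run stopped.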
+// This method is meant to be use during aggregator boot-up sequence +func (d *DBStorage) DeleteUngeneratedProofs(ctx context.Context, dbTx db.Txer) error { + const deleteUngeneratedProofsSQL = "DELETE FROM proof WHERE generating_since IS NOT NULL" + e := d.getExecQuerier(dbTx) + _, err := e.Exec(deleteUngeneratedProofsSQL) + return err +} + +func convertDurationToSeconds(duration string) (int64, error) { + // Parse the duration using time.ParseDuration + parsedDuration, err := time.ParseDuration(duration) + if err != nil { + return 0, fmt.Errorf("invalid duration format: %w", err) + } + + // Return the duration in seconds + return int64(parsedDuration.Seconds()), nil +} diff --git a/aggregator/db/dbstorage/proof_test.go b/aggregator/db/dbstorage/proof_test.go new file mode 100644 index 00000000..f8095086 --- /dev/null +++ b/aggregator/db/dbstorage/proof_test.go @@ -0,0 +1,150 @@ +package dbstorage + +import ( + "context" + "math" + "testing" + "time" + + "github.com/0xPolygon/cdk/aggregator/db" + "github.com/0xPolygon/cdk/state" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + proofID = "proof_1" + prover = "prover_1" + proverID = "prover_id" +) + +func Test_Proof(t *testing.T) { + dbPath := "file::memory:?cache=shared" + err := db.RunMigrationsUp(dbPath, db.AggregatorMigrationName) + assert.NoError(t, err) + + ctx := context.Background() + now := time.Now() + + DBStorage, err := NewDBStorage(dbPath) + assert.NoError(t, err) + + dbtxer, err := DBStorage.BeginTx(ctx, nil) + require.NoError(t, err) + + exists, err := DBStorage.CheckProofExistsForBatch(ctx, 1, dbtxer) + assert.NoError(t, err) + assert.False(t, exists) + + proof := state.Proof{ + BatchNumber: 1, + BatchNumberFinal: 1, + Proof: "proof content", + InputProver: "input prover", + ProofID: &proofID, + Prover: &prover, + ProverID: &proofID, + GeneratingSince: nil, + CreatedAt: now, + UpdatedAt: now, + } + + err = DBStorage.AddGeneratedProof(ctx, &proof, dbtxer) + assert.NoError(t, err) + + err = DBStorage.AddSequence(ctx, state.Sequence{FromBatchNumber: 1, ToBatchNumber: 1}, dbtxer) + assert.NoError(t, err) + + contains, err := DBStorage.CheckProofContainsCompleteSequences(ctx, &proof, dbtxer) + assert.NoError(t, err) + assert.True(t, contains) + + proof2, err := DBStorage.GetProofReadyToVerify(ctx, 0, dbtxer) + assert.NoError(t, err) + assert.NotNil(t, proof2) + + require.Equal(t, proof.BatchNumber, proof2.BatchNumber) + require.Equal(t, proof.BatchNumberFinal, proof2.BatchNumberFinal) + require.Equal(t, proof.Proof, proof2.Proof) + require.Equal(t, *proof.ProofID, *proof2.ProofID) + require.Equal(t, proof.InputProver, proof2.InputProver) + require.Equal(t, *proof.Prover, *proof2.Prover) + require.Equal(t, *proof.ProverID, *proof2.ProverID) + require.Equal(t, proof.CreatedAt.Unix(), proof2.CreatedAt.Unix()) + require.Equal(t, proof.UpdatedAt.Unix(), proof2.UpdatedAt.Unix()) + + proof = state.Proof{ + BatchNumber: 1, + BatchNumberFinal: 1, + Proof: "proof content", + InputProver: "input prover", + ProofID: &proofID, + Prover: &prover, + ProverID: &proofID, + GeneratingSince: &now, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + err = DBStorage.UpdateGeneratedProof(ctx, &proof, dbtxer) + assert.NoError(t, err) + + sequence := state.Sequence{FromBatchNumber: 3, ToBatchNumber: 4} + + proof3 := state.Proof{ + BatchNumber: 3, + BatchNumberFinal: 3, + GeneratingSince: nil, + } + + proof4 := state.Proof{ + BatchNumber: 4, + BatchNumberFinal: 4, + GeneratingSince: nil, + } + + err = 
DBStorage.AddSequence(ctx, sequence, dbtxer) + assert.NoError(t, err) + + err = DBStorage.AddGeneratedProof(ctx, &proof3, dbtxer) + assert.NoError(t, err) + + err = DBStorage.AddGeneratedProof(ctx, &proof4, dbtxer) + assert.NoError(t, err) + + proof5, proof6, err := DBStorage.GetProofsToAggregate(ctx, dbtxer) + assert.NoError(t, err) + assert.NotNil(t, proof5) + assert.NotNil(t, proof6) + + err = DBStorage.DeleteGeneratedProofs(ctx, 1, math.MaxInt, dbtxer) + assert.NoError(t, err) + + err = DBStorage.CleanupGeneratedProofs(ctx, 1, dbtxer) + assert.NoError(t, err) + + now = time.Now() + + proof3.GeneratingSince = &now + proof4.GeneratingSince = &now + + err = DBStorage.AddGeneratedProof(ctx, &proof3, dbtxer) + assert.NoError(t, err) + + err = DBStorage.AddGeneratedProof(ctx, &proof4, dbtxer) + assert.NoError(t, err) + + time.Sleep(5 * time.Second) + + affected, err := DBStorage.CleanupLockedProofs(ctx, "4s", dbtxer) + assert.NoError(t, err) + require.Equal(t, int64(2), affected) + + proof5, proof6, err = DBStorage.GetProofsToAggregate(ctx, dbtxer) + assert.EqualError(t, err, state.ErrNotFound.Error()) + assert.Nil(t, proof5) + assert.Nil(t, proof6) + + err = DBStorage.DeleteUngeneratedProofs(ctx, dbtxer) + assert.NoError(t, err) +} diff --git a/aggregator/db/dbstorage/sequence.go b/aggregator/db/dbstorage/sequence.go new file mode 100644 index 00000000..96063201 --- /dev/null +++ b/aggregator/db/dbstorage/sequence.go @@ -0,0 +1,21 @@ +package dbstorage + +import ( + "context" + + "github.com/0xPolygon/cdk/db" + "github.com/0xPolygon/cdk/state" +) + +// AddSequence stores the sequence information to allow the aggregator verify sequences. +func (d *DBStorage) AddSequence(ctx context.Context, sequence state.Sequence, dbTx db.Txer) error { + const addSequenceSQL = ` + INSERT INTO sequence (from_batch_num, to_batch_num) + VALUES($1, $2) + ON CONFLICT (from_batch_num) DO UPDATE SET to_batch_num = $2 + ` + + e := d.getExecQuerier(dbTx) + _, err := e.Exec(addSequenceSQL, sequence.FromBatchNumber, sequence.ToBatchNumber) + return err +} diff --git a/aggregator/db/migrations.go b/aggregator/db/migrations.go index 20e8c29a..221fb145 100644 --- a/aggregator/db/migrations.go +++ b/aggregator/db/migrations.go @@ -4,15 +4,14 @@ import ( "embed" "fmt" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/stdlib" migrate "github.com/rubenv/sql-migrate" ) const ( // AggregatorMigrationName is the name of the migration used to associate with the migrations dir - AggregatorMigrationName = "zkevm-aggregator-db" + AggregatorMigrationName = "aggregator-db" ) var ( @@ -28,38 +27,33 @@ func init() { } // RunMigrationsUp runs migrate-up for the given config. -func RunMigrationsUp(cfg Config, name string) error { +func RunMigrationsUp(dbPath string, name string) error { log.Info("running migrations up") - return runMigrations(cfg, name, migrate.Up) + return runMigrations(dbPath, name, migrate.Up) } // CheckMigrations runs migrate-up for the given config. -func CheckMigrations(cfg Config, name string) error { - return checkMigrations(cfg, name) +func CheckMigrations(dbPath string, name string) error { + return checkMigrations(dbPath, name) } // RunMigrationsDown runs migrate-down for the given config. 
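
As a minimal usage sketch (not part of this diff), the new SQLite-backed flow is: run the embedded aggregator migrations against a database path, then open the proof storage on top of the same database. It uses only identifiers introduced by this change (RunMigrationsUp, AggregatorMigrationName, NewDBStorage, BeginTx); the wiring itself is illustrative, not the aggregator's actual boot sequence.

```go
package main

import (
	"context"
	"log"

	aggdb "github.com/0xPolygon/cdk/aggregator/db"
	"github.com/0xPolygon/cdk/aggregator/db/dbstorage"
)

func main() {
	// Same in-memory DSN style the tests in this diff use; a file path works too.
	dbPath := "file::memory:?cache=shared"

	// Apply the SQLite migrations embedded under aggregator/db/migrations.
	if err := aggdb.RunMigrationsUp(dbPath, aggdb.AggregatorMigrationName); err != nil {
		log.Fatalf("running migrations: %v", err)
	}

	// Open the proof/sequence storage on the migrated database.
	storage, err := dbstorage.NewDBStorage(dbPath)
	if err != nil {
		log.Fatalf("opening storage: %v", err)
	}

	// Transactions are now db.Txer values instead of pgx.Tx.
	tx, err := storage.BeginTx(context.Background(), nil)
	if err != nil {
		log.Fatalf("beginning tx: %v", err)
	}
	if err := tx.Rollback(); err != nil {
		log.Fatalf("rolling back: %v", err)
	}
}
```
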
-func RunMigrationsDown(cfg Config, name string) error { +func RunMigrationsDown(dbPath string, name string) error { log.Info("running migrations down") - return runMigrations(cfg, name, migrate.Down) + return runMigrations(dbPath, name, migrate.Down) } // runMigrations will execute pending migrations if needed to keep // the database updated with the latest changes in either direction, // up or down. -func runMigrations(cfg Config, name string, direction migrate.MigrationDirection) error { - c, err := pgx.ParseConfig(fmt.Sprintf( - "postgres://%s:%s@%s:%s/%s", - cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, - )) +func runMigrations(dbPath string, name string, direction migrate.MigrationDirection) error { + db, err := db.NewSQLiteDB(dbPath) if err != nil { return err } - db := stdlib.OpenDB(*c) - embedMigration, ok := embedMigrations[name] if !ok { return fmt.Errorf("migration not found with name: %v", name) @@ -70,7 +64,7 @@ func runMigrations(cfg Config, name string, direction migrate.MigrationDirection Root: "migrations", } - nMigrations, err := migrate.Exec(db, "postgres", migrations, direction) + nMigrations, err := migrate.Exec(db, "sqlite3", migrations, direction) if err != nil { return err } @@ -80,17 +74,12 @@ func runMigrations(cfg Config, name string, direction migrate.MigrationDirection return nil } -func checkMigrations(cfg Config, name string) error { - c, err := pgx.ParseConfig(fmt.Sprintf( - "postgres://%s:%s@%s:%s/%s", - cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Name, - )) +func checkMigrations(dbPath string, name string) error { + db, err := db.NewSQLiteDB(dbPath) if err != nil { return err } - db := stdlib.OpenDB(*c) - embedMigration, ok := embedMigrations[name] if !ok { return fmt.Errorf("migration not found with name: %v", name) diff --git a/aggregator/db/migrations/0001.sql b/aggregator/db/migrations/0001.sql index 963dbea7..651597a3 100644 --- a/aggregator/db/migrations/0001.sql +++ b/aggregator/db/migrations/0001.sql @@ -1,32 +1,24 @@ -- +migrate Down -DROP SCHEMA IF EXISTS aggregator CASCADE; +DROP TABLE IF EXISTS proof; +DROP TABLE IF EXISTS sequence; -- +migrate Up -CREATE SCHEMA aggregator; - -CREATE TABLE IF NOT EXISTS aggregator.batch ( +CREATE TABLE IF NOT EXISTS proof ( batch_num BIGINT NOT NULL, - batch jsonb NOT NULL, - datastream varchar NOT NULL, - PRIMARY KEY (batch_num) -); - -CREATE TABLE IF NOT EXISTS aggregator.proof ( - batch_num BIGINT NOT NULL REFERENCES aggregator.batch (batch_num) ON DELETE CASCADE, batch_num_final BIGINT NOT NULL, - proof varchar NULL, - proof_id varchar NULL, - input_prover varchar NULL, - prover varchar NULL, - prover_id varchar NULL, - created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), - updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), - generating_since timestamptz NULL, + proof TEXT NULL, + proof_id TEXT NULL, + input_prover TEXT NULL, + prover TEXT NULL, + prover_id TEXT NULL, + created_at BIGINT NOT NULL, + updated_at BIGINT NOT NULL, + generating_since BIGINT DEFAULT NULL, PRIMARY KEY (batch_num, batch_num_final) ); -CREATE TABLE IF NOT EXISTS aggregator.sequence ( - from_batch_num BIGINT NOT NULL REFERENCES aggregator.batch (batch_num) ON DELETE CASCADE, +CREATE TABLE IF NOT EXISTS sequence ( + from_batch_num BIGINT NOT NULL, to_batch_num BIGINT NOT NULL, PRIMARY KEY (from_batch_num) ); diff --git a/aggregator/db/migrations/0002.sql b/aggregator/db/migrations/0002.sql deleted file mode 100644 index e2290e13..00000000 --- a/aggregator/db/migrations/0002.sql +++ /dev/null @@ -1,8 +0,0 
@@ --- +migrate Up -DELETE FROM aggregator.batch; -ALTER TABLE aggregator.batch - ADD COLUMN IF NOT EXISTS witness varchar NOT NULL; - --- +migrate Down -ALTER TABLE aggregator.batch - DROP COLUMN IF EXISTS witness; diff --git a/aggregator/db/migrations/0003.sql b/aggregator/db/migrations/0003.sql deleted file mode 100644 index 5351f8e7..00000000 --- a/aggregator/db/migrations/0003.sql +++ /dev/null @@ -1,7 +0,0 @@ --- +migrate Up -ALTER TABLE aggregator.batch - ALTER COLUMN witness DROP NOT NULL; - --- +migrate Down -ALTER TABLE aggregator.batch - ALTER COLUMN witness SET NOT NULL; diff --git a/aggregator/db/migrations/0004.sql b/aggregator/db/migrations/0004.sql deleted file mode 100644 index cb186fc0..00000000 --- a/aggregator/db/migrations/0004.sql +++ /dev/null @@ -1,23 +0,0 @@ --- +migrate Down -CREATE TABLE IF NOT EXISTS aggregator.batch ( - batch_num BIGINT NOT NULL, - batch jsonb NOT NULL, - datastream varchar NOT NULL, - PRIMARY KEY (batch_num) -); - -ALTER TABLE aggregator.proof - ADD CONSTRAINT IF NOT EXISTS proof_batch_num_fkey FOREIGN KEY (batch_num) REFERENCES aggregator.batch (batch_num) ON DELETE CASCADE; - -ALTER TABLE aggregator.sequence - ADD CONSTRAINT IF NOT EXISTS sequence_from_batch_num_fkey FOREIGN KEY (from_batch_num) REFERENCES aggregator.batch (batch_num) ON DELETE CASCADE; - - --- +migrate Up -ALTER TABLE aggregator.proof - DROP CONSTRAINT IF EXISTS proof_batch_num_fkey; - -ALTER TABLE aggregator.sequence - DROP CONSTRAINT IF EXISTS sequence_from_batch_num_fkey; - -DROP TABLE IF EXISTS aggregator.batch; diff --git a/aggregator/db/migrations_test.go b/aggregator/db/migrations_test.go index 0a118c69..317178e9 100644 --- a/aggregator/db/migrations_test.go +++ b/aggregator/db/migrations_test.go @@ -16,3 +16,12 @@ func Test_checkMigrations(t *testing.T) { _, err := migrationSource.FileSystem.ReadFile("migrations/0001.sql") assert.NoError(t, err) } + +func Test_runMigrations(t *testing.T) { + dbPath := "file::memory:?cache=shared" + err := runMigrations(dbPath, AggregatorMigrationName, migrate.Up) + assert.NoError(t, err) + + err = runMigrations(dbPath, AggregatorMigrationName, migrate.Down) + assert.NoError(t, err) +} diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index f1673c46..5979272d 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -2,10 +2,12 @@ package aggregator import ( "context" + "database/sql" "math/big" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" @@ -13,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/jackc/pgx/v4" ) // Consumer interfaces required by the package. @@ -53,19 +54,19 @@ type aggregatorTxProfitabilityChecker interface { } // StateInterface gathers the methods to interact with the state. 
-type StateInterface interface { - BeginStateTransaction(ctx context.Context) (pgx.Tx, error) - CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) - GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) - GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) - AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error - UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error - DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error - DeleteUngeneratedProofs(ctx context.Context, dbTx pgx.Tx) error - CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error - CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) - CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, error) - AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error +type StorageInterface interface { + BeginTx(ctx context.Context, options *sql.TxOptions) (db.Txer, error) + CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx db.Txer) (bool, error) + GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx db.Txer) (*state.Proof, error) + GetProofsToAggregate(ctx context.Context, dbTx db.Txer) (*state.Proof, *state.Proof, error) + AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx db.Txer) error + UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx db.Txer) error + DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx db.Txer) error + DeleteUngeneratedProofs(ctx context.Context, dbTx db.Txer) error + CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx db.Txer) error + CleanupLockedProofs(ctx context.Context, duration string, dbTx db.Txer) (int64, error) + CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx db.Txer) (bool, error) + AddSequence(ctx context.Context, sequence state.Sequence, dbTx db.Txer) error } // EthTxManagerClient represents the eth tx manager interface diff --git a/aggregator/mocks/mock_dbtx.go b/aggregator/mocks/mock_dbtx.go deleted file mode 100644 index f870cd57..00000000 --- a/aggregator/mocks/mock_dbtx.go +++ /dev/null @@ -1,350 +0,0 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. 
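
Given the StorageInterface and db.Txer contract above, a hypothetical helper (not in this PR) can make the intended calling pattern explicit: begin a transaction, run the storage operations, then commit or roll back. The name withTx and the error wrapping are assumptions; BeginTx, Commit and Rollback come from the interfaces shown in this change.

```go
package aggregator

import (
	"context"
	"fmt"

	"github.com/0xPolygon/cdk/db"
)

// withTx is an illustrative wrapper: it begins a transaction on the storage,
// runs fn, and commits on success or rolls back on failure.
func withTx(ctx context.Context, storage StorageInterface, fn func(tx db.Txer) error) error {
	tx, err := storage.BeginTx(ctx, nil) // nil *sql.TxOptions: driver defaults
	if err != nil {
		return fmt.Errorf("beginning tx: %w", err)
	}
	if err := fn(tx); err != nil {
		if rbErr := tx.Rollback(); rbErr != nil {
			return fmt.Errorf("%w (rollback also failed: %v)", err, rbErr)
		}
		return err
	}
	return tx.Commit()
}
```
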
- -package mocks - -import ( - context "context" - - pgconn "github.com/jackc/pgconn" - mock "github.com/stretchr/testify/mock" - - pgx "github.com/jackc/pgx/v4" -) - -// DbTxMock is an autogenerated mock type for the Tx type -type DbTxMock struct { - mock.Mock -} - -// Begin provides a mock function with given fields: ctx -func (_m *DbTxMock) Begin(ctx context.Context) (pgx.Tx, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for Begin") - } - - var r0 pgx.Tx - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pgx.Tx) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BeginFunc provides a mock function with given fields: ctx, f -func (_m *DbTxMock) BeginFunc(ctx context.Context, f func(pgx.Tx) error) error { - ret := _m.Called(ctx, f) - - if len(ret) == 0 { - panic("no return value specified for BeginFunc") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, func(pgx.Tx) error) error); ok { - r0 = rf(ctx, f) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Commit provides a mock function with given fields: ctx -func (_m *DbTxMock) Commit(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for Commit") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Conn provides a mock function with given fields: -func (_m *DbTxMock) Conn() *pgx.Conn { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Conn") - } - - var r0 *pgx.Conn - if rf, ok := ret.Get(0).(func() *pgx.Conn); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*pgx.Conn) - } - } - - return r0 -} - -// CopyFrom provides a mock function with given fields: ctx, tableName, columnNames, rowSrc -func (_m *DbTxMock) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) { - ret := _m.Called(ctx, tableName, columnNames, rowSrc) - - if len(ret) == 0 { - panic("no return value specified for CopyFrom") - } - - var r0 int64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) (int64, error)); ok { - return rf(ctx, tableName, columnNames, rowSrc) - } - if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) int64); ok { - r0 = rf(ctx, tableName, columnNames, rowSrc) - } else { - r0 = ret.Get(0).(int64) - } - - if rf, ok := ret.Get(1).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) error); ok { - r1 = rf(ctx, tableName, columnNames, rowSrc) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Exec provides a mock function with given fields: ctx, sql, arguments -func (_m *DbTxMock) Exec(ctx context.Context, sql string, arguments ...interface{}) (pgconn.CommandTag, error) { - var _ca []interface{} - _ca = append(_ca, ctx, sql) - _ca = append(_ca, arguments...) - ret := _m.Called(_ca...) 
- - if len(ret) == 0 { - panic("no return value specified for Exec") - } - - var r0 pgconn.CommandTag - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgconn.CommandTag, error)); ok { - return rf(ctx, sql, arguments...) - } - if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgconn.CommandTag); ok { - r0 = rf(ctx, sql, arguments...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pgconn.CommandTag) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { - r1 = rf(ctx, sql, arguments...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LargeObjects provides a mock function with given fields: -func (_m *DbTxMock) LargeObjects() pgx.LargeObjects { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for LargeObjects") - } - - var r0 pgx.LargeObjects - if rf, ok := ret.Get(0).(func() pgx.LargeObjects); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(pgx.LargeObjects) - } - - return r0 -} - -// Prepare provides a mock function with given fields: ctx, name, sql -func (_m *DbTxMock) Prepare(ctx context.Context, name string, sql string) (*pgconn.StatementDescription, error) { - ret := _m.Called(ctx, name, sql) - - if len(ret) == 0 { - panic("no return value specified for Prepare") - } - - var r0 *pgconn.StatementDescription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) (*pgconn.StatementDescription, error)); ok { - return rf(ctx, name, sql) - } - if rf, ok := ret.Get(0).(func(context.Context, string, string) *pgconn.StatementDescription); ok { - r0 = rf(ctx, name, sql) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*pgconn.StatementDescription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { - r1 = rf(ctx, name, sql) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query provides a mock function with given fields: ctx, sql, args -func (_m *DbTxMock) Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) { - var _ca []interface{} - _ca = append(_ca, ctx, sql) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Query") - } - - var r0 pgx.Rows - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgx.Rows, error)); ok { - return rf(ctx, sql, args...) - } - if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Rows); ok { - r0 = rf(ctx, sql, args...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pgx.Rows) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { - r1 = rf(ctx, sql, args...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryFunc provides a mock function with given fields: ctx, sql, args, scans, f -func (_m *DbTxMock) QueryFunc(ctx context.Context, sql string, args []interface{}, scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { - ret := _m.Called(ctx, sql, args, scans, f) - - if len(ret) == 0 { - panic("no return value specified for QueryFunc") - } - - var r0 pgconn.CommandTag - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error)); ok { - return rf(ctx, sql, args, scans, f) - } - if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) pgconn.CommandTag); ok { - r0 = rf(ctx, sql, args, scans, f) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pgconn.CommandTag) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) error); ok { - r1 = rf(ctx, sql, args, scans, f) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryRow provides a mock function with given fields: ctx, sql, args -func (_m *DbTxMock) QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row { - var _ca []interface{} - _ca = append(_ca, ctx, sql) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for QueryRow") - } - - var r0 pgx.Row - if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Row); ok { - r0 = rf(ctx, sql, args...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pgx.Row) - } - } - - return r0 -} - -// Rollback provides a mock function with given fields: ctx -func (_m *DbTxMock) Rollback(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for Rollback") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SendBatch provides a mock function with given fields: ctx, b -func (_m *DbTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults { - ret := _m.Called(ctx, b) - - if len(ret) == 0 { - panic("no return value specified for SendBatch") - } - - var r0 pgx.BatchResults - if rf, ok := ret.Get(0).(func(context.Context, *pgx.Batch) pgx.BatchResults); ok { - r0 = rf(ctx, b) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pgx.BatchResults) - } - } - - return r0 -} - -// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewDbTxMock(t interface { - mock.TestingT - Cleanup(func()) -}) *DbTxMock { - mock := &DbTxMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_storage.go similarity index 54% rename from aggregator/mocks/mock_state.go rename to aggregator/mocks/mock_storage.go index 74c9021b..405cba46 100644 --- a/aggregator/mocks/mock_state.go +++ b/aggregator/mocks/mock_storage.go @@ -5,19 +5,21 @@ package mocks import ( context "context" - pgx "github.com/jackc/pgx/v4" + db "github.com/0xPolygon/cdk/db" mock "github.com/stretchr/testify/mock" + sql "database/sql" + state "github.com/0xPolygon/cdk/state" ) -// StateInterfaceMock is an autogenerated mock type for the StateInterface type -type StateInterfaceMock struct { +// StorageInterfaceMock is an autogenerated mock type for the StorageInterface type +type StorageInterfaceMock struct { mock.Mock } // AddGeneratedProof provides a mock function with given fields: ctx, proof, dbTx -func (_m *StateInterfaceMock) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { +func (_m *StorageInterfaceMock) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx db.Txer) error { ret := _m.Called(ctx, proof, dbTx) if len(ret) == 0 { @@ -25,7 +27,7 @@ func (_m *StateInterfaceMock) AddGeneratedProof(ctx context.Context, proof *stat } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, db.Txer) error); ok { r0 = rf(ctx, proof, dbTx) } else { r0 = ret.Error(0) @@ -35,7 +37,7 @@ func (_m *StateInterfaceMock) AddGeneratedProof(ctx context.Context, proof *stat } // AddSequence provides a mock function with given fields: ctx, sequence, dbTx -func (_m *StateInterfaceMock) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error { +func (_m *StorageInterfaceMock) AddSequence(ctx context.Context, sequence state.Sequence, dbTx db.Txer) error { ret := _m.Called(ctx, sequence, dbTx) if len(ret) == 0 { @@ -43,7 +45,7 @@ func (_m *StateInterfaceMock) AddSequence(ctx context.Context, sequence state.Se } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, state.Sequence, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, state.Sequence, db.Txer) error); ok { r0 = rf(ctx, sequence, dbTx) } else { r0 = ret.Error(0) @@ -52,29 +54,29 @@ func (_m *StateInterfaceMock) AddSequence(ctx context.Context, sequence state.Se return r0 } -// BeginStateTransaction provides a mock function with given fields: ctx -func (_m *StateInterfaceMock) BeginStateTransaction(ctx context.Context) (pgx.Tx, error) { - ret := _m.Called(ctx) +// BeginTx provides a mock function with given fields: ctx, options +func (_m *StorageInterfaceMock) BeginTx(ctx context.Context, options *sql.TxOptions) (db.Txer, error) { + ret := _m.Called(ctx, options) if len(ret) == 0 { - panic("no return value specified for BeginStateTransaction") + panic("no return value specified for BeginTx") } - var r0 pgx.Tx + var r0 db.Txer var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { - return rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, *sql.TxOptions) (db.Txer, error)); ok { + return rf(ctx, options) } - if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, *sql.TxOptions) db.Txer); ok { + r0 = rf(ctx, options) } else { 
if ret.Get(0) != nil { - r0 = ret.Get(0).(pgx.Tx) + r0 = ret.Get(0).(db.Txer) } } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) + if rf, ok := ret.Get(1).(func(context.Context, *sql.TxOptions) error); ok { + r1 = rf(ctx, options) } else { r1 = ret.Error(1) } @@ -83,7 +85,7 @@ func (_m *StateInterfaceMock) BeginStateTransaction(ctx context.Context) (pgx.Tx } // CheckProofContainsCompleteSequences provides a mock function with given fields: ctx, proof, dbTx -func (_m *StateInterfaceMock) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) { +func (_m *StorageInterfaceMock) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx db.Txer) (bool, error) { ret := _m.Called(ctx, proof, dbTx) if len(ret) == 0 { @@ -92,16 +94,16 @@ func (_m *StateInterfaceMock) CheckProofContainsCompleteSequences(ctx context.Co var r0 bool var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) (bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, db.Txer) (bool, error)); ok { return rf(ctx, proof, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) bool); ok { + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, db.Txer) bool); ok { r0 = rf(ctx, proof, dbTx) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *state.Proof, db.Txer) error); ok { r1 = rf(ctx, proof, dbTx) } else { r1 = ret.Error(1) @@ -111,7 +113,7 @@ func (_m *StateInterfaceMock) CheckProofContainsCompleteSequences(ctx context.Co } // CheckProofExistsForBatch provides a mock function with given fields: ctx, batchNumber, dbTx -func (_m *StateInterfaceMock) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, error) { +func (_m *StorageInterfaceMock) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx db.Txer) (bool, error) { ret := _m.Called(ctx, batchNumber, dbTx) if len(ret) == 0 { @@ -120,16 +122,16 @@ func (_m *StateInterfaceMock) CheckProofExistsForBatch(ctx context.Context, batc var r0 bool var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (bool, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, db.Txer) (bool, error)); ok { return rf(ctx, batchNumber, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) bool); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, db.Txer) bool); ok { r0 = rf(ctx, batchNumber, dbTx) } else { r0 = ret.Get(0).(bool) } - if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint64, db.Txer) error); ok { r1 = rf(ctx, batchNumber, dbTx) } else { r1 = ret.Error(1) @@ -139,7 +141,7 @@ func (_m *StateInterfaceMock) CheckProofExistsForBatch(ctx context.Context, batc } // CleanupGeneratedProofs provides a mock function with given fields: ctx, batchNumber, dbTx -func (_m *StateInterfaceMock) CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { +func (_m *StorageInterfaceMock) CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx db.Txer) error { ret := _m.Called(ctx, batchNumber, dbTx) if len(ret) == 0 { @@ -147,7 +149,7 @@ func (_m *StateInterfaceMock) CleanupGeneratedProofs(ctx context.Context, batchN } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, 
uint64, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, db.Txer) error); ok { r0 = rf(ctx, batchNumber, dbTx) } else { r0 = ret.Error(0) @@ -157,7 +159,7 @@ func (_m *StateInterfaceMock) CleanupGeneratedProofs(ctx context.Context, batchN } // CleanupLockedProofs provides a mock function with given fields: ctx, duration, dbTx -func (_m *StateInterfaceMock) CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) { +func (_m *StorageInterfaceMock) CleanupLockedProofs(ctx context.Context, duration string, dbTx db.Txer) (int64, error) { ret := _m.Called(ctx, duration, dbTx) if len(ret) == 0 { @@ -166,16 +168,16 @@ func (_m *StateInterfaceMock) CleanupLockedProofs(ctx context.Context, duration var r0 int64 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) (int64, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, db.Txer) (int64, error)); ok { return rf(ctx, duration, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) int64); ok { + if rf, ok := ret.Get(0).(func(context.Context, string, db.Txer) int64); ok { r0 = rf(ctx, duration, dbTx) } else { r0 = ret.Get(0).(int64) } - if rf, ok := ret.Get(1).(func(context.Context, string, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, string, db.Txer) error); ok { r1 = rf(ctx, duration, dbTx) } else { r1 = ret.Error(1) @@ -185,7 +187,7 @@ func (_m *StateInterfaceMock) CleanupLockedProofs(ctx context.Context, duration } // DeleteGeneratedProofs provides a mock function with given fields: ctx, batchNumber, batchNumberFinal, dbTx -func (_m *StateInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error { +func (_m *StorageInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx db.Txer) error { ret := _m.Called(ctx, batchNumber, batchNumberFinal, dbTx) if len(ret) == 0 { @@ -193,7 +195,7 @@ func (_m *StateInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNu } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, db.Txer) error); ok { r0 = rf(ctx, batchNumber, batchNumberFinal, dbTx) } else { r0 = ret.Error(0) @@ -203,7 +205,7 @@ func (_m *StateInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNu } // DeleteUngeneratedProofs provides a mock function with given fields: ctx, dbTx -func (_m *StateInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx pgx.Tx) error { +func (_m *StorageInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx db.Txer) error { ret := _m.Called(ctx, dbTx) if len(ret) == 0 { @@ -211,7 +213,7 @@ func (_m *StateInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, db.Txer) error); ok { r0 = rf(ctx, dbTx) } else { r0 = ret.Error(0) @@ -221,7 +223,7 @@ func (_m *StateInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx } // GetProofReadyToVerify provides a mock function with given fields: ctx, lastVerfiedBatchNumber, dbTx -func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) { +func (_m *StorageInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx 
db.Txer) (*state.Proof, error) { ret := _m.Called(ctx, lastVerfiedBatchNumber, dbTx) if len(ret) == 0 { @@ -230,10 +232,10 @@ func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVer var r0 *state.Proof var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Proof, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, db.Txer) (*state.Proof, error)); ok { return rf(ctx, lastVerfiedBatchNumber, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Proof); ok { + if rf, ok := ret.Get(0).(func(context.Context, uint64, db.Txer) *state.Proof); ok { r0 = rf(ctx, lastVerfiedBatchNumber, dbTx) } else { if ret.Get(0) != nil { @@ -241,7 +243,7 @@ func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVer } } - if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, uint64, db.Txer) error); ok { r1 = rf(ctx, lastVerfiedBatchNumber, dbTx) } else { r1 = ret.Error(1) @@ -251,7 +253,7 @@ func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVer } // GetProofsToAggregate provides a mock function with given fields: ctx, dbTx -func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) { +func (_m *StorageInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx db.Txer) (*state.Proof, *state.Proof, error) { ret := _m.Called(ctx, dbTx) if len(ret) == 0 { @@ -261,10 +263,10 @@ func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx var r0 *state.Proof var r1 *state.Proof var r2 error - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (*state.Proof, *state.Proof, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, db.Txer) (*state.Proof, *state.Proof, error)); ok { return rf(ctx, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) *state.Proof); ok { + if rf, ok := ret.Get(0).(func(context.Context, db.Txer) *state.Proof); ok { r0 = rf(ctx, dbTx) } else { if ret.Get(0) != nil { @@ -272,7 +274,7 @@ func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx } } - if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) *state.Proof); ok { + if rf, ok := ret.Get(1).(func(context.Context, db.Txer) *state.Proof); ok { r1 = rf(ctx, dbTx) } else { if ret.Get(1) != nil { @@ -280,7 +282,7 @@ func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx } } - if rf, ok := ret.Get(2).(func(context.Context, pgx.Tx) error); ok { + if rf, ok := ret.Get(2).(func(context.Context, db.Txer) error); ok { r2 = rf(ctx, dbTx) } else { r2 = ret.Error(2) @@ -290,7 +292,7 @@ func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx } // UpdateGeneratedProof provides a mock function with given fields: ctx, proof, dbTx -func (_m *StateInterfaceMock) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { +func (_m *StorageInterfaceMock) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx db.Txer) error { ret := _m.Called(ctx, proof, dbTx) if len(ret) == 0 { @@ -298,7 +300,7 @@ func (_m *StateInterfaceMock) UpdateGeneratedProof(ctx context.Context, proof *s } var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, db.Txer) error); ok { r0 = rf(ctx, proof, dbTx) } else { r0 = ret.Error(0) @@ -307,13 +309,13 @@ 
func (_m *StateInterfaceMock) UpdateGeneratedProof(ctx context.Context, proof *s return r0 } -// NewStateInterfaceMock creates a new instance of StateInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// NewStorageInterfaceMock creates a new instance of StorageInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. -func NewStateInterfaceMock(t interface { +func NewStorageInterfaceMock(t interface { mock.TestingT Cleanup(func()) -}) *StateInterfaceMock { - mock := &StateInterfaceMock{} +}) *StorageInterfaceMock { + mock := &StorageInterfaceMock{} mock.Mock.Test(t) t.Cleanup(func() { mock.AssertExpectations(t) }) diff --git a/aggregator/mocks/mock_txer.go b/aggregator/mocks/mock_txer.go new file mode 100644 index 00000000..1de07124 --- /dev/null +++ b/aggregator/mocks/mock_txer.go @@ -0,0 +1,163 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + sql "database/sql" + + mock "github.com/stretchr/testify/mock" +) + +// TxerMock is an autogenerated mock type for the Txer type +type TxerMock struct { + mock.Mock +} + +// AddCommitCallback provides a mock function with given fields: cb +func (_m *TxerMock) AddCommitCallback(cb func()) { + _m.Called(cb) +} + +// AddRollbackCallback provides a mock function with given fields: cb +func (_m *TxerMock) AddRollbackCallback(cb func()) { + _m.Called(cb) +} + +// Commit provides a mock function with given fields: +func (_m *TxerMock) Commit() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Exec provides a mock function with given fields: query, args +func (_m *TxerMock) Exec(query string, args ...interface{}) (sql.Result, error) { + var _ca []interface{} + _ca = append(_ca, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Exec") + } + + var r0 sql.Result + var r1 error + if rf, ok := ret.Get(0).(func(string, ...interface{}) (sql.Result, error)); ok { + return rf(query, args...) + } + if rf, ok := ret.Get(0).(func(string, ...interface{}) sql.Result); ok { + r0 = rf(query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(sql.Result) + } + } + + if rf, ok := ret.Get(1).(func(string, ...interface{}) error); ok { + r1 = rf(query, args...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Query provides a mock function with given fields: query, args +func (_m *TxerMock) Query(query string, args ...interface{}) (*sql.Rows, error) { + var _ca []interface{} + _ca = append(_ca, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 *sql.Rows + var r1 error + if rf, ok := ret.Get(0).(func(string, ...interface{}) (*sql.Rows, error)); ok { + return rf(query, args...) + } + if rf, ok := ret.Get(0).(func(string, ...interface{}) *sql.Rows); ok { + r0 = rf(query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.Rows) + } + } + + if rf, ok := ret.Get(1).(func(string, ...interface{}) error); ok { + r1 = rf(query, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryRow provides a mock function with given fields: query, args +func (_m *TxerMock) QueryRow(query string, args ...interface{}) *sql.Row { + var _ca []interface{} + _ca = append(_ca, query) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for QueryRow") + } + + var r0 *sql.Row + if rf, ok := ret.Get(0).(func(string, ...interface{}) *sql.Row); ok { + r0 = rf(query, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.Row) + } + } + + return r0 +} + +// Rollback provides a mock function with given fields: +func (_m *TxerMock) Rollback() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Rollback") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewTxerMock creates a new instance of TxerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxerMock(t interface { + mock.TestingT + Cleanup(func()) +}) *TxerMock { + mock := &TxerMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/profitabilitychecker.go b/aggregator/profitabilitychecker.go deleted file mode 100644 index dc91a21e..00000000 --- a/aggregator/profitabilitychecker.go +++ /dev/null @@ -1,92 +0,0 @@ -package aggregator - -import ( - "context" - "math/big" - "time" -) - -// TxProfitabilityCheckerType checks profitability of batch validation -type TxProfitabilityCheckerType string - -const ( - // ProfitabilityBase checks pol collateral with min reward - ProfitabilityBase = "base" - // ProfitabilityAcceptAll validate batch anyway and don't check anything - ProfitabilityAcceptAll = "acceptall" -) - -// TxProfitabilityCheckerBase checks pol collateral with min reward -type TxProfitabilityCheckerBase struct { - State StateInterface - IntervalAfterWhichBatchSentAnyway time.Duration - MinReward *big.Int -} - -// NewTxProfitabilityCheckerBase init base tx profitability checker -func NewTxProfitabilityCheckerBase( - state StateInterface, interval time.Duration, minReward *big.Int, -) *TxProfitabilityCheckerBase { - return &TxProfitabilityCheckerBase{ - State: state, - IntervalAfterWhichBatchSentAnyway: interval, - MinReward: minReward, - } -} - -// IsProfitable checks pol collateral with min reward -func (pc *TxProfitabilityCheckerBase) IsProfitable(ctx context.Context, polCollateral *big.Int) (bool, error) { - // if pc.IntervalAfterWhichBatchSentAnyway != 0 { - // ok, err := isConsolidatedBatchAppeared(ctx, pc.State, pc.IntervalAfterWhichBatchSentAnyway) - // if err != nil { - // return false, err - // } - // if ok { - // return true, nil - // } - // } - return polCollateral.Cmp(pc.MinReward) >= 0, nil -} - -// TxProfitabilityCheckerAcceptAll validate batch anyway and don't check anything -type TxProfitabilityCheckerAcceptAll struct { - State StateInterface - IntervalAfterWhichBatchSentAnyway time.Duration -} - -// NewTxProfitabilityCheckerAcceptAll init tx profitability checker that accept all txs -func NewTxProfitabilityCheckerAcceptAll(state StateInterface, interval time.Duration) *TxProfitabilityCheckerAcceptAll { - return &TxProfitabilityCheckerAcceptAll{ - State: state, - IntervalAfterWhichBatchSentAnyway: interval, - } -} - -// IsProfitable validate batch anyway and don't 
check anything -func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, polCollateral *big.Int) (bool, error) { - // if pc.IntervalAfterWhichBatchSentAnyway != 0 { - // ok, err := isConsolidatedBatchAppeared(ctx, pc.State, pc.IntervalAfterWhichBatchSentAnyway) - // if err != nil { - // return false, err - // } - // if ok { - // return true, nil - // } - // } - return true, nil -} - -// TODO: now it's impossible to check, when batch got consolidated, bcs it's not saved -// func isConsolidatedBatchAppeared(ctx context.Context, state StateInterface, -// intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) { -// batch, err := state.GetLastVerifiedBatch(ctx, nil) -// if err != nil { -// return false, fmt.Errorf("failed to get last verified batch, err: %v", err) -// } -// interval := intervalAfterWhichBatchConsolidatedAnyway * time.Minute -// if batch..Before(time.Now().Add(-interval)) { -// return true, nil -// } -// -// return false, err -// } diff --git a/aggregator/prover/mocks/mock_channel.go b/aggregator/prover/mocks/mock_channel.go new file mode 100644 index 00000000..d125896d --- /dev/null +++ b/aggregator/prover/mocks/mock_channel.go @@ -0,0 +1,176 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + metadata "google.golang.org/grpc/metadata" + + prover "github.com/0xPolygon/cdk/aggregator/prover" +) + +// ChannelMock is an autogenerated mock type for the AggregatorService_ChannelServer type +type ChannelMock struct { + mock.Mock +} + +// Context provides a mock function with given fields: +func (_m *ChannelMock) Context() context.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Context") + } + + var r0 context.Context + if rf, ok := ret.Get(0).(func() context.Context); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(context.Context) + } + } + + return r0 +} + +// Recv provides a mock function with given fields: +func (_m *ChannelMock) Recv() (*prover.ProverMessage, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Recv") + } + + var r0 *prover.ProverMessage + var r1 error + if rf, ok := ret.Get(0).(func() (*prover.ProverMessage, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() *prover.ProverMessage); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*prover.ProverMessage) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RecvMsg provides a mock function with given fields: m +func (_m *ChannelMock) RecvMsg(m interface{}) error { + ret := _m.Called(m) + + if len(ret) == 0 { + panic("no return value specified for RecvMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(m) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Send provides a mock function with given fields: _a0 +func (_m *ChannelMock) Send(_a0 *prover.AggregatorMessage) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for Send") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*prover.AggregatorMessage) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendHeader provides a mock function with given fields: _a0 +func (_m *ChannelMock) SendHeader(_a0 metadata.MD) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value 
specified for SendHeader") + } + + var r0 error + if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendMsg provides a mock function with given fields: m +func (_m *ChannelMock) SendMsg(m interface{}) error { + ret := _m.Called(m) + + if len(ret) == 0 { + panic("no return value specified for SendMsg") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(m) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetHeader provides a mock function with given fields: _a0 +func (_m *ChannelMock) SetHeader(_a0 metadata.MD) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SetHeader") + } + + var r0 error + if rf, ok := ret.Get(0).(func(metadata.MD) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetTrailer provides a mock function with given fields: _a0 +func (_m *ChannelMock) SetTrailer(_a0 metadata.MD) { + _m.Called(_a0) +} + +// NewChannelMock creates a new instance of ChannelMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewChannelMock(t interface { + mock.TestingT + Cleanup(func()) +}) *ChannelMock { + mock := &ChannelMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/prover/prover.go b/aggregator/prover/prover.go index a5f7e9eb..ad9c9895 100644 --- a/aggregator/prover/prover.go +++ b/aggregator/prover/prover.go @@ -18,10 +18,10 @@ import ( ) const ( - stateRootStartIndex = 19 - stateRootFinalIndex = stateRootStartIndex + 8 - accInputHashStartIndex = 27 - accInputHashFinalIndex = accInputHashStartIndex + 8 + StateRootStartIndex = 19 + StateRootFinalIndex = StateRootStartIndex + 8 + AccInputHashStartIndex = 27 + AccInputHashFinalIndex = AccInputHashStartIndex + 8 ) var ( @@ -298,13 +298,13 @@ func (p *Prover) WaitRecursiveProof(ctx context.Context, proofID string) (string ) } - sr, err := GetSanityCheckHashFromProof(p.logger, resProof.RecursiveProof, stateRootStartIndex, stateRootFinalIndex) + sr, err := GetSanityCheckHashFromProof(p.logger, resProof.RecursiveProof, StateRootStartIndex, StateRootFinalIndex) if err != nil && sr != (common.Hash{}) { p.logger.Errorf("Error getting state root from proof: %v", err) } accInputHash, err := GetSanityCheckHashFromProof(p.logger, resProof.RecursiveProof, - accInputHashStartIndex, accInputHashFinalIndex) + AccInputHashStartIndex, AccInputHashFinalIndex) if err != nil && accInputHash != (common.Hash{}) { p.logger.Errorf("Error getting acc input hash from proof: %v", err) } diff --git a/aggregator/prover/prover_test.go b/aggregator/prover/prover_test.go index ee12c3ac..952a55ae 100644 --- a/aggregator/prover/prover_test.go +++ b/aggregator/prover/prover_test.go @@ -1,12 +1,19 @@ package prover_test import ( + "context" "fmt" + "net" "os" "testing" + "time" "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/aggregator/prover/mocks" + "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/log" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -20,6 +27,50 @@ type TestStateRoot struct { Publics []string `mapstructure:"publics"` } +func TestProver(t *testing.T) { + mockChannel := mocks.ChannelMock{} + var addr net.Addr + + mockChannel.On("Send", 
mock.Anything).Return(nil) + mockChannel.On("Recv").Return(&prover.ProverMessage{ + Id: "test", + Response: &prover.ProverMessage_GetStatusResponse{ + GetStatusResponse: &prover.GetStatusResponse{ + Status: prover.GetStatusResponse_STATUS_IDLE, + ProverName: "testName", + ProverId: "testId", + }, + }, + }, nil).Times(1) + + p, err := prover.New(log.GetDefaultLogger(), &mockChannel, addr, types.Duration{Duration: time.Second * 5}) + require.NoError(t, err) + name := p.Name() + require.Equal(t, "testName", name, "name does not match") + address := p.Addr() + require.Equal(t, "", address, "address does not match") + id := p.ID() + require.Equal(t, "testId", id, "id does not match") + + mockChannel.On("Recv").Return(&prover.ProverMessage{ + Id: "test", + Response: &prover.ProverMessage_GetProofResponse{ + GetProofResponse: &prover.GetProofResponse{ + Proof: &prover.GetProofResponse_RecursiveProof{ + RecursiveProof: "this is a proof", + }, + Result: prover.GetProofResponse_RESULT_COMPLETED_OK, + }, + }, + }, nil) + + proof, sr, accinputHash, err := p.WaitRecursiveProof(context.Background(), "proofID") + require.NoError(t, err) + + require.NotNil(t, proof, "proof is nil") + require.NotNil(t, sr, "state root is nil") + require.Equal(t, common.Hash{}, accinputHash, "state root is not empty") +} func TestCalculateStateRoots(t *testing.T) { var expectedStateRoots = map[string]string{ "1871.json": "0x0ed594d8bc0bb38f3190ff25fb1e5b4fe1baf0e2e0c1d7bf3307f07a55d3a60f", @@ -42,13 +93,18 @@ func TestCalculateStateRoots(t *testing.T) { require.NoError(t, err) // Get the state root from the batch proof - fileStateRoot, err := prover.GetSanityCheckHashFromProof(log.GetDefaultLogger(), string(data), stateRootStartIndex, stateRootFinalIndex) + fileStateRoot, err := prover.GetSanityCheckHashFromProof(log.GetDefaultLogger(), string(data), prover.StateRootStartIndex, prover.StateRootFinalIndex) require.NoError(t, err) // Get the expected state root expectedStateRoot, ok := expectedStateRoots[file.Name()] require.True(t, ok, "Expected state root not found") + // Check Acc Input Hash + accInputHash, err := prover.GetSanityCheckHashFromProof(log.GetDefaultLogger(), string(data), prover.AccInputHashStartIndex, prover.AccInputHashFinalIndex) + require.NotEqual(t, common.Hash{}, accInputHash, "Acc Input Hash is empty") + require.NoError(t, err) + // Compare the state roots require.Equal(t, expectedStateRoot, fileStateRoot.String(), "State roots do not match") } diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go index 08730572..4075508a 100644 --- a/aggsender/aggsender.go +++ b/aggsender/aggsender.go @@ -8,7 +8,6 @@ import ( "fmt" "math/big" "os" - "slices" "time" "github.com/0xPolygon/cdk/agglayer" @@ -28,8 +27,7 @@ var ( errNoBridgesAndClaims = errors.New("no bridges and claims to build certificate") errInvalidSignatureSize = errors.New("invalid signature size") - zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") - nonSettledStatuses = []agglayer.CertificateStatus{agglayer.Pending, agglayer.Candidate, agglayer.Proven} + zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") ) // AggSender is a component that will send certificates to the aggLayer @@ -57,7 +55,11 @@ func New( l1InfoTreeSyncer *l1infotreesync.L1InfoTreeSync, l2Syncer types.L2BridgeSyncer, epochNotifier types.EpochNotifier) (*AggSender, error) { - storage, err := db.NewAggSenderSQLStorage(logger, cfg.StoragePath) + storageConfig := 
db.AggSenderSQLStorageConfig{
+		DBPath:                  cfg.StoragePath,
+		KeepCertificatesHistory: cfg.KeepCertificatesHistory,
+	}
+	storage, err := db.NewAggSenderSQLStorage(logger, storageConfig)
 	if err != nil {
 		return nil, err
 	}
@@ -83,9 +85,31 @@ func New(
 // Start starts the AggSender
 func (a *AggSender) Start(ctx context.Context) {
+	a.log.Info("AggSender started")
+	a.checkInitialStatus(ctx)
 	a.sendCertificates(ctx)
 }
+
+// checkInitialStatus checks the local status against the agglayer status
+func (a *AggSender) checkInitialStatus(ctx context.Context) {
+	ticker := time.NewTicker(a.cfg.DelayBeetweenRetries.Duration)
+	defer ticker.Stop()
+
+	for {
+		if err := a.checkLastCertificateFromAgglayer(ctx); err != nil {
+			a.log.Errorf("error checking initial status: %w, retrying in %s", err, a.cfg.DelayBeetweenRetries.String())
+		} else {
+			a.log.Info("Initial status checked successfully")
+			return
+		}
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+		}
+	}
+}
+
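// Illustrative sketch (not part of this diff): checkInitialStatus above is the generic
// "retry until the check succeeds or the context is cancelled" pattern, paced by
// cfg.DelayBeetweenRetries. A minimal standalone version, assuming only the standard
// context and time packages; the helper name retryUntilOK is hypothetical:
//
//	func retryUntilOK(ctx context.Context, interval time.Duration, check func() error) error {
//		ticker := time.NewTicker(interval)
//		defer ticker.Stop()
//		for {
//			if err := check(); err == nil {
//				return nil
//			}
//			select {
//			case <-ctx.Done():
//				return ctx.Err()
//			case <-ticker.C:
//			}
//		}
//	}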
Err: %w", signedCertificate.Brief(), err) } createdTime := time.Now().UTC().UnixMilli() + prevLER := common.BytesToHash(certificate.PrevLocalExitRoot[:]) certInfo := types.CertificateInfo{ - Height: certificate.Height, - CertificateID: certificateHash, - NewLocalExitRoot: certificate.NewLocalExitRoot, - FromBlock: fromBlock, - ToBlock: toBlock, - CreatedAt: createdTime, - UpdatedAt: createdTime, - SignedCertificate: string(raw), - } - - if err := a.storage.SaveLastSentCertificate(ctx, certInfo); err != nil { + Height: certificate.Height, + RetryCount: retryCount, + CertificateID: certificateHash, + NewLocalExitRoot: certificate.NewLocalExitRoot, + PreviousLocalExitRoot: &prevLER, + FromBlock: fromBlock, + ToBlock: toBlock, + CreatedAt: createdTime, + UpdatedAt: createdTime, + SignedCertificate: string(raw), + } + // TODO: Improve this case, if a cert is not save in the storage, we are going to settle a unknown certificate + err = a.saveCertificateToStorage(ctx, certInfo, a.cfg.MaxRetriesStoreCertificate) + if err != nil { + a.log.Errorf("error saving certificate to storage. Cert:%s Err: %w", certInfo.String(), err) return nil, fmt.Errorf("error saving last sent certificate %s in db: %w", certInfo.String(), err) } a.log.Infof("certificate: %s sent successfully for range of l2 blocks (from block: %d, to block: %d) cert:%s", - certificateHash, fromBlock, toBlock, signedCertificate.String()) + certInfo.ID(), fromBlock, toBlock, signedCertificate.Brief()) return signedCertificate, nil } +// saveCertificateToStorage saves the certificate to the storage +// it retries if it fails. if param retries == 0 it retries indefinitely +func (a *AggSender) saveCertificateToStorage(ctx context.Context, cert types.CertificateInfo, maxRetries int) error { + retries := 1 + err := fmt.Errorf("initial_error") + for err != nil { + if err = a.storage.SaveLastSentCertificate(ctx, cert); err != nil { + // If this happens we can't work as normal, because local DB is outdated, we have to retry + a.log.Errorf("error saving last sent certificate %s in db: %w", cert.String(), err) + if retries == maxRetries { + return fmt.Errorf("error saving last sent certificate %s in db: %w", cert.String(), err) + } else { + retries++ + time.Sleep(a.cfg.DelayBeetweenRetries.Duration) + } + } + } + return nil +} + // saveCertificate saves the certificate to a tmp file func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCertificate) { if signedCertificate == nil || a.cfg.SaveCertificatesToFilesPath == "" { @@ -233,30 +285,53 @@ func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCert // getNextHeightAndPreviousLER returns the height and previous LER for the new certificate func (a *AggSender) getNextHeightAndPreviousLER( - lastSentCertificateInfo *types.CertificateInfo) (uint64, common.Hash) { - height := lastSentCertificateInfo.Height + 1 - if lastSentCertificateInfo.Status == agglayer.InError { - // previous certificate was in error, so we need to resend it - a.log.Debugf("Last certificate %s failed so reusing height %d", - lastSentCertificateInfo.CertificateID, lastSentCertificateInfo.Height) - height = lastSentCertificateInfo.Height + lastSentCertificateInfo *types.CertificateInfo) (uint64, common.Hash, error) { + if lastSentCertificateInfo == nil { + return 0, zeroLER, nil } - - previousLER := lastSentCertificateInfo.NewLocalExitRoot - if lastSentCertificateInfo.NewLocalExitRoot == (common.Hash{}) { - // meaning this is the first certificate - height = 0 - previousLER = zeroLER + if 
!lastSentCertificateInfo.Status.IsClosed() { + return 0, zeroLER, fmt.Errorf("last certificate %s is not closed (status: %s)", + lastSentCertificateInfo.ID(), lastSentCertificateInfo.Status.String()) + } + if lastSentCertificateInfo.Status.IsSettled() { + return lastSentCertificateInfo.Height + 1, lastSentCertificateInfo.NewLocalExitRoot, nil } - return height, previousLER + if lastSentCertificateInfo.Status.IsInError() { + // We can reuse last one of lastCert? + if lastSentCertificateInfo.PreviousLocalExitRoot != nil { + return lastSentCertificateInfo.Height, *lastSentCertificateInfo.PreviousLocalExitRoot, nil + } + // Is the first one, so we can set the zeroLER + if lastSentCertificateInfo.Height == 0 { + return 0, zeroLER, nil + } + // We get previous certificate that must be settled + a.log.Debugf("last certificate %s is in error, getting previous settled certificate height:%d", + lastSentCertificateInfo.Height-1) + lastSettleCert, err := a.storage.GetCertificateByHeight(lastSentCertificateInfo.Height - 1) + if err != nil { + return 0, common.Hash{}, fmt.Errorf("error getting last settled certificate: %w", err) + } + if lastSettleCert == nil { + return 0, common.Hash{}, fmt.Errorf("none settled certificate: %w", err) + } + if !lastSettleCert.Status.IsSettled() { + return 0, common.Hash{}, fmt.Errorf("last settled certificate %s is not settled (status: %s)", + lastSettleCert.ID(), lastSettleCert.Status.String()) + } + + return lastSentCertificateInfo.Height, lastSettleCert.NewLocalExitRoot, nil + } + return 0, zeroLER, fmt.Errorf("last certificate %s has an unknown status: %s", + lastSentCertificateInfo.ID(), lastSentCertificateInfo.Status.String()) } // buildCertificate builds a certificate from the bridge events func (a *AggSender) buildCertificate(ctx context.Context, bridges []bridgesync.Bridge, claims []bridgesync.Claim, - lastSentCertificateInfo types.CertificateInfo, + lastSentCertificateInfo *types.CertificateInfo, toBlock uint64) (*agglayer.Certificate, error) { if len(bridges) == 0 && len(claims) == 0 { return nil, errNoBridgesAndClaims @@ -279,7 +354,10 @@ func (a *AggSender) buildCertificate(ctx context.Context, return nil, fmt.Errorf("error getting exit root by index: %d. 
Error: %w", depositCount, err) } - height, previousLER := a.getNextHeightAndPreviousLER(&lastSentCertificateInfo) + height, previousLER, err := a.getNextHeightAndPreviousLER(lastSentCertificateInfo) + if err != nil { + return nil, fmt.Errorf("error getting next height and previous LER: %w", err) + } return &agglayer.Certificate{ NetworkID: a.l2Syncer.OriginNetwork(), @@ -383,7 +461,9 @@ func (a *AggSender) getImportedBridgeExits( for i, claim := range claims { l1Info := claimL1Info[i] - a.log.Debugf("claim[%d]: destAddr: %s GER:%s", i, claim.DestinationAddress.String(), claim.GlobalExitRoot.String()) + a.log.Debugf("claim[%d]: destAddr: %s GER: %s Block: %d Pos: %d GlobalIndex: 0x%x", + i, claim.DestinationAddress.String(), claim.GlobalExitRoot.String(), + claim.BlockNum, claim.BlockPos, claim.GlobalIndex) ibe, err := a.convertClaimToImportedBridgeExit(claim) if err != nil { return nil, fmt.Errorf("error converting claim to imported bridge exit: %w", err) @@ -489,54 +569,72 @@ func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglaye // It returns: // bool -> if there are pending certificates func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) bool { - pendingCertificates, err := a.storage.GetCertificatesByStatus(nonSettledStatuses) + pendingCertificates, err := a.storage.GetCertificatesByStatus(agglayer.NonSettledStatuses) if err != nil { - err = fmt.Errorf("error getting pending certificates: %w", err) - a.log.Error(err) + a.log.Errorf("error getting pending certificates: %w", err) return true } - thereArePendingCerts := false + a.log.Debugf("checkPendingCertificatesStatus num of pendingCertificates: %d", len(pendingCertificates)) + thereArePendingCerts := false + for _, certificate := range pendingCertificates { certificateHeader, err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) if err != nil { - err = fmt.Errorf("error getting certificate header of %d/%s from agglayer: %w", - certificate.Height, certificate.String(), err) - a.log.Error(err) + a.log.Errorf("error getting certificate header of %s from agglayer: %w", + certificate.ID(), err) return true } - elapsedTime := time.Now().UTC().Sub(time.UnixMilli(certificate.CreatedAt)) + a.log.Debugf("aggLayerClient.GetCertificateHeader status [%s] of certificate %s elapsed time:%s", certificateHeader.Status, - certificateHeader.String(), - elapsedTime) - - if certificateHeader.Status != certificate.Status { - a.log.Infof("certificate %s changed status from [%s] to [%s] elapsed time: %s", - certificateHeader.String(), certificate.Status, certificateHeader.Status, elapsedTime) + certificateHeader.ID(), + certificate.ElapsedTimeSinceCreation()) - certificate.Status = certificateHeader.Status - certificate.UpdatedAt = time.Now().UTC().UnixMilli() - - if err := a.storage.UpdateCertificateStatus(ctx, *certificate); err != nil { - err = fmt.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) - a.log.Error(err) - return true - } + if err := a.updateCertificateStatus(ctx, certificate, certificateHeader); err != nil { + a.log.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) + return true } - if slices.Contains(nonSettledStatuses, certificateHeader.Status) { + + if !certificate.IsClosed() { a.log.Infof("certificate %s is still pending, elapsed time:%s ", - certificateHeader.String(), elapsedTime) + certificateHeader.ID(), certificate.ElapsedTimeSinceCreation()) thereArePendingCerts = true } } return 
thereArePendingCerts
 }

+// updateCertificateStatus updates the certificate status in the storage
+func (a *AggSender) updateCertificateStatus(ctx context.Context,
+	localCert *types.CertificateInfo,
+	agglayerCert *agglayer.CertificateHeader) error {
+	if localCert.Status == agglayerCert.Status {
+		return nil
+	}
+	a.log.Infof("certificate %s changed status from [%s] to [%s] elapsed time: %s full_cert: %s",
+		localCert.ID(), localCert.Status, agglayerCert.Status, localCert.ElapsedTimeSinceCreation(),
+		localCert.String())
+
+	// That is a strange situation
+	if agglayerCert.Status.IsOpen() && localCert.Status.IsClosed() {
+		a.log.Warnf("certificate %s is reopened! from [%s] to [%s]",
+			localCert.ID(), localCert.Status, agglayerCert.Status)
+	}
+
+	localCert.Status = agglayerCert.Status
+	localCert.UpdatedAt = time.Now().UTC().UnixMilli()
+	if err := a.storage.UpdateCertificate(ctx, *localCert); err != nil {
+		a.log.Errorf("error updating certificate %s status in storage: %w", agglayerCert.ID(), err)
+		return fmt.Errorf("error updating certificate. Err: %w", err)
+	}
+	return nil
+}
+
 // shouldSendCertificate checks if a certificate should be sent at given time
 // if we have pending certificates, then we wait until they are settled
 func (a *AggSender) shouldSendCertificate() (bool, error) {
-	pendingCertificates, err := a.storage.GetCertificatesByStatus(nonSettledStatuses)
+	pendingCertificates, err := a.storage.GetCertificatesByStatus(agglayer.NonSettledStatuses)
 	if err != nil {
 		return false, fmt.Errorf("error getting pending certificates: %w", err)
 	}
@@ -544,6 +642,84 @@ func (a *AggSender) shouldSendCertificate() (bool, error) {
 	return len(pendingCertificates) == 0, nil
 }

+// checkLastCertificateFromAgglayer checks the last certificate from agglayer
+func (a *AggSender) checkLastCertificateFromAgglayer(ctx context.Context) error {
+	networkID := a.l2Syncer.OriginNetwork()
+	a.log.Infof("recovery: checking last certificate from AggLayer for network %d", networkID)
+	aggLayerLastCert, err := a.aggLayerClient.GetLatestKnownCertificateHeader(networkID)
+	if err != nil {
+		return fmt.Errorf("recovery: error getting latest known certificate header from agglayer: %w", err)
+	}
+	a.log.Infof("recovery: last certificate from AggLayer: %s", aggLayerLastCert.String())
+	localLastCert, err := a.storage.GetLastSentCertificate()
+	if err != nil {
+		return fmt.Errorf("recovery: error getting last sent certificate from local storage: %w", err)
+	}
+	a.log.Infof("recovery: last certificate in storage: %s", localLastCert.String())
+
+	// CASE 1: No certificates in local storage and agglayer
+	if localLastCert == nil && aggLayerLastCert == nil {
+		a.log.Info("recovery: No certificates in local storage and agglayer: initial state")
+		return nil
+	}
+	// CASE 2: No certificates in local storage but agglayer has one
+	if localLastCert == nil && aggLayerLastCert != nil {
+		a.log.Infof("recovery: No certificates in local storage but agglayer has one: recovering aggSender cert: %s",
+			aggLayerLastCert.String())
+		if _, err := a.updateLocalStorageWithAggLayerCert(ctx, aggLayerLastCert); err != nil {
+			return fmt.Errorf("recovery: error updating local storage with agglayer certificate: %w", err)
+		}
+		return nil
+	}
+	// CASE 2.1: certificate in storage but not in agglayer
+	// this makes no sense, so return an error
+	if localLastCert != nil && aggLayerLastCert == nil {
+		return fmt.Errorf("recovery: certificate exists in storage but not in agglayer. 
Inconsistency") + } + // CASE 3.1: the certificate on the agglayer has less height than the one stored in the local storage + if aggLayerLastCert.Height < localLastCert.Height { + return fmt.Errorf("recovery: the last certificate in the agglayer has less height (%d) "+ + "than the one in the local storage (%d)", aggLayerLastCert.Height, localLastCert.Height) + } + // CASE 3.2: aggsender stopped between sending to agglayer and storing to the local storage + if aggLayerLastCert.Height == localLastCert.Height+1 { + a.log.Infof("recovery: AggLayer has the next cert (height: %d), so is a recovery case: storing cert: %s", + aggLayerLastCert.Height, aggLayerLastCert.String()) + // we need to store the certificate in the local storage. + localLastCert, err = a.updateLocalStorageWithAggLayerCert(ctx, aggLayerLastCert) + if err != nil { + log.Errorf("recovery: error updating certificate: %s, reason: %w", aggLayerLastCert.String(), err) + return fmt.Errorf("recovery: error updating certificate: %w", err) + } + } + // CASE 4: AggSender and AggLayer are not on the same page + // note: we don't need to check individual fields of the certificate + // because CertificateID is a hash of all the fields + if localLastCert.CertificateID != aggLayerLastCert.CertificateID { + a.log.Errorf("recovery: Local certificate:\n %s \n is different from agglayer certificate:\n %s", + localLastCert.String(), aggLayerLastCert.String()) + return fmt.Errorf("recovery: mismatch between local and agglayer certificates") + } + // CASE 5: AggSender and AggLayer are at same page + // just update status + err = a.updateCertificateStatus(ctx, localLastCert, aggLayerLastCert) + if err != nil { + a.log.Errorf("recovery: error updating status certificate: %s status: %w", aggLayerLastCert.String(), err) + return fmt.Errorf("recovery: error updating certificate status: %w", err) + } + + a.log.Infof("recovery: successfully checked last certificate from AggLayer for network %d", networkID) + return nil +} + +// updateLocalStorageWithAggLayerCert updates the local storage with the certificate from the AggLayer +func (a *AggSender) updateLocalStorageWithAggLayerCert(ctx context.Context, + aggLayerCert *agglayer.CertificateHeader) (*types.CertificateInfo, error) { + certInfo := NewCertificateInfoFromAgglayerCertHeader(aggLayerCert) + a.log.Infof("setting initial certificate from AggLayer: %s", certInfo.String()) + return certInfo, a.storage.SaveLastSentCertificate(ctx, *certInfo) +} + // extractSignatureData extracts the R, S, and V from a 65-byte signature func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, err error) { if len(signature) != signatureSize { @@ -562,3 +738,29 @@ func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, func createCertificateMetadata(toBlock uint64) common.Hash { return common.BigToHash(new(big.Int).SetUint64(toBlock)) } + +func extractFromCertificateMetadataToBlock(metadata common.Hash) uint64 { + return metadata.Big().Uint64() +} + +func NewCertificateInfoFromAgglayerCertHeader(c *agglayer.CertificateHeader) *types.CertificateInfo { + if c == nil { + return nil + } + now := time.Now().UTC().UnixMilli() + res := &types.CertificateInfo{ + Height: c.Height, + CertificateID: c.CertificateID, + NewLocalExitRoot: c.NewLocalExitRoot, + FromBlock: 0, + ToBlock: extractFromCertificateMetadataToBlock(c.Metadata), + Status: c.Status, + CreatedAt: now, + UpdatedAt: now, + SignedCertificate: "na/agglayer header", + } + if c.PreviousLocalExitRoot != nil { + 
res.PreviousLocalExitRoot = c.PreviousLocalExitRoot + } + return res +} diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go index b9242bdf..766ad1ad 100644 --- a/aggsender/aggsender_test.go +++ b/aggsender/aggsender_test.go @@ -8,10 +8,12 @@ import ( "fmt" "math/big" "os" + "runtime" "testing" "time" "github.com/0xPolygon/cdk/agglayer" + "github.com/0xPolygon/cdk/aggsender/db" "github.com/0xPolygon/cdk/aggsender/mocks" aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" "github.com/0xPolygon/cdk/bridgesync" @@ -25,21 +27,15 @@ import ( "github.com/stretchr/testify/require" ) -func TestExploratoryGetCertificateHeader(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := agglayer.NewAggLayerClient("http://localhost:32796") - certificateID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369") - certificateHeader, err := aggLayerClient.GetCertificateHeader(certificateID) - require.NoError(t, err) - fmt.Print(certificateHeader) -} -func TestExploratoryGetEpochConfiguration(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := agglayer.NewAggLayerClient("http://localhost:32796") - clockConfig, err := aggLayerClient.GetEpochConfiguration() - require.NoError(t, err) - fmt.Print(clockConfig) -} +const ( + networkIDTest = uint32(1234) +) + +var ( + errTest = errors.New("unitest error") + ler1 = common.HexToHash("0x123") + ler2 = common.HexToHash("0x12345") +) func TestConfigString(t *testing.T) { config := Config{ @@ -59,7 +55,6 @@ func TestConfigString(t *testing.T) { "BlockGetInterval: 10s\n" + "CheckSettledInterval: 20s\n" + "AggsenderPrivateKeyPath: /path/to/key\n" + - "AggsenderPrivateKeyPassword: password\n" + "URLRPCL2: http://l2.rpc.url\n" + "BlockFinality: latestBlock\n" + "EpochNotificationPercentage: 50\n" + @@ -281,7 +276,7 @@ func TestGetBridgeExits(t *testing.T) { } func TestAggSenderStart(t *testing.T) { - AggLayerMock := agglayer.NewAgglayerClientMock(t) + aggLayerMock := agglayer.NewAgglayerClientMock(t) epochNotifierMock := mocks.NewEpochNotifier(t) bridgeL2SyncerMock := mocks.NewL2BridgeSyncer(t) ctx, cancel := context.WithCancel(context.Background()) @@ -290,9 +285,10 @@ func TestAggSenderStart(t *testing.T) { ctx, log.WithFields("test", "unittest"), Config{ - StoragePath: "file::memory:?cache=shared", + StoragePath: "file:TestAggSenderStart?mode=memory&cache=shared", + DelayBeetweenRetries: types.Duration{Duration: 1 * time.Microsecond}, }, - AggLayerMock, + aggLayerMock, nil, bridgeL2SyncerMock, epochNotifierMock) @@ -300,7 +296,9 @@ func TestAggSenderStart(t *testing.T) { require.NotNil(t, aggSender) ch := make(chan aggsendertypes.EpochEvent) epochNotifierMock.EXPECT().Subscribe("aggsender").Return(ch) + bridgeL2SyncerMock.EXPECT().OriginNetwork().Return(uint32(1)) bridgeL2SyncerMock.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), nil) + aggLayerMock.EXPECT().GetLatestKnownCertificateHeader(mock.Anything).Return(nil, nil) go aggSender.Start(ctx) ch <- aggsendertypes.EpochEvent{ @@ -631,6 +629,7 @@ func TestBuildCertificate(t *testing.T) { lastSentCertificateInfo: aggsendertypes.CertificateInfo{ NewLocalExitRoot: common.HexToHash("0x123"), Height: 1, + Status: agglayer.Settled, }, toBlock: 10, expectedCert: &agglayer.Certificate{ @@ -788,7 +787,7 @@ func TestBuildCertificate(t *testing.T) { l1infoTreeSyncer: mockL1InfoTreeSyncer, log: log.WithFields("test", "unittest"), } - cert, err := 
aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, tt.lastSentCertificateInfo, tt.toBlock) + cert, err := aggSender.buildCertificate(context.Background(), tt.bridges, tt.claims, &tt.lastSentCertificateInfo, tt.toBlock) if tt.expectedError { require.Error(t, err) @@ -902,15 +901,15 @@ func TestCheckIfCertificatesAreSettled(t *testing.T) { mockAggLayerClient := agglayer.NewAgglayerClientMock(t) mockLogger := log.WithFields("test", "unittest") - mockStorage.On("GetCertificatesByStatus", nonSettledStatuses).Return( + mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses).Return( tt.pendingCertificates, tt.getFromDBError) for certID, header := range tt.certificateHeaders { mockAggLayerClient.On("GetCertificateHeader", certID).Return(header, tt.clientError) } if tt.updateDBError != nil { - mockStorage.On("UpdateCertificateStatus", mock.Anything, mock.Anything).Return(tt.updateDBError) + mockStorage.On("UpdateCertificate", mock.Anything, mock.Anything).Return(tt.updateDBError) } else if tt.clientError == nil && tt.getFromDBError == nil { - mockStorage.On("UpdateCertificateStatus", mock.Anything, mock.Anything).Return(nil) + mockStorage.On("UpdateCertificate", mock.Anything, mock.Anything).Return(nil) } aggSender := &AggSender{ @@ -961,7 +960,7 @@ func TestSendCertificate(t *testing.T) { var ( aggsender = &AggSender{ log: log.WithFields("aggsender", 1), - cfg: Config{}, + cfg: Config{MaxRetriesStoreCertificate: 1}, sequencerKey: cfg.sequencerKey, } mockStorage *mocks.AggSenderStorage @@ -973,8 +972,8 @@ func TestSendCertificate(t *testing.T) { if cfg.shouldSendCertificate != nil || cfg.getLastSentCertificate != nil || cfg.saveLastSentCertificate != nil { mockStorage = mocks.NewAggSenderStorage(t) - mockStorage.On("GetCertificatesByStatus", nonSettledStatuses). - Return(cfg.shouldSendCertificate...).Once() + mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses). + Return(cfg.shouldSendCertificate...) aggsender.storage = mockStorage @@ -983,7 +982,7 @@ func TestSendCertificate(t *testing.T) { } if cfg.saveLastSentCertificate != nil { - mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(cfg.saveLastSentCertificate...).Once() + mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(cfg.saveLastSentCertificate...) 
} } @@ -1055,14 +1054,14 @@ func TestSendCertificate(t *testing.T) { name: "error getting last sent certificate", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(8), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{}, errors.New("error getting last sent certificate")}, + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{}, errors.New("error getting last sent certificate")}, expectedError: "error getting last sent certificate", }, { name: "no new blocks to send certificate", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(41), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 41, CertificateID: common.HexToHash("0x111"), NewLocalExitRoot: common.HexToHash("0x13223"), @@ -1074,7 +1073,7 @@ func TestSendCertificate(t *testing.T) { name: "get bridges error", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(59), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 50, CertificateID: common.HexToHash("0x1111"), NewLocalExitRoot: common.HexToHash("0x132233"), @@ -1088,7 +1087,7 @@ func TestSendCertificate(t *testing.T) { name: "no bridges", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(69), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 60, CertificateID: common.HexToHash("0x11111"), NewLocalExitRoot: common.HexToHash("0x1322233"), @@ -1101,7 +1100,7 @@ func TestSendCertificate(t *testing.T) { name: "get claims error", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(79), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 70, CertificateID: common.HexToHash("0x121111"), NewLocalExitRoot: common.HexToHash("0x13122233"), @@ -1123,7 +1122,7 @@ func TestSendCertificate(t *testing.T) { name: "error getting info by global exit root", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 80, CertificateID: common.HexToHash("0x1321111"), NewLocalExitRoot: common.HexToHash("0x131122233"), @@ -1150,7 +1149,7 @@ func TestSendCertificate(t *testing.T) { name: "error getting L1 Info tree root by index", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 80, CertificateID: common.HexToHash("0x1321111"), NewLocalExitRoot: common.HexToHash("0x131122233"), @@ -1187,7 +1186,7 @@ func TestSendCertificate(t *testing.T) { name: "error getting L1 Info tree merkle proof from index to root", shouldSendCertificate: 
[]interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 80, CertificateID: common.HexToHash("0x1321111"), NewLocalExitRoot: common.HexToHash("0x131122233"), @@ -1226,12 +1225,14 @@ func TestSendCertificate(t *testing.T) { name: "send certificate error", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(99), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ - Height: 90, - CertificateID: common.HexToHash("0x1121111"), - NewLocalExitRoot: common.HexToHash("0x111122211"), - FromBlock: 80, - ToBlock: 81, + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ + Height: 90, + CertificateID: common.HexToHash("0x1121111"), + NewLocalExitRoot: common.HexToHash("0x111122211"), + PreviousLocalExitRoot: &ler1, + FromBlock: 80, + ToBlock: 81, + Status: agglayer.Settled, }, nil}, getBridges: []interface{}{[]bridgesync.Bridge{ { @@ -1253,12 +1254,13 @@ func TestSendCertificate(t *testing.T) { name: "store last sent certificate error", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(109), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 100, CertificateID: common.HexToHash("0x11121111"), NewLocalExitRoot: common.HexToHash("0x1211122211"), FromBlock: 90, ToBlock: 91, + Status: agglayer.Settled, }, nil}, getBridges: []interface{}{[]bridgesync.Bridge{ { @@ -1281,12 +1283,13 @@ func TestSendCertificate(t *testing.T) { name: "successful sending of certificate", shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, lastL2BlockProcessed: []interface{}{uint64(119), nil}, - getLastSentCertificate: []interface{}{aggsendertypes.CertificateInfo{ + getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ Height: 110, CertificateID: common.HexToHash("0x12121111"), NewLocalExitRoot: common.HexToHash("0x1221122211"), FromBlock: 100, ToBlock: 101, + Status: agglayer.Settled, }, nil}, getBridges: []interface{}{[]bridgesync.Bridge{ { @@ -1493,14 +1496,18 @@ func TestGetNextHeightAndPreviousLER(t *testing.T) { t.Parallel() tests := []struct { - name string - lastSentCertificateInfo aggsendertypes.CertificateInfo - expectedHeight uint64 - expectedPreviousLER common.Hash + name string + lastSentCertificateInfo *aggsendertypes.CertificateInfo + lastSettleCertificateInfoCall bool + lastSettleCertificateInfo *aggsendertypes.CertificateInfo + lastSettleCertificateInfoError error + expectedHeight uint64 + expectedPreviousLER common.Hash + expectedError bool }{ { name: "Normal case", - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ Height: 10, NewLocalExitRoot: common.HexToHash("0x123"), Status: agglayer.Settled, @@ -1509,24 +1516,107 @@ func TestGetNextHeightAndPreviousLER(t *testing.T) { expectedPreviousLER: common.HexToHash("0x123"), }, { - name: "Previous certificate in error", - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - Height: 10, + name: "First certificate", + lastSentCertificateInfo: nil, + expectedHeight: 0, + expectedPreviousLER: zeroLER, + }, + { + name: "First certificate error, with prevLER", 
+ lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 0, + NewLocalExitRoot: common.HexToHash("0x123"), + Status: agglayer.InError, + PreviousLocalExitRoot: &ler1, + }, + expectedHeight: 0, + expectedPreviousLER: ler1, + }, + { + name: "First certificate error, no prevLER", + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 0, NewLocalExitRoot: common.HexToHash("0x123"), Status: agglayer.InError, }, + expectedHeight: 0, + expectedPreviousLER: zeroLER, + }, + { + name: "n certificate error, prevLER", + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + PreviousLocalExitRoot: &ler1, + Status: agglayer.InError, + }, expectedHeight: 10, - expectedPreviousLER: common.HexToHash("0x123"), + expectedPreviousLER: ler1, }, { - name: "First certificate", - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - Height: 0, - NewLocalExitRoot: common.Hash{}, + name: "last cert not closed, error", + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + PreviousLocalExitRoot: &ler1, + Status: agglayer.Pending, + }, + expectedHeight: 10, + expectedPreviousLER: ler1, + expectedError: true, + }, + { + name: "Previous certificate in error, no prevLER", + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + Status: agglayer.InError, + }, + lastSettleCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 9, + NewLocalExitRoot: common.HexToHash("0x3456"), Status: agglayer.Settled, }, - expectedHeight: 0, - expectedPreviousLER: zeroLER, + expectedHeight: 10, + expectedPreviousLER: common.HexToHash("0x3456"), + }, + { + name: "Previous certificate in error, no prevLER. Error getting previous cert", + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + Status: agglayer.InError, + }, + lastSettleCertificateInfo: nil, + lastSettleCertificateInfoError: errors.New("error getting last settle certificate"), + expectedError: true, + }, + { + name: "Previous certificate in error, no prevLER. prev cert not available on storage", + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + Status: agglayer.InError, + }, + lastSettleCertificateInfoCall: true, + lastSettleCertificateInfo: nil, + lastSettleCertificateInfoError: nil, + expectedError: true, + }, + { + name: "Previous certificate in error, no prevLER. 
prev cert not available on storage", + lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 10, + NewLocalExitRoot: common.HexToHash("0x123"), + Status: agglayer.InError, + }, + lastSettleCertificateInfo: &aggsendertypes.CertificateInfo{ + Height: 9, + NewLocalExitRoot: common.HexToHash("0x3456"), + Status: agglayer.InError, + }, + lastSettleCertificateInfoError: nil, + expectedError: true, }, } @@ -1535,12 +1625,19 @@ func TestGetNextHeightAndPreviousLER(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - - aggSender := &AggSender{log: log.WithFields("aggsender-test", "getNextHeightAndPreviousLER")} - height, previousLER := aggSender.getNextHeightAndPreviousLER(&tt.lastSentCertificateInfo) - - require.Equal(t, tt.expectedHeight, height) - require.Equal(t, tt.expectedPreviousLER, previousLER) + storageMock := mocks.NewAggSenderStorage(t) + aggSender := &AggSender{log: log.WithFields("aggsender-test", "getNextHeightAndPreviousLER"), storage: storageMock} + if tt.lastSettleCertificateInfoCall || tt.lastSettleCertificateInfo != nil || tt.lastSettleCertificateInfoError != nil { + storageMock.EXPECT().GetCertificateByHeight(mock.Anything).Return(tt.lastSettleCertificateInfo, tt.lastSettleCertificateInfoError).Once() + } + height, previousLER, err := aggSender.getNextHeightAndPreviousLER(tt.lastSentCertificateInfo) + if tt.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expectedHeight, height) + require.Equal(t, tt.expectedPreviousLER, previousLER) + } }) } } @@ -1570,12 +1667,13 @@ func TestSendCertificate_NoClaims(t *testing.T) { }, } - mockStorage.On("GetCertificatesByStatus", nonSettledStatuses).Return([]*aggsendertypes.CertificateInfo{}, nil).Once() - mockStorage.On("GetLastSentCertificate").Return(aggsendertypes.CertificateInfo{ + mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses).Return([]*aggsendertypes.CertificateInfo{}, nil).Once() + mockStorage.On("GetLastSentCertificate").Return(&aggsendertypes.CertificateInfo{ NewLocalExitRoot: common.HexToHash("0x123"), Height: 1, FromBlock: 0, ToBlock: 10, + Status: agglayer.Settled, }, nil).Once() mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(nil).Once() mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(uint64(50), nil) @@ -1611,3 +1709,299 @@ func TestSendCertificate_NoClaims(t *testing.T) { mockAggLayerClient.AssertExpectations(t) mockL1InfoTreeSyncer.AssertExpectations(t) } + +func TestMetadataConversions(t *testing.T) { + toBlock := uint64(123567890) + c := createCertificateMetadata(toBlock) + extractBlock := extractFromCertificateMetadataToBlock(c) + require.Equal(t, toBlock, extractBlock) +} + +func TestExtractFromCertificateMetadataToBlock(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + metadata common.Hash + expected uint64 + }{ + { + name: "Valid metadata", + metadata: common.BigToHash(big.NewInt(123567890)), + expected: 123567890, + }, + { + name: "Zero metadata", + metadata: common.BigToHash(big.NewInt(0)), + expected: 0, + }, + { + name: "Max uint64 metadata", + metadata: common.BigToHash(new(big.Int).SetUint64(^uint64(0))), + expected: ^uint64(0), + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result := extractFromCertificateMetadataToBlock(tt.metadata) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestCheckLastCertificateFromAgglayer_ErrorAggLayer(t *testing.T) { + testData := 
newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, fmt.Errorf("unittest error")).Once() + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.Error(t, err) +} + +func TestCheckLastCertificateFromAgglayer_ErrorStorageGetLastSentCertificate(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(nil, fmt.Errorf("unittest error")) + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.Error(t, err) +} + +// TestCheckLastCertificateFromAgglayer_Case1NoCerts +// CASE 1: No certificates in local storage and agglayer +// Aggsender and agglayer are empty so it's ok +func TestCheckLastCertificateFromAgglayer_Case1NoCerts(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagNone) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, nil).Once() + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.NoError(t, err) +} + +// TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemote +// CASE 2: No certificates in local storage but agglayer has one +// The local DB is empty and we set the lastCert reported by AggLayer +func TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemote(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagNone) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). + Return(certInfoToCertHeader(&testData.testCerts[0], networkIDTest), nil).Once() + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.NoError(t, err) + localCert, err := testData.sut.storage.GetLastSentCertificate() + require.NoError(t, err) + require.Equal(t, testData.testCerts[0].CertificateID, localCert.CertificateID) +} + +// TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemoteErrorStorage +// sub case of previous one that fails to update local storage +func TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemoteErrorStorage(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
+ Return(certInfoToCertHeader(&testData.testCerts[0], networkIDTest), nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(nil, nil) + testData.storageMock.EXPECT().SaveLastSentCertificate(mock.Anything, mock.Anything).Return(errTest).Once() + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.Error(t, err) +} + +// CASE 2.1: certificate in storage but not in agglayer +// sub case of previous one that fails to update local storage +func TestCheckLastCertificateFromAgglayer_Case2_1NoCertRemoteButCertLocal(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). + Return(nil, nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.Error(t, err) +} + +// CASE 3.1: the certificate on the agglayer has less height than the one stored in the local storage +func TestCheckLastCertificateFromAgglayer_Case3_1LessHeight(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). + Return(certInfoToCertHeader(&testData.testCerts[0], networkIDTest), nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[1], nil) + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.ErrorContains(t, err, "recovery: the last certificate in the agglayer has less height (1) than the one in the local storage (2)") +} + +// CASE 3.2: AggSender and AggLayer not same height. AggLayer has a new certificate +func TestCheckLastCertificateFromAgglayer_Case3_2Mismatch(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). + Return(certInfoToCertHeader(&testData.testCerts[1], networkIDTest), nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) + testData.storageMock.EXPECT().SaveLastSentCertificate(mock.Anything, mock.Anything).Return(nil).Once() + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.NoError(t, err) +} + +// CASE 4: AggSender and AggLayer not same certificateID +func TestCheckLastCertificateFromAgglayer_Case4Mismatch(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
+ Return(certInfoToCertHeader(&testData.testCerts[0], networkIDTest), nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[1], nil) + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.Error(t, err) +} + +// CASE 5: AggSender and AggLayer same certificateID and same status +func TestCheckLastCertificateFromAgglayer_Case5SameStatus(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). + Return(certInfoToCertHeader(&testData.testCerts[0], networkIDTest), nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.NoError(t, err) +} + +// CASE 5: AggSender and AggLayer same certificateID and differ on status +func TestCheckLastCertificateFromAgglayer_Case5UpdateStatus(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + aggLayerCert := certInfoToCertHeader(&testData.testCerts[0], networkIDTest) + aggLayerCert.Status = agglayer.Settled + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). + Return(aggLayerCert, nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) + testData.storageMock.EXPECT().UpdateCertificate(mock.Anything, mock.Anything).Return(nil).Once() + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.NoError(t, err) +} + +// CASE 4: AggSender and AggLayer same certificateID and differ on status but fails update +func TestCheckLastCertificateFromAgglayer_Case4ErrorUpdateStatus(t *testing.T) { + testData := newAggsenderTestData(t, testDataFlagMockStorage) + testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() + aggLayerCert := certInfoToCertHeader(&testData.testCerts[0], networkIDTest) + aggLayerCert.Status = agglayer.Settled + testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
+ Return(aggLayerCert, nil).Once() + testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) + testData.storageMock.EXPECT().UpdateCertificate(mock.Anything, mock.Anything).Return(errTest).Once() + + err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) + + require.Error(t, err) +} + +type testDataFlags = int + +const ( + testDataFlagNone testDataFlags = 0 + testDataFlagMockStorage testDataFlags = 1 +) + +type aggsenderTestData struct { + ctx context.Context + agglayerClientMock *agglayer.AgglayerClientMock + l2syncerMock *mocks.L2BridgeSyncer + l1InfoTreeSyncerMock *mocks.L1InfoTreeSyncer + storageMock *mocks.AggSenderStorage + sut *AggSender + testCerts []aggsendertypes.CertificateInfo +} + +func certInfoToCertHeader(certInfo *aggsendertypes.CertificateInfo, networkID uint32) *agglayer.CertificateHeader { + if certInfo == nil { + return nil + } + return &agglayer.CertificateHeader{ + Height: certInfo.Height, + NetworkID: networkID, + CertificateID: certInfo.CertificateID, + NewLocalExitRoot: certInfo.NewLocalExitRoot, + Status: agglayer.Pending, + Metadata: createCertificateMetadata(certInfo.ToBlock), + } +} + +func newAggsenderTestData(t *testing.T, creationFlags testDataFlags) *aggsenderTestData { + t.Helper() + l2syncerMock := mocks.NewL2BridgeSyncer(t) + agglayerClientMock := agglayer.NewAgglayerClientMock(t) + l1InfoTreeSyncerMock := mocks.NewL1InfoTreeSyncer(t) + logger := log.WithFields("aggsender-test", "checkLastCertificateFromAgglayer") + var storageMock *mocks.AggSenderStorage + var storage db.AggSenderStorage + var err error + if creationFlags&testDataFlagMockStorage != 0 { + storageMock = mocks.NewAggSenderStorage(t) + storage = storageMock + } else { + pc, _, _, _ := runtime.Caller(1) + part := runtime.FuncForPC(pc) + dbPath := fmt.Sprintf("file:%d?mode=memory&cache=shared", part.Entry()) + storageConfig := db.AggSenderSQLStorageConfig{ + DBPath: dbPath, + KeepCertificatesHistory: true, + } + storage, err = db.NewAggSenderSQLStorage(logger, storageConfig) + require.NoError(t, err) + } + + ctx := context.TODO() + sut := &AggSender{ + log: logger, + l2Syncer: l2syncerMock, + aggLayerClient: agglayerClientMock, + storage: storage, + l1infoTreeSyncer: l1InfoTreeSyncerMock, + } + testCerts := []aggsendertypes.CertificateInfo{ + { + Height: 1, + CertificateID: common.HexToHash("0x1"), + NewLocalExitRoot: common.HexToHash("0x2"), + Status: agglayer.Pending, + }, + { + Height: 2, + CertificateID: common.HexToHash("0x1a111"), + NewLocalExitRoot: common.HexToHash("0x2a2"), + Status: agglayer.Pending, + }, + } + + return &aggsenderTestData{ + ctx: ctx, + agglayerClientMock: agglayerClientMock, + l2syncerMock: l2syncerMock, + l1InfoTreeSyncerMock: l1InfoTreeSyncerMock, + storageMock: storageMock, + sut: sut, + testCerts: testCerts, + } +} diff --git a/aggsender/block_notifier_polling.go b/aggsender/block_notifier_polling.go index 17dafefa..dbae1a38 100644 --- a/aggsender/block_notifier_polling.go +++ b/aggsender/block_notifier_polling.go @@ -158,7 +158,7 @@ func (b *BlockNotifierPolling) step(ctx context.Context, newState := previousState.incommingNewBlock(currentBlock.Number.Uint64()) b.logger.Debugf("New block seen [finality:%s]: %d. 
blockRate:%s", b.config.BlockFinalityType, currentBlock.Number.Uint64(), newState.previousBlockTime) - + eventToEmit.BlockRate = *newState.previousBlockTime return b.nextBlockRequestDelay(newState, nil), newState, eventToEmit } diff --git a/aggsender/block_notifier_polling_test.go b/aggsender/block_notifier_polling_test.go index 83b3b643..e4f15ad7 100644 --- a/aggsender/block_notifier_polling_test.go +++ b/aggsender/block_notifier_polling_test.go @@ -32,11 +32,8 @@ func TestExploratoryBlockNotifierPolling(t *testing.T) { require.NoError(t, errSut) go sut.Start(context.Background()) ch := sut.Subscribe("test") - for { - select { - case block := <-ch: - fmt.Println(block) - } + for block := range ch { + fmt.Println(block) } } diff --git a/aggsender/config.go b/aggsender/config.go index 8ae0b759..cfd0b63c 100644 --- a/aggsender/config.go +++ b/aggsender/config.go @@ -29,6 +29,15 @@ type Config struct { EpochNotificationPercentage uint `mapstructure:"EpochNotificationPercentage"` // SaveCertificatesToFilesPath if != "" tells the AggSender to save the certificates to a file in this path SaveCertificatesToFilesPath string `mapstructure:"SaveCertificatesToFilesPath"` + + // MaxRetriesStoreCertificate is the maximum number of retries to store a certificate + // 0 is infinite + MaxRetriesStoreCertificate int `mapstructure:"MaxRetriesStoreCertificate"` + // DelayBeetweenRetries is the delay between retries: + // is used on store Certificate and also in initial check + DelayBeetweenRetries types.Duration `mapstructure:"DelayBeetweenRetries"` + // KeepCertificatesHistory is a flag to keep the certificates history on storage + KeepCertificatesHistory bool `mapstructure:"KeepCertificatesHistory"` } // String returns a string representation of the Config @@ -38,7 +47,6 @@ func (c Config) String() string { "BlockGetInterval: " + c.BlockGetInterval.String() + "\n" + "CheckSettledInterval: " + c.CheckSettledInterval.String() + "\n" + "AggsenderPrivateKeyPath: " + c.AggsenderPrivateKey.Path + "\n" + - "AggsenderPrivateKeyPassword: " + c.AggsenderPrivateKey.Password + "\n" + "URLRPCL2: " + c.URLRPCL2 + "\n" + "BlockFinality: " + c.BlockFinality + "\n" + "EpochNotificationPercentage: " + fmt.Sprintf("%d", c.EpochNotificationPercentage) + "\n" + diff --git a/aggsender/db/aggsender_db_storage.go b/aggsender/db/aggsender_db_storage.go index 15866c29..597a9bd0 100644 --- a/aggsender/db/aggsender_db_storage.go +++ b/aggsender/db/aggsender_db_storage.go @@ -21,41 +21,48 @@ const errWhileRollbackFormat = "error while rolling back tx: %w" // AggSenderStorage is the interface that defines the methods to interact with the storage type AggSenderStorage interface { // GetCertificateByHeight returns a certificate by its height - GetCertificateByHeight(height uint64) (types.CertificateInfo, error) + GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) // GetLastSentCertificate returns the last certificate sent to the aggLayer - GetLastSentCertificate() (types.CertificateInfo, error) + GetLastSentCertificate() (*types.CertificateInfo, error) // SaveLastSentCertificate saves the last certificate sent to the aggLayer SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error // DeleteCertificate deletes a certificate from the storage DeleteCertificate(ctx context.Context, certificateID common.Hash) error // GetCertificatesByStatus returns a list of certificates by their status GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) - // 
UpdateCertificateStatus updates the status of a certificate - UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error + // UpdateCertificate updates certificate in db + UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error } var _ AggSenderStorage = (*AggSenderSQLStorage)(nil) +// AggSenderSQLStorageConfig is the configuration for the AggSenderSQLStorage +type AggSenderSQLStorageConfig struct { + DBPath string + KeepCertificatesHistory bool +} + // AggSenderSQLStorage is the struct that implements the AggSenderStorage interface type AggSenderSQLStorage struct { logger *log.Logger db *sql.DB + cfg AggSenderSQLStorageConfig } // NewAggSenderSQLStorage creates a new AggSenderSQLStorage -func NewAggSenderSQLStorage(logger *log.Logger, dbPath string) (*AggSenderSQLStorage, error) { - if err := migrations.RunMigrations(dbPath); err != nil { +func NewAggSenderSQLStorage(logger *log.Logger, cfg AggSenderSQLStorageConfig) (*AggSenderSQLStorage, error) { + db, err := db.NewSQLiteDB(cfg.DBPath) + if err != nil { return nil, err } - - db, err := db.NewSQLiteDB(dbPath) - if err != nil { + if err := migrations.RunMigrations(logger, db); err != nil { return nil, err } return &AggSenderSQLStorage{ db: db, logger: logger, + cfg: cfg, }, nil } @@ -88,38 +95,38 @@ func (a *AggSenderSQLStorage) GetCertificatesByStatus( } // GetCertificateByHeight returns a certificate by its height -func (a *AggSenderSQLStorage) GetCertificateByHeight(height uint64) (types.CertificateInfo, error) { +func (a *AggSenderSQLStorage) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { return getCertificateByHeight(a.db, height) } // getCertificateByHeight returns a certificate by its height using the provided db -func getCertificateByHeight(db meddler.DB, - height uint64) (types.CertificateInfo, error) { +func getCertificateByHeight(db db.Querier, + height uint64) (*types.CertificateInfo, error) { var certificateInfo types.CertificateInfo if err := meddler.QueryRow(db, &certificateInfo, "SELECT * FROM certificate_info WHERE height = $1;", height); err != nil { - return types.CertificateInfo{}, getSelectQueryError(height, err) + return nil, getSelectQueryError(height, err) } - return certificateInfo, nil + return &certificateInfo, nil } // GetLastSentCertificate returns the last certificate sent to the aggLayer -func (a *AggSenderSQLStorage) GetLastSentCertificate() (types.CertificateInfo, error) { +func (a *AggSenderSQLStorage) GetLastSentCertificate() (*types.CertificateInfo, error) { var certificateInfo types.CertificateInfo if err := meddler.QueryRow(a.db, &certificateInfo, "SELECT * FROM certificate_info ORDER BY height DESC LIMIT 1;"); err != nil { - return types.CertificateInfo{}, getSelectQueryError(0, err) + return nil, getSelectQueryError(0, err) } - return certificateInfo, nil + return &certificateInfo, nil } // SaveLastSentCertificate saves the last certificate sent to the aggLayer func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { tx, err := db.NewTx(ctx, a.db) if err != nil { - return err + return fmt.Errorf("saveLastSentCertificate NewTx. Err: %w", err) } defer func() { if err != nil { @@ -131,14 +138,14 @@ func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certi cert, err := getCertificateByHeight(tx, certificate.Height) if err != nil && !errors.Is(err, db.ErrNotFound) { - return err + return fmt.Errorf("saveLastSentCertificate getCertificateByHeight. 
Err: %w", err) } - if cert.CertificateID != (common.Hash{}) { + if cert != nil { // we already have a certificate with this height // we need to delete it before inserting the new one - if err = deleteCertificate(tx, cert.CertificateID); err != nil { - return err + if err = a.moveCertificateToHistoryOrDelete(tx, cert); err != nil { + return fmt.Errorf("saveLastSentCertificate moveCertificateToHistory Err: %w", err) } } @@ -147,7 +154,7 @@ func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certi } if err = tx.Commit(); err != nil { - return err + return fmt.Errorf("saveLastSentCertificate commit. Err: %w", err) } a.logger.Debugf("inserted certificate - Height: %d. Hash: %s", certificate.Height, certificate.CertificateID) @@ -155,6 +162,23 @@ func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certi return nil } +func (a *AggSenderSQLStorage) moveCertificateToHistoryOrDelete(tx db.Querier, + certificate *types.CertificateInfo) error { + if a.cfg.KeepCertificatesHistory { + a.logger.Debugf("moving certificate to history - new CertificateID: %s", certificate.ID()) + if _, err := tx.Exec(`INSERT INTO certificate_info_history SELECT * FROM certificate_info WHERE height = $1;`, + certificate.Height); err != nil { + return fmt.Errorf("error moving certificate to history: %w", err) + } + } + a.logger.Debugf("deleting certificate - CertificateID: %s", certificate.ID()) + if err := deleteCertificate(tx, certificate.CertificateID); err != nil { + return fmt.Errorf("deleteCertificate %s . Error: %w", certificate.ID(), err) + } + + return nil +} + // DeleteCertificate deletes a certificate from the storage func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { tx, err := db.NewTx(ctx, a.db) @@ -169,7 +193,7 @@ func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificate } }() - if err = deleteCertificate(a.db, certificateID); err != nil { + if err = deleteCertificate(tx, certificateID); err != nil { return err } @@ -183,16 +207,16 @@ func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificate } // deleteCertificate deletes a certificate from the storage using the provided db -func deleteCertificate(db meddler.DB, certificateID common.Hash) error { - if _, err := db.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID.String()); err != nil { +func deleteCertificate(tx db.Querier, certificateID common.Hash) error { + if _, err := tx.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID.String()); err != nil { return fmt.Errorf("error deleting certificate info: %w", err) } return nil } -// UpdateCertificateStatus updates the status of a certificate -func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { +// UpdateCertificate updates a certificate +func (a *AggSenderSQLStorage) UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error { tx, err := db.NewTx(ctx, a.db) if err != nil { return err @@ -205,8 +229,8 @@ func (a *AggSenderSQLStorage) UpdateCertificateStatus(ctx context.Context, certi } }() - if _, err = tx.Exec(`UPDATE certificate_info SET status = $1 WHERE certificate_id = $2;`, - certificate.Status, certificate.CertificateID.String()); err != nil { + if _, err = tx.Exec(`UPDATE certificate_info SET status = $1, updated_at = $2 WHERE certificate_id = $3;`, + certificate.Status, certificate.UpdatedAt, 
certificate.CertificateID.String()); err != nil { return fmt.Errorf("error updating certificate info: %w", err) } if err = tx.Commit(); err != nil { diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go index a0a20894..1af0df86 100644 --- a/aggsender/db/aggsender_db_storage_test.go +++ b/aggsender/db/aggsender_db_storage_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/db/migrations" "github.com/0xPolygon/cdk/aggsender/types" "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/log" @@ -22,9 +21,12 @@ func Test_Storage(t *testing.T) { path := path.Join(t.TempDir(), "file::memory:?cache=shared") log.Debugf("sqlite path: %s", path) - require.NoError(t, migrations.RunMigrations(path)) + cfg := AggSenderSQLStorageConfig{ + DBPath: path, + KeepCertificatesHistory: true, + } - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), path) + storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) require.NoError(t, err) updateTime := time.Now().UTC().UnixMilli() @@ -45,7 +47,7 @@ func Test_Storage(t *testing.T) { certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) - require.Equal(t, certificate, certificateFromDB) + require.Equal(t, certificate, *certificateFromDB) require.NoError(t, storage.clean()) }) @@ -66,7 +68,7 @@ func Test_Storage(t *testing.T) { certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.ErrorIs(t, err, db.ErrNotFound) - require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.Nil(t, certificateFromDB) require.NoError(t, storage.clean()) }) @@ -74,7 +76,7 @@ func Test_Storage(t *testing.T) { // try getting a certificate that doesn't exist certificateFromDB, err := storage.GetLastSentCertificate() require.NoError(t, err) - require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.Nil(t, certificateFromDB) // try getting a certificate that exists certificate := types.CertificateInfo{ @@ -91,8 +93,8 @@ func Test_Storage(t *testing.T) { certificateFromDB, err = storage.GetLastSentCertificate() require.NoError(t, err) - - require.Equal(t, certificate, certificateFromDB) + require.NotNil(t, certificateFromDB) + require.Equal(t, certificate, *certificateFromDB) require.NoError(t, storage.clean()) }) @@ -100,12 +102,12 @@ func Test_Storage(t *testing.T) { // try getting height 0 certificateFromDB, err := storage.GetCertificateByHeight(0) require.NoError(t, err) - require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.Nil(t, certificateFromDB) // try getting a certificate that doesn't exist certificateFromDB, err = storage.GetCertificateByHeight(4) require.ErrorIs(t, err, db.ErrNotFound) - require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.Nil(t, certificateFromDB) // try getting a certificate that exists certificate := types.CertificateInfo{ @@ -122,8 +124,8 @@ func Test_Storage(t *testing.T) { certificateFromDB, err = storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) - - require.Equal(t, certificate, certificateFromDB) + require.NotNil(t, certificateFromDB) + require.Equal(t, certificate, *certificateFromDB) require.NoError(t, storage.clean()) }) @@ -201,6 +203,7 @@ func Test_Storage(t *testing.T) { // Insert a certificate certificate := types.CertificateInfo{ Height: 13, + RetryCount: 1234, CertificateID: common.HexToHash("0xD"), NewLocalExitRoot: common.HexToHash("0xE"), 
FromBlock: 13, @@ -213,12 +216,14 @@ func Test_Storage(t *testing.T) { // Update the status of the certificate certificate.Status = agglayer.Settled - require.NoError(t, storage.UpdateCertificateStatus(ctx, certificate)) + certificate.UpdatedAt = updateTime + 1 + require.NoError(t, storage.UpdateCertificate(ctx, certificate)) // Fetch the certificate and verify the status has been updated certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) - require.Equal(t, certificate.Status, certificateFromDB.Status) + require.Equal(t, certificate.Status, certificateFromDB.Status, "equal status") + require.Equal(t, certificate.UpdatedAt, certificateFromDB.UpdatedAt, "equal updated at") require.NoError(t, storage.clean()) }) @@ -229,9 +234,12 @@ func Test_SaveLastSentCertificate(t *testing.T) { path := path.Join(t.TempDir(), "file::memory:?cache=shared") log.Debugf("sqlite path: %s", path) - require.NoError(t, migrations.RunMigrations(path)) + cfg := AggSenderSQLStorageConfig{ + DBPath: path, + KeepCertificatesHistory: true, + } - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), path) + storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) require.NoError(t, err) updateTime := time.Now().UTC().UnixMilli() @@ -251,7 +259,7 @@ func Test_SaveLastSentCertificate(t *testing.T) { certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) - require.Equal(t, certificate, certificateFromDB) + require.Equal(t, certificate, *certificateFromDB) require.NoError(t, storage.clean()) }) @@ -281,7 +289,7 @@ func Test_SaveLastSentCertificate(t *testing.T) { certificateFromDB, err := storage.GetCertificateByHeight(updatedCertificate.Height) require.NoError(t, err) - require.Equal(t, updatedCertificate, certificateFromDB) + require.Equal(t, updatedCertificate, *certificateFromDB) require.NoError(t, storage.clean()) }) @@ -310,7 +318,7 @@ func Test_SaveLastSentCertificate(t *testing.T) { certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.ErrorIs(t, err, db.ErrNotFound) - require.Equal(t, types.CertificateInfo{}, certificateFromDB) + require.Nil(t, certificateFromDB) require.NoError(t, storage.clean()) }) @@ -362,9 +370,50 @@ func Test_SaveLastSentCertificate(t *testing.T) { certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) require.NoError(t, err) - require.Equal(t, certificate, certificateFromDB) + require.Equal(t, certificate, *certificateFromDB) require.Equal(t, raw, []byte(certificateFromDB.SignedCertificate)) require.NoError(t, storage.clean()) }) } + +func Test_StoragePreviousLER(t *testing.T) { + ctx := context.TODO() + dbPath := path.Join(t.TempDir(), "Test_StoragePreviousLER.sqlite") + cfg := AggSenderSQLStorageConfig{ + DBPath: dbPath, + KeepCertificatesHistory: true, + } + storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) + require.NoError(t, err) + require.NotNil(t, storage) + + certNoLER := types.CertificateInfo{ + Height: 0, + CertificateID: common.HexToHash("0x1"), + Status: agglayer.InError, + NewLocalExitRoot: common.HexToHash("0x2"), + } + err = storage.SaveLastSentCertificate(ctx, certNoLER) + require.NoError(t, err) + + readCertNoLER, err := storage.GetCertificateByHeight(0) + require.NoError(t, err) + require.NotNil(t, readCertNoLER) + require.Equal(t, certNoLER, *readCertNoLER) + + certLER := types.CertificateInfo{ + Height: 1, + CertificateID: common.HexToHash("0x2"), + Status: 
agglayer.InError, + NewLocalExitRoot: common.HexToHash("0x2"), + PreviousLocalExitRoot: &common.Hash{}, + } + err = storage.SaveLastSentCertificate(ctx, certLER) + require.NoError(t, err) + + readCertWithLER, err := storage.GetCertificateByHeight(1) + require.NoError(t, err) + require.NotNil(t, readCertWithLER) + require.Equal(t, certLER, *readCertWithLER) +} diff --git a/aggsender/db/migrations/0001.sql b/aggsender/db/migrations/0001.sql index b2d600b8..d418f1d8 100644 --- a/aggsender/db/migrations/0001.sql +++ b/aggsender/db/migrations/0001.sql @@ -1,15 +1,34 @@ -- +migrate Down DROP TABLE IF EXISTS certificate_info; +DROP TABLE IF EXISTS certificate_info_history; -- +migrate Up CREATE TABLE certificate_info ( height INTEGER NOT NULL, - certificate_id VARCHAR NOT NULL PRIMARY KEY, + retry_count INTEGER DEFAULT 0, + certificate_id VARCHAR NOT NULL, status INTEGER NOT NULL, + previous_local_exit_root VARCHAR, new_local_exit_root VARCHAR NOT NULL, from_block INTEGER NOT NULL, to_block INTEGER NOT NULL, created_at INTEGER NOT NULL, updated_at INTEGER NOT NULL, - signed_certificate TEXT -); \ No newline at end of file + signed_certificate TEXT, + PRIMARY KEY (height) +); + +CREATE TABLE certificate_info_history ( + height INTEGER NOT NULL, + retry_count INTEGER DEFAULT 0, + certificate_id VARCHAR NOT NULL, + status INTEGER NOT NULL, + previous_local_exit_root VARCHAR, + new_local_exit_root VARCHAR NOT NULL, + from_block INTEGER NOT NULL, + to_block INTEGER NOT NULL, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + signed_certificate TEXT, + PRIMARY KEY (height, retry_count) +); diff --git a/aggsender/db/migrations/migrations.go b/aggsender/db/migrations/migrations.go index 31f16fd2..78c58b85 100644 --- a/aggsender/db/migrations/migrations.go +++ b/aggsender/db/migrations/migrations.go @@ -1,16 +1,18 @@ package migrations import ( + "database/sql" _ "embed" "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/db/types" + "github.com/0xPolygon/cdk/log" ) //go:embed 0001.sql var mig001 string -func RunMigrations(dbPath string) error { +func RunMigrations(logger *log.Logger, database *sql.DB) error { migrations := []types.Migration{ { ID: "0001", @@ -18,5 +20,5 @@ func RunMigrations(dbPath string) error { }, } - return db.RunMigrations(dbPath, migrations) + return db.RunMigrationsDB(logger, database, migrations) } diff --git a/aggsender/epoch_notifier_per_block.go b/aggsender/epoch_notifier_per_block.go index 3b560731..80494cc0 100644 --- a/aggsender/epoch_notifier_per_block.go +++ b/aggsender/epoch_notifier_per_block.go @@ -31,6 +31,14 @@ type ConfigEpochNotifierPerBlock struct { EpochNotificationPercentage uint } +func (c *ConfigEpochNotifierPerBlock) String() string { + if c == nil { + return "nil" + } + return fmt.Sprintf("{startEpochBlock=%d, sizeEpoch=%d, threshold=%d%%}", + c.StartingEpochBlock, c.NumBlockPerEpoch, c.EpochNotificationPercentage) +} + func NewConfigEpochNotifierPerBlock(aggLayer agglayer.AggLayerClientGetEpochConfiguration, epochNotificationPercentage uint) (*ConfigEpochNotifierPerBlock, error) { if aggLayer == nil { @@ -89,9 +97,7 @@ func NewEpochNotifierPerBlock(blockNotifier types.BlockNotifier, } func (e *EpochNotifierPerBlock) String() string { - return fmt.Sprintf("EpochNotifierPerBlock: startingEpochBlock=%d, numBlockPerEpoch=%d,"+ - " EpochNotificationPercentage=%d", - e.Config.StartingEpochBlock, e.Config.NumBlockPerEpoch, e.Config.EpochNotificationPercentage) + return 
fmt.Sprintf("EpochNotifierPerBlock: config: %s", e.Config.String()) } // StartAsync starts the notifier in a goroutine @@ -147,6 +153,14 @@ func (e *EpochNotifierPerBlock) step(status internalStatus, status.lastBlockSeen = currentBlock needNotify, closingEpoch := e.isNotificationRequired(currentBlock, status.waitingForEpoch) + percentEpoch := e.percentEpoch(currentBlock) + logFunc := e.logger.Debugf + if needNotify { + logFunc = e.logger.Infof + } + logFunc("New block seen [finality:%s]: %d. blockRate:%s Epoch:%d Percent:%f%% notify:%v config:%s", + newBlock.BlockFinalityType, newBlock.BlockNumber, newBlock.BlockRate, closingEpoch, + percentEpoch*maxPercent, needNotify, e.Config.String()) if needNotify { // Notify the epoch has started info := e.infoEpoch(currentBlock, closingEpoch) @@ -179,7 +193,6 @@ func (e *EpochNotifierPerBlock) isNotificationRequired(currentBlock, lastEpochNo thresholdPercent = maxTresholdPercent } if percentEpoch < thresholdPercent { - e.logger.Debugf("Block %d is at %f%% of the epoch no notify", currentBlock, percentEpoch*maxPercent) return false, e.epochNumber(currentBlock) } nextEpoch := e.epochNumber(currentBlock) + 1 diff --git a/aggsender/epoch_notifier_per_block_test.go b/aggsender/epoch_notifier_per_block_test.go index 203116d0..ac35350e 100644 --- a/aggsender/epoch_notifier_per_block_test.go +++ b/aggsender/epoch_notifier_per_block_test.go @@ -14,6 +14,17 @@ import ( "github.com/stretchr/testify/require" ) +func TestConfigEpochNotifierPerBlockString(t *testing.T) { + cfg := ConfigEpochNotifierPerBlock{ + StartingEpochBlock: 123, + NumBlockPerEpoch: 456, + EpochNotificationPercentage: 789, + } + require.Equal(t, "{startEpochBlock=123, sizeEpoch=456, threshold=789%}", cfg.String()) + var cfg2 *ConfigEpochNotifierPerBlock + require.Equal(t, "nil", cfg2.String()) +} + func TestStartingBlockEpoch(t *testing.T) { testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ StartingEpochBlock: 9, diff --git a/aggsender/mocks/agg_sender_storage.go b/aggsender/mocks/agg_sender_storage.go index 1816d4a3..b6337180 100644 --- a/aggsender/mocks/agg_sender_storage.go +++ b/aggsender/mocks/agg_sender_storage.go @@ -74,22 +74,24 @@ func (_c *AggSenderStorage_DeleteCertificate_Call) RunAndReturn(run func(context } // GetCertificateByHeight provides a mock function with given fields: height -func (_m *AggSenderStorage) GetCertificateByHeight(height uint64) (types.CertificateInfo, error) { +func (_m *AggSenderStorage) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { ret := _m.Called(height) if len(ret) == 0 { panic("no return value specified for GetCertificateByHeight") } - var r0 types.CertificateInfo + var r0 *types.CertificateInfo var r1 error - if rf, ok := ret.Get(0).(func(uint64) (types.CertificateInfo, error)); ok { + if rf, ok := ret.Get(0).(func(uint64) (*types.CertificateInfo, error)); ok { return rf(height) } - if rf, ok := ret.Get(0).(func(uint64) types.CertificateInfo); ok { + if rf, ok := ret.Get(0).(func(uint64) *types.CertificateInfo); ok { r0 = rf(height) } else { - r0 = ret.Get(0).(types.CertificateInfo) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.CertificateInfo) + } } if rf, ok := ret.Get(1).(func(uint64) error); ok { @@ -119,12 +121,12 @@ func (_c *AggSenderStorage_GetCertificateByHeight_Call) Run(run func(height uint return _c } -func (_c *AggSenderStorage_GetCertificateByHeight_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificateByHeight_Call { +func (_c 
*AggSenderStorage_GetCertificateByHeight_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificateByHeight_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *AggSenderStorage_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (types.CertificateInfo, error)) *AggSenderStorage_GetCertificateByHeight_Call { +func (_c *AggSenderStorage_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (*types.CertificateInfo, error)) *AggSenderStorage_GetCertificateByHeight_Call { _c.Call.Return(run) return _c } @@ -188,22 +190,24 @@ func (_c *AggSenderStorage_GetCertificatesByStatus_Call) RunAndReturn(run func([ } // GetLastSentCertificate provides a mock function with given fields: -func (_m *AggSenderStorage) GetLastSentCertificate() (types.CertificateInfo, error) { +func (_m *AggSenderStorage) GetLastSentCertificate() (*types.CertificateInfo, error) { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for GetLastSentCertificate") } - var r0 types.CertificateInfo + var r0 *types.CertificateInfo var r1 error - if rf, ok := ret.Get(0).(func() (types.CertificateInfo, error)); ok { + if rf, ok := ret.Get(0).(func() (*types.CertificateInfo, error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() types.CertificateInfo); ok { + if rf, ok := ret.Get(0).(func() *types.CertificateInfo); ok { r0 = rf() } else { - r0 = ret.Get(0).(types.CertificateInfo) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.CertificateInfo) + } } if rf, ok := ret.Get(1).(func() error); ok { @@ -232,12 +236,12 @@ func (_c *AggSenderStorage_GetLastSentCertificate_Call) Run(run func()) *AggSend return _c } -func (_c *AggSenderStorage_GetLastSentCertificate_Call) Return(_a0 types.CertificateInfo, _a1 error) *AggSenderStorage_GetLastSentCertificate_Call { +func (_c *AggSenderStorage_GetLastSentCertificate_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggSenderStorage_GetLastSentCertificate_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *AggSenderStorage_GetLastSentCertificate_Call) RunAndReturn(run func() (types.CertificateInfo, error)) *AggSenderStorage_GetLastSentCertificate_Call { +func (_c *AggSenderStorage_GetLastSentCertificate_Call) RunAndReturn(run func() (*types.CertificateInfo, error)) *AggSenderStorage_GetLastSentCertificate_Call { _c.Call.Return(run) return _c } @@ -289,12 +293,12 @@ func (_c *AggSenderStorage_SaveLastSentCertificate_Call) RunAndReturn(run func(c return _c } -// UpdateCertificateStatus provides a mock function with given fields: ctx, certificate -func (_m *AggSenderStorage) UpdateCertificateStatus(ctx context.Context, certificate types.CertificateInfo) error { +// UpdateCertificate provides a mock function with given fields: ctx, certificate +func (_m *AggSenderStorage) UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error { ret := _m.Called(ctx, certificate) if len(ret) == 0 { - panic("no return value specified for UpdateCertificateStatus") + panic("no return value specified for UpdateCertificate") } var r0 error @@ -307,31 +311,31 @@ func (_m *AggSenderStorage) UpdateCertificateStatus(ctx context.Context, certifi return r0 } -// AggSenderStorage_UpdateCertificateStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificateStatus' -type AggSenderStorage_UpdateCertificateStatus_Call struct { +// AggSenderStorage_UpdateCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificate' +type 
AggSenderStorage_UpdateCertificate_Call struct { *mock.Call } -// UpdateCertificateStatus is a helper method to define mock.On call +// UpdateCertificate is a helper method to define mock.On call // - ctx context.Context // - certificate types.CertificateInfo -func (_e *AggSenderStorage_Expecter) UpdateCertificateStatus(ctx interface{}, certificate interface{}) *AggSenderStorage_UpdateCertificateStatus_Call { - return &AggSenderStorage_UpdateCertificateStatus_Call{Call: _e.mock.On("UpdateCertificateStatus", ctx, certificate)} +func (_e *AggSenderStorage_Expecter) UpdateCertificate(ctx interface{}, certificate interface{}) *AggSenderStorage_UpdateCertificate_Call { + return &AggSenderStorage_UpdateCertificate_Call{Call: _e.mock.On("UpdateCertificate", ctx, certificate)} } -func (_c *AggSenderStorage_UpdateCertificateStatus_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorage_UpdateCertificateStatus_Call { +func (_c *AggSenderStorage_UpdateCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorage_UpdateCertificate_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(types.CertificateInfo)) }) return _c } -func (_c *AggSenderStorage_UpdateCertificateStatus_Call) Return(_a0 error) *AggSenderStorage_UpdateCertificateStatus_Call { +func (_c *AggSenderStorage_UpdateCertificate_Call) Return(_a0 error) *AggSenderStorage_UpdateCertificate_Call { _c.Call.Return(_a0) return _c } -func (_c *AggSenderStorage_UpdateCertificateStatus_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorage_UpdateCertificateStatus_Call { +func (_c *AggSenderStorage_UpdateCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorage_UpdateCertificate_Call { _c.Call.Return(run) return _c } diff --git a/aggsender/mocks/logger.go b/aggsender/mocks/logger.go index bb26739e..54be6942 100644 --- a/aggsender/mocks/logger.go +++ b/aggsender/mocks/logger.go @@ -189,6 +189,50 @@ func (_c *Logger_Errorf_Call) RunAndReturn(run func(string, ...interface{})) *Lo return _c } +// Fatalf provides a mock function with given fields: format, args +func (_m *Logger) Fatalf(format string, args ...interface{}) { + var _ca []interface{} + _ca = append(_ca, format) + _ca = append(_ca, args...) + _m.Called(_ca...) +} + +// Logger_Fatalf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Fatalf' +type Logger_Fatalf_Call struct { + *mock.Call +} + +// Fatalf is a helper method to define mock.On call +// - format string +// - args ...interface{} +func (_e *Logger_Expecter) Fatalf(format interface{}, args ...interface{}) *Logger_Fatalf_Call { + return &Logger_Fatalf_Call{Call: _e.mock.On("Fatalf", + append([]interface{}{format}, args...)...)} +} + +func (_c *Logger_Fatalf_Call) Run(run func(format string, args ...interface{})) *Logger_Fatalf_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]interface{}, len(args)-1) + for i, a := range args[1:] { + if a != nil { + variadicArgs[i] = a.(interface{}) + } + } + run(args[0].(string), variadicArgs...) 
+ }) + return _c +} + +func (_c *Logger_Fatalf_Call) Return() *Logger_Fatalf_Call { + _c.Call.Return() + return _c +} + +func (_c *Logger_Fatalf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Fatalf_Call { + _c.Call.Return(run) + return _c +} + // Info provides a mock function with given fields: args func (_m *Logger) Info(args ...interface{}) { var _ca []interface{} diff --git a/aggsender/types/block_notifier.go b/aggsender/types/block_notifier.go index 475abc1b..5dde2702 100644 --- a/aggsender/types/block_notifier.go +++ b/aggsender/types/block_notifier.go @@ -1,10 +1,15 @@ package types -import "github.com/0xPolygon/cdk/etherman" +import ( + "time" + + "github.com/0xPolygon/cdk/etherman" +) type EventNewBlock struct { BlockNumber uint64 BlockFinalityType etherman.BlockNumberFinality + BlockRate time.Duration } // BlockNotifier is the interface that wraps the basic methods to notify a new block. diff --git a/aggsender/types/epoch_notifier.go b/aggsender/types/epoch_notifier.go index 045ba7ff..426ad362 100644 --- a/aggsender/types/epoch_notifier.go +++ b/aggsender/types/epoch_notifier.go @@ -23,6 +23,3 @@ type EpochNotifier interface { Start(ctx context.Context) String() string } - -type BridgeL2Syncer interface { -} diff --git a/aggsender/types/types.go b/aggsender/types/types.go index d9e0b2e7..66ed4fe6 100644 --- a/aggsender/types/types.go +++ b/aggsender/types/types.go @@ -43,6 +43,7 @@ type EthClient interface { // Logger is an interface that defines the methods to log messages type Logger interface { + Fatalf(format string, args ...interface{}) Info(args ...interface{}) Infof(format string, args ...interface{}) Error(args ...interface{}) @@ -54,34 +55,73 @@ type Logger interface { } type CertificateInfo struct { - Height uint64 `meddler:"height"` - CertificateID common.Hash `meddler:"certificate_id,hash"` - NewLocalExitRoot common.Hash `meddler:"new_local_exit_root,hash"` - FromBlock uint64 `meddler:"from_block"` - ToBlock uint64 `meddler:"to_block"` - Status agglayer.CertificateStatus `meddler:"status"` - CreatedAt int64 `meddler:"created_at"` - UpdatedAt int64 `meddler:"updated_at"` - SignedCertificate string `meddler:"signed_certificate"` + Height uint64 `meddler:"height"` + RetryCount int `meddler:"retry_count"` + CertificateID common.Hash `meddler:"certificate_id,hash"` + // PreviousLocalExitRoot if it's nil means no reported + PreviousLocalExitRoot *common.Hash `meddler:"previous_local_exit_root,hash"` + NewLocalExitRoot common.Hash `meddler:"new_local_exit_root,hash"` + FromBlock uint64 `meddler:"from_block"` + ToBlock uint64 `meddler:"to_block"` + Status agglayer.CertificateStatus `meddler:"status"` + CreatedAt int64 `meddler:"created_at"` + UpdatedAt int64 `meddler:"updated_at"` + SignedCertificate string `meddler:"signed_certificate"` } -func (c CertificateInfo) String() string { - return fmt.Sprintf( - "Height: %d\n"+ - "CertificateID: %s\n"+ - "FromBlock: %d\n"+ - "ToBlock: %d\n"+ - "NewLocalExitRoot: %s\n"+ - "Status: %s\n"+ - "CreatedAt: %s\n"+ - "UpdatedAt: %s\n", +func (c *CertificateInfo) String() string { + if c == nil { + //nolint:all + return "nil" + } + previousLocalExitRoot := "nil" + if c.PreviousLocalExitRoot != nil { + previousLocalExitRoot = c.PreviousLocalExitRoot.String() + } + return fmt.Sprintf("aggsender.CertificateInfo: "+ + "Height: %d "+ + "RetryCount: %d "+ + "CertificateID: %s "+ + "PreviousLocalExitRoot: %s "+ + "NewLocalExitRoot: %s "+ + "Status: %s "+ + "FromBlock: %d "+ + "ToBlock: %d "+ + "CreatedAt: %s "+ + "UpdatedAt: %s", 
c.Height, + c.RetryCount, c.CertificateID.String(), - c.FromBlock, - c.ToBlock, + previousLocalExitRoot, c.NewLocalExitRoot.String(), c.Status.String(), + c.FromBlock, + c.ToBlock, time.UnixMilli(c.CreatedAt), time.UnixMilli(c.UpdatedAt), ) } + +// ID returns a string with the unique identifier of the certificate (height+certificateID) +func (c *CertificateInfo) ID() string { + if c == nil { + return "nil" + } + return fmt.Sprintf("%d/%s (retry %d)", c.Height, c.CertificateID.String(), c.RetryCount) +} + +// IsClosed returns true if the certificate is closed (settled or inError) +func (c *CertificateInfo) IsClosed() bool { + if c == nil { + return false + } + return c.Status.IsClosed() +} + +// ElapsedTimeSinceCreation returns the time elapsed since the certificate was created +func (c *CertificateInfo) ElapsedTimeSinceCreation() time.Duration { + if c == nil { + return 0 + } + return time.Now().UTC().Sub(time.UnixMilli(c.CreatedAt)) +} diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index b3c3c853..dc0ca8d4 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -5,6 +5,7 @@ import ( "time" "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" tree "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" @@ -111,7 +112,8 @@ func newBridgeSync( originNetwork uint32, syncFullClaims bool, ) (*BridgeSync, error) { - processor, err := newProcessor(dbPath, l1OrL2ID) + logger := log.WithFields("bridge-syncer", l1OrL2ID) + processor, err := newProcessor(dbPath, logger) if err != nil { return nil, err } @@ -156,6 +158,13 @@ func newBridgeSync( if err != nil { return nil, err } + logger.Infof("BridgeSyncer [%s] created: dbPath: %s initialBlock: %d bridgeAddr: %s, syncFullClaims: %t,"+ + " maxRetryAttemptsAfterError: %d RetryAfterErrorPeriod: %s"+ + " syncBlockChunkSize: %d, blockFinalityType: %s waitForNewBlocksPeriod: %s", + l1OrL2ID, + dbPath, initialBlock, bridge.String(), syncFullClaims, + maxRetryAttemptsAfterError, retryAfterErrorPeriod.String(), + syncBlockChunkSize, blockFinalityType, waitForNewBlocksPeriod.String()) return &BridgeSync{ processor: processor, diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index 6f1e10c4..0b350006 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -2,7 +2,6 @@ package bridgesync_test import ( "context" - "fmt" "math/big" "path" "testing" @@ -63,25 +62,9 @@ func TestBridgeEventE2E(t *testing.T) { } // Wait for syncer to catch up - syncerUpToDate := false - - var errMsg string lb, err := client.Client().BlockNumber(ctx) require.NoError(t, err) - - for i := 0; i < 10; i++ { - lpb, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if lpb == lb { - syncerUpToDate = true - - break - } - - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) - } - require.True(t, syncerUpToDate, errMsg) + helpers.RequireProcessorUpdated(t, syncer, lb) // Get bridges lastBlock, err := client.Client().BlockNumber(ctx) diff --git a/bridgesync/processor.go b/bridgesync/processor.go index b2e0ed24..249f890c 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -109,7 +109,7 @@ type processor struct { bridgeContract BridgeContractor } -func newProcessor(dbPath, loggerPrefix string) (*processor, error) { +func newProcessor(dbPath string, logger *log.Logger) (*processor, error) { err := migrations.RunMigrations(dbPath) if err != nil { return nil, err @@ 
-118,7 +118,7 @@ func newProcessor(dbPath, loggerPrefix string) (*processor, error) { if err != nil { return nil, err } - logger := log.WithFields("bridge-syncer", loggerPrefix) + exitTree := tree.NewAppendOnlyTree(db, "") return &processor{ db: db, diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index ab31f17d..21f4d51a 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -82,7 +82,8 @@ func TestProceessor(t *testing.T) { log.Debugf("sqlite path: %s", path) err := migrationsBridge.RunMigrations(path) require.NoError(t, err) - p, err := newProcessor(path, "foo") + logger := log.WithFields("bridge-syncer", "foo") + p, err := newProcessor(path, logger) require.NoError(t, err) actions := []processAction{ // processed: ~ @@ -735,7 +736,8 @@ func TestInsertAndGetClaim(t *testing.T) { log.Debugf("sqlite path: %s", path) err := migrationsBridge.RunMigrations(path) require.NoError(t, err) - p, err := newProcessor(path, "foo") + logger := log.WithFields("bridge-syncer", "foo") + p, err := newProcessor(path, logger) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) @@ -828,7 +830,8 @@ func TestGetBridgesPublished(t *testing.T) { path := path.Join(t.TempDir(), "file::memory:?cache=shared") require.NoError(t, migrationsBridge.RunMigrations(path)) - p, err := newProcessor(path, "foo") + logger := log.WithFields("bridge-syncer", "foo") + p, err := newProcessor(path, logger) require.NoError(t, err) tx, err := p.db.BeginTx(context.Background(), nil) diff --git a/cmd/run.go b/cmd/run.go index 0c0c283c..365e2407 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -36,15 +36,12 @@ import ( "github.com/0xPolygon/cdk/rpc" "github.com/0xPolygon/cdk/sequencesender" "github.com/0xPolygon/cdk/sequencesender/txbuilder" - "github.com/0xPolygon/cdk/state" - "github.com/0xPolygon/cdk/state/pgstatestorage" "github.com/0xPolygon/cdk/translator" ethtxman "github.com/0xPolygon/zkevm-ethtx-manager/etherman" "github.com/0xPolygon/zkevm-ethtx-manager/etherman/etherscan" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" "github.com/ethereum/go-ethereum/ethclient" - "github.com/jackc/pgx/v4/pgxpool" "github.com/urfave/cli/v2" ) @@ -201,17 +198,8 @@ func createAggregator(ctx context.Context, c config.Config, runMigrations bool) logger := log.WithFields("module", cdkcommon.AGGREGATOR) // Migrations if runMigrations { - logger.Infof( - "Running DB migrations host: %s:%s db:%s user:%s", - c.Aggregator.DB.Host, c.Aggregator.DB.Port, c.Aggregator.DB.Name, c.Aggregator.DB.User, - ) - runAggregatorMigrations(c.Aggregator.DB) - } - - // DB - stateSQLDB, err := db.NewSQLDB(logger, c.Aggregator.DB) - if err != nil { - logger.Fatal(err) + logger.Infof("Running DB migrations. 
File %s", c.Aggregator.DBPath) + runAggregatorMigrations(c.Aggregator.DBPath) } etherman, err := newEtherman(c) @@ -230,9 +218,7 @@ func createAggregator(ctx context.Context, c config.Config, runMigrations bool) c.Aggregator.ChainID = l2ChainID } - st := newState(&c, c.Aggregator.ChainID, stateSQLDB) - - aggregator, err := aggregator.New(ctx, c.Aggregator, logger, st, etherman) + aggregator, err := aggregator.New(ctx, c.Aggregator, logger, etherman) if err != nil { logger.Fatal(err) } @@ -274,7 +260,9 @@ func createSequenceSender( if cfg.SequenceSender.EthTxManager.CustodialAssets.Enable { cfg.SequenceSender.SenderAddress = cfg.SequenceSender.EthTxManager.CustodialAssets.SequencerAddr } else { - auth, _, err := ethman.LoadAuthFromKeyStore(cfg.SequenceSender.PrivateKey.Path, cfg.SequenceSender.PrivateKey.Password) + auth, _, err := ethman.LoadAuthFromKeyStore( + cfg.SequenceSender.PrivateKey.Path, + cfg.SequenceSender.PrivateKey.Password) if err != nil { logger.Fatal(err) } @@ -460,13 +448,13 @@ func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavail return dataavailability.New(daBackend) } -func runAggregatorMigrations(c db.Config) { - runMigrations(c, db.AggregatorMigrationName) +func runAggregatorMigrations(dbPath string) { + runMigrations(dbPath, db.AggregatorMigrationName) } -func runMigrations(c db.Config, name string) { +func runMigrations(dbPath string, name string) { log.Infof("running migrations for %v", name) - err := db.RunMigrationsUp(c, name) + err := db.RunMigrationsUp(dbPath, name) if err != nil { log.Fatal(err) } @@ -512,19 +500,6 @@ func waitSignal(cancelFuncs []context.CancelFunc) { } } -func newState(c *config.Config, l2ChainID uint64, sqlDB *pgxpool.Pool) *state.State { - stateCfg := state.Config{ - DB: c.Aggregator.DB, - ChainID: l2ChainID, - } - - stateDB := pgstatestorage.NewPostgresStorage(stateCfg, sqlDB) - - st := state.NewState(stateCfg, stateDB) - - return st -} - func newReorgDetector( cfg *reorgdetector.Config, client *ethclient.Client, @@ -557,7 +532,7 @@ func runL1InfoTreeSyncerIfNeeded( reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.RPC, - cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER}, components) { + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER, cdkcommon.L1INFOTREESYNC}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -588,6 +563,7 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER, + cdkcommon.L1INFOTREESYNC, }, components) { return nil } @@ -622,7 +598,8 @@ func runReorgDetectorL1IfNeeded( ) (*reorgdetector.ReorgDetector, chan error) { if !isNeeded([]string{ cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER}, + cdkcommon.AGGORACLE, cdkcommon.RPC, cdkcommon.AGGSENDER, + cdkcommon.L1INFOTREESYNC}, components) { return nil, nil } diff --git a/common/components.go b/common/components.go index 7ef9d285..2c8ab188 100644 --- a/common/components.go +++ b/common/components.go @@ -15,4 +15,6 @@ const ( PROVER = "prover" // AGGSENDER name to identify the aggsender component AGGSENDER = "aggsender" + // L1INFOTREESYNC name to identify the l1infotreesync component + L1INFOTREESYNC = "l1infotreesync" ) diff --git a/config/default.go b/config/default.go index 61b099c8..6a505b88 100644 --- a/config/default.go +++ b/config/default.go @@ 
-137,17 +137,10 @@ SettlementBackend = "l1" AggLayerTxTimeout = "5m" AggLayerURL = "{{AggLayerURL}}" SyncModeOnlyEnabled = false +DBPath = "{{PathRWData}}/aggregator_db.sqlite" [Aggregator.SequencerPrivateKey] Path = "{{SequencerPrivateKeyPath}}" Password = "{{SequencerPrivateKeyPassword}}" - [Aggregator.DB] - Name = "aggregator_db" - User = "aggregator_user" - Password = "aggregator_password" - Host = "cdk-aggregator-db" - Port = "5432" - EnableLog = false - MaxConns = 200 [Aggregator.Log] Environment ="{{Log.Environment}}" # "production" or "development" Level = "{{Log.Level}}" @@ -220,7 +213,7 @@ SyncBlockChunkSize=100 BlockFinality="LatestBlock" URLRPCL1="{{L1URL}}" WaitForNewBlocksPeriod="100ms" -InitialBlock=0 +InitialBlock={{genesisBlockNumber}} [AggOracle] TargetChainType="EVM" @@ -245,7 +238,7 @@ WaitPeriodNextGER="100ms" ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "/{{PathRWData}}/ethtxmanager-sequencesender.sqlite" + StoragePath = "{{PathRWData}}/ethtxmanager-sequencesender.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 @@ -263,7 +256,7 @@ WriteTimeout = "2s" MaxRequestsPerIPAndSecond = 10 [ClaimSponsor] -DBPath = "/{{PathRWData}}/claimsopnsor.sqlite" +DBPath = "{{PathRWData}}/claimsopnsor.sqlite" Enabled = true SenderAddr = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" BridgeAddrL2 = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8" @@ -284,7 +277,7 @@ GasOffset = 0 ForcedGas = 0 GasPriceMarginFactor = 1 MaxGasPriceLimit = 0 - StoragePath = "/{{PathRWData}}/ethtxmanager-claimsponsor.sqlite" + StoragePath = "{{PathRWData}}/ethtxmanager-claimsponsor.sqlite" ReadPendingL1Txs = false SafeStatusL1NumberOfBlocks = 5 FinalizedStatusL1NumberOfBlocks = 10 @@ -344,4 +337,7 @@ CheckSettledInterval = "2s" BlockFinality = "LatestBlock" EpochNotificationPercentage = 50 SaveCertificatesToFilesPath = "" +MaxRetriesStoreCertificate = 3 +DelayBeetweenRetries = "60s" +KeepCertificatesHistory = true ` diff --git a/crates/cdk/versions.json b/crates/cdk/versions.json index bafbd00b..39bfb4dc 100644 --- a/crates/cdk/versions.json +++ b/crates/cdk/versions.json @@ -1,15 +1,15 @@ { "agglayer_image": "ghcr.io/agglayer/agglayer:0.2.0-rc.5", "cdk_erigon_node_image": "hermeznetwork/cdk-erigon:v2.1.2", - "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta5", + "cdk_node_image": "ghcr.io/0xpolygon/cdk:0.4.0-beta8", "cdk_validium_node_image": "0xpolygon/cdk-validium-node:0.7.0-cdk", "zkevm_bridge_proxy_image": "haproxy:3.0-bookworm", "zkevm_bridge_service_image": "hermeznetwork/zkevm-bridge-service:v0.6.0-RC1", - "zkevm_bridge_ui_image": "leovct/zkevm-bridge-ui:multi-network-2", + "zkevm_bridge_ui_image": "leovct/zkevm-bridge-ui:multi-network", "zkevm_contracts_image": "leovct/zkevm-contracts:v8.0.0-rc.4-fork.12", "zkevm_da_image": "0xpolygon/cdk-data-availability:0.0.10", "zkevm_node_image": "hermeznetwork/zkevm-node:v0.7.3", - "zkevm_pool_manager_image": "hermeznetwork/zkevm-pool-manager:v0.1.1", + "zkevm_pool_manager_image": "hermeznetwork/zkevm-pool-manager:v0.1.2", "zkevm_prover_image": "hermeznetwork/zkevm-prover:v8.0.0-RC14-fork.12", "zkevm_sequence_sender_image": "hermeznetwork/zkevm-sequence-sender:v0.2.4" } diff --git a/db/meddler.go b/db/meddler.go index 8dd17fe8..83df3b8a 100644 --- a/db/meddler.go +++ b/db/meddler.go @@ -154,26 +154,48 @@ func (b HashMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err // PostRead is called after a Scan operation for fields that have the HashMeddler func (b 
HashMeddler) PostRead(fieldPtr, scanTarget interface{}) error { - ptr, ok := scanTarget.(*string) + rawHashPtr, ok := scanTarget.(*string) if !ok { return errors.New("scanTarget is not *string") } - if ptr == nil { - return fmt.Errorf("HashMeddler.PostRead: nil pointer") - } + + // Handle the case where fieldPtr is a *common.Hash field, ok := fieldPtr.(*common.Hash) - if !ok { - return errors.New("fieldPtr is not common.Hash") + if ok { + *field = common.HexToHash(*rawHashPtr) + return nil } - *field = common.HexToHash(*ptr) - return nil + + // Handle the case where fieldPtr is a **common.Hash (nullable field) + hashPtr, ok := fieldPtr.(**common.Hash) + if ok { + // If the string is empty, set the hash to nil + if len(*rawHashPtr) == 0 { + *hashPtr = nil + // Otherwise, convert the string to a common.Hash and assign it + } else { + tmp := common.HexToHash(*rawHashPtr) + *hashPtr = &tmp + } + return nil + } + + // If fieldPtr is neither a *common.Hash nor a **common.Hash, return an error + return errors.New("fieldPtr is not *common.Hash or **common.Hash") } // PreWrite is called before an Insert or Update operation for fields that have the HashMeddler func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { field, ok := fieldPtr.(common.Hash) if !ok { - return nil, errors.New("fieldPtr is not common.Hash") + hashPtr, ok := fieldPtr.(*common.Hash) + if !ok { + return nil, errors.New("fieldPtr is not common.Hash") + } + if hashPtr == nil { + return []byte{}, nil + } + return hashPtr.Hex(), nil } return field.Hex(), nil } diff --git a/db/migrations.go b/db/migrations.go index 1a56874e..8af35874 100644 --- a/db/migrations.go +++ b/db/migrations.go @@ -1,6 +1,7 @@ package db import ( + "database/sql" "fmt" "strings" @@ -23,6 +24,10 @@ func RunMigrations(dbPath string, migrations []types.Migration) error { if err != nil { return fmt.Errorf("error creating DB %w", err) } + return RunMigrationsDB(log.GetDefaultLogger(), db, migrations) +} + +func RunMigrationsDB(logger *log.Logger, db *sql.DB, migrations []types.Migration) error { migs := &migrate.MemoryMigrationSource{Migrations: []*migrate.Migration{}} for _, m := range migrations { prefixed := strings.ReplaceAll(m.SQL, dbPrefixReplacer, m.Prefix) @@ -34,15 +39,15 @@ func RunMigrations(dbPath string, migrations []types.Migration) error { }) } - log.Debugf("running migrations:") + logger.Debugf("running migrations:") for _, m := range migs.Migrations { - log.Debugf("%+v", m.Id) + logger.Debugf("%+v", m.Id) } nMigrations, err := migrate.Exec(db, "sqlite3", migs, migrate.Up) if err != nil { return fmt.Errorf("error executing migration %w", err) } - log.Infof("successfully ran %d migrations", nMigrations) + logger.Infof("successfully ran %d migrations", nMigrations) return nil } diff --git a/go.mod b/go.mod index 73c20e53..13e94bd4 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/hermeznetwork/tracerr v0.3.2 github.com/iden3/go-iden3-crypto v0.0.17 github.com/invopop/jsonschema v0.12.0 - github.com/jackc/pgconn v1.14.3 github.com/jackc/pgx/v4 v4.18.3 github.com/knadh/koanf/parsers/json v0.1.0 github.com/knadh/koanf/parsers/toml v0.1.0 @@ -91,6 +90,7 @@ require ( github.com/holiman/uint256 v1.3.1 // indirect github.com/huin/goupnp v1.3.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgproto3/v2 v2.3.3 // indirect diff --git 
a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 132f563f..ffb00e35 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -36,7 +36,6 @@ func newSimulatedClient(t *testing.T) ( *verifybatchesmock.Verifybatchesmock, ) { t.Helper() - ctx := context.Background() client, setup := helpers.SimulatedBackend(t, nil, 0) @@ -79,11 +78,11 @@ func TestE2E(t *testing.T) { client.Commit() g, err := gerSc.L1InfoRootMap(nil, uint32(i+1)) require.NoError(t, err) - // Let the processor catch up - time.Sleep(time.Millisecond * 100) receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash()) require.NoError(t, err) require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful) + // Let the processor catch up + helpers.RequireProcessorUpdated(t, syncer, receipt.BlockNumber.Uint64()) expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) require.NoError(t, err) @@ -118,17 +117,7 @@ func TestE2E(t *testing.T) { require.True(t, len(receipt.Logs) == 1+i%2+i%2) // Let the processor catch - processorUpdated := false - for i := 0; i < 30; i++ { - lpb, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - if receipt.BlockNumber.Uint64() == lpb { - processorUpdated = true - break - } - time.Sleep(time.Millisecond * 10) - } - require.True(t, processorUpdated) + helpers.RequireProcessorUpdated(t, syncer, receipt.BlockNumber.Uint64()) // Assert rollup exit root expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) @@ -351,24 +340,17 @@ func TestStressAndReorgs(t *testing.T) { func waitForSyncerToCatchUp(ctx context.Context, t *testing.T, syncer *l1infotreesync.L1InfoTreeSync, client *simulated.Backend) { t.Helper() - - syncerUpToDate := false - var errMsg string - - for i := 0; i < 200; i++ { - lpb, err := syncer.GetLastProcessedBlock(ctx) + for { + lastBlockNum, err := client.Client().BlockNumber(ctx) require.NoError(t, err) - lb, err := client.Client().BlockNumber(ctx) + helpers.RequireProcessorUpdated(t, syncer, lastBlockNum) + time.Sleep(time.Second / 2) + lastBlockNum2, err := client.Client().BlockNumber(ctx) require.NoError(t, err) - if lpb == lb { - syncerUpToDate = true - break + if lastBlockNum == lastBlockNum2 { + return } - time.Sleep(time.Second / 2) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) } - - require.True(t, syncerUpToDate, errMsg) } // commitBlocks commits the specified number of blocks with the given client and waits for the specified duration after each block diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index 9719fcd7..e6262ffb 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -106,7 +106,7 @@ func (s *L1InfoTreeSync) Start(ctx context.Context) { // GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) (types.Proof, types.Root, error) { - if s.processor.halted { + if s.processor.isHalted() { return types.Proof{}, types.Root{}, sync.ErrInconsistentState } return s.processor.GetL1InfoTreeMerkleProof(ctx, index) @@ -118,7 +118,7 @@ func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof( networkID uint32, root common.Hash, ) (types.Proof, error) { - if s.processor.halted { + if s.processor.isHalted() { return types.Proof{}, sync.ErrInconsistentState } if networkID == 0 { @@ -141,7 +141,7 @@ func translateError(err error) error { // - ErrBlockNotProcessed, // - 
ErrNotFound func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } leaf, err := s.processor.GetLatestInfoUntilBlock(ctx, blockNum) @@ -150,7 +150,7 @@ func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum u // GetInfoByIndex returns the value of a leaf (not the hash) of the L1 info tree func (s *L1InfoTreeSync) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTreeLeaf, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetInfoByIndex(ctx, index) @@ -158,7 +158,7 @@ func (s *L1InfoTreeSync) GetInfoByIndex(ctx context.Context, index uint32) (*L1I // GetL1InfoTreeRootByIndex returns the root of the L1 info tree at the moment the leaf with the given index was added func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (types.Root, error) { - if s.processor.halted { + if s.processor.isHalted() { return types.Root{}, sync.ErrInconsistentState } return s.processor.l1InfoTree.GetRootByIndex(ctx, index) @@ -166,7 +166,7 @@ func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uin // GetLastRollupExitRoot return the last rollup exit root processed func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (types.Root, error) { - if s.processor.halted { + if s.processor.isHalted() { return types.Root{}, sync.ErrInconsistentState } return s.processor.rollupExitTree.GetLastRoot(nil) @@ -174,7 +174,7 @@ func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (types.Root, // GetLastL1InfoTreeRoot return the last root and index processed from the L1 Info tree func (s *L1InfoTreeSync) GetLastL1InfoTreeRoot(ctx context.Context) (types.Root, error) { - if s.processor.halted { + if s.processor.isHalted() { return types.Root{}, sync.ErrInconsistentState } return s.processor.l1InfoTree.GetLastRoot(nil) @@ -182,7 +182,7 @@ func (s *L1InfoTreeSync) GetLastL1InfoTreeRoot(ctx context.Context) (types.Root, // GetLastProcessedBlock return the last processed block func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - if s.processor.halted { + if s.processor.isHalted() { return 0, sync.ErrInconsistentState } return s.processor.GetLastProcessedBlock(ctx) @@ -191,7 +191,7 @@ func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, err func (s *L1InfoTreeSync) GetLocalExitRoot( ctx context.Context, networkID uint32, rollupExitRoot common.Hash, ) (common.Hash, error) { - if s.processor.halted { + if s.processor.isHalted() { return common.Hash{}, sync.ErrInconsistentState } if networkID == 0 { @@ -202,56 +202,56 @@ func (s *L1InfoTreeSync) GetLocalExitRoot( } func (s *L1InfoTreeSync) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetLastVerifiedBatches(rollupID) } func (s *L1InfoTreeSync) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetFirstVerifiedBatches(rollupID) } func (s *L1InfoTreeSync) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, 
sync.ErrInconsistentState } return s.processor.GetFirstVerifiedBatchesAfterBlock(rollupID, blockNum) } func (s *L1InfoTreeSync) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetFirstL1InfoWithRollupExitRoot(rollupExitRoot) } func (s *L1InfoTreeSync) GetLastInfo() (*L1InfoTreeLeaf, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetLastInfo() } func (s *L1InfoTreeSync) GetFirstInfo() (*L1InfoTreeLeaf, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetFirstInfo() } func (s *L1InfoTreeSync) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetFirstInfoAfterBlock(blockNum) } func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetInfoByGlobalExitRoot(ger) @@ -261,7 +261,7 @@ func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLe func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( ctx context.Context, index uint32, root common.Hash, ) (types.Proof, error) { - if s.processor.halted { + if s.processor.isHalted() { return types.Proof{}, sync.ErrInconsistentState } return s.processor.l1InfoTree.GetProof(ctx, index, root) @@ -269,7 +269,7 @@ func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( // GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set func (s *L1InfoTreeSync) GetInitL1InfoRootMap(ctx context.Context) (*L1InfoTreeInitial, error) { - if s.processor.halted { + if s.processor.isHalted() { return nil, sync.ErrInconsistentState } return s.processor.GetInitL1InfoRootMap(nil) diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index ee94e829..dc830afc 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "errors" "fmt" + mutex "sync" "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/l1infotreesync/migrations" @@ -28,6 +29,7 @@ type processor struct { db *sql.DB l1InfoTree *tree.AppendOnlyTree rollupExitTree *tree.UpdatableTree + mu mutex.RWMutex halted bool haltedReason string } @@ -267,6 +269,8 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return err } if rowsAffected > 0 { + p.mu.Lock() + defer p.mu.Unlock() p.halted = false p.haltedReason = "" } @@ -277,7 +281,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree // and updates the last processed block (can be called without events for that purpose) func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { - if p.halted { + if p.isHalted() { log.Errorf("processor is halted due to: %s", p.haltedReason) return sync.ErrInconsistentState } @@ -361,8 +365,10 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { block.Num, ) log.Error(errStr) + p.mu.Lock() p.haltedReason = errStr p.halted = true + p.mu.Unlock() return sync.ErrInconsistentState } } @@ -391,8 +397,11 @@ 
func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { return fmt.Errorf("err: %w", err) } shouldRollback = false - - log.Infof("block %d processed with %d events", block.Num, len(block.Events)) + logFunc := log.Debugf + if len(block.Events) > 0 { + logFunc = log.Infof + } + logFunc("block %d processed with %d events", block.Num, len(block.Events)) return nil } @@ -464,3 +473,9 @@ func (p *processor) getDBQuerier(tx db.Txer) db.Querier { } return p.db } + +func (p *processor) isHalted() bool { + p.mu.RLock() + defer p.mu.RUnlock() + return p.halted +} diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go index 9b9a6f36..6f62f12d 100644 --- a/lastgersync/e2e_test.go +++ b/lastgersync/e2e_test.go @@ -11,6 +11,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/lastgersync" "github.com/0xPolygon/cdk/test/aggoraclehelpers" + "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -49,22 +50,9 @@ func TestE2E(t *testing.T) { require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:]))) // Wait for syncer to catch up - syncerUpToDate := false - var errMsg string - for i := 0; i < 10; i++ { - lpb, err := syncer.GetLastProcessedBlock(ctx) - require.NoError(t, err) - lb, err := env.L2Client.Client().BlockNumber(ctx) - require.NoError(t, err) - if lpb == lb { - syncerUpToDate = true - - break - } - time.Sleep(time.Millisecond * 100) - errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb) - } - require.True(t, syncerUpToDate, errMsg) + lb, err := env.L2Client.Client().BlockNumber(ctx) + require.NoError(t, err) + helpers.RequireProcessorUpdated(t, syncer, lb) e, err := syncer.GetFirstGERAfterL1InfoTreeIndex(ctx, uint32(i)) require.NoError(t, err, fmt.Sprint("iteration: ", i)) diff --git a/reorgdetector/types.go b/reorgdetector/types.go index 20d4562c..2c860277 100644 --- a/reorgdetector/types.go +++ b/reorgdetector/types.go @@ -93,9 +93,9 @@ func (hl *headersList) get(num uint64) *header { // getSorted returns headers in sorted order func (hl *headersList) getSorted() []header { + hl.RLock() sortedBlocks := make([]header, 0, len(hl.headers)) - hl.RLock() for _, b := range hl.headers { sortedBlocks = append(sortedBlocks, b) } diff --git a/scripts/local_config b/scripts/local_config index 5830b6e6..90b5ae11 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -3,7 +3,7 @@ source $(dirname $0)/../test/scripts/env.sh ############################################################################### function log_debug() { - echo -e "\033[0;30mDebug: $*" "\033[0m" + echo -e "\033[0;90mDebug: $*" "\033[0m" } ############################################################################### function log_error() { @@ -194,9 +194,6 @@ function export_values_of_cdk_node_config(){ if [ $? -ne 0 ]; then export_key_from_toml_file zkevm_l2_agglayer_address $_CDK_CONFIG_FILE "." SenderProofToL1Addr fi - export_key_from_toml_file_or_fatal aggregator_db_name $_CDK_CONFIG_FILE Aggregator.DB Name - export_key_from_toml_file_or_fatal aggregator_db_user $_CDK_CONFIG_FILE Aggregator.DB User - export_key_from_toml_file_or_fatal aggregator_db_password $_CDK_CONFIG_FILE Aggregator.DB Password export_obj_key_from_toml_file zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE Aggregator.EthTxManager PrivateKeys Password if [ $? 
-ne 0 ]; then export_key_from_toml_file zkevm_l2_aggregator_keystore_password $_CDK_CONFIG_FILE "." AggregatorPrivateKeyPassword diff --git a/state/config.go b/state/config.go deleted file mode 100644 index e5a65e8b..00000000 --- a/state/config.go +++ /dev/null @@ -1,13 +0,0 @@ -package state - -import ( - "github.com/0xPolygon/cdk/aggregator/db" -) - -// Config is state config -type Config struct { - // ChainID is the L2 ChainID provided by the Network Config - ChainID uint64 - // DB is the database configuration - DB db.Config `mapstructure:"DB"` -} diff --git a/state/interfaces.go b/state/interfaces.go deleted file mode 100644 index fc4eb495..00000000 --- a/state/interfaces.go +++ /dev/null @@ -1,26 +0,0 @@ -package state - -import ( - "context" - - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" -) - -type storage interface { - Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) - Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) - QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row - Begin(ctx context.Context) (pgx.Tx, error) - AddSequence(ctx context.Context, sequence Sequence, dbTx pgx.Tx) error - CheckProofContainsCompleteSequences(ctx context.Context, proof *Proof, dbTx pgx.Tx) (bool, error) - GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*Proof, error) - GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*Proof, *Proof, error) - AddGeneratedProof(ctx context.Context, proof *Proof, dbTx pgx.Tx) error - UpdateGeneratedProof(ctx context.Context, proof *Proof, dbTx pgx.Tx) error - DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error - DeleteUngeneratedProofs(ctx context.Context, dbTx pgx.Tx) error - CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error - CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) - CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, error) -} diff --git a/state/pgstatestorage/interfaces.go b/state/pgstatestorage/interfaces.go deleted file mode 100644 index e5f7402b..00000000 --- a/state/pgstatestorage/interfaces.go +++ /dev/null @@ -1,14 +0,0 @@ -package pgstatestorage - -import ( - "context" - - "github.com/jackc/pgconn" - "github.com/jackc/pgx/v4" -) - -type ExecQuerier interface { - Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) - Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) - QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row -} diff --git a/state/pgstatestorage/pgstatestorage.go b/state/pgstatestorage/pgstatestorage.go deleted file mode 100644 index 7e294c6b..00000000 --- a/state/pgstatestorage/pgstatestorage.go +++ /dev/null @@ -1,29 +0,0 @@ -package pgstatestorage - -import ( - "github.com/0xPolygon/cdk/state" - "github.com/jackc/pgx/v4" - "github.com/jackc/pgx/v4/pgxpool" -) - -// PostgresStorage implements the Storage interface -type PostgresStorage struct { - cfg state.Config - *pgxpool.Pool -} - -// NewPostgresStorage creates a new StateDB -func NewPostgresStorage(cfg state.Config, db *pgxpool.Pool) *PostgresStorage { - return &PostgresStorage{ - cfg, - db, - } -} - -// getExecQuerier determines which execQuerier to use, dbTx or the main pgxpool -func (p *PostgresStorage) getExecQuerier(dbTx pgx.Tx) ExecQuerier { - if dbTx != nil { - 
return dbTx - } - return p -} diff --git a/state/pgstatestorage/proof.go b/state/pgstatestorage/proof.go deleted file mode 100644 index fa32fc99..00000000 --- a/state/pgstatestorage/proof.go +++ /dev/null @@ -1,266 +0,0 @@ -package pgstatestorage - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/0xPolygon/cdk/state" - "github.com/jackc/pgx/v4" -) - -// CheckProofExistsForBatch checks if the batch is already included in any proof -func (p *PostgresStorage) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, error) { - const checkProofExistsForBatchSQL = ` - SELECT EXISTS (SELECT 1 FROM aggregator.proof p WHERE $1 >= p.batch_num AND $1 <= p.batch_num_final) - ` - e := p.getExecQuerier(dbTx) - var exists bool - err := e.QueryRow(ctx, checkProofExistsForBatchSQL, batchNumber).Scan(&exists) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return exists, err - } - return exists, nil -} - -// CheckProofContainsCompleteSequences checks if a recursive proof contains complete sequences -func (p *PostgresStorage) CheckProofContainsCompleteSequences( - ctx context.Context, proof *state.Proof, dbTx pgx.Tx, -) (bool, error) { - const getProofContainsCompleteSequencesSQL = ` - SELECT EXISTS (SELECT 1 FROM aggregator.sequence s1 WHERE s1.from_batch_num = $1) AND - EXISTS (SELECT 1 FROM aggregator.sequence s2 WHERE s2.to_batch_num = $2) - ` - e := p.getExecQuerier(dbTx) - var exists bool - err := e.QueryRow(ctx, getProofContainsCompleteSequencesSQL, proof.BatchNumber, proof.BatchNumberFinal).Scan(&exists) - if err != nil && !errors.Is(err, pgx.ErrNoRows) { - return exists, err - } - return exists, nil -} - -// GetProofReadyToVerify return the proof that is ready to verify -func (p *PostgresStorage) GetProofReadyToVerify( - ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx, -) (*state.Proof, error) { - const getProofReadyToVerifySQL = ` - SELECT - p.batch_num, - p.batch_num_final, - p.proof, - p.proof_id, - p.input_prover, - p.prover, - p.prover_id, - p.generating_since, - p.created_at, - p.updated_at - FROM aggregator.proof p - WHERE batch_num = $1 AND generating_since IS NULL AND - EXISTS (SELECT 1 FROM aggregator.sequence s1 WHERE s1.from_batch_num = p.batch_num) AND - EXISTS (SELECT 1 FROM aggregator.sequence s2 WHERE s2.to_batch_num = p.batch_num_final) - ` - - var proof = &state.Proof{} - - e := p.getExecQuerier(dbTx) - row := e.QueryRow(ctx, getProofReadyToVerifySQL, lastVerfiedBatchNumber+1) - err := row.Scan( - &proof.BatchNumber, &proof.BatchNumberFinal, &proof.Proof, &proof.ProofID, - &proof.InputProver, &proof.Prover, &proof.ProverID, &proof.GeneratingSince, - &proof.CreatedAt, &proof.UpdatedAt, - ) - - if errors.Is(err, pgx.ErrNoRows) { - return nil, state.ErrNotFound - } else if err != nil { - return nil, err - } - - return proof, err -} - -// GetProofsToAggregate return the next to proof that it is possible to aggregate -func (p *PostgresStorage) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) { - var ( - proof1 = &state.Proof{} - proof2 = &state.Proof{} - ) - - // TODO: add comments to explain the query - const getProofsToAggregateSQL = ` - SELECT - p1.batch_num as p1_batch_num, - p1.batch_num_final as p1_batch_num_final, - p1.proof as p1_proof, - p1.proof_id as p1_proof_id, - p1.input_prover as p1_input_prover, - p1.prover as p1_prover, - p1.prover_id as p1_prover_id, - p1.generating_since as p1_generating_since, - p1.created_at as p1_created_at, - p1.updated_at as 
p1_updated_at, - p2.batch_num as p2_batch_num, - p2.batch_num_final as p2_batch_num_final, - p2.proof as p2_proof, - p2.proof_id as p2_proof_id, - p2.input_prover as p2_input_prover, - p2.prover as p2_prover, - p2.prover_id as p2_prover_id, - p2.generating_since as p2_generating_since, - p2.created_at as p2_created_at, - p2.updated_at as p2_updated_at - FROM aggregator.proof p1 INNER JOIN aggregator.proof p2 ON p1.batch_num_final = p2.batch_num - 1 - WHERE p1.generating_since IS NULL AND p2.generating_since IS NULL AND - p1.proof IS NOT NULL AND p2.proof IS NOT NULL AND - ( - EXISTS ( - SELECT 1 FROM aggregator.sequence s - WHERE p1.batch_num >= s.from_batch_num AND p1.batch_num <= s.to_batch_num AND - p1.batch_num_final >= s.from_batch_num AND p1.batch_num_final <= s.to_batch_num AND - p2.batch_num >= s.from_batch_num AND p2.batch_num <= s.to_batch_num AND - p2.batch_num_final >= s.from_batch_num AND p2.batch_num_final <= s.to_batch_num - ) - OR - ( - EXISTS ( SELECT 1 FROM aggregator.sequence s WHERE p1.batch_num = s.from_batch_num) AND - EXISTS ( SELECT 1 FROM aggregator.sequence s WHERE p1.batch_num_final = s.to_batch_num) AND - EXISTS ( SELECT 1 FROM aggregator.sequence s WHERE p2.batch_num = s.from_batch_num) AND - EXISTS ( SELECT 1 FROM aggregator.sequence s WHERE p2.batch_num_final = s.to_batch_num) - ) - ) - ORDER BY p1.batch_num ASC - LIMIT 1 - ` - - e := p.getExecQuerier(dbTx) - row := e.QueryRow(ctx, getProofsToAggregateSQL) - err := row.Scan( - &proof1.BatchNumber, &proof1.BatchNumberFinal, &proof1.Proof, &proof1.ProofID, - &proof1.InputProver, &proof1.Prover, &proof1.ProverID, &proof1.GeneratingSince, - &proof1.CreatedAt, &proof1.UpdatedAt, - &proof2.BatchNumber, &proof2.BatchNumberFinal, &proof2.Proof, &proof2.ProofID, - &proof2.InputProver, &proof2.Prover, &proof2.ProverID, &proof2.GeneratingSince, - &proof2.CreatedAt, &proof2.UpdatedAt, - ) - - if errors.Is(err, pgx.ErrNoRows) { - return nil, nil, state.ErrNotFound - } else if err != nil { - return nil, nil, err - } - - return proof1, proof2, err -} - -// AddGeneratedProof adds a generated proof to the storage -func (p *PostgresStorage) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { - const addGeneratedProofSQL = ` - INSERT INTO aggregator.proof ( - batch_num, batch_num_final, proof, proof_id, input_prover, prover, - prover_id, generating_since, created_at, updated_at - ) VALUES ( - $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 - ) - ` - e := p.getExecQuerier(dbTx) - now := time.Now().UTC().Round(time.Microsecond) - _, err := e.Exec( - ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, - proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now, now, - ) - return err -} - -// UpdateGeneratedProof updates a generated proof in the storage -func (p *PostgresStorage) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { - const addGeneratedProofSQL = ` - UPDATE aggregator.proof - SET proof = $3, - proof_id = $4, - input_prover = $5, - prover = $6, - prover_id = $7, - generating_since = $8, - updated_at = $9 - WHERE batch_num = $1 - AND batch_num_final = $2 - ` - e := p.getExecQuerier(dbTx) - now := time.Now().UTC().Round(time.Microsecond) - _, err := e.Exec( - ctx, addGeneratedProofSQL, proof.BatchNumber, proof.BatchNumberFinal, proof.Proof, proof.ProofID, - proof.InputProver, proof.Prover, proof.ProverID, proof.GeneratingSince, now, - ) - return err -} - -// DeleteGeneratedProofs deletes from the storage 
the generated proofs falling -// inside the batch numbers range. -func (p *PostgresStorage) DeleteGeneratedProofs( - ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx, -) error { - const deleteGeneratedProofSQL = "DELETE FROM aggregator.proof WHERE batch_num >= $1 AND batch_num_final <= $2" - e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, deleteGeneratedProofSQL, batchNumber, batchNumberFinal) - return err -} - -// CleanupGeneratedProofs deletes from the storage the generated proofs up to -// the specified batch number included. -func (p *PostgresStorage) CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { - const deleteGeneratedProofSQL = "DELETE FROM aggregator.proof WHERE batch_num_final <= $1" - e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, deleteGeneratedProofSQL, batchNumber) - return err -} - -// CleanupLockedProofs deletes from the storage the proofs locked in generating -// state for more than the provided threshold. -func (p *PostgresStorage) CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) { - interval, err := toPostgresInterval(duration) - if err != nil { - return 0, err - } - sql := fmt.Sprintf("DELETE FROM aggregator.proof WHERE generating_since < (NOW() - interval '%s')", interval) - e := p.getExecQuerier(dbTx) - ct, err := e.Exec(ctx, sql) - if err != nil { - return 0, err - } - return ct.RowsAffected(), nil -} - -// DeleteUngeneratedProofs deletes ungenerated proofs. -// This method is meant to be use during aggregator boot-up sequence -func (p *PostgresStorage) DeleteUngeneratedProofs(ctx context.Context, dbTx pgx.Tx) error { - const deleteUngeneratedProofsSQL = "DELETE FROM aggregator.proof WHERE generating_since IS NOT NULL" - e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, deleteUngeneratedProofsSQL) - return err -} - -func toPostgresInterval(duration string) (string, error) { - unit := duration[len(duration)-1] - var pgUnit string - - switch unit { - case 's': - pgUnit = "second" - case 'm': - pgUnit = "minute" - case 'h': - pgUnit = "hour" - default: - return "", state.ErrUnsupportedDuration - } - - isMoreThanOne := duration[0] != '1' || len(duration) > 2 //nolint:mnd - if isMoreThanOne { - pgUnit += "s" - } - - return fmt.Sprintf("%s %s", duration[:len(duration)-1], pgUnit), nil -} diff --git a/state/pgstatestorage/sequence.go b/state/pgstatestorage/sequence.go deleted file mode 100644 index 7d5be9fb..00000000 --- a/state/pgstatestorage/sequence.go +++ /dev/null @@ -1,21 +0,0 @@ -package pgstatestorage - -import ( - "context" - - "github.com/0xPolygon/cdk/state" - "github.com/jackc/pgx/v4" -) - -// AddSequence stores the sequence information to allow the aggregator verify sequences. 
-func (p *PostgresStorage) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error { - const addSequenceSQL = ` - INSERT INTO aggregator.sequence (from_batch_num, to_batch_num) - VALUES($1, $2) - ON CONFLICT (from_batch_num) DO UPDATE SET to_batch_num = $2 - ` - - e := p.getExecQuerier(dbTx) - _, err := e.Exec(ctx, addSequenceSQL, sequence.FromBatchNumber, sequence.ToBatchNumber) - return err -} diff --git a/state/state.go b/state/state.go deleted file mode 100644 index c9235ce4..00000000 --- a/state/state.go +++ /dev/null @@ -1,40 +0,0 @@ -package state - -import ( - "context" - - "github.com/ethereum/go-ethereum/common" - "github.com/jackc/pgx/v4" -) - -var ( - // ZeroHash is the hash 0x0000000000000000000000000000000000000000000000000000000000000000 - ZeroHash = common.Hash{} - // ZeroAddress is the address 0x0000000000000000000000000000000000000000 - ZeroAddress = common.Address{} -) - -// State is an implementation of the state -type State struct { - cfg Config - storage -} - -// NewState creates a new State -func NewState(cfg Config, storage storage) *State { - state := &State{ - cfg: cfg, - storage: storage, - } - - return state -} - -// BeginStateTransaction starts a state transaction -func (s *State) BeginStateTransaction(ctx context.Context) (pgx.Tx, error) { - tx, err := s.Begin(ctx) - if err != nil { - return nil, err - } - return tx, nil -} diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index 13539f2f..2c32b7ea 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -3,6 +3,7 @@ package sync import ( "context" "errors" + "fmt" "math/big" "time" @@ -163,9 +164,13 @@ func (d *EVMDownloaderImplementation) WaitForNewBlocks( case <-ticker.C: header, err := d.ethClient.HeaderByNumber(ctx, d.blockFinality) if err != nil { - attempts++ - d.log.Error("error getting last block num from eth client: ", err) - d.rh.Handle("waitForNewBlocks", attempts) + if ctx.Err() == nil { + attempts++ + d.log.Error("error getting last block num from eth client: ", err) + d.rh.Handle("waitForNewBlocks", attempts) + } else { + d.log.Warn("context has been canceled while trying to get header by number") + } continue } if header.Number.Uint64() > lastBlockSeen { @@ -225,6 +230,11 @@ func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context, } } +func filterQueryToString(query ethereum.FilterQuery) string { + return fmt.Sprintf("FromBlock: %s, ToBlock: %s, Addresses: %s, Topics: %s", + query.FromBlock.String(), query.ToBlock.String(), query.Addresses, query.Topics) +} + func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log { query := ethereum.FilterQuery{ FromBlock: new(big.Int).SetUint64(fromBlock), @@ -245,7 +255,10 @@ func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, to } attempts++ - d.log.Error("error calling FilterLogs to eth client: ", err) + d.log.Errorf("error calling FilterLogs to eth client: filter: %s err:%w ", + filterQueryToString(query), + err, + ) d.rh.Handle("getLogs", attempts) continue } diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go index 04c92e72..f654db05 100644 --- a/sync/evmdownloader_test.go +++ b/sync/evmdownloader_test.go @@ -381,6 +381,44 @@ func TestGetBlockHeader(t *testing.T) { assert.False(t, isCanceled) } +func TestFilterQueryToString(t *testing.T) { + addr1 := common.HexToAddress("0xf000") + addr2 := common.HexToAddress("0xabcd") + query := ethereum.FilterQuery{ + FromBlock: new(big.Int).SetUint64(1000), + 
Addresses: []common.Address{addr1, addr2}, + ToBlock: new(big.Int).SetUint64(1100), + } + + assert.Equal(t, "FromBlock: 1000, ToBlock: 1100, Addresses: [0x000000000000000000000000000000000000f000 0x000000000000000000000000000000000000ABcD], Topics: []", filterQueryToString(query)) + + query = ethereum.FilterQuery{ + FromBlock: new(big.Int).SetUint64(1000), + Addresses: []common.Address{addr1, addr2}, + ToBlock: new(big.Int).SetUint64(1100), + Topics: [][]common.Hash{{common.HexToHash("0x1234"), common.HexToHash("0x5678")}}, + } + assert.Equal(t, "FromBlock: 1000, ToBlock: 1100, Addresses: [0x000000000000000000000000000000000000f000 0x000000000000000000000000000000000000ABcD], Topics: [[0x0000000000000000000000000000000000000000000000000000000000001234 0x0000000000000000000000000000000000000000000000000000000000005678]]", filterQueryToString(query)) +} + +func TestGetLogs(t *testing.T) { + mockEthClient := NewL2Mock(t) + sut := EVMDownloaderImplementation{ + ethClient: mockEthClient, + adressessToQuery: []common.Address{contractAddr}, + log: log.WithFields("test", "EVMDownloaderImplementation"), + rh: &RetryHandler{ + RetryAfterErrorPeriod: time.Millisecond, + MaxRetryAttemptsAfterError: 5, + }, + } + ctx := context.TODO() + mockEthClient.EXPECT().FilterLogs(ctx, mock.Anything).Return(nil, errors.New("foo")).Once() + mockEthClient.EXPECT().FilterLogs(ctx, mock.Anything).Return(nil, nil).Once() + logs := sut.GetLogs(ctx, 0, 1) + require.Equal(t, []types.Log{}, logs) +} + func buildAppender() LogAppenderMap { appender := make(LogAppenderMap) appender[eventSignature] = func(b *EVMBlock, l types.Log) error { diff --git a/sync/mock_downloader_test.go b/sync/mock_downloader_test.go index f28045b5..45a53b84 100644 --- a/sync/mock_downloader_test.go +++ b/sync/mock_downloader_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.45.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package sync @@ -14,11 +14,49 @@ type EVMDownloaderMock struct { mock.Mock } +type EVMDownloaderMock_Expecter struct { + mock *mock.Mock +} + +func (_m *EVMDownloaderMock) EXPECT() *EVMDownloaderMock_Expecter { + return &EVMDownloaderMock_Expecter{mock: &_m.Mock} +} + // Download provides a mock function with given fields: ctx, fromBlock, downloadedCh func (_m *EVMDownloaderMock) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) { _m.Called(ctx, fromBlock, downloadedCh) } +// EVMDownloaderMock_Download_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Download' +type EVMDownloaderMock_Download_Call struct { + *mock.Call +} + +// Download is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - downloadedCh chan EVMBlock +func (_e *EVMDownloaderMock_Expecter) Download(ctx interface{}, fromBlock interface{}, downloadedCh interface{}) *EVMDownloaderMock_Download_Call { + return &EVMDownloaderMock_Download_Call{Call: _e.mock.On("Download", ctx, fromBlock, downloadedCh)} +} + +func (_c *EVMDownloaderMock_Download_Call) Run(run func(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock)) *EVMDownloaderMock_Download_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(chan EVMBlock)) + }) + return _c +} + +func (_c *EVMDownloaderMock_Download_Call) Return() *EVMDownloaderMock_Download_Call { + _c.Call.Return() + return _c +} + +func (_c *EVMDownloaderMock_Download_Call) RunAndReturn(run func(context.Context, uint64, chan EVMBlock)) *EVMDownloaderMock_Download_Call { + _c.Call.Return(run) + return _c +} + // GetBlockHeader provides a mock function with given fields: ctx, blockNum func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { ret := _m.Called(ctx, blockNum) @@ -47,6 +85,35 @@ func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64 return r0, r1 } +// EVMDownloaderMock_GetBlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHeader' +type EVMDownloaderMock_GetBlockHeader_Call struct { + *mock.Call +} + +// GetBlockHeader is a helper method to define mock.On call +// - ctx context.Context +// - blockNum uint64 +func (_e *EVMDownloaderMock_Expecter) GetBlockHeader(ctx interface{}, blockNum interface{}) *EVMDownloaderMock_GetBlockHeader_Call { + return &EVMDownloaderMock_GetBlockHeader_Call{Call: _e.mock.On("GetBlockHeader", ctx, blockNum)} +} + +func (_c *EVMDownloaderMock_GetBlockHeader_Call) Run(run func(ctx context.Context, blockNum uint64)) *EVMDownloaderMock_GetBlockHeader_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *EVMDownloaderMock_GetBlockHeader_Call) Return(_a0 EVMBlockHeader, _a1 bool) *EVMDownloaderMock_GetBlockHeader_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EVMDownloaderMock_GetBlockHeader_Call) RunAndReturn(run func(context.Context, uint64) (EVMBlockHeader, bool)) *EVMDownloaderMock_GetBlockHeader_Call { + _c.Call.Return(run) + return _c +} + // GetEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock func (_m *EVMDownloaderMock) GetEventsByBlockRange(ctx context.Context, fromBlock uint64, toBlock uint64) []EVMBlock { ret := _m.Called(ctx, fromBlock, toBlock) @@ -67,6 +134,36 @@ func (_m *EVMDownloaderMock) GetEventsByBlockRange(ctx 
context.Context, fromBloc return r0 } +// EVMDownloaderMock_GetEventsByBlockRange_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEventsByBlockRange' +type EVMDownloaderMock_GetEventsByBlockRange_Call struct { + *mock.Call +} + +// GetEventsByBlockRange is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *EVMDownloaderMock_Expecter) GetEventsByBlockRange(ctx interface{}, fromBlock interface{}, toBlock interface{}) *EVMDownloaderMock_GetEventsByBlockRange_Call { + return &EVMDownloaderMock_GetEventsByBlockRange_Call{Call: _e.mock.On("GetEventsByBlockRange", ctx, fromBlock, toBlock)} +} + +func (_c *EVMDownloaderMock_GetEventsByBlockRange_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *EVMDownloaderMock_GetEventsByBlockRange_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *EVMDownloaderMock_GetEventsByBlockRange_Call) Return(_a0 []EVMBlock) *EVMDownloaderMock_GetEventsByBlockRange_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EVMDownloaderMock_GetEventsByBlockRange_Call) RunAndReturn(run func(context.Context, uint64, uint64) []EVMBlock) *EVMDownloaderMock_GetEventsByBlockRange_Call { + _c.Call.Return(run) + return _c +} + // GetLogs provides a mock function with given fields: ctx, fromBlock, toBlock func (_m *EVMDownloaderMock) GetLogs(ctx context.Context, fromBlock uint64, toBlock uint64) []types.Log { ret := _m.Called(ctx, fromBlock, toBlock) @@ -87,6 +184,36 @@ func (_m *EVMDownloaderMock) GetLogs(ctx context.Context, fromBlock uint64, toBl return r0 } +// EVMDownloaderMock_GetLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLogs' +type EVMDownloaderMock_GetLogs_Call struct { + *mock.Call +} + +// GetLogs is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock uint64 +func (_e *EVMDownloaderMock_Expecter) GetLogs(ctx interface{}, fromBlock interface{}, toBlock interface{}) *EVMDownloaderMock_GetLogs_Call { + return &EVMDownloaderMock_GetLogs_Call{Call: _e.mock.On("GetLogs", ctx, fromBlock, toBlock)} +} + +func (_c *EVMDownloaderMock_GetLogs_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *EVMDownloaderMock_GetLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) + }) + return _c +} + +func (_c *EVMDownloaderMock_GetLogs_Call) Return(_a0 []types.Log) *EVMDownloaderMock_GetLogs_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *EVMDownloaderMock_GetLogs_Call) RunAndReturn(run func(context.Context, uint64, uint64) []types.Log) *EVMDownloaderMock_GetLogs_Call { + _c.Call.Return(run) + return _c +} + // WaitForNewBlocks provides a mock function with given fields: ctx, lastBlockSeen func (_m *EVMDownloaderMock) WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) uint64 { ret := _m.Called(ctx, lastBlockSeen) @@ -105,6 +232,35 @@ func (_m *EVMDownloaderMock) WaitForNewBlocks(ctx context.Context, lastBlockSeen return r0 } +// EVMDownloaderMock_WaitForNewBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitForNewBlocks' +type EVMDownloaderMock_WaitForNewBlocks_Call struct { + *mock.Call +} + +// WaitForNewBlocks is a helper method to define mock.On call +// - ctx context.Context +// - 
lastBlockSeen uint64 +func (_e *EVMDownloaderMock_Expecter) WaitForNewBlocks(ctx interface{}, lastBlockSeen interface{}) *EVMDownloaderMock_WaitForNewBlocks_Call { + return &EVMDownloaderMock_WaitForNewBlocks_Call{Call: _e.mock.On("WaitForNewBlocks", ctx, lastBlockSeen)} +} + +func (_c *EVMDownloaderMock_WaitForNewBlocks_Call) Run(run func(ctx context.Context, lastBlockSeen uint64)) *EVMDownloaderMock_WaitForNewBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *EVMDownloaderMock_WaitForNewBlocks_Call) Return(newLastBlock uint64) *EVMDownloaderMock_WaitForNewBlocks_Call { + _c.Call.Return(newLastBlock) + return _c +} + +func (_c *EVMDownloaderMock_WaitForNewBlocks_Call) RunAndReturn(run func(context.Context, uint64) uint64) *EVMDownloaderMock_WaitForNewBlocks_Call { + _c.Call.Return(run) + return _c +} + // NewEVMDownloaderMock creates a new instance of EVMDownloaderMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewEVMDownloaderMock(t interface { diff --git a/sync/mock_l2_test.go b/sync/mock_l2_test.go index 7a4bae36..955af0db 100644 --- a/sync/mock_l2_test.go +++ b/sync/mock_l2_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package sync @@ -20,6 +20,14 @@ type L2Mock struct { mock.Mock } +type L2Mock_Expecter struct { + mock *mock.Mock +} + +func (_m *L2Mock) EXPECT() *L2Mock_Expecter { + return &L2Mock_Expecter{mock: &_m.Mock} +} + // BlockByHash provides a mock function with given fields: ctx, hash func (_m *L2Mock) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { ret := _m.Called(ctx, hash) @@ -50,6 +58,35 @@ func (_m *L2Mock) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo return r0, r1 } +// L2Mock_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' +type L2Mock_BlockByHash_Call struct { + *mock.Call +} + +// BlockByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *L2Mock_Expecter) BlockByHash(ctx interface{}, hash interface{}) *L2Mock_BlockByHash_Call { + return &L2Mock_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} +} + +func (_c *L2Mock_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *L2Mock_BlockByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *L2Mock_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *L2Mock_BlockByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *L2Mock_BlockByHash_Call { + _c.Call.Return(run) + return _c +} + // BlockByNumber provides a mock function with given fields: ctx, number func (_m *L2Mock) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { ret := _m.Called(ctx, number) @@ -80,6 +117,35 @@ func (_m *L2Mock) BlockByNumber(ctx context.Context, number *big.Int) (*types.Bl return r0, r1 } +// L2Mock_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' +type L2Mock_BlockByNumber_Call struct { + *mock.Call +} + +// BlockByNumber is a helper method to define mock.On call +// - 
ctx context.Context +// - number *big.Int +func (_e *L2Mock_Expecter) BlockByNumber(ctx interface{}, number interface{}) *L2Mock_BlockByNumber_Call { + return &L2Mock_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} +} + +func (_c *L2Mock_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *L2Mock_BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *L2Mock_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *L2Mock_BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *L2Mock_BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + // BlockNumber provides a mock function with given fields: ctx func (_m *L2Mock) BlockNumber(ctx context.Context) (uint64, error) { ret := _m.Called(ctx) @@ -108,6 +174,34 @@ func (_m *L2Mock) BlockNumber(ctx context.Context) (uint64, error) { return r0, r1 } +// L2Mock_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' +type L2Mock_BlockNumber_Call struct { + *mock.Call +} + +// BlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *L2Mock_Expecter) BlockNumber(ctx interface{}) *L2Mock_BlockNumber_Call { + return &L2Mock_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} +} + +func (_c *L2Mock_BlockNumber_Call) Run(run func(ctx context.Context)) *L2Mock_BlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L2Mock_BlockNumber_Call) Return(_a0 uint64, _a1 error) *L2Mock_BlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2Mock_BlockNumber_Call { + _c.Call.Return(run) + return _c +} + // CallContract provides a mock function with given fields: ctx, call, blockNumber func (_m *L2Mock) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, call, blockNumber) @@ -138,6 +232,36 @@ func (_m *L2Mock) CallContract(ctx context.Context, call ethereum.CallMsg, block return r0, r1 } +// L2Mock_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' +type L2Mock_CallContract_Call struct { + *mock.Call +} + +// CallContract is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +// - blockNumber *big.Int +func (_e *L2Mock_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *L2Mock_CallContract_Call { + return &L2Mock_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} +} + +func (_c *L2Mock_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *L2Mock_CallContract_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg), args[2].(*big.Int)) + }) + return _c +} + +func (_c *L2Mock_CallContract_Call) Return(_a0 []byte, _a1 error) *L2Mock_CallContract_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_CallContract_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *L2Mock_CallContract_Call { + _c.Call.Return(run) + return _c +} + // CodeAt provides a mock function with given 
fields: ctx, contract, blockNumber func (_m *L2Mock) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { ret := _m.Called(ctx, contract, blockNumber) @@ -168,6 +292,36 @@ func (_m *L2Mock) CodeAt(ctx context.Context, contract common.Address, blockNumb return r0, r1 } +// L2Mock_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' +type L2Mock_CodeAt_Call struct { + *mock.Call +} + +// CodeAt is a helper method to define mock.On call +// - ctx context.Context +// - contract common.Address +// - blockNumber *big.Int +func (_e *L2Mock_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *L2Mock_CodeAt_Call { + return &L2Mock_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} +} + +func (_c *L2Mock_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *L2Mock_CodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) + }) + return _c +} + +func (_c *L2Mock_CodeAt_Call) Return(_a0 []byte, _a1 error) *L2Mock_CodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_CodeAt_Call) RunAndReturn(run func(context.Context, common.Address, *big.Int) ([]byte, error)) *L2Mock_CodeAt_Call { + _c.Call.Return(run) + return _c +} + // EstimateGas provides a mock function with given fields: ctx, call func (_m *L2Mock) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { ret := _m.Called(ctx, call) @@ -196,6 +350,35 @@ func (_m *L2Mock) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint6 return r0, r1 } +// L2Mock_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' +type L2Mock_EstimateGas_Call struct { + *mock.Call +} + +// EstimateGas is a helper method to define mock.On call +// - ctx context.Context +// - call ethereum.CallMsg +func (_e *L2Mock_Expecter) EstimateGas(ctx interface{}, call interface{}) *L2Mock_EstimateGas_Call { + return &L2Mock_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} +} + +func (_c *L2Mock_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *L2Mock_EstimateGas_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.CallMsg)) + }) + return _c +} + +func (_c *L2Mock_EstimateGas_Call) Return(_a0 uint64, _a1 error) *L2Mock_EstimateGas_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *L2Mock_EstimateGas_Call { + _c.Call.Return(run) + return _c +} + // FilterLogs provides a mock function with given fields: ctx, q func (_m *L2Mock) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { ret := _m.Called(ctx, q) @@ -226,6 +409,35 @@ func (_m *L2Mock) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]typ return r0, r1 } +// L2Mock_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' +type L2Mock_FilterLogs_Call struct { + *mock.Call +} + +// FilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +func (_e *L2Mock_Expecter) FilterLogs(ctx interface{}, q interface{}) *L2Mock_FilterLogs_Call { + return &L2Mock_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} +} + +func (_c 
*L2Mock_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *L2Mock_FilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) + }) + return _c +} + +func (_c *L2Mock_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *L2Mock_FilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *L2Mock_FilterLogs_Call { + _c.Call.Return(run) + return _c +} + // HeaderByHash provides a mock function with given fields: ctx, hash func (_m *L2Mock) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { ret := _m.Called(ctx, hash) @@ -256,6 +468,35 @@ func (_m *L2Mock) HeaderByHash(ctx context.Context, hash common.Hash) (*types.He return r0, r1 } +// L2Mock_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' +type L2Mock_HeaderByHash_Call struct { + *mock.Call +} + +// HeaderByHash is a helper method to define mock.On call +// - ctx context.Context +// - hash common.Hash +func (_e *L2Mock_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *L2Mock_HeaderByHash_Call { + return &L2Mock_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} +} + +func (_c *L2Mock_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *L2Mock_HeaderByHash_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *L2Mock_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *L2Mock_HeaderByHash_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *L2Mock_HeaderByHash_Call { + _c.Call.Return(run) + return _c +} + // HeaderByNumber provides a mock function with given fields: ctx, number func (_m *L2Mock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { ret := _m.Called(ctx, number) @@ -286,6 +527,35 @@ func (_m *L2Mock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H return r0, r1 } +// L2Mock_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' +type L2Mock_HeaderByNumber_Call struct { + *mock.Call +} + +// HeaderByNumber is a helper method to define mock.On call +// - ctx context.Context +// - number *big.Int +func (_e *L2Mock_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *L2Mock_HeaderByNumber_Call { + return &L2Mock_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} +} + +func (_c *L2Mock_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *L2Mock_HeaderByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*big.Int)) + }) + return _c +} + +func (_c *L2Mock_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *L2Mock_HeaderByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *L2Mock_HeaderByNumber_Call { + _c.Call.Return(run) + return _c +} + // PendingCodeAt provides a mock function with given fields: ctx, account func (_m *L2Mock) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { ret := _m.Called(ctx, account) @@ -316,6 +586,35 @@ func (_m *L2Mock) 
PendingCodeAt(ctx context.Context, account common.Address) ([] return r0, r1 } +// L2Mock_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' +type L2Mock_PendingCodeAt_Call struct { + *mock.Call +} + +// PendingCodeAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *L2Mock_Expecter) PendingCodeAt(ctx interface{}, account interface{}) *L2Mock_PendingCodeAt_Call { + return &L2Mock_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} +} + +func (_c *L2Mock_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *L2Mock_PendingCodeAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *L2Mock_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *L2Mock_PendingCodeAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *L2Mock_PendingCodeAt_Call { + _c.Call.Return(run) + return _c +} + // PendingNonceAt provides a mock function with given fields: ctx, account func (_m *L2Mock) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { ret := _m.Called(ctx, account) @@ -344,6 +643,35 @@ func (_m *L2Mock) PendingNonceAt(ctx context.Context, account common.Address) (u return r0, r1 } +// L2Mock_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' +type L2Mock_PendingNonceAt_Call struct { + *mock.Call +} + +// PendingNonceAt is a helper method to define mock.On call +// - ctx context.Context +// - account common.Address +func (_e *L2Mock_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *L2Mock_PendingNonceAt_Call { + return &L2Mock_PendingNonceAt_Call{Call: _e.mock.On("PendingNonceAt", ctx, account)} +} + +func (_c *L2Mock_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *L2Mock_PendingNonceAt_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Address)) + }) + return _c +} + +func (_c *L2Mock_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *L2Mock_PendingNonceAt_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *L2Mock_PendingNonceAt_Call { + _c.Call.Return(run) + return _c +} + // SendTransaction provides a mock function with given fields: ctx, tx func (_m *L2Mock) SendTransaction(ctx context.Context, tx *types.Transaction) error { ret := _m.Called(ctx, tx) @@ -362,6 +690,35 @@ func (_m *L2Mock) SendTransaction(ctx context.Context, tx *types.Transaction) er return r0 } +// L2Mock_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTransaction' +type L2Mock_SendTransaction_Call struct { + *mock.Call +} + +// SendTransaction is a helper method to define mock.On call +// - ctx context.Context +// - tx *types.Transaction +func (_e *L2Mock_Expecter) SendTransaction(ctx interface{}, tx interface{}) *L2Mock_SendTransaction_Call { + return &L2Mock_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} +} + +func (_c *L2Mock_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *L2Mock_SendTransaction_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(*types.Transaction)) + }) + return _c +} + +func (_c *L2Mock_SendTransaction_Call) Return(_a0 error) *L2Mock_SendTransaction_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *L2Mock_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *L2Mock_SendTransaction_Call { + _c.Call.Return(run) + return _c +} + // SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch func (_m *L2Mock) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { ret := _m.Called(ctx, q, ch) @@ -392,6 +749,36 @@ func (_m *L2Mock) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuer return r0, r1 } +// L2Mock_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' +type L2Mock_SubscribeFilterLogs_Call struct { + *mock.Call +} + +// SubscribeFilterLogs is a helper method to define mock.On call +// - ctx context.Context +// - q ethereum.FilterQuery +// - ch chan<- types.Log +func (_e *L2Mock_Expecter) SubscribeFilterLogs(ctx interface{}, q interface{}, ch interface{}) *L2Mock_SubscribeFilterLogs_Call { + return &L2Mock_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} +} + +func (_c *L2Mock_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *L2Mock_SubscribeFilterLogs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) + }) + return _c +} + +func (_c *L2Mock_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *L2Mock_SubscribeFilterLogs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *L2Mock_SubscribeFilterLogs_Call { + _c.Call.Return(run) + return _c +} + // SubscribeNewHead provides a mock function with given fields: ctx, ch func (_m *L2Mock) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { ret := _m.Called(ctx, ch) @@ -422,6 +809,35 @@ func (_m *L2Mock) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) return r0, r1 } +// L2Mock_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' +type L2Mock_SubscribeNewHead_Call struct { + *mock.Call +} + +// SubscribeNewHead is a helper method to define mock.On call +// - ctx context.Context +// - ch chan<- *types.Header +func (_e *L2Mock_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *L2Mock_SubscribeNewHead_Call { + return &L2Mock_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} +} + +func (_c *L2Mock_SubscribeNewHead_Call) Run(run func(ctx context.Context, ch chan<- *types.Header)) *L2Mock_SubscribeNewHead_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(chan<- *types.Header)) + }) + return _c +} + +func (_c *L2Mock_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *L2Mock_SubscribeNewHead_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *L2Mock_SubscribeNewHead_Call { + _c.Call.Return(run) + return _c +} + // SuggestGasPrice 
provides a mock function with given fields: ctx func (_m *L2Mock) SuggestGasPrice(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) @@ -452,6 +868,34 @@ func (_m *L2Mock) SuggestGasPrice(ctx context.Context) (*big.Int, error) { return r0, r1 } +// L2Mock_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' +type L2Mock_SuggestGasPrice_Call struct { + *mock.Call +} + +// SuggestGasPrice is a helper method to define mock.On call +// - ctx context.Context +func (_e *L2Mock_Expecter) SuggestGasPrice(ctx interface{}) *L2Mock_SuggestGasPrice_Call { + return &L2Mock_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} +} + +func (_c *L2Mock_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *L2Mock_SuggestGasPrice_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L2Mock_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *L2Mock_SuggestGasPrice_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *L2Mock_SuggestGasPrice_Call { + _c.Call.Return(run) + return _c +} + // SuggestGasTipCap provides a mock function with given fields: ctx func (_m *L2Mock) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { ret := _m.Called(ctx) @@ -482,6 +926,34 @@ func (_m *L2Mock) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { return r0, r1 } +// L2Mock_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' +type L2Mock_SuggestGasTipCap_Call struct { + *mock.Call +} + +// SuggestGasTipCap is a helper method to define mock.On call +// - ctx context.Context +func (_e *L2Mock_Expecter) SuggestGasTipCap(ctx interface{}) *L2Mock_SuggestGasTipCap_Call { + return &L2Mock_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} +} + +func (_c *L2Mock_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *L2Mock_SuggestGasTipCap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *L2Mock_SuggestGasTipCap_Call) Return(_a0 *big.Int, _a1 error) *L2Mock_SuggestGasTipCap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *L2Mock_SuggestGasTipCap_Call { + _c.Call.Return(run) + return _c +} + // TransactionCount provides a mock function with given fields: ctx, blockHash func (_m *L2Mock) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { ret := _m.Called(ctx, blockHash) @@ -510,6 +982,35 @@ func (_m *L2Mock) TransactionCount(ctx context.Context, blockHash common.Hash) ( return r0, r1 } +// L2Mock_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' +type L2Mock_TransactionCount_Call struct { + *mock.Call +} + +// TransactionCount is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +func (_e *L2Mock_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *L2Mock_TransactionCount_Call { + return &L2Mock_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} +} + +func (_c *L2Mock_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *L2Mock_TransactionCount_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *L2Mock_TransactionCount_Call) Return(_a0 uint, _a1 error) *L2Mock_TransactionCount_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_TransactionCount_Call) RunAndReturn(run func(context.Context, common.Hash) (uint, error)) *L2Mock_TransactionCount_Call { + _c.Call.Return(run) + return _c +} + // TransactionInBlock provides a mock function with given fields: ctx, blockHash, index func (_m *L2Mock) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { ret := _m.Called(ctx, blockHash, index) @@ -540,6 +1041,36 @@ func (_m *L2Mock) TransactionInBlock(ctx context.Context, blockHash common.Hash, return r0, r1 } +// L2Mock_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' +type L2Mock_TransactionInBlock_Call struct { + *mock.Call +} + +// TransactionInBlock is a helper method to define mock.On call +// - ctx context.Context +// - blockHash common.Hash +// - index uint +func (_e *L2Mock_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *L2Mock_TransactionInBlock_Call { + return &L2Mock_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} +} + +func (_c *L2Mock_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash common.Hash, index uint)) *L2Mock_TransactionInBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) + }) + return _c +} + +func (_c *L2Mock_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) *L2Mock_TransactionInBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *L2Mock_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *L2Mock_TransactionInBlock_Call { + _c.Call.Return(run) + return _c +} + // NewL2Mock creates a new instance of L2Mock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewL2Mock(t interface { diff --git a/sync/mock_processor_test.go b/sync/mock_processor_test.go index afbb34cb..96ece8d4 100644 --- a/sync/mock_processor_test.go +++ b/sync/mock_processor_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. 
package sync @@ -13,6 +13,14 @@ type ProcessorMock struct { mock.Mock } +type ProcessorMock_Expecter struct { + mock *mock.Mock +} + +func (_m *ProcessorMock) EXPECT() *ProcessorMock_Expecter { + return &ProcessorMock_Expecter{mock: &_m.Mock} +} + // GetLastProcessedBlock provides a mock function with given fields: ctx func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, error) { ret := _m.Called(ctx) @@ -41,6 +49,34 @@ func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, err return r0, r1 } +// ProcessorMock_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' +type ProcessorMock_GetLastProcessedBlock_Call struct { + *mock.Call +} + +// GetLastProcessedBlock is a helper method to define mock.On call +// - ctx context.Context +func (_e *ProcessorMock_Expecter) GetLastProcessedBlock(ctx interface{}) *ProcessorMock_GetLastProcessedBlock_Call { + return &ProcessorMock_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} +} + +func (_c *ProcessorMock_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *ProcessorMock_GetLastProcessedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ProcessorMock_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *ProcessorMock_GetLastProcessedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ProcessorMock_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *ProcessorMock_GetLastProcessedBlock_Call { + _c.Call.Return(run) + return _c +} + // ProcessBlock provides a mock function with given fields: ctx, block func (_m *ProcessorMock) ProcessBlock(ctx context.Context, block Block) error { ret := _m.Called(ctx, block) @@ -59,6 +95,35 @@ func (_m *ProcessorMock) ProcessBlock(ctx context.Context, block Block) error { return r0 } +// ProcessorMock_ProcessBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlock' +type ProcessorMock_ProcessBlock_Call struct { + *mock.Call +} + +// ProcessBlock is a helper method to define mock.On call +// - ctx context.Context +// - block Block +func (_e *ProcessorMock_Expecter) ProcessBlock(ctx interface{}, block interface{}) *ProcessorMock_ProcessBlock_Call { + return &ProcessorMock_ProcessBlock_Call{Call: _e.mock.On("ProcessBlock", ctx, block)} +} + +func (_c *ProcessorMock_ProcessBlock_Call) Run(run func(ctx context.Context, block Block)) *ProcessorMock_ProcessBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(Block)) + }) + return _c +} + +func (_c *ProcessorMock_ProcessBlock_Call) Return(_a0 error) *ProcessorMock_ProcessBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ProcessorMock_ProcessBlock_Call) RunAndReturn(run func(context.Context, Block) error) *ProcessorMock_ProcessBlock_Call { + _c.Call.Return(run) + return _c +} + // Reorg provides a mock function with given fields: ctx, firstReorgedBlock func (_m *ProcessorMock) Reorg(ctx context.Context, firstReorgedBlock uint64) error { ret := _m.Called(ctx, firstReorgedBlock) @@ -77,6 +142,35 @@ func (_m *ProcessorMock) Reorg(ctx context.Context, firstReorgedBlock uint64) er return r0 } +// ProcessorMock_Reorg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reorg' +type ProcessorMock_Reorg_Call struct { + *mock.Call +} + +// Reorg is a 
helper method to define mock.On call +// - ctx context.Context +// - firstReorgedBlock uint64 +func (_e *ProcessorMock_Expecter) Reorg(ctx interface{}, firstReorgedBlock interface{}) *ProcessorMock_Reorg_Call { + return &ProcessorMock_Reorg_Call{Call: _e.mock.On("Reorg", ctx, firstReorgedBlock)} +} + +func (_c *ProcessorMock_Reorg_Call) Run(run func(ctx context.Context, firstReorgedBlock uint64)) *ProcessorMock_Reorg_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ProcessorMock_Reorg_Call) Return(_a0 error) *ProcessorMock_Reorg_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ProcessorMock_Reorg_Call) RunAndReturn(run func(context.Context, uint64) error) *ProcessorMock_Reorg_Call { + _c.Call.Return(run) + return _c +} + // NewProcessorMock creates a new instance of ProcessorMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewProcessorMock(t interface { diff --git a/sync/mock_reorgdetector_test.go b/sync/mock_reorgdetector_test.go index 9689f7e7..43551baa 100644 --- a/sync/mock_reorgdetector_test.go +++ b/sync/mock_reorgdetector_test.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.39.0. DO NOT EDIT. +// Code generated by mockery. DO NOT EDIT. package sync @@ -17,6 +17,14 @@ type ReorgDetectorMock struct { mock.Mock } +type ReorgDetectorMock_Expecter struct { + mock *mock.Mock +} + +func (_m *ReorgDetectorMock) EXPECT() *ReorgDetectorMock_Expecter { + return &ReorgDetectorMock_Expecter{mock: &_m.Mock} +} + // AddBlockToTrack provides a mock function with given fields: ctx, id, blockNum, blockHash func (_m *ReorgDetectorMock) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error { ret := _m.Called(ctx, id, blockNum, blockHash) @@ -35,6 +43,37 @@ func (_m *ReorgDetectorMock) AddBlockToTrack(ctx context.Context, id string, blo return r0 } +// ReorgDetectorMock_AddBlockToTrack_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddBlockToTrack' +type ReorgDetectorMock_AddBlockToTrack_Call struct { + *mock.Call +} + +// AddBlockToTrack is a helper method to define mock.On call +// - ctx context.Context +// - id string +// - blockNum uint64 +// - blockHash common.Hash +func (_e *ReorgDetectorMock_Expecter) AddBlockToTrack(ctx interface{}, id interface{}, blockNum interface{}, blockHash interface{}) *ReorgDetectorMock_AddBlockToTrack_Call { + return &ReorgDetectorMock_AddBlockToTrack_Call{Call: _e.mock.On("AddBlockToTrack", ctx, id, blockNum, blockHash)} +} + +func (_c *ReorgDetectorMock_AddBlockToTrack_Call) Run(run func(ctx context.Context, id string, blockNum uint64, blockHash common.Hash)) *ReorgDetectorMock_AddBlockToTrack_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(uint64), args[3].(common.Hash)) + }) + return _c +} + +func (_c *ReorgDetectorMock_AddBlockToTrack_Call) Return(_a0 error) *ReorgDetectorMock_AddBlockToTrack_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *ReorgDetectorMock_AddBlockToTrack_Call) RunAndReturn(run func(context.Context, string, uint64, common.Hash) error) *ReorgDetectorMock_AddBlockToTrack_Call { + _c.Call.Return(run) + return _c +} + // Subscribe provides a mock function with given fields: id func (_m *ReorgDetectorMock) Subscribe(id string) (*reorgdetector.Subscription, error) { ret := 
_m.Called(id) @@ -65,6 +104,34 @@ func (_m *ReorgDetectorMock) Subscribe(id string) (*reorgdetector.Subscription, return r0, r1 } +// ReorgDetectorMock_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' +type ReorgDetectorMock_Subscribe_Call struct { + *mock.Call +} + +// Subscribe is a helper method to define mock.On call +// - id string +func (_e *ReorgDetectorMock_Expecter) Subscribe(id interface{}) *ReorgDetectorMock_Subscribe_Call { + return &ReorgDetectorMock_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} +} + +func (_c *ReorgDetectorMock_Subscribe_Call) Run(run func(id string)) *ReorgDetectorMock_Subscribe_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *ReorgDetectorMock_Subscribe_Call) Return(_a0 *reorgdetector.Subscription, _a1 error) *ReorgDetectorMock_Subscribe_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ReorgDetectorMock_Subscribe_Call) RunAndReturn(run func(string) (*reorgdetector.Subscription, error)) *ReorgDetectorMock_Subscribe_Call { + _c.Call.Return(run) + return _c +} + // NewReorgDetectorMock creates a new instance of ReorgDetectorMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewReorgDetectorMock(t interface { diff --git a/test/Makefile b/test/Makefile index 12f406fd..05823e66 100644 --- a/test/Makefile +++ b/test/Makefile @@ -45,21 +45,23 @@ generate-mocks-helpers: ## Generates mocks for helpers , using mockery tool .PHONY: generate-mocks-sync generate-mocks-sync: ## Generates mocks for sync, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go ${COMMON_MOCKERY_PARAMS} + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go ${COMMON_MOCKERY_PARAMS} + .PHONY: generate-mocks-aggregator 
generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ProverInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=ProverInterfaceMock --filename=mock_prover.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateInterfaceMock --filename=mock_state.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StorageInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StorageInterfaceMock --filename=mock_storage.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock --filename=mock_synchronizer.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../aggregator/mocks --outpkg=mocks --structname=DbTxMock --filename=mock_dbtx.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Txer --dir=../db --output=../aggregator/mocks --outpkg=mocks --structname=TxerMock --filename=mock_txer.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go - + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AggregatorService_ChannelServer --dir=../aggregator/prover --output=../aggregator/prover/mocks --outpkg=mocks --structname=ChannelMock --filename=mock_channel.go + .PHONY: generate-mocks-aggsender generate-mocks-aggsender: ## Generates mocks for aggsender, using mockery tool @@ -79,22 +81,27 @@ generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool .PHONY: test-e2e-fork9-validium test-e2e-fork9-validium: stop ./run-e2e.sh fork9 cdk-validium - bats . + bats bats/fep/ .PHONY: test-e2e-fork11-rollup test-e2e-fork11-rollup: stop ./run-e2e.sh fork11 rollup - bats . + bats bats/fep/ .PHONY: test-e2e-fork12-validium test-e2e-fork12-validium: stop ./run-e2e.sh fork12 cdk-validium - bats . + bats bats/fep/ .PHONY: test-e2e-fork12-rollup test-e2e-fork12-rollup: stop ./run-e2e.sh fork12 rollup - bats . 
+ bats bats/fep/ + +.PHONY: test-e2e-fork12-pessimistic +test-e2e-fork12-pessimistic: stop + ./run-e2e.sh fork12 pessimistic + bats bats/pp/ .PHONY: stop stop: diff --git a/test/access-list-e2e.bats b/test/bats/fep/access-list-e2e.bats similarity index 98% rename from test/access-list-e2e.bats rename to test/bats/fep/access-list-e2e.bats index 83947c03..b1e07f78 100644 --- a/test/access-list-e2e.bats +++ b/test/bats/fep/access-list-e2e.bats @@ -1,6 +1,6 @@ setup() { - load 'helpers/common-setup' - load 'helpers/common' + load '../../helpers/common-setup' + load '../../helpers/common' _common_setup readonly erigon_sequencer_node=${KURTOSIS_ERIGON_SEQUENCER:-cdk-erigon-sequencer-001} diff --git a/test/basic-e2e.bats b/test/bats/fep/basic-e2e.bats similarity index 99% rename from test/basic-e2e.bats rename to test/bats/fep/basic-e2e.bats index 1024ac4a..088ee239 100644 --- a/test/basic-e2e.bats +++ b/test/bats/fep/basic-e2e.bats @@ -1,6 +1,6 @@ setup() { - load 'helpers/common-setup' - load 'helpers/common' + load '../../helpers/common-setup' + load '../../helpers/common' _common_setup readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} diff --git a/test/bridge-e2e.bats b/test/bats/fep/bridge-e2e.bats similarity index 83% rename from test/bridge-e2e.bats rename to test/bats/fep/bridge-e2e.bats index e754ef70..35ca9382 100644 --- a/test/bridge-e2e.bats +++ b/test/bats/fep/bridge-e2e.bats @@ -1,8 +1,8 @@ setup() { - load 'helpers/common-setup' + load '../../helpers/common-setup' _common_setup - load 'helpers/common' - load 'helpers/lxly-bridge-test' + load '../../helpers/common' + load '../../helpers/lxly-bridge-test' if [ -z "$BRIDGE_ADDRESS" ]; then local combined_json_file="/opt/zkevm/combined.json" @@ -47,39 +47,62 @@ setup() { readonly weth_token_addr=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'WETHToken()' | cast parse-bytes32-address) } -@test "Native gas token deposit to WETH" { +# Helper function to run native gas token deposit to WETH +native_gas_token_deposit_to_WETH() { + local bridge_type="$1" + + echo "Bridge_type: $bridge_type" >&3 + destination_addr=$sender_addr local initial_receiver_balance=$(cast call --rpc-url "$l2_rpc_url" "$weth_token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 - echo "=== Running LxLy deposit on L1 to network: $l2_rpc_network_id native_token: $native_token_addr" >&3 + echo "=== Running LxLy deposit $bridge_type on L1 to network: $l2_rpc_network_id native_token: $native_token_addr" >&3 destination_net=$l2_rpc_network_id - run bridgeAsset "$native_token_addr" "$l1_rpc_url" + + if [[ $bridge_type == "bridgeMessage" ]]; then + run bridge_message "$native_token_addr" "$l1_rpc_url" + else + run bridge_asset "$native_token_addr" "$l1_rpc_url" + fi assert_success - echo "=== Running LxLy claim on L2" >&3 + echo "=== Claiming on L2..." 
>&3 timeout="120" claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" + run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" "$bridge_type" assert_success run verify_balance "$l2_rpc_url" "$weth_token_addr" "$destination_addr" "$initial_receiver_balance" "$ether_value" assert_success - echo "=== bridgeAsset L2 WETH: $weth_token_addr to L1 ETH" >&3 + echo "=== $bridge_type L2 WETH: $weth_token_addr to L1 ETH" >&3 destination_addr=$sender_addr destination_net=0 - run bridgeAsset "$weth_token_addr" "$l2_rpc_url" + + if [[ $bridge_type == "bridgeMessage" ]]; then + run bridge_message "$weth_token_addr" "$l2_rpc_url" + else + run bridge_asset "$weth_token_addr" "$l2_rpc_url" + fi assert_success - echo "=== Claim in L1 ETH" >&3 + echo "=== Claiming on L1..." >&3 timeout="400" claim_frequency="60" - run wait_for_claim "$timeout" "$claim_frequency" "$l1_rpc_url" + run wait_for_claim "$timeout" "$claim_frequency" "$l1_rpc_url" "$bridge_type" assert_success } +@test "Native gas token deposit to WETH - BridgeAsset" { + run native_gas_token_deposit_to_WETH "bridgeAsset" +} + +@test "Native gas token deposit to WETH - BridgeMessage" { + run native_gas_token_deposit_to_WETH "bridgeMessage" +} + @test "Custom gas token deposit" { echo "Gas token addr $gas_token_addr, L1 RPC: $l1_rpc_url" >&3 @@ -127,13 +150,13 @@ setup() { destination_addr=$receiver destination_net=$l2_rpc_network_id amount=$wei_amount - run bridgeAsset "$gas_token_addr" "$l1_rpc_url" + run bridge_asset "$gas_token_addr" "$l1_rpc_url" assert_success # Claim deposits (settle them on the L2) timeout="120" claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" + run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" "bridgeAsset" assert_success # Validate that the native token of receiver on L2 has increased by the bridge tokens amount @@ -150,14 +173,14 @@ setup() { echo "Receiver balance of gas token on L1 $initial_receiver_balance" >&3 destination_net=$l1_rpc_network_id - run bridgeAsset "$native_token_addr" "$l2_rpc_url" + run bridge_asset "$native_token_addr" "$l2_rpc_url" assert_success # Claim withdrawals (settle them on the L1) timeout="360" claim_frequency="10" destination_net=$l1_rpc_network_id - run wait_for_claim "$timeout" "$claim_frequency" "$l1_rpc_url" + run wait_for_claim "$timeout" "$claim_frequency" "$l1_rpc_url" "bridgeAsset" assert_success # Validate that the token of receiver on L1 has increased by the bridge tokens amount diff --git a/test/e2e.bats b/test/bats/fep/e2e.bats similarity index 56% rename from test/e2e.bats rename to test/bats/fep/e2e.bats index c85e33ce..2b3206ba 100644 --- a/test/e2e.bats +++ b/test/bats/fep/e2e.bats @@ -1,10 +1,10 @@ setup() { - load 'helpers/common-setup' + load '../../helpers/common-setup' _common_setup } @test "Verify batches" { echo "Waiting 10 minutes to get some verified batch...." 
- run $PROJECT_ROOT/test/scripts/batch_verification_monitor.sh 0 600 + run $PROJECT_ROOT/../scripts/batch_verification_monitor.sh 0 600 assert_success } diff --git a/test/bats/pp/bridge-e2e.bats b/test/bats/pp/bridge-e2e.bats new file mode 100644 index 00000000..18ed5c86 --- /dev/null +++ b/test/bats/pp/bridge-e2e.bats @@ -0,0 +1,76 @@ +setup() { + load '../../helpers/common-setup' + _common_setup + load '../../helpers/common' + load '../../helpers/lxly-bridge-test' + + if [ -z "$BRIDGE_ADDRESS" ]; then + local combined_json_file="/opt/zkevm/combined.json" + echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 + + # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress + combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) + bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) + BRIDGE_ADDRESS=$bridge_default_address + fi + echo "Bridge address=$BRIDGE_ADDRESS" >&3 + + readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} + readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" + destination_net=${DESTINATION_NET:-"1"} + destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} + ether_value=${ETHER_VALUE:-"0.0200000054"} + amount=$(cast to-wei $ether_value ether) + readonly native_token_addr=${NATIVE_TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} + if [[ -n "$GAS_TOKEN_ADDR" ]]; then + echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 + gas_token_addr="$GAS_TOKEN_ADDR" + else + echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." 
>&3 + readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json + run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" + assert_success + assert_output --regexp "0x[a-fA-F0-9]{40}" + gas_token_addr=$output + fi + readonly is_forced=${IS_FORCED:-"true"} + readonly bridge_addr=$BRIDGE_ADDRESS + readonly meta_bytes=${META_BYTES:-"0x"} + + readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse rpc)"} + readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} + + readonly dry_run=${DRY_RUN:-"false"} + readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') + readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') + gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") + readonly weth_token_addr=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'WETHToken()' | cast parse-bytes32-address) +} + +@test "Native gas token deposit to WETH" { + destination_addr=$sender_addr + local initial_receiver_balance=$(cast call --rpc-url "$l2_rpc_url" "$weth_token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') + echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 + + echo "=== Running LxLy deposit on L1 to network: $l2_rpc_network_id native_token: $native_token_addr" >&3 + + destination_net=$l2_rpc_network_id + run bridge_asset "$native_token_addr" "$l1_rpc_url" + assert_success + + echo "=== Running LxLy claim on L2" >&3 + timeout="120" + claim_frequency="10" + run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" + assert_success + + run verify_balance "$l2_rpc_url" "$weth_token_addr" "$destination_addr" "$initial_receiver_balance" "$ether_value" + assert_success + + echo "=== bridgeAsset L2 WETH: $weth_token_addr to L1 ETH" >&3 + destination_addr=$sender_addr + destination_net=0 + run bridge_asset "$weth_token_addr" "$l2_rpc_url" + assert_success +} + diff --git a/test/bats/pp/e2e-pp.bats b/test/bats/pp/e2e-pp.bats new file mode 100644 index 00000000..f7963592 --- /dev/null +++ b/test/bats/pp/e2e-pp.bats @@ -0,0 +1,25 @@ +setup() { + load '../../helpers/common-setup' + _common_setup + + if [ -z "$BRIDGE_ADDRESS" ]; then + local combined_json_file="/opt/zkevm/combined.json" + echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 + + # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress + combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) + bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) + BRIDGE_ADDRESS=$bridge_default_address + fi + echo "Bridge address=$BRIDGE_ADDRESS" >&3 +} + +@test "Verify certificate settlement" { + echo "Waiting 10 minutes to get some settle certificate...." 
>&3 + + readonly bridge_addr=$BRIDGE_ADDRESS + readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') + + run $PROJECT_ROOT/../scripts/agglayer_certificates_monitor.sh 1 600 $l2_rpc_network_id + assert_success +} diff --git a/test/combinations/fork11-rollup.yml b/test/combinations/fork11-rollup.yml index 79baa92d..80491430 100644 --- a/test/combinations/fork11-rollup.yml +++ b/test/combinations/fork11-rollup.yml @@ -1,10 +1,9 @@ args: - zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11 + zkevm_contracts_image: leovct/zkevm-contracts:v7.0.0-rc.2-fork.11-patch.1 zkevm_prover_image: hermeznetwork/zkevm-prover:v7.0.2-fork.11 cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.0-fork11-RC1 cdk_node_image: cdk - zkevm_use_gas_token_contract: true + gas_token_enabled: true data_availability_mode: rollup sequencer_type: erigon - \ No newline at end of file diff --git a/test/combinations/fork12-cdk-validium.yml b/test/combinations/fork12-cdk-validium.yml index c17444b3..f4d914c6 100644 --- a/test/combinations/fork12-cdk-validium.yml +++ b/test/combinations/fork12-cdk-validium.yml @@ -1,10 +1,8 @@ args: - zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12-patch.1 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 cdk_node_image: cdk - zkevm_use_gas_token_contract: true + gas_token_enabled: true data_availability_mode: cdk-validium sequencer_type: erigon - - diff --git a/test/combinations/fork12-pessimistic.yml b/test/combinations/fork12-pessimistic.yml new file mode 100644 index 00000000..a3734f6a --- /dev/null +++ b/test/combinations/fork12-pessimistic.yml @@ -0,0 +1,15 @@ +args: + agglayer_image: ghcr.io/agglayer/agglayer:0.2.0-rc.12 + cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.60.0-beta8 + cdk_node_image: cdk + zkevm_bridge_proxy_image: haproxy:3.0-bookworm + zkevm_bridge_service_image: hermeznetwork/zkevm-bridge-service:v0.6.0-RC1 + zkevm_bridge_ui_image: leovct/zkevm-bridge-ui:multi-network + zkevm_contracts_image: leovct/zkevm-contracts:v9.0.0-rc.3-pp-fork.12-patch.1 + additional_services: [] + consensus_contract_type: pessimistic + sequencer_type: erigon + erigon_strict_mode: false + gas_token_enabled: true + agglayer_prover_sp1_key: {{.agglayer_prover_sp1_key}} + enable_normalcy: true diff --git a/test/combinations/fork12-rollup.yml b/test/combinations/fork12-rollup.yml index 95a5111a..32d3ef8e 100644 --- a/test/combinations/fork12-rollup.yml +++ b/test/combinations/fork12-rollup.yml @@ -1,8 +1,8 @@ args: - zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12 + zkevm_contracts_image: leovct/zkevm-contracts:v8.0.0-rc.4-fork.12-patch.1 zkevm_prover_image: hermeznetwork/zkevm-prover:v8.0.0-RC12-fork.12 cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 cdk_node_image: cdk - zkevm_use_gas_token_contract: true + gas_token_enabled: true data_availability_mode: rollup sequencer_type: erigon diff --git a/test/combinations/fork9-cdk-validium.yml b/test/combinations/fork9-cdk-validium.yml index e0543654..515819b6 100644 --- a/test/combinations/fork9-cdk-validium.yml +++ b/test/combinations/fork9-cdk-validium.yml @@ -1,11 +1,11 @@ args: - zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9 + zkevm_contracts_image: leovct/zkevm-contracts:v6.0.0-rc.1-fork.9-patch.1 zkevm_prover_image: 
hermeznetwork/zkevm-prover:v6.0.6 cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.1.2 zkevm_node_image: hermeznetwork/zkevm-node:v0.7.3-RC1 cdk_validium_node_image: 0xpolygon/cdk-validium-node:0.7.0-cdk cdk_node_image: cdk - zkevm_use_gas_token_contract: true + gas_token_enabled: true additional_services: - pless_zkevm_node data_availability_mode: cdk-validium diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template index 4069b350..45ef7464 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -53,14 +53,6 @@ Outputs = ["stderr"] VerifyProofInterval = "10s" GasOffset = 150000 SettlementBackend = "agglayer" - [Aggregator.DB] - Name = "{{.aggregator_db.name}}" - User = "{{.aggregator_db.user}}" - Password = "{{.aggregator_db.password}}" - Host = "{{.aggregator_db.hostname}}" - Port = "{{.aggregator_db.port}}" - EnableLog = false - MaxConns = 200 [AggSender] CertificateSendInterval = "1m" diff --git a/test/config/test.config.toml b/test/config/test.config.toml index 94940469..9da00c79 100644 --- a/test/config/test.config.toml +++ b/test/config/test.config.toml @@ -58,14 +58,6 @@ AggLayerURL = "" SyncModeOnlyEnabled = false UseFullWitness = false SequencerPrivateKey = {} - [Aggregator.DB] - Name = "aggregator_db" - User = "aggregator_user" - Password = "aggregator_password" - Host = "cdk-aggregator-db" - Port = "5432" - EnableLog = false - MaxConns = 200 [Aggregator.Log] Environment = "development" # "production" or "development" Level = "info" diff --git a/test/helpers/lxly-bridge-test.bash b/test/helpers/lxly-bridge-test.bash index ad5ab943..53af437e 100644 --- a/test/helpers/lxly-bridge-test.bash +++ b/test/helpers/lxly-bridge-test.bash @@ -1,9 +1,9 @@ #!/usr/bin/env bash # Error code reference https://hackmd.io/WwahVBZERJKdfK3BbKxzQQ -function bridgeAsset() { +function bridge_message() { local token_addr="$1" local rpc_url="$2" - readonly bridge_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' + local bridge_sig='bridgeMessage(uint32,address,bool,bytes)' if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then echo "The ETH balance for sender "$sender_addr":" >&3 @@ -15,7 +15,37 @@ function bridgeAsset() { echo "$(cast --from-wei "$balance_wei")" >&3 fi - echo "Attempting to deposit $amount [wei] to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$rpc_url)" >&3 + echo "Attempting to deposit $amount [wei] using bridgeMessage to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$rpc_url)" >&3 + + if [[ $dry_run == "true" ]]; then + cast calldata $bridge_sig $destination_net $destination_addr $is_forced $meta_bytes + else + if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then + echo "cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $is_forced $meta_bytes" >&3 + cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $is_forced $meta_bytes + else + echo "cast send --legacy --private-key $sender_private_key --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $is_forced $meta_bytes" + cast send --legacy --private-key $sender_private_key --rpc-url $rpc_url $bridge_addr $bridge_sig 
$destination_net $destination_addr $is_forced $meta_bytes + fi + fi +} + +function bridge_asset() { + local token_addr="$1" + local rpc_url="$2" + local bridge_sig='bridgeAsset(uint32,address,uint256,address,bool,bytes)' + + if [[ $token_addr == "0x0000000000000000000000000000000000000000" ]]; then + echo "The ETH balance for sender "$sender_addr":" >&3 + cast balance -e --rpc-url $rpc_url $sender_addr >&3 + else + echo "The "$token_addr" token balance for sender "$sender_addr":" >&3 + echo "cast call --rpc-url $rpc_url $token_addr \"$balance_of_fn_sig\" $sender_addr" >&3 + balance_wei=$(cast call --rpc-url "$rpc_url" "$token_addr" "$balance_of_fn_sig" "$sender_addr" | awk '{print $1}') + echo "$(cast --from-wei "$balance_wei")" >&3 + fi + + echo "Attempting to deposit $amount [wei] using bridgeAsset to $destination_addr, token $token_addr (sender=$sender_addr, network id=$destination_net, rpc url=$rpc_url)" >&3 if [[ $dry_run == "true" ]]; then cast calldata $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes @@ -24,7 +54,7 @@ function bridgeAsset() { echo "cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes" >&3 cast send --legacy --private-key $sender_private_key --value $amount --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes else - echo "cast send --legacy --private-key $sender_private_key --rpc-url $rpc_url $bridge_addr \"$bridge_sig\" $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes" + echo "cast send --legacy --private-key $sender_private_key --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes" cast send --legacy --private-key $sender_private_key --rpc-url $rpc_url $bridge_addr $bridge_sig $destination_net $destination_addr $amount $token_addr $is_forced $meta_bytes fi fi @@ -32,7 +62,12 @@ function bridgeAsset() { function claim() { local destination_rpc_url="$1" - readonly claim_sig="claimAsset(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)" + local bridge_type="$2" + local claim_sig="claimAsset(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)" + if [[ $bridge_type == "bridgeMessage" ]]; then + claim_sig="claimMessage(bytes32[32],bytes32[32],uint256,bytes32,bytes32,uint32,address,uint32,address,uint256,bytes)" + fi + readonly bridge_deposit_file=$(mktemp) readonly claimable_deposit_file=$(mktemp) echo "Getting full list of deposits" >&3 @@ -94,6 +129,7 @@ function wait_for_claim() { local timeout="$1" # timeout (in seconds) local claim_frequency="$2" # claim frequency (in seconds) local destination_rpc_url="$3" # destination rpc url + local bridge_type="$4" # bridgeAsset or bridgeMessage local start_time=$(date +%s) local end_time=$((start_time + timeout)) @@ -104,7 +140,7 @@ function wait_for_claim() { exit 1 fi - run claim $destination_rpc_url + run claim $destination_rpc_url $bridge_type if [ $status -eq 0 ]; then break fi diff --git a/test/helpers/wait.go b/test/helpers/wait.go new file mode 100644 index 00000000..86a6f9fb --- /dev/null +++ b/test/helpers/wait.go @@ -0,0 +1,32 @@ +package helpers + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type Processorer interface { + 
GetLastProcessedBlock(ctx context.Context) (uint64, error) +} + +func RequireProcessorUpdated(t *testing.T, processor Processorer, targetBlock uint64) { + t.Helper() + const ( + maxIterations = 100 + sleepTimePerIteration = time.Millisecond * 10 + ) + ctx := context.Background() + for i := 0; i < maxIterations; i++ { + lpb, err := processor.GetLastProcessedBlock(ctx) + require.NoError(t, err) + if targetBlock <= lpb { + return + } + time.Sleep(sleepTimePerIteration) + } + require.NoError(t, errors.New("processor not updated")) +} diff --git a/test/run-e2e.sh b/test/run-e2e.sh index 08a6b2cd..adbbcbcb 100755 --- a/test/run-e2e.sh +++ b/test/run-e2e.sh @@ -9,7 +9,7 @@ fi DATA_AVAILABILITY_MODE=$2 if [ -z $DATA_AVAILABILITY_MODE ]; then - echo "Missing DATA_AVAILABILITY_MODE: ['rollup', 'cdk-validium']" + echo "Missing DATA_AVAILABILITY_MODE: ['rollup', 'cdk-validium', 'pessimistic']" exit 1 fi @@ -27,4 +27,9 @@ fi kurtosis clean --all echo "Override cdk config file" cp $BASE_FOLDER/config/kurtosis-cdk-node-config.toml.template $KURTOSIS_FOLDER/templates/trusted-node/cdk-node-config.toml -kurtosis run --enclave cdk --args-file "combinations/$FORK-$DATA_AVAILABILITY_MODE.yml" --image-download always $KURTOSIS_FOLDER +KURTOSIS_CONFIG_FILE="combinations/$FORK-$DATA_AVAILABILITY_MODE.yml" +TEMP_CONFIG_FILE=$(mktemp --suffix ".yml") +echo "rendering $KURTOSIS_CONFIG_FILE to temp file $TEMP_CONFIG_FILE" +go run ../scripts/run_template.go $KURTOSIS_CONFIG_FILE > $TEMP_CONFIG_FILE +kurtosis run --enclave cdk --args-file "$TEMP_CONFIG_FILE" --image-download always $KURTOSIS_FOLDER +rm $TEMP_CONFIG_FILE \ No newline at end of file diff --git a/test/scripts/agglayer_certificates_monitor.sh b/test/scripts/agglayer_certificates_monitor.sh new file mode 100755 index 00000000..c530548f --- /dev/null +++ b/test/scripts/agglayer_certificates_monitor.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# This script monitors the agglayer certificate settlement progress of the pessimistic proof. + +function parse_params(){ + # Check if the required arguments are provided. + if [ "$#" -lt 3 ]; then + echo "Usage: $0 <settle_certificates_target> <timeout> <l2_rpc_network_id>" + exit 1 + fi + + # The target number of certificates to be settled. + settle_certificates_target="$1" + + # The script timeout (in seconds). + timeout="$2" + + # The network id of the L2 network. + l2_rpc_network_id="$3" +} + +function check_timeout(){ + local _end_time=$1 + current_time=$(date +%s) + if ((current_time > _end_time)); then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached and the expected number of settled certificates was not found!" + exit 1 + fi +} + +function check_num_certificates(){ + readonly agglayer_rpc_url="$(kurtosis port print cdk agglayer agglayer)" + + cast_output=$(cast rpc --rpc-url "$agglayer_rpc_url" "interop_getLatestKnownCertificateHeader" "$l2_rpc_network_id" 2>&1) + + if [ $? -ne 0 ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] Error executing command cast rpc: $cast_output" + return + fi + + height=$(extract_certificate_height "$cast_output") + [[ -z "$height" ]] && { + echo "Error: Failed to extract certificate height." >&3 + return + } + + status=$(extract_certificate_status "$cast_output") + [[ -z "$status" ]] && { + echo "Error: Failed to extract certificate status." >&3 + return + } + + echo "[$(date '+%Y-%m-%d %H:%M:%S')] Last known agglayer certificate height: $height, status: $status" >&3 + + if (( height > settle_certificates_target - 1 )); then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ Success! The number of settled certificates has reached the target." 
>&3 + exit 0 + fi + + if (( height == settle_certificates_target - 1 )); then + if [ "$status" == "Settled" ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ Success! The number of settled certificates has reached the target." >&3 + exit 0 + fi + + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ⚠️ Warning! The number of settled certificates is one less than the target." >&3 + fi +} + +function extract_certificate_height() { + local cast_output="$1" + echo "$cast_output" | jq -r '.height' +} + +function extract_certificate_status() { + local cast_output="$1" + echo "$cast_output" | jq -r '.status' +} + +# MAIN + +parse_params $* +start_time=$(date +%s) +end_time=$((start_time + timeout)) +echo "[$(date '+%Y-%m-%d %H:%M:%S')] Start monitoring agglayer certificates progress..." +while true; do + check_num_certificates + check_timeout $end_time + sleep 10 +done
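+
+# Example usage (mirroring the invocation in test/bats/pp/e2e-pp.bats, where the
+# network id is resolved in the test setup): wait up to 600 seconds for at least
+# 1 settled certificate on the given L2 network.
+#   ./test/scripts/agglayer_certificates_monitor.sh 1 600 "$l2_rpc_network_id"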