diff --git a/.changeset/mean-dots-move.md b/.changeset/mean-dots-move.md new file mode 100644 index 00000000000..1169d8379e9 --- /dev/null +++ b/.changeset/mean-dots-move.md @@ -0,0 +1,5 @@ +--- +"chainlink": patch +--- + +Add config var Mercury.Transmitter.TransmitConcurrency #added diff --git a/.changeset/shiny-owls-destroy.md b/.changeset/shiny-owls-destroy.md new file mode 100644 index 00000000000..d132d6dbff8 --- /dev/null +++ b/.changeset/shiny-owls-destroy.md @@ -0,0 +1,6 @@ +--- +"chainlink": patch +--- + +Logging improvements for LLO +#internal diff --git a/.changeset/three-mayflies-learn.md b/.changeset/three-mayflies-learn.md new file mode 100644 index 00000000000..1ea4fad3924 --- /dev/null +++ b/.changeset/three-mayflies-learn.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +#updated Update a few incorrect occurrences of the password for notreal@fakeemail.ch. diff --git a/.github/e2e-tests.yml b/.github/e2e-tests.yml index 2ac1cd90505..b837c6f235b 100644 --- a/.github/e2e-tests.yml +++ b/.github/e2e-tests.yml @@ -961,7 +961,7 @@ runner-test-matrix: pyroscope_env: ci-smoke-ccipv1_6-evm-simulated test_env_vars: E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - E2E_JD_VERSION: 0.4.0 + E2E_JD_VERSION: 0.6.0 - id: smoke/ccip_usdc_test.go:* path: integration-tests/smoke/ccip_usdc_test.go @@ -975,7 +975,7 @@ runner-test-matrix: pyroscope_env: ci-smoke-ccipv1_6-evm-simulated test_env_vars: E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - E2E_JD_VERSION: 0.4.0 + E2E_JD_VERSION: 0.6.0 - id: smoke/fee_boosting_test.go:* path: integration-tests/smoke/fee_boosting_test.go @@ -989,7 +989,107 @@ runner-test-matrix: pyroscope_env: ci-smoke-ccipv1_6-evm-simulated test_env_vars: E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - E2E_JD_VERSION: 0.4.0 + E2E_JD_VERSION: 0.6.0 + + - id: smoke/ccip_rmn_test.go:^TestRMN_TwoMessagesOnTwoLanesIncludingBatching$ + path: integration-tests/smoke/ccip_rmn_test.go + test_env_type: docker + runs_on: ubuntu-latest + triggers: + - PR E2E Core Tests + - Merge Queue E2E Core Tests + - Nightly E2E Tests + test_cmd: cd integration-tests/smoke && go test -test.run ^TestRMN_TwoMessagesOnTwoLanesIncludingBatching$ -timeout 12m -test.parallel=1 -count=1 -json + pyroscope_env: ci-smoke-ccipv1_6-evm-simulated + test_env_vars: + E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 + E2E_JD_VERSION: 0.6.0 + E2E_RMN_RAGEPROXY_VERSION: master-5208d09 + E2E_RMN_AFN2PROXY_VERSION: master-5208d09 + + - id: smoke/ccip_rmn_test.go:^TestRMN_MultipleMessagesOnOneLaneNoWaitForExec$ + path: integration-tests/smoke/ccip_rmn_test.go + test_env_type: docker + runs_on: ubuntu-latest + triggers: + - PR E2E Core Tests + - Merge Queue E2E Core Tests + - Nightly E2E Tests + test_cmd: cd integration-tests/smoke && go test -test.run ^TestRMN_MultipleMessagesOnOneLaneNoWaitForExec$ -timeout 12m -test.parallel=1 -count=1 -json + pyroscope_env: ci-smoke-ccipv1_6-evm-simulated + test_env_vars: + E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 + E2E_JD_VERSION: 0.6.0 + E2E_RMN_RAGEPROXY_VERSION: master-5208d09 + E2E_RMN_AFN2PROXY_VERSION: master-5208d09 + +# Enable after flaking issue is resolved +# - id: smoke/ccip_rmn_test.go:^TestRMN_NotEnoughObservers$ +# path: integration-tests/smoke/ccip_rmn_test.go +# test_env_type: docker +# runs_on: ubuntu-latest +# triggers: +# - PR E2E Core Tests +# - Merge Queue E2E Core Tests +# - Nightly E2E Tests +# test_cmd: cd integration-tests/smoke && go test -test.run ^TestRMN_NotEnoughObservers$ -timeout 12m -test.parallel=1 -count=1 -json +# 
pyroscope_env: ci-smoke-ccipv1_6-evm-simulated +# test_env_vars: +# E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 +# E2E_JD_VERSION: 0.6.0 +# E2E_RMN_RAGEPROXY_VERSION: master-5208d09 +# E2E_RMN_AFN2PROXY_VERSION: master-5208d09 + + - id: smoke/ccip_rmn_test.go:^TestRMN_DifferentSigners$ + path: integration-tests/smoke/ccip_rmn_test.go + test_env_type: docker + runs_on: ubuntu-latest + triggers: + - PR E2E Core Tests + - Merge Queue E2E Core Tests + - Nightly E2E Tests + test_cmd: cd integration-tests/smoke && go test -test.run ^TestRMN_DifferentSigners$ -timeout 12m -test.parallel=1 -count=1 -json + pyroscope_env: ci-smoke-ccipv1_6-evm-simulated + test_env_vars: + E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 + E2E_JD_VERSION: 0.6.0 + E2E_RMN_RAGEPROXY_VERSION: master-5208d09 + E2E_RMN_AFN2PROXY_VERSION: master-5208d09 + +# Enable after flaking issue is resolved +# - id: smoke/ccip_rmn_test.go:^TestRMN_NotEnoughSigners$ +# path: integration-tests/smoke/ccip_rmn_test.go +# test_env_type: docker +# runs_on: ubuntu-latest +# triggers: +# - PR E2E Core Tests +# - Merge Queue E2E Core Tests +# - Nightly E2E Tests +# test_cmd: cd integration-tests/smoke && go test -test.run ^TestRMN_NotEnoughSigners$ -timeout 12m -test.parallel=1 -count=1 -json +# pyroscope_env: ci-smoke-ccipv1_6-evm-simulated +# test_env_vars: +# E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 +# E2E_JD_VERSION: 0.6.0 +# E2E_RMN_RAGEPROXY_VERSION: master-5208d09 +# E2E_RMN_AFN2PROXY_VERSION: master-5208d09 + + + - id: smoke/ccip_rmn_test.go:^TestRMN_DifferentRmnNodesForDifferentChains$ + path: integration-tests/smoke/ccip_rmn_test.go + test_env_type: docker + runs_on: ubuntu-latest + triggers: + - PR E2E Core Tests + - Merge Queue E2E Core Tests + - Nightly E2E Tests + test_cmd: cd integration-tests/smoke/ && go test -test.run ^TestRMN_DifferentRmnNodesForDifferentChains$ -timeout 12m -test.parallel=1 -count=1 -json + pyroscope_env: ci-smoke-ccipv1_6-evm-simulated + test_env_vars: + E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 + E2E_JD_VERSION: 0.6.0 + E2E_RMN_RAGEPROXY_VERSION: master-5208d09 + E2E_RMN_AFN2PROXY_VERSION: master-5208d09 + # END: CCIPv1.6 tests diff --git a/.github/workflows/find-new-flaky-tests.yml b/.github/workflows/find-new-flaky-tests.yml index 363305af468..0cdfb2b3091 100644 --- a/.github/workflows/find-new-flaky-tests.yml +++ b/.github/workflows/find-new-flaky-tests.yml @@ -100,7 +100,7 @@ jobs: - name: Install flakeguard shell: bash - run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@9c9821d6013f4838eb26970c2eef594f4d25398b + run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@8b02ed1703ef40755a4c46ff454cf4ff2e89275d - name: Find new or updated test packages if: ${{ inputs.runAllTests == false }} @@ -259,7 +259,7 @@ jobs: - name: Install flakeguard shell: bash - run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@9c9821d6013f4838eb26970c2eef594f4d25398b + run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@8b02ed1703ef40755a4c46ff454cf4ff2e89275d - name: Run tests with flakeguard shell: bash @@ -301,7 +301,7 @@ jobs: - name: Install flakeguard shell: bash - run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@9c9821d6013f4838eb26970c2eef594f4d25398b + run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/flakeguard@8b02ed1703ef40755a4c46ff454cf4ff2e89275d - name: Set combined 
test results id: set_test_results @@ -316,16 +316,16 @@ jobs: PATH=$PATH:$(go env GOPATH)/bin export PATH - # Use flakeguard aggregate-all to aggregate test results - flakeguard aggregate-all --results-path . --output-results ../all_tests.json + # Use flakeguard to aggregate all test results + flakeguard aggregate-results --results-path . --output-results ../all_tests.json # Count all tests ALL_TESTS_COUNT=$(jq 'length' ../all_tests.json) echo "All tests count: $ALL_TESTS_COUNT" echo "all_tests_count=$ALL_TESTS_COUNT" >> "$GITHUB_OUTPUT" - # Use flakeguard aggregate-failed to filter and output failed tests based on PassRatio threshold - flakeguard aggregate-failed --threshold "${{ inputs.runThreshold }}" --min-pass-ratio=${{ env.MIN_PASS_RATIO }} --results-path . --output-results ../failed_tests.json --output-logs ../failed_test_logs.json + # Use flakeguard to filter and output failed tests based on PassRatio threshold + flakeguard aggregate-results --filter-failed=true --threshold "${{ inputs.runThreshold }}" --min-pass-ratio=${{ env.MIN_PASS_RATIO }} --results-path . --output-results ../failed_tests.json --output-logs ../failed_test_logs.json # Count failed tests if [ -f "../failed_tests.json" ]; then @@ -347,6 +347,14 @@ jobs: threshold_percentage=$(echo '${{ inputs.runThreshold }}' | awk '{printf "%.0f", $1 * 100}') echo "threshold_percentage=$threshold_percentage" >> $GITHUB_OUTPUT + - name: Upload All Test Results as Artifact + if: ${{ fromJson(steps.set_test_results.outputs.all_tests_count) > 0 }} + uses: actions/upload-artifact@v4.4.3 + with: + path: all_tests.json + name: all-test-results.json + retention-days: 7 + - name: Upload Failed Test Results as Artifact if: ${{ fromJson(steps.set_test_results.outputs.failed_tests_count) > 0 }} uses: actions/upload-artifact@v4.4.3 diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 08383aed12d..1034a8fe834 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -210,7 +210,7 @@ jobs: contents: read needs: [build-chainlink, changes] if: github.event_name == 'pull_request' && ( needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true') - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0 + uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@27467f0073162e0ca77d33ce26f649b3d0f4c188 #ctf-run-tests@0.2.0 with: workflow_name: Run Core E2E Tests For PR chainlink_version: ${{ inputs.evm-ref || github.sha }} @@ -251,7 +251,7 @@ jobs: contents: read needs: [build-chainlink, changes] if: github.event_name == 'merge_group' && ( needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true') - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@5412507526722a7b1c5d719fa686eed5a1bc4035 # ctf-run-tests@0.2.0 + uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@27467f0073162e0ca77d33ce26f649b3d0f4c188 #ctf-run-tests@1.0.0 with: workflow_name: Run Core E2E Tests For Merge Queue chainlink_version: ${{ inputs.evm-ref || github.sha }} diff --git a/contracts/.changeset/wet-eyes-accept.md b/contracts/.changeset/wet-eyes-accept.md new file mode 100644 index 00000000000..ea783366220 --- /dev/null +++ b/contracts/.changeset/wet-eyes-accept.md @@ -0,0 +1,7 @@ +--- +'@chainlink/contracts': patch +--- + +Refactor MockCCIPRouter to support EVMExtraArgsV2 + +PR issue : CCIP-4288 
diff --git a/contracts/gas-snapshots/ccip.gas-snapshot b/contracts/gas-snapshots/ccip.gas-snapshot index 54b9cdf22d7..d864b30804d 100644 --- a/contracts/gas-snapshots/ccip.gas-snapshot +++ b/contracts/gas-snapshots/ccip.gas-snapshot @@ -257,11 +257,14 @@ MerkleMultiProofTest:test_EmptyLeaf_Revert() (gas: 3563) MerkleMultiProofTest:test_MerkleRoot256() (gas: 394891) MerkleMultiProofTest:test_MerkleRootSingleLeaf_Success() (gas: 3661) MerkleMultiProofTest:test_SpecSync_gas() (gas: 34152) -MockRouterTest:test_ccipSendWithInsufficientNativeTokens_Revert() (gas: 34081) -MockRouterTest:test_ccipSendWithInvalidMsgValue_Revert() (gas: 60886) -MockRouterTest:test_ccipSendWithLinkFeeTokenAndValidMsgValue_Success() (gas: 126575) -MockRouterTest:test_ccipSendWithLinkFeeTokenbutInsufficientAllowance_Revert() (gas: 63499) -MockRouterTest:test_ccipSendWithSufficientNativeFeeTokens_Success() (gas: 44056) +MockRouterTest:test_ccipSendWithEVMExtraArgsV1_Success() (gas: 110095) +MockRouterTest:test_ccipSendWithEVMExtraArgsV2_Success() (gas: 132614) +MockRouterTest:test_ccipSendWithInsufficientNativeTokens_Revert() (gas: 34059) +MockRouterTest:test_ccipSendWithInvalidEVMExtraArgs_Revert() (gas: 106706) +MockRouterTest:test_ccipSendWithInvalidMsgValue_Revert() (gas: 60864) +MockRouterTest:test_ccipSendWithLinkFeeTokenAndValidMsgValue_Success() (gas: 126685) +MockRouterTest:test_ccipSendWithLinkFeeTokenbutInsufficientAllowance_Revert() (gas: 63477) +MockRouterTest:test_ccipSendWithSufficientNativeFeeTokens_Success() (gas: 44070) MultiAggregateRateLimiter_applyRateLimiterConfigUpdates:test_ConfigRateMoreThanCapacity_Revert() (gas: 16554) MultiAggregateRateLimiter_applyRateLimiterConfigUpdates:test_ConfigRateZero_Revert() (gas: 16634) MultiAggregateRateLimiter_applyRateLimiterConfigUpdates:test_DiableConfigCapacityNonZero_Revert() (gas: 16585) diff --git a/contracts/src/v0.8/ccip/test/mocks/MockRouter.sol b/contracts/src/v0.8/ccip/test/mocks/MockRouter.sol index 0abe4fdb7e5..3ded9fd78f0 100644 --- a/contracts/src/v0.8/ccip/test/mocks/MockRouter.sol +++ b/contracts/src/v0.8/ccip/test/mocks/MockRouter.sol @@ -121,12 +121,19 @@ contract MockCCIPRouter is IRouter, IRouterClient { function _fromBytes( bytes calldata extraArgs - ) internal pure returns (Client.EVMExtraArgsV1 memory) { + ) internal pure returns (Client.EVMExtraArgsV2 memory) { if (extraArgs.length == 0) { - return Client.EVMExtraArgsV1({gasLimit: DEFAULT_GAS_LIMIT}); + return Client.EVMExtraArgsV2({gasLimit: DEFAULT_GAS_LIMIT, allowOutOfOrderExecution: false}); } - if (bytes4(extraArgs) != Client.EVM_EXTRA_ARGS_V1_TAG) revert InvalidExtraArgsTag(); - return abi.decode(extraArgs[4:], (Client.EVMExtraArgsV1)); + + bytes4 extraArgsTag = bytes4(extraArgs); + if (extraArgsTag == Client.EVM_EXTRA_ARGS_V2_TAG) { + return abi.decode(extraArgs[4:], (Client.EVMExtraArgsV2)); + } else if (extraArgsTag == Client.EVM_EXTRA_ARGS_V1_TAG) { + return Client.EVMExtraArgsV2({gasLimit: abi.decode(extraArgs[4:], (uint256)), allowOutOfOrderExecution: false}); + } + + revert InvalidExtraArgsTag(); } /// @notice Always returns true to make sure this check can be performed on any chain. 
diff --git a/contracts/src/v0.8/ccip/test/mocks/test/MockRouterTest.t.sol b/contracts/src/v0.8/ccip/test/mocks/test/MockRouterTest.t.sol index cd0aabf1776..549d6b8f843 100644 --- a/contracts/src/v0.8/ccip/test/mocks/test/MockRouterTest.t.sol +++ b/contracts/src/v0.8/ccip/test/mocks/test/MockRouterTest.t.sol @@ -65,4 +65,24 @@ contract MockRouterTest is TokenSetup { mockRouter.ccipSend(MOCK_CHAIN_SELECTOR, message); } + + function test_ccipSendWithEVMExtraArgsV1_Success() public { + Client.EVMExtraArgsV1 memory extraArgs = Client.EVMExtraArgsV1({gasLimit: 500_000}); + message.extraArgs = Client._argsToBytes(extraArgs); + mockRouter.ccipSend{value: 0.1 ether}(MOCK_CHAIN_SELECTOR, message); + } + + function test_ccipSendWithEVMExtraArgsV2_Success() public { + Client.EVMExtraArgsV2 memory extraArgs = Client.EVMExtraArgsV2({gasLimit: 500_000, allowOutOfOrderExecution: true}); + message.extraArgs = Client._argsToBytes(extraArgs); + mockRouter.ccipSend{value: 0.1 ether}(MOCK_CHAIN_SELECTOR, message); + } + + function test_ccipSendWithInvalidEVMExtraArgs_Revert() public { + uint256 gasLimit = 500_000; + bytes4 invalidExtraArgsTag = bytes4(keccak256("CCIP EVMExtraArgsInvalid")); + message.extraArgs = abi.encodeWithSelector(invalidExtraArgsTag, gasLimit); + vm.expectRevert(MockCCIPRouter.InvalidExtraArgsTag.selector); + mockRouter.ccipSend{value: 0.1 ether}(MOCK_CHAIN_SELECTOR, message); + } } diff --git a/core/capabilities/targets/write_target.go b/core/capabilities/targets/write_target.go index 9315a1ee199..8fe0d58018a 100644 --- a/core/capabilities/targets/write_target.go +++ b/core/capabilities/targets/write_target.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "fmt" "math/big" + "strings" "time" "github.com/ethereum/go-ethereum/common" @@ -186,15 +187,23 @@ func evaluate(rawRequest capabilities.CapabilityRequest) (r Request, err error) } if hex.EncodeToString(reportMetadata.WorkflowExecutionID[:]) != rawRequest.Metadata.WorkflowExecutionID { - return r, fmt.Errorf("WorkflowExecutionID in the report does not match WorkflowExecutionID in the request metadata. Report WorkflowExecutionID: %+v, request WorkflowExecutionID: %+v", reportMetadata.WorkflowExecutionID, rawRequest.Metadata.WorkflowExecutionID) + return r, fmt.Errorf("WorkflowExecutionID in the report does not match WorkflowExecutionID in the request metadata. Report WorkflowExecutionID: %+v, request WorkflowExecutionID: %+v", hex.EncodeToString(reportMetadata.WorkflowExecutionID[:]), rawRequest.Metadata.WorkflowExecutionID) } - if hex.EncodeToString(reportMetadata.WorkflowOwner[:]) != rawRequest.Metadata.WorkflowOwner { - return r, fmt.Errorf("WorkflowOwner in the report does not match WorkflowOwner in the request metadata. Report WorkflowOwner: %+v, request WorkflowOwner: %+v", reportMetadata.WorkflowOwner, rawRequest.Metadata.WorkflowOwner) + // case-insensitive verification of the owner address (so that a check-summed address matches its non-checksummed version). + if !strings.EqualFold(hex.EncodeToString(reportMetadata.WorkflowOwner[:]), rawRequest.Metadata.WorkflowOwner) { + return r, fmt.Errorf("WorkflowOwner in the report does not match WorkflowOwner in the request metadata. Report WorkflowOwner: %+v, request WorkflowOwner: %+v", hex.EncodeToString(reportMetadata.WorkflowOwner[:]), rawRequest.Metadata.WorkflowOwner) } - if hex.EncodeToString(reportMetadata.WorkflowName[:]) != rawRequest.Metadata.WorkflowName { - return r, fmt.Errorf("WorkflowName in the report does not match WorkflowName in the request metadata. 
Report WorkflowName: %+v, request WorkflowName: %+v", reportMetadata.WorkflowName, rawRequest.Metadata.WorkflowName) + // workflowNames are padded to 10bytes + decodedName, err := hex.DecodeString(rawRequest.Metadata.WorkflowName) + if err != nil { + return r, err + } + var workflowName [10]byte + copy(workflowName[:], decodedName) + if !bytes.Equal(reportMetadata.WorkflowName[:], workflowName[:]) { + return r, fmt.Errorf("WorkflowName in the report does not match WorkflowName in the request metadata. Report WorkflowName: %+v, request WorkflowName: %+v", hex.EncodeToString(reportMetadata.WorkflowName[:]), hex.EncodeToString(workflowName[:])) } if hex.EncodeToString(reportMetadata.WorkflowCID[:]) != rawRequest.Metadata.WorkflowID { diff --git a/core/capabilities/targets/write_target_test.go b/core/capabilities/targets/write_target_test.go index 38136f07df0..801bdf2ea9a 100644 --- a/core/capabilities/targets/write_target_test.go +++ b/core/capabilities/targets/write_target_test.go @@ -42,6 +42,10 @@ func TestWriteTarget(t *testing.T) { require.NoError(t, err) reportID := [2]byte{0x00, 0x01} + var workflowName [10]byte + copy(workflowName[:], []byte("name")) + workflowOwnerString := "219BFD3D78fbb740c614432975CBE829E26C490e" + workflowOwner := common.HexToAddress(workflowOwnerString) reportMetadata := targets.ReportV1Metadata{ Version: 1, WorkflowExecutionID: [32]byte{}, @@ -49,8 +53,8 @@ func TestWriteTarget(t *testing.T) { DonID: 0, DonConfigVersion: 0, WorkflowCID: [32]byte{}, - WorkflowName: [10]byte{}, - WorkflowOwner: [20]byte{}, + WorkflowName: workflowName, + WorkflowOwner: workflowOwner, ReportID: reportID, } @@ -69,7 +73,7 @@ func TestWriteTarget(t *testing.T) { validMetadata := capabilities.RequestMetadata{ WorkflowID: hex.EncodeToString(reportMetadata.WorkflowCID[:]), - WorkflowOwner: hex.EncodeToString(reportMetadata.WorkflowOwner[:]), + WorkflowOwner: workflowOwnerString, WorkflowName: hex.EncodeToString(reportMetadata.WorkflowName[:]), WorkflowExecutionID: hex.EncodeToString(reportMetadata.WorkflowExecutionID[:]), } @@ -218,4 +222,44 @@ func TestWriteTarget(t *testing.T) { _, err2 := writeTarget.Execute(ctx, req) require.Error(t, err2) }) + + tests := []struct { + name string + modifyRequest func(*capabilities.CapabilityRequest) + expectedError string + }{ + { + name: "non-matching WorkflowOwner", + modifyRequest: func(req *capabilities.CapabilityRequest) { + req.Metadata.WorkflowOwner = "nonmatchingowner" + }, + expectedError: "WorkflowOwner in the report does not match WorkflowOwner in the request metadata", + }, + { + name: "non-matching WorkflowName", + modifyRequest: func(req *capabilities.CapabilityRequest) { + req.Metadata.WorkflowName = hex.EncodeToString([]byte("nonmatchingname")) + }, + expectedError: "WorkflowName in the report does not match WorkflowName in the request metadata", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := capabilities.CapabilityRequest{ + Metadata: validMetadata, + Config: config, + Inputs: validInputs, + } + tt.modifyRequest(&req) + + _, err := writeTarget.Execute(ctx, req) + if tt.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedError) + } + }) + } } diff --git a/core/cmd/shell.go b/core/cmd/shell.go index 966fa1a0ff8..e4f4c5bd6e3 100644 --- a/core/cmd/shell.go +++ b/core/cmd/shell.go @@ -246,9 +246,9 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G } evmFactoryCfg := 
chainlink.EVMFactoryConfig{ - CSAETHKeystore: keyStore, - ChainOpts: legacyevm.ChainOpts{AppConfig: cfg, MailMon: mailMon, DS: ds}, - MercuryTransmitter: cfg.Mercury().Transmitter(), + CSAETHKeystore: keyStore, + ChainOpts: legacyevm.ChainOpts{AppConfig: cfg, MailMon: mailMon, DS: ds}, + MercuryConfig: cfg.Mercury(), } // evm always enabled for backward compatibility // TODO BCF-2510 this needs to change in order to clear the path for EVM extraction diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml index e0fc76f449c..edd1494e4f0 100644 --- a/core/config/docs/core.toml +++ b/core/config/docs/core.toml @@ -689,6 +689,10 @@ TransmitQueueMaxSize = 10_000 # Default # when sending a message to the mercury server, before aborting and considering # the transmission to be failed. TransmitTimeout = "5s" # Default +# TransmitConcurrency is the max number of concurrent transmits to each server. +# +# Only has an effect with LLO jobs. +TransmitConcurrency = 100 # Default # Telemetry holds OTEL settings. # This data includes open telemetry metrics, traces, & logs. diff --git a/core/config/mercury_config.go b/core/config/mercury_config.go index d1b4b142e20..2e58ff0ee9d 100644 --- a/core/config/mercury_config.go +++ b/core/config/mercury_config.go @@ -20,6 +20,7 @@ type MercuryTLS interface { type MercuryTransmitter interface { TransmitQueueMaxSize() uint32 TransmitTimeout() commonconfig.Duration + TransmitConcurrency() uint32 } type Mercury interface { diff --git a/core/config/toml/types.go b/core/config/toml/types.go index d9302b81fb0..610d18b6b4d 100644 --- a/core/config/toml/types.go +++ b/core/config/toml/types.go @@ -1330,6 +1330,7 @@ func (m *MercuryTLS) ValidateConfig() (err error) { type MercuryTransmitter struct { TransmitQueueMaxSize *uint32 TransmitTimeout *commonconfig.Duration + TransmitConcurrency *uint32 } func (m *MercuryTransmitter) setFrom(f *MercuryTransmitter) { @@ -1339,6 +1340,9 @@ func (m *MercuryTransmitter) setFrom(f *MercuryTransmitter) { if v := f.TransmitTimeout; v != nil { m.TransmitTimeout = v } + if v := f.TransmitConcurrency; v != nil { + m.TransmitConcurrency = v + } } type Mercury struct { diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 32c63e7944c..29515df7034 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -418,8 +418,8 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn MailMon: mailMon, DS: ds, }, - CSAETHKeystore: keyStore, - MercuryTransmitter: cfg.Mercury().Transmitter(), + CSAETHKeystore: keyStore, + MercuryConfig: cfg.Mercury(), } if cfg.EVMEnabled() { diff --git a/core/scripts/chaincli/README.md b/core/scripts/chaincli/README.md index 992250ae77c..bd32c3cbf11 100644 --- a/core/scripts/chaincli/README.md +++ b/core/scripts/chaincli/README.md @@ -101,7 +101,7 @@ You can also combine the `bootstrap` and `launch-and-test` commands into a singl ```shell ./chaincli keeper launch-and-test --bootstrap ``` -In the output of this command, you will see the http address of the nodes, e.g. `http://localhost:6688`. This is the Chainlink Operator GUI. You can use the default username `notreal@fakeemail.ch` and password `fj293fbBnlQ!f9vNs~#` to log in. +In the output of this command, you will see the HTTP address of the nodes, e.g. `http://localhost:6688`. This is the Chainlink Operator GUI. You can use the default username `notreal@fakeemail.ch` and password `fj293fbBnlQ!f9vNs` to log in. 
### Logs Now that the nodes are running, you can use the `logs` subcommand to stream the output of the containers to your local terminal: diff --git a/core/scripts/chaincli/handler/handler.go b/core/scripts/chaincli/handler/handler.go index d40ee84a312..50576fe0fe8 100644 --- a/core/scripts/chaincli/handler/handler.go +++ b/core/scripts/chaincli/handler/handler.go @@ -44,7 +44,7 @@ import ( const ( defaultChainlinkNodeLogin = "notreal@fakeemail.ch" - defaultChainlinkNodePassword = "fj293fbBnlQ!f9vNs~#" + defaultChainlinkNodePassword = "fj293fbBnlQ!f9vNs" ethKeysEndpoint = "/v2/keys/eth" ocr2KeysEndpoint = "/v2/keys/ocr2" p2pKeysEndpoint = "/v2/keys/p2p" diff --git a/core/scripts/go.mod b/core/scripts/go.mod index 7cefe5e8808..c8d2adfe836 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -299,7 +299,7 @@ require ( github.com/smartcontractkit/chain-selectors v1.0.29 // indirect github.com/smartcontractkit/chainlink-ccip v0.0.0-20241118091009-43c2b4804cec // indirect github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect + github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect diff --git a/core/scripts/go.sum b/core/scripts/go.sum index f9b31946475..827a56054d7 100644 --- a/core/scripts/go.sum +++ b/core/scripts/go.sum @@ -1098,8 +1098,8 @@ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef06 github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef068/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e/go.mod h1:iK3BNHKCLgSgkOyiu3iE7sfZ20Qnuk7xwjV/yO/6gnQ= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 h1:1BMTG66HnCIz+KMBWGvyzELNM6VHGwv2WKFhN7H49Sg= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57/go.mod h1:QPiorgpbLv4+Jn4YO6xxU4ftTu4T3QN8HwX3ImP59DE= github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6AnNt+Wg64sVG+XSA49c= github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4= github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 h1:0ewLMbAz3rZrovdRUCgd028yOXX8KigB4FndAUdI2kM= diff --git a/core/scripts/keystone/src/01_deploy_contracts_cmd.go b/core/scripts/keystone/src/01_deploy_contracts_cmd.go index 6fc3f1399cf..24fcaacd36c 100644 --- a/core/scripts/keystone/src/01_deploy_contracts_cmd.go +++ b/core/scripts/keystone/src/01_deploy_contracts_cmd.go @@ -157,7 +157,7 @@ func deploy( func setOCR3Config( env helpers.Environment, - ocrConfig ksdeploy.Orc2drOracleConfig, + ocrConfig ksdeploy.OCR2OracleConfig, artefacts string, ) { loadedContracts, err := 
LoadDeployedContracts(artefacts) diff --git a/core/scripts/keystone/src/88_gen_ocr3_config.go b/core/scripts/keystone/src/88_gen_ocr3_config.go index a437410346a..f4292e9a1d4 100644 --- a/core/scripts/keystone/src/88_gen_ocr3_config.go +++ b/core/scripts/keystone/src/88_gen_ocr3_config.go @@ -10,7 +10,7 @@ func mustReadConfig(fileName string) (output ksdeploy.TopLevelConfigSource) { return mustParseJSON[ksdeploy.TopLevelConfigSource](fileName) } -func generateOCR3Config(nodeList string, configFile string, chainID int64, pubKeysPath string) ksdeploy.Orc2drOracleConfig { +func generateOCR3Config(nodeList string, configFile string, chainID int64, pubKeysPath string) ksdeploy.OCR2OracleConfig { topLevelCfg := mustReadConfig(configFile) cfg := topLevelCfg.OracleConfig cfg.OCRSecrets = deployment.XXXGenerateTestOCRSecrets() diff --git a/core/services/chainlink/config_mercury.go b/core/services/chainlink/config_mercury.go index bc4aed6fb07..0e56105406b 100644 --- a/core/services/chainlink/config_mercury.go +++ b/core/services/chainlink/config_mercury.go @@ -50,6 +50,10 @@ func (m *mercuryTransmitterConfig) TransmitTimeout() commonconfig.Duration { return *m.c.TransmitTimeout } +func (m *mercuryTransmitterConfig) TransmitConcurrency() uint32 { + return *m.c.TransmitConcurrency +} + type mercuryConfig struct { c toml.Mercury s toml.MercurySecrets diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go index 76b80672dbb..e04d6d7e25b 100644 --- a/core/services/chainlink/config_test.go +++ b/core/services/chainlink/config_test.go @@ -838,6 +838,7 @@ func TestConfig_Marshal(t *testing.T) { Transmitter: toml.MercuryTransmitter{ TransmitQueueMaxSize: ptr(uint32(123)), TransmitTimeout: commoncfg.MustNewDuration(234 * time.Second), + TransmitConcurrency: ptr(uint32(456)), }, VerboseLogging: ptr(true), } @@ -1348,6 +1349,7 @@ CertFile = '/path/to/cert.pem' [Mercury.Transmitter] TransmitQueueMaxSize = 123 TransmitTimeout = '3m54s' +TransmitConcurrency = 456 `}, {"full", full, fullTOML}, {"multi-chain", multiChain, multiChainTOML}, diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go index 3740878fd19..cec7e5bb48c 100644 --- a/core/services/chainlink/relayer_factory.go +++ b/core/services/chainlink/relayer_factory.go @@ -54,7 +54,7 @@ func (r *RelayerFactory) NewDummy(config DummyFactoryConfig) (loop.Relayer, erro type EVMFactoryConfig struct { legacyevm.ChainOpts evmrelay.CSAETHKeystore - coreconfig.MercuryTransmitter + MercuryConfig coreconfig.Mercury } func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (map[types.RelayID]evmrelay.LOOPRelayAdapter, error) { @@ -83,7 +83,7 @@ func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (m DS: ccOpts.DS, CSAETHKeystore: config.CSAETHKeystore, MercuryPool: r.MercuryPool, - TransmitterConfig: config.MercuryTransmitter, + MercuryConfig: config.MercuryConfig, CapabilitiesRegistry: r.CapabilitiesRegistry, HTTPClient: r.HTTPClient, RetirementReportCache: r.RetirementReportCache, diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml index cd51afac5f8..0f26b02ab6f 100644 --- a/core/services/chainlink/testdata/config-empty-effective.toml +++ b/core/services/chainlink/testdata/config-empty-effective.toml @@ -237,6 +237,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] 
[Capabilities.Peering] diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml index c6a5302a459..3191ce576ed 100644 --- a/core/services/chainlink/testdata/config-full.toml +++ b/core/services/chainlink/testdata/config-full.toml @@ -247,6 +247,7 @@ CertFile = '/path/to/cert.pem' [Mercury.Transmitter] TransmitQueueMaxSize = 123 TransmitTimeout = '3m54s' +TransmitConcurrency = 456 [Capabilities] [Capabilities.Peering] diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml index e8da8142181..5f52c06ca1f 100644 --- a/core/services/chainlink/testdata/config-multi-chain-effective.toml +++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml @@ -237,6 +237,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/core/services/llo/codecs.go b/core/services/llo/codecs.go index 7813c8923ea..2ccadfe330b 100644 --- a/core/services/llo/codecs.go +++ b/core/services/llo/codecs.go @@ -1,6 +1,7 @@ package llo import ( + "github.com/smartcontractkit/chainlink-common/pkg/logger" llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" "github.com/smartcontractkit/chainlink-data-streams/llo" @@ -8,11 +9,11 @@ import ( ) // NOTE: All supported codecs must be specified here -func NewReportCodecs() map[llotypes.ReportFormat]llo.ReportCodec { +func NewReportCodecs(lggr logger.Logger) map[llotypes.ReportFormat]llo.ReportCodec { codecs := make(map[llotypes.ReportFormat]llo.ReportCodec) codecs[llotypes.ReportFormatJSON] = llo.JSONReportCodec{} - codecs[llotypes.ReportFormatEVMPremiumLegacy] = evm.ReportCodecPremiumLegacy{} + codecs[llotypes.ReportFormatEVMPremiumLegacy] = evm.NewReportCodecPremiumLegacy(lggr) return codecs } diff --git a/core/services/llo/codecs_test.go b/core/services/llo/codecs_test.go index 4a7f3f65571..3af881a1de0 100644 --- a/core/services/llo/codecs_test.go +++ b/core/services/llo/codecs_test.go @@ -6,10 +6,11 @@ import ( "github.com/stretchr/testify/assert" llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" + "github.com/smartcontractkit/chainlink/v2/core/logger" ) func Test_NewReportCodecs(t *testing.T) { - c := NewReportCodecs() + c := NewReportCodecs(logger.TestLogger(t)) assert.Contains(t, c, llotypes.ReportFormatJSON, "expected JSON to be supported") assert.Contains(t, c, llotypes.ReportFormatEVMPremiumLegacy, "expected EVMPremiumLegacy to be supported") diff --git a/core/services/llo/data_source.go b/core/services/llo/data_source.go index ef333f821a1..0585dec49dc 100644 --- a/core/services/llo/data_source.go +++ b/core/services/llo/data_source.go @@ -3,8 +3,10 @@ package llo import ( "context" "fmt" + "slices" "sort" "sync" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -85,11 +87,7 @@ func newDataSource(lggr logger.Logger, registry Registry, t Telemeter) *dataSour // Observe looks up all streams in the registry and populates a map of stream ID => value func (d *dataSource) Observe(ctx context.Context, streamValues llo.StreamValues, opts llo.DSOpts) error { - var wg sync.WaitGroup - wg.Add(len(streamValues)) - var svmu sync.Mutex - var errs []ErrObservationFailed - var errmu sync.Mutex + now := time.Now() if opts.VerboseLogging() { streamIDs := make([]streams.StreamID, 0, len(streamValues)) @@ -100,6 
+98,13 @@ func (d *dataSource) Observe(ctx context.Context, streamValues llo.StreamValues, d.lggr.Debugw("Observing streams", "streamIDs", streamIDs, "configDigest", opts.ConfigDigest(), "seqNr", opts.OutCtx().SeqNr) } + var wg sync.WaitGroup + wg.Add(len(streamValues)) + + var mu sync.Mutex + successfulStreamIDs := make([]streams.StreamID, 0, len(streamValues)) + var errs []ErrObservationFailed + for _, streamID := range maps.Keys(streamValues) { go func(streamID llotypes.StreamID) { defer wg.Done() @@ -108,17 +113,17 @@ func (d *dataSource) Observe(ctx context.Context, streamValues llo.StreamValues, stream, exists := d.registry.Get(streamID) if !exists { - errmu.Lock() + mu.Lock() errs = append(errs, ErrObservationFailed{streamID: streamID, reason: fmt.Sprintf("missing stream: %d", streamID)}) - errmu.Unlock() + mu.Unlock() promMissingStreamCount.WithLabelValues(fmt.Sprintf("%d", streamID)).Inc() return } run, trrs, err := stream.Run(ctx) if err != nil { - errmu.Lock() + mu.Lock() errs = append(errs, ErrObservationFailed{inner: err, run: run, streamID: streamID, reason: "pipeline run failed"}) - errmu.Unlock() + mu.Unlock() promObservationErrorCount.WithLabelValues(fmt.Sprintf("%d", streamID)).Inc() // TODO: Consolidate/reduce telemetry. We should send all observation results in a single packet // https://smartcontract-it.atlassian.net/browse/MERC-6290 @@ -129,44 +134,50 @@ func (d *dataSource) Observe(ctx context.Context, streamValues llo.StreamValues, // https://smartcontract-it.atlassian.net/browse/MERC-6290 val, err = ExtractStreamValue(trrs) if err != nil { - errmu.Lock() + mu.Lock() errs = append(errs, ErrObservationFailed{inner: err, run: run, streamID: streamID, reason: "failed to extract big.Int"}) - errmu.Unlock() + mu.Unlock() return } d.t.EnqueueV3PremiumLegacy(run, trrs, streamID, opts, val, nil) + mu.Lock() + defer mu.Unlock() + + successfulStreamIDs = append(successfulStreamIDs, streamID) if val != nil { - svmu.Lock() - defer svmu.Unlock() streamValues[streamID] = val } }(streamID) } wg.Wait() + elapsed := time.Since(now) - // Failed observations are always logged at warn level - var failedStreamIDs []streams.StreamID - if len(errs) > 0 { + // Only log on errors or if VerboseLogging is turned on + if len(errs) > 0 || opts.VerboseLogging() { + slices.Sort(successfulStreamIDs) sort.Slice(errs, func(i, j int) bool { return errs[i].streamID < errs[j].streamID }) - failedStreamIDs = make([]streams.StreamID, len(errs)) + + failedStreamIDs := make([]streams.StreamID, len(errs)) errStrs := make([]string, len(errs)) for i, e := range errs { errStrs[i] = e.String() failedStreamIDs[i] = e.streamID } - d.lggr.Warnw("Observation failed for streams", "failedStreamIDs", failedStreamIDs, "errs", errStrs, "configDigest", opts.ConfigDigest(), "seqNr", opts.OutCtx().SeqNr) - } - if opts.VerboseLogging() { - successes := make([]streams.StreamID, 0, len(streamValues)) - for strmID := range streamValues { - successes = append(successes, strmID) + lggr := logger.With(d.lggr, "elapsed", elapsed, "nSuccessfulStreams", len(successfulStreamIDs), "nFailedStreams", len(failedStreamIDs), "successfulStreamIDs", successfulStreamIDs, "failedStreamIDs", failedStreamIDs, "errs", errStrs, "configDigest", opts.ConfigDigest(), "seqNr", opts.OutCtx().SeqNr) + + if opts.VerboseLogging() { + lggr = logger.With(lggr, "streamValues", streamValues) + } + + if len(errs) == 0 && opts.VerboseLogging() { + lggr.Infow("Observation succeeded for all streams") + } else if len(errs) > 0 { + lggr.Warnw("Observation failed 
for streams") } - sort.Slice(successes, func(i, j int) bool { return successes[i] < successes[j] }) - d.lggr.Debugw("Observation complete", "successfulStreamIDs", successes, "failedStreamIDs", failedStreamIDs, "configDigest", opts.ConfigDigest(), "values", streamValues, "seqNr", opts.OutCtx().SeqNr) } return nil diff --git a/core/services/llo/delegate.go b/core/services/llo/delegate.go index f5f9b5f05f1..fabc8dc2682 100644 --- a/core/services/llo/delegate.go +++ b/core/services/llo/delegate.go @@ -19,6 +19,7 @@ import ( "github.com/smartcontractkit/chainlink-data-streams/llo" datastreamsllo "github.com/smartcontractkit/chainlink-data-streams/llo" + corelogger "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/streams" ) @@ -91,7 +92,13 @@ func NewDelegate(cfg DelegateConfig) (job.ServiceCtx, error) { if cfg.ShouldRetireCache == nil { return nil, errors.New("ShouldRetireCache must not be nil") } - reportCodecs := NewReportCodecs() + var codecLggr logger.Logger + if cfg.ReportingPluginConfig.VerboseLogging { + codecLggr = logger.Named(lggr, "ReportCodecs") + } else { + codecLggr = corelogger.NullLogger + } + reportCodecs := NewReportCodecs(codecLggr) var t TelemeterService if cfg.CaptureEATelemetry { @@ -126,7 +133,7 @@ func (d *delegate) Start(ctx context.Context) error { case 1: lggr = logger.With(lggr, "instanceType", "Green") } - ocrLogger := logger.NewOCRWrapper(lggr, d.cfg.TraceLogging, func(msg string) { + ocrLogger := logger.NewOCRWrapper(NewSuppressedLogger(lggr, d.cfg.ReportingPluginConfig.VerboseLogging), d.cfg.TraceLogging, func(msg string) { // TODO: do we actually need to DB-persist errors? // MERC-3524 }) @@ -144,7 +151,7 @@ func (d *delegate) Start(ctx context.Context) error { OffchainKeyring: d.cfg.OffchainKeyring, OnchainKeyring: d.cfg.OnchainKeyring, ReportingPluginFactory: datastreamsllo.NewPluginFactory( - d.cfg.ReportingPluginConfig, psrrc, d.src, d.cfg.RetirementReportCodec, d.cfg.ChannelDefinitionCache, d.ds, logger.Named(lggr, "LLOReportingPlugin"), llo.EVMOnchainConfigCodec{}, d.reportCodecs, + d.cfg.ReportingPluginConfig, psrrc, d.src, d.cfg.RetirementReportCodec, d.cfg.ChannelDefinitionCache, d.ds, logger.Named(lggr, "ReportingPlugin"), llo.EVMOnchainConfigCodec{}, d.reportCodecs, ), MetricsRegisterer: prometheus.WrapRegistererWith(map[string]string{"job_name": d.cfg.JobName.ValueOrZero()}, prometheus.DefaultRegisterer), }) diff --git a/core/services/llo/evm/fees_test.go b/core/services/llo/evm/fees_test.go index 16ee98db7df..33888de14ec 100644 --- a/core/services/llo/evm/fees_test.go +++ b/core/services/llo/evm/fees_test.go @@ -42,4 +42,14 @@ func Test_Fees(t *testing.T) { fee := CalculateFee(tokenPriceInUSD, BaseUSDFee) assert.Equal(t, big.NewInt(0), fee) }) + + t.Run("ridiculously high value rounds down fee to zero", func(t *testing.T) { + // 20dp + tokenPriceInUSD, err := decimal.NewFromString("12984833000000000000") + require.NoError(t, err) + BaseUSDFee, err = decimal.NewFromString("0.1") + require.NoError(t, err) + fee := CalculateFee(tokenPriceInUSD, BaseUSDFee) + assert.Equal(t, big.NewInt(0), fee) + }) } diff --git a/core/services/llo/evm/report_codec_premium_legacy.go b/core/services/llo/evm/report_codec_premium_legacy.go index 9bca9587a0e..700ba6e6533 100644 --- a/core/services/llo/evm/report_codec_premium_legacy.go +++ b/core/services/llo/evm/report_codec_premium_legacy.go @@ -17,8 +17,8 @@ import ( v3 
"github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3" "github.com/smartcontractkit/chainlink-data-streams/llo" + "github.com/smartcontractkit/chainlink-common/pkg/logger" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" - "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury" reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec" reporttypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/types" @@ -28,10 +28,12 @@ var ( _ llo.ReportCodec = ReportCodecPremiumLegacy{} ) -type ReportCodecPremiumLegacy struct{ logger.Logger } +type ReportCodecPremiumLegacy struct { + logger.Logger +} -func NewReportCodecPremiumLegacy(lggr logger.Logger) llo.ReportCodec { - return ReportCodecPremiumLegacy{lggr.Named("ReportCodecPremiumLegacy")} +func NewReportCodecPremiumLegacy(lggr logger.Logger) ReportCodecPremiumLegacy { + return ReportCodecPremiumLegacy{logger.Sugared(lggr).Named("ReportCodecPremiumLegacy")} } type ReportFormatEVMPremiumLegacyOpts struct { @@ -92,6 +94,9 @@ func (r ReportCodecPremiumLegacy) Encode(ctx context.Context, report llo.Report, Bid: quote.Bid.Mul(multiplier).BigInt(), Ask: quote.Ask.Mul(multiplier).BigInt(), } + + r.Logger.Debugw("Encoding report", "report", report, "opts", opts, "nativePrice", nativePrice, "linkPrice", linkPrice, "quote", quote, "multiplier", multiplier, "rf", rf) + return codec.BuildReport(ctx, rf) } diff --git a/core/services/llo/evm/report_codec_premium_legacy_test.go b/core/services/llo/evm/report_codec_premium_legacy_test.go index 804555d06be..d5d816da1d5 100644 --- a/core/services/llo/evm/report_codec_premium_legacy_test.go +++ b/core/services/llo/evm/report_codec_premium_legacy_test.go @@ -12,6 +12,7 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/chainlink-common/pkg/utils/tests" + "github.com/smartcontractkit/chainlink/v2/core/logger" reporttypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/types" llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" @@ -32,7 +33,7 @@ func newValidPremiumLegacyReport() llo.Report { } func Test_ReportCodecPremiumLegacy(t *testing.T) { - rc := ReportCodecPremiumLegacy{} + rc := ReportCodecPremiumLegacy{logger.TestLogger(t)} feedID := [32]uint8{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} cd := llotypes.ChannelDefinition{Opts: llotypes.ChannelOpts(fmt.Sprintf(`{"baseUSDFee":"10.50","expirationWindow":60,"feedId":"0x%x","multiplier":10}`, feedID))} diff --git a/core/services/llo/mercurytransmitter/queue.go b/core/services/llo/mercurytransmitter/queue.go index a5a606c5b32..eae9a0b9d0c 100644 --- a/core/services/llo/mercurytransmitter/queue.go +++ b/core/services/llo/mercurytransmitter/queue.go @@ -95,8 +95,8 @@ func (tq *transmitQueue) Push(t *Transmission) (ok bool) { if tq.maxlen != 0 && tq.pq.Len() == tq.maxlen { // evict oldest entry to make room - tq.lggr.Criticalf("Transmit queue is full; dropping oldest transmission (reached max length of %d)", tq.maxlen) removed := heap.PopMax(tq.pq) + tq.lggr.Criticalw(fmt.Sprintf("Transmit queue is full; dropping oldest transmission (reached max length of %d)", tq.maxlen), "transmission", removed) if removed, ok := removed.(*Transmission); ok { 
tq.asyncDeleter.AsyncDelete(removed.Hash()) } diff --git a/core/services/llo/mercurytransmitter/server.go b/core/services/llo/mercurytransmitter/server.go index 70e76655961..84b2c2889fa 100644 --- a/core/services/llo/mercurytransmitter/server.go +++ b/core/services/llo/mercurytransmitter/server.go @@ -10,12 +10,17 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-common/pkg/services" llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo" "github.com/smartcontractkit/chainlink-data-streams/llo" + corelogger "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/llo/evm" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb" @@ -49,10 +54,15 @@ var ( ) ) +type ReportPacker interface { + Pack(digest types.ConfigDigest, seqNr uint64, report ocr2types.Report, sigs []ocr2types.AttributedOnchainSignature) ([]byte, error) +} + // A server handles the queue for a given mercury server type server struct { - lggr logger.SugaredLogger + lggr logger.SugaredLogger + verboseLogging bool transmitTimeout time.Duration @@ -64,6 +74,9 @@ type server struct { url string + evmPremiumLegacyPacker ReportPacker + jsonPacker ReportPacker + transmitSuccessCount prometheus.Counter transmitDuplicateCount prometheus.Counter transmitConnectionErrorCount prometheus.Counter @@ -77,17 +90,27 @@ type QueueConfig interface { TransmitTimeout() commonconfig.Duration } -func newServer(lggr logger.Logger, cfg QueueConfig, client wsrpc.Client, orm ORM, serverURL string) *server { +func newServer(lggr logger.Logger, verboseLogging bool, cfg QueueConfig, client wsrpc.Client, orm ORM, serverURL string) *server { pm := NewPersistenceManager(lggr, orm, serverURL, int(cfg.TransmitQueueMaxSize()), flushDeletesFrequency, pruneFrequency) donIDStr := fmt.Sprintf("%d", pm.DonID()) + var codecLggr logger.Logger + if verboseLogging { + codecLggr = lggr + } else { + codecLggr = corelogger.NullLogger + } + return &server{ logger.Sugared(lggr), + verboseLogging, cfg.TransmitTimeout().Duration(), client, pm, NewTransmitQueue(lggr, serverURL, int(cfg.TransmitQueueMaxSize()), pm), make(chan [32]byte, int(cfg.TransmitQueueMaxSize())), serverURL, + evm.NewReportCodecPremiumLegacy(codecLggr), + llo.JSONReportCodec{}, transmitSuccessCount.WithLabelValues(donIDStr, serverURL), transmitDuplicateCount.WithLabelValues(donIDStr, serverURL), transmitConnectionErrorCount.WithLabelValues(donIDStr, serverURL), @@ -162,7 +185,7 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donI // queue was closed return } - res, err := func(ctx context.Context) (*pb.TransmitResponse, error) { + req, res, err := func(ctx context.Context) (*pb.TransmitRequest, *pb.TransmitResponse, error) { ctx, cancelFn := context.WithTimeout(ctx, utils.WithJitter(s.transmitTimeout)) defer cancelFn() return s.transmit(ctx, t) @@ -172,7 +195,7 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donI return } else if err != nil { s.transmitConnectionErrorCount.Inc() - 
s.lggr.Errorw("Transmit report failed", "err", err, "transmission", t) + s.lggr.Errorw("Transmit report failed", "err", err, "req", req, "transmission", t) if ok := s.q.Push(t); !ok { s.lggr.Error("Failed to push report to transmit queue; queue is closed") return @@ -190,7 +213,7 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donI b.Reset() if res.Error == "" { s.transmitSuccessCount.Inc() - s.lggr.Debugw("Transmit report success", "transmission", t, "response", res) + s.lggr.Debugw("Transmit report success", "req.ReportFormat", req.ReportFormat, "req.Payload", req.Payload, "transmission", t, "response", res) } else { // We don't need to retry here because the mercury server // has confirmed it received the report. We only need to retry @@ -199,36 +222,36 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donI case DuplicateReport: s.transmitSuccessCount.Inc() s.transmitDuplicateCount.Inc() - s.lggr.Debugw("Transmit report success; duplicate report", "transmission", t, "response", res) + s.lggr.Debugw("Transmit report success; duplicate report", "req.ReportFormat", req.ReportFormat, "req.Payload", req.Payload, "transmission", t, "response", res) default: transmitServerErrorCount.WithLabelValues(donIDStr, s.url, fmt.Sprintf("%d", res.Code)).Inc() - s.lggr.Errorw("Transmit report failed; mercury server returned error", "response", res, "transmission", t, "err", res.Error, "code", res.Code) + s.lggr.Errorw("Transmit report failed; mercury server returned error", "req.ReportFormat", req.ReportFormat, "req.Payload", req.Payload, "response", res, "transmission", t, "err", res.Error, "code", res.Code) } } select { case s.deleteQueue <- t.Hash(): default: - s.lggr.Criticalw("Delete queue is full", "transmission", t) + s.lggr.Criticalw("Delete queue is full", "transmission", t, "transmissionHash", fmt.Sprintf("%x", t.Hash())) } } } -func (s *server) transmit(ctx context.Context, t *Transmission) (*pb.TransmitResponse, error) { +func (s *server) transmit(ctx context.Context, t *Transmission) (*pb.TransmitRequest, *pb.TransmitResponse, error) { var payload []byte var err error switch t.Report.Info.ReportFormat { case llotypes.ReportFormatJSON: - payload, err = llo.JSONReportCodec{}.Pack(t.ConfigDigest, t.SeqNr, t.Report.Report, t.Sigs) + payload, err = s.jsonPacker.Pack(t.ConfigDigest, t.SeqNr, t.Report.Report, t.Sigs) case llotypes.ReportFormatEVMPremiumLegacy: - payload, err = evm.ReportCodecPremiumLegacy{}.Pack(t.ConfigDigest, t.SeqNr, t.Report.Report, t.Sigs) + payload, err = s.evmPremiumLegacyPacker.Pack(t.ConfigDigest, t.SeqNr, t.Report.Report, t.Sigs) default: - return nil, fmt.Errorf("Transmit failed; unsupported report format: %q", t.Report.Info.ReportFormat) + return nil, nil, fmt.Errorf("Transmit failed; don't know how to Pack unsupported report format: %q", t.Report.Info.ReportFormat) } if err != nil { - return nil, fmt.Errorf("Transmit: encode failed; %w", err) + return nil, nil, fmt.Errorf("Transmit: encode failed; %w", err) } req := &pb.TransmitRequest{ @@ -236,5 +259,6 @@ func (s *server) transmit(ctx context.Context, t *Transmission) (*pb.TransmitRes ReportFormat: uint32(t.Report.Info.ReportFormat), } - return s.c.Transmit(ctx, req) + resp, err := s.c.Transmit(ctx, req) + return req, resp, err } diff --git a/core/services/llo/mercurytransmitter/transmitter.go b/core/services/llo/mercurytransmitter/transmitter.go index 33090ed9574..024a98174c6 100644 --- a/core/services/llo/mercurytransmitter/transmitter.go +++ 
b/core/services/llo/mercurytransmitter/transmitter.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "strconv" "sync" "github.com/prometheus/client_golang/prometheus" @@ -97,12 +98,14 @@ var _ Transmitter = (*transmitter)(nil) type Config interface { TransmitQueueMaxSize() uint32 TransmitTimeout() commonconfig.Duration + TransmitConcurrency() uint32 } type transmitter struct { services.StateMachine - lggr logger.SugaredLogger - cfg Config + lggr logger.SugaredLogger + verboseLogging bool + cfg Config orm ORM servers map[string]*server @@ -115,12 +118,13 @@ type transmitter struct { } type Opts struct { - Lggr logger.Logger - Cfg Config - Clients map[string]wsrpc.Client - FromAccount ed25519.PublicKey - DonID uint32 - ORM ORM + Lggr logger.Logger + VerboseLogging bool + Cfg Config + Clients map[string]wsrpc.Client + FromAccount ed25519.PublicKey + DonID uint32 + ORM ORM } func New(opts Opts) Transmitter { @@ -132,11 +136,12 @@ func newTransmitter(opts Opts) *transmitter { servers := make(map[string]*server, len(opts.Clients)) for serverURL, client := range opts.Clients { sLggr := sugared.Named(serverURL).With("serverURL", serverURL) - servers[serverURL] = newServer(sLggr, opts.Cfg, client, opts.ORM, serverURL) + servers[serverURL] = newServer(sLggr, opts.VerboseLogging, opts.Cfg, client, opts.ORM, serverURL) } return &transmitter{ services.StateMachine{}, sugared.Named("LLOMercuryTransmitter").With("donID", opts.ORM.DonID()), + opts.VerboseLogging, opts.Cfg, opts.ORM, servers, @@ -149,7 +154,9 @@ func newTransmitter(opts Opts) *transmitter { func (mt *transmitter) Start(ctx context.Context) (err error) { return mt.StartOnce("LLOMercuryTransmitter", func() error { - mt.lggr.Debugw("Loading transmit requests from database") + if mt.verboseLogging { + mt.lggr.Debugw("Loading transmit requests from database") + } { var startClosers []services.StartClose @@ -159,12 +166,23 @@ func (mt *transmitter) Start(ctx context.Context) (err error) { return err } s.q.Init(transmissions) - // starting pm after loading from it is fine because it simply spawns some garbage collection/prune goroutines + // starting pm after loading from it is fine because it simply + // spawns some garbage collection/prune goroutines startClosers = append(startClosers, s.c, s.q, s.pm) - mt.wg.Add(2) - go s.runDeleteQueueLoop(mt.stopCh, mt.wg) - go s.runQueueLoop(mt.stopCh, mt.wg, fmt.Sprintf("%d", mt.donID)) + // Number of goroutines per server will be roughly + // 2*nServers*TransmitConcurrency because each server has a + // delete queue and a transmit queue. 
+ // + // This could potentially be reduced by implementing transmit batching, + // see: https://smartcontract-it.atlassian.net/browse/MERC-6635 + nThreads := int(mt.cfg.TransmitConcurrency()) + mt.wg.Add(2 * nThreads) + donIDStr := strconv.FormatUint(uint64(mt.donID), 10) + for i := 0; i < nThreads; i++ { + go s.runDeleteQueueLoop(mt.stopCh, mt.wg) + go s.runQueueLoop(mt.stopCh, mt.wg, donIDStr) + } } if err := (&services.MultiStart{}).Start(ctx, startClosers...); err != nil { return err @@ -234,7 +252,9 @@ func (mt *transmitter) Transmit( g := new(errgroup.Group) for i := range transmissions { t := transmissions[i] - mt.lggr.Debugw("LLOMercuryTransmit", "digest", digest.Hex(), "seqNr", seqNr, "reportFormat", report.Info.ReportFormat, "reportLifeCycleStage", report.Info.LifeCycleStage, "transmissionHash", fmt.Sprintf("%x", t.Hash())) + if mt.verboseLogging { + mt.lggr.Debugw("LLOMercuryTransmit", "digest", digest.Hex(), "seqNr", seqNr, "reportFormat", report.Info.ReportFormat, "reportLifeCycleStage", report.Info.LifeCycleStage, "transmissionHash", fmt.Sprintf("%x", t.Hash())) + } g.Go(func() error { s := mt.servers[t.ServerURL] if ok := s.q.Push(t); !ok { diff --git a/core/services/llo/mercurytransmitter/transmitter_test.go b/core/services/llo/mercurytransmitter/transmitter_test.go index db3d0d2e584..7477e848b78 100644 --- a/core/services/llo/mercurytransmitter/transmitter_test.go +++ b/core/services/llo/mercurytransmitter/transmitter_test.go @@ -33,6 +33,10 @@ func (m mockCfg) TransmitTimeout() commonconfig.Duration { return *commonconfig.MustNewDuration(1 * time.Hour) } +func (m mockCfg) TransmitConcurrency() uint32 { + return 5 +} + func Test_Transmitter_Transmit(t *testing.T) { lggr := logger.TestLogger(t) db := pgtest.NewSqlxDB(t) @@ -135,7 +139,7 @@ func Test_Transmitter_runQueueLoop(t *testing.T) { orm := NewORM(db, donID) cfg := mockCfg{} - s := newServer(lggr, cfg, c, orm, sURL) + s := newServer(lggr, true, cfg, c, orm, sURL) t.Run("pulls from queue and transmits successfully", func(t *testing.T) { transmit := make(chan *pb.TransmitRequest, 1) diff --git a/core/services/llo/suppressed_logger.go b/core/services/llo/suppressed_logger.go new file mode 100644 index 00000000000..9fe6e6731e5 --- /dev/null +++ b/core/services/llo/suppressed_logger.go @@ -0,0 +1,51 @@ +package llo + +import "github.com/smartcontractkit/chainlink-common/pkg/logger" + +// Suppressed logger swallows debug/info unless the verbose flag is turned on +// Useful for OCR to calm down its verbosity + +var _ logger.Logger = &SuppressedLogger{} + +func NewSuppressedLogger(lggr logger.Logger, verbose bool) logger.Logger { + return &SuppressedLogger{ + Logger: lggr, + Verbose: verbose, + } +} + +type SuppressedLogger struct { + logger.Logger + Verbose bool +} + +func (s *SuppressedLogger) Debug(args ...interface{}) { + if s.Verbose { + s.Logger.Debug(args...) + } +} +func (s *SuppressedLogger) Info(args ...interface{}) { + if s.Verbose { + s.Logger.Info(args...) + } +} +func (s *SuppressedLogger) Debugf(format string, values ...interface{}) { + if s.Verbose { + s.Logger.Debugf(format, values...) + } +} +func (s *SuppressedLogger) Infof(format string, values ...interface{}) { + if s.Verbose { + s.Logger.Infof(format, values...) + } +} +func (s *SuppressedLogger) Debugw(msg string, keysAndValues ...interface{}) { + if s.Verbose { + s.Logger.Debugw(msg, keysAndValues...) + } +} +func (s *SuppressedLogger) Infow(msg string, keysAndValues ...interface{}) { + if s.Verbose { + s.Logger.Infow(msg, keysAndValues...) 
+ } +} diff --git a/core/services/llo/telemetry.go b/core/services/llo/telemetry.go index 888ee9d5d36..bb86679dc52 100644 --- a/core/services/llo/telemetry.go +++ b/core/services/llo/telemetry.go @@ -39,7 +39,11 @@ func NewTelemeterService(lggr logger.Logger, monitoringEndpoint commontypes.Moni } func newTelemeter(lggr logger.Logger, monitoringEndpoint commontypes.MonitoringEndpoint, donID uint32) *telemeter { - chTelemetryObservation := make(chan TelemetryObservation, 100) + // NOTE: This channel must take multiple telemetry packets per round (1 per + // feed) so we need to make sure the buffer is large enough. + // + // 2000 feeds * 5s/250ms = 40_000 should hold ~5s of buffer in the worst case. + chTelemetryObservation := make(chan TelemetryObservation, 40_000) t := &telemeter{ chTelemetryObservation: chTelemetryObservation, monitoringEndpoint: monitoringEndpoint, diff --git a/core/services/llo/transmitter.go b/core/services/llo/transmitter.go index 7696c69c291..1ff5c1b36ac 100644 --- a/core/services/llo/transmitter.go +++ b/core/services/llo/transmitter.go @@ -47,8 +47,9 @@ type TransmitterRetirementReportCacheWriter interface { type transmitter struct { services.StateMachine - lggr logger.Logger - fromAccount string + lggr logger.Logger + verboseLogging bool + fromAccount string subTransmitters []Transmitter retirementReportCache TransmitterRetirementReportCacheWriter @@ -56,6 +57,7 @@ type transmitter struct { type TransmitterOpts struct { Lggr logger.Logger + VerboseLogging bool FromAccount string MercuryTransmitterOpts mercurytransmitter.Opts RetirementReportCache TransmitterRetirementReportCacheWriter @@ -69,6 +71,7 @@ func NewTransmitter(opts TransmitterOpts) Transmitter { return &transmitter{ services.StateMachine{}, opts.Lggr, + opts.VerboseLogging, opts.FromAccount, subTransmitters, opts.RetirementReportCache, @@ -114,6 +117,10 @@ func (t *transmitter) Transmit( report ocr3types.ReportWithInfo[llotypes.ReportInfo], sigs []types.AttributedOnchainSignature, ) (err error) { + if t.verboseLogging { + t.lggr.Debugw("Transmit report", "digest", digest, "seqNr", seqNr, "report", report, "sigs", sigs) + } + if report.Info.ReportFormat == llotypes.ReportFormatRetirement { // Retirement reports don't get transmitted; rather, they are stored in // the RetirementReportCache diff --git a/core/services/ocr2/plugins/llo/helpers_test.go b/core/services/ocr2/plugins/llo/helpers_test.go index 0ca6eeb60cb..9cd8742ffa8 100644 --- a/core/services/ocr2/plugins/llo/helpers_test.go +++ b/core/services/ocr2/plugins/llo/helpers_test.go @@ -185,6 +185,7 @@ func setupNode( // [Mercury] c.Mercury.VerboseLogging = ptr(true) + c.Mercury.Transmitter.TransmitConcurrency = ptr(uint32(5)) // Avoid a ridiculous number of goroutines }) lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel) diff --git a/core/services/ocr2/plugins/llo/integration_test.go b/core/services/ocr2/plugins/llo/integration_test.go index bdd773910f4..043ce34c946 100644 --- a/core/services/ocr2/plugins/llo/integration_test.go +++ b/core/services/ocr2/plugins/llo/integration_test.go @@ -509,7 +509,8 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi assert.Equal(t, expectedBid.String(), reportElems["bid"].(*big.Int).String()) assert.Equal(t, expectedAsk.String(), reportElems["ask"].(*big.Int).String()) - t.Run(fmt.Sprintf("emulate mercury server verifying report (local verification) - node %x", req.pk), func(t *testing.T) { + // emulate mercury server verifying report (local verification) + { rv 
:= mercuryverifier.NewVerifier() reportSigners, err := rv.Verify(mercuryverifier.SignedReport{ @@ -522,14 +523,13 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi require.NoError(t, err) assert.GreaterOrEqual(t, len(reportSigners), int(fNodes+1)) assert.Subset(t, signerAddresses, reportSigners) - }) + } - t.Run(fmt.Sprintf("test on-chain verification - node %x", req.pk), func(t *testing.T) { - t.Run("destination verifier", func(t *testing.T) { - _, err = verifierProxy.Verify(steve, req.req.Payload, []byte{}) - require.NoError(t, err) - }) - }) + // test on-chain verification + { + _, err = verifierProxy.Verify(steve, req.req.Payload, []byte{}) + require.NoError(t, err) + } t.Logf("oracle %x reported for 0x%x", req.pk[:], feedID[:]) @@ -597,7 +597,8 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi var greenDigest ocr2types.ConfigDigest allReports := make(map[types.ConfigDigest][]datastreamsllo.Report) - t.Run("start off with blue=production, green=staging (specimen reports)", func(t *testing.T) { + // start off with blue=production, green=staging (specimen reports) + { // Set config on configurator blueDigest = setProductionConfig( t, donID, steve, backend, configurator, configuratorAddress, nodes, oracles, @@ -617,8 +618,9 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi assert.Equal(t, "2976.39", r.Values[0].(*datastreamsllo.Decimal).String()) break } - }) - t.Run("setStagingConfig does not affect production", func(t *testing.T) { + } + // setStagingConfig does not affect production + { greenDigest = setStagingConfig( t, donID, steve, backend, configurator, configuratorAddress, nodes, oracles, blueDigest, ) @@ -639,8 +641,9 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi } assert.Equal(t, blueDigest, r.ConfigDigest) } - }) - t.Run("promoteStagingConfig flow has clean and gapless hand off from old production to newly promoted staging instance, leaving old production instance in 'retired' state", func(t *testing.T) { + } + // promoteStagingConfig flow has clean and gapless hand off from old production to newly promoted staging instance, leaving old production instance in 'retired' state + { promoteStagingConfig(t, donID, steve, backend, configurator, configuratorAddress, false) // NOTE: Wait for first non-specimen report for the newly promoted (green) instance @@ -704,8 +707,9 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi assert.Less(t, finalBlueReport.ValidAfterSeconds, finalBlueReport.ObservationTimestampSeconds) assert.Equal(t, finalBlueReport.ObservationTimestampSeconds, initialPromotedGreenReport.ValidAfterSeconds) assert.Less(t, initialPromotedGreenReport.ValidAfterSeconds, initialPromotedGreenReport.ObservationTimestampSeconds) - }) - t.Run("retired instance does not produce reports", func(t *testing.T) { + } + // retired instance does not produce reports + { // NOTE: Wait for five "green" reports to be produced and assert no "blue" reports i := 0 @@ -721,8 +725,9 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi assert.False(t, r.Specimen) assert.Equal(t, greenDigest, r.ConfigDigest) } - }) - t.Run("setStagingConfig replaces 'retired' instance with new config and starts producing specimen reports again", func(t *testing.T) { + } + // setStagingConfig replaces 'retired' instance with new config and starts producing specimen reports again + { blueDigest = setStagingConfig( t, 
donID, steve, backend, configurator, configuratorAddress, nodes, oracles, greenDigest, ) @@ -740,8 +745,9 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi } assert.Equal(t, greenDigest, r.ConfigDigest) } - }) - t.Run("promoteStagingConfig swaps the instances again", func(t *testing.T) { + } + // promoteStagingConfig swaps the instances again + { // TODO: Check that once an instance enters 'retired' state, it // doesn't produce reports or bother making observations promoteStagingConfig(t, donID, steve, backend, configurator, configuratorAddress, true) @@ -766,8 +772,9 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi assert.Less(t, finalGreenReport.ValidAfterSeconds, finalGreenReport.ObservationTimestampSeconds) assert.Equal(t, finalGreenReport.ObservationTimestampSeconds, initialPromotedBlueReport.ValidAfterSeconds) assert.Less(t, initialPromotedBlueReport.ValidAfterSeconds, initialPromotedBlueReport.ObservationTimestampSeconds) - }) - t.Run("adding a new channel definition is picked up on the fly", func(t *testing.T) { + } + // adding a new channel definition is picked up on the fly + { channelDefinitions[2] = llotypes.ChannelDefinition{ ReportFormat: llotypes.ReportFormatJSON, Streams: []llotypes.Stream{ @@ -805,7 +812,7 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi assert.Len(t, r.Values, 1) assert.Equal(t, "2976.39", r.Values[0].(*datastreamsllo.Decimal).String()) } - }) + } t.Run("deleting the jobs turns off oracles and cleans up resources", func(t *testing.T) { t.Skip("TODO - MERC-3524") }) diff --git a/core/services/ocrcommon/telemetry.go b/core/services/ocrcommon/telemetry.go index e20b2485d86..5e4a65180d5 100644 --- a/core/services/ocrcommon/telemetry.go +++ b/core/services/ocrcommon/telemetry.go @@ -164,7 +164,6 @@ func ParseMercuryEATelemetry(lggr logger.Logger, trrs pipeline.TaskRunResults, f bridgeRawResponse, ok := trr.Result.Value.(string) if !ok { - lggr.Warnw(fmt.Sprintf("cannot get bridge response from bridge task, id=%s, name=%q, expected string got %T", trr.Task.DotID(), bridgeName, trr.Result.Value), "dotID", trr.Task.DotID(), "bridgeName", bridgeName) continue } eaTelem, err := parseEATelemetry([]byte(bridgeRawResponse)) @@ -654,7 +653,6 @@ func getPricesFromResultsByOrder(lggr logger.Logger, startTask pipeline.TaskRunR // We rely on task results to be sorted in the correct order benchmarkPriceTask := allTasks.GetNextTaskOf(startTask) if benchmarkPriceTask == nil { - lggr.Warn("cannot parse enhanced EA telemetry benchmark price, task is nil") return 0, 0, 0 } if benchmarkPriceTask.Task.Type() == pipeline.TaskTypeJSONParse { @@ -668,7 +666,6 @@ func getPricesFromResultsByOrder(lggr logger.Logger, startTask pipeline.TaskRunR bidTask := allTasks.GetNextTaskOf(*benchmarkPriceTask) if bidTask == nil { - lggr.Warnf("cannot parse enhanced EA telemetry bid price, task is nil, id %s", benchmarkPriceTask.Task.DotID()) return benchmarkPrice, 0, 0 } @@ -678,7 +675,6 @@ func getPricesFromResultsByOrder(lggr logger.Logger, startTask pipeline.TaskRunR askTask := allTasks.GetNextTaskOf(*bidTask) if askTask == nil { - lggr.Warnf("cannot parse enhanced EA telemetry ask price, task is nil, id %s", benchmarkPriceTask.Task.DotID()) return benchmarkPrice, bidPrice, 0 } if askTask.Task.Type() == pipeline.TaskTypeJSONParse { diff --git a/core/services/ocrcommon/telemetry_test.go b/core/services/ocrcommon/telemetry_test.go index 8fac0ab2cbf..4c8f0eb1127 100644 --- 
a/core/services/ocrcommon/telemetry_test.go +++ b/core/services/ocrcommon/telemetry_test.go @@ -1,6 +1,7 @@ package ocrcommon import ( + "fmt" "math/big" "sync" "testing" @@ -658,18 +659,15 @@ func TestGetPricesFromBridgeTaskByOrder(t *testing.T) { require.Equal(t, float64(0), benchmarkPrice) require.Equal(t, float64(0), bid) require.Equal(t, float64(0), ask) - require.Equal(t, 1, logs.Len()) - require.Contains(t, logs.All()[0].Message, "cannot parse enhanced EA telemetry") + require.Equal(t, 0, logs.Len()) tt := trrsMercuryV1[:2] getPricesFromBridgeTask(lggr, trrsMercuryV1[0], tt, 1) - require.Equal(t, 2, logs.Len()) - require.Contains(t, logs.All()[1].Message, "cannot parse enhanced EA telemetry bid price, task is nil") + require.Equal(t, 0, logs.Len()) tt = trrsMercuryV1[:3] getPricesFromBridgeTask(lggr, trrsMercuryV1[0], tt, 1) - require.Equal(t, 3, logs.Len()) - require.Contains(t, logs.All()[2].Message, "cannot parse enhanced EA telemetry ask price, task is nil") + require.Equal(t, 0, logs.Len()) trrs2 := pipeline.TaskRunResults{ pipeline.TaskRunResult{ @@ -709,10 +707,10 @@ func TestGetPricesFromBridgeTaskByOrder(t *testing.T) { require.Equal(t, benchmarkPrice, float64(0)) require.Equal(t, bid, float64(0)) require.Equal(t, ask, float64(0)) - require.Equal(t, logs.Len(), 6) - require.Contains(t, logs.All()[3].Message, "cannot parse EA telemetry price to float64, DOT id ds1_benchmark") - require.Contains(t, logs.All()[4].Message, "cannot parse EA telemetry price to float64, DOT id ds2_bid") - require.Contains(t, logs.All()[5].Message, "cannot parse EA telemetry price to float64, DOT id ds3_ask") + require.Equal(t, 3, logs.Len()) + require.Contains(t, logs.All()[0].Message, "cannot parse EA telemetry price to float64, DOT id ds1_benchmark") + require.Contains(t, logs.All()[1].Message, "cannot parse EA telemetry price to float64, DOT id ds2_bid") + require.Contains(t, logs.All()[2].Message, "cannot parse EA telemetry price to float64, DOT id ds3_ask") benchmarkPrice, bid, ask = getPricesFromBridgeTask(lggr, trrsMercuryV1[0], trrsMercuryV2, 2) require.Equal(t, 123456.123456, benchmarkPrice) @@ -1024,9 +1022,8 @@ func TestCollectMercuryEnhancedTelemetryV1(t *testing.T) { } wg.Wait() - require.Equal(t, 2, logs.Len()) - require.Contains(t, logs.All()[0].Message, `cannot get bridge response from bridge task, id=ds1, name="test-mercury-bridge-1"`) - require.Contains(t, logs.All()[1].Message, "cannot parse EA telemetry") + require.Equal(t, 1, logs.Len()) + require.Contains(t, logs.All()[0].Message, "cannot parse EA telemetry") chDone <- struct{}{} } @@ -1140,11 +1137,9 @@ func TestCollectMercuryEnhancedTelemetryV2(t *testing.T) { } wg.Wait() - require.Equal(t, 4, logs.Len()) - require.Contains(t, logs.All()[0].Message, "cannot parse enhanced EA telemetry bid price") - require.Contains(t, logs.All()[1].Message, "cannot get bridge response from bridge task") - require.Contains(t, logs.All()[2].Message, "cannot parse EA telemetry") - require.Contains(t, logs.All()[3].Message, "cannot parse enhanced EA telemetry bid price") + require.Equal(t, 1, logs.Len()) + fmt.Println(logs.All()) + require.Contains(t, logs.All()[0].Message, "cannot parse EA telemetry") chDone <- struct{}{} } diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go index 1fc2fc46336..2194cb8be46 100644 --- a/core/services/pipeline/runner.go +++ b/core/services/pipeline/runner.go @@ -510,15 +510,23 @@ func (r *runner) run(ctx context.Context, pipeline *Pipeline, run *Run, vars Var ) } l = 
l.With("run.State", run.State, "fatal", run.HasFatalErrors(), "runTime", runTime) - if run.HasFatalErrors() { - // This will also log at error level in OCR if it fails Observe so the - // level is appropriate - l = l.With("run.FatalErrors", run.FatalErrors) - l.Debugw("Completed pipeline run with fatal errors") - } else if run.HasErrors() { - l = l.With("run.AllErrors", run.AllErrors) - l.Debugw("Completed pipeline run with errors") - } else { + if run.HasFatalErrors() || run.HasErrors() { + var errorsWithID []string + for _, taskRun := range run.PipelineTaskRuns { + if taskRun.Error.Valid { + err := fmt.Sprintf("%s(%s); %s", taskRun.DotID, taskRun.Type, taskRun.Error.ValueOrZero()) + errorsWithID = append(errorsWithID, err) + } + } + l = l.With("run.Errors", errorsWithID) + if run.HasFatalErrors() { + l = l.With("run.FatalErrors", run.FatalErrors) + l.Debugw("Completed pipeline run with fatal errors") + } else if run.HasErrors() { + l = l.With("run.AllErrors", run.AllErrors) + l.Debugw("Completed pipeline run with errors") + } + } else if r.config.VerboseLogging() { l.Debugw("Completed pipeline run successfully") } diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go index db0fe90796b..8008fc4fd9e 100644 --- a/core/services/relay/evm/evm.go +++ b/core/services/relay/evm/evm.go @@ -39,6 +39,7 @@ import ( txm "github.com/smartcontractkit/chainlink/v2/core/chains/evm/txmgr" evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm" + coreconfig "github.com/smartcontractkit/chainlink/v2/core/config" "github.com/smartcontractkit/chainlink/v2/core/services/keystore" "github.com/smartcontractkit/chainlink/v2/core/services/llo" "github.com/smartcontractkit/chainlink/v2/core/services/llo/bm" @@ -149,7 +150,7 @@ type Relayer struct { // Mercury mercuryORM mercury.ORM - transmitterCfg mercury.TransmitterConfig + mercuryCfg MercuryConfig triggerCapability *triggers.MercuryTriggerService // LLO/data streams @@ -162,14 +163,19 @@ type CSAETHKeystore interface { Eth() keystore.Eth } +type MercuryConfig interface { + Transmitter() coreconfig.MercuryTransmitter + VerboseLogging() bool +} + type RelayerOpts struct { DS sqlutil.DataSource CSAETHKeystore MercuryPool wsrpc.Pool RetirementReportCache llo.RetirementReportCache - TransmitterConfig mercury.TransmitterConfig - CapabilitiesRegistry coretypes.CapabilitiesRegistry - HTTPClient *http.Client + MercuryConfig + CapabilitiesRegistry coretypes.CapabilitiesRegistry + HTTPClient *http.Client } func (c RelayerOpts) Validate() error { @@ -213,7 +219,7 @@ func NewRelayer(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain, cdcFactory: cdcFactory, retirementReportCache: opts.RetirementReportCache, mercuryORM: mercuryORM, - transmitterCfg: opts.TransmitterConfig, + mercuryCfg: opts.MercuryConfig, capabilitiesRegistry: opts.CapabilitiesRegistry, } @@ -484,7 +490,7 @@ func (r *Relayer) NewMercuryProvider(ctx context.Context, rargs commontypes.Rela return nil, err } - transmitter := mercury.NewTransmitter(lggr, r.transmitterCfg, clients, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.mercuryORM, transmitterCodec, benchmarkPriceDecoder, r.triggerCapability) + transmitter := mercury.NewTransmitter(lggr, r.mercuryCfg.Transmitter(), clients, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.mercuryORM, transmitterCodec, benchmarkPriceDecoder, r.triggerCapability) return NewMercuryProvider(cp, r.codec, 
NewMercuryChainReader(r.chain.HeadTracker()), transmitter, reportCodecV1, reportCodecV2, reportCodecV3, reportCodecV4, lggr), nil } @@ -552,15 +558,17 @@ func (r *Relayer) NewLLOProvider(ctx context.Context, rargs commontypes.RelayArg clients[server.URL] = client } transmitter = llo.NewTransmitter(llo.TransmitterOpts{ - Lggr: r.lggr, - FromAccount: fmt.Sprintf("%x", privKey.PublicKey), // NOTE: This may need to change if we support e.g. multiple tranmsmitters, to be a composite of all keys + Lggr: r.lggr, + FromAccount: fmt.Sprintf("%x", privKey.PublicKey), // NOTE: This may need to change if we support e.g. multiple tranmsmitters, to be a composite of all keys + VerboseLogging: r.mercuryCfg.VerboseLogging(), MercuryTransmitterOpts: mercurytransmitter.Opts{ - Lggr: r.lggr, - Cfg: r.transmitterCfg, - Clients: clients, - FromAccount: privKey.PublicKey, - DonID: relayConfig.LLODONID, - ORM: mercurytransmitter.NewORM(r.ds, relayConfig.LLODONID), + Lggr: r.lggr, + VerboseLogging: r.mercuryCfg.VerboseLogging(), + Cfg: r.mercuryCfg.Transmitter(), + Clients: clients, + FromAccount: privKey.PublicKey, + DonID: relayConfig.LLODONID, + ORM: mercurytransmitter.NewORM(r.ds, relayConfig.LLODONID), }, RetirementReportCache: r.retirementReportCache, }) diff --git a/core/services/relay/evm/read/batch.go b/core/services/relay/evm/read/batch.go index dbe8c8be549..16333149f11 100644 --- a/core/services/relay/evm/read/batch.go +++ b/core/services/relay/evm/read/batch.go @@ -128,7 +128,7 @@ func newDefaultEvmBatchCaller( } // batchCall formats a batch, calls the rpc client, and unpacks results. -// this function only returns errors of type ErrRead which should wrap lower errors. +// this function only returns errors of type Error which should wrap lower errors. func (c *defaultEvmBatchCaller) batchCall(ctx context.Context, blockNumber uint64, batchCall BatchCall) ([]dataAndErr, error) { if len(batchCall) == 0 { return nil, nil @@ -147,9 +147,9 @@ func (c *defaultEvmBatchCaller) batchCall(ctx context.Context, blockNumber uint6 if err = c.evmClient.BatchCallContext(ctx, rpcBatchCalls); err != nil { // return a basic read error with no detail or result since this is a general client // error instead of an error for a specific batch call. 
- return nil, ErrRead{ - Err: fmt.Errorf("%w: batch call context: %s", types.ErrInternal, err.Error()), - Batch: true, + return nil, Error{ + Err: fmt.Errorf("%w: batch call context: %s", types.ErrInternal, err.Error()), + Type: batchReadType, } } @@ -176,7 +176,7 @@ func (c *defaultEvmBatchCaller) createBatchCalls( fmt.Errorf("%w: encode params: %s", types.ErrInvalidConfig, err.Error()), call, block, - true, + batchReadType, ) } @@ -217,7 +217,7 @@ func (c *defaultEvmBatchCaller) unpackBatchResults( if rpcBatchCalls[idx].Error != nil { results[idx].err = newErrorFromCall( fmt.Errorf("%w: rpc call error: %w", types.ErrInternal, rpcBatchCalls[idx].Error), - call, block, true, + call, block, batchReadType, ) continue @@ -233,7 +233,7 @@ func (c *defaultEvmBatchCaller) unpackBatchResults( if err != nil { callErr := newErrorFromCall( fmt.Errorf("%w: hex decode result: %s", types.ErrInternal, err.Error()), - call, block, true, + call, block, batchReadType, ) callErr.Result = &hexEncodedOutputs[idx] @@ -250,7 +250,7 @@ func (c *defaultEvmBatchCaller) unpackBatchResults( if len(packedBytes) == 0 { callErr := newErrorFromCall( fmt.Errorf("%w: %w: %s", types.ErrInternal, errEmptyOutput, err.Error()), - call, block, true, + call, block, batchReadType, ) callErr.Result = &hexEncodedOutputs[idx] @@ -259,7 +259,7 @@ func (c *defaultEvmBatchCaller) unpackBatchResults( } else { callErr := newErrorFromCall( fmt.Errorf("%w: codec decode result: %s", types.ErrInvalidType, err.Error()), - call, block, true, + call, block, batchReadType, ) callErr.Result = &hexEncodedOutputs[idx] @@ -290,9 +290,9 @@ func (c *defaultEvmBatchCaller) batchCallDynamicLimitRetries(ctx context.Context } if lim <= 1 { - return nil, ErrRead{ - Err: fmt.Errorf("%w: limited call: call data: %+v", err, calls), - Batch: true, + return nil, Error{ + Err: fmt.Errorf("%w: limited call: call data: %+v", err, calls), + Type: batchReadType, } } diff --git a/core/services/relay/evm/read/errors.go b/core/services/relay/evm/read/errors.go index bec14d7dd4b..422b7ded1d8 100644 --- a/core/services/relay/evm/read/errors.go +++ b/core/services/relay/evm/read/errors.go @@ -10,9 +10,17 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" ) -type ErrRead struct { +type readType string + +const ( + batchReadType readType = "BatchGetLatestValue" + singleReadType readType = "GetLatestValue" + eventReadType readType = "QueryKey" +) + +type Error struct { Err error - Batch bool + Type readType Detail *readDetail Result *string } @@ -25,10 +33,10 @@ type readDetail struct { Block string } -func newErrorFromCall(err error, call Call, block string, batch bool) ErrRead { - return ErrRead{ - Err: err, - Batch: batch, +func newErrorFromCall(err error, call Call, block string, tp readType) Error { + return Error{ + Err: err, + Type: tp, Detail: &readDetail{ Address: call.ContractAddress.Hex(), Contract: call.ContractName, @@ -40,12 +48,12 @@ func newErrorFromCall(err error, call Call, block string, batch bool) ErrRead { } } -func (e ErrRead) Error() string { +func (e Error) Error() string { var builder strings.Builder - builder.WriteString("[rpc error]") - builder.WriteString(fmt.Sprintf(" batch: %T;", e.Batch)) + builder.WriteString("[read error]") builder.WriteString(fmt.Sprintf(" err: %s;", e.Err.Error())) + builder.WriteString(fmt.Sprintf(" type: %s;", e.Type)) if e.Detail != nil { builder.WriteString(fmt.Sprintf(" block: %s;", e.Detail.Block)) @@ -63,7 +71,7 @@ func (e ErrRead) Error() string { return builder.String() } -func (e 
ErrRead) Unwrap() error { +func (e Error) Unwrap() error { return e.Err } diff --git a/core/services/relay/evm/read/event.go b/core/services/relay/evm/read/event.go index c37b979d7ea..d2b54e5bd64 100644 --- a/core/services/relay/evm/read/event.go +++ b/core/services/relay/evm/read/event.go @@ -247,7 +247,7 @@ func (b *EventBinding) GetLatestValueWithHeadData(ctx context.Context, address c ReadName: b.eventName, Params: params, ReturnVal: into, - }, strconv.Itoa(int(confs)), false) + }, strconv.Itoa(int(confs)), eventReadType) callErr.Result = result @@ -315,7 +315,7 @@ func (b *EventBinding) QueryKey(ctx context.Context, address common.Address, fil ContractName: b.contractName, ReadName: b.eventName, ReturnVal: sequenceDataType, - }, "", false) + }, "", eventReadType) } }() diff --git a/core/services/relay/evm/read/method.go b/core/services/relay/evm/read/method.go index 393077c6d3f..e988e4352f7 100644 --- a/core/services/relay/evm/read/method.go +++ b/core/services/relay/evm/read/method.go @@ -68,8 +68,9 @@ func (b *MethodBinding) Bind(ctx context.Context, bindings ...common.Address) er // check for contract byte code at the latest block and provided address byteCode, err := b.client.CodeAt(ctx, binding, nil) if err != nil { - return ErrRead{ - Err: fmt.Errorf("%w: code at call failure: %s", commontypes.ErrInternal, err.Error()), + return Error{ + Err: fmt.Errorf("%w: code at call failure: %s", commontypes.ErrInternal, err.Error()), + Type: singleReadType, Detail: &readDetail{ Address: binding.Hex(), Contract: b.contractName, @@ -146,7 +147,7 @@ func (b *MethodBinding) GetLatestValueWithHeadData(ctx context.Context, addr com ReadName: b.method, Params: params, ReturnVal: returnVal, - }, blockNum.String(), false) + }, blockNum.String(), singleReadType) return nil, callErr } @@ -167,7 +168,7 @@ func (b *MethodBinding) GetLatestValueWithHeadData(ctx context.Context, addr com ReadName: b.method, Params: params, ReturnVal: returnVal, - }, blockNum.String(), false) + }, blockNum.String(), singleReadType) return nil, callErr } @@ -181,7 +182,7 @@ func (b *MethodBinding) GetLatestValueWithHeadData(ctx context.Context, addr com ReadName: b.method, Params: params, ReturnVal: returnVal, - }, blockNum.String(), false) + }, blockNum.String(), singleReadType) strResult := hexutil.Encode(bytes) callErr.Result = &strResult diff --git a/core/services/workflows/store/store_db.go b/core/services/workflows/store/store_db.go index f15a6928e7e..66c78493417 100644 --- a/core/services/workflows/store/store_db.go +++ b/core/services/workflows/store/store_db.go @@ -111,15 +111,23 @@ func (d *DBStore) pruneDBEntries() { return case <-ticker.C: ctx, cancel := d.chStop.CtxWithTimeout(defaultPruneTimeoutSec * time.Second) + nPruned := int64(0) err := sqlutil.TransactDataSource(ctx, d.db, nil, func(tx sqlutil.DataSource) error { stmt := fmt.Sprintf("DELETE FROM workflow_executions WHERE (id) IN (SELECT id FROM workflow_executions WHERE (created_at < now() - interval '%d hours') LIMIT %d);", defaultPruneRecordAgeHours, defaultPruneBatchSize) - _, err := tx.ExecContext(ctx, stmt) - return err + res, err := tx.ExecContext(ctx, stmt) + if err != nil { + return err + } + nPruned, err = res.RowsAffected() + if err != nil { + d.lggr.Warnw("Failed to get number of pruned workflow_executions", "err", err) + } + return nil }) if err != nil { d.lggr.Errorw("Failed to prune workflow_executions", "err", err) - } else { - d.lggr.Infow("Pruned oldest workflow_executions", "batchSize", defaultPruneBatchSize, "ageLimitHours", 
defaultPruneRecordAgeHours) + } else if nPruned > 0 { + d.lggr.Debugw("Pruned oldest workflow_executions", "nPruned", nPruned, "batchSize", defaultPruneBatchSize, "ageLimitHours", defaultPruneRecordAgeHours) } cancel() } diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml index cd51afac5f8..0f26b02ab6f 100644 --- a/core/web/resolver/testdata/config-empty-effective.toml +++ b/core/web/resolver/testdata/config-empty-effective.toml @@ -237,6 +237,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml index bfb0dcb9961..113d319e3c5 100644 --- a/core/web/resolver/testdata/config-full.toml +++ b/core/web/resolver/testdata/config-full.toml @@ -247,6 +247,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 123 TransmitTimeout = '3m54s' +TransmitConcurrency = 456 [Capabilities] [Capabilities.Peering] diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml index 074cb82482b..cb2884fde8f 100644 --- a/core/web/resolver/testdata/config-multi-chain-effective.toml +++ b/core/web/resolver/testdata/config-multi-chain-effective.toml @@ -237,6 +237,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/deployment/ccip/changeset/deploy_chain.go b/deployment/ccip/changeset/deploy_chain.go new file mode 100644 index 00000000000..68f350a9af7 --- /dev/null +++ b/deployment/ccip/changeset/deploy_chain.go @@ -0,0 +1,44 @@ +package changeset + +import ( + "fmt" + + "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock" + + "github.com/smartcontractkit/chainlink/deployment" + ccipdeployment "github.com/smartcontractkit/chainlink/deployment/ccip" +) + +var _ deployment.ChangeSet[DeployChainContractsConfig] = DeployChainContracts + +func DeployChainContracts(env deployment.Environment, c DeployChainContractsConfig) (deployment.ChangesetOutput, error) { + newAddresses := deployment.NewMemoryAddressBook() + err := ccipdeployment.DeployChainContractsForChains(env, newAddresses, c.HomeChainSelector, c.ChainSelectors, c.MCMSCfg) + if err != nil { + env.Logger.Errorw("Failed to deploy CCIP contracts", "err", err, "newAddresses", newAddresses) + return deployment.ChangesetOutput{AddressBook: newAddresses}, deployment.MaybeDataErr(err) + } + return deployment.ChangesetOutput{ + Proposals: []timelock.MCMSWithTimelockProposal{}, + AddressBook: newAddresses, + JobSpecs: nil, + }, nil +} + +type DeployChainContractsConfig struct { + ChainSelectors []uint64 + HomeChainSelector uint64 + MCMSCfg ccipdeployment.MCMSConfig +} + +func (c DeployChainContractsConfig) Validate() error { + for _, cs := range c.ChainSelectors { + if err := deployment.IsValidChainSelector(cs); err != nil { + return fmt.Errorf("invalid chain selector: %d - %w", cs, err) + } + } + if err := deployment.IsValidChainSelector(c.HomeChainSelector); err != nil { + return fmt.Errorf("invalid home chain selector: %d - %w", c.HomeChainSelector, err) + } + return nil +} diff --git a/deployment/ccip/changeset/deploy_chain_test.go b/deployment/ccip/changeset/deploy_chain_test.go new file mode 100644 index 00000000000..b197c90eca5 --- /dev/null +++ 
b/deployment/ccip/changeset/deploy_chain_test.go @@ -0,0 +1,78 @@ +package changeset + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment" + ccdeploy "github.com/smartcontractkit/chainlink/deployment/ccip" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestDeployChainContractsChangeset(t *testing.T) { + lggr := logger.TestLogger(t) + e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{ + Bootstraps: 1, + Chains: 2, + Nodes: 4, + }) + selectors := e.AllChainSelectors() + homeChainSel := selectors[0] + nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain) + require.NoError(t, err) + p2pIds := nodes.NonBootstraps().PeerIDs() + // deploy home chain + homeChainCfg := DeployHomeChainConfig{ + HomeChainSel: homeChainSel, + RMNStaticConfig: ccdeploy.NewTestRMNStaticConfig(), + RMNDynamicConfig: ccdeploy.NewTestRMNDynamicConfig(), + NodeOperators: ccdeploy.NewTestNodeOperator(e.Chains[homeChainSel].DeployerKey.From), + NodeP2PIDsPerNodeOpAdmin: map[string][][32]byte{ + "NodeOperator": p2pIds, + }, + } + output, err := DeployHomeChain(e, homeChainCfg) + require.NoError(t, err) + require.NoError(t, e.ExistingAddresses.Merge(output.AddressBook)) + + // deploy pre-requisites + prerequisites, err := DeployPrerequisites(e, DeployPrerequisiteConfig{ + ChainSelectors: selectors, + }) + require.NoError(t, err) + require.NoError(t, e.ExistingAddresses.Merge(prerequisites.AddressBook)) + + // deploy ccip chain contracts + output, err = DeployChainContracts(e, DeployChainContractsConfig{ + ChainSelectors: selectors, + HomeChainSelector: homeChainSel, + MCMSCfg: ccdeploy.NewTestMCMSConfig(t, e), + }) + require.NoError(t, err) + require.NoError(t, e.ExistingAddresses.Merge(output.AddressBook)) + + // load onchain state + state, err := ccdeploy.LoadOnchainState(e) + require.NoError(t, err) + + // verify all contracts populated + require.NotNil(t, state.Chains[homeChainSel].CapabilityRegistry) + require.NotNil(t, state.Chains[homeChainSel].CCIPHome) + require.NotNil(t, state.Chains[homeChainSel].RMNHome) + for _, sel := range selectors { + require.NotNil(t, state.Chains[sel].LinkToken) + require.NotNil(t, state.Chains[sel].Weth9) + require.NotNil(t, state.Chains[sel].TokenAdminRegistry) + require.NotNil(t, state.Chains[sel].RegistryModule) + require.NotNil(t, state.Chains[sel].Router) + require.NotNil(t, state.Chains[sel].RMNRemote) + require.NotNil(t, state.Chains[sel].TestRouter) + require.NotNil(t, state.Chains[sel].NonceManager) + require.NotNil(t, state.Chains[sel].FeeQuoter) + require.NotNil(t, state.Chains[sel].OffRamp) + require.NotNil(t, state.Chains[sel].OnRamp) + } +} diff --git a/deployment/ccip/changeset/home_chain.go b/deployment/ccip/changeset/home_chain.go index 0fabd2efb18..92b5b09c695 100644 --- a/deployment/ccip/changeset/home_chain.go +++ b/deployment/ccip/changeset/home_chain.go @@ -26,7 +26,9 @@ func DeployHomeChain(env deployment.Environment, cfg DeployHomeChainConfig) (dep _, err = ccipdeployment.DeployHomeChain(env.Logger, env, ab, env.Chains[cfg.HomeChainSel], cfg.RMNStaticConfig, cfg.RMNDynamicConfig, cfg.NodeOperators, cfg.NodeP2PIDsPerNodeOpAdmin) if err != nil { env.Logger.Errorw("Failed to deploy cap reg", "err", err, "addresses", env.ExistingAddresses) - return deployment.ChangesetOutput{}, err + return deployment.ChangesetOutput{ + AddressBook: ab, + }, 
err } return deployment.ChangesetOutput{ diff --git a/deployment/ccip/changeset/jobspec.go b/deployment/ccip/changeset/jobspec.go new file mode 100644 index 00000000000..76352ff364f --- /dev/null +++ b/deployment/ccip/changeset/jobspec.go @@ -0,0 +1,21 @@ +package changeset + +import ( + "github.com/pkg/errors" + "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock" + + "github.com/smartcontractkit/chainlink/deployment" + ccipdeployment "github.com/smartcontractkit/chainlink/deployment/ccip" +) + +func Jobspec(env deployment.Environment, _ any) (deployment.ChangesetOutput, error) { + js, err := ccipdeployment.NewCCIPJobSpecs(env.NodeIDs, env.Offchain) + if err != nil { + return deployment.ChangesetOutput{}, errors.Wrapf(err, "failed to create job specs") + } + return deployment.ChangesetOutput{ + Proposals: []timelock.MCMSWithTimelockProposal{}, + AddressBook: deployment.NewMemoryAddressBook(), + JobSpecs: js, + }, nil +} diff --git a/deployment/ccip/changeset/jobspec_test.go b/deployment/ccip/changeset/jobspec_test.go new file mode 100644 index 00000000000..4a10bdc2436 --- /dev/null +++ b/deployment/ccip/changeset/jobspec_test.go @@ -0,0 +1,35 @@ +package changeset + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/deployment/environment/memory" + ccip "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/validate" + "github.com/smartcontractkit/chainlink/v2/core/logger" +) + +func TestJobSpecChangeset(t *testing.T) { + lggr := logger.TestLogger(t) + e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{ + Chains: 1, + Nodes: 4, + }) + output, err := Jobspec(e, nil) + require.NoError(t, err) + require.NotNil(t, output.JobSpecs) + nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain) + require.NoError(t, err) + for _, node := range nodes { + jobs, exists := output.JobSpecs[node.NodeID] + require.True(t, exists) + require.NotNil(t, jobs) + for _, job := range jobs { + _, err = ccip.ValidatedCCIPSpec(job) + require.NoError(t, err) + } + } +} diff --git a/deployment/ccip/changeset/prerequisites.go b/deployment/ccip/changeset/prerequisites.go index 7bead1cc05c..20ff7f5a935 100644 --- a/deployment/ccip/changeset/prerequisites.go +++ b/deployment/ccip/changeset/prerequisites.go @@ -6,7 +6,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock" - chain_selectors "github.com/smartcontractkit/chain-selectors" "github.com/smartcontractkit/chainlink/deployment" ccipdeployment "github.com/smartcontractkit/chainlink/deployment/ccip" @@ -27,7 +26,9 @@ func DeployPrerequisites(env deployment.Environment, cfg DeployPrerequisiteConfi err = ccipdeployment.DeployPrerequisiteChainContracts(env, ab, cfg.ChainSelectors) if err != nil { env.Logger.Errorw("Failed to deploy prerequisite contracts", "err", err, "addressBook", ab) - return deployment.ChangesetOutput{}, fmt.Errorf("failed to deploy prerequisite contracts: %w", err) + return deployment.ChangesetOutput{ + AddressBook: ab, + }, fmt.Errorf("failed to deploy prerequisite contracts: %w", err) } return deployment.ChangesetOutput{ Proposals: []timelock.MCMSWithTimelockProposal{}, @@ -45,14 +46,9 @@ type DeployPrerequisiteConfig struct { func (c DeployPrerequisiteConfig) Validate() error { for _, cs := range c.ChainSelectors { - if cs == 0 { - return 
fmt.Errorf("chain selector must be set") - } - _, err := chain_selectors.ChainIdFromSelector(cs) - if err != nil { + if err := deployment.IsValidChainSelector(cs); err != nil { return fmt.Errorf("invalid chain selector: %d - %w", cs, err) } - } return nil } diff --git a/deployment/ccip/changeset/save_existing.go b/deployment/ccip/changeset/save_existing.go index 8995fdf7f4c..76330a3a20a 100644 --- a/deployment/ccip/changeset/save_existing.go +++ b/deployment/ccip/changeset/save_existing.go @@ -6,7 +6,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock" - chain_selectors "github.com/smartcontractkit/chain-selectors" "github.com/smartcontractkit/chainlink/deployment" ) @@ -27,11 +26,7 @@ type ExistingContractsConfig struct { func (cfg ExistingContractsConfig) Validate() error { for _, ec := range cfg.ExistingContracts { - if ec.ChainSelector == 0 { - return fmt.Errorf("chain selectors must be set") - } - _, err := chain_selectors.ChainIdFromSelector(ec.ChainSelector) - if err != nil { + if err := deployment.IsValidChainSelector(ec.ChainSelector); err != nil { return fmt.Errorf("invalid chain selector: %d - %w", ec.ChainSelector, err) } if ec.Address == (common.Address{}) { diff --git a/deployment/ccip/deploy.go b/deployment/ccip/deploy.go index 83e233b71bb..d1f6866190d 100644 --- a/deployment/ccip/deploy.go +++ b/deployment/ccip/deploy.go @@ -321,29 +321,11 @@ func DeployCCIPContracts(e deployment.Environment, ab deployment.AddressBook, c e.Logger.Errorw("Failed to get capability registry") return fmt.Errorf("capability registry not found") } - cr, err := capReg.GetHashedCapabilityId( - &bind.CallOpts{}, CapabilityLabelledName, CapabilityVersion) - if err != nil { - e.Logger.Errorw("Failed to get hashed capability id", "err", err) - return err + ccipHome := existingState.Chains[c.HomeChainSel].CCIPHome + if ccipHome == nil { + e.Logger.Errorw("Failed to get ccip home", "err", err) + return fmt.Errorf("ccip home not found") } - if cr != CCIPCapabilityID { - return fmt.Errorf("capability registry does not support CCIP %s %s", hexutil.Encode(cr[:]), hexutil.Encode(CCIPCapabilityID[:])) - } - capability, err := capReg.GetCapability(nil, CCIPCapabilityID) - if err != nil { - e.Logger.Errorw("Failed to get capability", "err", err) - return err - } - ccipHome, err := ccip_home.NewCCIPHome(capability.ConfigurationContract, e.Chains[c.HomeChainSel].Client) - if err != nil { - e.Logger.Errorw("Failed to get ccip config", "err", err) - return err - } - if ccipHome.Address() != existingState.Chains[c.HomeChainSel].CCIPHome.Address() { - return fmt.Errorf("ccip home address mismatch") - } - rmnHome := existingState.Chains[c.HomeChainSel].RMNHome if rmnHome == nil { e.Logger.Errorw("Failed to get rmn home", "err", err) @@ -352,18 +334,10 @@ func DeployCCIPContracts(e deployment.Environment, ab deployment.AddressBook, c usdcConfiguration := make(map[cciptypes.ChainSelector]pluginconfig.USDCCCTPTokenConfig) for _, chainSel := range c.ChainsToDeploy { - chain, ok := e.Chains[chainSel] - if !ok { + chain, exists := e.Chains[chainSel] + if !exists { return fmt.Errorf("chain %d not found", chainSel) } - if existingState.Chains[chainSel].LinkToken == nil || existingState.Chains[chainSel].Weth9 == nil { - return fmt.Errorf("fee tokens not found for chain %d", chainSel) - } - err = DeployChainContracts(e, chain, ab, c.MCMSConfig, rmnHome) - if err != nil { - return err - } - if c.USDCConfig.Enabled { token, pool, 
messenger, transmitter, err1 := DeployUSDC(e.Logger, chain, ab, existingState.Chains[chainSel]) if err1 != nil { @@ -383,10 +357,13 @@ func DeployCCIPContracts(e deployment.Environment, ab deployment.AddressBook, c } } } - + err = DeployChainContractsForChains(e, ab, c.HomeChainSel, c.ChainsToDeploy, c.MCMSConfig) + if err != nil { + e.Logger.Errorw("Failed to deploy chain contracts", "err", err) + return err + } for _, chainSel := range c.ChainsToDeploy { chain, _ := e.Chains[chainSel] - chainAddresses, err := ab.AddressesForChain(chain.Selector) if err != nil { e.Logger.Errorw("Failed to get chain addresses", "err", err) @@ -553,6 +530,62 @@ func DeployMCMSContracts( }, nil } +func DeployChainContractsForChains(e deployment.Environment, ab deployment.AddressBook, homeChainSel uint64, chainsToDeploy []uint64, mcmsConfig MCMSConfig) error { + existingState, err := LoadOnchainState(e) + if err != nil { + e.Logger.Errorw("Failed to load existing onchain state", "err") + return err + } + + capReg := existingState.Chains[homeChainSel].CapabilityRegistry + if capReg == nil { + e.Logger.Errorw("Failed to get capability registry") + return fmt.Errorf("capability registry not found") + } + cr, err := capReg.GetHashedCapabilityId( + &bind.CallOpts{}, CapabilityLabelledName, CapabilityVersion) + if err != nil { + e.Logger.Errorw("Failed to get hashed capability id", "err", err) + return err + } + if cr != CCIPCapabilityID { + return fmt.Errorf("capability registry does not support CCIP %s %s", hexutil.Encode(cr[:]), hexutil.Encode(CCIPCapabilityID[:])) + } + capability, err := capReg.GetCapability(nil, CCIPCapabilityID) + if err != nil { + e.Logger.Errorw("Failed to get capability", "err", err) + return err + } + ccipHome, err := ccip_home.NewCCIPHome(capability.ConfigurationContract, e.Chains[homeChainSel].Client) + if err != nil { + e.Logger.Errorw("Failed to get ccip config", "err", err) + return err + } + if ccipHome.Address() != existingState.Chains[homeChainSel].CCIPHome.Address() { + return fmt.Errorf("ccip home address mismatch") + } + rmnHome := existingState.Chains[homeChainSel].RMNHome + if rmnHome == nil { + e.Logger.Errorw("Failed to get rmn home", "err", err) + return fmt.Errorf("rmn home not found") + } + for _, chainSel := range chainsToDeploy { + chain, ok := e.Chains[chainSel] + if !ok { + return fmt.Errorf("chain %d not found", chainSel) + } + if existingState.Chains[chainSel].LinkToken == nil || existingState.Chains[chainSel].Weth9 == nil { + return fmt.Errorf("fee tokens not found for chain %d", chainSel) + } + err := DeployChainContracts(e, chain, ab, mcmsConfig, rmnHome) + if err != nil { + e.Logger.Errorw("Failed to deploy chain contracts", "chain", chainSel, "err", err) + return fmt.Errorf("failed to deploy chain contracts for chain %d: %w", chainSel, err) + } + } + return nil +} + func DeployChainContracts( e deployment.Environment, chain deployment.Chain, diff --git a/deployment/ccip/deploy_test.go b/deployment/ccip/deploy_test.go index dc1927261d1..2ca9901ddbf 100644 --- a/deployment/ccip/deploy_test.go +++ b/deployment/ccip/deploy_test.go @@ -70,17 +70,3 @@ func TestDeployCCIPContracts(t *testing.T) { require.NoError(t, err) fmt.Println(string(b)) } - -func TestJobSpecGeneration(t *testing.T) { - lggr := logger.TestLogger(t) - e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{ - Chains: 1, - Nodes: 1, - }) - js, err := NewCCIPJobSpecs(e.NodeIDs, e.Offchain) - require.NoError(t, err) - for node, jb := range js { - 
fmt.Println(node, jb) - } - // TODO: Add job assertions -} diff --git a/deployment/environment/devenv/.sample.env b/deployment/environment/devenv/.sample.env index ddf0b97e9a9..8ab186e3044 100644 --- a/deployment/environment/devenv/.sample.env +++ b/deployment/environment/devenv/.sample.env @@ -7,6 +7,12 @@ E2E_JD_VERSION= E2E_TEST_CHAINLINK_IMAGE=public.ecr.aws/w0i8p0z9/chainlink-ccip E2E_TEST_CHAINLINK_VERSION=2.14.0-ccip1.5.0 +E2E_RMN_RAGEPROXY_IMAGE= +E2E_RMN_RAGEPROXY_VERSION=master-5208d09 +E2E_RMN_AFN2PROXY_IMAGE= +E2E_RMN_AFN2PROXY_VERSION=master-5208d09 + + # RPC Configuration E2E_TEST_SEPOLIA_WALLET_KEY= E2E_TEST_SEPOLIA_RPC_HTTP_URL_1= diff --git a/deployment/go.mod b/deployment/go.mod index 2c6bf78b0d1..6320167168b 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -402,7 +402,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/smartcontractkit/chainlink-automation v0.8.1 // indirect github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect + github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241115191142-8b8369c1f44e // indirect diff --git a/deployment/go.sum b/deployment/go.sum index d265b8935fd..65db9f77800 100644 --- a/deployment/go.sum +++ b/deployment/go.sum @@ -1388,8 +1388,8 @@ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef06 github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef068/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e/go.mod h1:iK3BNHKCLgSgkOyiu3iE7sfZ20Qnuk7xwjV/yO/6gnQ= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 h1:1BMTG66HnCIz+KMBWGvyzELNM6VHGwv2WKFhN7H49Sg= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57/go.mod h1:QPiorgpbLv4+Jn4YO6xxU4ftTu4T3QN8HwX3ImP59DE= github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6AnNt+Wg64sVG+XSA49c= github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4= github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 h1:0ewLMbAz3rZrovdRUCgd028yOXX8KigB4FndAUdI2kM= diff --git a/deployment/helpers.go b/deployment/helpers.go index 1f0dc3064d6..e8d2d8c8d59 100644 --- a/deployment/helpers.go +++ b/deployment/helpers.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/pkg/errors" + chain_selectors "github.com/smartcontractkit/chain-selectors" "github.com/smartcontractkit/chainlink-common/pkg/logger" ) @@ -152,3 +153,14 @@ func DeployContract[C any]( } return &contractDeploy, nil } + +func IsValidChainSelector(cs uint64) error { + if cs == 0 { + 
return fmt.Errorf("chain selector must be set") + } + _, err := chain_selectors.ChainIdFromSelector(cs) + if err != nil { + return fmt.Errorf("invalid chain selector: %d - %w", cs, err) + } + return nil +} diff --git a/deployment/keystone/changeset/deploy_ocr3.go b/deployment/keystone/changeset/deploy_ocr3.go index e0edf4a4440..6684d8e046b 100644 --- a/deployment/keystone/changeset/deploy_ocr3.go +++ b/deployment/keystone/changeset/deploy_ocr3.go @@ -26,11 +26,12 @@ func DeployOCR3(env deployment.Environment, config interface{}) (deployment.Chan return deployment.ChangesetOutput{AddressBook: ab}, nil } -func ConfigureOCR3Contract(lggr logger.Logger, env deployment.Environment, ab deployment.AddressBook, registryChainSel uint64, nodes []string, cfg kslib.OracleConfigWithSecrets) (deployment.ChangesetOutput, error) { - err := kslib.ConfigureOCR3ContractFromJD(&env, registryChainSel, nodes, ab, &cfg) +func ConfigureOCR3Contract(lggr logger.Logger, env deployment.Environment, cfg kslib.ConfigureOCR3Config) (deployment.ChangesetOutput, error) { + + _, err := kslib.ConfigureOCR3ContractFromJD(&env, cfg) if err != nil { return deployment.ChangesetOutput{}, fmt.Errorf("failed to configure OCR3Capability: %w", err) } - - return deployment.ChangesetOutput{AddressBook: ab}, nil + // does not create any new addresses + return deployment.ChangesetOutput{}, nil } diff --git a/deployment/keystone/deploy.go b/deployment/keystone/deploy.go index a43f906178e..3019f934a96 100644 --- a/deployment/keystone/deploy.go +++ b/deployment/keystone/deploy.go @@ -423,45 +423,65 @@ func ConfigureOCR3Contract(env *deployment.Environment, chainSel uint64, dons [] return nil } -func ConfigureOCR3ContractFromJD(env *deployment.Environment, chainSel uint64, nodeIDs []string, addrBook deployment.AddressBook, cfg *OracleConfigWithSecrets) error { - registryChain, ok := env.Chains[chainSel] +type ConfigureOCR3Resp struct { + OCR2OracleConfig +} + +type ConfigureOCR3Config struct { + ChainSel uint64 + NodeIDs []string + OCR3Config *OracleConfigWithSecrets + DryRun bool +} + +func ConfigureOCR3ContractFromJD(env *deployment.Environment, cfg ConfigureOCR3Config) (*ConfigureOCR3Resp, error) { + prefix := "" + if cfg.DryRun { + prefix = "DRY RUN: " + } + env.Logger.Infof("%sconfiguring OCR3 contract for chain %d", prefix, cfg.ChainSel) + registryChain, ok := env.Chains[cfg.ChainSel] if !ok { - return fmt.Errorf("chain %d not found in environment", chainSel) + return nil, fmt.Errorf("chain %d not found in environment", cfg.ChainSel) } contractSetsResp, err := GetContractSets(env.Logger, &GetContractSetsRequest{ Chains: env.Chains, - AddressBook: addrBook, + AddressBook: env.ExistingAddresses, }) if err != nil { - return fmt.Errorf("failed to get contract sets: %w", err) + return nil, fmt.Errorf("failed to get contract sets: %w", err) } - contracts, ok := contractSetsResp.ContractSets[chainSel] + contracts, ok := contractSetsResp.ContractSets[cfg.ChainSel] if !ok { - return fmt.Errorf("failed to get contract set for chain %d", chainSel) + return nil, fmt.Errorf("failed to get contract set for chain %d", cfg.ChainSel) } contract := contracts.OCR3 if contract == nil { - return fmt.Errorf("no ocr3 contract found for chain %d", chainSel) + return nil, fmt.Errorf("no ocr3 contract found for chain %d", cfg.ChainSel) } - nodes, err := NodesFromJD("nodes", nodeIDs, env.Offchain) + nodes, err := NodesFromJD("nodes", cfg.NodeIDs, env.Offchain) if err != nil { - return err + return nil, err } var ocr2nodes []*ocr2Node for _, node := range nodes 
{ - n, err := newOcr2NodeFromJD(&node, chainSel) + n, err := newOcr2NodeFromJD(&node, cfg.ChainSel) if err != nil { - return fmt.Errorf("failed to create ocr2 node from clo node: %w", err) + return nil, fmt.Errorf("failed to create ocr2 node from clo node %v: %w", node, err) } ocr2nodes = append(ocr2nodes, n) } - _, err = configureOCR3contract(configureOCR3Request{ - cfg: cfg, + r, err := configureOCR3contract(configureOCR3Request{ + cfg: cfg.OCR3Config, chain: registryChain, contract: contract, nodes: ocr2nodes, + dryRun: cfg.DryRun, }) - return err + return &ConfigureOCR3Resp{ + OCR2OracleConfig: r.ocrConfig, + }, nil + } type registerCapabilitiesRequest struct { @@ -965,9 +985,10 @@ type configureOCR3Request struct { chain deployment.Chain contract *kocr3.OCR3Capability nodes []*ocr2Node + dryRun bool } type configureOCR3Response struct { - ocrConfig Orc2drOracleConfig + ocrConfig OCR2OracleConfig } func configureOCR3contract(req configureOCR3Request) (*configureOCR3Response, error) { @@ -979,6 +1000,9 @@ func configureOCR3contract(req configureOCR3Request) (*configureOCR3Response, er if err != nil { return nil, fmt.Errorf("failed to generate OCR3 config: %w", err) } + if req.dryRun { + return &configureOCR3Response{ocrConfig}, nil + } tx, err := req.contract.SetConfig(req.chain.DeployerKey, ocrConfig.Signers, ocrConfig.Transmitters, diff --git a/deployment/keystone/ocr3config.go b/deployment/keystone/ocr3config.go index 5cd8ada8c61..2c12ae3c596 100644 --- a/deployment/keystone/ocr3config.go +++ b/deployment/keystone/ocr3config.go @@ -68,7 +68,8 @@ type NodeKeys struct { EncryptionPublicKey string `json:"EncryptionPublicKey"` } -type Orc2drOracleConfig struct { +// OCR2OracleConfig is the input configuration for an OCR2/3 contract. +type OCR2OracleConfig struct { Signers [][]byte Transmitters []common.Address F uint8 @@ -77,7 +78,7 @@ type Orc2drOracleConfig struct { OffchainConfig []byte } -func (c Orc2drOracleConfig) MarshalJSON() ([]byte, error) { +func (c OCR2OracleConfig) MarshalJSON() ([]byte, error) { alias := struct { Signers []string Transmitters []string @@ -105,16 +106,16 @@ func (c Orc2drOracleConfig) MarshalJSON() ([]byte, error) { return json.Marshal(alias) } -func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (Orc2drOracleConfig, error) { +func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (OCR2OracleConfig, error) { onchainPubKeys := [][]byte{} allPubKeys := map[string]any{} if cfg.OCRSecrets.IsEmpty() { - return Orc2drOracleConfig{}, errors.New("OCRSecrets is required") + return OCR2OracleConfig{}, errors.New("OCRSecrets is required") } for _, n := range nca { // evm keys always required if n.OCR2OnchainPublicKey == "" { - return Orc2drOracleConfig{}, errors.New("OCR2OnchainPublicKey is required") + return OCR2OracleConfig{}, errors.New("OCR2OnchainPublicKey is required") } ethPubKey := common.HexToAddress(n.OCR2OnchainPublicKey) pubKeys := map[string]types.OnchainPublicKey{ @@ -124,7 +125,7 @@ func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (Orc2drOrac if n.AptosOnchainPublicKey != "" { aptosPubKey, err := hex.DecodeString(n.AptosOnchainPublicKey) if err != nil { - return Orc2drOracleConfig{}, fmt.Errorf("failed to decode AptosOnchainPublicKey: %w", err) + return OCR2OracleConfig{}, fmt.Errorf("failed to decode AptosOnchainPublicKey: %w", err) } pubKeys[string(chaintype.Aptos)] = aptosPubKey } @@ -133,13 +134,13 @@ func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (Orc2drOrac raw := 
hex.EncodeToString(key) _, exists := allPubKeys[raw] if exists { - return Orc2drOracleConfig{}, fmt.Errorf("Duplicate onchain public key: '%s'", raw) + return OCR2OracleConfig{}, fmt.Errorf("Duplicate onchain public key: '%s'", raw) } allPubKeys[raw] = struct{}{} } pubKey, err := ocrcommon.MarshalMultichainPublicKey(pubKeys) if err != nil { - return Orc2drOracleConfig{}, fmt.Errorf("failed to marshal multichain public key: %w", err) + return OCR2OracleConfig{}, fmt.Errorf("failed to marshal multichain public key: %w", err) } onchainPubKeys = append(onchainPubKeys, pubKey) } @@ -148,13 +149,13 @@ func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (Orc2drOrac for _, n := range nca { pkBytes, err := hex.DecodeString(n.OCR2OffchainPublicKey) if err != nil { - return Orc2drOracleConfig{}, fmt.Errorf("failed to decode OCR2OffchainPublicKey: %w", err) + return OCR2OracleConfig{}, fmt.Errorf("failed to decode OCR2OffchainPublicKey: %w", err) } pkBytesFixed := [ed25519.PublicKeySize]byte{} nCopied := copy(pkBytesFixed[:], pkBytes) if nCopied != ed25519.PublicKeySize { - return Orc2drOracleConfig{}, fmt.Errorf("wrong num elements copied from ocr2 offchain public key. expected %d but got %d", ed25519.PublicKeySize, nCopied) + return OCR2OracleConfig{}, fmt.Errorf("wrong num elements copied from ocr2 offchain public key. expected %d but got %d", ed25519.PublicKeySize, nCopied) } offchainPubKeysBytes = append(offchainPubKeysBytes, types.OffchainPublicKey(pkBytesFixed)) @@ -164,13 +165,13 @@ func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (Orc2drOrac for _, n := range nca { pkBytes, err := hex.DecodeString(n.OCR2ConfigPublicKey) if err != nil { - return Orc2drOracleConfig{}, fmt.Errorf("failed to decode OCR2ConfigPublicKey: %w", err) + return OCR2OracleConfig{}, fmt.Errorf("failed to decode OCR2ConfigPublicKey: %w", err) } pkBytesFixed := [ed25519.PublicKeySize]byte{} n := copy(pkBytesFixed[:], pkBytes) if n != ed25519.PublicKeySize { - return Orc2drOracleConfig{}, fmt.Errorf("wrong num elements copied from ocr2 config public key. expected %d but got %d", ed25519.PublicKeySize, n) + return OCR2OracleConfig{}, fmt.Errorf("wrong num elements copied from ocr2 config public key. 
expected %d but got %d", ed25519.PublicKeySize, n) } configPubKeysBytes = append(configPubKeysBytes, types.ConfigEncryptionPublicKey(pkBytesFixed)) @@ -212,7 +213,7 @@ func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (Orc2drOrac nil, // empty onChain config ) if err != nil { - return Orc2drOracleConfig{}, fmt.Errorf("failed to generate contract config args: %w", err) + return OCR2OracleConfig{}, fmt.Errorf("failed to generate contract config args: %w", err) } var configSigners [][]byte @@ -222,10 +223,10 @@ func GenerateOCR3Config(cfg OracleConfigWithSecrets, nca []NodeKeys) (Orc2drOrac transmitterAddresses, err := evm.AccountToAddress(transmitters) if err != nil { - return Orc2drOracleConfig{}, fmt.Errorf("failed to convert transmitters to addresses: %w", err) + return OCR2OracleConfig{}, fmt.Errorf("failed to convert transmitters to addresses: %w", err) } - config := Orc2drOracleConfig{ + config := OCR2OracleConfig{ Signers: configSigners, Transmitters: transmitterAddresses, F: f, diff --git a/docs/CONFIG.md b/docs/CONFIG.md index ff918468c07..20965d816ec 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -1875,6 +1875,7 @@ CertFile is the path to a PEM file of trusted root certificate authority certifi [Mercury.Transmitter] TransmitQueueMaxSize = 10_000 # Default TransmitTimeout = "5s" # Default +TransmitConcurrency = 100 # Default ``` Mercury.Transmitter controls settings for the mercury transmitter @@ -1897,6 +1898,14 @@ TransmitTimeout controls how long the transmitter will wait for a response when sending a message to the mercury server, before aborting and considering the transmission to be failed. +### TransmitConcurrency +```toml +TransmitConcurrency = 100 # Default +``` +TransmitConcurrency is the max number of concurrent transmits to each server. + +Only has effect with LLO jobs. 
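Editor's note (illustrative only, not part of this patch): with the new field documented above, an operator's [Mercury.Transmitter] block might look like the sketch below. The section name, keys, and the default of 100 come from the documentation in this hunk; the non-default value of 4 is an arbitrary example.

```toml
[Mercury.Transmitter]
TransmitQueueMaxSize = 10_000 # Default
TransmitTimeout = "5s"        # Default
# Max number of concurrent transmits to each Mercury server.
# Only has an effect for LLO jobs; the default is 100.
TransmitConcurrency = 4
```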
+ ## Telemetry ```toml [Telemetry] diff --git a/go.mod b/go.mod index e6039802f93..23db187326b 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( github.com/smartcontractkit/chainlink-ccip v0.0.0-20241118091009-43c2b4804cec github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef068 github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f - github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e + github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 github.com/smartcontractkit/chainlink-feeds v0.1.1 github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241115191142-8b8369c1f44e diff --git a/go.sum b/go.sum index 270966b2a91..386ab51a211 100644 --- a/go.sum +++ b/go.sum @@ -1082,8 +1082,8 @@ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef06 github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef068/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e/go.mod h1:iK3BNHKCLgSgkOyiu3iE7sfZ20Qnuk7xwjV/yO/6gnQ= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 h1:1BMTG66HnCIz+KMBWGvyzELNM6VHGwv2WKFhN7H49Sg= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57/go.mod h1:QPiorgpbLv4+Jn4YO6xxU4ftTu4T3QN8HwX3ImP59DE= github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6AnNt+Wg64sVG+XSA49c= github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 h1:PBUaFfPLm+Efq7H9kdfGBivH+QhJ6vB5EZTR/sCZsxI= diff --git a/integration-tests/ccip-tests/testsetups/test_helpers.go b/integration-tests/ccip-tests/testsetups/test_helpers.go index fe7d4863952..acc7e5287ad 100644 --- a/integration-tests/ccip-tests/testsetups/test_helpers.go +++ b/integration-tests/ccip-tests/testsetups/test_helpers.go @@ -21,6 +21,7 @@ import ( "github.com/smartcontractkit/chainlink/deployment" ccipdeployment "github.com/smartcontractkit/chainlink/deployment/ccip" + "github.com/smartcontractkit/chainlink/deployment/ccip/changeset" "github.com/smartcontractkit/chainlink/deployment/environment/devenv" clclient "github.com/smartcontractkit/chainlink/deployment/environment/nodeclient" "github.com/smartcontractkit/chainlink/integration-tests/actions" @@ -137,10 +138,16 @@ func NewLocalDevEnvironmentWithRMN( lggr logger.Logger, numRmnNodes int, ) (ccipdeployment.DeployedEnv, devenv.RMNCluster) { - tenv, dockerenv, _ := NewLocalDevEnvironmentWithDefaultPrice(t, lggr) + tenv, dockerenv, testCfg := NewLocalDevEnvironmentWithDefaultPrice(t, lggr) state, err := ccipdeployment.LoadOnchainState(tenv.Env) require.NoError(t, err) + output, err := changeset.DeployPrerequisites(tenv.Env, changeset.DeployPrerequisiteConfig{ + ChainSelectors: tenv.Env.AllChainSelectors(), + }) + require.NoError(t, err) + require.NoError(t, 
tenv.Env.ExistingAddresses.Merge(output.AddressBook)) + // Deploy CCIP contracts. newAddresses := deployment.NewMemoryAddressBook() err = ccipdeployment.DeployCCIPContracts(tenv.Env, newAddresses, ccipdeployment.DeployCCIPContractConfig{ @@ -156,14 +163,15 @@ func NewLocalDevEnvironmentWithRMN( l := logging.GetTestLogger(t) config := GenerateTestRMNConfig(t, numRmnNodes, tenv, MustNetworksToRPCMap(dockerenv.EVMNetworks)) + require.NotNil(t, testCfg.CCIP) rmnCluster, err := devenv.NewRMNCluster( t, l, []string{dockerenv.DockerNetwork.ID}, config, - "rageproxy", - "latest", - "afn2proxy", - "latest", + testCfg.CCIP.RMNConfig.GetProxyImage(), + testCfg.CCIP.RMNConfig.GetProxyVersion(), + testCfg.CCIP.RMNConfig.GetAFN2ProxyImage(), + testCfg.CCIP.RMNConfig.GetAFN2ProxyVersion(), dockerenv.LogStream, ) require.NoError(t, err) diff --git a/integration-tests/go.mod b/integration-tests/go.mod index f3440a4db55..ddcd3ae65fa 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -415,7 +415,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/smartcontractkit/ccip-owner-contracts v0.0.0-20240926212305-a6deabdfce86 // indirect github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect + github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241115191142-8b8369c1f44e // indirect diff --git a/integration-tests/go.sum b/integration-tests/go.sum index a8ff6db2b20..8e3869244d8 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -1409,8 +1409,8 @@ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef06 github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef068/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e/go.mod h1:iK3BNHKCLgSgkOyiu3iE7sfZ20Qnuk7xwjV/yO/6gnQ= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 h1:1BMTG66HnCIz+KMBWGvyzELNM6VHGwv2WKFhN7H49Sg= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57/go.mod h1:QPiorgpbLv4+Jn4YO6xxU4ftTu4T3QN8HwX3ImP59DE= github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6AnNt+Wg64sVG+XSA49c= github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4= github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 h1:0ewLMbAz3rZrovdRUCgd028yOXX8KigB4FndAUdI2kM= diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index c04b84488ca..c548b6d9159 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -422,7 +422,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect 
github.com/smartcontractkit/chain-selectors v1.0.29 // indirect github.com/smartcontractkit/chainlink-automation v0.8.1 // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e // indirect + github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 // indirect github.com/smartcontractkit/chainlink-solana v1.1.1-0.20241115191142-8b8369c1f44e // indirect diff --git a/integration-tests/load/go.sum b/integration-tests/load/go.sum index 8504aecfe6e..ed0224adc21 100644 --- a/integration-tests/load/go.sum +++ b/integration-tests/load/go.sum @@ -1398,8 +1398,8 @@ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef06 github.com/smartcontractkit/chainlink-common v0.3.1-0.20241114134822-aadff98ef068/go.mod h1:ny87uTW6hLjCTLiBqBRNFEhETSXhHWevYlPclT5lSco= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f h1:BwrIaQIx5Iy6eT+DfLhFfK2XqjxRm74mVdlX8gbu4dw= github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241017133723-5277829bd53f/go.mod h1:wHtwSR3F1CQSJJZDQKuqaqFYnvkT+kMyget7dl8Clvo= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e h1:JiETqdNM0bktAUGMc62COwXIaw3rR3M77Me6bBLG0Fg= -github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241018134907-a00ba3729b5e/go.mod h1:iK3BNHKCLgSgkOyiu3iE7sfZ20Qnuk7xwjV/yO/6gnQ= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57 h1:1BMTG66HnCIz+KMBWGvyzELNM6VHGwv2WKFhN7H49Sg= +github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241114154055-8d29ea018b57/go.mod h1:QPiorgpbLv4+Jn4YO6xxU4ftTu4T3QN8HwX3ImP59DE= github.com/smartcontractkit/chainlink-feeds v0.1.1 h1:JzvUOM/OgGQA1sOqTXXl52R6AnNt+Wg64sVG+XSA49c= github.com/smartcontractkit/chainlink-feeds v0.1.1/go.mod h1:55EZ94HlKCfAsUiKUTNI7QlE/3d3IwTlsU3YNa/nBb4= github.com/smartcontractkit/chainlink-protos/orchestrator v0.3.0 h1:PBUaFfPLm+Efq7H9kdfGBivH+QhJ6vB5EZTR/sCZsxI= diff --git a/integration-tests/smoke/ccip_rmn_test.go b/integration-tests/smoke/ccip_rmn_test.go index a37b601e9d9..e8e81688239 100644 --- a/integration-tests/smoke/ccip_rmn_test.go +++ b/integration-tests/smoke/ccip_rmn_test.go @@ -16,6 +16,7 @@ import ( jobv1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/job" "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/osutil" "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext" + "github.com/smartcontractkit/chainlink/deployment" ccipdeployment "github.com/smartcontractkit/chainlink/deployment/ccip" "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_home" @@ -26,9 +27,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/logger" ) -// Set false to run the RMN tests -const skipRmnTest = true - func TestRMN_TwoMessagesOnTwoLanesIncludingBatching(t *testing.T) { runRmnTestCase(t, rmnTestCase{ name: "messages on two lanes including batching", @@ -177,9 +175,6 @@ const ( ) func runRmnTestCase(t *testing.T, tc rmnTestCase) { - if skipRmnTest { - t.Skip("Local only") - } require.NoError(t, os.Setenv("ENABLE_RMN", "true")) envWithRMN, rmnCluster := testsetups.NewLocalDevEnvironmentWithRMN(t, logger.TestLogger(t), len(tc.rmnNodes)) diff --git a/integration-tests/testconfig/ccip/config.go b/integration-tests/testconfig/ccip/config.go index 
560c816d85f..3ef746e29e3 100644 --- a/integration-tests/testconfig/ccip/config.go +++ b/integration-tests/testconfig/ccip/config.go @@ -15,12 +15,16 @@ import ( ) const ( - E2E_JD_IMAGE = "E2E_JD_IMAGE" - E2E_JD_VERSION = "E2E_JD_VERSION" - E2E_JD_GRPC = "E2E_JD_GRPC" - E2E_JD_WSRPC = "E2E_JD_WSRPC" - DEFAULT_DB_NAME = "JD_DB" - DEFAULT_DB_VERSION = "14.1" + E2E_JD_IMAGE = "E2E_JD_IMAGE" + E2E_JD_VERSION = "E2E_JD_VERSION" + E2E_JD_GRPC = "E2E_JD_GRPC" + E2E_JD_WSRPC = "E2E_JD_WSRPC" + DEFAULT_DB_NAME = "JD_DB" + DEFAULT_DB_VERSION = "14.1" + E2E_RMN_RAGEPROXY_IMAGE = "E2E_RMN_RAGEPROXY_IMAGE" + E2E_RMN_RAGEPROXY_VERSION = "E2E_RMN_RAGEPROXY_VERSION" + E2E_RMN_AFN2PROXY_IMAGE = "E2E_RMN_AFN2PROXY_IMAGE" + E2E_RMN_AFN2PROXY_VERSION = "E2E_RMN_AFN2PROXY_VERSION" ) var ( @@ -45,6 +49,38 @@ type RMNConfig struct { AFNVersion *string `toml:",omitempty"` } +func (r *RMNConfig) GetProxyImage() string { + image := pointer.GetString(r.ProxyImage) + if image == "" { + return ctfconfig.MustReadEnvVar_String(E2E_RMN_RAGEPROXY_IMAGE) + } + return image +} + +func (r *RMNConfig) GetProxyVersion() string { + version := pointer.GetString(r.ProxyVersion) + if version == "" { + return ctfconfig.MustReadEnvVar_String(E2E_RMN_RAGEPROXY_VERSION) + } + return version +} + +func (r *RMNConfig) GetAFN2ProxyImage() string { + image := pointer.GetString(r.AFNImage) + if image == "" { + return ctfconfig.MustReadEnvVar_String(E2E_RMN_AFN2PROXY_IMAGE) + } + return image +} + +func (r *RMNConfig) GetAFN2ProxyVersion() string { + version := pointer.GetString(r.AFNVersion) + if version == "" { + return ctfconfig.MustReadEnvVar_String(E2E_RMN_AFN2PROXY_VERSION) + } + return version +} + type NodeConfig struct { NoOfPluginNodes *int `toml:",omitempty"` NoOfBootstraps *int `toml:",omitempty"` diff --git a/testdata/scripts/config/merge_raw_configs.txtar b/testdata/scripts/config/merge_raw_configs.txtar index b3d50f22b36..bf0da942eea 100644 --- a/testdata/scripts/config/merge_raw_configs.txtar +++ b/testdata/scripts/config/merge_raw_configs.txtar @@ -384,6 +384,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar index 5e8b847ceda..51edf69d599 100644 --- a/testdata/scripts/node/validate/default.txtar +++ b/testdata/scripts/node/validate/default.txtar @@ -249,6 +249,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/defaults-override.txtar b/testdata/scripts/node/validate/defaults-override.txtar index bf8bece28bf..19bae4bec1a 100644 --- a/testdata/scripts/node/validate/defaults-override.txtar +++ b/testdata/scripts/node/validate/defaults-override.txtar @@ -310,6 +310,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar index 2e72ed7e9bb..ddd01a4c1e4 100644 --- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar @@ -293,6 +293,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 
[Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar index 7b27328f7a6..0f40ad6a208 100644 --- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar +++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar @@ -293,6 +293,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar index 83d23546175..dd7455ca3a8 100644 --- a/testdata/scripts/node/validate/disk-based-logging.txtar +++ b/testdata/scripts/node/validate/disk-based-logging.txtar @@ -293,6 +293,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar index 3fccffc4e69..1ffe2ab718c 100644 --- a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar +++ b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar @@ -278,6 +278,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar index 5ea0aa289a8..52edd2b8065 100644 --- a/testdata/scripts/node/validate/invalid.txtar +++ b/testdata/scripts/node/validate/invalid.txtar @@ -283,6 +283,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar index 26641c0ef76..623459ce253 100644 --- a/testdata/scripts/node/validate/valid.txtar +++ b/testdata/scripts/node/validate/valid.txtar @@ -290,6 +290,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering] diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar index 51b3e897741..5452c49f122 100644 --- a/testdata/scripts/node/validate/warnings.txtar +++ b/testdata/scripts/node/validate/warnings.txtar @@ -272,6 +272,7 @@ CertFile = '' [Mercury.Transmitter] TransmitQueueMaxSize = 10000 TransmitTimeout = '5s' +TransmitConcurrency = 100 [Capabilities] [Capabilities.Peering]
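Editor's note (illustrative only, not part of this patch): the new RMNConfig getters in integration-tests/testconfig/ccip/config.go prefer values set in the test TOML and fall back to the E2E_RMN_RAGEPROXY_* / E2E_RMN_AFN2PROXY_* environment variables when those fields are empty. The sketch below shows the TOML side of that fallback; the field names match the RMNConfig struct, but the table path is a guess not shown in this diff, and the image/version values are placeholders taken from the previously hardcoded defaults.

```toml
# Hypothetical table path; only the field names are taken from RMNConfig.
[CCIP.RMNConfig]
ProxyImage   = "rageproxy"  # when empty, falls back to $E2E_RMN_RAGEPROXY_IMAGE
ProxyVersion = "latest"     # when empty, falls back to $E2E_RMN_RAGEPROXY_VERSION
AFNImage     = "afn2proxy"  # when empty, falls back to $E2E_RMN_AFN2PROXY_IMAGE
AFNVersion   = "latest"     # when empty, falls back to $E2E_RMN_AFN2PROXY_VERSION
```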