Merge conflicts
connorwstein committed Nov 19, 2024
2 parents f842de1 + 5a68674 · commit 85377d4
Showing 65 changed files with 3,530 additions and 3,949 deletions.
11 changes: 11 additions & 0 deletions .changeset/late-windows-clean.md
@@ -0,0 +1,11 @@
---
"chainlink": minor
---

#internal Updated the TXM confirmation logic to use the mined transaction count to identify re-org'd or confirmed transactions.

- Confirmer uses the mined transaction count to determine if transactions have been re-org'd or confirmed.
- Confirmer no longer sets transaction states to `confirmed_missing_receipt`. This state is maintained in queries for backwards compatibility.
- Finalizer now responsible for fetching and storing receipts for confirmed transactions.
- Finalizer now responsible for resuming pending task runs.
- Finalizer now responsible for marking old transactions without receipts broadcasted before the finalized head as fatal.
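As a rough illustration of the confirmation rule this changeset describes — not the actual TXM code, all names and types below are hypothetical — a transaction whose nonce sits below the account's mined transaction count can be treated as confirmed, while a previously confirmed transaction at or above that count must have been re-org'd out:

```go
package main

import "fmt"

// txState is a simplified stand-in for the TXM's transaction states.
type txState string

const (
	stateUnconfirmed txState = "unconfirmed"
	stateConfirmed   txState = "confirmed"
)

type tx struct {
	Nonce uint64
	State txState
}

// classifyByMinedCount compares each transaction's nonce against the
// on-chain mined transaction count: nonces below the count are confirmed,
// and previously confirmed nonces at or above the count were re-org'd.
func classifyByMinedCount(txs []*tx, minedCount uint64) (confirmed, reorged []*tx) {
	for _, t := range txs {
		switch {
		case t.Nonce < minedCount && t.State != stateConfirmed:
			t.State = stateConfirmed
			confirmed = append(confirmed, t)
		case t.Nonce >= minedCount && t.State == stateConfirmed:
			t.State = stateUnconfirmed
			reorged = append(reorged, t)
		}
	}
	return confirmed, reorged
}

func main() {
	txs := []*tx{{Nonce: 0, State: stateConfirmed}, {Nonce: 1}, {Nonce: 2, State: stateConfirmed}}
	confirmed, reorged := classifyByMinedCount(txs, 2)
	fmt.Println(len(confirmed), len(reorged)) // 1 1: nonce 1 is newly confirmed, nonce 2 was re-org'd
}
```

Per the bullets above, receipt fetching for the newly confirmed set, resuming pending task runs, and marking stale pre-finality transactions as fatal then happen in the Finalizer rather than the Confirmer.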
5 changes: 5 additions & 0 deletions .changeset/sixty-queens-wait.md
@@ -0,0 +1,5 @@
---
"chainlink": minor
---

#updated chain config: allow chain id and account address to be manually provided when no selections are available
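A minimal sketch of the fallback this changeset describes, under the assumption that the config flow simply accepts a typed-in value when the node offers nothing to pick from (hypothetical helper, not the actual chain-config code):

```go
package main

import (
	"errors"
	"fmt"
)

// resolveChainID prefers a selection from the list offered by the node,
// but when that list is empty it falls back to a manually entered value.
func resolveChainID(selectable []string, selected, manual string) (string, error) {
	if len(selectable) > 0 {
		for _, id := range selectable {
			if id == selected {
				return id, nil
			}
		}
		return "", errors.New("selected chain ID is not one of the offered options")
	}
	if manual == "" {
		return "", errors.New("no chain IDs available and no manual value provided")
	}
	return manual, nil
}

func main() {
	id, err := resolveChainID(nil, "", "1337") // no selections available, manual value accepted
	fmt.Println(id, err)                       // 1337 <nil>
}
```

The same shape would apply to the account address field.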
5 changes: 5 additions & 0 deletions .changeset/weak-weeks-grin.md
@@ -0,0 +1,5 @@
---
"chainlink": patch
---

Bump Solana to include MultiNode integration. #added
14 changes: 14 additions & 0 deletions .github/actions/goreleaser-build-sign-publish/action.yml
@@ -26,19 +26,32 @@ inputs:
description: "The goreleaser configuration yaml"
default: ".goreleaser.yaml"
required: false
# other inputs
enable-debug:
description: |
Enable debug information for the run (true/false). This includes
buildkit debug information, and goreleaser debug, etc.
required: false
default: "${{ runner.debug == '1' }}"

runs:
using: composite
steps:
- # We need QEMU to test the cross architecture builds after they're built.
name: Set up QEMU
uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0

- name: Setup docker buildx
uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.0
with:
buildkitd-flags: ${{ inputs.enable-debug == 'true' && '--debug' || '' }}

- name: Set up Go
uses: ./.github/actions/setup-go
with:
go-version-file: 'go.mod'
only-modules: 'true'

- name: Setup goreleaser
uses: goreleaser/goreleaser-action@9ed2f89a662bf1735a48bc8557fd212fa902bebf # v6.1.0
with:
@@ -65,6 +78,7 @@ runs:
IMAGE_TAG: ${{ inputs.docker-image-tag }}
GORELEASER_KEY: ${{ inputs.goreleaser-key }}
GITHUB_TOKEN: ${{ github.token }}
DEBUG: ${{ inputs.enable-debug }}
run: |
# https://github.com/orgs/community/discussions/24950
${GITHUB_ACTION_PATH}/release.js
5 changes: 3 additions & 2 deletions .github/actions/goreleaser-build-sign-publish/release.js
@@ -168,6 +168,7 @@ function extractDockerImages(artifacts) {

function constructGoreleaserCommand(releaseType, version, goreleaserConfig) {
const flags = [];
const debugFlag = (process.env.DEBUG == 'true') ? '--verbose' : '';

checkReleaseType(releaseType);

@@ -192,9 +193,9 @@ function constructGoreleaserCommand(releaseType, version, goreleaserConfig) {

const flagsStr = flags.join(" ");
if (releaseType === "merge") {
return `CHAINLINK_VERSION=${version} goreleaser ${subCmd} ${flagsStr}`;
return `CHAINLINK_VERSION=${version} goreleaser ${debugFlag} ${subCmd} ${flagsStr}`;
} else {
return `CHAINLINK_VERSION=${version} goreleaser ${subCmd} --config ${goreleaserConfig} ${flagsStr}`;
return `CHAINLINK_VERSION=${version} goreleaser ${debugFlag} ${subCmd} --config ${goreleaserConfig} ${flagsStr}`;
}
}

2 changes: 1 addition & 1 deletion .github/e2e-tests.yml
@@ -947,7 +947,7 @@ runner-test-matrix:
pyroscope_env: ci-smoke-ccipv1_6-evm-simulated
test_env_vars:
E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2
E2E_JD_VERSION: 0.4.0
E2E_JD_VERSION: 0.6.0

- id: smoke/ccip_messaging_test.go:*
path: integration-tests/smoke/ccip_messaging_test.go
8 changes: 6 additions & 2 deletions .github/workflows/ci-core.yml
@@ -526,10 +526,14 @@ jobs:
make rm-mocked
make generate
- name: Ensure clean after generate
run: git diff --stat --exit-code
run: |
git add --all
git diff --stat --cached --exit-code
- run: make gomodtidy
- name: Ensure clean after tidy
run: git diff --minimal --exit-code
run: |
git add --all
git diff --minimal --cached --exit-code
run-frequency:
name: Scheduled Run Frequency
4 changes: 4 additions & 0 deletions .github/workflows/delete-caches.yml
@@ -24,10 +24,14 @@ jobs:
uses: actions/checkout@v4.1.2

- name: Setup gh-actions-cache extension
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: gh extension install actions/gh-actions-cache

- name: Retrieve Trunk SHA
id: get-sha
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
SHA=$(gh pr view -R $REPO $PR_NUMBER --json mergeCommit --jq .mergeCommit.oid)
echo "sha=$SHA" >> $GITHUB_OUTPUT
4 changes: 3 additions & 1 deletion .github/workflows/solidity.yml
@@ -97,7 +97,9 @@ jobs:
run: ./tools/ci/check_solc_hashes
- name: Check if Go solidity wrappers are updated
if: ${{ needs.changes.outputs.changes == 'true' }}
run: git diff --minimal --color --exit-code | diff-so-fancy
run: |
git add --all
git diff --minimal --color --cached --exit-code | diff-so-fancy
# The if statements for steps after checkout repo is a workaround for
# passing required check for PRs that don't have filtered changes.
123 changes: 52 additions & 71 deletions common/client/multi_node.go
@@ -2,7 +2,6 @@ package client

import (
"context"
"errors"
"fmt"
"math/big"
"sync"
@@ -32,7 +31,9 @@ type MultiNode[
CHAIN_ID types.ID,
RPC any,
] struct {
services.StateMachine
services.Service
eng *services.Engine

primaryNodes []Node[CHAIN_ID, RPC]
sendOnlyNodes []SendOnlyNode[CHAIN_ID, RPC]
chainID CHAIN_ID
@@ -47,9 +48,6 @@

activeMu sync.RWMutex
activeNode Node[CHAIN_ID, RPC]

chStop services.StopChan
wg sync.WaitGroup
}

func NewMultiNode[
@@ -73,15 +71,19 @@
primaryNodes: primaryNodes,
sendOnlyNodes: sendOnlyNodes,
chainID: chainID,
lggr: logger.Sugared(lggr).Named("MultiNode").With("chainID", chainID.String()),
selectionMode: selectionMode,
nodeSelector: nodeSelector,
chStop: make(services.StopChan),
leaseDuration: leaseDuration,
chainFamily: chainFamily,
reportInterval: reportInterval,
deathDeclarationDelay: deathDeclarationDelay,
}
c.Service, c.eng = services.Config{
Name: "MultiNode",
Start: c.start,
Close: c.close,
}.NewServiceEngine(logger.With(lggr, "chainID", chainID.String()))
c.lggr = c.eng.SugaredLogger

c.lggr.Debugf("The MultiNode is configured to use NodeSelectionMode: %s", selectionMode)

@@ -93,16 +95,12 @@ func (c *MultiNode[CHAIN_ID, RPC]) ChainID() CHAIN_ID {
}

func (c *MultiNode[CHAIN_ID, RPC]) DoAll(ctx context.Context, do func(ctx context.Context, rpc RPC, isSendOnly bool)) error {
var err error
ok := c.IfNotStopped(func() {
ctx, _ = c.chStop.Ctx(ctx)

return c.eng.IfNotStopped(func() error {
callsCompleted := 0
for _, n := range c.primaryNodes {
select {
case <-ctx.Done():
err = ctx.Err()
return
return ctx.Err()
default:
if n.State() != nodeStateAlive {
continue
@@ -111,27 +109,23 @@ func (c *MultiNode[CHAIN_ID, RPC]) DoAll(ctx context.Context, do func(ctx contex
callsCompleted++
}
}
if callsCompleted == 0 {
err = ErroringNodeError
}

for _, n := range c.sendOnlyNodes {
select {
case <-ctx.Done():
err = ctx.Err()
return
return ctx.Err()
default:
if n.State() != nodeStateAlive {
continue
}
do(ctx, n.RPC(), true)
}
}
if callsCompleted == 0 {
return ErroringNodeError
}
return nil
})
if !ok {
return errors.New("MultiNode is stopped")
}
return err
}

func (c *MultiNode[CHAIN_ID, RPC]) NodeStates() map[string]string {
@@ -149,53 +143,44 @@ func (c *MultiNode[CHAIN_ID, RPC]) NodeStates() map[string]string {
//
// Nodes handle their own redialing and runloops, so this function does not
// return any error if the nodes aren't available
func (c *MultiNode[CHAIN_ID, RPC]) Start(ctx context.Context) error {
return c.StartOnce("MultiNode", func() (merr error) {
if len(c.primaryNodes) == 0 {
return fmt.Errorf("no available nodes for chain %s", c.chainID.String())
func (c *MultiNode[CHAIN_ID, RPC]) start(ctx context.Context) error {
if len(c.primaryNodes) == 0 {
return fmt.Errorf("no available nodes for chain %s", c.chainID.String())
}
var ms services.MultiStart
for _, n := range c.primaryNodes {
if n.ConfiguredChainID().String() != c.chainID.String() {
return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String()))
}
var ms services.MultiStart
for _, n := range c.primaryNodes {
if n.ConfiguredChainID().String() != c.chainID.String() {
return ms.CloseBecause(fmt.Errorf("node %s has configured chain ID %s which does not match multinode configured chain ID of %s", n.String(), n.ConfiguredChainID().String(), c.chainID.String()))
}
n.SetPoolChainInfoProvider(c)
// node will handle its own redialing and automatic recovery
if err := ms.Start(ctx, n); err != nil {
return err
}
n.SetPoolChainInfoProvider(c)
// node will handle its own redialing and automatic recovery
if err := ms.Start(ctx, n); err != nil {
return err
}
for _, s := range c.sendOnlyNodes {
if s.ConfiguredChainID().String() != c.chainID.String() {
return ms.CloseBecause(fmt.Errorf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", s.String(), s.ConfiguredChainID().String(), c.chainID.String()))
}
if err := ms.Start(ctx, s); err != nil {
return err
}
}
for _, s := range c.sendOnlyNodes {
if s.ConfiguredChainID().String() != c.chainID.String() {
return ms.CloseBecause(fmt.Errorf("sendonly node %s has configured chain ID %s which does not match multinode configured chain ID of %s", s.String(), s.ConfiguredChainID().String(), c.chainID.String()))
}
c.wg.Add(1)
go c.runLoop()

if c.leaseDuration.Seconds() > 0 && c.selectionMode != NodeSelectionModeRoundRobin {
c.lggr.Infof("The MultiNode will switch to best node every %s", c.leaseDuration.String())
c.wg.Add(1)
go c.checkLeaseLoop()
} else {
c.lggr.Info("Best node switching is disabled")
if err := ms.Start(ctx, s); err != nil {
return err
}
}
c.eng.Go(c.runLoop)

return nil
})
if c.leaseDuration.Seconds() > 0 && c.selectionMode != NodeSelectionModeRoundRobin {
c.lggr.Infof("The MultiNode will switch to best node every %s", c.leaseDuration.String())
c.eng.Go(c.checkLeaseLoop)
} else {
c.lggr.Info("Best node switching is disabled")
}

return nil
}

// Close tears down the MultiNode and closes all nodes
func (c *MultiNode[CHAIN_ID, RPC]) Close() error {
return c.StopOnce("MultiNode", func() error {
close(c.chStop)
c.wg.Wait()

return services.CloseAll(services.MultiCloser(c.primaryNodes), services.MultiCloser(c.sendOnlyNodes))
})
func (c *MultiNode[CHAIN_ID, RPC]) close() error {
return services.CloseAll(services.MultiCloser(c.primaryNodes), services.MultiCloser(c.sendOnlyNodes))
}

// SelectRPC returns an RPC of an active node. If there are no active nodes it returns an error.
@@ -233,8 +218,7 @@ func (c *MultiNode[CHAIN_ID, RPC]) selectNode() (node Node[CHAIN_ID, RPC], err e
c.activeNode = c.nodeSelector.Select()
if c.activeNode == nil {
c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name())
errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String())
c.SvcErrBuffer.Append(errmsg)
c.eng.EmitHealthErr(fmt.Errorf("no live nodes available for chain %s", c.chainID.String()))
return nil, ErroringNodeError
}

@@ -296,24 +280,21 @@ func (c *MultiNode[CHAIN_ID, RPC]) checkLease() {
}
}

func (c *MultiNode[CHAIN_ID, RPC]) checkLeaseLoop() {
defer c.wg.Done()
func (c *MultiNode[CHAIN_ID, RPC]) checkLeaseLoop(ctx context.Context) {
c.leaseTicker = time.NewTicker(c.leaseDuration)
defer c.leaseTicker.Stop()

for {
select {
case <-c.leaseTicker.C:
c.checkLease()
case <-c.chStop:
case <-ctx.Done():
return
}
}
}

func (c *MultiNode[CHAIN_ID, RPC]) runLoop() {
defer c.wg.Done()

func (c *MultiNode[CHAIN_ID, RPC]) runLoop(ctx context.Context) {
nodeStates := make([]nodeWithState, len(c.primaryNodes))
for i, n := range c.primaryNodes {
nodeStates[i] = nodeWithState{
@@ -332,7 +313,7 @@ func (c *MultiNode[CHAIN_ID, RPC]) runLoop() {
select {
case <-monitor.C:
c.report(nodeStates)
case <-c.chStop:
case <-ctx.Done():
return
}
}
@@ -376,7 +357,7 @@ func (c *MultiNode[CHAIN_ID, RPC]) report(nodesStateInfo []nodeWithState) {
if total == dead {
rerr := fmt.Errorf("no primary nodes available: 0/%d nodes are alive", total)
c.lggr.Criticalw(rerr.Error(), "nodeStates", nodesStateInfo)
c.SvcErrBuffer.Append(rerr)
c.eng.EmitHealthErr(rerr)
} else if dead > 0 {
c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodesStateInfo)
}