diff --git a/.changeset/big-camels-report.md b/.changeset/big-camels-report.md
new file mode 100644
index 00000000000..f81f66b9138
--- /dev/null
+++ b/.changeset/big-camels-report.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+#bugfix fix non-idempotent loopp registry.Register
diff --git a/.changeset/giant-eels-jump.md b/.changeset/giant-eels-jump.md
new file mode 100644
index 00000000000..5ab8ca875ca
--- /dev/null
+++ b/.changeset/giant-eels-jump.md
@@ -0,0 +1,5 @@
+---
+"chainlink": patch
+---
+
+Add error handling for Arbitrum RPC server timeouts. #added
diff --git a/.github/workflows/ci-core-partial.yml b/.github/workflows/ci-core-partial.yml
index c9752d4e1e4..35f689090e8 100644
--- a/.github/workflows/ci-core-partial.yml
+++ b/.github/workflows/ci-core-partial.yml
@@ -46,6 +46,7 @@ jobs:
permissions:
id-token: write
contents: write
+ actions: write
strategy:
fail-fast: false
matrix:
@@ -86,7 +87,7 @@ jobs:
go-mod-download-directory: ${{ matrix.type.test-suite == 'ccip-deployment' && matrix.type.module-directory || '' }}
- name: Build Tests
- uses: smartcontractkit/.github/apps/go-conditional-tests@37882e110590e636627a26371bdbd56ddfcce821 # go-conditional-tests@0.1.0
+ uses: smartcontractkit/.github/apps/go-conditional-tests@57f99fbea73056c490c766d50ef582a13ec4f3bb # go-conditional-tests@0.2.0
timeout-minutes: 10
with:
pipeline-step: "build"
@@ -98,7 +99,7 @@ jobs:
build-flags: ${{ matrix.type.build-flags }}
- name: Run Tests
- uses: smartcontractkit/.github/apps/go-conditional-tests@37882e110590e636627a26371bdbd56ddfcce821 # go-conditional-tests@0.1.0
+ uses: smartcontractkit/.github/apps/go-conditional-tests@57f99fbea73056c490c766d50ef582a13ec4f3bb # go-conditional-tests@0.2.0
timeout-minutes: 15
env:
CL_DATABASE_URL: ${{ env.DB_URL }}
@@ -112,7 +113,7 @@ jobs:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Update Test Index
- uses: smartcontractkit/.github/apps/go-conditional-tests@37882e110590e636627a26371bdbd56ddfcce821 # go-conditional-tests@0.1.0
+ uses: smartcontractkit/.github/apps/go-conditional-tests@57f99fbea73056c490c766d50ef582a13ec4f3bb # go-conditional-tests@0.2.0
with:
pipeline-step: "update"
collect-coverage: ${{ needs.filter.outputs.should-collect-coverage }}
@@ -130,7 +131,7 @@ jobs:
if: ${{ needs.filter.outputs.should-collect-coverage == 'true' }}
runs-on: ubuntu-latest
steps:
- - name: Checkout the repo
+ - name: Checkout the repo
uses: actions/checkout@v4.2.1
with:
# fetches all history for all tags and branches to provide more metadata for sonar reports
diff --git a/.mockery.yaml b/.mockery.yaml
index dd9024cc066..5777ca1da92 100644
--- a/.mockery.yaml
+++ b/.mockery.yaml
@@ -583,12 +583,6 @@ packages:
github.com/smartcontractkit/chainlink/v2/core/services/workflows/syncer:
interfaces:
ORM:
- ContractReader:
- config:
- mockname: "Mock{{ .InterfaceName }}"
- filename: contract_reader_mock.go
- inpackage: true
- dir: "{{ .InterfaceDir }}"
Handler:
config:
mockname: "Mock{{ .InterfaceName }}"
diff --git a/core/capabilities/remote/executable/client_test.go b/core/capabilities/remote/executable/client_test.go
index 5c4da350b9e..0314f62b1b7 100644
--- a/core/capabilities/remote/executable/client_test.go
+++ b/core/capabilities/remote/executable/client_test.go
@@ -29,6 +29,7 @@ const (
)
func Test_Client_DonTopologies(t *testing.T) {
+ testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/CAPPL-363")
ctx := testutils.Context(t)
transmissionSchedule, err := values.NewMap(map[string]any{
@@ -87,6 +88,7 @@ func Test_Client_DonTopologies(t *testing.T) {
}
func Test_Client_TransmissionSchedules(t *testing.T) {
+ testutils.SkipFlakey(t, "https://smartcontract-it.atlassian.net/browse/CAPPL-363")
ctx := testutils.Context(t)
responseTest := func(t *testing.T, response commoncap.CapabilityResponse, responseError error) {
diff --git a/core/chains/evm/client/errors.go b/core/chains/evm/client/errors.go
index 1075dc40606..bde97185580 100644
--- a/core/chains/evm/client/errors.go
+++ b/core/chains/evm/client/errors.go
@@ -64,6 +64,7 @@ const (
ServiceUnavailable
TerminallyStuck
TooManyResults
+ ServiceTimeout
)
type ClientErrors map[int]*regexp.Regexp
@@ -160,7 +161,8 @@ var arbitrum = ClientErrors{
Fatal: arbitrumFatal,
L2FeeTooLow: regexp.MustCompile(`(: |^)max fee per gas less than block base fee(:|$)`),
L2Full: regexp.MustCompile(`(: |^)(queue full|sequencer pending tx pool full, please try again)(:|$)`),
- ServiceUnavailable: regexp.MustCompile(`(: |^)502 Bad Gateway: [\s\S]*$|network is unreachable|i/o timeout`),
+ ServiceUnavailable: regexp.MustCompile(`(: |^)502 Bad Gateway: [\s\S]*$|network is unreachable|i/o timeout|(: |^)503 Service Temporarily Unavailable(:|$)`),
+ ServiceTimeout: regexp.MustCompile(`(: |^)408 Request Timeout(:|$)`),
}
// Treasure
@@ -398,6 +400,11 @@ func (s *SendError) IsServiceUnavailable(configErrors *ClientErrors) bool {
return s.is(ServiceUnavailable, configErrors) || pkgerrors.Is(s.err, commonclient.ErroringNodeError)
}
+// IsServiceTimeout indicates if the error was caused by a service timeout
+func (s *SendError) IsServiceTimeout(configErrors *ClientErrors) bool {
+ return s.is(ServiceTimeout, configErrors)
+}
+
// IsTerminallyStuck indicates if a transaction was stuck without any chance of inclusion
func (s *SendError) IsTerminallyStuckConfigError(configErrors *ClientErrors) bool {
return s.is(TerminallyStuck, configErrors)
@@ -619,6 +626,10 @@ func ClassifySendError(err error, clientErrors config.ClientErrors, lggr logger.
lggr.Errorw(fmt.Sprintf("service unavailable while sending transaction %x", tx.Hash()), "err", sendError, "etx", tx)
return commonclient.Retryable
}
+ if sendError.IsServiceTimeout(configErrors) {
+ lggr.Errorw(fmt.Sprintf("service timed out while sending transaction %x", tx.Hash()), "err", sendError, "etx", tx)
+ return commonclient.Retryable
+ }
if sendError.IsTimeout() {
lggr.Errorw(fmt.Sprintf("timeout while sending transaction %x", tx.Hash()), "err", sendError, "etx", tx)
return commonclient.Retryable
@@ -666,7 +677,7 @@ var drpc = ClientErrors{
// Linkpool, Blockdaemon, and Chainstack all return "request timed out" if the log results are too large for them to process
var defaultClient = ClientErrors{
- TooManyResults: regexp.MustCompile(`request timed out`),
+ TooManyResults: regexp.MustCompile(`request timed out|408 Request Timed Out`),
}
// JSON-RPC error codes which can indicate a refusal of the server to process an eth_getLogs request because the result set is too large
diff --git a/core/chains/evm/client/errors_test.go b/core/chains/evm/client/errors_test.go
index 75ac21597d8..1f9aaa53365 100644
--- a/core/chains/evm/client/errors_test.go
+++ b/core/chains/evm/client/errors_test.go
@@ -245,6 +245,7 @@ func Test_Eth_Errors(t *testing.T) {
{"network is unreachable", true, "Arbitrum"},
{"client error service unavailable", true, "tomlConfig"},
{"[Request ID: 825608a8-fd8a-4b5b-aea7-92999509306d] Error invoking RPC: [Request ID: 825608a8-fd8a-4b5b-aea7-92999509306d] Transaction execution returns a null value for transaction", true, "hedera"},
+		{"call failed: 503 Service Temporarily Unavailable: <html>\r\n<head><title>503 Service Temporarily Unavailable</title></head>\r\n<body>\r\n<center><h1>503 Service Temporarily Unavailable</h1></center>\r\n</body>\r\n</html>\r\n", true, "Arbitrum"},
}
for _, test := range tests {
err = evmclient.NewSendErrorS(test.message)
@@ -260,6 +261,20 @@ func Test_Eth_Errors(t *testing.T) {
}
})
+ t.Run("IsServiceTimeout", func(t *testing.T) {
+ tests := []errorCase{
+ {"call failed: 408 Request Timeout: {", true, "Arbitrum"},
+ {"408 Request Timeout: {\"id\":303,\"jsonrpc\":\"2.0\",\"error\":{\"code\\\":-32009,\\\"message\\\":\\\"request timeout\\\"}}\",\"errVerbose\":\"408 Request Timeout:\n", true, "Arbitrum"},
+ {"request timeout", false, "tomlConfig"},
+ }
+ for _, test := range tests {
+ err = evmclient.NewSendErrorS(test.message)
+ assert.Equal(t, err.IsServiceTimeout(clientErrors), test.expect)
+ err = newSendErrorWrapped(test.message)
+ assert.Equal(t, err.IsServiceTimeout(clientErrors), test.expect)
+ }
+ })
+
t.Run("IsTxFeeExceedsCap", func(t *testing.T) {
tests := []errorCase{
{"tx fee (1.10 ether) exceeds the configured cap (1.00 ether)", true, "geth"},
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index 97fb10380b3..df6924b43df 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -33,7 +33,7 @@ require (
github.com/prometheus/client_golang v1.20.5
github.com/shopspring/decimal v1.4.0
github.com/smartcontractkit/chainlink-automation v0.8.1
- github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83
+ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241212163958-6a43e61b9d49
github.com/smartcontractkit/libocr v0.0.0-20241007185508-adbe57025f12
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.19.0
diff --git a/core/services/llo/mercurytransmitter/server.go b/core/services/llo/mercurytransmitter/server.go
index 4e97c0483b3..3ce2b0a4b4a 100644
--- a/core/services/llo/mercurytransmitter/server.go
+++ b/core/services/llo/mercurytransmitter/server.go
@@ -62,6 +62,22 @@ var (
},
[]string{"donID", "serverURL", "code"},
)
+ promTransmitConcurrentTransmitGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "llo",
+ Subsystem: "mercurytransmitter",
+ Name: "concurrent_transmit_gauge",
+ Help: "Gauge that measures the number of transmit threads currently waiting on a remote transmit call. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
+ },
+ []string{"donID", "serverURL"},
+ )
+ promTransmitConcurrentDeleteGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "llo",
+ Subsystem: "mercurytransmitter",
+ Name: "concurrent_delete_gauge",
+ Help: "Gauge that measures the number of delete threads currently waiting on a delete call to the DB. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
+ },
+ []string{"donID", "serverURL"},
+ )
)
type ReportPacker interface {
@@ -87,12 +103,14 @@ type server struct {
evmPremiumLegacyPacker ReportPacker
jsonPacker ReportPacker
- transmitSuccessCount prometheus.Counter
- transmitDuplicateCount prometheus.Counter
- transmitConnectionErrorCount prometheus.Counter
- transmitQueueDeleteErrorCount prometheus.Counter
- transmitQueueInsertErrorCount prometheus.Counter
- transmitQueuePushErrorCount prometheus.Counter
+ transmitSuccessCount prometheus.Counter
+ transmitDuplicateCount prometheus.Counter
+ transmitConnectionErrorCount prometheus.Counter
+ transmitQueueDeleteErrorCount prometheus.Counter
+ transmitQueueInsertErrorCount prometheus.Counter
+ transmitQueuePushErrorCount prometheus.Counter
+ transmitConcurrentTransmitGauge prometheus.Gauge
+ transmitConcurrentDeleteGauge prometheus.Gauge
transmitThreadBusyCount atomic.Int32
deleteThreadBusyCount atomic.Int32
@@ -130,6 +148,8 @@ func newServer(lggr logger.Logger, verboseLogging bool, cfg QueueConfig, client
promTransmitQueueDeleteErrorCount.WithLabelValues(donIDStr, serverURL),
promTransmitQueueInsertErrorCount.WithLabelValues(donIDStr, serverURL),
promTransmitQueuePushErrorCount.WithLabelValues(donIDStr, serverURL),
+ promTransmitConcurrentTransmitGauge.WithLabelValues(donIDStr, serverURL),
+ promTransmitConcurrentDeleteGauge.WithLabelValues(donIDStr, serverURL),
atomic.Int32{},
atomic.Int32{},
}
@@ -161,7 +181,7 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
select {
case hash := <-s.deleteQueue:
for {
- s.deleteThreadBusyCount.Add(1)
+ s.deleteThreadBusyCountInc()
if err := s.pm.orm.Delete(ctx, [][32]byte{hash}); err != nil {
s.lggr.Errorw("Failed to delete transmission record", "err", err, "transmissionHash", hash)
s.transmitQueueDeleteErrorCount.Inc()
@@ -170,7 +190,7 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
// Wait a backoff duration before trying to delete again
continue
case <-stopCh:
- s.deleteThreadBusyCount.Add(-1)
+ s.deleteThreadBusyCountDec()
// abort and return immediately on stop even if items remain in queue
return
}
@@ -179,7 +199,7 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
}
// success
b.Reset()
- s.deleteThreadBusyCount.Add(-1)
+ s.deleteThreadBusyCountDec()
case <-stopCh:
// abort and return immediately on stop even if items remain in queue
return
@@ -187,6 +207,23 @@ func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup
}
}
+func (s *server) transmitThreadBusyCountInc() {
+ val := s.transmitThreadBusyCount.Add(1)
+ s.transmitConcurrentTransmitGauge.Set(float64(val))
+}
+func (s *server) transmitThreadBusyCountDec() {
+ val := s.transmitThreadBusyCount.Add(-1)
+ s.transmitConcurrentTransmitGauge.Set(float64(val))
+}
+func (s *server) deleteThreadBusyCountInc() {
+ val := s.deleteThreadBusyCount.Add(1)
+ s.transmitConcurrentDeleteGauge.Set(float64(val))
+}
+func (s *server) deleteThreadBusyCountDec() {
+ val := s.deleteThreadBusyCount.Add(-1)
+ s.transmitConcurrentDeleteGauge.Set(float64(val))
+}
+
func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donIDStr string) {
defer wg.Done()
// Exponential backoff with very short retry interval (since latency is a priority)
@@ -208,8 +245,8 @@ func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, donI
return false
}
- s.transmitThreadBusyCount.Add(1)
- defer s.transmitThreadBusyCount.Add(-1)
+ s.transmitThreadBusyCountInc()
+ defer s.transmitThreadBusyCountDec()
req, res, err := func(ctx context.Context) (*pb.TransmitRequest, *pb.TransmitResponse, error) {
ctx, cancelFn := context.WithTimeout(ctx, utils.WithJitter(s.transmitTimeout))
diff --git a/core/services/llo/mercurytransmitter/transmitter.go b/core/services/llo/mercurytransmitter/transmitter.go
index 8e60bf938a5..23aa4b79e58 100644
--- a/core/services/llo/mercurytransmitter/transmitter.go
+++ b/core/services/llo/mercurytransmitter/transmitter.go
@@ -116,7 +116,6 @@ type transmitter struct {
orm ORM
servers map[string]*server
registerer prometheus.Registerer
- collectors []prometheus.Collector
donID uint32
fromAccount string
@@ -155,7 +154,6 @@ func newTransmitter(opts Opts) *transmitter {
opts.ORM,
servers,
opts.Registerer,
- nil,
opts.DonID,
fmt.Sprintf("%x", opts.FromAccount),
make(services.StopChan),
@@ -194,31 +192,6 @@ func (mt *transmitter) Start(ctx context.Context) (err error) {
go s.runDeleteQueueLoop(mt.stopCh, mt.wg)
go s.runQueueLoop(mt.stopCh, mt.wg, donIDStr)
}
- mt.collectors = append(mt.collectors, prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "llo",
- Subsystem: "mercurytransmitter",
- Name: "concurrent_transmit_gauge",
- Help: "Gauge that measures the number of transmit threads currently waiting on a remote transmit call. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
- ConstLabels: prometheus.Labels{"donID": donIDStr, "serverURL": s.url, "maxConcurrentTransmits": strconv.FormatInt(int64(nThreads), 10)},
- }, func() float64 {
- return float64(s.transmitThreadBusyCount.Load())
- }))
- mt.collectors = append(mt.collectors, prometheus.NewGaugeFunc(
- prometheus.GaugeOpts{
- Namespace: "llo",
- Subsystem: "mercurytransmitter",
- Name: "concurrent_delete_gauge",
- Help: "Gauge that measures the number of delete threads currently waiting on a delete call to the DB. You may wish to alert if this exceeds some number for a given period of time, or if it ever reaches its max.",
- ConstLabels: prometheus.Labels{"donID": donIDStr, "serverURL": s.url, "maxConcurrentDeletes": strconv.FormatInt(int64(nThreads), 10)},
- }, func() float64 {
- return float64(s.deleteThreadBusyCount.Load())
- }))
- for _, c := range mt.collectors {
- if err := mt.registerer.Register(c); err != nil {
- return err
- }
- }
}
if err := (&services.MultiStart{}).Start(ctx, startClosers...); err != nil {
return err
@@ -250,12 +223,7 @@ func (mt *transmitter) Close() error {
closers = append(closers, s.pm)
closers = append(closers, s.c)
}
- err := services.CloseAll(closers...)
- // Unregister all the gauge funcs
- for _, c := range mt.collectors {
- mt.registerer.Unregister(c)
- }
- return err
+ return services.CloseAll(closers...)
})
}
diff --git a/core/services/ocr2/plugins/mercury/plugin.go b/core/services/ocr2/plugins/mercury/plugin.go
index 8a4101804dd..b0983e55c89 100644
--- a/core/services/ocr2/plugins/mercury/plugin.go
+++ b/core/services/ocr2/plugins/mercury/plugin.go
@@ -1,6 +1,7 @@
package mercury
import (
+ "context"
"encoding/json"
"fmt"
"os/exec"
@@ -79,14 +80,13 @@ func NewServices(
return nil, errors.New("expected job to have a non-nil PipelineSpec")
}
- var err error
var pluginConfig config.PluginConfig
if len(jb.OCR2OracleSpec.PluginConfig) == 0 {
if !enableTriggerCapability {
return nil, fmt.Errorf("at least one transmission option must be configured")
}
} else {
- err = json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig)
+ err := json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig)
if err != nil {
return nil, errors.WithStack(err)
}
@@ -101,8 +101,8 @@ func NewServices(
// encapsulate all the subservices and ensure we close them all if any fail to start
srvs := []job.ServiceCtx{ocr2Provider}
abort := func() {
- if err = services.MultiCloser(srvs).Close(); err != nil {
- lggr.Errorw("Error closing unused services", "err", err)
+ if cerr := services.MultiCloser(srvs).Close(); cerr != nil {
+ lggr.Errorw("Error closing unused services", "err", cerr)
}
}
saver := ocrcommon.NewResultRunSaver(pipelineRunner, lggr, cfg.MaxSuccessfulRuns(), cfg.ResultWriteQueueDepth())
@@ -112,6 +112,7 @@ func NewServices(
var (
factory ocr3types.MercuryPluginFactory
factoryServices []job.ServiceCtx
+ fErr error
)
fCfg := factoryCfg{
orm: orm,
@@ -127,31 +128,31 @@ func NewServices(
}
switch feedID.Version() {
case 1:
- factory, factoryServices, err = newv1factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv1factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v1 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v1 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
case 2:
- factory, factoryServices, err = newv2factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv2factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v2 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v2 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
case 3:
- factory, factoryServices, err = newv3factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv3factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v3 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v3 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
case 4:
- factory, factoryServices, err = newv4factory(fCfg)
- if err != nil {
+ factory, factoryServices, fErr = newv4factory(fCfg)
+ if fErr != nil {
abort()
- return nil, fmt.Errorf("failed to create mercury v4 factory: %w", err)
+ return nil, fmt.Errorf("failed to create mercury v4 factory: %w", fErr)
}
srvs = append(srvs, factoryServices...)
default:
@@ -214,13 +215,14 @@ func newv4factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loop mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV4Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -253,13 +255,14 @@ func newv3factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV3Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -292,13 +295,14 @@ func newv2factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV2Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -329,13 +333,14 @@ func newv1factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
loopEnabled := loopCmd != ""
if loopEnabled {
- cmdFn, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
+ cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
if err != nil {
return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
}
// in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
+ // and unregistration of the loop
factoryServer := loop.NewMercuryV1Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer)
+ srvs = append(srvs, factoryServer, unregisterer)
// adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
factory = factoryServer
} else {
@@ -344,20 +349,46 @@ func newv1factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.
return factory, srvs, nil
}
-func initLoop(cmd string, cfg Config, feedID utils.FeedID, lggr logger.Logger) (func() *exec.Cmd, loop.GRPCOpts, logger.Logger, error) {
+func initLoop(cmd string, cfg Config, feedID utils.FeedID, lggr logger.Logger) (func() *exec.Cmd, *loopUnregisterCloser, loop.GRPCOpts, logger.Logger, error) {
lggr.Debugw("Initializing Mercury loop", "command", cmd)
mercuryLggr := lggr.Named(fmt.Sprintf("MercuryV%d", feedID.Version())).Named(feedID.String())
envVars, err := plugins.ParseEnvFile(env.MercuryPlugin.Env.Get())
if err != nil {
- return nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to parse mercury env file: %w", err)
+ return nil, nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to parse mercury env file: %w", err)
}
+ loopID := mercuryLggr.Name()
cmdFn, opts, err := cfg.RegisterLOOP(plugins.CmdConfig{
- ID: mercuryLggr.Name(),
+ ID: loopID,
Cmd: cmd,
Env: envVars,
})
if err != nil {
- return nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to register loop: %w", err)
+ return nil, nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to register loop: %w", err)
+ }
+ return cmdFn, newLoopUnregister(cfg, loopID), opts, mercuryLggr, nil
+}
+
+// loopUnregisterCloser is a helper to unregister a loop
+// as a service
+// TODO BCF-3451 all other jobs that use custom plugin providers should be refactored to use this pattern
+// perhaps it can be implemented in the delegate on job delete.
+type loopUnregisterCloser struct {
+ r plugins.RegistrarConfig
+ id string
+}
+
+func (l *loopUnregisterCloser) Close() error {
+ l.r.UnregisterLOOP(l.id)
+ return nil
+}
+
+func (l *loopUnregisterCloser) Start(ctx context.Context) error {
+ return nil
+}
+
+func newLoopUnregister(r plugins.RegistrarConfig, id string) *loopUnregisterCloser {
+ return &loopUnregisterCloser{
+ r: r,
+ id: id,
}
- return cmdFn, opts, mercuryLggr, nil
}
diff --git a/core/services/ocr2/plugins/mercury/plugin_test.go b/core/services/ocr2/plugins/mercury/plugin_test.go
index 22aaf7522de..eb67da53100 100644
--- a/core/services/ocr2/plugins/mercury/plugin_test.go
+++ b/core/services/ocr2/plugins/mercury/plugin_test.go
@@ -2,6 +2,7 @@ package mercury_test
import (
"context"
+ "errors"
"os/exec"
"reflect"
"testing"
@@ -9,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink/v2/core/config/env"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -22,6 +24,7 @@ import (
v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
+ "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
mercuryocr2 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury"
@@ -92,21 +95,23 @@ var (
// this is kind of gross, but it's the best way to test return values of the services
expectedEmbeddedServiceCnt = 3
- expectedLoopServiceCnt = expectedEmbeddedServiceCnt + 1
+ expectedLoopServiceCnt = expectedEmbeddedServiceCnt + 2 // factory server and loop unregisterer
)
func TestNewServices(t *testing.T) {
type args struct {
pluginConfig job.JSONConfig
feedID utils.FeedID
+ cfg mercuryocr2.Config
}
- tests := []struct {
+ testCases := []struct {
name string
args args
loopMode bool
wantLoopFactory any
wantServiceCnt int
wantErr bool
+ wantErrStr string
}{
{
name: "no plugin config error ",
@@ -186,6 +191,19 @@ func TestNewServices(t *testing.T) {
wantErr: false,
wantLoopFactory: &loop.MercuryV3Service{},
},
+ {
+ name: "v3 loop err",
+ loopMode: true,
+ args: args{
+ pluginConfig: v3jsonCfg,
+ feedID: v3FeedId,
+ cfg: mercuryocr2.NewMercuryConfig(1, 1, &testRegistrarConfig{failRegister: true}),
+ },
+ wantServiceCnt: expectedLoopServiceCnt,
+ wantErr: true,
+ wantLoopFactory: &loop.MercuryV3Service{},
+ wantErrStr: "failed to init loop for feed",
+ },
{
name: "v4 loop",
loopMode: true,
@@ -198,17 +216,27 @@ func TestNewServices(t *testing.T) {
wantLoopFactory: &loop.MercuryV4Service{},
},
}
- for _, tt := range tests {
+ for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.loopMode {
t.Setenv(string(env.MercuryPlugin.Cmd), "fake_cmd")
assert.NotEmpty(t, env.MercuryPlugin.Cmd.Get())
}
- got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID)
+ // use default config if not provided
+ if tt.args.cfg == nil {
+ tt.args.cfg = testCfg
+ }
+ got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
if (err != nil) != tt.wantErr {
t.Errorf("NewServices() error = %v, wantErr %v", err, tt.wantErr)
return
}
+ if err != nil {
+ if tt.wantErrStr != "" {
+ assert.Contains(t, err.Error(), tt.wantErrStr)
+ }
+ return
+ }
assert.Len(t, got, tt.wantServiceCnt)
if tt.loopMode {
foundLoopFactory := false
@@ -222,15 +250,97 @@ func TestNewServices(t *testing.T) {
}
})
}
+
+ t.Run("restartable loop", func(t *testing.T) {
+ // setup a real loop registry to test restartability
+ registry := plugins.NewLoopRegistry(logger.TestLogger(t), nil, nil, nil, "")
+ loopRegistrarConfig := plugins.NewRegistrarConfig(loop.GRPCOpts{}, registry.Register, registry.Unregister)
+ prodCfg := mercuryocr2.NewMercuryConfig(1, 1, loopRegistrarConfig)
+ type args struct {
+ pluginConfig job.JSONConfig
+ feedID utils.FeedID
+ cfg mercuryocr2.Config
+ }
+ testCases := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "v1 loop",
+ args: args{
+ pluginConfig: v1jsonCfg,
+ feedID: v1FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ {
+ name: "v2 loop",
+ args: args{
+ pluginConfig: v2jsonCfg,
+ feedID: v2FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ {
+ name: "v3 loop",
+ args: args{
+ pluginConfig: v3jsonCfg,
+ feedID: v3FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ {
+ name: "v4 loop",
+ args: args{
+ pluginConfig: v4jsonCfg,
+ feedID: v4FeedId,
+ cfg: prodCfg,
+ },
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range testCases {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Setenv(string(env.MercuryPlugin.Cmd), "fake_cmd")
+ assert.NotEmpty(t, env.MercuryPlugin.Cmd.Get())
+
+ got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("NewServices() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ // hack to simulate a restart. we don't have enough boilerplate to start the oracle service
+ // only care about the subservices so we start all except the oracle, which happens to be the last one
+ for i := 0; i < len(got)-1; i++ {
+ require.NoError(t, got[i].Start(tests.Context(t)))
+ }
+ // if we don't close the services, we get conflicts with the loop registry
+ _, err = newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
+ require.ErrorContains(t, err, "plugin already registered")
+
+ // close all services and try again
+ for i := len(got) - 2; i >= 0; i-- {
+ require.NoError(t, got[i].Close())
+ }
+ _, err = newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
+ require.NoError(t, err)
+ })
+ }
+ })
}
// we are only varying the version via feedID (and the plugin config)
// this wrapper supplies dummy values for the rest of the arguments
-func newServicesTestWrapper(t *testing.T, pluginConfig job.JSONConfig, feedID utils.FeedID) ([]job.ServiceCtx, error) {
+func newServicesTestWrapper(t *testing.T, pluginConfig job.JSONConfig, feedID utils.FeedID, cfg mercuryocr2.Config) ([]job.ServiceCtx, error) {
t.Helper()
jb := testJob
jb.OCR2OracleSpec.PluginConfig = pluginConfig
- return mercuryocr2.NewServices(jb, &testProvider{}, nil, logger.TestLogger(t), testArgsNoPlugin, testCfg, nil, &testDataSourceORM{}, feedID, false)
+ return mercuryocr2.NewServices(jb, &testProvider{}, nil, logger.TestLogger(t), testArgsNoPlugin, cfg, nil, &testDataSourceORM{}, feedID, false)
}
type testProvider struct{}
@@ -292,16 +402,21 @@ func (*testProvider) ReportCodecV3() v3.ReportCodec { return nil }
func (*testProvider) ReportCodecV4() v4.ReportCodec { return nil }
// Start implements types.MercuryProvider.
-func (*testProvider) Start(context.Context) error { panic("unimplemented") }
+func (*testProvider) Start(context.Context) error { return nil }
var _ commontypes.MercuryProvider = (*testProvider)(nil)
-type testRegistrarConfig struct{}
+type testRegistrarConfig struct {
+ failRegister bool
+}
func (c *testRegistrarConfig) UnregisterLOOP(ID string) {}
// RegisterLOOP implements plugins.RegistrarConfig.
-func (*testRegistrarConfig) RegisterLOOP(config plugins.CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error) {
+func (c *testRegistrarConfig) RegisterLOOP(config plugins.CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error) {
+ if c.failRegister {
+ return nil, loop.GRPCOpts{}, errors.New("failed to register")
+ }
return nil, loop.GRPCOpts{}, nil
}
diff --git a/core/services/ocr3/promwrapper/factory.go b/core/services/ocr3/promwrapper/factory.go
index 6518cea3c0d..e369b3260ef 100644
--- a/core/services/ocr3/promwrapper/factory.go
+++ b/core/services/ocr3/promwrapper/factory.go
@@ -47,6 +47,7 @@ func (r ReportingPluginFactory[RI]) NewReportingPlugin(ctx context.Context, conf
config.ConfigDigest.String(),
promOCR3ReportsGenerated,
promOCR3Durations,
+ promOCR3Sizes,
promOCR3PluginStatus,
)
return wrapped, info, err
diff --git a/core/services/ocr3/promwrapper/plugin.go b/core/services/ocr3/promwrapper/plugin.go
index dcee5050d1e..aa5fb87a6ee 100644
--- a/core/services/ocr3/promwrapper/plugin.go
+++ b/core/services/ocr3/promwrapper/plugin.go
@@ -21,6 +21,7 @@ type reportingPlugin[RI any] struct {
// Prometheus components for tracking metrics
reportsGenerated *prometheus.CounterVec
durations *prometheus.HistogramVec
+ sizes *prometheus.CounterVec
status *prometheus.GaugeVec
}
@@ -31,6 +32,7 @@ func newReportingPlugin[RI any](
configDigest string,
reportsGenerated *prometheus.CounterVec,
durations *prometheus.HistogramVec,
+ sizes *prometheus.CounterVec,
status *prometheus.GaugeVec,
) *reportingPlugin[RI] {
return &reportingPlugin[RI]{
@@ -40,6 +42,7 @@ func newReportingPlugin[RI any](
configDigest: configDigest,
reportsGenerated: reportsGenerated,
durations: durations,
+ sizes: sizes,
status: status,
}
}
@@ -51,9 +54,11 @@ func (p *reportingPlugin[RI]) Query(ctx context.Context, outctx ocr3types.Outcom
}
func (p *reportingPlugin[RI]) Observation(ctx context.Context, outctx ocr3types.OutcomeContext, query ocrtypes.Query) (ocrtypes.Observation, error) {
- return withObservedExecution(p, observation, func() (ocrtypes.Observation, error) {
+ result, err := withObservedExecution(p, observation, func() (ocrtypes.Observation, error) {
return p.ReportingPlugin.Observation(ctx, outctx, query)
})
+ p.trackSize(observation, len(result), err)
+ return result, err
}
func (p *reportingPlugin[RI]) ValidateObservation(ctx context.Context, outctx ocr3types.OutcomeContext, query ocrtypes.Query, ao ocrtypes.AttributedObservation) error {
@@ -65,9 +70,11 @@ func (p *reportingPlugin[RI]) ValidateObservation(ctx context.Context, outctx oc
}
func (p *reportingPlugin[RI]) Outcome(ctx context.Context, outctx ocr3types.OutcomeContext, query ocrtypes.Query, aos []ocrtypes.AttributedObservation) (ocr3types.Outcome, error) {
- return withObservedExecution(p, outcome, func() (ocr3types.Outcome, error) {
+ result, err := withObservedExecution(p, outcome, func() (ocr3types.Outcome, error) {
return p.ReportingPlugin.Outcome(ctx, outctx, query, aos)
})
+ p.trackSize(outcome, len(result), err)
+ return result, err
}
func (p *reportingPlugin[RI]) Reports(ctx context.Context, seqNr uint64, outcome ocr3types.Outcome) ([]ocr3types.ReportPlus[RI], error) {
@@ -111,6 +118,15 @@ func (p *reportingPlugin[RI]) updateStatus(status bool) {
Set(float64(boolToInt(status)))
}
+func (p *reportingPlugin[RI]) trackSize(function functionType, size int, err error) {
+ if err != nil {
+ return
+ }
+ p.sizes.
+ WithLabelValues(p.chainID, p.plugin, string(function)).
+ Add(float64(size))
+}
+
func boolToInt(arg bool) int {
if arg {
return 1
diff --git a/core/services/ocr3/promwrapper/plugin_test.go b/core/services/ocr3/promwrapper/plugin_test.go
index 9a7b6f2e648..a10a467799f 100644
--- a/core/services/ocr3/promwrapper/plugin_test.go
+++ b/core/services/ocr3/promwrapper/plugin_test.go
@@ -17,17 +17,20 @@ import (
)
func Test_ReportsGeneratedGauge(t *testing.T) {
+ pluginObservationSize := 5
+ pluginOutcomeSize := 3
+
plugin1 := newReportingPlugin(
fakePlugin[uint]{reports: make([]ocr3types.ReportPlus[uint], 2)},
- "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin2 := newReportingPlugin(
- fakePlugin[bool]{reports: make([]ocr3types.ReportPlus[bool], 10)},
- "solana", "different_plugin", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ fakePlugin[bool]{reports: make([]ocr3types.ReportPlus[bool], 10), observationSize: pluginObservationSize, outcomeSize: pluginOutcomeSize},
+ "solana", "different_plugin", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin3 := newReportingPlugin(
fakePlugin[string]{err: errors.New("error")},
- "1234", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "1234", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
r1, err := plugin1.Reports(tests.Context(t), 1, nil)
@@ -64,20 +67,33 @@ func Test_ReportsGeneratedGauge(t *testing.T) {
require.NoError(t, plugin1.Close())
pluginHealth = testutil.ToFloat64(promOCR3PluginStatus.WithLabelValues("123", "empty", "abc"))
require.Equal(t, 0, int(pluginHealth))
+
+ iterations := 10
+ for i := 0; i < iterations; i++ {
+ _, err1 := plugin2.Outcome(tests.Context(t), ocr3types.OutcomeContext{}, nil, nil)
+ require.NoError(t, err1)
+ }
+ _, err1 := plugin2.Observation(tests.Context(t), ocr3types.OutcomeContext{}, nil)
+ require.NoError(t, err1)
+
+ outcomesLen := testutil.ToFloat64(promOCR3Sizes.WithLabelValues("solana", "different_plugin", "outcome"))
+ require.Equal(t, pluginOutcomeSize*iterations, int(outcomesLen))
+ observationLen := testutil.ToFloat64(promOCR3Sizes.WithLabelValues("solana", "different_plugin", "observation"))
+ require.Equal(t, pluginObservationSize, int(observationLen))
}
func Test_DurationHistograms(t *testing.T) {
plugin1 := newReportingPlugin(
fakePlugin[uint]{},
- "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin2 := newReportingPlugin(
fakePlugin[uint]{err: errors.New("error")},
- "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "123", "empty", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
plugin3 := newReportingPlugin(
fakePlugin[uint]{},
- "solana", "commit", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3PluginStatus,
+ "solana", "commit", "abc", promOCR3ReportsGenerated, promOCR3Durations, promOCR3Sizes, promOCR3PluginStatus,
)
for _, p := range []*reportingPlugin[uint]{plugin1, plugin2, plugin3} {
@@ -102,8 +118,10 @@ func Test_DurationHistograms(t *testing.T) {
}
type fakePlugin[RI any] struct {
- reports []ocr3types.ReportPlus[RI]
- err error
+ reports []ocr3types.ReportPlus[RI]
+ observationSize int
+ outcomeSize int
+ err error
}
func (f fakePlugin[RI]) Query(context.Context, ocr3types.OutcomeContext) (ocrtypes.Query, error) {
@@ -117,7 +135,7 @@ func (f fakePlugin[RI]) Observation(context.Context, ocr3types.OutcomeContext, o
if f.err != nil {
return nil, f.err
}
- return ocrtypes.Observation{}, nil
+ return make([]byte, f.observationSize), nil
}
func (f fakePlugin[RI]) ValidateObservation(context.Context, ocr3types.OutcomeContext, ocrtypes.Query, ocrtypes.AttributedObservation) error {
@@ -132,7 +150,7 @@ func (f fakePlugin[RI]) Outcome(context.Context, ocr3types.OutcomeContext, ocrty
if f.err != nil {
return nil, f.err
}
- return ocr3types.Outcome{}, nil
+ return make([]byte, f.outcomeSize), nil
}
func (f fakePlugin[RI]) Reports(context.Context, uint64, ocr3types.Outcome) ([]ocr3types.ReportPlus[RI], error) {
diff --git a/core/services/ocr3/promwrapper/types.go b/core/services/ocr3/promwrapper/types.go
index 2fa29dcdf20..59468358783 100644
--- a/core/services/ocr3/promwrapper/types.go
+++ b/core/services/ocr3/promwrapper/types.go
@@ -48,6 +48,13 @@ var (
},
[]string{"chainID", "plugin", "function", "success"},
)
+ promOCR3Sizes = promauto.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "ocr3_reporting_plugin_data_sizes",
+ Help: "Tracks the size of the data produced by OCR3 plugin in bytes (e.g. reports, observations etc.)",
+ },
+ []string{"chainID", "plugin", "function"},
+ )
promOCR3PluginStatus = promauto.NewGaugeVec(
prometheus.GaugeOpts{
Name: "ocr3_reporting_plugin_status",
diff --git a/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go b/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go
index 3c6ee8a1d04..c7c164803cb 100644
--- a/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go
+++ b/core/services/relay/evm/capabilities/workflows/syncer/workflow_syncer_test.go
@@ -6,7 +6,9 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
+ rand2 "math/rand/v2"
"strings"
+ "sync"
"testing"
"time"
@@ -31,17 +33,38 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/utils/crypto"
"github.com/stretchr/testify/require"
+
+ crypto2 "github.com/ethereum/go-ethereum/crypto"
)
type testEvtHandler struct {
events []syncer.Event
+ mux sync.Mutex
}
func (m *testEvtHandler) Handle(ctx context.Context, event syncer.Event) error {
+ m.mux.Lock()
+ defer m.mux.Unlock()
m.events = append(m.events, event)
return nil
}
+func (m *testEvtHandler) ClearEvents() {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+ m.events = make([]syncer.Event, 0)
+}
+
+func (m *testEvtHandler) GetEvents() []syncer.Event {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ eventsCopy := make([]syncer.Event, len(m.events))
+ copy(eventsCopy, m.events)
+
+ return eventsCopy
+}
+
func newTestEvtHandler() *testEvtHandler {
return &testEvtHandler{
events: make([]syncer.Event, 0),
@@ -68,6 +91,138 @@ func (m *testWorkflowRegistryContractLoader) LoadWorkflows(ctx context.Context,
}, nil
}
+func Test_EventHandlerStateSync(t *testing.T) {
+ lggr := logger.TestLogger(t)
+ backendTH := testutils.NewEVMBackendTH(t)
+ donID := uint32(1)
+
+ eventPollTicker := time.NewTicker(50 * time.Millisecond)
+ defer eventPollTicker.Stop()
+
+ // Deploy a test workflow_registry
+ wfRegistryAddr, _, wfRegistryC, err := workflow_registry_wrapper.DeployWorkflowRegistry(backendTH.ContractsOwner, backendTH.Backend.Client())
+ backendTH.Backend.Commit()
+ require.NoError(t, err)
+
+ // setup contract state to allow the secrets to be updated
+ updateAllowedDONs(t, backendTH, wfRegistryC, []uint32{donID}, true)
+ updateAuthorizedAddress(t, backendTH, wfRegistryC, []common.Address{backendTH.ContractsOwner.From}, true)
+
+ // Create some initial static state
+ numberWorkflows := 20
+ for i := 0; i < numberWorkflows; i++ {
+ var workflowID [32]byte
+ _, err = rand.Read((workflowID)[:])
+ require.NoError(t, err)
+ workflow := RegisterWorkflowCMD{
+ Name: fmt.Sprintf("test-wf-%d", i),
+ DonID: donID,
+ Status: uint8(1),
+ SecretsURL: "someurl",
+ }
+ workflow.ID = workflowID
+ registerWorkflow(t, backendTH, wfRegistryC, workflow)
+ }
+
+ testEventHandler := newTestEvtHandler()
+ loader := syncer.NewWorkflowRegistryContractLoader(lggr, wfRegistryAddr.Hex(), func(ctx context.Context, bytes []byte) (syncer.ContractReader, error) {
+ return backendTH.NewContractReader(ctx, t, bytes)
+ }, testEventHandler)
+
+ // Create the registry
+ registry := syncer.NewWorkflowRegistry(
+ lggr,
+ func(ctx context.Context, bytes []byte) (syncer.ContractReader, error) {
+ return backendTH.NewContractReader(ctx, t, bytes)
+ },
+ wfRegistryAddr.Hex(),
+ syncer.WorkflowEventPollerConfig{
+ QueryCount: 20,
+ },
+ testEventHandler,
+ loader,
+ &testDonNotifier{
+ don: capabilities.DON{
+ ID: donID,
+ },
+ err: nil,
+ },
+ syncer.WithTicker(eventPollTicker.C),
+ )
+
+ servicetest.Run(t, registry)
+
+ require.Eventually(t, func() bool {
+ numEvents := len(testEventHandler.GetEvents())
+ return numEvents == numberWorkflows
+ }, 5*time.Second, time.Second)
+
+ for _, event := range testEventHandler.GetEvents() {
+ assert.Equal(t, syncer.WorkflowRegisteredEvent, event.GetEventType())
+ }
+
+ testEventHandler.ClearEvents()
+
+ // Create different event types for a number of workflows and confirm that the event handler processes them in order
+ numberOfEventCycles := 50
+ for i := 0; i < numberOfEventCycles; i++ {
+ var workflowID [32]byte
+ _, err = rand.Read((workflowID)[:])
+ require.NoError(t, err)
+ workflow := RegisterWorkflowCMD{
+ Name: "test-wf-register-event",
+ DonID: donID,
+ Status: uint8(1),
+ SecretsURL: "",
+ }
+ workflow.ID = workflowID
+
+ // Generate events of different types with some jitter
+ registerWorkflow(t, backendTH, wfRegistryC, workflow)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ data := append(backendTH.ContractsOwner.From.Bytes(), []byte(workflow.Name)...)
+ workflowKey := crypto2.Keccak256Hash(data)
+ activateWorkflow(t, backendTH, wfRegistryC, workflowKey)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ pauseWorkflow(t, backendTH, wfRegistryC, workflowKey)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ var newWorkflowID [32]byte
+ _, err = rand.Read((newWorkflowID)[:])
+ require.NoError(t, err)
+ updateWorkflow(t, backendTH, wfRegistryC, workflowKey, newWorkflowID, workflow.BinaryURL+"2", workflow.ConfigURL, workflow.SecretsURL)
+ time.Sleep(time.Millisecond * time.Duration(rand2.IntN(10)))
+ deleteWorkflow(t, backendTH, wfRegistryC, workflowKey)
+ }
+
+ // Confirm the expected number of events are received in the correct order
+ require.Eventually(t, func() bool {
+ events := testEventHandler.GetEvents()
+ numEvents := len(events)
+ expectedNumEvents := 5 * numberOfEventCycles
+
+ if numEvents == expectedNumEvents {
+ // verify the events are the expected types in the expected order
+ for idx, event := range events {
+ switch idx % 5 {
+ case 0:
+ assert.Equal(t, syncer.WorkflowRegisteredEvent, event.GetEventType())
+ case 1:
+ assert.Equal(t, syncer.WorkflowActivatedEvent, event.GetEventType())
+ case 2:
+ assert.Equal(t, syncer.WorkflowPausedEvent, event.GetEventType())
+ case 3:
+ assert.Equal(t, syncer.WorkflowUpdatedEvent, event.GetEventType())
+ case 4:
+ assert.Equal(t, syncer.WorkflowDeletedEvent, event.GetEventType())
+ }
+ }
+ return true
+ }
+
+ return false
+ }, 50*time.Second, time.Second)
+}
+
func Test_InitialStateSync(t *testing.T) {
lggr := logger.TestLogger(t)
backendTH := testutils.NewEVMBackendTH(t)
@@ -128,10 +283,10 @@ func Test_InitialStateSync(t *testing.T) {
servicetest.Run(t, worker)
require.Eventually(t, func() bool {
- return len(testEventHandler.events) == numberWorkflows
+ return len(testEventHandler.GetEvents()) == numberWorkflows
}, 5*time.Second, time.Second)
- for _, event := range testEventHandler.events {
+ for _, event := range testEventHandler.GetEvents() {
assert.Equal(t, syncer.WorkflowRegisteredEvent, event.GetEventType())
}
}
@@ -263,7 +418,7 @@ func Test_RegistrySyncer_WorkflowRegistered_InitiallyPaused(t *testing.T) {
require.NoError(t, err)
from := [20]byte(backendTH.ContractsOwner.From)
- id, err := workflows.GenerateWorkflowID(from[:], []byte(wantContents), []byte(""), "")
+ id, err := workflows.GenerateWorkflowID(from[:], "test-wf", []byte(wantContents), []byte(""), "")
require.NoError(t, err)
giveWorkflow.ID = id
@@ -361,7 +516,7 @@ func Test_RegistrySyncer_WorkflowRegistered_InitiallyActivated(t *testing.T) {
require.NoError(t, err)
from := [20]byte(backendTH.ContractsOwner.From)
- id, err := workflows.GenerateWorkflowID(from[:], []byte(wantContents), []byte(""), "")
+ id, err := workflows.GenerateWorkflowID(from[:], "test-wf", []byte(wantContents), []byte(""), "")
require.NoError(t, err)
giveWorkflow.ID = id
@@ -497,3 +652,59 @@ func requestForceUpdateSecrets(
th.Backend.Commit()
th.Backend.Commit()
}
+
+func activateWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte,
+) {
+ t.Helper()
+ _, err := wfRegC.ActivateWorkflow(th.ContractsOwner, workflowKey)
+ require.NoError(t, err, "failed to activate workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
+
+func pauseWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte,
+) {
+ t.Helper()
+ _, err := wfRegC.PauseWorkflow(th.ContractsOwner, workflowKey)
+ require.NoError(t, err, "failed to pause workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
+
+func deleteWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte,
+) {
+ t.Helper()
+ _, err := wfRegC.DeleteWorkflow(th.ContractsOwner, workflowKey)
+ require.NoError(t, err, "failed to delete workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
+
+func updateWorkflow(
+ t *testing.T,
+ th *testutils.EVMBackendTH,
+ wfRegC *workflow_registry_wrapper.WorkflowRegistry,
+ workflowKey [32]byte, newWorkflowID [32]byte, binaryURL string, configURL string, secretsURL string,
+) {
+ t.Helper()
+ _, err := wfRegC.UpdateWorkflow(th.ContractsOwner, workflowKey, newWorkflowID, binaryURL, configURL, secretsURL)
+ require.NoError(t, err, "failed to update workflow")
+ th.Backend.Commit()
+ th.Backend.Commit()
+ th.Backend.Commit()
+}
diff --git a/core/services/workflows/syncer/contract_reader_mock.go b/core/services/workflows/syncer/contract_reader_mock.go
deleted file mode 100644
index e6e7c8385f5..00000000000
--- a/core/services/workflows/syncer/contract_reader_mock.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Code generated by mockery v2.46.3. DO NOT EDIT.
-
-package syncer
-
-import (
- context "context"
-
- query "github.com/smartcontractkit/chainlink-common/pkg/types/query"
- primitives "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
- mock "github.com/stretchr/testify/mock"
-
- types "github.com/smartcontractkit/chainlink-common/pkg/types"
-)
-
-// MockContractReader is an autogenerated mock type for the ContractReader type
-type MockContractReader struct {
- mock.Mock
-}
-
-type MockContractReader_Expecter struct {
- mock *mock.Mock
-}
-
-func (_m *MockContractReader) EXPECT() *MockContractReader_Expecter {
- return &MockContractReader_Expecter{mock: &_m.Mock}
-}
-
-// Bind provides a mock function with given fields: _a0, _a1
-func (_m *MockContractReader) Bind(_a0 context.Context, _a1 []types.BoundContract) error {
- ret := _m.Called(_a0, _a1)
-
- if len(ret) == 0 {
- panic("no return value specified for Bind")
- }
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, []types.BoundContract) error); ok {
- r0 = rf(_a0, _a1)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// MockContractReader_Bind_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bind'
-type MockContractReader_Bind_Call struct {
- *mock.Call
-}
-
-// Bind is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 []types.BoundContract
-func (_e *MockContractReader_Expecter) Bind(_a0 interface{}, _a1 interface{}) *MockContractReader_Bind_Call {
- return &MockContractReader_Bind_Call{Call: _e.mock.On("Bind", _a0, _a1)}
-}
-
-func (_c *MockContractReader_Bind_Call) Run(run func(_a0 context.Context, _a1 []types.BoundContract)) *MockContractReader_Bind_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].([]types.BoundContract))
- })
- return _c
-}
-
-func (_c *MockContractReader_Bind_Call) Return(_a0 error) *MockContractReader_Bind_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *MockContractReader_Bind_Call) RunAndReturn(run func(context.Context, []types.BoundContract) error) *MockContractReader_Bind_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// Close provides a mock function with given fields:
-func (_m *MockContractReader) Close() error {
- ret := _m.Called()
-
- if len(ret) == 0 {
- panic("no return value specified for Close")
- }
-
- var r0 error
- if rf, ok := ret.Get(0).(func() error); ok {
- r0 = rf()
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// MockContractReader_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'
-type MockContractReader_Close_Call struct {
- *mock.Call
-}
-
-// Close is a helper method to define mock.On call
-func (_e *MockContractReader_Expecter) Close() *MockContractReader_Close_Call {
- return &MockContractReader_Close_Call{Call: _e.mock.On("Close")}
-}
-
-func (_c *MockContractReader_Close_Call) Run(run func()) *MockContractReader_Close_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run()
- })
- return _c
-}
-
-func (_c *MockContractReader_Close_Call) Return(_a0 error) *MockContractReader_Close_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *MockContractReader_Close_Call) RunAndReturn(run func() error) *MockContractReader_Close_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// GetLatestValueWithHeadData provides a mock function with given fields: ctx, readName, confidenceLevel, params, returnVal
-func (_m *MockContractReader) GetLatestValueWithHeadData(ctx context.Context, readName string, confidenceLevel primitives.ConfidenceLevel, params any, returnVal any) (*types.Head, error) {
- ret := _m.Called(ctx, readName, confidenceLevel, params, returnVal)
-
- if len(ret) == 0 {
- panic("no return value specified for GetLatestValueWithHeadData")
- }
-
- var r0 *types.Head
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, primitives.ConfidenceLevel, any, any) (*types.Head, error)); ok {
- return rf(ctx, readName, confidenceLevel, params, returnVal)
- }
- if rf, ok := ret.Get(0).(func(context.Context, string, primitives.ConfidenceLevel, any, any) *types.Head); ok {
- r0 = rf(ctx, readName, confidenceLevel, params, returnVal)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).(*types.Head)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, string, primitives.ConfidenceLevel, any, any) error); ok {
- r1 = rf(ctx, readName, confidenceLevel, params, returnVal)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// MockContractReader_GetLatestValueWithHeadData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestValueWithHeadData'
-type MockContractReader_GetLatestValueWithHeadData_Call struct {
- *mock.Call
-}
-
-// GetLatestValueWithHeadData is a helper method to define mock.On call
-// - ctx context.Context
-// - readName string
-// - confidenceLevel primitives.ConfidenceLevel
-// - params any
-// - returnVal any
-func (_e *MockContractReader_Expecter) GetLatestValueWithHeadData(ctx interface{}, readName interface{}, confidenceLevel interface{}, params interface{}, returnVal interface{}) *MockContractReader_GetLatestValueWithHeadData_Call {
- return &MockContractReader_GetLatestValueWithHeadData_Call{Call: _e.mock.On("GetLatestValueWithHeadData", ctx, readName, confidenceLevel, params, returnVal)}
-}
-
-func (_c *MockContractReader_GetLatestValueWithHeadData_Call) Run(run func(ctx context.Context, readName string, confidenceLevel primitives.ConfidenceLevel, params any, returnVal any)) *MockContractReader_GetLatestValueWithHeadData_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(string), args[2].(primitives.ConfidenceLevel), args[3].(any), args[4].(any))
- })
- return _c
-}
-
-func (_c *MockContractReader_GetLatestValueWithHeadData_Call) Return(head *types.Head, err error) *MockContractReader_GetLatestValueWithHeadData_Call {
- _c.Call.Return(head, err)
- return _c
-}
-
-func (_c *MockContractReader_GetLatestValueWithHeadData_Call) RunAndReturn(run func(context.Context, string, primitives.ConfidenceLevel, any, any) (*types.Head, error)) *MockContractReader_GetLatestValueWithHeadData_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// QueryKey provides a mock function with given fields: _a0, _a1, _a2, _a3, _a4
-func (_m *MockContractReader) QueryKey(_a0 context.Context, _a1 types.BoundContract, _a2 query.KeyFilter, _a3 query.LimitAndSort, _a4 any) ([]types.Sequence, error) {
- ret := _m.Called(_a0, _a1, _a2, _a3, _a4)
-
- if len(ret) == 0 {
- panic("no return value specified for QueryKey")
- }
-
- var r0 []types.Sequence
- var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) ([]types.Sequence, error)); ok {
- return rf(_a0, _a1, _a2, _a3, _a4)
- }
- if rf, ok := ret.Get(0).(func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) []types.Sequence); ok {
- r0 = rf(_a0, _a1, _a2, _a3, _a4)
- } else {
- if ret.Get(0) != nil {
- r0 = ret.Get(0).([]types.Sequence)
- }
- }
-
- if rf, ok := ret.Get(1).(func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) error); ok {
- r1 = rf(_a0, _a1, _a2, _a3, _a4)
- } else {
- r1 = ret.Error(1)
- }
-
- return r0, r1
-}
-
-// MockContractReader_QueryKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryKey'
-type MockContractReader_QueryKey_Call struct {
- *mock.Call
-}
-
-// QueryKey is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 types.BoundContract
-// - _a2 query.KeyFilter
-// - _a3 query.LimitAndSort
-// - _a4 any
-func (_e *MockContractReader_Expecter) QueryKey(_a0 interface{}, _a1 interface{}, _a2 interface{}, _a3 interface{}, _a4 interface{}) *MockContractReader_QueryKey_Call {
- return &MockContractReader_QueryKey_Call{Call: _e.mock.On("QueryKey", _a0, _a1, _a2, _a3, _a4)}
-}
-
-func (_c *MockContractReader_QueryKey_Call) Run(run func(_a0 context.Context, _a1 types.BoundContract, _a2 query.KeyFilter, _a3 query.LimitAndSort, _a4 any)) *MockContractReader_QueryKey_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(types.BoundContract), args[2].(query.KeyFilter), args[3].(query.LimitAndSort), args[4].(any))
- })
- return _c
-}
-
-func (_c *MockContractReader_QueryKey_Call) Return(_a0 []types.Sequence, _a1 error) *MockContractReader_QueryKey_Call {
- _c.Call.Return(_a0, _a1)
- return _c
-}
-
-func (_c *MockContractReader_QueryKey_Call) RunAndReturn(run func(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) ([]types.Sequence, error)) *MockContractReader_QueryKey_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// Start provides a mock function with given fields: ctx
-func (_m *MockContractReader) Start(ctx context.Context) error {
- ret := _m.Called(ctx)
-
- if len(ret) == 0 {
- panic("no return value specified for Start")
- }
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context) error); ok {
- r0 = rf(ctx)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// MockContractReader_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'
-type MockContractReader_Start_Call struct {
- *mock.Call
-}
-
-// Start is a helper method to define mock.On call
-// - ctx context.Context
-func (_e *MockContractReader_Expecter) Start(ctx interface{}) *MockContractReader_Start_Call {
- return &MockContractReader_Start_Call{Call: _e.mock.On("Start", ctx)}
-}
-
-func (_c *MockContractReader_Start_Call) Run(run func(ctx context.Context)) *MockContractReader_Start_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context))
- })
- return _c
-}
-
-func (_c *MockContractReader_Start_Call) Return(_a0 error) *MockContractReader_Start_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *MockContractReader_Start_Call) RunAndReturn(run func(context.Context) error) *MockContractReader_Start_Call {
- _c.Call.Return(run)
- return _c
-}
-
-// NewMockContractReader creates a new instance of MockContractReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-// The first argument is typically a *testing.T value.
-func NewMockContractReader(t interface {
- mock.TestingT
- Cleanup(func())
-}) *MockContractReader {
- mock := &MockContractReader{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
-}
diff --git a/core/services/workflows/syncer/handler.go b/core/services/workflows/syncer/handler.go
index b88527f905d..4ef7f952249 100644
--- a/core/services/workflows/syncer/handler.go
+++ b/core/services/workflows/syncer/handler.go
@@ -428,7 +428,7 @@ func (h *eventHandler) workflowRegisteredEvent(
}
// Calculate the hash of the binary and config files
- hash, err := pkgworkflows.GenerateWorkflowID(payload.WorkflowOwner, decodedBinary, config, payload.SecretsURL)
+ hash, err := pkgworkflows.GenerateWorkflowID(payload.WorkflowOwner, payload.WorkflowName, decodedBinary, config, payload.SecretsURL)
if err != nil {
return fmt.Errorf("failed to generate workflow id: %w", err)
}
@@ -456,12 +456,13 @@ func (h *eventHandler) workflowRegisteredEvent(
}
wfID := hex.EncodeToString(payload.WorkflowID[:])
+ owner := hex.EncodeToString(payload.WorkflowOwner)
entry := &job.WorkflowSpec{
Workflow: hex.EncodeToString(decodedBinary),
Config: string(config),
WorkflowID: wfID,
Status: status,
- WorkflowOwner: hex.EncodeToString(payload.WorkflowOwner),
+ WorkflowOwner: owner,
WorkflowName: payload.WorkflowName,
SpecType: job.WASMFile,
BinaryURL: payload.BinaryURL,
@@ -480,7 +481,7 @@ func (h *eventHandler) workflowRegisteredEvent(
engine, err := h.engineFactory(
ctx,
wfID,
- string(payload.WorkflowOwner),
+ owner,
payload.WorkflowName,
config,
decodedBinary,
diff --git a/core/services/workflows/syncer/handler_test.go b/core/services/workflows/syncer/handler_test.go
index eb8b338158f..f205cbde1cd 100644
--- a/core/services/workflows/syncer/handler_test.go
+++ b/core/services/workflows/syncer/handler_test.go
@@ -444,7 +444,7 @@ func testRunningWorkflow(t *testing.T, tc testCase) {
fetcher = tc.fetcher
)
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
require.NoError(t, err)
wfID := hex.EncodeToString(giveWFID[:])
@@ -492,7 +492,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
})
)
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
require.NoError(t, err)
wfIDs := hex.EncodeToString(giveWFID[:])
@@ -584,9 +584,9 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
})
)
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
require.NoError(t, err)
- updatedWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, binary, updateConfig, secretsURL)
+ updatedWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, updateConfig, secretsURL)
require.NoError(t, err)
require.NoError(t, err)
diff --git a/core/services/workflows/syncer/heap.go b/core/services/workflows/syncer/heap.go
deleted file mode 100644
index 061293928a3..00000000000
--- a/core/services/workflows/syncer/heap.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package syncer
-
-import "container/heap"
-
-type Heap interface {
- // Push adds a new item to the heap.
- Push(x WorkflowRegistryEventResponse)
-
- // Pop removes the smallest item from the heap and returns it.
- Pop() WorkflowRegistryEventResponse
-
- // Len returns the number of items in the heap.
- Len() int
-}
-
-// publicHeap is a wrapper around the heap.Interface that exposes the Push and Pop methods.
-type publicHeap[T any] struct {
- heap heap.Interface
-}
-
-func (h *publicHeap[T]) Push(x T) {
- heap.Push(h.heap, x)
-}
-
-func (h *publicHeap[T]) Pop() T {
- return heap.Pop(h.heap).(T)
-}
-
-func (h *publicHeap[T]) Len() int {
- return h.heap.Len()
-}
-
-// blockHeightHeap is a heap.Interface that sorts WorkflowRegistryEventResponses by block height.
-type blockHeightHeap []WorkflowRegistryEventResponse
-
-// newBlockHeightHeap returns an initialized heap that sorts WorkflowRegistryEventResponses by block height.
-func newBlockHeightHeap() Heap {
- h := blockHeightHeap(make([]WorkflowRegistryEventResponse, 0))
- heap.Init(&h)
- return &publicHeap[WorkflowRegistryEventResponse]{heap: &h}
-}
-
-func (h *blockHeightHeap) Len() int { return len(*h) }
-
-func (h *blockHeightHeap) Less(i, j int) bool {
- return (*h)[i].Event.Head.Height < (*h)[j].Event.Head.Height
-}
-
-func (h *blockHeightHeap) Swap(i, j int) {
- (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
-}
-
-func (h *blockHeightHeap) Push(x any) {
- *h = append(*h, x.(WorkflowRegistryEventResponse))
-}
-
-func (h *blockHeightHeap) Pop() any {
- old := *h
- n := len(old)
- x := old[n-1]
- *h = old[0 : n-1]
- return x
-}
diff --git a/core/services/workflows/syncer/orm.go b/core/services/workflows/syncer/orm.go
index 97f2c834f36..bd0501795e6 100644
--- a/core/services/workflows/syncer/orm.go
+++ b/core/services/workflows/syncer/orm.go
@@ -161,6 +161,10 @@ func (orm *orm) GetContentsByWorkflowID(ctx context.Context, workflowID string)
return "", "", ErrEmptySecrets
}
+ if jr.Contents.String == "" {
+ return "", "", ErrEmptySecrets
+ }
+
return jr.SecretsURLHash.String, jr.Contents.String, nil
}
@@ -328,10 +332,13 @@ func (orm *orm) UpsertWorkflowSpecWithSecrets(
status = EXCLUDED.status,
binary_url = EXCLUDED.binary_url,
config_url = EXCLUDED.config_url,
- secrets_id = EXCLUDED.secrets_id,
created_at = EXCLUDED.created_at,
updated_at = EXCLUDED.updated_at,
- spec_type = EXCLUDED.spec_type
+ spec_type = EXCLUDED.spec_type,
+ secrets_id = CASE
+ WHEN workflow_specs.secrets_id IS NULL THEN EXCLUDED.secrets_id
+ ELSE workflow_specs.secrets_id
+ END
RETURNING id
`
diff --git a/core/services/workflows/syncer/orm_test.go b/core/services/workflows/syncer/orm_test.go
index 08c60447498..a94233e78a1 100644
--- a/core/services/workflows/syncer/orm_test.go
+++ b/core/services/workflows/syncer/orm_test.go
@@ -256,3 +256,120 @@ func Test_GetContentsByWorkflowID(t *testing.T) {
assert.Equal(t, giveHash, gotHash)
assert.Equal(t, giveContent, gotContent)
}
+
+func Test_GetContentsByWorkflowID_SecretsProvidedButEmpty(t *testing.T) {
+ db := pgtest.NewSqlxDB(t)
+ ctx := testutils.Context(t)
+ lggr := logger.TestLogger(t)
+ orm := &orm{ds: db, lggr: lggr}
+
+ // workflow_id is missing
+ _, _, err := orm.GetContentsByWorkflowID(ctx, "doesnt-exist")
+ require.ErrorContains(t, err, "no rows in result set")
+
+ // secrets_id is nil; should return EmptySecrets
+ workflowID := "aWorkflowID"
+ giveURL := "https://example.com"
+ giveBytes, err := crypto.Keccak256([]byte(giveURL))
+ require.NoError(t, err)
+ giveHash := hex.EncodeToString(giveBytes)
+ giveContent := ""
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, &job.WorkflowSpec{
+ Workflow: "",
+ Config: "",
+ WorkflowID: workflowID,
+ WorkflowOwner: "aWorkflowOwner",
+ WorkflowName: "aWorkflowName",
+ BinaryURL: "",
+ ConfigURL: "",
+ CreatedAt: time.Now(),
+ SpecType: job.DefaultSpecType,
+ }, giveURL, giveHash, giveContent)
+ require.NoError(t, err)
+
+ _, _, err = orm.GetContentsByWorkflowID(ctx, workflowID)
+ require.ErrorIs(t, err, ErrEmptySecrets)
+}
+
+func Test_UpsertWorkflowSpecWithSecrets(t *testing.T) {
+ db := pgtest.NewSqlxDB(t)
+ ctx := testutils.Context(t)
+ lggr := logger.TestLogger(t)
+ orm := &orm{ds: db, lggr: lggr}
+
+ t.Run("inserts new spec and new secrets", func(t *testing.T) {
+ giveURL := "https://example.com"
+ giveBytes, err := crypto.Keccak256([]byte(giveURL))
+ require.NoError(t, err)
+ giveHash := hex.EncodeToString(giveBytes)
+ giveContent := "some contents"
+
+ spec := &job.WorkflowSpec{
+ Workflow: "test_workflow",
+ Config: "test_config",
+ WorkflowID: "cid-123",
+ WorkflowOwner: "owner-123",
+ WorkflowName: "Test Workflow",
+ Status: job.WorkflowSpecStatusActive,
+ BinaryURL: "http://example.com/binary",
+ ConfigURL: "http://example.com/config",
+ CreatedAt: time.Now(),
+ SpecType: job.WASMFile,
+ }
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, giveContent)
+ require.NoError(t, err)
+
+ // Verify the record exists in the database
+ var dbSpec job.WorkflowSpec
+ err = db.Get(&dbSpec, `SELECT * FROM workflow_specs WHERE workflow_owner = $1 AND workflow_name = $2`, spec.WorkflowOwner, spec.WorkflowName)
+ require.NoError(t, err)
+ require.Equal(t, spec.Workflow, dbSpec.Workflow)
+
+ // Verify the secrets exists in the database
+ contents, err := orm.GetContents(ctx, giveURL)
+ require.NoError(t, err)
+ require.Equal(t, giveContent, contents)
+ })
+
+ t.Run("updates existing spec and secrets", func(t *testing.T) {
+ giveURL := "https://example.com"
+ giveBytes, err := crypto.Keccak256([]byte(giveURL))
+ require.NoError(t, err)
+ giveHash := hex.EncodeToString(giveBytes)
+ giveContent := "some contents"
+
+ spec := &job.WorkflowSpec{
+ Workflow: "test_workflow",
+ Config: "test_config",
+ WorkflowID: "cid-123",
+ WorkflowOwner: "owner-123",
+ WorkflowName: "Test Workflow",
+ Status: job.WorkflowSpecStatusActive,
+ BinaryURL: "http://example.com/binary",
+ ConfigURL: "http://example.com/config",
+ CreatedAt: time.Now(),
+ SpecType: job.WASMFile,
+ }
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, giveContent)
+ require.NoError(t, err)
+
+ // Update the status
+ spec.Status = job.WorkflowSpecStatusPaused
+
+ _, err = orm.UpsertWorkflowSpecWithSecrets(ctx, spec, giveURL, giveHash, "new contents")
+ require.NoError(t, err)
+
+ // Verify the record is updated in the database
+ var dbSpec job.WorkflowSpec
+ err = db.Get(&dbSpec, `SELECT * FROM workflow_specs WHERE workflow_owner = $1 AND workflow_name = $2`, spec.WorkflowOwner, spec.WorkflowName)
+ require.NoError(t, err)
+ require.Equal(t, spec.Config, dbSpec.Config)
+
+ // Verify the secrets is updated in the database
+ contents, err := orm.GetContents(ctx, giveURL)
+ require.NoError(t, err)
+ require.Equal(t, "new contents", contents)
+ })
+}
diff --git a/core/services/workflows/syncer/workflow_registry.go b/core/services/workflows/syncer/workflow_registry.go
index 75fcc9735ad..223fbe8e758 100644
--- a/core/services/workflows/syncer/workflow_registry.go
+++ b/core/services/workflows/syncer/workflow_registry.go
@@ -5,13 +5,14 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
+ "iter"
"sync"
"time"
"github.com/smartcontractkit/chainlink-common/pkg/capabilities"
"github.com/smartcontractkit/chainlink-common/pkg/services"
- types "github.com/smartcontractkit/chainlink-common/pkg/types"
- query "github.com/smartcontractkit/chainlink-common/pkg/types/query"
+ "github.com/smartcontractkit/chainlink-common/pkg/types"
+ "github.com/smartcontractkit/chainlink-common/pkg/types/query"
"github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
"github.com/smartcontractkit/chainlink-common/pkg/values"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/workflow/generated/workflow_registry_wrapper"
@@ -90,19 +91,19 @@ type WorkflowLoadConfig struct {
// FetcherFunc is an abstraction for fetching the contents stored at a URL.
type FetcherFunc func(ctx context.Context, url string) ([]byte, error)
-type ContractReaderFactory interface {
- NewContractReader(context.Context, []byte) (types.ContractReader, error)
-}
-
// ContractReader is a subset of types.ContractReader defined locally to enable mocking.
type ContractReader interface {
Start(ctx context.Context) error
Close() error
Bind(context.Context, []types.BoundContract) error
- QueryKey(context.Context, types.BoundContract, query.KeyFilter, query.LimitAndSort, any) ([]types.Sequence, error)
+ QueryKeys(ctx context.Context, keyQueries []types.ContractKeyFilter, limitAndSort query.LimitAndSort) (iter.Seq2[string, types.Sequence], error)
GetLatestValueWithHeadData(ctx context.Context, readName string, confidenceLevel primitives.ConfidenceLevel, params any, returnVal any) (head *types.Head, err error)
}
+type ContractReaderFactory interface {
+ NewContractReader(context.Context, []byte) (types.ContractReader, error)
+}
+
// WorkflowRegistrySyncer is the public interface of the package.
type WorkflowRegistrySyncer interface {
services.Service
@@ -128,21 +129,11 @@ type workflowRegistry struct {
newContractReaderFn newContractReaderFn
- eventPollerCfg WorkflowEventPollerConfig
- eventTypes []WorkflowRegistryEventType
-
- // eventsCh is read by the handler and each event is handled once received.
- eventsCh chan WorkflowRegistryEventResponse
+ eventPollerCfg WorkflowEventPollerConfig
+ eventTypes []WorkflowRegistryEventType
handler evtHandler
initialWorkflowsStateLoader initialWorkflowsStateLoader
- // batchCh is a channel that receives batches of events from the contract query goroutines.
- batchCh chan []WorkflowRegistryEventResponse
-
- // heap is a min heap that merges batches of events from the contract query goroutines. The
- // default min heap is sorted by block height.
- heap Heap
-
workflowDonNotifier donNotifier
reader ContractReader
@@ -197,11 +188,8 @@ func NewWorkflowRegistry(
newContractReaderFn: newContractReaderFn,
workflowRegistryAddress: addr,
eventPollerCfg: eventPollerConfig,
- heap: newBlockHeightHeap(),
stopCh: make(services.StopChan),
eventTypes: ets,
- eventsCh: make(chan WorkflowRegistryEventResponse),
- batchCh: make(chan []WorkflowRegistryEventResponse, len(ets)),
handler: handler,
initialWorkflowsStateLoader: initialWorkflowsStateLoader,
workflowDonNotifier: workflowDonNotifier,
@@ -238,15 +226,13 @@ func (w *workflowRegistry) Start(_ context.Context) error {
return
}
- w.syncEventsLoop(ctx, loadWorkflowsHead.Height)
- }()
-
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- defer cancel()
+ reader, err := w.getContractReader(ctx)
+ if err != nil {
+ w.lggr.Criticalf("contract reader unavailable : %s", err)
+ return
+ }
- w.handlerLoop(ctx)
+ w.readRegistryEvents(ctx, reader, loadWorkflowsHead.Height)
}()
return nil
@@ -273,135 +259,82 @@ func (w *workflowRegistry) Name() string {
return name
}
-// handlerLoop handles the events that are emitted by the contract.
-func (w *workflowRegistry) handlerLoop(ctx context.Context) {
+// readRegistryEvents polls the contract for events and send them to the events channel.
+func (w *workflowRegistry) readRegistryEvents(ctx context.Context, reader ContractReader, lastReadBlockNumber string) {
+ ticker := w.getTicker()
+
+ var keyQueries = make([]types.ContractKeyFilter, 0, len(w.eventTypes))
+ for _, et := range w.eventTypes {
+ var logData values.Value
+ keyQueries = append(keyQueries, types.ContractKeyFilter{
+ KeyFilter: query.KeyFilter{
+ Key: string(et),
+ Expressions: []query.Expression{
+ query.Confidence(primitives.Finalized),
+ query.Block(lastReadBlockNumber, primitives.Gt),
+ },
+ },
+ Contract: types.BoundContract{
+ Name: WorkflowRegistryContractName,
+ Address: w.workflowRegistryAddress,
+ },
+ SequenceDataType: &logData,
+ })
+ }
+
+ cursor := ""
for {
select {
case <-ctx.Done():
return
- case resp, open := <-w.eventsCh:
- if !open {
- return
+ case <-ticker:
+ limitAndSort := query.LimitAndSort{
+ SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
+ Limit: query.Limit{Count: w.eventPollerCfg.QueryCount},
}
-
- if resp.Err != nil || resp.Event == nil {
- w.lggr.Errorw("failed to handle event", "err", resp.Err)
- continue
+ if cursor != "" {
+ limitAndSort.Limit = query.CursorLimit(cursor, query.CursorFollowing, w.eventPollerCfg.QueryCount)
}
- event := resp.Event
- w.lggr.Debugf("handling event: %+v", event)
- if err := w.handler.Handle(ctx, *event); err != nil {
- w.lggr.Errorw("failed to handle event", "event", event, "err", err)
+ logsIter, err := reader.QueryKeys(ctx, keyQueries, limitAndSort)
+ if err != nil {
+ w.lggr.Errorw("failed to query keys", "err", err)
continue
}
- }
- }
-}
-// syncEventsLoop polls the contract for events and passes them to a channel for handling.
-func (w *workflowRegistry) syncEventsLoop(ctx context.Context, lastReadBlockNumber string) {
- var (
- // sendLog is a helper that sends a WorkflowRegistryEventResponse to the eventsCh in a
- // blocking way that will send the response or be canceled.
- sendLog = func(resp WorkflowRegistryEventResponse) {
- select {
- case w.eventsCh <- resp:
- case <-ctx.Done():
+ var logs []sequenceWithEventType
+ for eventType, log := range logsIter {
+ logs = append(logs, sequenceWithEventType{
+ Sequence: log,
+ EventType: WorkflowRegistryEventType(eventType),
+ })
}
- }
-
- ticker = w.getTicker()
-
- signals = make(map[WorkflowRegistryEventType]chan struct{}, 0)
- )
-
- // critical failure if there is no reader, the loop will exit and the parent context will be
- // canceled.
- reader, err := w.getContractReader(ctx)
- if err != nil {
- w.lggr.Criticalf("contract reader unavailable : %s", err)
- return
- }
-
- // fan out and query for each event type
- for i := 0; i < len(w.eventTypes); i++ {
- signal := make(chan struct{}, 1)
- signals[w.eventTypes[i]] = signal
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
-
- queryEvent(
- ctx,
- signal,
- w.lggr,
- reader,
- lastReadBlockNumber,
- queryEventConfig{
- ContractName: WorkflowRegistryContractName,
- ContractAddress: w.workflowRegistryAddress,
- WorkflowEventPollerConfig: w.eventPollerCfg,
- },
- w.eventTypes[i],
- w.batchCh,
- )
- }()
- }
+ w.lggr.Debugw("QueryKeys called", "logs", len(logs), "eventTypes", w.eventTypes, "lastReadBlockNumber", lastReadBlockNumber, "logCursor", cursor)
- // Periodically send a signal to all the queryEvent goroutines to query the contract
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker:
- w.lggr.Debugw("Syncing with WorkflowRegistry")
- // for each event type, send a signal for it to execute a query and produce a new
- // batch of event logs
- for i := 0; i < len(w.eventTypes); i++ {
- signal := signals[w.eventTypes[i]]
- select {
- case signal <- struct{}{}:
- case <-ctx.Done():
- return
- }
+ // ChainReader QueryKey API provides logs including the cursor value and not
+ // after the cursor value. If the response only consists of the log corresponding
+ // to the cursor and no log after it, then we understand that there are no new
+ // logs
+ if len(logs) == 1 && logs[0].Sequence.Cursor == cursor {
+ w.lggr.Infow("No new logs since", "cursor", cursor)
+ continue
}
- // block on fan-in until all fetched event logs are sent to the handlers
- w.orderAndSend(
- ctx,
- len(w.eventTypes),
- w.batchCh,
- sendLog,
- )
- }
- }
-}
+ var events []WorkflowRegistryEventResponse
+ for _, log := range logs {
+ if log.Sequence.Cursor == cursor {
+ continue
+ }
-// orderAndSend reads n batches from the batch channel, heapifies all the batches then dequeues
-// the min heap via the sendLog function.
-func (w *workflowRegistry) orderAndSend(
- ctx context.Context,
- batchCount int,
- batchCh <-chan []WorkflowRegistryEventResponse,
- sendLog func(WorkflowRegistryEventResponse),
-) {
- for {
- select {
- case <-ctx.Done():
- return
- case batch := <-batchCh:
- for _, response := range batch {
- w.heap.Push(response)
+ events = append(events, toWorkflowRegistryEventResponse(log.Sequence, log.EventType, w.lggr))
+ cursor = log.Sequence.Cursor
}
- batchCount--
- // If we have received responses for all the events, then we can drain the heap.
- if batchCount == 0 {
- for w.heap.Len() > 0 {
- sendLog(w.heap.Pop())
+ for _, event := range events {
+ err := w.handler.Handle(ctx, event.Event)
+ if err != nil {
+ w.lggr.Errorw("failed to handle event", "err", err)
}
- return
}
}
}
@@ -437,95 +370,9 @@ func (w *workflowRegistry) getContractReader(ctx context.Context) (ContractReade
return w.reader, nil
}
-type queryEventConfig struct {
- ContractName string
- ContractAddress string
- WorkflowEventPollerConfig
-}
-
-// queryEvent queries the contract for events of the given type on each tick from the ticker.
-// Sends a batch of event logs to the batch channel. The batch represents all the
-// event logs read since the last query. Loops until the context is canceled.
-func queryEvent(
- ctx context.Context,
- ticker <-chan struct{},
- lggr logger.Logger,
- reader ContractReader,
- lastReadBlockNumber string,
- cfg queryEventConfig,
- et WorkflowRegistryEventType,
- batchCh chan<- []WorkflowRegistryEventResponse,
-) {
- // create query
- var (
- logData values.Value
- cursor = ""
- limitAndSort = query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: cfg.QueryCount},
- }
- bc = types.BoundContract{
- Name: cfg.ContractName,
- Address: cfg.ContractAddress,
- }
- )
-
- // Loop until canceled
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker:
- responseBatch := []WorkflowRegistryEventResponse{}
-
- if cursor != "" {
- limitAndSort.Limit = query.CursorLimit(cursor, query.CursorFollowing, cfg.QueryCount)
- }
-
- logs, err := reader.QueryKey(
- ctx,
- bc,
- query.KeyFilter{
- Key: string(et),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block(lastReadBlockNumber, primitives.Gte),
- },
- },
- limitAndSort,
- &logData,
- )
- lcursor := cursor
- if lcursor == "" {
- lcursor = "empty"
- }
- lggr.Debugw("QueryKeys called", "logs", len(logs), "eventType", et, "lastReadBlockNumber", lastReadBlockNumber, "logCursor", lcursor)
-
- if err != nil {
- lggr.Errorw("QueryKey failure", "err", err)
- continue
- }
-
- // ChainReader QueryKey API provides logs including the cursor value and not
- // after the cursor value. If the response only consists of the log corresponding
- // to the cursor and no log after it, then we understand that there are no new
- // logs
- if len(logs) == 1 && logs[0].Cursor == cursor {
- lggr.Infow("No new logs since", "cursor", cursor)
- continue
- }
-
- for _, log := range logs {
- if log.Cursor == cursor {
- continue
- }
-
- responseBatch = append(responseBatch, toWorkflowRegistryEventResponse(log, et, lggr))
- cursor = log.Cursor
- }
- batchCh <- responseBatch
- }
- }
+type sequenceWithEventType struct {
+ Sequence types.Sequence
+ EventType WorkflowRegistryEventType
}
func getWorkflowRegistryEventReader(
@@ -681,7 +528,7 @@ func (l *workflowRegistryContractLoader) LoadWorkflows(ctx context.Context, don
var workflows GetWorkflowMetadataListByDONReturnVal
headAtLastRead, err = contractReader.GetLatestValueWithHeadData(ctx, readIdentifier, primitives.Finalized, params, &workflows)
if err != nil {
- return nil, fmt.Errorf("failed to get workflow metadata for don %w", err)
+ return nil, fmt.Errorf("failed to get lastest value with head data %w", err)
}
l.lggr.Debugw("Rehydrating existing workflows", "len", len(workflows.WorkflowMetadataList))
diff --git a/core/services/workflows/syncer/workflow_registry_test.go b/core/services/workflows/syncer/workflow_registry_test.go
deleted file mode 100644
index 621d3d123d5..00000000000
--- a/core/services/workflows/syncer/workflow_registry_test.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package syncer
-
-import (
- "context"
- "encoding/hex"
- "testing"
- "time"
-
- "github.com/stretchr/testify/mock"
-
- "github.com/jonboulle/clockwork"
-
- "github.com/smartcontractkit/chainlink-common/pkg/capabilities"
- "github.com/smartcontractkit/chainlink-common/pkg/custmsg"
- "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
- types "github.com/smartcontractkit/chainlink-common/pkg/types"
- query "github.com/smartcontractkit/chainlink-common/pkg/types/query"
- "github.com/smartcontractkit/chainlink-common/pkg/types/query/primitives"
- "github.com/smartcontractkit/chainlink-common/pkg/values"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/workflowkey"
- "github.com/smartcontractkit/chainlink/v2/core/utils/crypto"
- "github.com/smartcontractkit/chainlink/v2/core/utils/matches"
-
- "github.com/stretchr/testify/require"
-)
-
-type testDonNotifier struct {
- don capabilities.DON
- err error
-}
-
-func (t *testDonNotifier) WaitForDon(ctx context.Context) (capabilities.DON, error) {
- return t.don, t.err
-}
-
-func Test_Workflow_Registry_Syncer(t *testing.T) {
- var (
- giveContents = "contents"
- wantContents = "updated contents"
- contractAddress = "0xdeadbeef"
- giveCfg = WorkflowEventPollerConfig{
- QueryCount: 20,
- }
- giveURL = "http://example.com"
- giveHash, err = crypto.Keccak256([]byte(giveURL))
-
- giveLog = types.Sequence{
- Data: map[string]any{
- "SecretsURLHash": giveHash,
- "Owner": "0xowneraddr",
- },
- Cursor: "cursor",
- }
- )
-
- require.NoError(t, err)
-
- var (
- lggr = logger.TestLogger(t)
- db = pgtest.NewSqlxDB(t)
- orm = &orm{ds: db, lggr: lggr}
- ctx, cancel = context.WithCancel(testutils.Context(t))
- reader = NewMockContractReader(t)
- emitter = custmsg.NewLabeler()
- gateway = func(_ context.Context, _ string) ([]byte, error) {
- return []byte(wantContents), nil
- }
- ticker = make(chan time.Time)
-
- handler = NewEventHandler(lggr, orm, gateway, nil, nil,
- emitter, clockwork.NewFakeClock(), workflowkey.Key{})
- loader = NewWorkflowRegistryContractLoader(lggr, contractAddress, func(ctx context.Context, bytes []byte) (ContractReader, error) {
- return reader, nil
- }, handler)
-
- worker = NewWorkflowRegistry(lggr, func(ctx context.Context, bytes []byte) (ContractReader, error) {
- return reader, nil
- }, contractAddress,
- WorkflowEventPollerConfig{
- QueryCount: 20,
- }, handler, loader,
- &testDonNotifier{
- don: capabilities.DON{
- ID: 1,
- },
- err: nil,
- },
- WithTicker(ticker))
- )
-
- // Cleanup the worker
- defer cancel()
-
- // Seed the DB with an original entry
- _, err = orm.Create(ctx, giveURL, hex.EncodeToString(giveHash), giveContents)
- require.NoError(t, err)
-
- // Mock out the contract reader query
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(ForceUpdateSecretsEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{giveLog}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowPausedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowDeletedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowActivatedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowUpdatedEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().QueryKey(
- matches.AnyContext,
- types.BoundContract{
- Name: WorkflowRegistryContractName,
- Address: contractAddress,
- },
- query.KeyFilter{
- Key: string(WorkflowRegisteredEvent),
- Expressions: []query.Expression{
- query.Confidence(primitives.Finalized),
- query.Block("0", primitives.Gte),
- },
- },
- query.LimitAndSort{
- SortBy: []query.SortBy{query.NewSortByTimestamp(query.Asc)},
- Limit: query.Limit{Count: giveCfg.QueryCount},
- },
- new(values.Value),
- ).Return([]types.Sequence{}, nil)
- reader.EXPECT().GetLatestValueWithHeadData(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(&types.Head{
- Height: "0",
- }, nil)
- reader.EXPECT().Start(mock.Anything).Return(nil)
- reader.EXPECT().Bind(mock.Anything, mock.Anything).Return(nil)
-
- // Go run the worker
- servicetest.Run(t, worker)
-
- // Send a tick to start a query
- ticker <- time.Now()
-
- // Require the secrets contents to eventually be updated
- require.Eventually(t, func() bool {
- secrets, err := orm.GetContents(ctx, giveURL)
- require.NoError(t, err)
- return secrets == wantContents
- }, 5*time.Second, time.Second)
-}
diff --git a/deployment/address_book.go b/deployment/address_book.go
index 6f605013011..3ce0332a4c3 100644
--- a/deployment/address_book.go
+++ b/deployment/address_book.go
@@ -89,8 +89,10 @@ type AddressBook interface {
Remove(ab AddressBook) error
}
+type AddressesByChain map[uint64]map[string]TypeAndVersion
+
type AddressBookMap struct {
- addressesByChain map[uint64]map[string]TypeAndVersion
+ addressesByChain AddressesByChain
mtx sync.RWMutex
}
diff --git a/deployment/ccip/changeset/accept_ownership_test.go b/deployment/ccip/changeset/accept_ownership_test.go
index 5580b31a85a..9b71e0ad5cb 100644
--- a/deployment/ccip/changeset/accept_ownership_test.go
+++ b/deployment/ccip/changeset/accept_ownership_test.go
@@ -9,9 +9,11 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
)
func Test_NewAcceptOwnershipChangeset(t *testing.T) {
+ t.Parallel()
e := NewMemoryEnvironment(t)
state, err := LoadOnchainState(e.Env)
require.NoError(t, err)
@@ -20,12 +22,12 @@ func Test_NewAcceptOwnershipChangeset(t *testing.T) {
source := allChains[0]
dest := allChains[1]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
- source: &commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
+ source: {
Timelock: state.Chains[source].Timelock,
CallProxy: state.Chains[source].CallProxy,
},
- dest: &commonchangeset.TimelockExecutionContracts{
+ dest: {
Timelock: state.Chains[dest].Timelock,
CallProxy: state.Chains[dest].CallProxy,
},
diff --git a/deployment/ccip/changeset/cs_add_chain.go b/deployment/ccip/changeset/cs_add_chain.go
index b3d0df04c93..ddb6e61d5ba 100644
--- a/deployment/ccip/changeset/cs_add_chain.go
+++ b/deployment/ccip/changeset/cs_add_chain.go
@@ -8,18 +8,14 @@ import (
"github.com/smartcontractkit/chainlink-ccip/chainconfig"
"github.com/smartcontractkit/chainlink-ccip/pkg/types/ccipocr3"
-
- "github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal"
"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
- "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_home"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
"github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_home"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/fee_quoter"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/onramp"
)
@@ -136,135 +132,6 @@ func NewChainInboundChangeset(
}, nil
}
-type AddDonAndSetCandidateChangesetConfig struct {
- HomeChainSelector uint64
- FeedChainSelector uint64
- NewChainSelector uint64
- PluginType types.PluginType
- NodeIDs []string
- CCIPOCRParams CCIPOCRParams
-}
-
-func (a AddDonAndSetCandidateChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (deployment.Nodes, error) {
- if a.HomeChainSelector == 0 {
- return nil, fmt.Errorf("HomeChainSelector must be set")
- }
- if a.FeedChainSelector == 0 {
- return nil, fmt.Errorf("FeedChainSelector must be set")
- }
- if a.NewChainSelector == 0 {
- return nil, fmt.Errorf("ocr config chain selector must be set")
- }
- if a.PluginType != types.PluginTypeCCIPCommit &&
- a.PluginType != types.PluginTypeCCIPExec {
- return nil, fmt.Errorf("PluginType must be set to either CCIPCommit or CCIPExec")
- }
- // TODO: validate token config
- if len(a.NodeIDs) == 0 {
- return nil, fmt.Errorf("nodeIDs must be set")
- }
- nodes, err := deployment.NodeInfo(a.NodeIDs, e.Offchain)
- if err != nil {
- return nil, fmt.Errorf("get node info: %w", err)
- }
-
- // check that chain config is set up for the new chain
- chainConfig, err := state.Chains[a.HomeChainSelector].CCIPHome.GetChainConfig(nil, a.NewChainSelector)
- if err != nil {
- return nil, fmt.Errorf("get all chain configs: %w", err)
- }
-
- // FChain should never be zero if a chain config is set in CCIPHome
- if chainConfig.FChain == 0 {
- return nil, fmt.Errorf("chain config not set up for new chain %d", a.NewChainSelector)
- }
-
- err = a.CCIPOCRParams.Validate()
- if err != nil {
- return nil, fmt.Errorf("invalid ccip ocr params: %w", err)
- }
-
- if e.OCRSecrets.IsEmpty() {
- return nil, fmt.Errorf("OCR secrets must be set")
- }
-
- return nodes, nil
-}
-
-// AddDonAndSetCandidateChangeset adds new DON for destination to home chain
-// and sets the commit plugin config as candidateConfig for the don.
-func AddDonAndSetCandidateChangeset(
- e deployment.Environment,
- cfg AddDonAndSetCandidateChangesetConfig,
-) (deployment.ChangesetOutput, error) {
- state, err := LoadOnchainState(e)
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
-
- nodes, err := cfg.Validate(e, state)
- if err != nil {
- return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
- }
-
- newDONArgs, err := internal.BuildOCR3ConfigForCCIPHome(
- e.OCRSecrets,
- state.Chains[cfg.NewChainSelector].OffRamp,
- e.Chains[cfg.NewChainSelector],
- nodes.NonBootstraps(),
- state.Chains[cfg.HomeChainSelector].RMNHome.Address(),
- cfg.CCIPOCRParams.OCRParameters,
- cfg.CCIPOCRParams.CommitOffChainConfig,
- cfg.CCIPOCRParams.ExecuteOffChainConfig,
- )
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
- latestDon, err := internal.LatestCCIPDON(state.Chains[cfg.HomeChainSelector].CapabilityRegistry)
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
- commitConfig, ok := newDONArgs[cfg.PluginType]
- if !ok {
- return deployment.ChangesetOutput{}, fmt.Errorf("missing commit plugin in ocr3Configs")
- }
- donID := latestDon.Id + 1
- addDonOp, err := newDonWithCandidateOp(
- donID, commitConfig,
- state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
- nodes.NonBootstraps(),
- )
- if err != nil {
- return deployment.ChangesetOutput{}, err
- }
-
- var (
- timelocksPerChain = map[uint64]common.Address{
- cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].Timelock.Address(),
- }
- proposerMCMSes = map[uint64]*gethwrappers.ManyChainMultiSig{
- cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].ProposerMcm,
- }
- )
- prop, err := proposalutils.BuildProposalFromBatches(
- timelocksPerChain,
- proposerMCMSes,
- []timelock.BatchChainOperation{{
- ChainIdentifier: mcms.ChainIdentifier(cfg.HomeChainSelector),
- Batch: []mcms.Operation{addDonOp},
- }},
- "setCandidate for commit and AddDon on new Chain",
- 0, // minDelay
- )
- if err != nil {
- return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal from batch: %w", err)
- }
-
- return deployment.ChangesetOutput{
- Proposals: []timelock.MCMSWithTimelockProposal{*prop},
- }, nil
-}
-
func applyChainConfigUpdatesOp(
e deployment.Environment,
state CCIPOnChainState,
@@ -304,38 +171,3 @@ func applyChainConfigUpdatesOp(
Value: big.NewInt(0),
}, nil
}
-
-// newDonWithCandidateOp sets the candidate commit config by calling setCandidate on CCIPHome contract through the AddDON call on CapReg contract
-// This should be done first before calling any other UpdateDON calls
-// This proposes to set up OCR3 config for the commit plugin for the DON
-func newDonWithCandidateOp(
- donID uint32,
- pluginConfig ccip_home.CCIPHomeOCR3Config,
- capReg *capabilities_registry.CapabilitiesRegistry,
- nodes deployment.Nodes,
-) (mcms.Operation, error) {
- encodedSetCandidateCall, err := internal.CCIPHomeABI.Pack(
- "setCandidate",
- donID,
- pluginConfig.PluginType,
- pluginConfig,
- [32]byte{},
- )
- if err != nil {
- return mcms.Operation{}, fmt.Errorf("pack set candidate call: %w", err)
- }
- addDonTx, err := capReg.AddDON(deployment.SimTransactOpts(), nodes.PeerIDs(), []capabilities_registry.CapabilitiesRegistryCapabilityConfiguration{
- {
- CapabilityId: internal.CCIPCapabilityID,
- Config: encodedSetCandidateCall,
- },
- }, false, false, nodes.DefaultF())
- if err != nil {
- return mcms.Operation{}, fmt.Errorf("could not generate add don tx w/ commit config: %w", err)
- }
- return mcms.Operation{
- To: capReg.Address(),
- Data: addDonTx.Data(),
- Value: big.NewInt(0),
- }, nil
-}
diff --git a/deployment/ccip/changeset/cs_add_chain_test.go b/deployment/ccip/changeset/cs_add_chain_test.go
index b21d7411ce7..a8fdf50b0c1 100644
--- a/deployment/ccip/changeset/cs_add_chain_test.go
+++ b/deployment/ccip/changeset/cs_add_chain_test.go
@@ -1,12 +1,12 @@
package changeset
import (
- "math/big"
"testing"
"time"
"github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
@@ -30,6 +30,7 @@ import (
)
func TestAddChainInbound(t *testing.T) {
+ t.Parallel()
// 4 chains where the 4th is added after initial deployment.
e := NewMemoryEnvironment(t,
WithChains(4),
@@ -46,12 +47,7 @@ func TestAddChainInbound(t *testing.T) {
require.NoError(t, err)
require.NoError(t, e.Env.ExistingAddresses.Merge(newAddresses))
- cfg := commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ cfg := proposalutils.SingleGroupTimelockConfig(t)
e.Env, err = commonchangeset.ApplyChangesets(t, e.Env, nil, []commonchangeset.ChangesetApplication{
{
Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployLinkToken),
@@ -152,7 +148,7 @@ func TestAddChainInbound(t *testing.T) {
}
// transfer ownership to timelock
- _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
+ _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
initialDeploy[0]: {
Timelock: state.Chains[initialDeploy[0]].Timelock,
CallProxy: state.Chains[initialDeploy[0]].CallProxy,
@@ -183,18 +179,11 @@ func TestAddChainInbound(t *testing.T) {
assertTimelockOwnership(t, e, initialDeploy, state)
- nodes, err := deployment.NodeInfo(e.Env.NodeIDs, e.Env.Offchain)
- require.NoError(t, err)
-
// TODO This currently is not working - Able to send the request here but request gets stuck in execution
// Send a new message and expect that this is delivered once the chain is completely set up as inbound
//TestSendRequest(t, e.Env, state, initialDeploy[0], newChain, true)
- var nodeIDs []string
- for _, node := range nodes {
- nodeIDs = append(nodeIDs, node.NodeID)
- }
- _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
+ _, err = commonchangeset.ApplyChangesets(t, e.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
e.HomeChainSel: {
Timelock: state.Chains[e.HomeChainSel].Timelock,
CallProxy: state.Chains[e.HomeChainSel].CallProxy,
@@ -207,31 +196,37 @@ func TestAddChainInbound(t *testing.T) {
{
Changeset: commonchangeset.WrapChangeSet(AddDonAndSetCandidateChangeset),
Config: AddDonAndSetCandidateChangesetConfig{
- HomeChainSelector: e.HomeChainSel,
- FeedChainSelector: e.FeedChainSel,
- NewChainSelector: newChain,
- PluginType: types.PluginTypeCCIPCommit,
- NodeIDs: nodeIDs,
- CCIPOCRParams: DefaultOCRParams(
- e.FeedChainSel,
- tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[newChain].LinkToken, state.Chains[newChain].Weth9),
- nil,
- ),
+ SetCandidateChangesetConfig: SetCandidateChangesetConfig{
+ HomeChainSelector: e.HomeChainSel,
+ FeedChainSelector: e.FeedChainSel,
+ DONChainSelector: newChain,
+ PluginType: types.PluginTypeCCIPCommit,
+ CCIPOCRParams: DefaultOCRParams(
+ e.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[newChain].LinkToken, state.Chains[newChain].Weth9),
+ nil,
+ ),
+ MCMS: &MCMSConfig{
+ MinDelay: 0,
+ },
+ },
},
},
{
- Changeset: commonchangeset.WrapChangeSet(SetCandidatePluginChangeset),
- Config: AddDonAndSetCandidateChangesetConfig{
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
HomeChainSelector: e.HomeChainSel,
FeedChainSelector: e.FeedChainSel,
- NewChainSelector: newChain,
+ DONChainSelector: newChain,
PluginType: types.PluginTypeCCIPExec,
- NodeIDs: nodeIDs,
CCIPOCRParams: DefaultOCRParams(
e.FeedChainSel,
tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[newChain].LinkToken, state.Chains[newChain].Weth9),
nil,
),
+ MCMS: &MCMSConfig{
+ MinDelay: 0,
+ },
},
},
{
@@ -239,13 +234,13 @@ func TestAddChainInbound(t *testing.T) {
Config: PromoteAllCandidatesChangesetConfig{
HomeChainSelector: e.HomeChainSel,
DONChainSelector: newChain,
- NodeIDs: nodeIDs,
MCMS: &MCMSConfig{
MinDelay: 0,
},
},
},
})
+ require.NoError(t, err)
// verify if the configs are updated
require.NoError(t, ValidateCCIPHomeConfigSetUp(
diff --git a/deployment/ccip/changeset/cs_add_lane_test.go b/deployment/ccip/changeset/cs_add_lane_test.go
index 7f1374a1725..5c324c975ef 100644
--- a/deployment/ccip/changeset/cs_add_lane_test.go
+++ b/deployment/ccip/changeset/cs_add_lane_test.go
@@ -16,6 +16,7 @@ import (
)
func TestAddLanesWithTestRouter(t *testing.T) {
+ t.Parallel()
e := NewMemoryEnvironment(t)
// Here we have CR + nodes set up, but no CCIP contracts deployed.
state, err := LoadOnchainState(e.Env)
diff --git a/deployment/ccip/changeset/cs_ccip_home.go b/deployment/ccip/changeset/cs_ccip_home.go
index 202d4216b60..22fb1fc23fa 100644
--- a/deployment/ccip/changeset/cs_ccip_home.go
+++ b/deployment/ccip/changeset/cs_ccip_home.go
@@ -10,27 +10,34 @@ import (
"github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink/deployment"
"github.com/smartcontractkit/chainlink/deployment/ccip/changeset/internal"
"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
+ "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/ccip_home"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
)
var (
+ _ deployment.ChangeSet[AddDonAndSetCandidateChangesetConfig] = AddDonAndSetCandidateChangeset
_ deployment.ChangeSet[PromoteAllCandidatesChangesetConfig] = PromoteAllCandidatesChangeset
- _ deployment.ChangeSet[AddDonAndSetCandidateChangesetConfig] = SetCandidatePluginChangeset
+ _ deployment.ChangeSet[SetCandidateChangesetConfig] = SetCandidateChangeset
)
type PromoteAllCandidatesChangesetConfig struct {
HomeChainSelector uint64
+
// DONChainSelector is the chain selector of the DON that we want to promote the candidate config of.
// Note that each (chain, ccip capability version) pair has a unique DON ID.
DONChainSelector uint64
- NodeIDs []string
- MCMS *MCMSConfig
+
+ // MCMS is optional MCMS configuration, if provided the changeset will generate an MCMS proposal.
+ // If nil, the changeset will execute the commands directly using the deployer key
+ // of the provided environment.
+ MCMS *MCMSConfig
}
func (p PromoteAllCandidatesChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (deployment.Nodes, error) {
@@ -40,7 +47,7 @@ func (p PromoteAllCandidatesChangesetConfig) Validate(e deployment.Environment,
if err := deployment.IsValidChainSelector(p.DONChainSelector); err != nil {
return nil, fmt.Errorf("don chain selector invalid: %w", err)
}
- if len(p.NodeIDs) == 0 {
+ if len(e.NodeIDs) == 0 {
return nil, fmt.Errorf("NodeIDs must be set")
}
if state.Chains[p.HomeChainSelector].CCIPHome == nil {
@@ -49,8 +56,12 @@ func (p PromoteAllCandidatesChangesetConfig) Validate(e deployment.Environment,
if state.Chains[p.HomeChainSelector].CapabilityRegistry == nil {
return nil, fmt.Errorf("CapabilityRegistry contract does not exist")
}
+ if state.Chains[p.DONChainSelector].OffRamp == nil {
+ // should not be possible, but a defensive check.
+ return nil, fmt.Errorf("OffRamp contract does not exist")
+ }
- nodes, err := deployment.NodeInfo(p.NodeIDs, e.Offchain)
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
if err != nil {
return nil, fmt.Errorf("fetch node info: %w", err)
}
@@ -96,7 +107,10 @@ func (p PromoteAllCandidatesChangesetConfig) Validate(e deployment.Environment,
}
// PromoteAllCandidatesChangeset generates a proposal to call promoteCandidate on the CCIPHome through CapReg.
-// This needs to be called after SetCandidateProposal is executed.
+// Note that a DON must exist prior to being able to use this changeset effectively,
+// i.e AddDonAndSetCandidateChangeset must be called first.
+// This can also be used to promote a 0x0 candidate config to be the active, effectively shutting down the DON.
+// At that point you can call the RemoveDON changeset to remove the DON entirely from the capability registry.
func PromoteAllCandidatesChangeset(
e deployment.Environment,
cfg PromoteAllCandidatesChangesetConfig,
@@ -160,8 +174,122 @@ func PromoteAllCandidatesChangeset(
}, nil
}
-// SetCandidatePluginChangeset calls setCandidate on the CCIPHome for setting up OCR3 exec Plugin config for the new chain.
-func SetCandidatePluginChangeset(
+// AddDonAndSetCandidateChangesetConfig is a separate config struct
+// because the validation is slightly different from SetCandidateChangesetConfig.
+// In particular, we check to make sure we don't already have a DON for the chain.
+type AddDonAndSetCandidateChangesetConfig struct {
+ SetCandidateChangesetConfig
+}
+
+func (a AddDonAndSetCandidateChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (deployment.Nodes, error) {
+ nodes, err := a.SetCandidateChangesetConfig.Validate(e, state)
+ if err != nil {
+ return nil, err
+ }
+
+ // check if a DON already exists for this chain
+ donID, err := internal.DonIDForChain(
+ state.Chains[a.HomeChainSelector].CapabilityRegistry,
+ state.Chains[a.HomeChainSelector].CCIPHome,
+ a.DONChainSelector,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("fetch don id for chain: %w", err)
+ }
+ if donID != 0 {
+ return nil, fmt.Errorf("don already exists in CR for chain %d, it has id %d", a.DONChainSelector, donID)
+ }
+
+ return nodes, nil
+}
+
+type SetCandidateChangesetConfig struct {
+ HomeChainSelector uint64
+ FeedChainSelector uint64
+
+ // DONChainSelector is the chain selector of the chain where the DON will be added.
+ DONChainSelector uint64
+
+ PluginType types.PluginType
+ // Note that the PluginType field is used to determine which field in CCIPOCRParams is used.
+ CCIPOCRParams CCIPOCRParams
+
+ // MCMS is optional MCMS configuration, if provided the changeset will generate an MCMS proposal.
+ // If nil, the changeset will execute the commands directly using the deployer key
+ // of the provided environment.
+ MCMS *MCMSConfig
+}
+
+func (s SetCandidateChangesetConfig) Validate(e deployment.Environment, state CCIPOnChainState) (deployment.Nodes, error) {
+ if err := deployment.IsValidChainSelector(s.HomeChainSelector); err != nil {
+ return nil, fmt.Errorf("home chain selector invalid: %w", err)
+ }
+ if err := deployment.IsValidChainSelector(s.FeedChainSelector); err != nil {
+ return nil, fmt.Errorf("feed chain selector invalid: %w", err)
+ }
+ if err := deployment.IsValidChainSelector(s.DONChainSelector); err != nil {
+ return nil, fmt.Errorf("don chain selector invalid: %w", err)
+ }
+ if len(e.NodeIDs) == 0 {
+ return nil, fmt.Errorf("nodeIDs must be set")
+ }
+ if state.Chains[s.HomeChainSelector].CCIPHome == nil {
+ return nil, fmt.Errorf("CCIPHome contract does not exist")
+ }
+ if state.Chains[s.HomeChainSelector].CapabilityRegistry == nil {
+ return nil, fmt.Errorf("CapabilityRegistry contract does not exist")
+ }
+ if state.Chains[s.DONChainSelector].OffRamp == nil {
+ // should not be possible, but a defensive check.
+ return nil, fmt.Errorf("OffRamp contract does not exist on don chain selector %d", s.DONChainSelector)
+ }
+ if s.PluginType != types.PluginTypeCCIPCommit &&
+ s.PluginType != types.PluginTypeCCIPExec {
+ return nil, fmt.Errorf("PluginType must be set to either CCIPCommit or CCIPExec")
+ }
+
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
+ if err != nil {
+ return nil, fmt.Errorf("get node info: %w", err)
+ }
+
+ // TODO: validate token config
+ // TODO: validate gas config
+
+ // check that chain config is set up for the new chain
+ chainConfig, err := state.Chains[s.HomeChainSelector].CCIPHome.GetChainConfig(nil, s.DONChainSelector)
+ if err != nil {
+ return nil, fmt.Errorf("get all chain configs: %w", err)
+ }
+
+ // FChain should never be zero if a chain config is set in CCIPHome
+ if chainConfig.FChain == 0 {
+ return nil, fmt.Errorf("chain config not set up for new chain %d", s.DONChainSelector)
+ }
+
+ err = s.CCIPOCRParams.Validate()
+ if err != nil {
+ return nil, fmt.Errorf("invalid ccip ocr params: %w", err)
+ }
+
+ if e.OCRSecrets.IsEmpty() {
+ return nil, fmt.Errorf("OCR secrets must be set")
+ }
+
+ return nodes, nil
+}
+
+// AddDonAndSetCandidateChangeset adds new DON for destination to home chain
+// and sets the plugin config as candidateConfig for the don.
+//
+// This is the first step to creating a CCIP DON and must be executed before any
+// other changesets (SetCandidateChangeset, PromoteAllCandidatesChangeset)
+// can be executed.
+//
+// Note that these operations must be done together because the createDON call
+// in the capability registry calls the capability config contract, so we must
+// provide suitable calldata for CCIPHome.
+func AddDonAndSetCandidateChangeset(
e deployment.Environment,
cfg AddDonAndSetCandidateChangesetConfig,
) (deployment.ChangesetOutput, error) {
@@ -175,10 +303,153 @@ func SetCandidatePluginChangeset(
return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
}
+ txOpts := e.Chains[cfg.HomeChainSelector].DeployerKey
+ if cfg.MCMS != nil {
+ txOpts = deployment.SimTransactOpts()
+ }
+
newDONArgs, err := internal.BuildOCR3ConfigForCCIPHome(
e.OCRSecrets,
- state.Chains[cfg.NewChainSelector].OffRamp,
- e.Chains[cfg.NewChainSelector],
+ state.Chains[cfg.DONChainSelector].OffRamp,
+ e.Chains[cfg.DONChainSelector],
+ nodes.NonBootstraps(),
+ state.Chains[cfg.HomeChainSelector].RMNHome.Address(),
+ cfg.CCIPOCRParams.OCRParameters,
+ cfg.CCIPOCRParams.CommitOffChainConfig,
+ cfg.CCIPOCRParams.ExecuteOffChainConfig,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ latestDon, err := internal.LatestCCIPDON(state.Chains[cfg.HomeChainSelector].CapabilityRegistry)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ pluginOCR3Config, ok := newDONArgs[cfg.PluginType]
+ if !ok {
+ return deployment.ChangesetOutput{}, fmt.Errorf("missing commit plugin in ocr3Configs")
+ }
+
+ expectedDonID := latestDon.Id + 1
+ addDonOp, err := newDonWithCandidateOp(
+ txOpts,
+ e.Chains[cfg.HomeChainSelector],
+ expectedDonID,
+ pluginOCR3Config,
+ state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
+ nodes.NonBootstraps(),
+ cfg.MCMS != nil,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ if cfg.MCMS == nil {
+ return deployment.ChangesetOutput{}, nil
+ }
+
+ prop, err := proposalutils.BuildProposalFromBatches(
+ map[uint64]common.Address{
+ cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].Timelock.Address(),
+ },
+ map[uint64]*gethwrappers.ManyChainMultiSig{
+ cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].ProposerMcm,
+ },
+ []timelock.BatchChainOperation{{
+ ChainIdentifier: mcms.ChainIdentifier(cfg.HomeChainSelector),
+ Batch: []mcms.Operation{addDonOp},
+ }},
+ fmt.Sprintf("addDON on new Chain && setCandidate for plugin %s", cfg.PluginType.String()),
+ cfg.MCMS.MinDelay,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("failed to build proposal from batch: %w", err)
+ }
+
+ return deployment.ChangesetOutput{
+ Proposals: []timelock.MCMSWithTimelockProposal{*prop},
+ }, nil
+}
+
+// newDonWithCandidateOp sets the candidate config for the given plugin by calling setCandidate on the CCIPHome contract through the AddDON call on the CapReg contract.
+// This should be done first, before calling any other UpdateDON calls.
+// This proposes to set up the OCR3 config of the provided plugin for the new DON.
+func newDonWithCandidateOp(
+ txOpts *bind.TransactOpts,
+ homeChain deployment.Chain,
+ donID uint32,
+ pluginConfig ccip_home.CCIPHomeOCR3Config,
+ capReg *capabilities_registry.CapabilitiesRegistry,
+ nodes deployment.Nodes,
+ mcmsEnabled bool,
+) (mcms.Operation, error) {
+ encodedSetCandidateCall, err := internal.CCIPHomeABI.Pack(
+ "setCandidate",
+ donID,
+ pluginConfig.PluginType,
+ pluginConfig,
+ [32]byte{},
+ )
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("pack set candidate call: %w", err)
+ }
+
+ addDonTx, err := capReg.AddDON(
+ txOpts,
+ nodes.PeerIDs(),
+ []capabilities_registry.CapabilitiesRegistryCapabilityConfiguration{
+ {
+ CapabilityId: internal.CCIPCapabilityID,
+ Config: encodedSetCandidateCall,
+ },
+ },
+ false, // isPublic
+ false, // acceptsWorkflows
+ nodes.DefaultF(),
+ )
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("could not generate add don tx w/ commit config: %w", err)
+ }
+ if !mcmsEnabled {
+ _, err = deployment.ConfirmIfNoError(homeChain, addDonTx, err)
+ if err != nil {
+ return mcms.Operation{}, fmt.Errorf("error confirming addDon call: %w", err)
+ }
+ }
+
+ return mcms.Operation{
+ To: capReg.Address(),
+ Data: addDonTx.Data(),
+ Value: big.NewInt(0),
+ }, nil
+}
+
+// SetCandidateChangeset generates a proposal to call setCandidate on the CCIPHome through the capability registry.
+// A DON must exist in order to use this changeset effectively, i.e AddDonAndSetCandidateChangeset must be called first.
+func SetCandidateChangeset(
+ e deployment.Environment,
+ cfg SetCandidateChangesetConfig,
+) (deployment.ChangesetOutput, error) {
+ state, err := LoadOnchainState(e)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ nodes, err := cfg.Validate(e, state)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("%w: %w", deployment.ErrInvalidConfig, err)
+ }
+
+ txOpts := e.Chains[cfg.HomeChainSelector].DeployerKey
+ if cfg.MCMS != nil {
+ txOpts = deployment.SimTransactOpts()
+ }
+
+ newDONArgs, err := internal.BuildOCR3ConfigForCCIPHome(
+ e.OCRSecrets,
+ state.Chains[cfg.DONChainSelector].OffRamp,
+ e.Chains[cfg.DONChainSelector],
nodes.NonBootstraps(),
state.Chains[cfg.HomeChainSelector].RMNHome.Address(),
cfg.CCIPOCRParams.OCRParameters,
@@ -195,33 +466,37 @@ func SetCandidatePluginChangeset(
}
setCandidateMCMSOps, err := setCandidateOnExistingDon(
+ e.Logger,
+ txOpts,
+ e.Chains[cfg.HomeChainSelector],
config,
state.Chains[cfg.HomeChainSelector].CapabilityRegistry,
state.Chains[cfg.HomeChainSelector].CCIPHome,
- cfg.NewChainSelector,
+ cfg.DONChainSelector,
nodes.NonBootstraps(),
+ cfg.MCMS != nil,
)
if err != nil {
return deployment.ChangesetOutput{}, err
}
- var (
- timelocksPerChain = map[uint64]common.Address{
+ if cfg.MCMS == nil {
+ return deployment.ChangesetOutput{}, nil
+ }
+
+ prop, err := proposalutils.BuildProposalFromBatches(
+ map[uint64]common.Address{
cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].Timelock.Address(),
- }
- proposerMCMSes = map[uint64]*gethwrappers.ManyChainMultiSig{
+ },
+ map[uint64]*gethwrappers.ManyChainMultiSig{
cfg.HomeChainSelector: state.Chains[cfg.HomeChainSelector].ProposerMcm,
- }
- )
- prop, err := proposalutils.BuildProposalFromBatches(
- timelocksPerChain,
- proposerMCMSes,
+ },
[]timelock.BatchChainOperation{{
ChainIdentifier: mcms.ChainIdentifier(cfg.HomeChainSelector),
Batch: setCandidateMCMSOps,
}},
fmt.Sprintf("SetCandidate for %s plugin", cfg.PluginType.String()),
- 0, // minDelay
+ cfg.MCMS.MinDelay,
)
if err != nil {
return deployment.ChangesetOutput{}, err
@@ -236,11 +511,15 @@ func SetCandidatePluginChangeset(
// setCandidateOnExistingDon calls setCandidate on CCIPHome contract through the UpdateDON call on CapReg contract
// This proposes to set up OCR3 config for the provided plugin for the DON
func setCandidateOnExistingDon(
+ lggr logger.Logger,
+ txOpts *bind.TransactOpts,
+ homeChain deployment.Chain,
pluginConfig ccip_home.CCIPHomeOCR3Config,
capReg *capabilities_registry.CapabilitiesRegistry,
ccipHome *ccip_home.CCIPHome,
chainSelector uint64,
nodes deployment.Nodes,
+ mcmsEnabled bool,
) ([]mcms.Operation, error) {
// fetch DON ID for the chain
donID, err := internal.DonIDForChain(capReg, ccipHome, chainSelector)
@@ -251,7 +530,8 @@ func setCandidateOnExistingDon(
return nil, fmt.Errorf("don doesn't exist in CR for chain %d", chainSelector)
}
- fmt.Printf("donID: %d", donID)
+ lggr.Infof("donID for chain %d: %d", chainSelector, donID)
+
encodedSetCandidateCall, err := internal.CCIPHomeABI.Pack(
"setCandidate",
donID,
@@ -265,7 +545,7 @@ func setCandidateOnExistingDon(
// set candidate call
updateDonTx, err := capReg.UpdateDON(
- deployment.SimTransactOpts(),
+ txOpts,
donID,
nodes.PeerIDs(),
[]capabilities_registry.CapabilitiesRegistryCapabilityConfiguration{
@@ -280,6 +560,12 @@ func setCandidateOnExistingDon(
if err != nil {
return nil, fmt.Errorf("update don w/ exec config: %w", err)
}
+ if !mcmsEnabled {
+ _, err = deployment.ConfirmIfNoError(homeChain, updateDonTx, err)
+ if err != nil {
+ return nil, fmt.Errorf("error confirming updateDon call: %w", err)
+ }
+ }
return []mcms.Operation{{
To: capReg.Address(),
diff --git a/deployment/ccip/changeset/cs_ccip_home_test.go b/deployment/ccip/changeset/cs_ccip_home_test.go
index 92784551957..c4df4fe32d7 100644
--- a/deployment/ccip/changeset/cs_ccip_home_test.go
+++ b/deployment/ccip/changeset/cs_ccip_home_test.go
@@ -16,6 +16,7 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
cctypes "github.com/smartcontractkit/chainlink/v2/core/capabilities/ccip/types"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
"github.com/smartcontractkit/chainlink/deployment"
@@ -27,7 +28,7 @@ import (
func TestActiveCandidate(t *testing.T) {
t.Skipf("to be enabled after latest cl-ccip is compatible")
-
+ t.Parallel()
tenv := NewMemoryEnvironment(t,
WithChains(3),
WithNodes(5))
@@ -86,9 +87,9 @@ func TestActiveCandidate(t *testing.T) {
ConfirmExecWithSeqNrsForAll(t, e, state, expectedSeqNumExec, startBlocks)
// compose the transfer ownership and accept ownership changesets
- timelockContracts := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+ timelockContracts := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for _, chain := range allChains {
- timelockContracts[chain] = &commonchangeset.TimelockExecutionContracts{
+ timelockContracts[chain] = &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[chain].Timelock,
CallProxy: state.Chains[chain].CallProxy,
}
@@ -164,11 +165,15 @@ func TestActiveCandidate(t *testing.T) {
}
)
setCommitCandidateOp, err := setCandidateOnExistingDon(
+ e.Logger,
+ deployment.SimTransactOpts(),
+ tenv.Env.Chains[tenv.HomeChainSel],
ocr3ConfigMap[cctypes.PluginTypeCCIPCommit],
state.Chains[tenv.HomeChainSel].CapabilityRegistry,
state.Chains[tenv.HomeChainSel].CCIPHome,
tenv.FeedChainSel,
nodes.NonBootstraps(),
+ true,
)
require.NoError(t, err)
setCommitCandidateProposal, err := proposalutils.BuildProposalFromBatches(timelocksPerChain, proposerMCMSes, []timelock.BatchChainOperation{{
@@ -176,19 +181,23 @@ func TestActiveCandidate(t *testing.T) {
Batch: setCommitCandidateOp,
}}, "set new candidates on commit plugin", 0)
require.NoError(t, err)
- setCommitCandidateSigned := commonchangeset.SignProposal(t, e, setCommitCandidateProposal)
- commonchangeset.ExecuteProposal(t, e, setCommitCandidateSigned, &commonchangeset.TimelockExecutionContracts{
+ setCommitCandidateSigned := proposalutils.SignProposal(t, e, setCommitCandidateProposal)
+ proposalutils.ExecuteProposal(t, e, setCommitCandidateSigned, &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[tenv.HomeChainSel].Timelock,
CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
}, tenv.HomeChainSel)
// create the op for the commit plugin as well
setExecCandidateOp, err := setCandidateOnExistingDon(
+ e.Logger,
+ deployment.SimTransactOpts(),
+ tenv.Env.Chains[tenv.HomeChainSel],
ocr3ConfigMap[cctypes.PluginTypeCCIPExec],
state.Chains[tenv.HomeChainSel].CapabilityRegistry,
state.Chains[tenv.HomeChainSel].CCIPHome,
tenv.FeedChainSel,
nodes.NonBootstraps(),
+ true,
)
require.NoError(t, err)
@@ -197,8 +206,8 @@ func TestActiveCandidate(t *testing.T) {
Batch: setExecCandidateOp,
}}, "set new candidates on commit and exec plugins", 0)
require.NoError(t, err)
- setExecCandidateSigned := commonchangeset.SignProposal(t, e, setExecCandidateProposal)
- commonchangeset.ExecuteProposal(t, e, setExecCandidateSigned, &commonchangeset.TimelockExecutionContracts{
+ setExecCandidateSigned := proposalutils.SignProposal(t, e, setExecCandidateProposal)
+ proposalutils.ExecuteProposal(t, e, setExecCandidateSigned, &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[tenv.HomeChainSel].Timelock,
CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
}, tenv.HomeChainSel)
@@ -234,8 +243,8 @@ func TestActiveCandidate(t *testing.T) {
Batch: promoteOps,
}}, "promote candidates and revoke actives", 0)
require.NoError(t, err)
- promoteSigned := commonchangeset.SignProposal(t, e, promoteProposal)
- commonchangeset.ExecuteProposal(t, e, promoteSigned, &commonchangeset.TimelockExecutionContracts{
+ promoteSigned := proposalutils.SignProposal(t, e, promoteProposal)
+ proposalutils.ExecuteProposal(t, e, promoteSigned, &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[tenv.HomeChainSel].Timelock,
CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
}, tenv.HomeChainSel)
@@ -288,37 +297,9 @@ func Test_PromoteCandidate(t *testing.T) {
source := allChains[0]
dest := allChains[1]
- nodes, err := deployment.NodeInfo(tenv.Env.NodeIDs, tenv.Env.Offchain)
- require.NoError(t, err)
-
- var nodeIDs []string
- for _, node := range nodes {
- nodeIDs = append(nodeIDs, node.NodeID)
- }
-
if tc.mcmsEnabled {
// Transfer ownership to timelock so that we can promote the zero digest later down the line.
- _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
- source: {
- Timelock: state.Chains[source].Timelock,
- CallProxy: state.Chains[source].CallProxy,
- },
- dest: {
- Timelock: state.Chains[dest].Timelock,
- CallProxy: state.Chains[dest].CallProxy,
- },
- tenv.HomeChainSel: {
- Timelock: state.Chains[tenv.HomeChainSel].Timelock,
- CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
- },
- }, []commonchangeset.ChangesetApplication{
- {
- Changeset: commonchangeset.WrapChangeSet(commonchangeset.TransferToMCMSWithTimelock),
- Config: genTestTransferOwnershipConfig(tenv, allChains, state),
- },
- })
- require.NoError(t, err)
- assertTimelockOwnership(t, tenv, allChains, state)
+ transferToTimelock(t, tenv, state, source, dest)
}
var (
@@ -345,7 +326,7 @@ func Test_PromoteCandidate(t *testing.T) {
MinDelay: 0,
}
}
- _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*commonchangeset.TimelockExecutionContracts{
+ _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
tenv.HomeChainSel: {
Timelock: state.Chains[tenv.HomeChainSel].Timelock,
CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
@@ -356,7 +337,6 @@ func Test_PromoteCandidate(t *testing.T) {
Config: PromoteAllCandidatesChangesetConfig{
HomeChainSelector: tenv.HomeChainSel,
DONChainSelector: dest,
- NodeIDs: nodeIDs,
MCMS: mcmsConfig,
},
},
@@ -378,3 +358,148 @@ func Test_PromoteCandidate(t *testing.T) {
})
}
}
+
+func Test_SetCandidate(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ mcmsEnabled bool
+ }{
+ {
+ name: "MCMS enabled",
+ mcmsEnabled: true,
+ },
+ {
+ name: "MCMS disabled",
+ mcmsEnabled: false,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := testcontext.Get(t)
+ tenv := NewMemoryEnvironment(t,
+ WithChains(2),
+ WithNodes(4))
+ state, err := LoadOnchainState(tenv.Env)
+ require.NoError(t, err)
+
+ // Deploy to all chains.
+ allChains := maps.Keys(tenv.Env.Chains)
+ source := allChains[0]
+ dest := allChains[1]
+
+ if tc.mcmsEnabled {
+ // Transfer ownership to timelock so that the candidate configs can be set via MCMS proposal.
+ transferToTimelock(t, tenv, state, source, dest)
+ }
+
+ var (
+ capReg = state.Chains[tenv.HomeChainSel].CapabilityRegistry
+ ccipHome = state.Chains[tenv.HomeChainSel].CCIPHome
+ )
+ donID, err := internal.DonIDForChain(capReg, ccipHome, dest)
+ require.NoError(t, err)
+ require.NotEqual(t, uint32(0), donID)
+ candidateDigestCommitBefore, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPCommit))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, candidateDigestCommitBefore)
+ candidateDigestExecBefore, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPExec))
+ require.NoError(t, err)
+ require.Equal(t, [32]byte{}, candidateDigestExecBefore)
+
+ var mcmsConfig *MCMSConfig
+ if tc.mcmsEnabled {
+ mcmsConfig = &MCMSConfig{
+ MinDelay: 0,
+ }
+ }
+ tokenConfig := NewTestTokenConfig(state.Chains[tenv.FeedChainSel].USDFeeds)
+ _, err = commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ tenv.HomeChainSel: {
+ Timelock: state.Chains[tenv.HomeChainSel].Timelock,
+ CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
+ },
+ }, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
+ HomeChainSelector: tenv.HomeChainSel,
+ FeedChainSelector: tenv.FeedChainSel,
+ DONChainSelector: dest,
+ PluginType: types.PluginTypeCCIPCommit,
+ CCIPOCRParams: DefaultOCRParams(
+ tenv.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[dest].LinkToken, state.Chains[dest].Weth9),
+ nil,
+ ),
+ MCMS: mcmsConfig,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(SetCandidateChangeset),
+ Config: SetCandidateChangesetConfig{
+ HomeChainSelector: tenv.HomeChainSel,
+ FeedChainSelector: tenv.FeedChainSel,
+ DONChainSelector: dest,
+ PluginType: types.PluginTypeCCIPExec,
+ CCIPOCRParams: DefaultOCRParams(
+ tenv.FeedChainSel,
+ tokenConfig.GetTokenInfo(logger.TestLogger(t), state.Chains[dest].LinkToken, state.Chains[dest].Weth9),
+ nil,
+ ),
+ MCMS: mcmsConfig,
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // after setting a new candidate on both plugins, the candidate config digest
+ // should be nonzero.
+ candidateDigestCommitAfter, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPCommit))
+ require.NoError(t, err)
+ require.NotEqual(t, [32]byte{}, candidateDigestCommitAfter)
+ require.NotEqual(t, candidateDigestCommitBefore, candidateDigestCommitAfter)
+
+ candidateDigestExecAfter, err := ccipHome.GetCandidateDigest(&bind.CallOpts{
+ Context: ctx,
+ }, donID, uint8(types.PluginTypeCCIPExec))
+ require.NoError(t, err)
+ require.NotEqual(t, [32]byte{}, candidateDigestExecAfter)
+ require.NotEqual(t, candidateDigestExecBefore, candidateDigestExecAfter)
+ })
+ }
+}
+
+func transferToTimelock(
+ t *testing.T,
+ tenv DeployedEnv,
+ state CCIPOnChainState,
+ source,
+ dest uint64) {
+ // Transfer ownership to timelock so that we can promote the zero digest later down the line.
+ _, err := commonchangeset.ApplyChangesets(t, tenv.Env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ source: {
+ Timelock: state.Chains[source].Timelock,
+ CallProxy: state.Chains[source].CallProxy,
+ },
+ dest: {
+ Timelock: state.Chains[dest].Timelock,
+ CallProxy: state.Chains[dest].CallProxy,
+ },
+ tenv.HomeChainSel: {
+ Timelock: state.Chains[tenv.HomeChainSel].Timelock,
+ CallProxy: state.Chains[tenv.HomeChainSel].CallProxy,
+ },
+ }, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.TransferToMCMSWithTimelock),
+ Config: genTestTransferOwnershipConfig(tenv, []uint64{source, dest}, state),
+ },
+ })
+ require.NoError(t, err)
+ assertTimelockOwnership(t, tenv, []uint64{source, dest}, state)
+}
diff --git a/deployment/ccip/changeset/cs_deploy_chain_test.go b/deployment/ccip/changeset/cs_deploy_chain_test.go
index fbf9c881138..9e1a581112d 100644
--- a/deployment/ccip/changeset/cs_deploy_chain_test.go
+++ b/deployment/ccip/changeset/cs_deploy_chain_test.go
@@ -3,7 +3,6 @@ package changeset
import (
"encoding/json"
"fmt"
- "math/big"
"testing"
"github.com/stretchr/testify/require"
@@ -11,12 +10,14 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/v2/core/logger"
)
func TestDeployChainContractsChangeset(t *testing.T) {
+ t.Parallel()
lggr := logger.TestLogger(t)
e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{
Bootstraps: 1,
@@ -30,12 +31,7 @@ func TestDeployChainContractsChangeset(t *testing.T) {
p2pIds := nodes.NonBootstraps().PeerIDs()
cfg := make(map[uint64]commontypes.MCMSWithTimelockConfig)
for _, chain := range e.AllChainSelectors() {
- cfg[chain] = commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ cfg[chain] = proposalutils.SingleGroupTimelockConfig(t)
}
e, err = commonchangeset.ApplyChangesets(t, e, nil, []commonchangeset.ChangesetApplication{
{
@@ -98,6 +94,7 @@ func TestDeployChainContractsChangeset(t *testing.T) {
}
func TestDeployCCIPContracts(t *testing.T) {
+ t.Parallel()
e := NewMemoryEnvironment(t)
// Deploy all the CCIP contracts.
state, err := LoadOnchainState(e.Env)
diff --git a/deployment/ccip/changeset/cs_home_chain_test.go b/deployment/ccip/changeset/cs_home_chain_test.go
index a06161f7086..eb620691db0 100644
--- a/deployment/ccip/changeset/cs_home_chain_test.go
+++ b/deployment/ccip/changeset/cs_home_chain_test.go
@@ -13,6 +13,7 @@ import (
)
func TestDeployHomeChain(t *testing.T) {
+ t.Parallel()
lggr := logger.TestLogger(t)
e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{
Bootstraps: 1,
diff --git a/deployment/ccip/changeset/cs_initial_add_chain.go b/deployment/ccip/changeset/cs_initial_add_chain.go
index 5ba648d74b5..4f8b2ac2722 100644
--- a/deployment/ccip/changeset/cs_initial_add_chain.go
+++ b/deployment/ccip/changeset/cs_initial_add_chain.go
@@ -483,7 +483,7 @@ func ValidateCCIPHomeConfigSetUp(
return fmt.Errorf("fetch don id for chain: %w", err)
}
if donID == 0 {
- return fmt.Errorf("don id for chain(%d) does not exist", chainSel)
+ return fmt.Errorf("don id for chain (%d) does not exist", chainSel)
}
// final sanity checks on configs.
diff --git a/deployment/ccip/changeset/cs_initial_add_chain_test.go b/deployment/ccip/changeset/cs_initial_add_chain_test.go
index c1404eb7123..f344068f11b 100644
--- a/deployment/ccip/changeset/cs_initial_add_chain_test.go
+++ b/deployment/ccip/changeset/cs_initial_add_chain_test.go
@@ -9,10 +9,12 @@ import (
"github.com/stretchr/testify/require"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/router"
)
func TestInitialAddChainAppliedTwice(t *testing.T) {
+ t.Parallel()
// This already applies the initial add chain changeset.
e := NewMemoryEnvironment(t)
@@ -24,10 +26,10 @@ func TestInitialAddChainAppliedTwice(t *testing.T) {
allChains := e.Env.AllChainSelectors()
tokenConfig := NewTestTokenConfig(state.Chains[e.FeedChainSel].USDFeeds)
chainConfigs := make(map[uint64]CCIPOCRParams)
- timelockContractsPerChain := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+ timelockContractsPerChain := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for _, chain := range allChains {
- timelockContractsPerChain[chain] = &commonchangeset.TimelockExecutionContracts{
+ timelockContractsPerChain[chain] = &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[chain].Timelock,
CallProxy: state.Chains[chain].CallProxy,
}
diff --git a/deployment/ccip/changeset/cs_jobspec_test.go b/deployment/ccip/changeset/cs_jobspec_test.go
index 21e80e85aa2..a0445b0d5ee 100644
--- a/deployment/ccip/changeset/cs_jobspec_test.go
+++ b/deployment/ccip/changeset/cs_jobspec_test.go
@@ -13,6 +13,7 @@ import (
)
func TestJobSpecChangeset(t *testing.T) {
+ t.Parallel()
lggr := logger.TestLogger(t)
e := memory.NewMemoryEnvironment(t, lggr, zapcore.InfoLevel, memory.MemoryEnvironmentConfig{
Chains: 1,
diff --git a/deployment/ccip/changeset/cs_update_rmn_config.go b/deployment/ccip/changeset/cs_update_rmn_config.go
index 25ae8308eb5..42eace928c3 100644
--- a/deployment/ccip/changeset/cs_update_rmn_config.go
+++ b/deployment/ccip/changeset/cs_update_rmn_config.go
@@ -12,7 +12,6 @@ import (
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
"github.com/smartcontractkit/chainlink/deployment"
- commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
"github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_home"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_remote"
@@ -304,10 +303,10 @@ func NewPromoteCandidateConfigChangeset(e deployment.Environment, config Promote
}, nil
}
-func buildTimelockPerChain(e deployment.Environment, state CCIPOnChainState) map[uint64]*commonchangeset.TimelockExecutionContracts {
- timelocksPerChain := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+func buildTimelockPerChain(e deployment.Environment, state CCIPOnChainState) map[uint64]*proposalutils.TimelockExecutionContracts {
+ timelocksPerChain := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for _, chain := range e.Chains {
- timelocksPerChain[chain.Selector] = &commonchangeset.TimelockExecutionContracts{
+ timelocksPerChain[chain.Selector] = &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[chain.Selector].Timelock,
CallProxy: state.Chains[chain.Selector].CallProxy,
}
diff --git a/deployment/ccip/changeset/cs_update_rmn_config_test.go b/deployment/ccip/changeset/cs_update_rmn_config_test.go
index 3ec309182aa..bab70f68fb5 100644
--- a/deployment/ccip/changeset/cs_update_rmn_config_test.go
+++ b/deployment/ccip/changeset/cs_update_rmn_config_test.go
@@ -56,6 +56,7 @@ func TestUpdateRMNConfig(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
updateRMNConfig(t, tc)
})
}
diff --git a/deployment/ccip/changeset/solana_state.go b/deployment/ccip/changeset/solana_state.go
new file mode 100644
index 00000000000..4e5507cfcd3
--- /dev/null
+++ b/deployment/ccip/changeset/solana_state.go
@@ -0,0 +1,6 @@
+package changeset
+
+// SolCCIPChainState holds a Go binding for all the currently deployed CCIP programs
+// on a chain. If a binding is nil, it means there is no such contract on the chain.
+type SolCCIPChainState struct {
+}
diff --git a/deployment/ccip/changeset/state.go b/deployment/ccip/changeset/state.go
index 7453195d304..cd88db1b9ee 100644
--- a/deployment/ccip/changeset/state.go
+++ b/deployment/ccip/changeset/state.go
@@ -161,6 +161,15 @@ func (c CCIPChainState) GenerateView() (view.ChainView, error) {
}
chainView.RMN[c.RMNRemote.Address().Hex()] = rmnView
}
+
+ if c.RMNHome != nil {
+ rmnHomeView, err := v1_6.GenerateRMNHomeView(c.RMNHome)
+ if err != nil {
+ return chainView, errors.Wrapf(err, "failed to generate rmn home view for rmn home %s", c.RMNHome.Address().String())
+ }
+ chainView.RMNHome[c.RMNHome.Address().Hex()] = rmnHomeView
+ }
+
if c.FeeQuoter != nil && c.Router != nil && c.TokenAdminRegistry != nil {
fqView, err := v1_6.GenerateFeeQuoterView(c.FeeQuoter, c.Router, c.TokenAdminRegistry)
if err != nil {
@@ -252,7 +261,8 @@ type CCIPOnChainState struct {
// Populated go bindings for the appropriate version for all contracts.
// We would hold 2 versions of each contract here. Once we upgrade we can phase out the old one.
// When generating bindings, make sure the package name corresponds to the version.
- Chains map[uint64]CCIPChainState
+ Chains map[uint64]CCIPChainState
+ SolChains map[uint64]SolCCIPChainState
}
func (s CCIPOnChainState) View(chains []uint64) (map[string]view.ChainView, error) {
@@ -301,13 +311,13 @@ func LoadOnchainState(e deployment.Environment) (CCIPOnChainState, error) {
// LoadChainState Loads all state for a chain into state
func LoadChainState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (CCIPChainState, error) {
var state CCIPChainState
- mcmsWithTimelock, err := commoncs.MaybeLoadMCMSWithTimelockState(chain, addresses)
+ mcmsWithTimelock, err := commoncs.MaybeLoadMCMSWithTimelockChainState(chain, addresses)
if err != nil {
return state, err
}
state.MCMSWithTimelockState = *mcmsWithTimelock
- linkState, err := commoncs.MaybeLoadLinkTokenState(chain, addresses)
+ linkState, err := commoncs.MaybeLoadLinkTokenChainState(chain, addresses)
if err != nil {
return state, err
}
diff --git a/deployment/ccip/changeset/test_assertions.go b/deployment/ccip/changeset/test_assertions.go
index c0b510acc07..a114e52b361 100644
--- a/deployment/ccip/changeset/test_assertions.go
+++ b/deployment/ccip/changeset/test_assertions.go
@@ -221,8 +221,8 @@ func ConfirmCommitForAllWithExpectedSeqNums(
return false
}
},
- 3*time.Minute,
- 1*time.Second,
+ tests.WaitTimeout(t),
+ 2*time.Second,
"all commitments did not confirm",
)
}
diff --git a/deployment/ccip/changeset/test_environment.go b/deployment/ccip/changeset/test_environment.go
index ede078254c2..0efa44d108c 100644
--- a/deployment/ccip/changeset/test_environment.go
+++ b/deployment/ccip/changeset/test_environment.go
@@ -20,6 +20,7 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
)
@@ -299,12 +300,7 @@ func NewEnvironmentWithJobsAndContracts(t *testing.T, tc *TestConfigs, tEnv Test
mcmsCfg := make(map[uint64]commontypes.MCMSWithTimelockConfig)
for _, c := range e.Env.AllChainSelectors() {
- mcmsCfg[c] = commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ mcmsCfg[c] = proposalutils.SingleGroupTimelockConfig(t)
}
var (
usdcChains []uint64
@@ -382,9 +378,9 @@ func NewEnvironmentWithJobsAndContracts(t *testing.T, tc *TestConfigs, tEnv Test
}
// Build the per chain config.
chainConfigs := make(map[uint64]CCIPOCRParams)
- timelockContractsPerChain := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+ timelockContractsPerChain := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for _, chain := range allChains {
- timelockContractsPerChain[chain] = &commonchangeset.TimelockExecutionContracts{
+ timelockContractsPerChain[chain] = &proposalutils.TimelockExecutionContracts{
Timelock: state.Chains[chain].Timelock,
CallProxy: state.Chains[chain].CallProxy,
}
diff --git a/deployment/ccip/changeset/view_test.go b/deployment/ccip/changeset/view_test.go
index 11430bfbddf..35193979849 100644
--- a/deployment/ccip/changeset/view_test.go
+++ b/deployment/ccip/changeset/view_test.go
@@ -7,6 +7,7 @@ import (
)
func TestSmokeView(t *testing.T) {
+ t.Parallel()
tenv := NewMemoryEnvironment(t, WithChains(3))
_, err := ViewCCIP(tenv.Env)
require.NoError(t, err)
diff --git a/deployment/ccip/view/v1_6/rmnhome.go b/deployment/ccip/view/v1_6/rmnhome.go
new file mode 100644
index 00000000000..82d39074d6f
--- /dev/null
+++ b/deployment/ccip/view/v1_6/rmnhome.go
@@ -0,0 +1,214 @@
+package v1_6
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/smartcontractkit/chainlink/deployment/common/view/types"
+ "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/ccip/generated/rmn_home"
+)
+
+type RMNHomeView struct {
+ types.ContractMetaData
+ CandidateConfig *RMNHomeVersionedConfig `json:"candidateConfig,omitempty"`
+ ActiveConfig *RMNHomeVersionedConfig `json:"activeConfig,omitempty"`
+}
+
+type RMNHomeVersionedConfig struct {
+ Version uint32 `json:"version"`
+ StaticConfig RMNHomeStaticConfig `json:"staticConfig"`
+ DynamicConfig RMNHomeDynamicConfig `json:"dynamicConfig"`
+ Digest [32]byte `json:"digest"`
+}
+
+func decodeHexString(hexStr string, expectedLength int) ([]byte, error) {
+ bytes, err := hex.DecodeString(hexStr)
+ if err != nil {
+ return nil, err
+ }
+ if len(bytes) != expectedLength {
+ return nil, fmt.Errorf("invalid length: expected %d, got %d", expectedLength, len(bytes))
+ }
+ return bytes, nil
+}
+
+func (c RMNHomeVersionedConfig) MarshalJSON() ([]byte, error) {
+ type Alias RMNHomeVersionedConfig
+ return json.Marshal(&struct {
+ Digest string `json:"digest"`
+ *Alias
+ }{
+ Digest: hex.EncodeToString(c.Digest[:]),
+ Alias: (*Alias)(&c),
+ })
+}
+
+func (c *RMNHomeVersionedConfig) UnmarshalJSON(data []byte) error {
+ type Alias RMNHomeVersionedConfig
+ aux := &struct {
+ Digest string `json:"digest"`
+ *Alias
+ }{
+ Alias: (*Alias)(c),
+ }
+
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ digestBytes, err := decodeHexString(aux.Digest, 32)
+ if err != nil {
+ return err
+ }
+ copy(c.Digest[:], digestBytes)
+ return nil
+}
+
+type RMNHomeStaticConfig struct {
+ Nodes []RMNHomeNode `json:"nodes"`
+}
+
+type RMNHomeDynamicConfig struct {
+ SourceChains []RMNHomeSourceChain `json:"sourceChains"`
+}
+
+type RMNHomeSourceChain struct {
+ ChainSelector uint64 `json:"selector"`
+ F uint64 `json:"f"`
+ ObserverNodesBitmap *big.Int `json:"observerNodesBitmap"`
+}
+
+type RMNHomeNode struct {
+ PeerId [32]byte `json:"peerId"`
+ OffchainPublicKey [32]byte `json:"offchainPublicKey"`
+}
+
+func (n RMNHomeNode) MarshalJSON() ([]byte, error) {
+ type Alias RMNHomeNode
+ return json.Marshal(&struct {
+ PeerId string `json:"peerId"`
+ OffchainPublicKey string `json:"offchainPublicKey"`
+ *Alias
+ }{
+ PeerId: hex.EncodeToString(n.PeerId[:]),
+ OffchainPublicKey: hex.EncodeToString(n.OffchainPublicKey[:]),
+ Alias: (*Alias)(&n),
+ })
+}
+
+func (n *RMNHomeNode) UnmarshalJSON(data []byte) error {
+ type Alias RMNHomeNode
+ aux := &struct {
+ PeerId string `json:"peerId"`
+ OffchainPublicKey string `json:"offchainPublicKey"`
+ *Alias
+ }{
+ Alias: (*Alias)(n),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ peerIdBytes, err := decodeHexString(aux.PeerId, 32)
+ if err != nil {
+ return err
+ }
+ copy(n.PeerId[:], peerIdBytes)
+
+ offchainPublicKeyBytes, err := decodeHexString(aux.OffchainPublicKey, 32)
+ if err != nil {
+ return err
+ }
+ copy(n.OffchainPublicKey[:], offchainPublicKeyBytes)
+
+ return nil
+}
+
+type DigestFunc func(*bind.CallOpts) ([32]byte, error)
+
+func mapNodes(nodes []rmn_home.RMNHomeNode) []RMNHomeNode {
+ result := make([]RMNHomeNode, len(nodes))
+ for i, node := range nodes {
+ result[i] = RMNHomeNode{
+ PeerId: node.PeerId,
+ OffchainPublicKey: node.OffchainPublicKey,
+ }
+ }
+ return result
+}
+
+func mapSourceChains(chains []rmn_home.RMNHomeSourceChain) []RMNHomeSourceChain {
+ result := make([]RMNHomeSourceChain, len(chains))
+ for i, chain := range chains {
+ result[i] = RMNHomeSourceChain{
+ ChainSelector: chain.ChainSelector,
+ F: chain.F,
+ ObserverNodesBitmap: chain.ObserverNodesBitmap,
+ }
+ }
+ return result
+}
+
+func generateRmnHomeVersionedConfig(reader *rmn_home.RMNHome, digestFunc DigestFunc) (*RMNHomeVersionedConfig, error) {
+ address := reader.Address()
+ digest, err := digestFunc(nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get digest for contract %s: %w", address, err)
+ }
+
+ if digest == [32]byte{} {
+ return nil, nil
+ }
+
+ config, err := reader.GetConfig(nil, digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get config for contract %s: %w", address, err)
+ }
+
+ staticConfig := RMNHomeStaticConfig{
+ Nodes: mapNodes(config.VersionedConfig.StaticConfig.Nodes),
+ }
+
+ dynamicConfig := RMNHomeDynamicConfig{
+ SourceChains: mapSourceChains(config.VersionedConfig.DynamicConfig.SourceChains),
+ }
+
+ return &RMNHomeVersionedConfig{
+ Version: config.VersionedConfig.Version,
+ Digest: config.VersionedConfig.ConfigDigest,
+ StaticConfig: staticConfig,
+ DynamicConfig: dynamicConfig,
+ }, nil
+}
+
+func GenerateRMNHomeView(rmnReader *rmn_home.RMNHome) (RMNHomeView, error) {
+ if rmnReader == nil {
+ return RMNHomeView{}, nil
+ }
+
+ address := rmnReader.Address()
+
+ activeConfig, err := generateRmnHomeVersionedConfig(rmnReader, rmnReader.GetActiveDigest)
+ if err != nil {
+ return RMNHomeView{}, fmt.Errorf("failed to generate active config for contract %s: %w", address, err)
+ }
+
+ candidateConfig, err := generateRmnHomeVersionedConfig(rmnReader, rmnReader.GetCandidateDigest)
+ if err != nil {
+ return RMNHomeView{}, fmt.Errorf("failed to generate candidate config for contract %s: %w", address, err)
+ }
+
+ contractMetaData, err := types.NewContractMetaData(rmnReader, rmnReader.Address())
+ if err != nil {
+ return RMNHomeView{}, fmt.Errorf("failed to create contract metadata for contract %s: %w", address, err)
+ }
+
+ return RMNHomeView{
+ ContractMetaData: contractMetaData,
+ CandidateConfig: candidateConfig,
+ ActiveConfig: activeConfig,
+ }, nil
+}
diff --git a/deployment/ccip/view/view.go b/deployment/ccip/view/view.go
index 77781a8a31a..4f216d13008 100644
--- a/deployment/ccip/view/view.go
+++ b/deployment/ccip/view/view.go
@@ -22,6 +22,7 @@ type ChainView struct {
// v1.6
FeeQuoter map[string]v1_6.FeeQuoterView `json:"feeQuoter,omitempty"`
NonceManager map[string]v1_6.NonceManagerView `json:"nonceManager,omitempty"`
+ RMNHome map[string]v1_6.RMNHomeView `json:"rmnHome,omitempty"`
RMN map[string]v1_6.RMNRemoteView `json:"rmn,omitempty"`
OnRamp map[string]v1_6.OnRampView `json:"onRamp,omitempty"`
OffRamp map[string]v1_6.OffRampView `json:"offRamp,omitempty"`
@@ -46,6 +47,7 @@ func NewChain() ChainView {
// v1.6
FeeQuoter: make(map[string]v1_6.FeeQuoterView),
NonceManager: make(map[string]v1_6.NonceManagerView),
+ RMNHome: make(map[string]v1_6.RMNHomeView),
RMN: make(map[string]v1_6.RMNRemoteView),
OnRamp: make(map[string]v1_6.OnRampView),
OffRamp: make(map[string]v1_6.OffRampView),
diff --git a/deployment/common/changeset/deploy_link_token.go b/deployment/common/changeset/deploy_link_token.go
index 292c07c93df..c115a7ee083 100644
--- a/deployment/common/changeset/deploy_link_token.go
+++ b/deployment/common/changeset/deploy_link_token.go
@@ -12,7 +12,7 @@ import (
var _ deployment.ChangeSet[[]uint64] = DeployLinkToken
-// DeployLinkToken deploys a link token contract to the chain identified by the chainSelector.
+// DeployLinkToken deploys a link token contract to the chain identified by the ChainSelector.
func DeployLinkToken(e deployment.Environment, chains []uint64) (deployment.ChangesetOutput, error) {
for _, chain := range chains {
_, ok := e.Chains[chain]
diff --git a/deployment/common/changeset/deploy_link_token_test.go b/deployment/common/changeset/deploy_link_token_test.go
index a61743e9bf4..bc472d2a247 100644
--- a/deployment/common/changeset/deploy_link_token_test.go
+++ b/deployment/common/changeset/deploy_link_token_test.go
@@ -27,7 +27,7 @@ func TestDeployLinkToken(t *testing.T) {
require.NoError(t, err)
addrs, err := e.ExistingAddresses.AddressesForChain(chain1)
require.NoError(t, err)
- state, err := changeset.MaybeLoadLinkTokenState(e.Chains[chain1], addrs)
+ state, err := changeset.MaybeLoadLinkTokenChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
// View itself already unit tested
_, err = state.GenerateLinkView()
diff --git a/deployment/common/changeset/example/add_mint_burners_link.go b/deployment/common/changeset/example/add_mint_burners_link.go
new file mode 100644
index 00000000000..7322f99dd60
--- /dev/null
+++ b/deployment/common/changeset/example/add_mint_burners_link.go
@@ -0,0 +1,70 @@
+package example
+
+import (
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+)
+
+type AddMintersBurnersLinkConfig struct {
+ ChainSelector uint64
+ Minters []common.Address
+ Burners []common.Address
+}
+
+var _ deployment.ChangeSet[*AddMintersBurnersLinkConfig] = AddMintersBurnersLink
+
+// AddMintersBurnersLink grants the minter / burner role to the provided addresses.
+func AddMintersBurnersLink(e deployment.Environment, cfg *AddMintersBurnersLinkConfig) (deployment.ChangesetOutput, error) {
+
+ chain := e.Chains[cfg.ChainSelector]
+ addresses, err := e.ExistingAddresses.AddressesForChain(cfg.ChainSelector)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addresses)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ for _, minter := range cfg.Minters {
+ // check if minter is already a minter
+ isMinter, err := linkState.LinkToken.IsMinter(&bind.CallOpts{Context: e.GetContext()}, minter)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ if isMinter {
+ continue
+ }
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, minter)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ }
+ for _, burner := range cfg.Burners {
+ // check if burner is already a burner
+ isBurner, err := linkState.LinkToken.IsBurner(&bind.CallOpts{Context: e.GetContext()}, burner)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ if isBurner {
+ continue
+ }
+ tx, err := linkState.LinkToken.GrantBurnRole(chain.DeployerKey, burner)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ }
+ return deployment.ChangesetOutput{}, nil
+
+}
diff --git a/deployment/common/changeset/example/add_mint_burners_link_test.go b/deployment/common/changeset/example/add_mint_burners_link_test.go
new file mode 100644
index 00000000000..4dbfddc0b30
--- /dev/null
+++ b/deployment/common/changeset/example/add_mint_burners_link_test.go
@@ -0,0 +1,50 @@
+package example_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset/example"
+)
+
+// TestAddMintersBurnersLink tests the AddMintersBurnersLink changeset
+func TestAddMintersBurnersLink(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+ // Deploy Link Token and Timelock contracts and add addresses to environment
+ env := setupLinkTransferTestEnv(t)
+
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // grant the timelock both the minter and burner roles
+ _, err = example.AddMintersBurnersLink(env, &example.AddMintersBurnersLinkConfig{
+ ChainSelector: chainSelector,
+ Minters: []common.Address{timelockAddress},
+ Burners: []common.Address{timelockAddress},
+ })
+ require.NoError(t, err)
+
+ // verify the timelock now holds the minter and burner roles
+ isMinter, err := linkState.LinkToken.IsMinter(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ require.True(t, isMinter)
+ isBurner, err := linkState.LinkToken.IsBurner(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ require.True(t, isBurner)
+}
diff --git a/deployment/common/changeset/example/link_transfer.go b/deployment/common/changeset/example/link_transfer.go
new file mode 100644
index 00000000000..2e3be48a4d1
--- /dev/null
+++ b/deployment/common/changeset/example/link_transfer.go
@@ -0,0 +1,239 @@
+package example
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ ethTypes "github.com/ethereum/go-ethereum/core/types"
+ owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
+ chain_selectors "github.com/smartcontractkit/chain-selectors"
+
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
+ "github.com/smartcontractkit/chainlink/deployment/common/types"
+)
+
+const MaxTimelockDelay = 24 * 7 * time.Hour
+
+type TransferConfig struct {
+ To common.Address
+ Value *big.Int
+}
+
+type MCMSConfig struct {
+ MinDelay time.Duration // delay for timelock worker to execute the transfers.
+ OverrideRoot bool
+}
+
+type LinkTransferConfig struct {
+ Transfers map[uint64][]TransferConfig
+ From common.Address
+ McmsConfig *MCMSConfig
+}
+
+var _ deployment.ChangeSet[*LinkTransferConfig] = LinkTransfer
+
+func getDeployer(e deployment.Environment, chain uint64, mcmConfig *MCMSConfig) *bind.TransactOpts {
+ if mcmConfig == nil {
+ return e.Chains[chain].DeployerKey
+ }
+
+ return deployment.SimTransactOpts()
+}
+
+// Validate checks that the LinkTransferConfig is valid.
+func (cfg LinkTransferConfig) Validate(e deployment.Environment) error {
+ ctx := e.GetContext()
+ // Check that Transfers map has at least one chainSel
+ if len(cfg.Transfers) == 0 {
+ return errors.New("transfers map must have at least one chainSel")
+ }
+
+ // Check transfers config values.
+ for chainSel, transfers := range cfg.Transfers {
+ selector, err := chain_selectors.GetSelectorFamily(chainSel)
+ if err != nil {
+ return fmt.Errorf("invalid chain selector: %w", err)
+ }
+ if selector != chain_selectors.FamilyEVM {
+ return fmt.Errorf("chain selector %d is not an EVM chain", chainSel)
+ }
+ chain, ok := e.Chains[chainSel]
+ if !ok {
+ return fmt.Errorf("chain with selector %d not found", chainSel)
+ }
+ addrs, err := e.ExistingAddresses.AddressesForChain(chainSel)
+ if err != nil {
+ return fmt.Errorf("error getting addresses for chain %d: %w", chainSel, err)
+ }
+ if len(transfers) == 0 {
+ return fmt.Errorf("transfers for chainSel %d must have at least one LinkTransfer", chainSel)
+ }
+ totalAmount := big.NewInt(0)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ if err != nil {
+ return fmt.Errorf("error loading link token state during validation: %w", err)
+ }
+ for _, transfer := range transfers {
+ if transfer.To == (common.Address{}) {
+ return errors.New("'to' address for transfers must be set")
+ }
+ if transfer.Value == nil {
+ return errors.New("value for transfers must be set")
+ }
+ if transfer.Value.Cmp(big.NewInt(0)) == 0 {
+ return errors.New("value for transfers must be non-zero")
+ }
+ if transfer.Value.Cmp(big.NewInt(0)) == -1 {
+ return errors.New("value for transfers must be positive")
+ }
+ totalAmount.Add(totalAmount, transfer.Value)
+ }
+ // check that from address has enough funds for the transfers
+ balance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, cfg.From)
+ if balance.Cmp(totalAmount) < 0 {
+ return fmt.Errorf("sender does not have enough funds for transfers for chain selector %d, required: %s, available: %s", chainSel, totalAmount.String(), balance.String())
+ }
+ }
+
+ if cfg.McmsConfig == nil {
+ return nil
+ }
+
+ // Upper bound for min delay (7 days)
+ if cfg.McmsConfig.MinDelay > MaxTimelockDelay {
+ return errors.New("minDelay must be less than 7 days")
+ }
+
+ return nil
+}
+
+// initStatePerChain initializes the state for each chain selector on the provided config
+func initStatePerChain(cfg *LinkTransferConfig, e deployment.Environment) (
+ linkStatePerChain map[uint64]*changeset.LinkTokenState,
+ mcmsStatePerChain map[uint64]*changeset.MCMSWithTimelockState,
+ err error) {
+ linkStatePerChain = map[uint64]*changeset.LinkTokenState{}
+ mcmsStatePerChain = map[uint64]*changeset.MCMSWithTimelockState{}
+ // Load state for each chain
+ chainSelectors := []uint64{}
+ for chainSelector := range cfg.Transfers {
+ chainSelectors = append(chainSelectors, chainSelector)
+ }
+ linkStatePerChain, err = changeset.MaybeLoadLinkTokenState(e, chainSelectors)
+ if err != nil {
+ return nil, nil, err
+ }
+ mcmsStatePerChain, err = changeset.MaybeLoadMCMSWithTimelockState(e, chainSelectors)
+ if err != nil {
+ return nil, nil, err
+
+ }
+ return linkStatePerChain, mcmsStatePerChain, nil
+}
+
+// transferOrBuildTx transfers the LINK tokens or builds the tx for the MCMS proposal
+func transferOrBuildTx(
+ e deployment.Environment,
+ linkState *changeset.LinkTokenState,
+ transfer TransferConfig,
+ opts *bind.TransactOpts,
+ chain deployment.Chain,
+ mcmsConfig *MCMSConfig) (*ethTypes.Transaction, error) {
+ tx, err := linkState.LinkToken.Transfer(opts, transfer.To, transfer.Value)
+ if err != nil {
+ return nil, fmt.Errorf("error packing transfer tx data: %w", err)
+ }
+ // only wait for tx if we are not using MCMS
+ if mcmsConfig == nil {
+ if _, err := deployment.ConfirmIfNoError(chain, tx, err); err != nil {
+ e.Logger.Errorw("Failed to confirm transfer tx", "chain", chain.String(), "err", err)
+ return nil, err
+ }
+ }
+ return tx, nil
+
+}
+
+// LinkTransfer takes the given link transfers and executes them or creates an MCMS proposal for them.
+func LinkTransfer(e deployment.Environment, cfg *LinkTransferConfig) (deployment.ChangesetOutput, error) {
+
+ err := cfg.Validate(e)
+ if err != nil {
+ return deployment.ChangesetOutput{}, fmt.Errorf("invalid LinkTransferConfig: %w", err)
+ }
+ chainSelectors := []uint64{}
+ for chainSelector := range cfg.Transfers {
+ chainSelectors = append(chainSelectors, chainSelector)
+ }
+ mcmsPerChain := map[uint64]*owner_helpers.ManyChainMultiSig{}
+
+ timelockAddresses := map[uint64]common.Address{}
+ // Initialize state for each chain
+ linkStatePerChain, mcmsStatePerChain, err := initStatePerChain(cfg, e)
+
+ allBatches := []timelock.BatchChainOperation{}
+ for chainSelector := range cfg.Transfers {
+ chainID := mcms.ChainIdentifier(chainSelector)
+ chain := e.Chains[chainSelector]
+ linkAddress := linkStatePerChain[chainSelector].LinkToken.Address()
+ mcmsState := mcmsStatePerChain[chainSelector]
+ linkState := linkStatePerChain[chainSelector]
+
+ timelockAddress := mcmsState.Timelock.Address()
+
+ mcmsPerChain[uint64(chainID)] = mcmsState.ProposerMcm
+
+ timelockAddresses[chainSelector] = timelockAddress
+ batch := timelock.BatchChainOperation{
+ ChainIdentifier: chainID,
+ Batch: []mcms.Operation{},
+ }
+
+ opts := getDeployer(e, chainSelector, cfg.McmsConfig)
+ totalAmount := big.NewInt(0)
+ for _, transfer := range cfg.Transfers[chainSelector] {
+ tx, err := transferOrBuildTx(e, linkState, transfer, opts, chain, cfg.McmsConfig)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ op := mcms.Operation{
+ To: linkAddress,
+ Data: tx.Data(),
+ Value: big.NewInt(0),
+ ContractType: string(types.LinkToken),
+ }
+ batch.Batch = append(batch.Batch, op)
+ totalAmount.Add(totalAmount, transfer.Value)
+ }
+
+ allBatches = append(allBatches, batch)
+ }
+
+ if cfg.McmsConfig != nil {
+ proposal, err := proposalutils.BuildProposalFromBatches(
+ timelockAddresses,
+ mcmsPerChain,
+ allBatches,
+ "LINK Value transfer proposal",
+ cfg.McmsConfig.MinDelay,
+ )
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ return deployment.ChangesetOutput{
+ Proposals: []timelock.MCMSWithTimelockProposal{*proposal},
+ }, nil
+ }
+
+ return deployment.ChangesetOutput{}, nil
+}
diff --git a/deployment/common/changeset/example/link_transfer_test.go b/deployment/common/changeset/example/link_transfer_test.go
new file mode 100644
index 00000000000..eecfbd37c95
--- /dev/null
+++ b/deployment/common/changeset/example/link_transfer_test.go
@@ -0,0 +1,373 @@
+package example_test
+
+import (
+ "context"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ chain_selectors "github.com/smartcontractkit/chain-selectors"
+
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset/example"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/zap/zapcore"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/types"
+ "github.com/smartcontractkit/chainlink/deployment/environment/memory"
+)
+
+// setupLinkTransferTestEnv deploys all required contracts for the link transfer
+// tests (LINK token plus the MCMS/Timelock bundle on the first of two memory
+// chains) and returns the updated environment.
+func setupLinkTransferTestEnv(t *testing.T) deployment.Environment {
+
+ lggr := logger.TestLogger(t)
+ cfg := memory.MemoryEnvironmentConfig{
+ Nodes: 1,
+ Chains: 2,
+ }
+ env := memory.NewMemoryEnvironment(t, lggr, zapcore.DebugLevel, cfg)
+ chainSelector := env.AllChainSelectors()[0]
+ // Single-group MCMS config reused for all three roles below.
+ config := proposalutils.SingleGroupMCMS(t)
+
+ // Deploy MCMS and Timelock
+ env, err := changeset.ApplyChangesets(t, env, nil, []changeset.ChangesetApplication{
+ {
+ Changeset: changeset.WrapChangeSet(changeset.DeployLinkToken),
+ Config: []uint64{chainSelector},
+ },
+ {
+ Changeset: changeset.WrapChangeSet(changeset.DeployMCMSWithTimelock),
+ Config: map[uint64]types.MCMSWithTimelockConfig{
+ chainSelector: {
+ Canceller: config,
+ Bypasser: config,
+ Proposer: config,
+ TimelockMinDelay: big.NewInt(0),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ return env
+}
+
+// TestLinkTransferMCMS tests the LinkTransfer changeset by sending LINK from a timelock contract
+// to the deployer key via mcms proposal.
+func TestLinkTransferMCMS(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+
+ env := setupLinkTransferTestEnv(t)
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ // 6 addresses: LINK token + the 5-contract MCMS/Timelock bundle.
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // Mint some funds
+ // grant minter permissions
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, chain.DeployerKey.From)
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ // Fund the timelock so the proposal below can transfer LINK out of it.
+ tx, err = linkState.LinkToken.Mint(chain.DeployerKey, timelockAddress, big.NewInt(750))
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ timelocks := map[uint64]*proposalutils.TimelockExecutionContracts{
+ chainSelector: {
+ Timelock: mcmsState.Timelock,
+ CallProxy: mcmsState.CallProxy,
+ },
+ }
+ // Apply the changeset
+ _, err = changeset.ApplyChangesets(t, env, timelocks, []changeset.ChangesetApplication{
+ // the changeset produces proposals, ApplyChangesets will sign & execute them.
+ // in practice, signing and executing are separated processes.
+ {
+ Changeset: changeset.WrapChangeSet(example.LinkTransfer),
+ Config: &example.LinkTransferConfig{
+ From: timelockAddress,
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {
+ To: chain.DeployerKey.From,
+ Value: big.NewInt(500),
+ },
+ },
+ },
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: 0,
+ OverrideRoot: true,
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // Check new balances: 500 of the 750 minted moved to the deployer key.
+ endBalance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, chain.DeployerKey.From)
+ require.NoError(t, err)
+ expectedBalance := big.NewInt(500)
+ require.Equal(t, expectedBalance, endBalance)
+
+ // check timelock balance (750 minted - 500 transferred)
+ endBalance, err = linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ expectedBalance = big.NewInt(250)
+ require.Equal(t, expectedBalance, endBalance)
+}
+
+// TestLinkTransfer tests the LinkTransfer changeset by sending LINK from a timelock contract to the deployer key.
+// Unlike TestLinkTransferMCMS, no McmsConfig is supplied, so the transfer is
+// executed directly by the deployer key rather than via a proposal.
+func TestLinkTransfer(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+
+ env := setupLinkTransferTestEnv(t)
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ // 6 addresses: LINK token + the 5-contract MCMS/Timelock bundle.
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // Mint some funds
+ // grant minter permissions
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, chain.DeployerKey.From)
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ // Fund the deployer key directly; it is the sender in the direct transfer below.
+ tx, err = linkState.LinkToken.Mint(chain.DeployerKey, chain.DeployerKey.From, big.NewInt(750))
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ timelocks := map[uint64]*proposalutils.TimelockExecutionContracts{
+ chainSelector: {
+ Timelock: mcmsState.Timelock,
+ CallProxy: mcmsState.CallProxy,
+ },
+ }
+
+ // Apply the changeset
+ _, err = changeset.ApplyChangesets(t, env, timelocks, []changeset.ChangesetApplication{
+ // the changeset produces proposals, ApplyChangesets will sign & execute them.
+ // in practice, signing and executing are separated processes.
+ {
+ Changeset: changeset.WrapChangeSet(example.LinkTransfer),
+ Config: &example.LinkTransferConfig{
+ From: chain.DeployerKey.From,
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {
+ To: timelockAddress,
+ Value: big.NewInt(500),
+ },
+ },
+ },
+ // No MCMSConfig here means we'll execute the txs directly.
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ // Check new balances: deployer keeps 750 - 500 = 250.
+ endBalance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, chain.DeployerKey.From)
+ require.NoError(t, err)
+ expectedBalance := big.NewInt(250)
+ require.Equal(t, expectedBalance, endBalance)
+
+ // check timelock balance received the 500 transferred.
+ endBalance, err = linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ expectedBalance = big.NewInt(500)
+ require.Equal(t, expectedBalance, endBalance)
+}
+
+// TestValidate exercises LinkTransferConfig.Validate across a valid MCMS
+// config, a valid non-MCMS config, and every error branch (bad/unknown
+// selectors, empty or invalid transfers, bad values, excessive MCMS delay,
+// non-EVM chain, insufficient sender funds).
+func TestValidate(t *testing.T) {
+ t.Parallel()
+ env := setupLinkTransferTestEnv(t)
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+ // Grant the deployer the minter role and fund it with 750 LINK so the
+ // balance checks in Validate see 750 on the deployer and 0 on the timelock.
+ tx, err := linkState.LinkToken.GrantMintRole(chain.DeployerKey, chain.DeployerKey.From)
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+ tx, err = linkState.LinkToken.Mint(chain.DeployerKey, chain.DeployerKey.From, big.NewInt(750))
+ require.NoError(t, err)
+ _, err = deployment.ConfirmIfNoError(chain, tx, err)
+ require.NoError(t, err)
+
+ // NOTE(review): the expected error strings below embed the in-memory test
+ // chain selector (909606746561742123) and so rely on the memory environment
+ // assigning selectors deterministically.
+ tests := []struct {
+ name string
+ cfg example.LinkTransferConfig
+ errorMsg string
+ }{
+ {
+ name: "valid config",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: time.Hour,
+ },
+ },
+ },
+ {
+ name: "valid non mcms config",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ },
+ },
+ {
+ name: "insufficient funds",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: big.NewInt(100)},
+ {To: chain.DeployerKey.From, Value: big.NewInt(500)},
+ {To: chain.DeployerKey.From, Value: big.NewInt(1250)},
+ },
+ },
+ From: mcmsState.Timelock.Address(),
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: time.Hour,
+ },
+ },
+ errorMsg: "sender does not have enough funds for transfers for chain selector 909606746561742123, required: 1850, available: 0",
+ },
+ {
+ name: "invalid config: empty transfers",
+ cfg: example.LinkTransferConfig{Transfers: map[uint64][]example.TransferConfig{}},
+ errorMsg: "transfers map must have at least one chainSel",
+ },
+ {
+ name: "invalid chain selector",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ 1: {{To: common.Address{}, Value: big.NewInt(100)}}},
+ },
+ errorMsg: "invalid chain selector: unknown chain selector 1",
+ },
+ {
+ name: "chain selector not found",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chain_selectors.ETHEREUM_TESTNET_GOERLI_ARBITRUM_1.Selector: {{To: common.Address{}, Value: big.NewInt(100)}}},
+ },
+ errorMsg: "chain with selector 6101244977088475029 not found",
+ },
+ {
+ name: "empty transfer list",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {},
+ },
+ },
+ errorMsg: "transfers for chainSel 909606746561742123 must have at least one LinkTransfer",
+ },
+ {
+ name: "empty value",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: nil},
+ },
+ },
+ },
+ errorMsg: "value for transfers must be set",
+ },
+ {
+ name: "zero value",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: big.NewInt(0)},
+ },
+ },
+ },
+ errorMsg: "value for transfers must be non-zero",
+ },
+ {
+ name: "negative value",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {
+ {To: chain.DeployerKey.From, Value: big.NewInt(-5)},
+ },
+ },
+ },
+ errorMsg: "value for transfers must be positive",
+ },
+ {
+ name: "non-evm-chain",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chain_selectors.APTOS_MAINNET.Selector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ },
+ errorMsg: "chain selector 4741433654826277614 is not an EVM chain",
+ },
+ {
+ name: "delay greater than max allowed",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: mcmsState.Timelock.Address(), Value: big.NewInt(100)}}},
+ From: chain.DeployerKey.From,
+ McmsConfig: &example.MCMSConfig{
+ MinDelay: time.Hour * 24 * 10,
+ },
+ },
+ errorMsg: "minDelay must be less than 7 days",
+ },
+ {
+ name: "invalid config: transfer to address missing",
+ cfg: example.LinkTransferConfig{
+ Transfers: map[uint64][]example.TransferConfig{
+ chainSelector: {{To: common.Address{}, Value: big.NewInt(100)}}},
+ },
+ errorMsg: "'to' address for transfers must be set",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.cfg.Validate(env)
+ if tt.errorMsg != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tt.errorMsg)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+}
diff --git a/deployment/common/changeset/example/mint_link.go b/deployment/common/changeset/example/mint_link.go
new file mode 100644
index 00000000000..dc50f8a1a27
--- /dev/null
+++ b/deployment/common/changeset/example/mint_link.go
@@ -0,0 +1,43 @@
+package example
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+)
+
+// MintLinkConfig is the configuration for the MintLink changeset.
+type MintLinkConfig struct {
+ Amount *big.Int // amount of LINK to mint
+ ChainSelector uint64 // selector of the chain whose LINK token is minted
+ To common.Address // recipient of the minted LINK
+}
+
+var _ deployment.ChangeSet[*MintLinkConfig] = MintLink
+
+// MintLink mints LINK to the provided address on the configured chain using
+// the deployer key. The deployer key must already hold the minter role on the
+// token (see AddMintersBurnersLink). It produces no proposals and returns an
+// empty ChangesetOutput on success.
+func MintLink(e deployment.Environment, cfg *MintLinkConfig) (deployment.ChangesetOutput, error) {
+ chain := e.Chains[cfg.ChainSelector]
+ addresses, err := e.ExistingAddresses.AddressesForChain(cfg.ChainSelector)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addresses)
+ if err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+
+ tx, err := linkState.LinkToken.Mint(chain.DeployerKey, cfg.To, cfg.Amount)
+ // ConfirmIfNoError returns err unchanged when the mint call itself failed,
+ // so a single check covers both the send and the confirmation.
+ if _, err = deployment.ConfirmIfNoError(chain, tx, err); err != nil {
+ return deployment.ChangesetOutput{}, err
+ }
+ return deployment.ChangesetOutput{}, nil
+}
diff --git a/deployment/common/changeset/example/mint_link_test.go b/deployment/common/changeset/example/mint_link_test.go
new file mode 100644
index 00000000000..1c60c3221de
--- /dev/null
+++ b/deployment/common/changeset/example/mint_link_test.go
@@ -0,0 +1,58 @@
+package example_test
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/changeset/example"
+)
+
+// TestMintLink tests the MintLink changeset by minting LINK to the timelock
+// and verifying its balance.
+func TestMintLink(t *testing.T) {
+ t.Parallel()
+ env := setupLinkTransferTestEnv(t)
+ ctx := env.GetContext()
+ chainSelector := env.AllChainSelectors()[0]
+ chain := env.Chains[chainSelector]
+
+ addrs, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ require.NoError(t, err)
+ require.Len(t, addrs, 6)
+
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chain, addrs)
+ require.NoError(t, err)
+ linkState, err := changeset.MaybeLoadLinkTokenChainState(chain, addrs)
+ require.NoError(t, err)
+
+ // Grant the deployer key the minter role first; MintLink assumes the
+ // deployer already holds it.
+ _, err = changeset.ApplyChangesets(t, env, nil, []changeset.ChangesetApplication{
+ {
+ Changeset: changeset.WrapChangeSet(example.AddMintersBurnersLink),
+ Config: &example.AddMintersBurnersLinkConfig{
+ ChainSelector: chainSelector,
+ Minters: []common.Address{chain.DeployerKey.From},
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ timelockAddress := mcmsState.Timelock.Address()
+
+ // Mint some funds
+ _, err = example.MintLink(env, &example.MintLinkConfig{
+ ChainSelector: chainSelector,
+ To: timelockAddress,
+ Amount: big.NewInt(7568),
+ })
+ require.NoError(t, err)
+
+ // check timelock balance matches the minted amount
+ endBalance, err := linkState.LinkToken.BalanceOf(&bind.CallOpts{Context: ctx}, timelockAddress)
+ require.NoError(t, err)
+ expectedBalance := big.NewInt(7568)
+ require.Equal(t, expectedBalance, endBalance)
+}
diff --git a/deployment/common/changeset/internal/mcms_test.go b/deployment/common/changeset/internal/mcms_test.go
index 10fb1d980de..ff013717d30 100644
--- a/deployment/common/changeset/internal/mcms_test.go
+++ b/deployment/common/changeset/internal/mcms_test.go
@@ -2,7 +2,6 @@ package internal_test
import (
"encoding/json"
- "math/big"
"testing"
chainsel "github.com/smartcontractkit/chain-selectors"
@@ -11,6 +10,7 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
"github.com/smartcontractkit/chainlink/deployment/common/changeset"
"github.com/smartcontractkit/chainlink/deployment/common/changeset/internal"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -23,7 +23,7 @@ func TestDeployMCMSWithConfig(t *testing.T) {
})
ab := deployment.NewMemoryAddressBook()
_, err := internal.DeployMCMSWithConfig(types.ProposerManyChainMultisig,
- lggr, chains[chainsel.TEST_90000001.Selector], ab, changeset.SingleGroupMCMS(t))
+ lggr, chains[chainsel.TEST_90000001.Selector], ab, proposalutils.SingleGroupMCMS(t))
require.NoError(t, err)
}
@@ -35,17 +35,12 @@ func TestDeployMCMSWithTimelockContracts(t *testing.T) {
ab := deployment.NewMemoryAddressBook()
_, err := internal.DeployMCMSWithTimelockContracts(lggr,
chains[chainsel.TEST_90000001.Selector],
- ab, types.MCMSWithTimelockConfig{
- Canceller: changeset.SingleGroupMCMS(t),
- Bypasser: changeset.SingleGroupMCMS(t),
- Proposer: changeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- })
+ ab, proposalutils.SingleGroupTimelockConfig(t))
require.NoError(t, err)
addresses, err := ab.AddressesForChain(chainsel.TEST_90000001.Selector)
require.NoError(t, err)
require.Len(t, addresses, 5)
- mcmsState, err := changeset.MaybeLoadMCMSWithTimelockState(chains[chainsel.TEST_90000001.Selector], addresses)
+ mcmsState, err := changeset.MaybeLoadMCMSWithTimelockChainState(chains[chainsel.TEST_90000001.Selector], addresses)
require.NoError(t, err)
v, err := mcmsState.GenerateMCMSWithTimelockView()
b, err := json.MarshalIndent(v, "", " ")
diff --git a/deployment/common/changeset/state.go b/deployment/common/changeset/state.go
index a580c13b40b..0db34abad71 100644
--- a/deployment/common/changeset/state.go
+++ b/deployment/common/changeset/state.go
@@ -8,6 +8,7 @@ import (
owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/common/view/v1_0"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
@@ -19,32 +20,7 @@ import (
// It is public for use in product specific packages.
// Either all fields are nil or all fields are non-nil.
type MCMSWithTimelockState struct {
- CancellerMcm *owner_helpers.ManyChainMultiSig
- BypasserMcm *owner_helpers.ManyChainMultiSig
- ProposerMcm *owner_helpers.ManyChainMultiSig
- Timelock *owner_helpers.RBACTimelock
- CallProxy *owner_helpers.CallProxy
-}
-
-// Validate checks that all fields are non-nil, ensuring it's ready
-// for use generating views or interactions.
-func (state MCMSWithTimelockState) Validate() error {
- if state.Timelock == nil {
- return errors.New("timelock not found")
- }
- if state.CancellerMcm == nil {
- return errors.New("canceller not found")
- }
- if state.ProposerMcm == nil {
- return errors.New("proposer not found")
- }
- if state.BypasserMcm == nil {
- return errors.New("bypasser not found")
- }
- if state.CallProxy == nil {
- return errors.New("call proxy not found")
- }
- return nil
+ *proposalutils.MCMSWithTimelockContracts
}
func (state MCMSWithTimelockState) GenerateMCMSWithTimelockView() (v1_0.MCMSWithTimelockView, error) {
@@ -80,15 +56,38 @@ func (state MCMSWithTimelockState) GenerateMCMSWithTimelockView() (v1_0.MCMSWith
}, nil
}
-// MaybeLoadMCMSWithTimelockState looks for the addresses corresponding to
+// MaybeLoadMCMSWithTimelockState loads the MCMSWithTimelockState state for each chain in the given environment.
+// It returns an error if any requested selector is missing from the
+// environment, if its addresses cannot be listed, or if the per-chain load
+// (MaybeLoadMCMSWithTimelockChainState) fails.
+func MaybeLoadMCMSWithTimelockState(env deployment.Environment, chainSelectors []uint64) (map[uint64]*MCMSWithTimelockState, error) {
+ result := map[uint64]*MCMSWithTimelockState{}
+ for _, chainSelector := range chainSelectors {
+ chain, ok := env.Chains[chainSelector]
+ if !ok {
+ return nil, fmt.Errorf("chain %d not found", chainSelector)
+ }
+ addressesChain, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ if err != nil {
+ return nil, err
+ }
+ state, err := MaybeLoadMCMSWithTimelockChainState(chain, addressesChain)
+ if err != nil {
+ return nil, err
+ }
+ result[chainSelector] = state
+ }
+ return result, nil
+}
+
+// MaybeLoadMCMSWithTimelockChainState looks for the addresses corresponding to
// contracts deployed with DeployMCMSWithTimelock and loads them into a
// MCMSWithTimelockState struct. If none of the contracts are found, the state struct will be nil.
// An error indicates:
// - Found but was unable to load a contract
// - It only found part of the bundle of contracts
// - If found more than one instance of a contract (we expect one bundle in the given addresses)
-func MaybeLoadMCMSWithTimelockState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*MCMSWithTimelockState, error) {
- state := MCMSWithTimelockState{}
+func MaybeLoadMCMSWithTimelockChainState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*MCMSWithTimelockState, error) {
+ state := MCMSWithTimelockState{
+ MCMSWithTimelockContracts: &proposalutils.MCMSWithTimelockContracts{},
+ }
// We expect one of each contract on the chain.
timelock := deployment.NewTypeAndVersion(types.RBACTimelock, deployment.Version1_0_0)
callProxy := deployment.NewTypeAndVersion(types.CallProxy, deployment.Version1_0_0)
@@ -153,7 +152,28 @@ func (s LinkTokenState) GenerateLinkView() (v1_0.LinkTokenView, error) {
return v1_0.GenerateLinkTokenView(s.LinkToken)
}
-func MaybeLoadLinkTokenState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*LinkTokenState, error) {
+// MaybeLoadLinkTokenState loads the LinkTokenState state for each chain in the given environment.
+// It returns an error if any requested selector is missing from the
+// environment, if its addresses cannot be listed, or if the per-chain load
+// (MaybeLoadLinkTokenChainState) fails.
+func MaybeLoadLinkTokenState(env deployment.Environment, chainSelectors []uint64) (map[uint64]*LinkTokenState, error) {
+ result := map[uint64]*LinkTokenState{}
+ for _, chainSelector := range chainSelectors {
+ chain, ok := env.Chains[chainSelector]
+ if !ok {
+ return nil, fmt.Errorf("chain %d not found", chainSelector)
+ }
+ addressesChain, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ if err != nil {
+ return nil, err
+ }
+ state, err := MaybeLoadLinkTokenChainState(chain, addressesChain)
+ if err != nil {
+ return nil, err
+ }
+ result[chainSelector] = state
+ }
+ return result, nil
+}
+
+func MaybeLoadLinkTokenChainState(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*LinkTokenState, error) {
state := LinkTokenState{}
linkToken := deployment.NewTypeAndVersion(types.LinkToken, deployment.Version1_0_0)
// Perhaps revisit if we have a use case for multiple.
diff --git a/deployment/common/changeset/test_helpers.go b/deployment/common/changeset/test_helpers.go
index 8fce5ea79f2..e92b36e5b55 100644
--- a/deployment/common/changeset/test_helpers.go
+++ b/deployment/common/changeset/test_helpers.go
@@ -9,6 +9,7 @@ import (
"github.com/smartcontractkit/chainlink-testing-framework/lib/utils/testcontext"
"github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
)
type ChangesetApplication struct {
@@ -32,7 +33,7 @@ func WrapChangeSet[C any](fn deployment.ChangeSet[C]) func(e deployment.Environm
}
// ApplyChangesets applies the changeset applications to the environment and returns the updated environment.
-func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPerChain map[uint64]*TimelockExecutionContracts, changesetApplications []ChangesetApplication) (deployment.Environment, error) {
+func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPerChain map[uint64]*proposalutils.TimelockExecutionContracts, changesetApplications []ChangesetApplication) (deployment.Environment, error) {
currentEnv := e
for i, csa := range changesetApplications {
out, err := csa.Changeset(currentEnv, csa.Config)
@@ -72,14 +73,14 @@ func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPe
chains.Add(uint64(op.ChainIdentifier))
}
- signed := SignProposal(t, e, &prop)
+ signed := proposalutils.SignProposal(t, e, &prop)
for _, sel := range chains.ToSlice() {
timelockContracts, ok := timelockContractsPerChain[sel]
if !ok || timelockContracts == nil {
return deployment.Environment{}, fmt.Errorf("timelock contracts not found for chain %d", sel)
}
- ExecuteProposal(t, e, signed, timelockContracts, sel)
+ proposalutils.ExecuteProposal(t, e, signed, timelockContracts, sel)
}
}
}
@@ -91,6 +92,7 @@ func ApplyChangesets(t *testing.T, e deployment.Environment, timelockContractsPe
NodeIDs: e.NodeIDs,
Offchain: e.Offchain,
OCRSecrets: e.OCRSecrets,
+ GetContext: e.GetContext,
}
}
return currentEnv, nil
diff --git a/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go b/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go
index 6c68924b35e..7ba11596a2d 100644
--- a/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go
+++ b/deployment/common/changeset/transfer_to_mcms_with_timelock_test.go
@@ -6,8 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/require"
- "math/big"
-
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/v2/core/logger"
@@ -28,23 +27,18 @@ func TestTransferToMCMSWithTimelock(t *testing.T) {
{
Changeset: WrapChangeSet(DeployMCMSWithTimelock),
Config: map[uint64]types.MCMSWithTimelockConfig{
- chain1: {
- Canceller: SingleGroupMCMS(t),
- Bypasser: SingleGroupMCMS(t),
- Proposer: SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- },
+ chain1: proposalutils.SingleGroupTimelockConfig(t),
},
},
})
require.NoError(t, err)
addrs, err := e.ExistingAddresses.AddressesForChain(chain1)
require.NoError(t, err)
- state, err := MaybeLoadMCMSWithTimelockState(e.Chains[chain1], addrs)
+ state, err := MaybeLoadMCMSWithTimelockChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
- link, err := MaybeLoadLinkTokenState(e.Chains[chain1], addrs)
+ link, err := MaybeLoadLinkTokenChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
- e, err = ApplyChangesets(t, e, map[uint64]*TimelockExecutionContracts{
+ e, err = ApplyChangesets(t, e, map[uint64]*proposalutils.TimelockExecutionContracts{
chain1: {
Timelock: state.Timelock,
CallProxy: state.CallProxy,
@@ -62,7 +56,7 @@ func TestTransferToMCMSWithTimelock(t *testing.T) {
})
require.NoError(t, err)
// We expect now that the link token is owned by the MCMS timelock.
- link, err = MaybeLoadLinkTokenState(e.Chains[chain1], addrs)
+ link, err = MaybeLoadLinkTokenChainState(e.Chains[chain1], addrs)
require.NoError(t, err)
o, err := link.LinkToken.Owner(nil)
require.NoError(t, err)
diff --git a/deployment/common/proposalutils/mcms_helpers.go b/deployment/common/proposalutils/mcms_helpers.go
new file mode 100644
index 00000000000..4a7540761ee
--- /dev/null
+++ b/deployment/common/proposalutils/mcms_helpers.go
@@ -0,0 +1,273 @@
+package proposalutils
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/common/types"
+)
+
+// TimelockExecutionContracts is a helper struct for executing timelock proposals. it contains
+// the timelock and call proxy contracts.
+type TimelockExecutionContracts struct {
+ Timelock *owner_helpers.RBACTimelock
+ CallProxy *owner_helpers.CallProxy
+}
+
+// NewTimelockExecutionContracts creates a new TimelockExecutionContracts struct.
+// If there are multiple timelocks or call proxy on the chain, an error is returned.
+// If there is a missing timelocks or call proxy on the chain, an error is returned.
+func NewTimelockExecutionContracts(env deployment.Environment, chainSelector uint64) (*TimelockExecutionContracts, error) {
+ addrTypeVer, err := env.ExistingAddresses.AddressesForChain(chainSelector)
+ if err != nil {
+ return nil, fmt.Errorf("error getting addresses for chain: %w", err)
+ }
+ var timelock *owner_helpers.RBACTimelock
+ var callProxy *owner_helpers.CallProxy
+ for addr, tv := range addrTypeVer {
+ if tv.Type == types.RBACTimelock {
+ if timelock != nil {
+ return nil, fmt.Errorf("multiple timelocks found on chain %d", chainSelector)
+ }
+ var err error
+ timelock, err = owner_helpers.NewRBACTimelock(common.HexToAddress(addr), env.Chains[chainSelector].Client)
+ if err != nil {
+ return nil, fmt.Errorf("error creating timelock: %w", err)
+ }
+ }
+ if tv.Type == types.CallProxy {
+ if callProxy != nil {
+ return nil, fmt.Errorf("multiple call proxies found on chain %d", chainSelector)
+ }
+ var err error
+ callProxy, err = owner_helpers.NewCallProxy(common.HexToAddress(addr), env.Chains[chainSelector].Client)
+ if err != nil {
+ return nil, fmt.Errorf("error creating call proxy: %w", err)
+ }
+ }
+ }
+ if timelock == nil || callProxy == nil {
+ return nil, fmt.Errorf("missing timelock (%t) or call proxy (%t) on chain %d", timelock == nil, callProxy == nil, chainSelector)
+ }
+ return &TimelockExecutionContracts{
+ Timelock: timelock,
+ CallProxy: callProxy,
+ }, nil
+}
+
+type RunTimelockExecutorConfig struct {
+ Executor *mcms.Executor
+ TimelockContracts *TimelockExecutionContracts
+ ChainSelector uint64
+ // BlockStart is optional. It filter the timelock scheduled events.
+ // If not provided, the executor assumes that the operations have not been executed yet
+ // executes all the operations for the given chain.
+ BlockStart *uint64
+ BlockEnd *uint64
+}
+
+func (cfg RunTimelockExecutorConfig) Validate() error {
+ if cfg.Executor == nil {
+ return fmt.Errorf("executor is nil")
+ }
+ if cfg.TimelockContracts == nil {
+ return fmt.Errorf("timelock contracts is nil")
+ }
+ if cfg.ChainSelector == 0 {
+ return fmt.Errorf("chain selector is 0")
+ }
+ if cfg.BlockStart != nil && cfg.BlockEnd != nil {
+ if *cfg.BlockStart > *cfg.BlockEnd {
+ return fmt.Errorf("block start is greater than block end")
+ }
+ }
+ if cfg.BlockStart == nil && cfg.BlockEnd != nil {
+ return fmt.Errorf("block start must not be nil when block end is not nil")
+ }
+
+ if len(cfg.Executor.Operations[mcms.ChainIdentifier(cfg.ChainSelector)]) == 0 {
+ return fmt.Errorf("no operations for chain %d", cfg.ChainSelector)
+ }
+ return nil
+}
+
+// RunTimelockExecutor runs the scheduled operations for the given chain.
+// If the block start is not provided, it assumes that the operations have not been scheduled yet
+// and executes all the operations for the given chain.
+// It is an error if there are no operations for the given chain.
+func RunTimelockExecutor(env deployment.Environment, cfg RunTimelockExecutorConfig) error {
+ // TODO: This sort of helper probably should move to the MCMS lib.
+ // Execute all the transactions in the proposal which are for this chain.
+ if err := cfg.Validate(); err != nil {
+ return fmt.Errorf("error validating config: %w", err)
+ }
+ for _, chainOp := range cfg.Executor.Operations[mcms.ChainIdentifier(cfg.ChainSelector)] {
+ for idx, op := range cfg.Executor.ChainAgnosticOps {
+ start := cfg.BlockStart
+ end := cfg.BlockEnd
+ if bytes.Equal(op.Data, chainOp.Data) && op.To == chainOp.To {
+ if start == nil {
+ opTx, err2 := cfg.Executor.ExecuteOnChain(env.Chains[cfg.ChainSelector].Client, env.Chains[cfg.ChainSelector].DeployerKey, idx)
+ if err2 != nil {
+ return fmt.Errorf("error executing on chain: %w", err2)
+ }
+ block, err2 := env.Chains[cfg.ChainSelector].Confirm(opTx)
+ if err2 != nil {
+ return fmt.Errorf("error confirming on chain: %w", err2)
+ }
+ start = &block
+ end = &block
+ }
+
+ it, err2 := cfg.TimelockContracts.Timelock.FilterCallScheduled(&bind.FilterOpts{
+ Start: *start,
+ End: end,
+ Context: env.GetContext(),
+ }, nil, nil)
+ if err2 != nil {
+ return fmt.Errorf("error filtering call scheduled: %w", err2)
+ }
+ var calls []owner_helpers.RBACTimelockCall
+ var pred, salt [32]byte
+ for it.Next() {
+ // Note these are the same for the whole batch, can overwrite
+ pred = it.Event.Predecessor
+ salt = it.Event.Salt
+ verboseDebug(env.Logger, it.Event)
+ env.Logger.Info("scheduled", "event", it.Event)
+ calls = append(calls, owner_helpers.RBACTimelockCall{
+ Target: it.Event.Target,
+ Data: it.Event.Data,
+ Value: it.Event.Value,
+ })
+ }
+
+ timelockExecutorProxy, err := owner_helpers.NewRBACTimelock(cfg.TimelockContracts.CallProxy.Address(), env.Chains[cfg.ChainSelector].Client)
+ if err != nil {
+ return fmt.Errorf("error creating timelock executor proxy: %w", err)
+ }
+ tx, err := timelockExecutorProxy.ExecuteBatch(
+ env.Chains[cfg.ChainSelector].DeployerKey, calls, pred, salt)
+ if err != nil {
+ return fmt.Errorf("error executing batch: %w", err)
+ }
+ _, err = env.Chains[cfg.ChainSelector].Confirm(tx)
+ if err != nil {
+ return fmt.Errorf("error confirming batch: %w", err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func verboseDebug(lggr logger.Logger, event *owner_helpers.RBACTimelockCallScheduled) {
+ b, err := json.Marshal(event)
+ if err != nil {
+ panic(err)
+ }
+ lggr.Debug("scheduled", "event", string(b))
+}
+
+// MCMSWithTimelockContracts holds the Go bindings
+// for a MCMSWithTimelock contract deployment.
+// It is public for use in product specific packages.
+// Either all fields are nil or all fields are non-nil.
+type MCMSWithTimelockContracts struct {
+ CancellerMcm *owner_helpers.ManyChainMultiSig
+ BypasserMcm *owner_helpers.ManyChainMultiSig
+ ProposerMcm *owner_helpers.ManyChainMultiSig
+ Timelock *owner_helpers.RBACTimelock
+ CallProxy *owner_helpers.CallProxy
+}
+
+// Validate checks that all fields are non-nil, ensuring it's ready
+// for use generating views or interactions.
+func (state MCMSWithTimelockContracts) Validate() error {
+ if state.Timelock == nil {
+ return errors.New("timelock not found")
+ }
+ if state.CancellerMcm == nil {
+ return errors.New("canceller not found")
+ }
+ if state.ProposerMcm == nil {
+ return errors.New("proposer not found")
+ }
+ if state.BypasserMcm == nil {
+ return errors.New("bypasser not found")
+ }
+ if state.CallProxy == nil {
+ return errors.New("call proxy not found")
+ }
+ return nil
+}
+
+// MaybeLoadMCMSWithTimelockContracts looks for the addresses corresponding to
+// contracts deployed with DeployMCMSWithTimelock and loads them into a
+// MCMSWithTimelockState struct. If none of the contracts are found, the state struct will be nil.
+// An error indicates:
+// - Found but was unable to load a contract
+// - It only found part of the bundle of contracts
+// - If found more than one instance of a contract (we expect one bundle in the given addresses)
+func MaybeLoadMCMSWithTimelockContracts(chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*MCMSWithTimelockContracts, error) {
+ state := MCMSWithTimelockContracts{}
+ // We expect one of each contract on the chain.
+ timelock := deployment.NewTypeAndVersion(types.RBACTimelock, deployment.Version1_0_0)
+ callProxy := deployment.NewTypeAndVersion(types.CallProxy, deployment.Version1_0_0)
+ proposer := deployment.NewTypeAndVersion(types.ProposerManyChainMultisig, deployment.Version1_0_0)
+ canceller := deployment.NewTypeAndVersion(types.CancellerManyChainMultisig, deployment.Version1_0_0)
+ bypasser := deployment.NewTypeAndVersion(types.BypasserManyChainMultisig, deployment.Version1_0_0)
+
+ // Ensure we either have the bundle or not.
+ _, err := deployment.AddressesContainBundle(addresses,
+ map[deployment.TypeAndVersion]struct{}{
+ timelock: {}, proposer: {}, canceller: {}, bypasser: {}, callProxy: {},
+ })
+ if err != nil {
+ return nil, fmt.Errorf("unable to check MCMS contracts on chain %s error: %w", chain.Name(), err)
+ }
+
+ for address, tvStr := range addresses {
+ switch tvStr {
+ case timelock:
+ tl, err := owner_helpers.NewRBACTimelock(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.Timelock = tl
+ case callProxy:
+ cp, err := owner_helpers.NewCallProxy(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.CallProxy = cp
+ case proposer:
+ mcms, err := owner_helpers.NewManyChainMultiSig(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.ProposerMcm = mcms
+ case bypasser:
+ mcms, err := owner_helpers.NewManyChainMultiSig(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.BypasserMcm = mcms
+ case canceller:
+ mcms, err := owner_helpers.NewManyChainMultiSig(common.HexToAddress(address), chain.Client)
+ if err != nil {
+ return nil, err
+ }
+ state.CancellerMcm = mcms
+ }
+ }
+ return &state, nil
+}
diff --git a/deployment/common/changeset/mcms_test_helpers.go b/deployment/common/proposalutils/mcms_test_helpers.go
similarity index 54%
rename from deployment/common/changeset/mcms_test_helpers.go
rename to deployment/common/proposalutils/mcms_test_helpers.go
index ffa99114d74..610fe84f34c 100644
--- a/deployment/common/changeset/mcms_test_helpers.go
+++ b/deployment/common/proposalutils/mcms_test_helpers.go
@@ -1,22 +1,21 @@
-package changeset
+package proposalutils
import (
- "bytes"
- "context"
"crypto/ecdsa"
+ "math/big"
"testing"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/config"
- owner_helpers "github.com/smartcontractkit/ccip-owner-contracts/pkg/gethwrappers"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/mcms"
"github.com/smartcontractkit/ccip-owner-contracts/pkg/proposal/timelock"
chainsel "github.com/smartcontractkit/chain-selectors"
"github.com/stretchr/testify/require"
"github.com/smartcontractkit/chainlink/deployment"
+ commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
+
)
var (
@@ -25,13 +24,6 @@ var (
TestXXXMCMSSigner *ecdsa.PrivateKey
)
-// TimelockExecutionContracts is a helper struct for executing timelock proposals. it contains
-// the timelock and call proxy contracts.
-type TimelockExecutionContracts struct {
- Timelock *owner_helpers.RBACTimelock
- CallProxy *owner_helpers.CallProxy
-}
-
func init() {
key, err := crypto.GenerateKey()
if err != nil {
@@ -79,45 +71,22 @@ func ExecuteProposal(t *testing.T, env deployment.Environment, executor *mcms.Ex
if err2 != nil {
require.NoError(t, deployment.MaybeDataErr(err2))
}
+
_, err2 = env.Chains[sel].Confirm(tx)
require.NoError(t, err2)
+ cfg := RunTimelockExecutorConfig{
+ Executor: executor,
+ TimelockContracts: timelockContracts,
+ ChainSelector: sel,
+ }
+ require.NoError(t, RunTimelockExecutor(env, cfg))
+}
- // TODO: This sort of helper probably should move to the MCMS lib.
- // Execute all the transactions in the proposal which are for this chain.
- for _, chainOp := range executor.Operations[mcms.ChainIdentifier(sel)] {
- for idx, op := range executor.ChainAgnosticOps {
- if bytes.Equal(op.Data, chainOp.Data) && op.To == chainOp.To {
- opTx, err3 := executor.ExecuteOnChain(env.Chains[sel].Client, env.Chains[sel].DeployerKey, idx)
- require.NoError(t, err3)
- block, err3 := env.Chains[sel].Confirm(opTx)
- require.NoError(t, err3)
- t.Log("executed", chainOp)
- it, err3 := timelockContracts.Timelock.FilterCallScheduled(&bind.FilterOpts{
- Start: block,
- End: &block,
- Context: context.Background(),
- }, nil, nil)
- require.NoError(t, err3)
- var calls []owner_helpers.RBACTimelockCall
- var pred, salt [32]byte
- for it.Next() {
- // Note these are the same for the whole batch, can overwrite
- pred = it.Event.Predecessor
- salt = it.Event.Salt
- t.Log("scheduled", it.Event)
- calls = append(calls, owner_helpers.RBACTimelockCall{
- Target: it.Event.Target,
- Data: it.Event.Data,
- Value: it.Event.Value,
- })
- }
- timelockExecutorProxy, err := owner_helpers.NewRBACTimelock(timelockContracts.CallProxy.Address(), env.Chains[sel].Client)
- tx, err := timelockExecutorProxy.ExecuteBatch(
- env.Chains[sel].DeployerKey, calls, pred, salt)
- require.NoError(t, err)
- _, err = env.Chains[sel].Confirm(tx)
- require.NoError(t, err)
- }
- }
+func SingleGroupTimelockConfig(t *testing.T) commontypes.MCMSWithTimelockConfig {
+ return commontypes.MCMSWithTimelockConfig{
+ Canceller: SingleGroupMCMS(t),
+ Bypasser: SingleGroupMCMS(t),
+ Proposer: SingleGroupMCMS(t),
+ TimelockMinDelay: big.NewInt(0),
}
}
diff --git a/deployment/common/proposalutils/propose.go b/deployment/common/proposalutils/propose.go
index feaee69940e..32a5bcdfda2 100644
--- a/deployment/common/proposalutils/propose.go
+++ b/deployment/common/proposalutils/propose.go
@@ -15,7 +15,8 @@ const (
DefaultValidUntil = 72 * time.Hour
)
-func buildProposalMetadata(
+
+func BuildProposalMetadata(
chainSelectors []uint64,
proposerMcmsesPerChain map[uint64]*gethwrappers.ManyChainMultiSig,
) (map[mcms.ChainIdentifier]mcms.ChainMetadata, error) {
@@ -56,7 +57,7 @@ func BuildProposalFromBatches(
chains.Add(uint64(op.ChainIdentifier))
}
- mcmsMd, err := buildProposalMetadata(chains.ToSlice(), proposerMcmsesPerChain)
+ mcmsMd, err := BuildProposalMetadata(chains.ToSlice(), proposerMcmsesPerChain)
if err != nil {
return nil, err
}
diff --git a/deployment/environment.go b/deployment/environment.go
index 3d120adbbf1..0823404da2d 100644
--- a/deployment/environment.go
+++ b/deployment/environment.go
@@ -95,6 +95,7 @@ type Environment struct {
Logger logger.Logger
ExistingAddresses AddressBook
Chains map[uint64]Chain
+ SolChains map[uint64]SolChain
NodeIDs []string
Offchain OffchainClient
GetContext func() context.Context
@@ -180,7 +181,7 @@ func MaybeDataErr(err error) error {
var d rpc.DataError
ok := errors.As(err, &d)
if ok {
- return d
+ return fmt.Errorf("%s: %v", d.Error(), d.ErrorData())
}
return err
}
@@ -331,7 +332,6 @@ func NodeInfo(nodeIDs []string, oc NodeChainConfigsLister) (Nodes, error) {
Enabled: 1,
Ids: nodeIDs,
}
-
}
nodesFromJD, err := oc.ListNodes(context.Background(), &nodev1.ListNodesRequest{
Filter: filter,
diff --git a/deployment/environment/crib/ccip_deployer.go b/deployment/environment/crib/ccip_deployer.go
new file mode 100644
index 00000000000..aea7ad0cb8f
--- /dev/null
+++ b/deployment/environment/crib/ccip_deployer.go
@@ -0,0 +1,136 @@
+package crib
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/smartcontractkit/ccip-owner-contracts/pkg/config"
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/ccip/changeset"
+ commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
+ "github.com/smartcontractkit/chainlink/deployment/environment/devenv"
+ "github.com/smartcontractkit/chainlink/v2/core/logger"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay"
+)
+
+// DeployHomeChainContracts deploys the home chain contracts so that the chainlink nodes can be started with the CR address in Capabilities.ExternalRegistry
+// DeployHomeChainContracts is to 1. Set up crib with chains and chainlink nodes ( cap reg is not known yet so not setting the config with capreg address)
+// Call DeployHomeChain changeset with nodeinfo ( the peer id and all)
+func DeployHomeChainContracts(ctx context.Context, lggr logger.Logger, envConfig devenv.EnvironmentConfig, homeChainSel uint64, feedChainSel uint64) (deployment.CapabilityRegistryConfig, deployment.AddressBook, error) {
+ e, _, err := devenv.NewEnvironment(func() context.Context { return ctx }, lggr, envConfig)
+ if err != nil {
+ return deployment.CapabilityRegistryConfig{}, nil, err
+ }
+ if e == nil {
+ return deployment.CapabilityRegistryConfig{}, nil, errors.New("environment is nil")
+ }
+
+ nodes, err := deployment.NodeInfo(e.NodeIDs, e.Offchain)
+ if err != nil {
+ return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, fmt.Errorf("failed to get node info from env: %w", err)
+ }
+ p2pIds := nodes.NonBootstraps().PeerIDs()
+ *e, err = commonchangeset.ApplyChangesets(nil, *e, nil, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.DeployHomeChain),
+ Config: changeset.DeployHomeChainConfig{
+ HomeChainSel: homeChainSel,
+ RMNStaticConfig: changeset.NewTestRMNStaticConfig(),
+ RMNDynamicConfig: changeset.NewTestRMNDynamicConfig(),
+ NodeOperators: changeset.NewTestNodeOperator(e.Chains[homeChainSel].DeployerKey.From),
+ NodeP2PIDsPerNodeOpAdmin: map[string][][32]byte{
+ "NodeOperator": p2pIds,
+ },
+ },
+ },
+ })
+ if err != nil { return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, fmt.Errorf("failed to apply changesets: %w", err) }
+ state, err := changeset.LoadOnchainState(*e)
+ if err != nil {
+ return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, fmt.Errorf("failed to load on chain state: %w", err)
+ }
+ capRegAddr := state.Chains[homeChainSel].CapabilityRegistry.Address()
+ if capRegAddr == common.HexToAddress("0x") {
+ return deployment.CapabilityRegistryConfig{}, e.ExistingAddresses, errors.New("capability registry address not found")
+ }
+ capRegConfig := deployment.CapabilityRegistryConfig{
+ EVMChainID: homeChainSel,
+ Contract: state.Chains[homeChainSel].CapabilityRegistry.Address(),
+ NetworkType: relay.NetworkEVM,
+ }
+ return capRegConfig, e.ExistingAddresses, nil
+}
+
+func DeployCCIPAndAddLanes(ctx context.Context, lggr logger.Logger, envConfig devenv.EnvironmentConfig, homeChainSel, feedChainSel uint64, ab deployment.AddressBook) (DeployCCIPOutput, error) {
+ e, _, err := devenv.NewEnvironment(func() context.Context { return ctx }, lggr, envConfig)
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to initiate new environment: %w", err)
+ }
+ e.ExistingAddresses = ab
+ allChainIds := e.AllChainSelectors()
+ cfg := make(map[uint64]commontypes.MCMSWithTimelockConfig)
+ for _, chain := range e.AllChainSelectors() {
+ mcmsConfig, err := config.NewConfig(1, []common.Address{e.Chains[chain].DeployerKey.From}, []config.Config{})
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to create mcms config: %w", err)
+ }
+ cfg[chain] = commontypes.MCMSWithTimelockConfig{
+ Canceller: *mcmsConfig,
+ Bypasser: *mcmsConfig,
+ Proposer: *mcmsConfig,
+ TimelockMinDelay: big.NewInt(0),
+ }
+ }
+
+ // This will not apply any proposals because we pass nil to testing.
+ // However, setup is ok because we only need to deploy the contracts and distribute job specs
+ *e, err = commonchangeset.ApplyChangesets(nil, *e, nil, []commonchangeset.ChangesetApplication{
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployLinkToken),
+ Config: allChainIds,
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.DeployPrerequisites),
+ Config: changeset.DeployPrerequisiteConfig{
+ ChainSelectors: allChainIds,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployMCMSWithTimelock),
+ Config: cfg,
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.DeployChainContracts),
+ Config: changeset.DeployChainContractsConfig{
+ ChainSelectors: allChainIds,
+ HomeChainSelector: homeChainSel,
+ },
+ },
+ {
+ Changeset: commonchangeset.WrapChangeSet(changeset.CCIPCapabilityJobspec),
+ Config: struct{}{},
+ },
+ }); if err != nil { return DeployCCIPOutput{}, fmt.Errorf("failed to apply changesets: %w", err) }
+ state, err := changeset.LoadOnchainState(*e)
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to load onchain state: %w", err)
+ }
+ // Add all lanes
+ err = changeset.AddLanesForAll(*e, state)
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to add lanes: %w", err)
+ }
+
+ addresses, err := e.ExistingAddresses.Addresses()
+ if err != nil {
+ return DeployCCIPOutput{}, fmt.Errorf("failed to get convert address book to address book map: %w", err)
+ }
+ return DeployCCIPOutput{
+ AddressBook: *deployment.NewMemoryAddressBookFromMap(addresses),
+ NodeIDs: e.NodeIDs,
+ }, err
+}
diff --git a/deployment/environment/crib/data.go b/deployment/environment/crib/data.go
new file mode 100644
index 00000000000..b9197691613
--- /dev/null
+++ b/deployment/environment/crib/data.go
@@ -0,0 +1,81 @@
+package crib
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/environment/devenv"
+)
+
+type OutputReader struct {
+ outputDir string
+}
+
+func NewOutputReader(outputDir string) *OutputReader {
+ return &OutputReader{outputDir: outputDir}
+}
+
+func (r *OutputReader) ReadNodesDetails() NodesDetails {
+ byteValue := r.readFile(NodesDetailsFileName)
+
+ var result NodesDetails
+
+ // Unmarshal the JSON into the map
+ err := json.Unmarshal(byteValue, &result)
+ if err != nil {
+ fmt.Println("Error unmarshalling JSON:", err)
+ panic(err)
+ }
+
+ return result
+}
+
+func (r *OutputReader) ReadChainConfigs() []devenv.ChainConfig {
+ byteValue := r.readFile(ChainsConfigsFileName)
+
+ var result []devenv.ChainConfig
+
+ // Unmarshal the JSON into the map
+ err := json.Unmarshal(byteValue, &result)
+ if err != nil {
+ fmt.Println("Error unmarshalling JSON:", err)
+ panic(err)
+ }
+
+ return result
+}
+
+func (r *OutputReader) ReadAddressBook() *deployment.AddressBookMap {
+ byteValue := r.readFile(AddressBookFileName)
+
+ var result map[uint64]map[string]deployment.TypeAndVersion
+
+ // Unmarshal the JSON into the map
+ err := json.Unmarshal(byteValue, &result)
+ if err != nil {
+ fmt.Println("Error unmarshalling JSON:", err)
+ panic(err)
+ }
+
+ return deployment.NewMemoryAddressBookFromMap(result)
+}
+
+func (r *OutputReader) readFile(fileName string) []byte {
+ file, err := os.Open(fmt.Sprintf("%s/%s", r.outputDir, fileName))
+ if err != nil {
+ fmt.Println("Error opening file:", err)
+ panic(err)
+ }
+ defer file.Close()
+
+ // Read the file's content into a byte slice
+ byteValue, err := io.ReadAll(file)
+ if err != nil {
+ fmt.Println("Error reading file:", err)
+ panic(err)
+ }
+ return byteValue
+}
diff --git a/deployment/environment/crib/env.go b/deployment/environment/crib/env.go
new file mode 100644
index 00000000000..3af1acaf754
--- /dev/null
+++ b/deployment/environment/crib/env.go
@@ -0,0 +1,45 @@
+package crib
+
+const (
+ AddressBookFileName = "ccip-v2-scripts-address-book.json"
+ NodesDetailsFileName = "ccip-v2-scripts-nodes-details.json"
+ ChainsConfigsFileName = "ccip-v2-scripts-chains-details.json"
+)
+
+type CRIBEnv struct {
+ envStateDir string
+}
+
+func NewDevspaceEnvFromStateDir(envStateDir string) CRIBEnv {
+ return CRIBEnv{
+ envStateDir: envStateDir,
+ }
+}
+
+func (c CRIBEnv) GetConfig() DeployOutput {
+ reader := NewOutputReader(c.envStateDir)
+ nodesDetails := reader.ReadNodesDetails()
+ chainConfigs := reader.ReadChainConfigs()
+ return DeployOutput{
+ AddressBook: reader.ReadAddressBook(),
+ NodeIDs: nodesDetails.NodeIDs,
+ Chains: chainConfigs,
+ }
+}
+
+type RPC struct {
+ External *string
+ Internal *string
+}
+
+type ChainConfig struct {
+ ChainID uint64 // chain id as per EIP-155, mainly applicable for EVM chains
+ ChainName string // name of the chain populated from chainselector repo
+ ChainType string // should denote the chain family. Acceptable values are EVM, COSMOS, SOLANA, STARKNET, APTOS etc
+ WSRPCs []RPC // websocket rpcs to connect to the chain
+ HTTPRPCs []RPC // http rpcs to connect to the chain
+}
+
+type NodesDetails struct {
+ NodeIDs []string
+}
diff --git a/deployment/environment/crib/env_test.go b/deployment/environment/crib/env_test.go
new file mode 100644
index 00000000000..262a2540923
--- /dev/null
+++ b/deployment/environment/crib/env_test.go
@@ -0,0 +1,18 @@
+package crib
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestShouldProvideEnvironmentConfig(t *testing.T) {
+ t.Parallel()
+ env := NewDevspaceEnvFromStateDir("testdata/lanes-deployed-state")
+ config := env.GetConfig()
+ require.NotNil(t, config)
+ assert.NotEmpty(t, config.NodeIDs)
+ assert.NotNil(t, config.AddressBook)
+ assert.NotEmpty(t, config.Chains)
+}
diff --git a/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-address-book.json b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-address-book.json
new file mode 100644
index 00000000000..e4b2672cb5f
--- /dev/null
+++ b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-address-book.json
@@ -0,0 +1 @@
+{"12922642891491394802":{"0x05Aa229Aec102f78CE0E852A812a388F076Aa555":{"Type":"CancellerManyChainMultiSig","Version":"1.0.0"},"0x0D4ff719551E23185Aeb16FFbF2ABEbB90635942":{"Type":"TestRouter","Version":"1.2.0"},"0x0f5D1ef48f12b6f691401bfe88c2037c690a6afe":{"Type":"ProposerManyChainMultiSig","Version":"1.0.0"},"0x2dE080e97B0caE9825375D31f5D0eD5751fDf16D":{"Type":"CCIPReceiver","Version":"1.0.0"},"0x2fc631e4B3018258759C52AF169200213e84ABab":{"Type":"OnRamp","Version":"1.6.0-dev"},"0x5C7c905B505f0Cf40Ab6600d05e677F717916F6B":{"Type":"Router","Version":"1.2.0"},"0x63cf2Cd54fE91e3545D1379abf5bfd194545259d":{"Type":"OffRamp","Version":"1.6.0-dev"},"0x712516e61C8B383dF4A63CFe83d7701Bce54B03e":{"Type":"LinkToken","Version":"1.0.0"},"0x71C95911E9a5D330f4D621842EC243EE1343292e":{"Type":"PriceFeed","Version":"1.0.0"},"0x73eccD6288e117cAcA738BDAD4FEC51312166C1A":{"Type":"RMNRemote","Version":"1.6.0-dev"},"0x8464135c8F25Da09e49BC8782676a84730C318bC":{"Type":"PriceFeed","Version":"1.0.0"},"0x85C5Dd61585773423e378146D4bEC6f8D149E248":{"Type":"TokenAdminRegistry","Version":"1.5.0"},"0x948B3c65b89DF0B4894ABE91E6D02FE579834F8F":{"Type":"WETH9","Version":"1.0.0"},"0xAfe1b5bdEbD4ae65AF2024738bf0735fbb65d44b":{"Type":"FeeQuoter","Version":"1.6.0-dev"},"0xC6bA8C3233eCF65B761049ef63466945c362EdD2":{"Type":"BypasserManyChainMultiSig","Version":"1.0.0"},"0xbCF26943C0197d2eE0E5D05c716Be60cc2761508":{"Type":"AdminManyChainMultiSig","Version":"1.0.0"},"0xcA03Dc4665A8C3603cb4Fd5Ce71Af9649dC00d44":{"Type":"RBACTimelock","Version":"1.0.0"},"0xe6b98F104c1BEf218F3893ADab4160Dc73Eb8367":{"Type":"ARMProxy","Version":"1.0.0"},"0xfbAb4aa40C202E4e80390171E82379824f7372dd":{"Type":"NonceManager","Version":"1.6.0-dev"}},"3379446385462418246":{"0x09635F643e140090A9A8Dcd712eD6285858ceBef":{"Type":"RMNRemote","Version":"1.6.0-dev"},"0x0B306BF915C4d645ff596e518fAf3F9669b97016":{"Type":"LinkToken","Version":"1.0.0"},"0x1613beB3B2C4f22Ee086B2b38C1476A3cE7f78E8":{"Type":"OnRamp","Version":"1.6.0-dev"},"0x2279B7
A0a67DB372996a5FaB50D91eAA73d2eBe6":{"Type":"CCIPHome","Version":"1.6.0-dev"},"0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44":{"Type":"ProposerManyChainMultiSig","Version":"1.0.0"},"0x3Aa5ebB10DC797CAC828524e59A333d0A371443c":{"Type":"BypasserManyChainMultiSig","Version":"1.0.0"},"0x4A679253410272dd5232B3Ff7cF5dbB88f295319":{"Type":"RBACTimelock","Version":"1.0.0"},"0x59b670e9fA9D0A427751Af201D676719a970857b":{"Type":"CancellerManyChainMultiSig","Version":"1.0.0"},"0x67d269191c92Caf3cD7723F116c85e6E9bf55933":{"Type":"ARMProxy","Version":"1.0.0"},"0x7a2088a1bFc9d81c55368AE168C2C02570cB814F":{"Type":"CCIPReceiver","Version":"1.0.0"},"0x84eA74d481Ee0A5332c457a4d796187F6Ba67fEB":{"Type":"TokenAdminRegistry","Version":"1.5.0"},"0x851356ae760d987E095750cCeb3bC6014560891C":{"Type":"OffRamp","Version":"1.6.0-dev"},"0x8A791620dd6260079BF849Dc5567aDC3F2FdC318":{"Type":"RMNHome","Version":"1.6.0-dev"},"0x9A676e781A523b5d0C0e43731313A708CB607508":{"Type":"WETH9","Version":"1.0.0"},"0x9A9f2CCfdE556A7E9Ff0848998Aa4a0CFD8863AE":{"Type":"AdminManyChainMultiSig","Version":"1.0.0"},"0x9E545E3C0baAB3E08CdfD552C960A1050f373042":{"Type":"NonceManager","Version":"1.6.0-dev"},"0xE6E340D132b5f46d1e472DebcD681B2aBc16e57E":{"Type":"Router","Version":"1.2.0"},"0xa513E6E4b8f2a923D98304ec87F64353C4D5C853":{"Type":"CapabilitiesRegistry","Version":"1.0.0"},"0xa82fF9aFd8f496c3d6ac40E2a0F282E47488CFc9":{"Type":"FeeQuoter","Version":"1.6.0-dev"},"0xc3e53F4d16Ae77Db1c982e75a937B9f60FE63690":{"Type":"TestRouter","Version":"1.2.0"}}}
diff --git a/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-chains-details.json b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-chains-details.json
new file mode 100644
index 00000000000..f93ea4ce231
--- /dev/null
+++ b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-chains-details.json
@@ -0,0 +1,24 @@
+[
+ {
+ "ChainID": 1337,
+ "ChainName": "alpha",
+ "ChainType": "EVM",
+ "WSRPCs": [
+ "wss://crib-local-geth-1337-ws.local:443"
+ ],
+ "HTTPRPCs": [
+ "https://crib-local-geth-1337-ws.local:443"
+ ]
+ },
+ {
+ "ChainID": 2337,
+ "ChainName": "alpha",
+ "ChainType": "EVM",
+ "WSRPCs": [
+ "wss://crib-local-geth-2337-ws.local:443"
+ ],
+ "HTTPRPCs": [
+ "https://crib-local-geth-2337-ws.local:443"
+ ]
+ }
+]
diff --git a/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-nodes-details.json b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-nodes-details.json
new file mode 100644
index 00000000000..477ae0527b1
--- /dev/null
+++ b/deployment/environment/crib/testdata/lanes-deployed-state/ccip-v2-scripts-nodes-details.json
@@ -0,0 +1 @@
+{"NodeIDs":["node_2URuou3RXmtZu5gLQX8qd","node_m9TTQbUxBx3WjDEjmpVDL","node_4FiKVPtuQjCTvHnS7QpES","node_A4VTgecDwMoG2YYicyjuG","node_jQFpzXDadzaADq147nThS"]}
diff --git a/deployment/environment/crib/types.go b/deployment/environment/crib/types.go
new file mode 100644
index 00000000000..d19c8424443
--- /dev/null
+++ b/deployment/environment/crib/types.go
@@ -0,0 +1,39 @@
+package crib
+
+import (
+ "context"
+ "github.com/smartcontractkit/chainlink-common/pkg/logger"
+ "github.com/smartcontractkit/chainlink/deployment"
+ "github.com/smartcontractkit/chainlink/deployment/environment/devenv"
+)
+
+const (
+ CRIB_ENV_NAME = "Crib Environment"
+)
+
+type DeployOutput struct {
+ NodeIDs []string
+ Chains []devenv.ChainConfig // chain selector -> Chain Config
+ AddressBook deployment.AddressBook // Addresses of all contracts
+}
+
+type DeployCCIPOutput struct {
+ AddressBook deployment.AddressBookMap
+ NodeIDs []string
+}
+
+func NewDeployEnvironmentFromCribOutput(lggr logger.Logger, output DeployOutput) (*deployment.Environment, error) {
+ chains, err := devenv.NewChains(lggr, output.Chains)
+ if err != nil {
+ return nil, err
+ }
+ return deployment.NewEnvironment(
+ CRIB_ENV_NAME,
+ lggr,
+ output.AddressBook,
+ chains,
+ output.NodeIDs,
+ nil, // todo: populate the offchain client using output.DON
+ func() context.Context { return context.Background() }, deployment.XXXGenerateTestOCRSecrets(),
+ ), nil
+}
diff --git a/deployment/environment/devenv/don.go b/deployment/environment/devenv/don.go
index 05a3d5bea08..76f6ee92b68 100644
--- a/deployment/environment/devenv/don.go
+++ b/deployment/environment/devenv/don.go
@@ -2,7 +2,9 @@ package devenv
import (
"context"
+ "errors"
"fmt"
+ chainsel "github.com/smartcontractkit/chain-selectors"
"strconv"
"strings"
"time"
@@ -10,8 +12,6 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/rs/zerolog"
"github.com/sethvargo/go-retry"
- chainsel "github.com/smartcontractkit/chain-selectors"
-
nodev1 "github.com/smartcontractkit/chainlink-protos/job-distributor/v1/node"
clclient "github.com/smartcontractkit/chainlink/deployment/environment/nodeclient"
"github.com/smartcontractkit/chainlink/deployment/environment/web/sdk/client"
@@ -185,7 +185,7 @@ type JDChainConfigInput struct {
// It expects bootstrap nodes to have label with key "type" and value as "bootstrap".
// It fetches the account address, peer id, and OCR2 key bundle id and creates the JobDistributorChainConfig.
func (n *Node) CreateCCIPOCRSupportedChains(ctx context.Context, chains []JDChainConfigInput, jd JobDistributor) error {
- for i, chain := range chains {
+ for _, chain := range chains {
chainId := strconv.FormatUint(chain.ChainID, 10)
var account string
switch chain.ChainType {
@@ -239,35 +239,51 @@ func (n *Node) CreateCCIPOCRSupportedChains(ctx context.Context, chains []JDChai
break
}
}
- // JD silently fails to update nodeChainConfig. Therefore, we fetch the node config and
- // if it's not updated , throw an error
- _, err = n.gqlClient.CreateJobDistributorChainConfig(ctx, client.JobDistributorChainConfigInput{
- JobDistributorID: n.JDId,
- ChainID: chainId,
- ChainType: chain.ChainType,
- AccountAddr: account,
- AdminAddr: n.adminAddr,
- Ocr2Enabled: true,
- Ocr2IsBootstrap: isBootstrap,
- Ocr2Multiaddr: n.multiAddr,
- Ocr2P2PPeerID: value(peerID),
- Ocr2KeyBundleID: ocr2BundleId,
- Ocr2Plugins: `{"commit":true,"execute":true,"median":false,"mercury":false}`,
+
+ // retry for up to 10 seconds with a 3-second constant backoff to create JobDistributorChainConfig
+ err = retry.Do(ctx, retry.WithMaxDuration(10*time.Second, retry.NewConstant(3*time.Second)), func(ctx context.Context) error {
+ // check the node chain config to see if this chain already exists
+ nodeChainConfigs, err := jd.ListNodeChainConfigs(context.Background(), &nodev1.ListNodeChainConfigsRequest{
+ Filter: &nodev1.ListNodeChainConfigsRequest_Filter{
+ NodeIds: []string{n.NodeId},
+ }})
+ if err != nil {
+ return retry.RetryableError(fmt.Errorf("failed to list node chain configs for node %s, retrying..: %w", n.Name, err))
+ }
+ if nodeChainConfigs != nil {
+ for _, chainConfig := range nodeChainConfigs.ChainConfigs {
+ if chainConfig.Chain.Id == chainId {
+ return nil
+ }
+ }
+ }
+
+ // JD silently fails to update nodeChainConfig. Therefore, we fetch the node config and
+ // if it's not updated , throw an error
+ _, err = n.gqlClient.CreateJobDistributorChainConfig(ctx, client.JobDistributorChainConfigInput{
+ JobDistributorID: n.JDId,
+ ChainID: chainId,
+ ChainType: chain.ChainType,
+ AccountAddr: account,
+ AdminAddr: n.adminAddr,
+ Ocr2Enabled: true,
+ Ocr2IsBootstrap: isBootstrap,
+ Ocr2Multiaddr: n.multiAddr,
+ Ocr2P2PPeerID: value(peerID),
+ Ocr2KeyBundleID: ocr2BundleId,
+ Ocr2Plugins: `{"commit":true,"execute":true,"median":false,"mercury":false}`,
+ })
+ // todo: add a check if the chain config failed because of a duplicate in that case, should we update or return success?
+ if err != nil {
+ return fmt.Errorf("failed to create CCIPOCR2SupportedChains for node %s: %w", n.Name, err)
+ }
+
+ return retry.RetryableError(errors.New("retrying CreateChainConfig in JD"))
})
+
if err != nil {
return fmt.Errorf("failed to create CCIPOCR2SupportedChains for node %s: %w", n.Name, err)
}
- // query the node chain config to check if it's created
- nodeChainConfigs, err := jd.ListNodeChainConfigs(context.Background(), &nodev1.ListNodeChainConfigsRequest{
- Filter: &nodev1.ListNodeChainConfigsRequest_Filter{
- NodeIds: []string{n.NodeId},
- }})
- if err != nil {
- return fmt.Errorf("failed to list node chain configs for node %s: %w", n.Name, err)
- }
- if nodeChainConfigs == nil || len(nodeChainConfigs.ChainConfigs) < i+1 {
- return fmt.Errorf("failed to create chain config for node %s", n.Name)
- }
}
return nil
}
@@ -377,6 +393,17 @@ func (n *Node) CreateJobDistributor(ctx context.Context, jd JobDistributor) (str
return "", err
}
// create the job distributor in the node with the csa key
+ resp, err := n.gqlClient.ListJobDistributors(ctx)
+ if err != nil {
+ return "", fmt.Errorf("could not list job distributors: %w", err)
+ }
+ if len(resp.FeedsManagers.Results) > 0 {
+ for _, fm := range resp.FeedsManagers.Results {
+ if fm.GetPublicKey() == csaKey {
+ return fm.GetId(), nil
+ }
+ }
+ }
return n.gqlClient.CreateJobDistributor(ctx, client.JobDistributorInput{
Name: "Job Distributor",
Uri: jd.WSRPC,
@@ -394,8 +421,9 @@ func (n *Node) SetUpAndLinkJobDistributor(ctx context.Context, jd JobDistributor
}
// now create the job distributor in the node
id, err := n.CreateJobDistributor(ctx, jd)
- if err != nil && !strings.Contains(err.Error(), "DuplicateFeedsManagerError") {
- return err
+ if err != nil &&
+ (!strings.Contains(err.Error(), "only a single feeds manager is supported") && !strings.Contains(err.Error(), "DuplicateFeedsManagerError")) {
+ return fmt.Errorf("failed to create job distributor in node %s: %w", n.Name, err)
}
// wait for the node to connect to the job distributor
err = retry.Do(ctx, retry.WithMaxDuration(1*time.Minute, retry.NewFibonacci(1*time.Second)), func(ctx context.Context) error {
diff --git a/deployment/environment/web/sdk/client/client.go b/deployment/environment/web/sdk/client/client.go
index 5472591ef94..e0a56b9e642 100644
--- a/deployment/environment/web/sdk/client/client.go
+++ b/deployment/environment/web/sdk/client/client.go
@@ -4,10 +4,11 @@ import (
"context"
"encoding/json"
"fmt"
+ "github.com/Khan/genqlient/graphql"
+ "github.com/sethvargo/go-retry"
"net/http"
"strings"
-
- "github.com/Khan/genqlient/graphql"
+ "time"
"github.com/smartcontractkit/chainlink/deployment/environment/web/sdk/client/doer"
"github.com/smartcontractkit/chainlink/deployment/environment/web/sdk/internal/generated"
@@ -60,8 +61,15 @@ func New(baseURI string, creds Credentials) (Client, error) {
endpoints: ep,
credentials: creds,
}
-
- if err := c.login(); err != nil {
+
+ err := retry.Do(context.Background(), retry.WithMaxDuration(10*time.Second, retry.NewFibonacci(2*time.Second)), func(ctx context.Context) error {
+ err := c.login()
+ if err != nil {
+ return retry.RetryableError(fmt.Errorf("retrying login to node: %w", err))
+ }
+ return nil
+ })
+ if err != nil {
return nil, fmt.Errorf("failed to login to node: %w", err)
}
diff --git a/deployment/keystone/changeset/accept_ownership_test.go b/deployment/keystone/changeset/accept_ownership_test.go
index b2aa1b20194..d949e63c7aa 100644
--- a/deployment/keystone/changeset/accept_ownership_test.go
+++ b/deployment/keystone/changeset/accept_ownership_test.go
@@ -1,7 +1,6 @@
package changeset_test
import (
- "math/big"
"testing"
"github.com/stretchr/testify/require"
@@ -10,6 +9,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/logger"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
@@ -44,23 +44,18 @@ func TestAcceptAllOwnership(t *testing.T) {
{
Changeset: commonchangeset.WrapChangeSet(commonchangeset.DeployMCMSWithTimelock),
Config: map[uint64]types.MCMSWithTimelockConfig{
- registrySel: {
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- },
+ registrySel: proposalutils.SingleGroupTimelockConfig(t),
},
},
})
require.NoError(t, err)
addrs, err := env.ExistingAddresses.AddressesForChain(registrySel)
require.NoError(t, err)
- timelock, err := commonchangeset.MaybeLoadMCMSWithTimelockState(env.Chains[registrySel], addrs)
+ timelock, err := commonchangeset.MaybeLoadMCMSWithTimelockChainState(env.Chains[registrySel], addrs)
require.NoError(t, err)
- _, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*commonchangeset.TimelockExecutionContracts{
- registrySel: &commonchangeset.TimelockExecutionContracts{
+ _, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*proposalutils.TimelockExecutionContracts{
+ registrySel: &proposalutils.TimelockExecutionContracts{
Timelock: timelock.Timelock,
CallProxy: timelock.CallProxy,
},
diff --git a/deployment/keystone/changeset/append_node_capabilities_test.go b/deployment/keystone/changeset/append_node_capabilities_test.go
index 159500ab5a7..bfc01b309f5 100644
--- a/deployment/keystone/changeset/append_node_capabilities_test.go
+++ b/deployment/keystone/changeset/append_node_capabilities_test.go
@@ -8,6 +8,7 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
@@ -87,7 +88,7 @@ func TestAppendNodeCapabilities(t *testing.T) {
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/deploy_forwarder_test.go b/deployment/keystone/changeset/deploy_forwarder_test.go
index dd894fde9d9..e04bac6d264 100644
--- a/deployment/keystone/changeset/deploy_forwarder_test.go
+++ b/deployment/keystone/changeset/deploy_forwarder_test.go
@@ -11,6 +11,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
)
@@ -116,11 +117,11 @@ func TestConfigureForwarders(t *testing.T) {
require.Len(t, csOut.Proposals, nChains)
require.Nil(t, csOut.AddressBook)
- timelockContracts := make(map[uint64]*commonchangeset.TimelockExecutionContracts)
+ timelockContracts := make(map[uint64]*proposalutils.TimelockExecutionContracts)
for selector, contractSet := range te.ContractSets() {
require.NotNil(t, contractSet.Timelock)
require.NotNil(t, contractSet.CallProxy)
- timelockContracts[selector] = &commonchangeset.TimelockExecutionContracts{
+ timelockContracts[selector] = &proposalutils.TimelockExecutionContracts{
Timelock: contractSet.Timelock,
CallProxy: contractSet.CallProxy,
}
diff --git a/deployment/keystone/changeset/deploy_ocr3_test.go b/deployment/keystone/changeset/deploy_ocr3_test.go
index 5d02f83500d..7a276886242 100644
--- a/deployment/keystone/changeset/deploy_ocr3_test.go
+++ b/deployment/keystone/changeset/deploy_ocr3_test.go
@@ -13,6 +13,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/logger"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
kslib "github.com/smartcontractkit/chainlink/deployment/keystone"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
@@ -118,7 +119,7 @@ func TestConfigureOCR3(t *testing.T) {
contracts := te.ContractSets()[te.RegistrySelector]
require.NoError(t, err)
- var timelockContracts = map[uint64]*commonchangeset.TimelockExecutionContracts{
+ var timelockContracts = map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/helpers_test.go b/deployment/keystone/changeset/helpers_test.go
index 4e7553d0b8e..d956db991de 100644
--- a/deployment/keystone/changeset/helpers_test.go
+++ b/deployment/keystone/changeset/helpers_test.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"math"
- "math/big"
"sort"
"testing"
@@ -21,6 +20,7 @@ import (
"github.com/smartcontractkit/chainlink/deployment"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
commontypes "github.com/smartcontractkit/chainlink/deployment/common/types"
"github.com/smartcontractkit/chainlink/deployment/environment/memory"
"github.com/smartcontractkit/chainlink/deployment/keystone"
@@ -258,12 +258,7 @@ func SetupTestEnv(t *testing.T, c TestConfig) TestEnv {
timelockCfgs := make(map[uint64]commontypes.MCMSWithTimelockConfig)
for sel := range env.Chains {
t.Logf("Enabling MCMS on chain %d", sel)
- timelockCfgs[sel] = commontypes.MCMSWithTimelockConfig{
- Canceller: commonchangeset.SingleGroupMCMS(t),
- Bypasser: commonchangeset.SingleGroupMCMS(t),
- Proposer: commonchangeset.SingleGroupMCMS(t),
- TimelockMinDelay: big.NewInt(0),
- }
+ timelockCfgs[sel] = proposalutils.SingleGroupTimelockConfig(t)
}
env, err = commonchangeset.ApplyChangesets(t, env, nil, []commonchangeset.ChangesetApplication{
{
@@ -284,7 +279,7 @@ func SetupTestEnv(t *testing.T, c TestConfig) TestEnv {
require.NoError(t, mcms.Validate())
// transfer ownership of all contracts to the MCMS
- env, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*commonchangeset.TimelockExecutionContracts{sel: {Timelock: mcms.Timelock, CallProxy: mcms.CallProxy}}, []commonchangeset.ChangesetApplication{
+ env, err = commonchangeset.ApplyChangesets(t, env, map[uint64]*proposalutils.TimelockExecutionContracts{sel: {Timelock: mcms.Timelock, CallProxy: mcms.CallProxy}}, []commonchangeset.ChangesetApplication{
{
Changeset: commonchangeset.WrapChangeSet(kschangeset.AcceptAllOwnershipsProposal),
Config: &kschangeset.AcceptAllOwnershipRequest{
diff --git a/deployment/keystone/changeset/update_don_test.go b/deployment/keystone/changeset/update_don_test.go
index 18287da6887..64cb41c14e5 100644
--- a/deployment/keystone/changeset/update_don_test.go
+++ b/deployment/keystone/changeset/update_don_test.go
@@ -7,6 +7,7 @@ import (
"github.com/stretchr/testify/require"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset/internal"
kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
@@ -118,7 +119,7 @@ func TestUpdateDon(t *testing.T) {
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/update_node_capabilities_test.go b/deployment/keystone/changeset/update_node_capabilities_test.go
index cb5588ff3d1..87b49acf614 100644
--- a/deployment/keystone/changeset/update_node_capabilities_test.go
+++ b/deployment/keystone/changeset/update_node_capabilities_test.go
@@ -8,6 +8,7 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
@@ -118,7 +119,7 @@ func TestUpdateNodeCapabilities(t *testing.T) {
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/changeset/update_nodes_test.go b/deployment/keystone/changeset/update_nodes_test.go
index be3bfb12ee6..31f71cd9603 100644
--- a/deployment/keystone/changeset/update_nodes_test.go
+++ b/deployment/keystone/changeset/update_nodes_test.go
@@ -9,6 +9,7 @@ import (
"golang.org/x/exp/maps"
commonchangeset "github.com/smartcontractkit/chainlink/deployment/common/changeset"
+ "github.com/smartcontractkit/chainlink/deployment/common/proposalutils"
"github.com/smartcontractkit/chainlink/deployment/keystone/changeset"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
)
@@ -89,7 +90,7 @@ func TestUpdateNodes(t *testing.T) {
// now apply the changeset such that the proposal is signed and execed
contracts := te.ContractSets()[te.RegistrySelector]
- timelockContracts := map[uint64]*commonchangeset.TimelockExecutionContracts{
+ timelockContracts := map[uint64]*proposalutils.TimelockExecutionContracts{
te.RegistrySelector: {
Timelock: contracts.Timelock,
CallProxy: contracts.CallProxy,
diff --git a/deployment/keystone/state.go b/deployment/keystone/state.go
index cbf449c7f31..0ac7cdc89ed 100644
--- a/deployment/keystone/state.go
+++ b/deployment/keystone/state.go
@@ -78,7 +78,7 @@ func GetContractSets(lggr logger.Logger, req *GetContractSetsRequest) (*GetContr
func loadContractSet(lggr logger.Logger, chain deployment.Chain, addresses map[string]deployment.TypeAndVersion) (*ContractSet, error) {
var out ContractSet
- mcmsWithTimelock, err := commonchangeset.MaybeLoadMCMSWithTimelockState(chain, addresses)
+ mcmsWithTimelock, err := commonchangeset.MaybeLoadMCMSWithTimelockChainState(chain, addresses)
if err != nil {
return nil, fmt.Errorf("failed to load mcms contract: %w", err)
}
diff --git a/deployment/solana_chain.go b/deployment/solana_chain.go
new file mode 100644
index 00000000000..338642e3e32
--- /dev/null
+++ b/deployment/solana_chain.go
@@ -0,0 +1,5 @@
+package deployment
+
+// SolChain represents a Solana chain.
+type SolChain struct {
+}
diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod
index 25de46a971e..b53ab2cd248 100644
--- a/integration-tests/load/go.mod
+++ b/integration-tests/load/go.mod
@@ -27,7 +27,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.33.0
github.com/slack-go/slack v0.15.0
- github.com/smartcontractkit/chainlink-common v0.3.1-0.20241210192653-a9c706f99e83
+ github.com/smartcontractkit/chainlink-common v0.3.1-0.20241212163958-6a43e61b9d49
github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.19
github.com/smartcontractkit/chainlink-testing-framework/seth v1.50.9
github.com/smartcontractkit/chainlink-testing-framework/wasp v1.50.2
@@ -398,6 +398,7 @@ require (
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
+ github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shirou/gopsutil/v3 v3.24.3 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
diff --git a/integration-tests/smoke/ccip/ccip_rmn_test.go b/integration-tests/smoke/ccip/ccip_rmn_test.go
index adf07be290f..c22f9bcf20e 100644
--- a/integration-tests/smoke/ccip/ccip_rmn_test.go
+++ b/integration-tests/smoke/ccip/ccip_rmn_test.go
@@ -35,6 +35,7 @@ import (
)
func TestRMN_TwoMessagesOnTwoLanesIncludingBatching(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "messages on two lanes including batching",
waitForExec: true,
@@ -58,6 +59,7 @@ func TestRMN_TwoMessagesOnTwoLanesIncludingBatching(t *testing.T) {
}
func TestRMN_MultipleMessagesOnOneLaneNoWaitForExec(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "multiple messages for rmn batching inspection and one rmn node down",
waitForExec: false, // do not wait for execution reports
@@ -80,6 +82,7 @@ func TestRMN_MultipleMessagesOnOneLaneNoWaitForExec(t *testing.T) {
}
func TestRMN_NotEnoughObservers(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "one message but not enough observers, should not get a commit report",
passIfNoCommitAfter: 15 * time.Second,
@@ -102,6 +105,7 @@ func TestRMN_NotEnoughObservers(t *testing.T) {
}
func TestRMN_DifferentSigners(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "different signers and different observers",
homeChainConfig: homeChainConfig{
@@ -126,6 +130,7 @@ func TestRMN_DifferentSigners(t *testing.T) {
}
func TestRMN_NotEnoughSigners(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "different signers and different observers",
passIfNoCommitAfter: 15 * time.Second,
@@ -151,6 +156,7 @@ func TestRMN_NotEnoughSigners(t *testing.T) {
}
func TestRMN_DifferentRmnNodesForDifferentChains(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "different rmn nodes support different chains",
waitForExec: false,
@@ -177,6 +183,7 @@ func TestRMN_DifferentRmnNodesForDifferentChains(t *testing.T) {
}
func TestRMN_TwoMessagesOneSourceChainCursed(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "two messages, one source chain is cursed",
passIfNoCommitAfter: 15 * time.Second,
@@ -203,6 +210,7 @@ func TestRMN_TwoMessagesOneSourceChainCursed(t *testing.T) {
}
func TestRMN_GlobalCurseTwoMessagesOnTwoLanes(t *testing.T) {
+ t.Skip("This test is flaky and needs to be fixed")
runRmnTestCase(t, rmnTestCase{
name: "global curse messages on two lanes",
waitForExec: false,
diff --git a/integration-tests/testconfig/ccip/config.go b/integration-tests/testconfig/ccip/config.go
index 72c81f05f47..70c850fd591 100644
--- a/integration-tests/testconfig/ccip/config.go
+++ b/integration-tests/testconfig/ccip/config.go
@@ -147,6 +147,9 @@ func (o *JDConfig) GetJDDBVersion() string {
func (o *Config) Validate() error {
var chainIds []int64
for _, net := range o.PrivateEthereumNetworks {
+ if net.EthereumChainConfig.ChainID < 0 {
+ return fmt.Errorf("negative chain ID found for network %d", net.EthereumChainConfig.ChainID)
+ }
chainIds = append(chainIds, int64(net.EthereumChainConfig.ChainID))
}
homeChainSelector, err := strconv.ParseUint(pointer.GetString(o.HomeChainSelector), 10, 64)
@@ -189,14 +192,21 @@ func IsSelectorValid(selector uint64, chainIds []int64) (bool, error) {
if err != nil {
return false, err
}
- if chainId >= math.MaxInt64 {
- return false, fmt.Errorf("chain id overflows int64: %d", chainId)
- }
- expId := int64(chainId)
- for _, id := range chainIds {
- if id == expId {
+
+ for _, cID := range chainIds {
+ if isEqualUint64AndInt64(chainId, cID) {
return true, nil
}
}
return false, nil
}
+
+func isEqualUint64AndInt64(u uint64, i int64) bool {
+ if i < 0 {
+ return false // uint64 cannot be equal to a negative int64
+ }
+ if u > math.MaxInt64 {
+ return false // uint64 cannot be equal to an int64 if it exceeds the maximum int64 value
+ }
+ return u == uint64(i)
+}
diff --git a/plugins/registrar.go b/plugins/registrar.go
index 2a82f2a6204..8523d3980cc 100644
--- a/plugins/registrar.go
+++ b/plugins/registrar.go
@@ -6,7 +6,7 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/loop"
)
-// RegistrarConfig generates contains static configuration inher
+// RegistrarConfig contains static configuration
type RegistrarConfig interface {
RegisterLOOP(config CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error)
UnregisterLOOP(ID string)