diff --git a/.changeset/moody-swans-worry.md b/.changeset/moody-swans-worry.md
new file mode 100644
index 00000000000..bef24b9e975
--- /dev/null
+++ b/.changeset/moody-swans-worry.md
@@ -0,0 +1,9 @@
+---
+"chainlink": major
+---
+
+Remove support for Mercury #removed
+
+Remove support for the wsrpc protocol for LLO
+
+Remove `Mercury.Cache` configuration options
diff --git a/core/cmd/shell.go b/core/cmd/shell.go
index 8061cabc229..71a8c380e39 100644
--- a/core/cmd/shell.go
+++ b/core/cmd/shell.go
@@ -49,8 +49,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/keystore"
"github.com/smartcontractkit/chainlink/v2/core/services/llo"
"github.com/smartcontractkit/chainlink/v2/core/services/periodicbackup"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/cache"
"github.com/smartcontractkit/chainlink/v2/core/services/versioning"
"github.com/smartcontractkit/chainlink/v2/core/services/webhook"
"github.com/smartcontractkit/chainlink/v2/core/services/workflows"
@@ -231,12 +229,6 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G
loopRegistry := plugins.NewLoopRegistry(appLggr, cfg.Database(), cfg.Tracing(), cfg.Telemetry(), beholderAuthHeaders, csaPubKeyHex)
- mercuryPool := wsrpc.NewPool(appLggr, cache.Config{
- LatestReportTTL: cfg.Mercury().Cache().LatestReportTTL(),
- MaxStaleAge: cfg.Mercury().Cache().MaxStaleAge(),
- LatestReportDeadline: cfg.Mercury().Cache().LatestReportDeadline(),
- })
-
capabilitiesRegistry := capabilities.NewRegistry(appLggr)
retirementReportCache := llo.NewRetirementReportCache(appLggr, ds)
@@ -249,7 +241,6 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G
Registerer: appRegisterer,
LoopRegistry: loopRegistry,
GRPCOpts: grpcOpts,
- MercuryPool: mercuryPool,
CapabilitiesRegistry: capabilitiesRegistry,
HTTPClient: unrestrictedClient,
RetirementReportCache: retirementReportCache,
@@ -325,7 +316,6 @@ func (n ChainlinkAppFactory) NewApplication(ctx context.Context, cfg chainlink.G
SecretGenerator: chainlink.FilePersistedSecretGenerator{},
LoopRegistry: loopRegistry,
GRPCOpts: grpcOpts,
- MercuryPool: mercuryPool,
RetirementReportCache: retirementReportCache,
LLOTransmissionReaper: lloReaper,
})
diff --git a/core/config/docs/core.toml b/core/config/docs/core.toml
index 6f4a3fa5cb9..70a1dd0ae3c 100644
--- a/core/config/docs/core.toml
+++ b/core/config/docs/core.toml
@@ -683,27 +683,6 @@ env = 'test' # Example
# by default.
VerboseLogging = false # Default
-# Mercury.Cache controls settings for the price retrieval cache querying a mercury server
-[Mercury.Cache]
-# LatestReportTTL controls how "stale" we will allow a price to be e.g. if
-# set to 1s, a new price will always be fetched if the last result was
-# from 1 second ago or older.
-#
-# Another way of looking at it is such: the cache will _never_ return a
-# price that was queried from now-LatestReportTTL or before.
-#
-# Setting to zero disables caching entirely.
-LatestReportTTL = "1s" # Default
-# MaxStaleAge is that maximum amount of time that a value can be stale
-# before it is deleted from the cache (a form of garbage collection).
-#
-# This should generally be set to something much larger than
-# LatestReportTTL. Setting to zero disables garbage collection.
-MaxStaleAge = "1h" # Default
-# LatestReportDeadline controls how long to wait for a response from the
-# mercury server before retrying. Setting this to zero will wait indefinitely.
-LatestReportDeadline = "5s" # Default
-
# Mercury.TLS controls client settings for when the node talks to traditional web servers or load balancers.
[Mercury.TLS]
# CertFile is the path to a PEM file of trusted root certificate authority certificates
@@ -713,8 +692,7 @@ CertFile = "/path/to/client/certs.pem" # Example
[Mercury.Transmitter]
# Protocol is the protocol to use for the transmitter.
#
-# Options are either:
-# - "wsrpc" for the legacy websocket protocol
+# Options are currently:
# - "grpc" for the gRPC protocol
Protocol = "grpc" # Default
# TransmitQueueMaxSize controls the size of the transmit queue. This is scoped
diff --git a/core/config/mercury_config.go b/core/config/mercury_config.go
index 6b0c97d5e91..76bdaf457ea 100644
--- a/core/config/mercury_config.go
+++ b/core/config/mercury_config.go
@@ -2,18 +2,11 @@ package config
import (
"fmt"
- "time"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-common/pkg/types"
)
-type MercuryCache interface {
- LatestReportTTL() time.Duration
- MaxStaleAge() time.Duration
- LatestReportDeadline() time.Duration
-}
-
type MercuryTLS interface {
CertFile() string
}
@@ -21,8 +14,7 @@ type MercuryTLS interface {
type MercuryTransmitterProtocol string
const (
- MercuryTransmitterProtocolWSRPC MercuryTransmitterProtocol = "wsrpc"
- MercuryTransmitterProtocolGRPC MercuryTransmitterProtocol = "grpc"
+ MercuryTransmitterProtocolGRPC MercuryTransmitterProtocol = "grpc"
)
func (m MercuryTransmitterProtocol) String() string {
@@ -31,8 +23,6 @@ func (m MercuryTransmitterProtocol) String() string {
func (m *MercuryTransmitterProtocol) UnmarshalText(text []byte) error {
switch string(text) {
- case "wsrpc":
- *m = MercuryTransmitterProtocolWSRPC
case "grpc":
*m = MercuryTransmitterProtocolGRPC
default:
@@ -52,7 +42,6 @@ type MercuryTransmitter interface {
type Mercury interface {
Credentials(credName string) *types.MercuryCredentials
- Cache() MercuryCache
TLS() MercuryTLS
Transmitter() MercuryTransmitter
VerboseLogging() bool
diff --git a/core/config/toml/types.go b/core/config/toml/types.go
index 1dee6ec6352..a8b9ef0d24e 100644
--- a/core/config/toml/types.go
+++ b/core/config/toml/types.go
@@ -1422,24 +1422,6 @@ func (ins *Insecure) setFrom(f *Insecure) {
}
}
-type MercuryCache struct {
- LatestReportTTL *commonconfig.Duration
- MaxStaleAge *commonconfig.Duration
- LatestReportDeadline *commonconfig.Duration
-}
-
-func (mc *MercuryCache) setFrom(f *MercuryCache) {
- if v := f.LatestReportTTL; v != nil {
- mc.LatestReportTTL = v
- }
- if v := f.MaxStaleAge; v != nil {
- mc.MaxStaleAge = v
- }
- if v := f.LatestReportDeadline; v != nil {
- mc.LatestReportDeadline = v
- }
-}
-
type MercuryTLS struct {
CertFile *string
}
@@ -1490,14 +1472,12 @@ func (m *MercuryTransmitter) setFrom(f *MercuryTransmitter) {
}
type Mercury struct {
- Cache MercuryCache `toml:",omitempty"`
TLS MercuryTLS `toml:",omitempty"`
Transmitter MercuryTransmitter `toml:",omitempty"`
VerboseLogging *bool `toml:",omitempty"`
}
func (m *Mercury) setFrom(f *Mercury) {
- m.Cache.setFrom(&f.Cache)
m.TLS.setFrom(&f.TLS)
m.Transmitter.setFrom(&f.Transmitter)
if v := f.VerboseLogging; v != nil {
diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go
index 4c24c7712ed..3dd15795999 100644
--- a/core/internal/cltest/cltest.go
+++ b/core/internal/cltest/cltest.go
@@ -86,8 +86,6 @@ import (
p2ptypes "github.com/smartcontractkit/chainlink/v2/core/services/p2p/types"
"github.com/smartcontractkit/chainlink/v2/core/services/pg"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/cache"
"github.com/smartcontractkit/chainlink/v2/core/services/standardcapabilities"
"github.com/smartcontractkit/chainlink/v2/core/services/webhook"
clsessions "github.com/smartcontractkit/chainlink/v2/core/sessions"
@@ -415,12 +413,6 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn
mailMon := mailbox.NewMonitor(cfg.AppID().String(), lggr.Named("Mailbox"))
loopRegistry := plugins.NewLoopRegistry(lggr, cfg.Database(), cfg.Tracing(), cfg.Telemetry(), nil, "")
- mercuryPool := wsrpc.NewPool(lggr, cache.Config{
- LatestReportTTL: cfg.Mercury().Cache().LatestReportTTL(),
- MaxStaleAge: cfg.Mercury().Cache().MaxStaleAge(),
- LatestReportDeadline: cfg.Mercury().Cache().LatestReportDeadline(),
- })
-
c := clhttptest.NewTestLocalOnlyHTTPClient()
retirementReportCache := llo.NewRetirementReportCache(lggr, ds)
relayerFactory := chainlink.RelayerFactory{
@@ -428,7 +420,6 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn
LoopRegistry: loopRegistry,
GRPCOpts: loop.GRPCOpts{},
Registerer: prometheus.NewRegistry(), // Don't use global registry here since otherwise multiple apps can create name conflicts. Could also potentially give a mock registry to test prometheus.
- MercuryPool: mercuryPool,
CapabilitiesRegistry: capabilitiesRegistry,
HTTPClient: c,
RetirementReportCache: retirementReportCache,
@@ -510,7 +501,9 @@ func NewApplicationWithConfig(t testing.TB, cfg chainlink.GeneralConfig, flagsAn
UnrestrictedHTTPClient: c,
SecretGenerator: MockSecretGenerator{},
LoopRegistry: plugins.NewTestLoopRegistry(lggr),
- MercuryPool: mercuryPool,
+ CapabilitiesRegistry: capabilitiesRegistry,
+ CapabilitiesDispatcher: dispatcher,
+ CapabilitiesPeerWrapper: peerWrapper,
NewOracleFactoryFn: newOracleFactoryFn,
RetirementReportCache: retirementReportCache,
LLOTransmissionReaper: llo.NewTransmissionReaper(ds, lggr, cfg.Mercury().Transmitter().ReaperFrequency().Duration(), cfg.Mercury().Transmitter().ReaperMaxAge().Duration()),
diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go
index 801c66ca7c5..f0cb297e1ec 100644
--- a/core/services/chainlink/application.go
+++ b/core/services/chainlink/application.go
@@ -67,8 +67,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/periodicbackup"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
"github.com/smartcontractkit/chainlink/v2/core/services/registrysyncer"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc"
"github.com/smartcontractkit/chainlink/v2/core/services/standardcapabilities"
"github.com/smartcontractkit/chainlink/v2/core/services/streams"
"github.com/smartcontractkit/chainlink/v2/core/services/telemetry"
@@ -195,7 +193,6 @@ type ApplicationOpts struct {
SecretGenerator SecretGenerator
LoopRegistry *plugins.LoopRegistry
GRPCOpts loop.GRPCOpts
- MercuryPool wsrpc.Pool
RetirementReportCache llo.RetirementReportCache
LLOTransmissionReaper services.ServiceCtx
NewOracleFactoryFn standardcapabilities.NewOracleFactoryFn
@@ -349,10 +346,7 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
globalLogger.Info("DatabaseBackup: periodic database backups are disabled. To enable automatic backups, set Database.Backup.Mode=lite or Database.Backup.Mode=full")
}
- // pool must be started before all relayers and stopped after them
- if opts.MercuryPool != nil {
- srvcs = append(srvcs, opts.MercuryPool)
- }
+ // LLO-related services
if opts.RetirementReportCache != nil {
srvcs = append(srvcs, opts.RetirementReportCache)
}
@@ -403,7 +397,6 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
var (
pipelineORM = pipeline.NewORM(opts.DS, globalLogger, cfg.JobPipeline().MaxSuccessfulRuns())
bridgeORM = bridges.NewORM(opts.DS)
- mercuryORM = mercury.NewORM(opts.DS)
pipelineRunner = pipeline.NewRunner(pipelineORM, bridgeORM, cfg.JobPipeline(), cfg.WebServer(), legacyEVMChains, keyStore.Eth(), keyStore.VRF(), globalLogger, restrictedHTTPClient, unrestrictedHTTPClient)
jobORM = job.NewORM(opts.DS, pipelineORM, bridgeORM, keyStore, globalLogger)
txmORM = txmgr.NewTxStore(opts.DS, globalLogger)
@@ -565,7 +558,6 @@ func NewApplication(opts ApplicationOpts) (Application, error) {
Ds: opts.DS,
JobORM: jobORM,
BridgeORM: bridgeORM,
- MercuryORM: mercuryORM,
PipelineRunner: pipelineRunner,
StreamRegistry: streamRegistry,
PeerWrapper: peerWrapper,
diff --git a/core/services/chainlink/config_mercury.go b/core/services/chainlink/config_mercury.go
index d594e80ec96..cdbcaea6875 100644
--- a/core/services/chainlink/config_mercury.go
+++ b/core/services/chainlink/config_mercury.go
@@ -1,8 +1,6 @@
package chainlink
import (
- "time"
-
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
"github.com/smartcontractkit/chainlink-common/pkg/types"
@@ -10,22 +8,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/config/toml"
)
-var _ config.MercuryCache = (*mercuryCacheConfig)(nil)
-
-type mercuryCacheConfig struct {
- c toml.MercuryCache
-}
-
-func (m *mercuryCacheConfig) LatestReportTTL() time.Duration {
- return m.c.LatestReportTTL.Duration()
-}
-func (m *mercuryCacheConfig) MaxStaleAge() time.Duration {
- return m.c.MaxStaleAge.Duration()
-}
-func (m *mercuryCacheConfig) LatestReportDeadline() time.Duration {
- return m.c.LatestReportDeadline.Duration()
-}
-
var _ config.MercuryTLS = (*mercuryTLSConfig)(nil)
type mercuryTLSConfig struct {
@@ -86,10 +68,6 @@ func (m *mercuryConfig) Credentials(credName string) *types.MercuryCredentials {
return nil
}
-func (m *mercuryConfig) Cache() config.MercuryCache {
- return &mercuryCacheConfig{c: m.c.Cache}
-}
-
func (m *mercuryConfig) TLS() config.MercuryTLS {
return &mercuryTLSConfig{c: m.c.TLS}
}
diff --git a/core/services/chainlink/config_test.go b/core/services/chainlink/config_test.go
index 0a5e329b3bc..ea5882e8e56 100644
--- a/core/services/chainlink/config_test.go
+++ b/core/services/chainlink/config_test.go
@@ -789,11 +789,6 @@ func TestConfig_Marshal(t *testing.T) {
},
}
full.Mercury = toml.Mercury{
- Cache: toml.MercuryCache{
- LatestReportTTL: commoncfg.MustNewDuration(100 * time.Second),
- MaxStaleAge: commoncfg.MustNewDuration(101 * time.Second),
- LatestReportDeadline: commoncfg.MustNewDuration(102 * time.Second),
- },
TLS: toml.MercuryTLS{
CertFile: ptr("/path/to/cert.pem"),
},
@@ -1268,11 +1263,6 @@ SendOnly = true
{"Mercury", Config{Core: toml.Core{Mercury: full.Mercury}}, `[Mercury]
VerboseLogging = true
-[Mercury.Cache]
-LatestReportTTL = '1m40s'
-MaxStaleAge = '1m41s'
-LatestReportDeadline = '1m42s'
-
[Mercury.TLS]
CertFile = '/path/to/cert.pem'
diff --git a/core/services/chainlink/relayer_factory.go b/core/services/chainlink/relayer_factory.go
index 21275831f17..f4f122ad60d 100644
--- a/core/services/chainlink/relayer_factory.go
+++ b/core/services/chainlink/relayer_factory.go
@@ -25,7 +25,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/relay"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/dummy"
evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc"
"github.com/smartcontractkit/chainlink/v2/plugins"
)
@@ -34,7 +33,6 @@ type RelayerFactory struct {
*plugins.LoopRegistry
loop.GRPCOpts
Registerer prometheus.Registerer
- MercuryPool wsrpc.Pool
CapabilitiesRegistry coretypes.CapabilitiesRegistry
HTTPClient *http.Client
RetirementReportCache llo.RetirementReportCache
@@ -80,7 +78,6 @@ func (r *RelayerFactory) NewEVM(ctx context.Context, config EVMFactoryConfig) (m
DS: ccOpts.DS,
Registerer: r.Registerer,
CSAETHKeystore: config.CSAETHKeystore,
- MercuryPool: r.MercuryPool,
MercuryConfig: config.MercuryConfig,
CapabilitiesRegistry: r.CapabilitiesRegistry,
HTTPClient: r.HTTPClient,
diff --git a/core/services/chainlink/testdata/config-empty-effective.toml b/core/services/chainlink/testdata/config-empty-effective.toml
index 50c53108afe..dc15e2a315d 100644
--- a/core/services/chainlink/testdata/config-empty-effective.toml
+++ b/core/services/chainlink/testdata/config-empty-effective.toml
@@ -227,11 +227,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/core/services/chainlink/testdata/config-full.toml b/core/services/chainlink/testdata/config-full.toml
index e1fb256ef73..0fc6f8ae747 100644
--- a/core/services/chainlink/testdata/config-full.toml
+++ b/core/services/chainlink/testdata/config-full.toml
@@ -237,11 +237,6 @@ test = 'load'
[Mercury]
VerboseLogging = true
-[Mercury.Cache]
-LatestReportTTL = '1m40s'
-MaxStaleAge = '1m41s'
-LatestReportDeadline = '1m42s'
-
[Mercury.TLS]
CertFile = '/path/to/cert.pem'
diff --git a/core/services/chainlink/testdata/config-multi-chain-effective.toml b/core/services/chainlink/testdata/config-multi-chain-effective.toml
index 418d59a0e90..5cc2160c32e 100644
--- a/core/services/chainlink/testdata/config-multi-chain-effective.toml
+++ b/core/services/chainlink/testdata/config-multi-chain-effective.toml
@@ -227,11 +227,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/core/services/feeds/service.go b/core/services/feeds/service.go
index b789749a384..82683e737b6 100644
--- a/core/services/feeds/service.go
+++ b/core/services/feeds/service.go
@@ -1302,24 +1302,17 @@ func (s *service) Unsafe_SetConnectionsManager(connMgr ConnectionsManager) {
// findExistingJobForOCR2 looks for existing job for OCR2
func findExistingJobForOCR2(ctx context.Context, j *job.Job, tx job.ORM) (int32, error) {
var contractID string
- var feedID *common.Hash
switch j.Type {
case job.OffchainReporting2:
contractID = j.OCR2OracleSpec.ContractID
- feedID = j.OCR2OracleSpec.FeedID
case job.Bootstrap:
contractID = j.BootstrapSpec.ContractID
- if j.BootstrapSpec.FeedID != nil {
- feedID = j.BootstrapSpec.FeedID
- }
- case job.FluxMonitor, job.OffchainReporting:
- return 0, errors.Errorf("contractID and feedID not applicable for job type: %s", j.Type)
default:
return 0, errors.Errorf("unsupported job type: %s", j.Type)
}
- return tx.FindOCR2JobIDByAddress(ctx, contractID, feedID)
+ return tx.FindOCR2JobIDByAddress(ctx, contractID)
}
// findExistingJobForOCRFlux looks for existing job for OCR or flux
diff --git a/core/services/feeds/service_test.go b/core/services/feeds/service_test.go
index 0820a3e7e21..e07198aefd5 100644
--- a/core/services/feeds/service_test.go
+++ b/core/services/feeds/service_test.go
@@ -3004,8 +3004,6 @@ answer1 [type=median index=0];
func Test_Service_ApproveSpec_OCR2(t *testing.T) {
address := "0x613a38AC1659769640aaE063C651F48E0250454C"
- feedIDHex := "0x0000000000000000000000000000000000000000000000000000000000000001"
- feedID := common.HexToHash(feedIDHex)
externalJobID := uuid.New()
var (
@@ -3046,38 +3044,6 @@ answer1 [type=median index=0];
"""
[pluginConfig.juelsPerFeeCoinCache]
updateInterval = "30s"
-`
- defn2 = `
-name = 'LINK / ETH | version 3 | contract 0x0000000000000000000000000000000000000000'
-type = "offchainreporting2"
-pluginType = "median"
-schemaVersion = 1
-relay = "evm"
-contractID = "0x613a38AC1659769640aaE063C651F48E0250454C"
-externalJobID = '%s'
-feedID = '%s'
-observationSource = """
-// data source 1
-ds1 [type=bridge name=\"bridge-api0\" requestData="{\\\"data\\": {\\\"from\\\":\\\"LINK\\\",\\\"to\\\":\\\"ETH\\\"}}"];
-ds1_parse [type=jsonparse path="result"];
-ds1_multiply [type=multiply times=1000000000000000000];
-ds1 -> ds1_parse -> ds1_multiply -> answer1;
-
-answer1 [type=median index=0];
-"""
-[relayConfig]
-chainID = 0
-[pluginConfig]
-juelsPerFeeCoinSource = """
-ds1 [type=bridge name=voter_turnout];
-ds1_parse [type=jsonparse path="one,two"];
-ds1_multiply [type=multiply times=1.23];
-ds1 -> ds1_parse -> ds1_multiply -> answer1;
-answer1 [type=median index=0];
-"""
-# intentionally do not set gasPriceSubunitsSource for this pipeline example to cover case when none is set
-[pluginConfig.juelsPerFeeCoinCache]
-updateInterval = "20m"
`
jp = &feeds.JobProposal{
@@ -3128,7 +3094,7 @@ updateInterval = "20m"
svc.orm.On("GetJobProposal", mock.Anything, jp.ID).Return(jp, nil)
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -3170,7 +3136,7 @@ updateInterval = "20m"
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -3237,7 +3203,7 @@ updateInterval = "20m"
svc.orm.On("GetJobProposal", mock.Anything, jp.ID).Return(jp, nil)
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(j.ID, nil)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(j.ID, nil)
svc.orm.On("WithDataSource", mock.Anything).Return(feeds.ORM(svc.orm))
svc.jobORM.On("WithDataSource", mock.Anything).Return(job.ORM(svc.jobORM))
},
@@ -3255,55 +3221,7 @@ updateInterval = "20m"
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.orm.EXPECT().GetApprovedSpec(mock.Anything, jp.ID).Return(nil, sql.ErrNoRows)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(j.ID, nil)
- svc.spawner.On("DeleteJob", mock.Anything, mock.Anything, j.ID).Return(nil)
-
- svc.spawner.
- On("CreateJob",
- mock.Anything,
- mock.Anything,
- mock.MatchedBy(func(j *job.Job) bool {
- return j.Name.String == "LINK / ETH | version 3 | contract 0x0000000000000000000000000000000000000000"
- }),
- ).
- Run(func(args mock.Arguments) { (args.Get(2).(*job.Job)).ID = 1 }).
- Return(nil)
- svc.orm.On("ApproveSpec",
- mock.Anything,
- spec.ID,
- externalJobID,
- ).Return(nil)
- svc.fmsClient.On("ApprovedJob",
- mock.MatchedBy(func(ctx context.Context) bool { return true }),
- &proto.ApprovedJobRequest{
- Uuid: jp.RemoteUUID.String(),
- Version: int64(spec.Version),
- },
- ).Return(&proto.ApprovedJobResponse{}, nil)
- svc.orm.On("CountJobProposalsByStatus", mock.Anything).Return(&feeds.JobProposalCounts{}, nil)
- svc.orm.On("WithDataSource", mock.Anything).Return(feeds.ORM(svc.orm))
- svc.jobORM.On("WithDataSource", mock.Anything).Return(job.ORM(svc.jobORM))
- },
- id: spec.ID,
- force: true,
- },
- {
- name: "already existing self managed job replacement success if forced with feedID",
- httpTimeout: commonconfig.MustNewDuration(1 * time.Minute),
- before: func(svc *TestService) {
- svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil)
- svc.orm.On("GetSpec", mock.Anything, spec.ID).Return(&feeds.JobProposalSpec{
- ID: 20,
- Status: feeds.SpecStatusPending,
- JobProposalID: jp.ID,
- Version: 1,
- Definition: fmt.Sprintf(defn2, externalJobID.String(), &feedID),
- }, nil)
- svc.orm.On("GetJobProposal", mock.Anything, jp.ID).Return(jp, nil)
- svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
- svc.orm.EXPECT().GetApprovedSpec(mock.Anything, jp.ID).Return(nil, sql.ErrNoRows)
- svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, &feedID).Return(j.ID, nil)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(j.ID, nil)
svc.spawner.On("DeleteJob", mock.Anything, mock.Anything, j.ID).Return(nil)
svc.spawner.
@@ -3346,7 +3264,7 @@ updateInterval = "20m"
svc.orm.EXPECT().GetApprovedSpec(mock.Anything, jp.ID).Return(&feeds.JobProposalSpec{ID: 100}, nil)
svc.orm.EXPECT().CancelSpec(mock.Anything, int64(100)).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(j.ID, nil)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(j.ID, nil)
svc.spawner.On("DeleteJob", mock.Anything, mock.Anything, j.ID).Return(nil)
svc.spawner.
@@ -3459,7 +3377,7 @@ updateInterval = "20m"
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -3487,7 +3405,7 @@ updateInterval = "20m"
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -3521,7 +3439,7 @@ updateInterval = "20m"
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -3795,54 +3713,6 @@ func Test_Service_ApproveSpec_Stream(t *testing.T) {
id: spec.ID,
force: true,
},
- {
- name: "already existing self managed job replacement success if forced with feedID",
- httpTimeout: commonconfig.MustNewDuration(1 * time.Minute),
- before: func(svc *TestService) {
- svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil)
- svc.orm.On("GetSpec", mock.Anything, spec.ID).Return(&feeds.JobProposalSpec{
- ID: 20,
- Status: feeds.SpecStatusPending,
- JobProposalID: jp.ID,
- Version: 1,
- Definition: fmt.Sprintf(StreamTestSpecTemplate, streamName, externalJobID.String(), streamID),
- }, nil)
- svc.orm.On("GetJobProposal", mock.Anything, jp.ID).Return(jp, nil)
- svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
- svc.orm.EXPECT().GetApprovedSpec(mock.Anything, jp.ID).Return(nil, sql.ErrNoRows)
- svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindJobIDByStreamID", mock.Anything, mock.Anything).Return(j.ID, nil)
- svc.spawner.On("DeleteJob", mock.Anything, mock.Anything, j.ID).Return(nil)
-
- svc.spawner.
- On("CreateJob",
- mock.Anything,
- mock.Anything,
- mock.MatchedBy(func(j *job.Job) bool {
- return j.Name.String == streamName
- }),
- ).
- Run(func(args mock.Arguments) { (args.Get(2).(*job.Job)).ID = 1 }).
- Return(nil)
- svc.orm.On("ApproveSpec",
- mock.Anything,
- spec.ID,
- externalJobID,
- ).Return(nil)
- svc.fmsClient.On("ApprovedJob",
- mock.MatchedBy(func(ctx context.Context) bool { return true }),
- &proto.ApprovedJobRequest{
- Uuid: jp.RemoteUUID.String(),
- Version: int64(spec.Version),
- },
- ).Return(&proto.ApprovedJobResponse{}, nil)
- svc.orm.On("CountJobProposalsByStatus", mock.Anything).Return(&feeds.JobProposalCounts{}, nil)
- svc.orm.On("WithDataSource", mock.Anything).Return(feeds.ORM(svc.orm))
- svc.jobORM.On("WithDataSource", mock.Anything).Return(job.ORM(svc.jobORM))
- },
- id: spec.ID,
- force: true,
- },
{
name: "already existing FMS managed job replacement success if forced",
httpTimeout: commonconfig.MustNewDuration(1 * time.Minute),
@@ -4090,8 +3960,6 @@ func Test_Service_ApproveSpec_Stream(t *testing.T) {
func Test_Service_ApproveSpec_Bootstrap(t *testing.T) {
address := "0x613a38AC1659769640aaE063C651F48E0250454C"
- feedIDHex := "0x0000000000000000000000000000000000000000000000000000000000000001"
- feedID := common.HexToHash(feedIDHex)
externalJobID := uuid.New()
var (
@@ -4104,18 +3972,6 @@ contractID = '0x613a38AC1659769640aaE063C651F48E0250454C'
externalJobID = '%s'
relay = 'evm'
-[relayConfig]
-chainID = 0
-`
- defn2 = `
-name = 'LINK / ETH | version 3 | contract 0x0000000000000000000000000000000000000000'
-type = 'bootstrap'
-schemaVersion = 1
-contractID = '0x613a38AC1659769640aaE063C651F48E0250454C'
-externalJobID = '%s'
-feedID = '%s'
-relay = 'evm'
-
[relayConfig]
chainID = 0
`
@@ -4168,7 +4024,7 @@ chainID = 0
svc.orm.On("GetJobProposal", mock.Anything, jp.ID).Return(jp, nil)
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -4210,7 +4066,7 @@ chainID = 0
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -4277,7 +4133,7 @@ chainID = 0
svc.orm.On("GetJobProposal", mock.Anything, jp.ID).Return(jp, nil)
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(j.ID, nil)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(j.ID, nil)
svc.orm.On("WithDataSource", mock.Anything).Return(feeds.ORM(svc.orm))
svc.jobORM.On("WithDataSource", mock.Anything).Return(job.ORM(svc.jobORM))
},
@@ -4295,55 +4151,7 @@ chainID = 0
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.orm.EXPECT().GetApprovedSpec(mock.Anything, jp.ID).Return(nil, sql.ErrNoRows)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(j.ID, nil)
- svc.spawner.On("DeleteJob", mock.Anything, mock.Anything, j.ID).Return(nil)
-
- svc.spawner.
- On("CreateJob",
- mock.Anything,
- mock.Anything,
- mock.MatchedBy(func(j *job.Job) bool {
- return j.Name.String == "LINK / ETH | version 3 | contract 0x0000000000000000000000000000000000000000"
- }),
- ).
- Run(func(args mock.Arguments) { (args.Get(2).(*job.Job)).ID = 1 }).
- Return(nil)
- svc.orm.On("ApproveSpec",
- mock.Anything,
- spec.ID,
- externalJobID,
- ).Return(nil)
- svc.fmsClient.On("ApprovedJob",
- mock.MatchedBy(func(ctx context.Context) bool { return true }),
- &proto.ApprovedJobRequest{
- Uuid: jp.RemoteUUID.String(),
- Version: int64(spec.Version),
- },
- ).Return(&proto.ApprovedJobResponse{}, nil)
- svc.orm.On("CountJobProposalsByStatus", mock.Anything).Return(&feeds.JobProposalCounts{}, nil)
- svc.orm.On("WithDataSource", mock.Anything).Return(feeds.ORM(svc.orm))
- svc.jobORM.On("WithDataSource", mock.Anything).Return(job.ORM(svc.jobORM))
- },
- id: spec.ID,
- force: true,
- },
- {
- name: "already existing self managed job replacement success if forced with feedID",
- httpTimeout: commonconfig.MustNewDuration(1 * time.Minute),
- before: func(svc *TestService) {
- svc.connMgr.On("GetClient", jp.FeedsManagerID).Return(svc.fmsClient, nil)
- svc.orm.On("GetSpec", mock.Anything, spec.ID).Return(&feeds.JobProposalSpec{
- ID: 20,
- Status: feeds.SpecStatusPending,
- JobProposalID: jp.ID,
- Version: 1,
- Definition: fmt.Sprintf(defn2, externalJobID.String(), feedID),
- }, nil)
- svc.orm.On("GetJobProposal", mock.Anything, jp.ID).Return(jp, nil)
- svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
- svc.orm.EXPECT().GetApprovedSpec(mock.Anything, jp.ID).Return(nil, sql.ErrNoRows)
- svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, &feedID).Return(j.ID, nil)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(j.ID, nil)
svc.spawner.On("DeleteJob", mock.Anything, mock.Anything, j.ID).Return(nil)
svc.spawner.
@@ -4386,7 +4194,7 @@ chainID = 0
svc.orm.EXPECT().GetApprovedSpec(mock.Anything, jp.ID).Return(&feeds.JobProposalSpec{ID: 100}, nil)
svc.orm.EXPECT().CancelSpec(mock.Anything, int64(100)).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(j.ID, nil)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(j.ID, nil)
svc.spawner.On("DeleteJob", mock.Anything, mock.Anything, j.ID).Return(nil)
svc.spawner.
@@ -4499,7 +4307,7 @@ chainID = 0
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -4527,7 +4335,7 @@ chainID = 0
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil), mock.Anything).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
@@ -4561,7 +4369,7 @@ chainID = 0
svc.jobORM.On("AssertBridgesExist", mock.Anything, mock.IsType(pipeline.Pipeline{})).Return(nil)
svc.jobORM.On("FindJobByExternalJobID", mock.Anything, externalJobID).Return(job.Job{}, sql.ErrNoRows)
- svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address, (*common.Hash)(nil)).Return(int32(0), sql.ErrNoRows)
+ svc.jobORM.On("FindOCR2JobIDByAddress", mock.Anything, address).Return(int32(0), sql.ErrNoRows)
svc.spawner.
On("CreateJob",
diff --git a/core/services/job/job_orm_test.go b/core/services/job/job_orm_test.go
index 1084d7d0fab..1de63578030 100644
--- a/core/services/job/job_orm_test.go
+++ b/core/services/job/job_orm_test.go
@@ -54,36 +54,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/utils/testutils/heavyweight"
)
-const mercuryOracleTOML = `name = 'LINK / ETH | 0x0000000000000000000000000000000000000000000000000000000000000001 | verifier_proxy 0x0000000000000000000000000000000000000001'
-type = 'offchainreporting2'
-schemaVersion = 1
-externalJobID = '00000000-0000-0000-0000-000000000001'
-contractID = '0x0000000000000000000000000000000000000006'
-transmitterID = '%s'
-feedID = '%s'
-relay = 'evm'
-pluginType = 'mercury'
-observationSource = """
- ds [type=http method=GET url="https://chain.link/ETH-USD"];
- ds_parse [type=jsonparse path="data.price" separator="."];
- ds_multiply [type=multiply times=100];
- ds -> ds_parse -> ds_multiply;
-"""
-
-[relayConfig]
-chainID = 1
-fromBlock = 1000
-
-[onchainSigningStrategy]
-strategyName = 'single-chain'
-[onchainSigningStrategy.config]
-publicKey = '8fa807463ad73f9ee855cfd60ba406dcf98a2855b3dd8af613107b0f6890a707'
-
-[pluginConfig]
-serverURL = 'wss://localhost:8080'
-serverPubKey = '8fa807463ad73f9ee855cfd60ba406dcf98a2855b3dd8af613107b0f6890a707'
-`
-
func TestORM(t *testing.T) {
t.Parallel()
@@ -1215,28 +1185,6 @@ func Test_FindJob(t *testing.T) {
jobOCR2.OCR2OracleSpec.PluginConfig["juelsPerFeeCoinSource"] = juelsPerFeeCoinSource
- ocr2WithFeedID1 := "0x0001000000000000000000000000000000000000000000000000000000000001"
- ocr2WithFeedID2 := "0x0001000000000000000000000000000000000000000000000000000000000002"
- jobOCR2WithFeedID1, err := ocr2validate.ValidatedOracleSpecToml(
- testutils.Context(t),
- config.OCR2(),
- config.Insecure(),
- fmt.Sprintf(mercuryOracleTOML, cltest.DefaultCSAKey.PublicKeyString(), ocr2WithFeedID1),
- nil,
- )
- require.NoError(t, err)
-
- jobOCR2WithFeedID2, err := ocr2validate.ValidatedOracleSpecToml(
- testutils.Context(t),
- config.OCR2(),
- config.Insecure(),
- fmt.Sprintf(mercuryOracleTOML, cltest.DefaultCSAKey.PublicKeyString(), ocr2WithFeedID2),
- nil,
- )
- jobOCR2WithFeedID2.ExternalJobID = uuid.New()
- jobOCR2WithFeedID2.Name = null.StringFrom("new name")
- require.NoError(t, err)
-
err = orm.CreateJob(ctx, &job)
require.NoError(t, err)
@@ -1246,13 +1194,6 @@ func Test_FindJob(t *testing.T) {
err = orm.CreateJob(ctx, &jobOCR2)
require.NoError(t, err)
- err = orm.CreateJob(ctx, &jobOCR2WithFeedID1)
- require.NoError(t, err)
-
- // second ocr2 job with same contract id but different feed id
- err = orm.CreateJob(ctx, &jobOCR2WithFeedID2)
- require.NoError(t, err)
-
t.Run("by id", func(t *testing.T) {
ctx, cancel := context.WithTimeout(testutils.Context(t), 5*time.Second)
defer cancel()
@@ -1311,40 +1252,15 @@ func Test_FindJob(t *testing.T) {
assert.Equal(t, job.ID, jbID)
})
- t.Run("by contract id without feed id", func(t *testing.T) {
+ t.Run("by contract id", func(t *testing.T) {
ctx := testutils.Context(t)
contractID := "0x613a38AC1659769640aaE063C651F48E0250454C"
- // Find job ID for ocr2 job without feedID.
- jbID, err2 := orm.FindOCR2JobIDByAddress(ctx, contractID, nil)
+ jbID, err2 := orm.FindOCR2JobIDByAddress(ctx, contractID)
require.NoError(t, err2)
assert.Equal(t, jobOCR2.ID, jbID)
})
-
- t.Run("by contract id with valid feed id", func(t *testing.T) {
- ctx := testutils.Context(t)
- contractID := "0x0000000000000000000000000000000000000006"
- feedID := common.HexToHash(ocr2WithFeedID1)
-
- // Find job ID for ocr2 job with feed ID
- jbID, err2 := orm.FindOCR2JobIDByAddress(ctx, contractID, &feedID)
- require.NoError(t, err2)
-
- assert.Equal(t, jobOCR2WithFeedID1.ID, jbID)
- })
-
- t.Run("with duplicate contract id but different feed id", func(t *testing.T) {
- ctx := testutils.Context(t)
- contractID := "0x0000000000000000000000000000000000000006"
- feedID := common.HexToHash(ocr2WithFeedID2)
-
- // Find job ID for ocr2 job with feed ID
- jbID, err2 := orm.FindOCR2JobIDByAddress(ctx, contractID, &feedID)
- require.NoError(t, err2)
-
- assert.Equal(t, jobOCR2WithFeedID2.ID, jbID)
- })
}
func Test_FindJobsByPipelineSpecIDs(t *testing.T) {
diff --git a/core/services/job/mocks/orm.go b/core/services/job/mocks/orm.go
index 00e2e606819..94542e41a2c 100644
--- a/core/services/job/mocks/orm.go
+++ b/core/services/job/mocks/orm.go
@@ -3,11 +3,10 @@
package mocks
import (
- common "github.com/ethereum/go-ethereum/common"
- big "github.com/smartcontractkit/chainlink-integrations/evm/utils/big"
-
context "context"
+ big "github.com/smartcontractkit/chainlink-integrations/evm/utils/big"
+
job "github.com/smartcontractkit/chainlink/v2/core/services/job"
mock "github.com/stretchr/testify/mock"
@@ -1014,9 +1013,9 @@ func (_c *ORM_FindJobsByPipelineSpecIDs_Call) RunAndReturn(run func(context.Cont
return _c
}
-// FindOCR2JobIDByAddress provides a mock function with given fields: ctx, contractID, feedID
-func (_m *ORM) FindOCR2JobIDByAddress(ctx context.Context, contractID string, feedID *common.Hash) (int32, error) {
- ret := _m.Called(ctx, contractID, feedID)
+// FindOCR2JobIDByAddress provides a mock function with given fields: ctx, contractID
+func (_m *ORM) FindOCR2JobIDByAddress(ctx context.Context, contractID string) (int32, error) {
+ ret := _m.Called(ctx, contractID)
if len(ret) == 0 {
panic("no return value specified for FindOCR2JobIDByAddress")
@@ -1024,17 +1023,17 @@ func (_m *ORM) FindOCR2JobIDByAddress(ctx context.Context, contractID string, fe
var r0 int32
var r1 error
- if rf, ok := ret.Get(0).(func(context.Context, string, *common.Hash) (int32, error)); ok {
- return rf(ctx, contractID, feedID)
+ if rf, ok := ret.Get(0).(func(context.Context, string) (int32, error)); ok {
+ return rf(ctx, contractID)
}
- if rf, ok := ret.Get(0).(func(context.Context, string, *common.Hash) int32); ok {
- r0 = rf(ctx, contractID, feedID)
+ if rf, ok := ret.Get(0).(func(context.Context, string) int32); ok {
+ r0 = rf(ctx, contractID)
} else {
r0 = ret.Get(0).(int32)
}
- if rf, ok := ret.Get(1).(func(context.Context, string, *common.Hash) error); ok {
- r1 = rf(ctx, contractID, feedID)
+ if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+ r1 = rf(ctx, contractID)
} else {
r1 = ret.Error(1)
}
@@ -1050,14 +1049,13 @@ type ORM_FindOCR2JobIDByAddress_Call struct {
// FindOCR2JobIDByAddress is a helper method to define mock.On call
// - ctx context.Context
// - contractID string
-// - feedID *common.Hash
-func (_e *ORM_Expecter) FindOCR2JobIDByAddress(ctx interface{}, contractID interface{}, feedID interface{}) *ORM_FindOCR2JobIDByAddress_Call {
- return &ORM_FindOCR2JobIDByAddress_Call{Call: _e.mock.On("FindOCR2JobIDByAddress", ctx, contractID, feedID)}
+func (_e *ORM_Expecter) FindOCR2JobIDByAddress(ctx interface{}, contractID interface{}) *ORM_FindOCR2JobIDByAddress_Call {
+ return &ORM_FindOCR2JobIDByAddress_Call{Call: _e.mock.On("FindOCR2JobIDByAddress", ctx, contractID)}
}
-func (_c *ORM_FindOCR2JobIDByAddress_Call) Run(run func(ctx context.Context, contractID string, feedID *common.Hash)) *ORM_FindOCR2JobIDByAddress_Call {
+func (_c *ORM_FindOCR2JobIDByAddress_Call) Run(run func(ctx context.Context, contractID string)) *ORM_FindOCR2JobIDByAddress_Call {
_c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(string), args[2].(*common.Hash))
+ run(args[0].(context.Context), args[1].(string))
})
return _c
}
@@ -1067,7 +1065,7 @@ func (_c *ORM_FindOCR2JobIDByAddress_Call) Return(_a0 int32, _a1 error) *ORM_Fin
return _c
}
-func (_c *ORM_FindOCR2JobIDByAddress_Call) RunAndReturn(run func(context.Context, string, *common.Hash) (int32, error)) *ORM_FindOCR2JobIDByAddress_Call {
+func (_c *ORM_FindOCR2JobIDByAddress_Call) RunAndReturn(run func(context.Context, string) (int32, error)) *ORM_FindOCR2JobIDByAddress_Call {
_c.Call.Return(run)
return _c
}
diff --git a/core/services/job/models.go b/core/services/job/models.go
index ec5d2daf9b0..e7fb38e0d99 100644
--- a/core/services/job/models.go
+++ b/core/services/job/models.go
@@ -365,9 +365,8 @@ var ForwardersSupportedPlugins = []types.OCR2PluginType{types.Median, types.OCR2
// OCR2OracleSpec defines the job spec for OCR2 jobs.
// Relay config is chain specific config for a relay (chain adapter).
type OCR2OracleSpec struct {
- ID int32 `toml:"-"`
- ContractID string `toml:"contractID"`
- FeedID *common.Hash `toml:"feedID"`
+ ID int32 `toml:"-"`
+ ContractID string `toml:"contractID"`
// Network
Relay string `toml:"relay"`
// TODO BCF-2442 implement ChainID as top level parameter rathe than buried in RelayConfig.
@@ -776,10 +775,9 @@ type LegacyGasStationSidecarSpec struct {
// BootstrapSpec defines the spec to handles the node communication setup process.
type BootstrapSpec struct {
- ID int32 `toml:"-"`
- ContractID string `toml:"contractID"`
- FeedID *common.Hash `toml:"feedID"`
- Relay string `toml:"relay"` // RelayID.Network
+ ID int32 `toml:"-"`
+ ContractID string `toml:"contractID"`
+ Relay string `toml:"relay"` // RelayID.Network
RelayConfig JSONConfig
MonitoringEndpoint null.String `toml:"monitoringEndpoint"`
BlockchainTimeout models.Interval `toml:"blockchainTimeout"`
diff --git a/core/services/job/orm.go b/core/services/job/orm.go
index b0605e44466..7ff8b61340f 100644
--- a/core/services/job/orm.go
+++ b/core/services/job/orm.go
@@ -50,7 +50,7 @@ type ORM interface {
FindJob(ctx context.Context, id int32) (Job, error)
FindJobByExternalJobID(ctx context.Context, uuid uuid.UUID) (Job, error)
FindJobIDByAddress(ctx context.Context, address evmtypes.EIP55Address, evmChainID *big.Big) (int32, error)
- FindOCR2JobIDByAddress(ctx context.Context, contractID string, feedID *common.Hash) (int32, error)
+ FindOCR2JobIDByAddress(ctx context.Context, contractID string) (int32, error)
FindJobIDsWithBridge(ctx context.Context, name string) ([]int32, error)
DeleteJob(ctx context.Context, id int32, jobType Type) error
RecordError(ctx context.Context, jobID int32, description string) error
@@ -272,16 +272,6 @@ func (o *orm) CreateJob(ctx context.Context, jb *Job) error {
return errors.Errorf("forwarding is not currently supported for %s jobs", jb.OCR2OracleSpec.PluginType)
}
- if jb.OCR2OracleSpec.PluginType == types.Mercury {
- if jb.OCR2OracleSpec.FeedID == nil {
- return errors.New("feed ID is required for mercury plugin type")
- }
- } else {
- if jb.OCR2OracleSpec.FeedID != nil {
- return errors.New("feed ID is not currently supported for non-mercury jobs")
- }
- }
-
if jb.OCR2OracleSpec.PluginType == types.Median {
var cfg medianconfig.PluginConfig
@@ -573,10 +563,10 @@ func (o *orm) insertOCROracleSpec(ctx context.Context, spec *OCROracleSpec) (spe
}
func (o *orm) insertOCR2OracleSpec(ctx context.Context, spec *OCR2OracleSpec) (specID int32, err error) {
- return o.prepareQuerySpecID(ctx, `INSERT INTO ocr2_oracle_specs (contract_id, feed_id, relay, relay_config, plugin_type, plugin_config, onchain_signing_strategy, p2pv2_bootstrappers, ocr_key_bundle_id, transmitter_id,
+ return o.prepareQuerySpecID(ctx, `INSERT INTO ocr2_oracle_specs (contract_id, relay, relay_config, plugin_type, plugin_config, onchain_signing_strategy, p2pv2_bootstrappers, ocr_key_bundle_id, transmitter_id,
blockchain_timeout, contract_config_tracker_poll_interval, contract_config_confirmations, allow_no_bootstrappers,
created_at, updated_at)
- VALUES (:contract_id, :feed_id, :relay, :relay_config, :plugin_type, :plugin_config, :onchain_signing_strategy, :p2pv2_bootstrappers, :ocr_key_bundle_id, :transmitter_id,
+ VALUES (:contract_id, :relay, :relay_config, :plugin_type, :plugin_config, :onchain_signing_strategy, :p2pv2_bootstrappers, :ocr_key_bundle_id, :transmitter_id,
:blockchain_timeout, :contract_config_tracker_poll_interval, :contract_config_confirmations, :allow_no_bootstrappers,
NOW(), NOW())
RETURNING id;`, spec)
@@ -637,10 +627,10 @@ func (o *orm) insertLegacyGasStationSidecarSpec(ctx context.Context, spec *Legac
}
func (o *orm) insertBootstrapSpec(ctx context.Context, spec *BootstrapSpec) (specID int32, err error) {
- return o.prepareQuerySpecID(ctx, `INSERT INTO bootstrap_specs (contract_id, feed_id, relay, relay_config, monitoring_endpoint,
+ return o.prepareQuerySpecID(ctx, `INSERT INTO bootstrap_specs (contract_id, relay, relay_config, monitoring_endpoint,
blockchain_timeout, contract_config_tracker_poll_interval,
contract_config_confirmations, created_at, updated_at)
- VALUES (:contract_id, :feed_id, :relay, :relay_config, :monitoring_endpoint,
+ VALUES (:contract_id, :relay, :relay_config, :monitoring_endpoint,
:blockchain_timeout, :contract_config_tracker_poll_interval,
:contract_config_confirmations, NOW(), NOW())
RETURNING id;`, spec)
@@ -1065,20 +1055,18 @@ WHERE ocrspec.id IS NOT NULL OR fmspec.id IS NOT NULL
return
}
-func (o *orm) FindOCR2JobIDByAddress(ctx context.Context, contractID string, feedID *common.Hash) (jobID int32, err error) {
- // NOTE: We want to explicitly match on NULL feed_id hence usage of `IS
- // NOT DISTINCT FROM` instead of `=`
+func (o *orm) FindOCR2JobIDByAddress(ctx context.Context, contractID string) (jobID int32, err error) {
stmt := `
SELECT jobs.id
FROM jobs
-LEFT JOIN ocr2_oracle_specs ocr2spec on ocr2spec.contract_id = $1 AND ocr2spec.feed_id IS NOT DISTINCT FROM $2 AND ocr2spec.id = jobs.ocr2_oracle_spec_id
-LEFT JOIN bootstrap_specs bs on bs.contract_id = $1 AND bs.feed_id IS NOT DISTINCT FROM $2 AND bs.id = jobs.bootstrap_spec_id
+LEFT JOIN ocr2_oracle_specs ocr2spec on ocr2spec.contract_id = $1 AND ocr2spec.id = jobs.ocr2_oracle_spec_id
+LEFT JOIN bootstrap_specs bs on bs.contract_id = $1 AND bs.id = jobs.bootstrap_spec_id
WHERE ocr2spec.id IS NOT NULL OR bs.id IS NOT NULL
`
- err = o.ds.GetContext(ctx, &jobID, stmt, contractID, feedID)
+ err = o.ds.GetContext(ctx, &jobID, stmt, contractID)
if err != nil {
if !errors.Is(err, sql.ErrNoRows) {
- err = errors.Wrapf(err, "error searching for job by contract id=%s and feed id=%s", contractID, feedID)
+ err = errors.Wrapf(err, "error searching for job by contract id=%s", contractID)
}
err = errors.Wrap(err, "FindOCR2JobIDByAddress failed")
return
diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go
index b635c733da3..54ce1396bb8 100644
--- a/core/services/ocr2/delegate.go
+++ b/core/services/ocr2/delegate.go
@@ -59,7 +59,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/generic"
lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/median"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21"
ocr2keeper21core "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core"
@@ -69,8 +68,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/relay"
evmrelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
functionsRelay "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/functions"
- evmmercury "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
evmrelaytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
"github.com/smartcontractkit/chainlink/v2/core/services/streams"
"github.com/smartcontractkit/chainlink/v2/core/services/synchronization"
@@ -109,7 +106,6 @@ type Delegate struct {
ds sqlutil.DataSource
jobORM job.ORM
bridgeORM bridges.ORM
- mercuryORM evmmercury.ORM
pipelineRunner pipeline.Runner
streamRegistry streams.Getter
peerWrapper *ocrcommon.SingletonPeerWrapper
@@ -192,7 +188,6 @@ type jobPipelineConfig interface {
type mercuryConfig interface {
Credentials(credName string) *types.MercuryCredentials
- Cache() coreconfig.MercuryCache
TLS() coreconfig.MercuryTLS
Transmitter() coreconfig.MercuryTransmitter
VerboseLogging() bool
@@ -219,7 +214,6 @@ type DelegateOpts struct {
Ds sqlutil.DataSource
JobORM job.ORM
BridgeORM bridges.ORM
- MercuryORM evmmercury.ORM
PipelineRunner pipeline.Runner
StreamRegistry streams.Getter
PeerWrapper *ocrcommon.SingletonPeerWrapper
@@ -242,7 +236,6 @@ func NewDelegate(
ds: opts.Ds,
jobORM: opts.JobORM,
bridgeORM: opts.BridgeORM,
- mercuryORM: opts.MercuryORM,
pipelineRunner: opts.PipelineRunner,
streamRegistry: opts.StreamRegistry,
peerWrapper: opts.PeerWrapper,
@@ -430,10 +423,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi
ContractID: spec.ContractID,
TransmitterID: transmitterID,
}
- if spec.FeedID != nil && (*spec.FeedID != (common.Hash{})) {
- lggrCtx.FeedID = *spec.FeedID
- spec.RelayConfig["feedID"] = spec.FeedID
- }
lggr := logger.Sugared(d.lggr.Named(string(jb.Type)).Named(jb.ExternalJobID.String()).With(lggrCtx.Args()...))
kvStore := job.NewKVStore(jb.ID, d.ds)
@@ -503,9 +492,6 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi
ctx = lggrCtx.ContextWithValues(ctx)
switch spec.PluginType {
- case types.Mercury:
- return d.newServicesMercury(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc)
-
case types.LLO:
return d.newServicesLLO(ctx, lggr, jb, bootstrapPeers, kb, ocrDB, lc)
@@ -540,7 +526,7 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) ([]job.Servi
func GetEVMEffectiveTransmitterID(ctx context.Context, jb *job.Job, chain legacyevm.Chain, lggr logger.SugaredLogger) (string, error) {
spec := jb.OCR2OracleSpec
- if spec.PluginType == types.Mercury || spec.PluginType == types.LLO {
+ if spec.PluginType == types.LLO {
return spec.TransmitterID.String, nil
}
@@ -559,7 +545,6 @@ func GetEVMEffectiveTransmitterID(ctx context.Context, jb *job.Job, chain legacy
// effectiveTransmitterID is the transmitter address registered on the ocr contract. This is by default the EOA account on the node.
// In the case of forwarding, the transmitter address is the forwarder contract deployed onchain between EOA and OCR contract.
- // ForwardingAllowed cannot be set with Mercury, so this should always be false for mercury jobs
if jb.ForwardingAllowed {
if chain == nil {
return "", fmt.Errorf("job forwarding requires non-nil chain")
@@ -834,115 +819,6 @@ func (d *Delegate) newServicesGenericPlugin(
return srvs, nil
}
-func (d *Delegate) newServicesMercury(
- ctx context.Context,
- lggr logger.SugaredLogger,
- jb job.Job,
- bootstrapPeers []commontypes.BootstrapperLocator,
- kb ocr2key.KeyBundle,
- ocrDB *db,
- lc ocrtypes.LocalConfig,
-) ([]job.ServiceCtx, error) {
- if jb.OCR2OracleSpec.FeedID == nil || (*jb.OCR2OracleSpec.FeedID == (common.Hash{})) {
- return nil, errors.Errorf("ServicesForSpec: mercury job type requires feedID")
- }
- spec := jb.OCR2OracleSpec
- transmitterID := spec.TransmitterID.String
- if len(transmitterID) != 64 {
- return nil, errors.Errorf("ServicesForSpec: mercury job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID)
- }
- if _, err := hex.DecodeString(transmitterID); err != nil {
- return nil, errors.Wrapf(err, "ServicesForSpec: mercury job type requires transmitter ID to be a 32-byte hex string, got: %q", transmitterID)
- }
-
- rid, err := spec.RelayID()
- if err != nil {
- return nil, ErrJobSpecNoRelayer{Err: err, PluginName: "mercury"}
- }
- if rid.Network != relay.NetworkEVM {
- return nil, fmt.Errorf("mercury services: expected EVM relayer got %q", rid.Network)
- }
- relayer, err := d.RelayGetter.Get(rid)
- if err != nil {
- return nil, ErrRelayNotEnabled{Err: err, Relay: spec.Relay, PluginName: "mercury"}
- }
-
- provider, err2 := relayer.NewPluginProvider(ctx,
- types.RelayArgs{
- ExternalJobID: jb.ExternalJobID,
- JobID: jb.ID,
- ContractID: spec.ContractID,
- New: d.isNewlyCreatedJob,
- RelayConfig: spec.RelayConfig.Bytes(),
- ProviderType: string(spec.PluginType),
- }, types.PluginArgs{
- TransmitterID: transmitterID,
- PluginConfig: spec.PluginConfig.Bytes(),
- })
- if err2 != nil {
- return nil, err2
- }
-
- mercuryProvider, ok := provider.(types.MercuryProvider)
- if !ok {
- return nil, errors.New("could not coerce PluginProvider to MercuryProvider")
- }
-
- lc.ContractConfigTrackerPollInterval = 1 * time.Second // This is the fastest that libocr supports. See: https://github.com/smartcontractkit/offchain-reporting/pull/520
-
- // Disable OCR debug+info logging for legacy mercury jobs unless tracelogging is enabled, because its simply too verbose (150 jobs => ~50k logs per second)
- ocrLogger := ocrcommon.NewOCRWrapper(llo.NewSuppressedLogger(lggr, d.cfg.OCR2().TraceLogging(), d.cfg.OCR2().TraceLogging()), d.cfg.OCR2().TraceLogging(), func(ctx context.Context, msg string) {
- lggr.ErrorIf(d.jobORM.RecordError(ctx, jb.ID, msg), "unable to record error")
- })
-
- var relayConfig evmrelaytypes.RelayConfig
- err = json.Unmarshal(jb.OCR2OracleSpec.RelayConfig.Bytes(), &relayConfig)
- if err != nil {
- return nil, fmt.Errorf("error while unmarshalling relay config: %w", err)
- }
-
- var telemetryType synchronization.TelemetryType
- if relayConfig.EnableTriggerCapability && len(jb.OCR2OracleSpec.PluginConfig) == 0 {
- telemetryType = synchronization.OCR3DataFeeds
- // First use case for TriggerCapability transmission is Data Feeds, so telemetry should be routed accordingly.
- // This is only true if TriggerCapability is the *only* transmission method (PluginConfig is empty).
- } else {
- telemetryType = synchronization.OCR3Mercury
- }
-
- oracleArgsNoPlugin := libocr2.MercuryOracleArgs{
- BinaryNetworkEndpointFactory: d.peerWrapper.Peer2,
- V2Bootstrappers: bootstrapPeers,
- ContractTransmitter: mercuryProvider.ContractTransmitter(),
- ContractConfigTracker: mercuryProvider.ContractConfigTracker(),
- Database: ocrDB,
- LocalConfig: lc,
- Logger: ocrLogger,
- MonitoringEndpoint: d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.FeedID.String(), telemetryType),
- OffchainConfigDigester: mercuryProvider.OffchainConfigDigester(),
- OffchainKeyring: kb,
- OnchainKeyring: kb,
- MetricsRegisterer: prometheus.WrapRegistererWith(map[string]string{"job_name": jb.Name.ValueOrZero()}, prometheus.DefaultRegisterer),
- }
-
- chEnhancedTelem := make(chan ocrcommon.EnhancedTelemetryMercuryData, 100)
-
- mCfg := mercury.NewMercuryConfig(d.cfg.JobPipeline().MaxSuccessfulRuns(), d.cfg.JobPipeline().ResultWriteQueueDepth(), d.cfg)
-
- mercuryServices, err2 := mercury.NewServices(jb, mercuryProvider, d.pipelineRunner, lggr, oracleArgsNoPlugin, mCfg, chEnhancedTelem, d.mercuryORM, (mercuryutils.FeedID)(*spec.FeedID), relayConfig.EnableTriggerCapability)
-
- if ocrcommon.ShouldCollectEnhancedTelemetryMercury(jb) {
- enhancedTelemService := ocrcommon.NewEnhancedTelemetryService(&jb, chEnhancedTelem, make(chan struct{}), d.monitoringEndpointGen.GenMonitoringEndpoint(rid.Network, rid.ChainID, spec.FeedID.String(), synchronization.EnhancedEAMercury), lggr.Named("EnhancedTelemetryMercury"))
- mercuryServices = append(mercuryServices, enhancedTelemService)
- } else {
- lggr.Infow("Enhanced telemetry is disabled for mercury job", "job", jb.Name)
- }
-
- mercuryServices = append(mercuryServices, ocrLogger)
-
- return mercuryServices, err2
-}
-
func (d *Delegate) newServicesLLO(
ctx context.Context,
lggr logger.SugaredLogger,
diff --git a/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go b/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go
index 91ae4e6716d..0e7986e7a5b 100644
--- a/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go
+++ b/core/services/ocr2/plugins/ccip/testhelpers/integration/jobspec.go
@@ -38,10 +38,6 @@ func (o *OCR2TaskJobSpec) Type() string { return o.JobType }
// String representation of the job
func (o *OCR2TaskJobSpec) String() (string, error) {
- var feedID string
- if o.OCR2OracleSpec.FeedID != nil {
- feedID = o.OCR2OracleSpec.FeedID.Hex()
- }
externalID, err := ExternalJobID(o.Name)
if err != nil {
return "", err
@@ -53,7 +49,6 @@ func (o *OCR2TaskJobSpec) String() (string, error) {
MaxTaskDuration string
ForwardingAllowed bool
ContractID string
- FeedID string
Relay string
PluginType string
RelayConfig map[string]interface{}
@@ -74,7 +69,6 @@ func (o *OCR2TaskJobSpec) String() (string, error) {
ForwardingAllowed: o.ForwardingAllowed,
MaxTaskDuration: o.MaxTaskDuration,
ContractID: o.OCR2OracleSpec.ContractID,
- FeedID: feedID,
Relay: o.OCR2OracleSpec.Relay,
PluginType: string(o.OCR2OracleSpec.PluginType),
RelayConfig: o.OCR2OracleSpec.RelayConfig,
@@ -100,9 +94,6 @@ pluginType = "{{ .PluginType }}" {{end}}
relay = "{{.Relay}}"
schemaVersion = 1
contractID = "{{.ContractID}}"
-{{if .FeedID}}
-feedID = "{{.FeedID}}"
-{{end}}
{{if eq .JobType "offchainreporting2" }}
ocrKeyBundleID = "{{.OCRKeyBundleID}}" {{end}}
{{if eq .JobType "offchainreporting2" }}
diff --git a/core/services/ocr2/plugins/llo/config/config.go b/core/services/ocr2/plugins/llo/config/config.go
index 47683cb4a61..76c6ce8ba26 100644
--- a/core/services/ocr2/plugins/llo/config/config.go
+++ b/core/services/ocr2/plugins/llo/config/config.go
@@ -16,10 +16,14 @@ import (
llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
- mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config"
"github.com/smartcontractkit/chainlink/v2/core/utils"
)
+type Server struct {
+ URL string
+ PubKey utils.PlainHexBytes
+}
+
type PluginConfig struct {
ChannelDefinitionsContractAddress common.Address `json:"channelDefinitionsContractAddress" toml:"channelDefinitionsContractAddress"`
ChannelDefinitionsContractFromBlock int64 `json:"channelDefinitionsContractFromBlock" toml:"channelDefinitionsContractFromBlock"`
@@ -82,9 +86,9 @@ func (p *PluginConfig) Unmarshal(data []byte) error {
return json.Unmarshal(data, p)
}
-func (p PluginConfig) GetServers() (servers []mercuryconfig.Server) {
+func (p PluginConfig) GetServers() (servers []Server) {
for url, pubKey := range p.Servers {
- servers = append(servers, mercuryconfig.Server{URL: wssRegexp.ReplaceAllString(url, ""), PubKey: pubKey})
+ servers = append(servers, Server{URL: wssRegexp.ReplaceAllString(url, ""), PubKey: pubKey})
}
sort.Slice(servers, func(i, j int) bool {
return servers[i].URL < servers[j].URL
diff --git a/core/services/ocr2/plugins/llo/helpers_test.go b/core/services/ocr2/plugins/llo/helpers_test.go
index 78eada23630..460e97ee59a 100644
--- a/core/services/ocr2/plugins/llo/helpers_test.go
+++ b/core/services/ocr2/plugins/llo/helpers_test.go
@@ -3,7 +3,6 @@ package llo_test
import (
"context"
"crypto/ed25519"
- "errors"
"fmt"
"io"
"math/big"
@@ -16,7 +15,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/shopspring/decimal"
- "github.com/smartcontractkit/wsrpc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zapcore"
@@ -27,9 +25,6 @@ import (
"github.com/smartcontractkit/chainlink-data-streams/rpc/mtls"
"github.com/smartcontractkit/wsrpc/credentials"
- "github.com/smartcontractkit/wsrpc/peer"
-
- ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
@@ -47,14 +42,11 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate"
"github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
"github.com/smartcontractkit/chainlink/v2/core/services/streams"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
"github.com/smartcontractkit/chainlink/v2/core/utils/testutils/heavyweight"
)
-var _ pb.MercuryServer = &wsrpcMercuryServer{}
-
type mercuryServer struct {
rpc.UnimplementedTransmitterServer
privKey ed25519.PrivateKey
@@ -73,7 +65,7 @@ func startMercuryServer(t *testing.T, srv *mercuryServer, pubKeys []ed25519.Publ
require.NoError(t, err)
s := grpc.NewServer(grpc.Creds(sMtls))
- // Register mercury implementation with the wsrpc server
+ // Register mercury implementation with the gRPC server
rpc.RegisterTransmitterServer(s, srv)
// Start serving
@@ -112,62 +104,6 @@ func (s *mercuryServer) LatestReport(ctx context.Context, lrr *rpc.LatestReportR
panic("should not be called")
}
-type wsrpcMercuryServer struct {
- privKey ed25519.PrivateKey
- reqsCh chan wsrpcRequest
- t *testing.T
-}
-
-type wsrpcRequest struct {
- pk credentials.StaticSizedPublicKey
- req *pb.TransmitRequest
-}
-
-func (r wsrpcRequest) TransmitterID() ocr2types.Account {
- return ocr2types.Account(fmt.Sprintf("%x", r.pk))
-}
-
-func NewWSRPCMercuryServer(t *testing.T, privKey ed25519.PrivateKey, reqsCh chan wsrpcRequest) *wsrpcMercuryServer {
- return &wsrpcMercuryServer{privKey, reqsCh, t}
-}
-
-func (s *wsrpcMercuryServer) Transmit(ctx context.Context, req *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- p, ok := peer.FromContext(ctx)
- if !ok {
- return nil, errors.New("could not extract public key")
- }
- r := wsrpcRequest{p.PublicKey, req}
- s.reqsCh <- r
-
- return &pb.TransmitResponse{
- Code: 1,
- Error: "",
- }, nil
-}
-
-func (s *wsrpcMercuryServer) LatestReport(ctx context.Context, lrr *pb.LatestReportRequest) (*pb.LatestReportResponse, error) {
- panic("should not be called")
-}
-
-func startWSRPCMercuryServer(t *testing.T, srv *wsrpcMercuryServer, pubKeys []ed25519.PublicKey) (serverURL string) {
- // Set up the wsrpc server
- lis, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("[MAIN] failed to listen: %v", err)
- }
- serverURL = lis.Addr().String()
- s := wsrpc.NewServer(wsrpc.WithCreds(srv.privKey, pubKeys))
-
- // Register mercury implementation with the wsrpc server
- pb.RegisterMercuryServer(s, srv)
-
- // Start serving
- go s.Serve(lis)
- t.Cleanup(s.Stop)
-
- return
-}
-
type Node struct {
App chainlink.Application
ClientPubKey credentials.StaticSizedPublicKey
diff --git a/core/services/ocr2/plugins/llo/integration_test.go b/core/services/ocr2/plugins/llo/integration_test.go
index dc1b0185daa..01cea5bf367 100644
--- a/core/services/ocr2/plugins/llo/integration_test.go
+++ b/core/services/ocr2/plugins/llo/integration_test.go
@@ -32,7 +32,6 @@ import (
"github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
"github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper"
ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/wsrpc/credentials"
llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo"
"github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
@@ -45,21 +44,15 @@ import (
ubig "github.com/smartcontractkit/chainlink-integrations/evm/utils/big"
"github.com/smartcontractkit/chainlink/v2/core/config"
"github.com/smartcontractkit/chainlink/v2/core/config/toml"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/channel_config_store"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/configurator"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/destination_verifier"
"github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/destination_verifier_proxy"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/fee_manager"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/reward_manager"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy"
"github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
"github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
lloevm "github.com/smartcontractkit/chainlink/v2/core/services/llo/evm"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/llo"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec"
@@ -82,10 +75,6 @@ func setupBlockchain(t *testing.T) (
common.Address,
*channel_config_store.ChannelConfigStore,
common.Address,
- *verifier.Verifier,
- common.Address,
- *verifier_proxy.VerifierProxy,
- common.Address,
) {
steve := evmtestutils.MustNewSimTransactor(t) // config contract deployer and owner
genesisData := gethtypes.GenesisAlloc{steve.From: {Balance: assets.Ether(1000).ToInt()}}
@@ -111,53 +100,13 @@ func setupBlockchain(t *testing.T) (
require.NoError(t, err)
backend.Commit()
- // Legacy mercury verifier
- legacyVerifier, legacyVerifierAddr, legacyVerifierProxy, legacyVerifierProxyAddr := setupLegacyMercuryVerifier(t, steve, backend)
-
// ChannelConfigStore
configStoreAddress, _, configStore, err := channel_config_store.DeployChannelConfigStore(steve, backend.Client())
require.NoError(t, err)
backend.Commit()
- return steve, backend, configurator, configuratorAddress, destinationVerifier, destinationVerifierAddr, verifierProxy, destinationVerifierProxyAddr, configStore, configStoreAddress, legacyVerifier, legacyVerifierAddr, legacyVerifierProxy, legacyVerifierProxyAddr
-}
-
-func setupLegacyMercuryVerifier(t *testing.T, steve *bind.TransactOpts, backend evmtypes.Backend) (*verifier.Verifier, common.Address, *verifier_proxy.VerifierProxy, common.Address) {
- linkTokenAddress, _, linkToken, err := link_token_interface.DeployLinkToken(steve, backend.Client())
- require.NoError(t, err)
- backend.Commit()
- _, err = linkToken.Transfer(steve, steve.From, big.NewInt(1000))
- require.NoError(t, err)
- backend.Commit()
- nativeTokenAddress, _, nativeToken, err := link_token_interface.DeployLinkToken(steve, backend.Client())
- require.NoError(t, err)
- backend.Commit()
- _, err = nativeToken.Transfer(steve, steve.From, big.NewInt(1000))
- require.NoError(t, err)
- backend.Commit()
- verifierProxyAddr, _, verifierProxy, err := verifier_proxy.DeployVerifierProxy(steve, backend.Client(), common.Address{}) // zero address for access controller disables access control
- require.NoError(t, err)
- backend.Commit()
- verifierAddress, _, verifier, err := verifier.DeployVerifier(steve, backend.Client(), verifierProxyAddr)
- require.NoError(t, err)
- backend.Commit()
- _, err = verifierProxy.InitializeVerifier(steve, verifierAddress)
- require.NoError(t, err)
- backend.Commit()
- rewardManagerAddr, _, rewardManager, err := reward_manager.DeployRewardManager(steve, backend.Client(), linkTokenAddress)
- require.NoError(t, err)
- backend.Commit()
- feeManagerAddr, _, _, err := fee_manager.DeployFeeManager(steve, backend.Client(), linkTokenAddress, nativeTokenAddress, verifierProxyAddr, rewardManagerAddr)
- require.NoError(t, err)
- backend.Commit()
- _, err = verifierProxy.SetFeeManager(steve, feeManagerAddr)
- require.NoError(t, err)
- backend.Commit()
- _, err = rewardManager.SetFeeManager(steve, feeManagerAddr)
- require.NoError(t, err)
- backend.Commit()
- return verifier, verifierAddress, verifierProxy, verifierProxyAddr
+ return steve, backend, configurator, configuratorAddress, destinationVerifier, destinationVerifierAddr, verifierProxy, destinationVerifierProxyAddr, configStore, configStoreAddress
}
type Stream struct {
@@ -315,31 +264,6 @@ func generateConfig(t *testing.T, opts ...OCRConfigOption) (signers []types.Onch
return
}
-func setLegacyConfig(t *testing.T, donID uint32, steve *bind.TransactOpts, backend evmtypes.Backend, legacyVerifier *verifier.Verifier, legacyVerifierAddr common.Address, nodes []Node, oracles []confighelper.OracleIdentityExtra, inOffchainConfig datastreamsllo.OffchainConfig) ocr2types.ConfigDigest {
- signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig := generateConfig(t, WithOracles(oracles), WithOffchainConfig(inOffchainConfig))
-
- signerAddresses, err := evm.OnchainPublicKeyToAddress(signers)
- require.NoError(t, err)
- offchainTransmitters := make([][32]byte, nNodes)
- for i := 0; i < nNodes; i++ {
- offchainTransmitters[i] = nodes[i].ClientPubKey
- }
- donIDPadded := llo.DonIDToBytes32(donID)
- _, err = legacyVerifier.SetConfig(steve, donIDPadded, signerAddresses, offchainTransmitters, fNodes, onchainConfig, offchainConfigVersion, offchainConfig, nil)
- require.NoError(t, err)
-
- // libocr requires a few confirmations to accept the config
- backend.Commit()
- backend.Commit()
- backend.Commit()
- backend.Commit()
-
- l, err := legacyVerifier.LatestConfigDigestAndEpoch(&bind.CallOpts{}, donIDPadded)
- require.NoError(t, err)
-
- return l.ConfigDigest
-}
-
func setStagingConfig(t *testing.T, donID uint32, steve *bind.TransactOpts, backend evmtypes.Backend, configurator *configurator.Configurator, configuratorAddress common.Address, nodes []Node, opts ...OCRConfigOption) ocr2types.ConfigDigest {
return setBlueGreenConfig(t, donID, steve, backend, configurator, configuratorAddress, nodes, opts...)
}
@@ -390,7 +314,7 @@ func setBlueGreenConfig(t *testing.T, donID uint32, steve *bind.TransactOpts, ba
require.NoError(t, err)
require.GreaterOrEqual(t, len(logs), 1)
- cfg, err := mercury.ConfigFromLog(logs[len(logs)-1].Data)
+ cfg, err := llo.DecodeProductionConfigSetLog(logs[len(logs)-1].Data)
require.NoError(t, err)
return cfg.ConfigDigest
@@ -445,7 +369,7 @@ func testIntegrationLLOEVMPremiumLegacy(t *testing.T, offchainConfig datastreams
clientPubKeys[i] = key.PublicKey
}
- steve, backend, _, _, verifier, _, verifierProxy, _, configStore, configStoreAddress, legacyVerifier, legacyVerifierAddr, _, _ := setupBlockchain(t)
+ steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress := setupBlockchain(t)
fromBlock := 1
// Setup bootstrap
@@ -455,12 +379,12 @@ func testIntegrationLLOEVMPremiumLegacy(t *testing.T, offchainConfig datastreams
bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
t.Run("using legacy verifier configuration contract, produces reports in v0.3 format", func(t *testing.T) {
- reqs := make(chan wsrpcRequest, 100000)
+ packetCh := make(chan *packet, 100000)
serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(salt - 2))
serverPubKey := serverKey.PublicKey
- srv := NewWSRPCMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs)
+ srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), packetCh)
- serverURL := startWSRPCMercuryServer(t, srv, clientPubKeys)
+ serverURL := startMercuryServer(t, srv, clientPubKeys)
donID := uint32(995544)
streams := []Stream{ethStream, linkStream, quoteStream1, quoteStream2}
@@ -470,9 +394,7 @@ func testIntegrationLLOEVMPremiumLegacy(t *testing.T, offchainConfig datastreams
}
// Setup oracle nodes
- oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolWSRPC)
- })
+ oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, nil)
chainID := testutils.SimulatedChainID
relayType := "evm"
@@ -480,9 +402,9 @@ func testIntegrationLLOEVMPremiumLegacy(t *testing.T, offchainConfig datastreams
chainID = "%s"
fromBlock = %d
lloDonID = %d
-lloConfigMode = "mercury"
+lloConfigMode = "bluegreen"
`, chainID, fromBlock, donID)
- addBootstrapJob(t, bootstrapNode, legacyVerifierAddr, "job-2", relayType, relayConfig)
+ addBootstrapJob(t, bootstrapNode, configuratorAddress, "job-2", relayType, relayConfig)
// Channel definitions
channelDefinitions := llotypes.ChannelDefinitions{
@@ -535,40 +457,34 @@ lloConfigMode = "mercury"
donID = %d
channelDefinitionsContractAddress = "0x%x"
channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, configStoreAddress, fromBlock)
- addOCRJobsEVMPremiumLegacy(t, streams, serverPubKey, serverURL, legacyVerifierAddr, bootstrapPeerID, bootstrapNodePort, nodes, configStoreAddress, clientPubKeys, pluginConfig, relayType, relayConfig)
+ addOCRJobsEVMPremiumLegacy(t, streams, serverPubKey, serverURL, configuratorAddress, bootstrapPeerID, bootstrapNodePort, nodes, configStoreAddress, clientPubKeys, pluginConfig, relayType, relayConfig)
// Set config on configurator
- setLegacyConfig(
- t, donID, steve, backend, legacyVerifier, legacyVerifierAddr, nodes, oracles, offchainConfig,
+ setProductionConfig(
+ t, donID, steve, backend, configurator, configuratorAddress, nodes, WithOracles(oracles), WithOffchainConfig(offchainConfig),
)
- // Set config on the destination verifier
signerAddresses := make([]common.Address, len(oracles))
for i, oracle := range oracles {
signerAddresses[i] = common.BytesToAddress(oracle.OracleIdentity.OnchainPublicKey)
}
- {
- recipientAddressesAndWeights := []destination_verifier.CommonAddressAndWeight{}
-
- _, err := verifier.SetConfig(steve, signerAddresses, fNodes, recipientAddressesAndWeights)
- require.NoError(t, err)
- backend.Commit()
- }
t.Run("receives at least one report per channel from each oracle when EAs are at 100% reliability", func(t *testing.T) {
- // Expect at least one report per feed from each oracle
- seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{})
+ // Expect at least one report per feed from each oracle (keyed by ip)
+ seen := make(map[[32]byte]map[string]struct{})
for _, cd := range channelDefinitions {
var opts lloevm.ReportFormatEVMPremiumLegacyOpts
err := json.Unmarshal(cd.Opts, &opts)
require.NoError(t, err)
// feedID will be deleted when all n oracles have reported
- seen[opts.FeedID] = make(map[credentials.StaticSizedPublicKey]struct{}, nNodes)
+ seen[opts.FeedID] = make(map[string]struct{}, nNodes)
}
- for req := range reqs {
- assert.Equal(t, uint32(llotypes.ReportFormatEVMPremiumLegacy), req.req.ReportFormat)
+
+ for pckt := range packetCh {
+ req := pckt.req
+ assert.Equal(t, uint32(llotypes.ReportFormatEVMPremiumLegacy), req.ReportFormat)
v := make(map[string]interface{})
- err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload)
+ err := mercury.PayloadTypes.UnpackIntoMap(v, req.Payload)
require.NoError(t, err)
report, exists := v["report"]
if !exists {
@@ -622,19 +538,11 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi
assert.Subset(t, signerAddresses, reportSigners)
}
- // test on-chain verification
- t.Run("on-chain verification", func(t *testing.T) {
- t.Skip("SKIP - MERC-6637")
- // Disabled because it flakes, sometimes returns "execution reverted"
- // No idea why
- // https://smartcontract-it.atlassian.net/browse/MERC-6637
- _, err = verifierProxy.Verify(steve, req.req.Payload, []byte{})
- require.NoError(t, err)
- })
-
- t.Logf("oracle %x reported for 0x%x", req.pk[:], feedID[:])
+ pr, ok := peer.FromContext(pckt.ctx)
+ require.True(t, ok)
+ t.Logf("oracle %s reported for 0x%x", pr.Addr.String(), feedID[:])
- seen[feedID][req.pk] = struct{}{}
+ seen[feedID][pr.Addr.String()] = struct{}{}
if len(seen[feedID]) == nNodes {
t.Logf("all oracles reported for 0x%x", feedID[:])
delete(seen, feedID)
@@ -683,7 +591,7 @@ func testIntegrationLLOEVMABIEncodeUnpacked(t *testing.T, offchainConfig datastr
clientPubKeys[i] = key.PublicKey
}
- steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress, _, _, _, _ := setupBlockchain(t)
+ steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress := setupBlockchain(t)
fromBlock := 1
// Setup bootstrap
@@ -1219,7 +1127,7 @@ func TestIntegration_LLO_stress_test_V1(t *testing.T) {
clientPubKeys[i] = key.PublicKey
}
- steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress, _, _, _, _ := setupBlockchain(t)
+ steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress := setupBlockchain(t)
fromBlock := 1
// Setup bootstrap
@@ -1448,7 +1356,7 @@ func TestIntegration_LLO_transmit_errors(t *testing.T) {
clientPubKeys[i] = key.PublicKey
}
- steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress, _, _, _, _ := setupBlockchain(t)
+ steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress := setupBlockchain(t)
fromBlock := 1
// Setup bootstrap
@@ -1612,7 +1520,7 @@ func testIntegrationLLOBlueGreenLifecycle(t *testing.T, offchainConfig datastrea
clientPubKeys[i] = key.PublicKey
}
- steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress, _, _, _, _ := setupBlockchain(t)
+ steve, backend, configurator, configuratorAddress, _, _, _, _, configStore, configStoreAddress := setupBlockchain(t)
fromBlock := 1
// Setup bootstrap
diff --git a/core/services/ocr2/plugins/mercury/config/config.go b/core/services/ocr2/plugins/mercury/config/config.go
deleted file mode 100644
index 40854bd8c0a..00000000000
--- a/core/services/ocr2/plugins/mercury/config/config.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// config is a separate package so that we can validate
-// the config in other packages, for example in job at job create time.
-
-package config
-
-import (
- "errors"
- "fmt"
- "net/url"
- "regexp"
- "sort"
-
- pkgerrors "github.com/pkg/errors"
-
- "github.com/smartcontractkit/chainlink/v2/core/null"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-type PluginConfig struct {
- // Must either specify details for single server OR multiple servers.
- // Specifying both is not valid.
-
- // Single mercury server
- // LEGACY: This is the old way of specifying a mercury server
- RawServerURL string `json:"serverURL" toml:"serverURL"`
- ServerPubKey utils.PlainHexBytes `json:"serverPubKey" toml:"serverPubKey"`
-
- // Multi mercury servers
- // This is the preferred way to specify mercury server(s)
- Servers map[string]utils.PlainHexBytes `json:"servers" toml:"servers"`
-
- // InitialBlockNumber allows to set a custom "validFromBlockNumber" for
- // the first ever report in the case of a brand new feed, where the mercury
- // server does not have any previous reports. For a brand new feed, this
- // effectively sets the "first" validFromBlockNumber.
- InitialBlockNumber null.Int64 `json:"initialBlockNumber" toml:"initialBlockNumber"`
-
- LinkFeedID *mercuryutils.FeedID `json:"linkFeedID" toml:"linkFeedID"`
- NativeFeedID *mercuryutils.FeedID `json:"nativeFeedID" toml:"nativeFeedID"`
-}
-
-func validateURL(rawServerURL string) error {
- var normalizedURI string
- if schemeRegexp.MatchString(rawServerURL) {
- normalizedURI = rawServerURL
- } else {
- normalizedURI = fmt.Sprintf("wss://%s", rawServerURL)
- }
- uri, err := url.ParseRequestURI(normalizedURI)
- if err != nil {
- return pkgerrors.Errorf(`Mercury: invalid value for ServerURL, got: %q`, rawServerURL)
- }
- if uri.Scheme != "wss" {
- return pkgerrors.Errorf(`Mercury: invalid scheme specified for MercuryServer, got: %q (scheme: %q) but expected a websocket url e.g. "192.0.2.2:4242" or "wss://192.0.2.2:4242"`, rawServerURL, uri.Scheme)
- }
- return nil
-}
-
-type Server struct {
- URL string
- PubKey utils.PlainHexBytes
-}
-
-func (p PluginConfig) GetServers() (servers []Server) {
- if p.RawServerURL != "" {
- return []Server{{URL: wssRegexp.ReplaceAllString(p.RawServerURL, ""), PubKey: p.ServerPubKey}}
- }
- for url, pubKey := range p.Servers {
- servers = append(servers, Server{URL: wssRegexp.ReplaceAllString(url, ""), PubKey: pubKey})
- }
- sort.Slice(servers, func(i, j int) bool {
- return servers[i].URL < servers[j].URL
- })
- return
-}
-
-func ValidatePluginConfig(config PluginConfig, feedID mercuryutils.FeedID) (merr error) {
- if len(config.Servers) > 0 {
- if config.RawServerURL != "" || len(config.ServerPubKey) != 0 {
- merr = errors.Join(merr, errors.New("Mercury: Servers and RawServerURL/ServerPubKey may not be specified together"))
- } else {
- for serverName, serverPubKey := range config.Servers {
- if err := validateURL(serverName); err != nil {
- merr = errors.Join(merr, pkgerrors.Wrap(err, "Mercury: invalid value for ServerURL"))
- }
- if len(serverPubKey) != 32 {
- merr = errors.Join(merr, errors.New("Mercury: ServerPubKey must be a 32-byte hex string"))
- }
- }
- }
- } else if config.RawServerURL == "" {
- merr = errors.Join(merr, errors.New("Mercury: Servers must be specified"))
- } else {
- if err := validateURL(config.RawServerURL); err != nil {
- merr = errors.Join(merr, pkgerrors.Wrap(err, "Mercury: invalid value for ServerURL"))
- }
- if len(config.ServerPubKey) != 32 {
- merr = errors.Join(merr, errors.New("Mercury: If RawServerURL is specified, ServerPubKey is also required and must be a 32-byte hex string"))
- }
- }
-
- switch feedID.Version() {
- case 1:
- if config.LinkFeedID != nil {
- merr = errors.Join(merr, errors.New("linkFeedID may not be specified for v1 jobs"))
- }
- if config.NativeFeedID != nil {
- merr = errors.Join(merr, errors.New("nativeFeedID may not be specified for v1 jobs"))
- }
- case 2, 3, 4:
- if config.LinkFeedID == nil {
- merr = errors.Join(merr, fmt.Errorf("linkFeedID must be specified for v%d jobs", feedID.Version()))
- }
- if config.NativeFeedID == nil {
- merr = errors.Join(merr, fmt.Errorf("nativeFeedID must be specified for v%d jobs", feedID.Version()))
- }
- if config.InitialBlockNumber.Valid {
- merr = errors.Join(merr, fmt.Errorf("initialBlockNumber may not be specified for v%d jobs", feedID.Version()))
- }
- default:
- merr = errors.Join(merr, fmt.Errorf("got unsupported schema version %d; supported versions are 1,2,3,4", feedID.Version()))
- }
-
- return merr
-}
-
-var schemeRegexp = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+.-]*://`)
-var wssRegexp = regexp.MustCompile(`^wss://`)
diff --git a/core/services/ocr2/plugins/mercury/config/config_test.go b/core/services/ocr2/plugins/mercury/config/config_test.go
deleted file mode 100644
index 5beba287133..00000000000
--- a/core/services/ocr2/plugins/mercury/config/config_test.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package config
-
-import (
- "testing"
-
- "github.com/pelletier/go-toml/v2"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-var v1FeedId = [32]uint8{00, 01, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
-var v2FeedId = [32]uint8{00, 02, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
-
-func Test_PluginConfig(t *testing.T) {
- t.Run("Mercury v1", func(t *testing.T) {
- t.Run("with valid values", func(t *testing.T) {
- rawToml := `
- ServerURL = "example.com:80"
- ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"
- InitialBlockNumber = 1234
- `
-
- var mc PluginConfig
- err := toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- assert.Equal(t, "example.com:80", mc.RawServerURL)
- assert.Equal(t, "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.ServerPubKey.String())
- assert.Equal(t, int64(1234), mc.InitialBlockNumber.Int64)
-
- err = ValidatePluginConfig(mc, v1FeedId)
- require.NoError(t, err)
- })
- t.Run("with multiple server URLs", func(t *testing.T) {
- t.Run("if no ServerURL/ServerPubKey is specified", func(t *testing.T) {
- rawToml := `
- Servers = { "example.com:80" = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", "example2.invalid:1234" = "524ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" }
- `
-
- var mc PluginConfig
- err := toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- assert.Len(t, mc.Servers, 2)
- assert.Equal(t, "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.Servers["example.com:80"].String())
- assert.Equal(t, "524ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", mc.Servers["example2.invalid:1234"].String())
-
- err = ValidatePluginConfig(mc, v1FeedId)
- require.NoError(t, err)
- })
- t.Run("if ServerURL or ServerPubKey is specified", func(t *testing.T) {
- rawToml := `
- Servers = { "example.com:80" = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", "example2.invalid:1234" = "524ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" }
- ServerURL = "example.com:80"
- `
- var mc PluginConfig
- err := toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- err = ValidatePluginConfig(mc, v1FeedId)
- require.EqualError(t, err, "Mercury: Servers and RawServerURL/ServerPubKey may not be specified together")
-
- rawToml = `
- Servers = { "example.com:80" = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93", "example2.invalid:1234" = "524ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93" }
- ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"
- `
- err = toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- err = ValidatePluginConfig(mc, v1FeedId)
- require.EqualError(t, err, "Mercury: Servers and RawServerURL/ServerPubKey may not be specified together")
- })
- })
-
- t.Run("with invalid values", func(t *testing.T) {
- rawToml := `
- InitialBlockNumber = "invalid"
- `
-
- var mc PluginConfig
- err := toml.Unmarshal([]byte(rawToml), &mc)
- require.Error(t, err)
- assert.EqualError(t, err, `toml: strconv.ParseInt: parsing "invalid": invalid syntax`)
-
- rawToml = `
- ServerURL = "http://example.com"
- ServerPubKey = "4242"
- `
-
- err = toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- err = ValidatePluginConfig(mc, v1FeedId)
- require.Error(t, err)
- assert.Contains(t, err.Error(), `Mercury: invalid scheme specified for MercuryServer, got: "http://example.com" (scheme: "http") but expected a websocket url e.g. "192.0.2.2:4242" or "wss://192.0.2.2:4242"`)
- assert.Contains(t, err.Error(), `If RawServerURL is specified, ServerPubKey is also required and must be a 32-byte hex string`)
- })
-
- t.Run("with unnecessary values", func(t *testing.T) {
- rawToml := `
- ServerURL = "example.com:80"
- ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"
- LinkFeedID = "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472"
- `
-
- var mc PluginConfig
- err := toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- err = ValidatePluginConfig(mc, v1FeedId)
- assert.Contains(t, err.Error(), `linkFeedID may not be specified for v1 jobs`)
- })
- })
-
- t.Run("Mercury v2/v3", func(t *testing.T) {
- t.Run("with valid values", func(t *testing.T) {
- rawToml := `
- ServerURL = "example.com:80"
- ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"
- LinkFeedID = "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472"
- NativeFeedID = "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472"
- `
-
- var mc PluginConfig
- err := toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- err = ValidatePluginConfig(mc, v2FeedId)
- require.NoError(t, err)
-
- require.NotNil(t, mc.LinkFeedID)
- require.NotNil(t, mc.NativeFeedID)
- assert.Equal(t, "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", (*mc.LinkFeedID).String())
- assert.Equal(t, "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472", (*mc.NativeFeedID).String())
- })
-
- t.Run("with invalid values", func(t *testing.T) {
- var mc PluginConfig
-
- rawToml := `LinkFeedID = "test"`
- err := toml.Unmarshal([]byte(rawToml), &mc)
- assert.Contains(t, err.Error(), "toml: hash: expected a hex string starting with '0x'")
-
- rawToml = `LinkFeedID = "0xtest"`
- err = toml.Unmarshal([]byte(rawToml), &mc)
- assert.Contains(t, err.Error(), `toml: hash: UnmarshalText failed: encoding/hex: invalid byte: U+0074 't'`)
-
- rawToml = `
- ServerURL = "example.com:80"
- ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"
- LinkFeedID = "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472"
- `
- err = toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- err = ValidatePluginConfig(mc, v2FeedId)
- assert.Contains(t, err.Error(), "nativeFeedID must be specified for v2 jobs")
- })
-
- t.Run("with unnecessary values", func(t *testing.T) {
- rawToml := `
- ServerURL = "example.com:80"
- ServerPubKey = "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93"
- InitialBlockNumber = 1234
- `
-
- var mc PluginConfig
- err := toml.Unmarshal([]byte(rawToml), &mc)
- require.NoError(t, err)
-
- err = ValidatePluginConfig(mc, v2FeedId)
- assert.Contains(t, err.Error(), `initialBlockNumber may not be specified for v2 jobs`)
- })
- })
-}
-
-func Test_PluginConfig_GetServers(t *testing.T) {
- t.Run("with single server", func(t *testing.T) {
- pubKey := utils.PlainHexBytes([]byte{1, 2, 3})
- pc := PluginConfig{RawServerURL: "example.com", ServerPubKey: pubKey}
- require.Len(t, pc.GetServers(), 1)
- assert.Equal(t, "example.com", pc.GetServers()[0].URL)
- assert.Equal(t, pubKey, pc.GetServers()[0].PubKey)
-
- pc = PluginConfig{RawServerURL: "wss://example.com", ServerPubKey: pubKey}
- require.Len(t, pc.GetServers(), 1)
- assert.Equal(t, "example.com", pc.GetServers()[0].URL)
- assert.Equal(t, pubKey, pc.GetServers()[0].PubKey)
-
- pc = PluginConfig{RawServerURL: "example.com:1234/foo", ServerPubKey: pubKey}
- require.Len(t, pc.GetServers(), 1)
- assert.Equal(t, "example.com:1234/foo", pc.GetServers()[0].URL)
- assert.Equal(t, pubKey, pc.GetServers()[0].PubKey)
-
- pc = PluginConfig{RawServerURL: "wss://example.com:1234/foo", ServerPubKey: pubKey}
- require.Len(t, pc.GetServers(), 1)
- assert.Equal(t, "example.com:1234/foo", pc.GetServers()[0].URL)
- assert.Equal(t, pubKey, pc.GetServers()[0].PubKey)
- })
-
- t.Run("with multiple servers", func(t *testing.T) {
- servers := map[string]utils.PlainHexBytes{
- "example.com:80": utils.PlainHexBytes([]byte{1, 2, 3}),
- "mercuryserver.invalid:1234/foo": utils.PlainHexBytes([]byte{4, 5, 6}),
- }
- pc := PluginConfig{Servers: servers}
-
- require.Len(t, pc.GetServers(), 2)
- assert.Equal(t, "example.com:80", pc.GetServers()[0].URL)
- assert.Equal(t, utils.PlainHexBytes{1, 2, 3}, pc.GetServers()[0].PubKey)
- assert.Equal(t, "mercuryserver.invalid:1234/foo", pc.GetServers()[1].URL)
- assert.Equal(t, utils.PlainHexBytes{4, 5, 6}, pc.GetServers()[1].PubKey)
- })
-}
diff --git a/core/services/ocr2/plugins/mercury/helpers_test.go b/core/services/ocr2/plugins/mercury/helpers_test.go
deleted file mode 100644
index 91c3ff7f496..00000000000
--- a/core/services/ocr2/plugins/mercury/helpers_test.go
+++ /dev/null
@@ -1,547 +0,0 @@
-package mercury_test
-
-import (
- "context"
- "crypto/ed25519"
- "encoding/binary"
- "errors"
- "fmt"
- "math/big"
- "net"
- "strings"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest/observer"
-
- "github.com/smartcontractkit/wsrpc"
- "github.com/smartcontractkit/wsrpc/credentials"
- "github.com/smartcontractkit/wsrpc/peer"
-
- "github.com/smartcontractkit/libocr/offchainreporting2/chains/evmutil"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
-
- evmtypes "github.com/smartcontractkit/chainlink-integrations/evm/types"
- "github.com/smartcontractkit/chainlink-integrations/evm/utils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/keystest"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/chainlink"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/chaintype"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ocr2key"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/p2pkey"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocrbootstrap"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
- "github.com/smartcontractkit/chainlink/v2/core/utils/testutils/heavyweight"
-)
-
-var _ pb.MercuryServer = &mercuryServer{}
-
-type request struct {
- pk credentials.StaticSizedPublicKey
- req *pb.TransmitRequest
-}
-
-type mercuryServer struct {
- privKey ed25519.PrivateKey
- reqsCh chan request
- t *testing.T
- buildReport func() []byte
-}
-
-func NewMercuryServer(t *testing.T, privKey ed25519.PrivateKey, reqsCh chan request, buildReport func() []byte) *mercuryServer {
- return &mercuryServer{privKey, reqsCh, t, buildReport}
-}
-
-func (s *mercuryServer) Transmit(ctx context.Context, req *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- p, ok := peer.FromContext(ctx)
- if !ok {
- return nil, errors.New("could not extract public key")
- }
- r := request{p.PublicKey, req}
- s.reqsCh <- r
-
- return &pb.TransmitResponse{
- Code: 1,
- Error: "",
- }, nil
-}
-
-func (s *mercuryServer) LatestReport(ctx context.Context, lrr *pb.LatestReportRequest) (*pb.LatestReportResponse, error) {
- p, ok := peer.FromContext(ctx)
- if !ok {
- return nil, errors.New("could not extract public key")
- }
- s.t.Logf("mercury server got latest report from %x for feed id 0x%x", p.PublicKey, lrr.FeedId)
-
- out := new(pb.LatestReportResponse)
- out.Report = new(pb.Report)
- out.Report.FeedId = lrr.FeedId
-
- report := s.buildReport()
- payload, err := mercury.PayloadTypes.Pack(evmutil.RawReportContext(ocrtypes.ReportContext{}), report, [][32]byte{}, [][32]byte{}, [32]byte{})
- if err != nil {
- panic(err)
- }
- out.Report.Payload = payload
- return out, nil
-}
-
-func startMercuryServer(t *testing.T, srv *mercuryServer, pubKeys []ed25519.PublicKey) (serverURL string) {
- // Set up the wsrpc server
- lis, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("[MAIN] failed to listen: %v", err)
- }
- serverURL = lis.Addr().String()
- s := wsrpc.NewServer(wsrpc.WithCreds(srv.privKey, pubKeys))
-
- // Register mercury implementation with the wsrpc server
- pb.RegisterMercuryServer(s, srv)
-
- // Start serving
- go s.Serve(lis)
- t.Cleanup(s.Stop)
-
- return
-}
-
-type Feed struct {
- name string
- id [32]byte
- baseBenchmarkPrice *big.Int
- baseBid *big.Int
- baseAsk *big.Int
- baseMarketStatus uint32
-}
-
-func randomFeedID(version uint16) [32]byte {
- id := [32]byte(utils.NewHash())
- binary.BigEndian.PutUint16(id[:2], version)
- return id
-}
-
-type Node struct {
- App chainlink.Application
- ClientPubKey credentials.StaticSizedPublicKey
- KeyBundle ocr2key.KeyBundle
-}
-
-func (node *Node) AddJob(t *testing.T, spec string) {
- c := node.App.GetConfig()
- job, err := validate.ValidatedOracleSpecToml(testutils.Context(t), c.OCR2(), c.Insecure(), spec, nil)
- require.NoError(t, err)
- err = node.App.AddJobV2(testutils.Context(t), &job)
- require.NoError(t, err)
-}
-
-func (node *Node) AddBootstrapJob(t *testing.T, spec string) {
- job, err := ocrbootstrap.ValidatedBootstrapSpecToml(spec)
- require.NoError(t, err)
- err = node.App.AddJobV2(testutils.Context(t), &job)
- require.NoError(t, err)
-}
-
-func setupNode(
- t *testing.T,
- port int,
- dbName string,
- backend evmtypes.Backend,
- csaKey csakey.KeyV2,
-) (app chainlink.Application, peerID string, clientPubKey credentials.StaticSizedPublicKey, ocr2kb ocr2key.KeyBundle, observedLogs *observer.ObservedLogs) {
- k := big.NewInt(int64(port)) // keys unique to port
- p2pKey := p2pkey.MustNewV2XXXTestingOnly(k)
- rdr := keystest.NewRandReaderFromSeed(int64(port))
- ocr2kb = ocr2key.MustNewInsecure(rdr, chaintype.EVM)
-
- p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)}
-
- config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- // [JobPipeline]
- // MaxSuccessfulRuns = 0
- c.JobPipeline.MaxSuccessfulRuns = ptr(uint64(0))
- c.JobPipeline.VerboseLogging = ptr(true)
-
- // [Feature]
- // UICSAKeys=true
- // LogPoller = true
- // FeedsManager = false
- c.Feature.UICSAKeys = ptr(true)
- c.Feature.LogPoller = ptr(true)
- c.Feature.FeedsManager = ptr(false)
-
- // [OCR]
- // Enabled = false
- c.OCR.Enabled = ptr(false)
-
- // [OCR2]
- // Enabled = true
- c.OCR2.Enabled = ptr(true)
-
- // [P2P]
- // PeerID = '$PEERID'
- // TraceLogging = true
- c.P2P.PeerID = ptr(p2pKey.PeerID())
- c.P2P.TraceLogging = ptr(true)
-
- // [P2P.V2]
- // Enabled = true
- // AnnounceAddresses = ['$EXT_IP:17775']
- // ListenAddresses = ['127.0.0.1:17775']
- // DeltaDial = 500ms
- // DeltaReconcile = 5s
- c.P2P.V2.Enabled = ptr(true)
- c.P2P.V2.AnnounceAddresses = &p2paddresses
- c.P2P.V2.ListenAddresses = &p2paddresses
- c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond)
- c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second)
- })
-
- lggr, observedLogs := logger.TestLoggerObserved(t, zapcore.DebugLevel)
- app = cltest.NewApplicationWithConfigV2OnSimulatedBlockchain(t, config, backend, p2pKey, ocr2kb, csaKey, lggr.Named(dbName))
- err := app.Start(testutils.Context(t))
- require.NoError(t, err)
-
- t.Cleanup(func() {
- assert.NoError(t, app.Stop())
- })
-
- return app, p2pKey.PeerID().Raw(), csaKey.StaticSizedPublicKey(), ocr2kb, observedLogs
-}
-
-func ptr[T any](t T) *T { return &t }
-
-func addBootstrapJob(t *testing.T, bootstrapNode Node, chainID *big.Int, verifierAddress common.Address, feedName string, feedID [32]byte) {
- bootstrapNode.AddBootstrapJob(t, fmt.Sprintf(`
-type = "bootstrap"
-relay = "evm"
-schemaVersion = 1
-name = "boot-%s"
-contractID = "%s"
-feedID = "0x%x"
-contractConfigTrackerPollInterval = "1s"
-
-[relayConfig]
-chainID = %d
- `, feedName, verifierAddress, feedID, chainID))
-}
-
-func addV1MercuryJob(
- t *testing.T,
- node Node,
- i int,
- verifierAddress common.Address,
- bootstrapPeerID string,
- bootstrapNodePort int,
- bmBridge,
- bidBridge,
- askBridge,
- serverURL string,
- serverPubKey,
- clientPubKey ed25519.PublicKey,
- feedName string,
- feedID [32]byte,
- chainID *big.Int,
- fromBlock int,
-) {
- node.AddJob(t, fmt.Sprintf(`
-type = "offchainreporting2"
-schemaVersion = 1
-name = "mercury-%[1]d-%[14]s"
-forwardingAllowed = false
-maxTaskDuration = "1s"
-contractID = "%[2]s"
-feedID = "0x%[11]x"
-contractConfigTrackerPollInterval = "1s"
-ocrKeyBundleID = "%[3]s"
-p2pv2Bootstrappers = [
- "%[4]s"
-]
-relay = "evm"
-pluginType = "mercury"
-transmitterID = "%[10]x"
-observationSource = """
- // Benchmark Price
- price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- price1_parse [type=jsonparse path="result"];
- price1_multiply [type=multiply times=100000000 index=0];
-
- price1 -> price1_parse -> price1_multiply;
-
- // Bid
- bid [type=bridge name="%[6]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- bid_parse [type=jsonparse path="result"];
- bid_multiply [type=multiply times=100000000 index=1];
-
- bid -> bid_parse -> bid_multiply;
-
- // Ask
- ask [type=bridge name="%[7]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- ask_parse [type=jsonparse path="result"];
- ask_multiply [type=multiply times=100000000 index=2];
-
- ask -> ask_parse -> ask_multiply;
-"""
-
-[pluginConfig]
-serverURL = "%[8]s"
-serverPubKey = "%[9]x"
-initialBlockNumber = %[13]d
-
-[relayConfig]
-chainID = %[12]d
-
- `,
- i,
- verifierAddress,
- node.KeyBundle.ID(),
- fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort),
- bmBridge,
- bidBridge,
- askBridge,
- serverURL,
- serverPubKey,
- clientPubKey,
- feedID,
- chainID,
- fromBlock,
- feedName,
- ))
-}
-
-func addV2MercuryJob(
- t *testing.T,
- node Node,
- i int,
- verifierAddress common.Address,
- bootstrapPeerID string,
- bootstrapNodePort int,
- bmBridge,
- serverURL string,
- serverPubKey,
- clientPubKey ed25519.PublicKey,
- feedName string,
- feedID [32]byte,
- linkFeedID [32]byte,
- nativeFeedID [32]byte,
-) {
- node.AddJob(t, fmt.Sprintf(`
-type = "offchainreporting2"
-schemaVersion = 1
-name = "mercury-%[1]d-%[10]s"
-forwardingAllowed = false
-maxTaskDuration = "1s"
-contractID = "%[2]s"
-feedID = "0x%[9]x"
-contractConfigTrackerPollInterval = "1s"
-ocrKeyBundleID = "%[3]s"
-p2pv2Bootstrappers = [
- "%[4]s"
-]
-relay = "evm"
-pluginType = "mercury"
-transmitterID = "%[8]x"
-observationSource = """
- // Benchmark Price
- price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- price1_parse [type=jsonparse path="result"];
- price1_multiply [type=multiply times=100000000 index=0];
-
- price1 -> price1_parse -> price1_multiply;
-"""
-
-[pluginConfig]
-serverURL = "%[6]s"
-serverPubKey = "%[7]x"
-linkFeedID = "0x%[11]x"
-nativeFeedID = "0x%[12]x"
-
-[relayConfig]
-chainID = 1337
- `,
- i,
- verifierAddress,
- node.KeyBundle.ID(),
- fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort),
- bmBridge,
- serverURL,
- serverPubKey,
- clientPubKey,
- feedID,
- feedName,
- linkFeedID,
- nativeFeedID,
- ))
-}
-
-func addV3MercuryJob(
- t *testing.T,
- node Node,
- i int,
- verifierAddress common.Address,
- bootstrapPeerID string,
- bootstrapNodePort int,
- bmBridge,
- bidBridge,
- askBridge string,
- servers map[string]string,
- clientPubKey ed25519.PublicKey,
- feedName string,
- feedID [32]byte,
- linkFeedID [32]byte,
- nativeFeedID [32]byte,
-) {
- srvs := make([]string, 0, len(servers))
- for u, k := range servers {
- srvs = append(srvs, fmt.Sprintf("%q = %q", u, k))
- }
- serversStr := fmt.Sprintf("{ %s }", strings.Join(srvs, ", "))
-
- node.AddJob(t, fmt.Sprintf(`
-type = "offchainreporting2"
-schemaVersion = 1
-name = "mercury-%[1]d-%[11]s"
-forwardingAllowed = false
-maxTaskDuration = "1s"
-contractID = "%[2]s"
-feedID = "0x%[10]x"
-contractConfigTrackerPollInterval = "1s"
-ocrKeyBundleID = "%[3]s"
-p2pv2Bootstrappers = [
- "%[4]s"
-]
-relay = "evm"
-pluginType = "mercury"
-transmitterID = "%[9]x"
-observationSource = """
- // Benchmark Price
- price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- price1_parse [type=jsonparse path="result"];
- price1_multiply [type=multiply times=100000000 index=0];
-
- price1 -> price1_parse -> price1_multiply;
-
- // Bid
- bid [type=bridge name="%[6]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- bid_parse [type=jsonparse path="result"];
- bid_multiply [type=multiply times=100000000 index=1];
-
- bid -> bid_parse -> bid_multiply;
-
- // Ask
- ask [type=bridge name="%[7]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- ask_parse [type=jsonparse path="result"];
- ask_multiply [type=multiply times=100000000 index=2];
-
- ask -> ask_parse -> ask_multiply;
-"""
-
-[pluginConfig]
-servers = %[8]s
-linkFeedID = "0x%[12]x"
-nativeFeedID = "0x%[13]x"
-
-[relayConfig]
-chainID = 1337
- `,
- i,
- verifierAddress,
- node.KeyBundle.ID(),
- fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort),
- bmBridge,
- bidBridge,
- askBridge,
- serversStr,
- clientPubKey,
- feedID,
- feedName,
- linkFeedID,
- nativeFeedID,
- ))
-}
-
-func addV4MercuryJob(
- t *testing.T,
- node Node,
- i int,
- verifierAddress common.Address,
- bootstrapPeerID string,
- bootstrapNodePort int,
- bmBridge,
- marketStatusBridge string,
- servers map[string]string,
- clientPubKey ed25519.PublicKey,
- feedName string,
- feedID [32]byte,
- linkFeedID [32]byte,
- nativeFeedID [32]byte,
-) {
- srvs := make([]string, 0, len(servers))
- for u, k := range servers {
- srvs = append(srvs, fmt.Sprintf("%q = %q", u, k))
- }
- serversStr := fmt.Sprintf("{ %s }", strings.Join(srvs, ", "))
-
- node.AddJob(t, fmt.Sprintf(`
-type = "offchainreporting2"
-schemaVersion = 1
-name = "mercury-%[1]d-%[9]s"
-forwardingAllowed = false
-maxTaskDuration = "1s"
-contractID = "%[2]s"
-feedID = "0x%[8]x"
-contractConfigTrackerPollInterval = "1s"
-ocrKeyBundleID = "%[3]s"
-p2pv2Bootstrappers = [
- "%[4]s"
-]
-relay = "evm"
-pluginType = "mercury"
-transmitterID = "%[7]x"
-observationSource = """
- // Benchmark Price
- price1 [type=bridge name="%[5]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- price1_parse [type=jsonparse path="result"];
- price1_multiply [type=multiply times=100000000 index=0];
-
- price1 -> price1_parse -> price1_multiply;
-
- // Market Status
- marketstatus [type=bridge name="%[12]s" timeout="50ms" requestData="{\\"data\\":{\\"from\\":\\"ETH\\",\\"to\\":\\"USD\\"}}"];
- marketstatus_parse [type=jsonparse path="result" index=1];
-
- marketstatus -> marketstatus_parse;
-"""
-
-[pluginConfig]
-servers = %[6]s
-linkFeedID = "0x%[10]x"
-nativeFeedID = "0x%[11]x"
-
-[relayConfig]
-chainID = 1337
- `,
- i,
- verifierAddress,
- node.KeyBundle.ID(),
- fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort),
- bmBridge,
- serversStr,
- clientPubKey,
- feedID,
- feedName,
- linkFeedID,
- nativeFeedID,
- marketStatusBridge,
- ))
-}
diff --git a/core/services/ocr2/plugins/mercury/integration_plugin_test.go b/core/services/ocr2/plugins/mercury/integration_plugin_test.go
deleted file mode 100644
index 1dedaadcb54..00000000000
--- a/core/services/ocr2/plugins/mercury/integration_plugin_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-//go:build integration
-
-package mercury_test
-
-import (
- "testing"
-
- "github.com/smartcontractkit/chainlink/v2/core/config/env"
-)
-
-func TestIntegration_MercuryV1_Plugin(t *testing.T) {
- t.Setenv(string(env.MercuryPlugin.Cmd), "chainlink-mercury")
- integration_MercuryV1(t)
-}
-
-func TestIntegration_MercuryV2_Plugin(t *testing.T) {
- t.Setenv(string(env.MercuryPlugin.Cmd), "chainlink-mercury")
- integration_MercuryV2(t)
-}
-
-func TestIntegration_MercuryV3_Plugin(t *testing.T) {
- t.Setenv(string(env.MercuryPlugin.Cmd), "chainlink-mercury")
- integration_MercuryV3(t)
-}
diff --git a/core/services/ocr2/plugins/mercury/integration_test.go b/core/services/ocr2/plugins/mercury/integration_test.go
deleted file mode 100644
index f856d88a25d..00000000000
--- a/core/services/ocr2/plugins/mercury/integration_test.go
+++ /dev/null
@@ -1,1356 +0,0 @@
-package mercury_test
-
-import (
- "crypto/ed25519"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "io"
- "math"
- "math/big"
- "math/rand"
- "net/http"
- "net/http/httptest"
- "net/url"
- "strings"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/hashicorp/consul/sdk/freeport"
- "github.com/shopspring/decimal"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper"
- ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/wsrpc/credentials"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest/observer"
-
- mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
- v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
- v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
- v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
- datastreamsmercury "github.com/smartcontractkit/chainlink-data-streams/mercury"
-
- "github.com/smartcontractkit/chainlink-integrations/evm/assets"
- evmtestutils "github.com/smartcontractkit/chainlink-integrations/evm/testutils"
- evmtypes "github.com/smartcontractkit/chainlink-integrations/evm/types"
- "github.com/smartcontractkit/chainlink/v2/core/bridges"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/link_token_interface"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/fee_manager"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/reward_manager"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy"
- "github.com/smartcontractkit/chainlink/v2/core/internal/cltest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
- reportcodecv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/reportcodec"
- reportcodecv2 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2/reportcodec"
- reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec"
- reportcodecv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
- "github.com/smartcontractkit/chainlink/v2/core/store/models"
-)
-
-var (
- f = uint8(1)
- n = 4 // number of nodes
- multiplier int64 = 100000000
- rawOnchainConfig = mercurytypes.OnchainConfig{
- Min: big.NewInt(0),
- Max: big.NewInt(math.MaxInt64),
- }
- rawReportingPluginConfig = datastreamsmercury.OffchainConfig{
- ExpirationWindow: 1,
- BaseUSDFee: decimal.NewFromInt(100),
- }
-)
-
-func detectPanicLogs(t *testing.T, logObservers []*observer.ObservedLogs) {
- var panicLines []string
- for _, observedLogs := range logObservers {
- panicLogs := observedLogs.Filter(func(e observer.LoggedEntry) bool {
- return e.Level >= zapcore.DPanicLevel
- })
- for _, log := range panicLogs.All() {
- line := fmt.Sprintf("%v\t%s\t%s\t%s\t%s", log.Time.Format(time.RFC3339), log.Level.CapitalString(), log.LoggerName, log.Caller.TrimmedPath(), log.Message)
- panicLines = append(panicLines, line)
- }
- }
- if len(panicLines) > 0 {
- t.Errorf("Found logs with DPANIC or higher level:\n%s", strings.Join(panicLines, "\n"))
- }
-}
-
-func setupBlockchain(t *testing.T) (*bind.TransactOpts, evmtypes.Backend, *verifier.Verifier, common.Address, func() common.Hash) {
- steve := evmtestutils.MustNewSimTransactor(t) // config contract deployer and owner
- genesisData := types.GenesisAlloc{steve.From: {Balance: assets.Ether(1000).ToInt()}}
- backend := cltest.NewSimulatedBackend(t, genesisData, ethconfig.Defaults.Miner.GasCeil)
- backend.Commit() // ensure starting block number at least 1
- commit, stopMining := cltest.Mine(backend, 1*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain
- t.Cleanup(stopMining)
-
- // Deploy contracts
- linkTokenAddress, _, linkToken, err := link_token_interface.DeployLinkToken(steve, backend.Client())
- require.NoError(t, err)
- commit()
- _, err = linkToken.Transfer(steve, steve.From, big.NewInt(1000))
- require.NoError(t, err)
- commit()
- nativeTokenAddress, _, nativeToken, err := link_token_interface.DeployLinkToken(steve, backend.Client())
- require.NoError(t, err)
- commit()
-
- _, err = nativeToken.Transfer(steve, steve.From, big.NewInt(1000))
- require.NoError(t, err)
- commit()
- verifierProxyAddr, _, verifierProxy, err := verifier_proxy.DeployVerifierProxy(steve, backend.Client(), common.Address{}) // zero address for access controller disables access control
- require.NoError(t, err)
- commit()
- verifierAddress, _, verifier, err := verifier.DeployVerifier(steve, backend.Client(), verifierProxyAddr)
- require.NoError(t, err)
- commit()
- _, err = verifierProxy.InitializeVerifier(steve, verifierAddress)
- require.NoError(t, err)
- commit()
- rewardManagerAddr, _, rewardManager, err := reward_manager.DeployRewardManager(steve, backend.Client(), linkTokenAddress)
- require.NoError(t, err)
- commit()
- feeManagerAddr, _, _, err := fee_manager.DeployFeeManager(steve, backend.Client(), linkTokenAddress, nativeTokenAddress, verifierProxyAddr, rewardManagerAddr)
- require.NoError(t, err)
- commit()
- _, err = verifierProxy.SetFeeManager(steve, feeManagerAddr)
- require.NoError(t, err)
- commit()
- _, err = rewardManager.SetFeeManager(steve, feeManagerAddr)
- require.NoError(t, err)
- commit()
-
- return steve, backend, verifier, verifierAddress, commit
-}
-
-func TestIntegration_MercuryV1(t *testing.T) {
- t.Parallel()
-
- integration_MercuryV1(t)
-}
-
-func integration_MercuryV1(t *testing.T) {
- ctx := testutils.Context(t)
- var logObservers []*observer.ObservedLogs
- t.Cleanup(func() {
- detectPanicLogs(t, logObservers)
- })
- lggr := logger.TestLogger(t)
- testStartTimeStamp := uint32(time.Now().Unix())
-
- // test vars
- // pError is the probability that an EA will return an error instead of a result, as integer percentage
- // pError = 0 means it will never return error
- pError := atomic.Int64{}
-
- // feeds
- btcFeed := Feed{"BTC/USD", randomFeedID(1), big.NewInt(20_000 * multiplier), big.NewInt(19_997 * multiplier), big.NewInt(20_004 * multiplier), 0}
- ethFeed := Feed{"ETH/USD", randomFeedID(1), big.NewInt(1_568 * multiplier), big.NewInt(1_566 * multiplier), big.NewInt(1_569 * multiplier), 0}
- linkFeed := Feed{"LINK/USD", randomFeedID(1), big.NewInt(7150 * multiplier / 1000), big.NewInt(7123 * multiplier / 1000), big.NewInt(7177 * multiplier / 1000), 0}
- feeds := []Feed{btcFeed, ethFeed, linkFeed}
- feedM := make(map[[32]byte]Feed, len(feeds))
- for i := range feeds {
- feedM[feeds[i].id] = feeds[i]
- }
-
- reqs := make(chan request)
- serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1))
- serverPubKey := serverKey.PublicKey
- srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs, func() []byte {
- report, err := (&reportcodecv1.ReportCodec{}).BuildReport(ctx, v1.ReportFields{BenchmarkPrice: big.NewInt(234567), Bid: big.NewInt(1), Ask: big.NewInt(1), CurrentBlockHash: make([]byte, 32)})
- if err != nil {
- panic(err)
- }
- return report
- })
- clientCSAKeys := make([]csakey.KeyV2, n+1)
- clientPubKeys := make([]ed25519.PublicKey, n+1)
- for i := 0; i < n+1; i++ {
- k := big.NewInt(int64(i))
- key := csakey.MustNewV2XXXTestingOnly(k)
- clientCSAKeys[i] = key
- clientPubKeys[i] = key.PublicKey
- }
- serverURL := startMercuryServer(t, srv, clientPubKeys)
- chainID := testutils.SimulatedChainID
-
- steve, backend, verifier, verifierAddress, commit := setupBlockchain(t)
-
- // Setup bootstrap + oracle nodes
- bootstrapNodePort := freeport.GetOne(t)
- appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n])
- bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
- logObservers = append(logObservers, observedLogs)
-
- // cannot use zero, start from finality depth
- fromBlock := func() int {
- // Commit blocks to finality depth to ensure LogPoller has finalized blocks to read from
- ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String())
- require.NoError(t, err)
- finalityDepth := ch.Config().EVM().FinalityDepth()
- for i := 0; i < int(finalityDepth); i++ {
- commit()
- }
- return int(finalityDepth)
- }()
-
- // Set up n oracles
- var (
- oracles []confighelper.OracleIdentityExtra
- nodes []Node
- )
- ports := freeport.GetN(t, n)
- for i := 0; i < n; i++ {
- app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i])
-
- nodes = append(nodes, Node{
- app, transmitter, kb,
- })
- offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x"))
- oracles = append(oracles, confighelper.OracleIdentityExtra{
- OracleIdentity: confighelper.OracleIdentity{
- OnchainPublicKey: offchainPublicKey,
- TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])),
- OffchainPublicKey: kb.OffchainPublicKey(),
- PeerID: peerID,
- },
- ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
- })
- logObservers = append(logObservers, observedLogs)
- }
-
- for _, feed := range feeds {
- addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id)
- }
-
- createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) {
- bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
- b, herr := io.ReadAll(req.Body)
- require.NoError(t, herr)
- require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b))
-
- r := rand.Int63n(101)
- if r > pError.Load() {
- res.WriteHeader(http.StatusOK)
- val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String()
- resp := fmt.Sprintf(`{"result": %s}`, val)
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- } else {
- res.WriteHeader(http.StatusInternalServerError)
- resp := `{"error": "pError test error"}`
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- }
- }))
- t.Cleanup(bridge.Close)
- u, _ := url.Parse(bridge.URL)
- bridgeName = fmt.Sprintf("bridge-%s-%d", name, i)
- require.NoError(t, borm.CreateBridgeType(ctx, &bridges.BridgeType{
- Name: bridges.BridgeName(bridgeName),
- URL: models.WebURL(*u),
- }))
-
- return bridgeName
- }
-
- // Add OCR jobs - one per feed on each node
- for i, node := range nodes {
- for j, feed := range feeds {
- bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, node.App.BridgeORM())
- askBridge := createBridge(fmt.Sprintf("ask-%d", j), i, feed.baseAsk, node.App.BridgeORM())
- bidBridge := createBridge(fmt.Sprintf("bid-%d", j), i, feed.baseBid, node.App.BridgeORM())
-
- addV1MercuryJob(
- t,
- node,
- i,
- verifierAddress,
- bootstrapPeerID,
- bootstrapNodePort,
- bmBridge,
- bidBridge,
- askBridge,
- serverURL,
- serverPubKey,
- clientPubKeys[i],
- feed.name,
- feed.id,
- chainID,
- fromBlock,
- )
- }
- }
- // Setup config on contract
- onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(ctx, rawOnchainConfig)
- require.NoError(t, err)
-
- reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig)
- require.NoError(t, err)
-
- signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02(
- 2*time.Second, // DeltaProgress
- 20*time.Second, // DeltaResend
- 400*time.Millisecond, // DeltaInitial
- 200*time.Millisecond, // DeltaRound
- 100*time.Millisecond, // DeltaGrace
- 300*time.Millisecond, // DeltaCertifiedCommitRequest
- 1*time.Minute, // DeltaStage
- 100, // rMax
- []int{len(nodes)}, // S
- oracles,
- reportingPluginConfig, // reportingPluginConfig []byte,
- nil,
- 250*time.Millisecond, // Max duration observation
- int(f), // f
- onchainConfig,
- )
-
- require.NoError(t, err)
- signerAddresses, err := evm.OnchainPublicKeyToAddress(signers)
- require.NoError(t, err)
-
- offchainTransmitters := make([][32]byte, n)
- for i := 0; i < n; i++ {
- offchainTransmitters[i] = nodes[i].ClientPubKey
- }
-
- for i, feed := range feeds {
- lggr.Infow("Setting Config on Oracle Contract",
- "i", i,
- "feedID", feed.id,
- "feedName", feed.name,
- "signerAddresses", signerAddresses,
- "offchainTransmitters", offchainTransmitters,
- "f", f,
- "onchainConfig", onchainConfig,
- "offchainConfigVersion", offchainConfigVersion,
- "offchainConfig", offchainConfig,
- )
-
- _, ferr := verifier.SetConfig(
- steve,
- feed.id,
- signerAddresses,
- offchainTransmitters,
- f,
- onchainConfig,
- offchainConfigVersion,
- offchainConfig,
- nil,
- )
- require.NoError(t, ferr)
- commit()
- }
-
- t.Run("receives at least one report per feed from each oracle when EAs are at 100% reliability", func(t *testing.T) {
- ctx := testutils.Context(t)
- // Expect at least one report per feed from each oracle
- seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{})
- for i := range feeds {
- // feedID will be deleted when all n oracles have reported
- seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n)
- }
-
- for req := range reqs {
- v := make(map[string]interface{})
- err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload)
- require.NoError(t, err)
- report, exists := v["report"]
- if !exists {
- t.Fatalf("expected payload %#v to contain 'report'", v)
- }
- reportElems := make(map[string]interface{})
- err = reportcodecv1.ReportTypes.UnpackIntoMap(reportElems, report.([]byte))
- require.NoError(t, err)
-
- feedID := reportElems["feedId"].([32]uint8)
- feed, exists := feedM[feedID]
- require.True(t, exists)
-
- if _, exists := seen[feedID]; !exists {
- continue // already saw all oracles for this feed
- }
-
- num, err := (&reportcodecv1.ReportCodec{}).CurrentBlockNumFromReport(ctx, ocr2types.Report(report.([]byte)))
- require.NoError(t, err)
- currentBlock, err := backend.Client().BlockByNumber(ctx, nil)
- require.NoError(t, err)
-
- assert.GreaterOrEqual(t, currentBlock.Number().Int64(), num)
-
- expectedBm := feed.baseBenchmarkPrice
- expectedBid := feed.baseBid
- expectedAsk := feed.baseAsk
-
- assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp))
- assert.InDelta(t, expectedBm.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000)
- assert.InDelta(t, expectedBid.Int64(), reportElems["bid"].(*big.Int).Int64(), 5000000)
- assert.InDelta(t, expectedAsk.Int64(), reportElems["ask"].(*big.Int).Int64(), 5000000)
- assert.GreaterOrEqual(t, int(currentBlock.Number().Int64()), int(reportElems["currentBlockNum"].(uint64)))
- assert.GreaterOrEqual(t, currentBlock.Time(), reportElems["currentBlockTimestamp"].(uint64))
- assert.NotEqual(t, common.Hash{}, common.Hash(reportElems["currentBlockHash"].([32]uint8)))
- assert.LessOrEqual(t, int(reportElems["validFromBlockNum"].(uint64)), int(reportElems["currentBlockNum"].(uint64)))
- assert.Less(t, int64(0), int64(reportElems["validFromBlockNum"].(uint64)))
-
- t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id)
-
- seen[feedID][req.pk] = struct{}{}
- if len(seen[feedID]) == n {
- t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id)
- delete(seen, feedID)
- if len(seen) == 0 {
- break // saw all oracles; success!
- }
- }
- }
- })
-
- t.Run("receives at least one report per feed from each oracle when EAs are at 80% reliability", func(t *testing.T) {
- ctx := testutils.Context(t)
- pError.Store(20) // 20% chance of EA error
-
- // Expect at least one report per feed from each oracle
- seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{})
- for i := range feeds {
- // feedID will be deleted when all n oracles have reported
- seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n)
- }
-
- for req := range reqs {
- v := make(map[string]interface{})
- err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload)
- require.NoError(t, err)
- report, exists := v["report"]
- if !exists {
- t.Fatalf("expected payload %#v to contain 'report'", v)
- }
- reportElems := make(map[string]interface{})
- err = reportcodecv1.ReportTypes.UnpackIntoMap(reportElems, report.([]byte))
- require.NoError(t, err)
-
- feedID := reportElems["feedId"].([32]uint8)
- feed, exists := feedM[feedID]
- require.True(t, exists)
-
- if _, exists := seen[feedID]; !exists {
- continue // already saw all oracles for this feed
- }
-
- num, err := (&reportcodecv1.ReportCodec{}).CurrentBlockNumFromReport(ctx, report.([]byte))
- require.NoError(t, err)
- currentBlock, err := backend.Client().BlockByNumber(testutils.Context(t), nil)
- require.NoError(t, err)
-
- assert.GreaterOrEqual(t, currentBlock.Number().Int64(), num)
-
- expectedBm := feed.baseBenchmarkPrice
- expectedBid := feed.baseBid
- expectedAsk := feed.baseAsk
-
- assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp))
- assert.InDelta(t, expectedBm.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000)
- assert.InDelta(t, expectedBid.Int64(), reportElems["bid"].(*big.Int).Int64(), 5000000)
- assert.InDelta(t, expectedAsk.Int64(), reportElems["ask"].(*big.Int).Int64(), 5000000)
- assert.GreaterOrEqual(t, int(currentBlock.Number().Int64()), int(reportElems["currentBlockNum"].(uint64)))
- assert.GreaterOrEqual(t, currentBlock.Time(), reportElems["currentBlockTimestamp"].(uint64))
- assert.NotEqual(t, common.Hash{}, common.Hash(reportElems["currentBlockHash"].([32]uint8)))
- assert.LessOrEqual(t, int(reportElems["validFromBlockNum"].(uint64)), int(reportElems["currentBlockNum"].(uint64)))
-
- t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id)
-
- seen[feedID][req.pk] = struct{}{}
- if len(seen[feedID]) == n {
- t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id)
- delete(seen, feedID)
- if len(seen) == 0 {
- break // saw all oracles; success!
- }
- }
- }
- })
-}
-
-func TestIntegration_MercuryV2(t *testing.T) {
- t.Parallel()
-
- integration_MercuryV2(t)
-}
-
-func integration_MercuryV2(t *testing.T) {
- ctx := testutils.Context(t)
- var logObservers []*observer.ObservedLogs
- t.Cleanup(func() {
- detectPanicLogs(t, logObservers)
- })
-
- testStartTimeStamp := uint32(time.Now().Unix())
-
- // test vars
- // pError is the probability that an EA will return an error instead of a result, as integer percentage
- // pError = 0 means it will never return error
- pError := atomic.Int64{}
-
- // feeds
- btcFeed := Feed{
- name: "BTC/USD",
- id: randomFeedID(2),
- baseBenchmarkPrice: big.NewInt(20_000 * multiplier),
- }
- ethFeed := Feed{
- name: "ETH/USD",
- id: randomFeedID(2),
- baseBenchmarkPrice: big.NewInt(1_568 * multiplier),
- }
- linkFeed := Feed{
- name: "LINK/USD",
- id: randomFeedID(2),
- baseBenchmarkPrice: big.NewInt(7150 * multiplier / 1000),
- }
- feeds := []Feed{btcFeed, ethFeed, linkFeed}
- feedM := make(map[[32]byte]Feed, len(feeds))
- for i := range feeds {
- feedM[feeds[i].id] = feeds[i]
- }
-
- reqs := make(chan request)
- serverKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(-1))
- serverPubKey := serverKey.PublicKey
- srv := NewMercuryServer(t, ed25519.PrivateKey(serverKey.Raw()), reqs, func() []byte {
- report, err := (&reportcodecv2.ReportCodec{}).BuildReport(ctx, v2.ReportFields{BenchmarkPrice: big.NewInt(234567), LinkFee: big.NewInt(1), NativeFee: big.NewInt(1)})
- if err != nil {
- panic(err)
- }
- return report
- })
- clientCSAKeys := make([]csakey.KeyV2, n+1)
- clientPubKeys := make([]ed25519.PublicKey, n+1)
- for i := 0; i < n+1; i++ {
- k := big.NewInt(int64(i))
- key := csakey.MustNewV2XXXTestingOnly(k)
- clientCSAKeys[i] = key
- clientPubKeys[i] = key.PublicKey
- }
- serverURL := startMercuryServer(t, srv, clientPubKeys)
- chainID := testutils.SimulatedChainID
-
- steve, backend, verifier, verifierAddress, commit := setupBlockchain(t)
-
- // Setup bootstrap + oracle nodes
- bootstrapNodePort := freeport.GetOne(t)
- appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n])
- bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
- logObservers = append(logObservers, observedLogs)
-
- // Commit blocks to finality depth to ensure LogPoller has finalized blocks to read from
- ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String())
- require.NoError(t, err)
- finalityDepth := ch.Config().EVM().FinalityDepth()
- for i := 0; i < int(finalityDepth); i++ {
- commit()
- }
-
- // Set up n oracles
- var (
- oracles []confighelper.OracleIdentityExtra
- nodes []Node
- )
- ports := freeport.GetN(t, n)
- for i := 0; i < n; i++ {
- app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i])
-
- nodes = append(nodes, Node{
- app, transmitter, kb,
- })
-
- offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x"))
- oracles = append(oracles, confighelper.OracleIdentityExtra{
- OracleIdentity: confighelper.OracleIdentity{
- OnchainPublicKey: offchainPublicKey,
- TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])),
- OffchainPublicKey: kb.OffchainPublicKey(),
- PeerID: peerID,
- },
- ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
- })
- logObservers = append(logObservers, observedLogs)
- }
-
- for _, feed := range feeds {
- addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id)
- }
-
- createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) {
- bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
- b, herr := io.ReadAll(req.Body)
- require.NoError(t, herr)
- require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b))
-
- r := rand.Int63n(101)
- if r > pError.Load() {
- res.WriteHeader(http.StatusOK)
- val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String()
- resp := fmt.Sprintf(`{"result": %s}`, val)
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- } else {
- res.WriteHeader(http.StatusInternalServerError)
- resp := `{"error": "pError test error"}`
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- }
- }))
- t.Cleanup(bridge.Close)
- u, _ := url.Parse(bridge.URL)
- bridgeName = fmt.Sprintf("bridge-%s-%d", name, i)
- require.NoError(t, borm.CreateBridgeType(ctx, &bridges.BridgeType{
- Name: bridges.BridgeName(bridgeName),
- URL: models.WebURL(*u),
- }))
-
- return bridgeName
- }
-
- // Add OCR jobs - one per feed on each node
- for i, node := range nodes {
- for j, feed := range feeds {
- bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, node.App.BridgeORM())
-
- addV2MercuryJob(
- t,
- node,
- i,
- verifierAddress,
- bootstrapPeerID,
- bootstrapNodePort,
- bmBridge,
- serverURL,
- serverPubKey,
- clientPubKeys[i],
- feed.name,
- feed.id,
- randomFeedID(2),
- randomFeedID(2),
- )
- }
- }
-
- // Setup config on contract
- onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(ctx, rawOnchainConfig)
- require.NoError(t, err)
-
- reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig)
- require.NoError(t, err)
-
- signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02(
- 2*time.Second, // DeltaProgress
- 20*time.Second, // DeltaResend
- 400*time.Millisecond, // DeltaInitial
- 100*time.Millisecond, // DeltaRound
- 0, // DeltaGrace
- 300*time.Millisecond, // DeltaCertifiedCommitRequest
- 1*time.Minute, // DeltaStage
- 100, // rMax
- []int{len(nodes)}, // S
- oracles,
- reportingPluginConfig, // reportingPluginConfig []byte,
- nil,
- 250*time.Millisecond, // Max duration observation
- int(f), // f
- onchainConfig,
- )
-
- require.NoError(t, err)
- signerAddresses, err := evm.OnchainPublicKeyToAddress(signers)
- require.NoError(t, err)
-
- offchainTransmitters := make([][32]byte, n)
- for i := 0; i < n; i++ {
- offchainTransmitters[i] = nodes[i].ClientPubKey
- }
-
- for _, feed := range feeds {
- _, ferr := verifier.SetConfig(
- steve,
- feed.id,
- signerAddresses,
- offchainTransmitters,
- f,
- onchainConfig,
- offchainConfigVersion,
- offchainConfig,
- nil,
- )
- require.NoError(t, ferr)
- commit()
- }
-
- runTestSetup := func() {
- // Expect at least one report per feed from each oracle
- seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{})
- for i := range feeds {
- // feedID will be deleted when all n oracles have reported
- seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n)
- }
-
- for req := range reqs {
- v := make(map[string]interface{})
- err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload)
- require.NoError(t, err)
- report, exists := v["report"]
- if !exists {
- t.Fatalf("expected payload %#v to contain 'report'", v)
- }
- reportElems := make(map[string]interface{})
- err = reportcodecv2.ReportTypes.UnpackIntoMap(reportElems, report.([]byte))
- require.NoError(t, err)
-
- feedID := reportElems["feedId"].([32]uint8)
- feed, exists := feedM[feedID]
- require.True(t, exists)
-
- if _, exists := seen[feedID]; !exists {
- continue // already saw all oracles for this feed
- }
-
- expectedFee := datastreamsmercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee)
- expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow
-
- assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp))
- assert.InDelta(t, feed.baseBenchmarkPrice.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000)
- assert.NotZero(t, reportElems["validFromTimestamp"].(uint32))
- assert.GreaterOrEqual(t, reportElems["observationsTimestamp"].(uint32), reportElems["validFromTimestamp"].(uint32))
- assert.Equal(t, expectedExpiresAt, reportElems["expiresAt"].(uint32))
- assert.Equal(t, expectedFee, reportElems["linkFee"].(*big.Int))
- assert.Equal(t, expectedFee, reportElems["nativeFee"].(*big.Int))
-
- t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id)
-
- seen[feedID][req.pk] = struct{}{}
- if len(seen[feedID]) == n {
- t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id)
- delete(seen, feedID)
- if len(seen) == 0 {
- break // saw all oracles; success!
- }
- }
- }
- }
-
- t.Run("receives at least one report per feed from each oracle when EAs are at 100% reliability", func(t *testing.T) {
- runTestSetup()
- })
-
- t.Run("receives at least one report per feed from each oracle when EAs are at 80% reliability", func(t *testing.T) {
- pError.Store(20)
- runTestSetup()
- })
-}
-
-func TestIntegration_MercuryV3(t *testing.T) {
- t.Parallel()
-
- integration_MercuryV3(t)
-}
-
-func integration_MercuryV3(t *testing.T) {
- ctx := testutils.Context(t)
- var logObservers []*observer.ObservedLogs
- t.Cleanup(func() {
- detectPanicLogs(t, logObservers)
- })
-
- testStartTimeStamp := uint32(time.Now().Unix())
-
- // test vars
- // pError is the probability that an EA will return an error instead of a result, as integer percentage
- // pError = 0 means it will never return error
- pError := atomic.Int64{}
-
- // feeds
- btcFeed := Feed{
- name: "BTC/USD",
- id: randomFeedID(3),
- baseBenchmarkPrice: big.NewInt(20_000 * multiplier),
- baseBid: big.NewInt(19_997 * multiplier),
- baseAsk: big.NewInt(20_004 * multiplier),
- }
- ethFeed := Feed{
- name: "ETH/USD",
- id: randomFeedID(3),
- baseBenchmarkPrice: big.NewInt(1_568 * multiplier),
- baseBid: big.NewInt(1_566 * multiplier),
- baseAsk: big.NewInt(1_569 * multiplier),
- }
- linkFeed := Feed{
- name: "LINK/USD",
- id: randomFeedID(3),
- baseBenchmarkPrice: big.NewInt(7150 * multiplier / 1000),
- baseBid: big.NewInt(7123 * multiplier / 1000),
- baseAsk: big.NewInt(7177 * multiplier / 1000),
- }
- feeds := []Feed{btcFeed, ethFeed, linkFeed}
- feedM := make(map[[32]byte]Feed, len(feeds))
- for i := range feeds {
- feedM[feeds[i].id] = feeds[i]
- }
-
- clientCSAKeys := make([]csakey.KeyV2, n+1)
- clientPubKeys := make([]ed25519.PublicKey, n+1)
- for i := 0; i < n+1; i++ {
- k := big.NewInt(int64(i))
- key := csakey.MustNewV2XXXTestingOnly(k)
- clientCSAKeys[i] = key
- clientPubKeys[i] = key.PublicKey
- }
-
- // Test multi-send to three servers
- const nSrvs = 3
- reqChs := make([]chan request, nSrvs)
- servers := make(map[string]string)
- for i := 0; i < nSrvs; i++ {
- k := csakey.MustNewV2XXXTestingOnly(big.NewInt(int64(-(i + 1))))
- reqs := make(chan request, 100)
- srv := NewMercuryServer(t, ed25519.PrivateKey(k.Raw()), reqs, func() []byte {
- report, err := (&reportcodecv3.ReportCodec{}).BuildReport(ctx, v3.ReportFields{BenchmarkPrice: big.NewInt(234567), Bid: big.NewInt(1), Ask: big.NewInt(1), LinkFee: big.NewInt(1), NativeFee: big.NewInt(1)})
- if err != nil {
- panic(err)
- }
- return report
- })
- serverURL := startMercuryServer(t, srv, clientPubKeys)
- reqChs[i] = reqs
- servers[serverURL] = fmt.Sprintf("%x", k.PublicKey)
- }
- chainID := testutils.SimulatedChainID
-
- steve, backend, verifier, verifierAddress, commit := setupBlockchain(t)
-
- // Setup bootstrap + oracle nodes
- bootstrapNodePort := freeport.GetOne(t)
- appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n])
- bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
- logObservers = append(logObservers, observedLogs)
-
- // Commit blocks to finality depth to ensure LogPoller has finalized blocks to read from
- ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String())
- require.NoError(t, err)
- finalityDepth := ch.Config().EVM().FinalityDepth()
- for i := 0; i < int(finalityDepth); i++ {
- commit()
- }
-
- // Set up n oracles
- var (
- oracles []confighelper.OracleIdentityExtra
- nodes []Node
- )
- ports := freeport.GetN(t, n)
- for i := 0; i < n; i++ {
- app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i])
-
- nodes = append(nodes, Node{
- app, transmitter, kb,
- })
-
- offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x"))
- oracles = append(oracles, confighelper.OracleIdentityExtra{
- OracleIdentity: confighelper.OracleIdentity{
- OnchainPublicKey: offchainPublicKey,
- TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])),
- OffchainPublicKey: kb.OffchainPublicKey(),
- PeerID: peerID,
- },
- ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
- })
- logObservers = append(logObservers, observedLogs)
- }
-
- for _, feed := range feeds {
- addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id)
- }
-
- createBridge := func(name string, i int, p *big.Int, borm bridges.ORM) (bridgeName string) {
- bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
- b, herr := io.ReadAll(req.Body)
- require.NoError(t, herr)
- require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b))
-
- r := rand.Int63n(101)
- if r > pError.Load() {
- res.WriteHeader(http.StatusOK)
- val := decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String()
- resp := fmt.Sprintf(`{"result": %s}`, val)
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- } else {
- res.WriteHeader(http.StatusInternalServerError)
- resp := `{"error": "pError test error"}`
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- }
- }))
- t.Cleanup(bridge.Close)
- u, _ := url.Parse(bridge.URL)
- bridgeName = fmt.Sprintf("bridge-%s-%d", name, i)
- require.NoError(t, borm.CreateBridgeType(ctx, &bridges.BridgeType{
- Name: bridges.BridgeName(bridgeName),
- URL: models.WebURL(*u),
- }))
-
- return bridgeName
- }
-
- // Add OCR jobs - one per feed on each node
- for i, node := range nodes {
- for j, feed := range feeds {
- bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, node.App.BridgeORM())
- bidBridge := createBridge(fmt.Sprintf("bid-%d", j), i, feed.baseBid, node.App.BridgeORM())
- askBridge := createBridge(fmt.Sprintf("ask-%d", j), i, feed.baseAsk, node.App.BridgeORM())
-
- addV3MercuryJob(
- t,
- node,
- i,
- verifierAddress,
- bootstrapPeerID,
- bootstrapNodePort,
- bmBridge,
- bidBridge,
- askBridge,
- servers,
- clientPubKeys[i],
- feed.name,
- feed.id,
- randomFeedID(2),
- randomFeedID(2),
- )
- }
- }
-
- // Setup config on contract
- onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(ctx, rawOnchainConfig)
- require.NoError(t, err)
-
- reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig)
- require.NoError(t, err)
-
- signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02(
- 2*time.Second, // DeltaProgress
- 20*time.Second, // DeltaResend
- 400*time.Millisecond, // DeltaInitial
- 100*time.Millisecond, // DeltaRound
- 0, // DeltaGrace
- 300*time.Millisecond, // DeltaCertifiedCommitRequest
- 1*time.Minute, // DeltaStage
- 100, // rMax
- []int{len(nodes)}, // S
- oracles,
- reportingPluginConfig, // reportingPluginConfig []byte,
- nil,
- 250*time.Millisecond, // Max duration observation
- int(f), // f
- onchainConfig,
- )
-
- require.NoError(t, err)
- signerAddresses, err := evm.OnchainPublicKeyToAddress(signers)
- require.NoError(t, err)
-
- offchainTransmitters := make([][32]byte, n)
- for i := 0; i < n; i++ {
- offchainTransmitters[i] = nodes[i].ClientPubKey
- }
-
- for _, feed := range feeds {
- _, ferr := verifier.SetConfig(
- steve,
- feed.id,
- signerAddresses,
- offchainTransmitters,
- f,
- onchainConfig,
- offchainConfigVersion,
- offchainConfig,
- nil,
- )
- require.NoError(t, ferr)
- commit()
- }
-
- runTestSetup := func(reqs chan request) {
- // Expect at least one report per feed from each oracle, per server
- seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{})
- for i := range feeds {
- // feedID will be deleted when all n oracles have reported
- seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n)
- }
-
- for req := range reqs {
- v := make(map[string]interface{})
- err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload)
- require.NoError(t, err)
- report, exists := v["report"]
- if !exists {
- t.Fatalf("expected payload %#v to contain 'report'", v)
- }
- reportElems := make(map[string]interface{})
- err = reportcodecv3.ReportTypes.UnpackIntoMap(reportElems, report.([]byte))
- require.NoError(t, err)
-
- feedID := reportElems["feedId"].([32]uint8)
- feed, exists := feedM[feedID]
- require.True(t, exists)
-
- if _, exists := seen[feedID]; !exists {
- continue // already saw all oracles for this feed
- }
-
- expectedFee := datastreamsmercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee)
- expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow
-
- assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp))
- assert.InDelta(t, feed.baseBenchmarkPrice.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000)
- assert.InDelta(t, feed.baseBid.Int64(), reportElems["bid"].(*big.Int).Int64(), 5000000)
- assert.InDelta(t, feed.baseAsk.Int64(), reportElems["ask"].(*big.Int).Int64(), 5000000)
- assert.NotZero(t, reportElems["validFromTimestamp"].(uint32))
- assert.GreaterOrEqual(t, reportElems["observationsTimestamp"].(uint32), reportElems["validFromTimestamp"].(uint32))
- assert.Equal(t, expectedExpiresAt, reportElems["expiresAt"].(uint32))
- assert.Equal(t, expectedFee, reportElems["linkFee"].(*big.Int))
- assert.Equal(t, expectedFee, reportElems["nativeFee"].(*big.Int))
-
- t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id)
-
- seen[feedID][req.pk] = struct{}{}
- if len(seen[feedID]) == n {
- t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id)
- delete(seen, feedID)
- if len(seen) == 0 {
- break // saw all oracles; success!
- }
- }
- }
- }
-
- t.Run("receives at least one report per feed for every server from each oracle when EAs are at 100% reliability", func(t *testing.T) {
- for i := 0; i < nSrvs; i++ {
- reqs := reqChs[i]
- runTestSetup(reqs)
- }
- })
-}
-
-func TestIntegration_MercuryV4(t *testing.T) {
- t.Parallel()
-
- integration_MercuryV4(t)
-}
-
-func integration_MercuryV4(t *testing.T) {
- ctx := testutils.Context(t)
- var logObservers []*observer.ObservedLogs
- t.Cleanup(func() {
- detectPanicLogs(t, logObservers)
- })
-
- testStartTimeStamp := uint32(time.Now().Unix())
-
- // test vars
- // pError is the probability that an EA will return an error instead of a result, as integer percentage
- // pError = 0 means it will never return error
- pError := atomic.Int64{}
-
- // feeds
- btcFeed := Feed{
- name: "BTC/USD",
- id: randomFeedID(4),
- baseBenchmarkPrice: big.NewInt(20_000 * multiplier),
- baseBid: big.NewInt(19_997 * multiplier),
- baseAsk: big.NewInt(20_004 * multiplier),
- baseMarketStatus: 1,
- }
- ethFeed := Feed{
- name: "ETH/USD",
- id: randomFeedID(4),
- baseBenchmarkPrice: big.NewInt(1_568 * multiplier),
- baseBid: big.NewInt(1_566 * multiplier),
- baseAsk: big.NewInt(1_569 * multiplier),
- baseMarketStatus: 2,
- }
- linkFeed := Feed{
- name: "LINK/USD",
- id: randomFeedID(4),
- baseBenchmarkPrice: big.NewInt(7150 * multiplier / 1000),
- baseBid: big.NewInt(7123 * multiplier / 1000),
- baseAsk: big.NewInt(7177 * multiplier / 1000),
- baseMarketStatus: 3,
- }
- feeds := []Feed{btcFeed, ethFeed, linkFeed}
- feedM := make(map[[32]byte]Feed, len(feeds))
- for i := range feeds {
- feedM[feeds[i].id] = feeds[i]
- }
-
- clientCSAKeys := make([]csakey.KeyV2, n+1)
- clientPubKeys := make([]ed25519.PublicKey, n+1)
- for i := 0; i < n+1; i++ {
- k := big.NewInt(int64(i))
- key := csakey.MustNewV2XXXTestingOnly(k)
- clientCSAKeys[i] = key
- clientPubKeys[i] = key.PublicKey
- }
-
- // Test multi-send to three servers
- const nSrvs = 3
- reqChs := make([]chan request, nSrvs)
- servers := make(map[string]string)
- for i := 0; i < nSrvs; i++ {
- k := csakey.MustNewV2XXXTestingOnly(big.NewInt(int64(-(i + 1))))
- reqs := make(chan request, 100)
- srv := NewMercuryServer(t, ed25519.PrivateKey(k.Raw()), reqs, func() []byte {
- report, err := (&reportcodecv4.ReportCodec{}).BuildReport(ctx, v4.ReportFields{BenchmarkPrice: big.NewInt(234567), LinkFee: big.NewInt(1), NativeFee: big.NewInt(1), MarketStatus: 1})
- if err != nil {
- panic(err)
- }
- return report
- })
- serverURL := startMercuryServer(t, srv, clientPubKeys)
- reqChs[i] = reqs
- servers[serverURL] = fmt.Sprintf("%x", k.PublicKey)
- }
- chainID := testutils.SimulatedChainID
-
- steve, backend, verifier, verifierAddress, commit := setupBlockchain(t)
-
- // Setup bootstrap + oracle nodes
- bootstrapNodePort := freeport.GetOne(t)
- appBootstrap, bootstrapPeerID, _, bootstrapKb, observedLogs := setupNode(t, bootstrapNodePort, "bootstrap_mercury", backend, clientCSAKeys[n])
- bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
- logObservers = append(logObservers, observedLogs)
-
- // Commit blocks to finality depth to ensure LogPoller has finalized blocks to read from
- ch, err := bootstrapNode.App.GetRelayers().LegacyEVMChains().Get(testutils.SimulatedChainID.String())
- require.NoError(t, err)
- finalityDepth := ch.Config().EVM().FinalityDepth()
- for i := 0; i < int(finalityDepth); i++ {
- commit()
- }
-
- // Set up n oracles
- var (
- oracles []confighelper.OracleIdentityExtra
- nodes []Node
- )
- ports := freeport.GetN(t, n)
- for i := 0; i < n; i++ {
- app, peerID, transmitter, kb, observedLogs := setupNode(t, ports[i], fmt.Sprintf("oracle_mercury%d", i), backend, clientCSAKeys[i])
-
- nodes = append(nodes, Node{
- app, transmitter, kb,
- })
-
- offchainPublicKey, _ := hex.DecodeString(strings.TrimPrefix(kb.OnChainPublicKey(), "0x"))
- oracles = append(oracles, confighelper.OracleIdentityExtra{
- OracleIdentity: confighelper.OracleIdentity{
- OnchainPublicKey: offchainPublicKey,
- TransmitAccount: ocr2types.Account(fmt.Sprintf("%x", transmitter[:])),
- OffchainPublicKey: kb.OffchainPublicKey(),
- PeerID: peerID,
- },
- ConfigEncryptionPublicKey: kb.ConfigEncryptionPublicKey(),
- })
- logObservers = append(logObservers, observedLogs)
- }
-
- for _, feed := range feeds {
- addBootstrapJob(t, bootstrapNode, chainID, verifierAddress, feed.name, feed.id)
- }
-
- createBridge := func(name string, i int, p *big.Int, marketStatus uint32, borm bridges.ORM) (bridgeName string) {
- bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
- b, herr := io.ReadAll(req.Body)
- require.NoError(t, herr)
- require.Equal(t, `{"data":{"from":"ETH","to":"USD"}}`, string(b))
-
- r := rand.Int63n(101)
- if r > pError.Load() {
- res.WriteHeader(http.StatusOK)
-
- var val string
- if p != nil {
- val = decimal.NewFromBigInt(p, 0).Div(decimal.NewFromInt(multiplier)).Add(decimal.NewFromInt(int64(i)).Div(decimal.NewFromInt(100))).String()
- } else {
- val = fmt.Sprintf("%d", marketStatus)
- }
-
- resp := fmt.Sprintf(`{"result": %s}`, val)
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- } else {
- res.WriteHeader(http.StatusInternalServerError)
- resp := `{"error": "pError test error"}`
- _, herr = res.Write([]byte(resp))
- require.NoError(t, herr)
- }
- }))
- t.Cleanup(bridge.Close)
- u, _ := url.Parse(bridge.URL)
- bridgeName = fmt.Sprintf("bridge-%s-%d", name, i)
- require.NoError(t, borm.CreateBridgeType(ctx, &bridges.BridgeType{
- Name: bridges.BridgeName(bridgeName),
- URL: models.WebURL(*u),
- }))
-
- return bridgeName
- }
-
- // Add OCR jobs - one per feed on each node
- for i, node := range nodes {
- for j, feed := range feeds {
- bmBridge := createBridge(fmt.Sprintf("benchmarkprice-%d", j), i, feed.baseBenchmarkPrice, 0, node.App.BridgeORM())
- marketStatusBridge := createBridge(fmt.Sprintf("marketstatus-%d", j), i, nil, feed.baseMarketStatus, node.App.BridgeORM())
-
- addV4MercuryJob(
- t,
- node,
- i,
- verifierAddress,
- bootstrapPeerID,
- bootstrapNodePort,
- bmBridge,
- marketStatusBridge,
- servers,
- clientPubKeys[i],
- feed.name,
- feed.id,
- randomFeedID(2),
- randomFeedID(2),
- )
- }
- }
-
- // Setup config on contract
- onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(ctx, rawOnchainConfig)
- require.NoError(t, err)
-
- reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig)
- require.NoError(t, err)
-
- signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsForTestsMercuryV02(
- 2*time.Second, // DeltaProgress
- 20*time.Second, // DeltaResend
- 400*time.Millisecond, // DeltaInitial
- 100*time.Millisecond, // DeltaRound
- 0, // DeltaGrace
- 300*time.Millisecond, // DeltaCertifiedCommitRequest
- 1*time.Minute, // DeltaStage
- 100, // rMax
- []int{len(nodes)}, // S
- oracles,
- reportingPluginConfig, // reportingPluginConfig []byte,
- nil,
- 250*time.Millisecond, // Max duration observation
- int(f), // f
- onchainConfig,
- )
-
- require.NoError(t, err)
- signerAddresses, err := evm.OnchainPublicKeyToAddress(signers)
- require.NoError(t, err)
-
- offchainTransmitters := make([][32]byte, n)
- for i := 0; i < n; i++ {
- offchainTransmitters[i] = nodes[i].ClientPubKey
- }
-
- for _, feed := range feeds {
- _, ferr := verifier.SetConfig(
- steve,
- feed.id,
- signerAddresses,
- offchainTransmitters,
- f,
- onchainConfig,
- offchainConfigVersion,
- offchainConfig,
- nil,
- )
- require.NoError(t, ferr)
- commit()
- }
-
- runTestSetup := func(reqs chan request) {
- // Expect at least one report per feed from each oracle, per server
- seen := make(map[[32]byte]map[credentials.StaticSizedPublicKey]struct{})
- for i := range feeds {
- // feedID will be deleted when all n oracles have reported
- seen[feeds[i].id] = make(map[credentials.StaticSizedPublicKey]struct{}, n)
- }
-
- for req := range reqs {
- v := make(map[string]interface{})
- err := mercury.PayloadTypes.UnpackIntoMap(v, req.req.Payload)
- require.NoError(t, err)
- report, exists := v["report"]
- if !exists {
- t.Fatalf("expected payload %#v to contain 'report'", v)
- }
- reportElems := make(map[string]interface{})
- err = reportcodecv4.ReportTypes.UnpackIntoMap(reportElems, report.([]byte))
- require.NoError(t, err)
-
- feedID := reportElems["feedId"].([32]uint8)
- feed, exists := feedM[feedID]
- require.True(t, exists)
-
- if _, exists := seen[feedID]; !exists {
- continue // already saw all oracles for this feed
- }
-
- expectedFee := datastreamsmercury.CalculateFee(big.NewInt(234567), rawReportingPluginConfig.BaseUSDFee)
- expectedExpiresAt := reportElems["observationsTimestamp"].(uint32) + rawReportingPluginConfig.ExpirationWindow
-
- assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp))
- assert.InDelta(t, feed.baseBenchmarkPrice.Int64(), reportElems["benchmarkPrice"].(*big.Int).Int64(), 5000000)
- assert.NotZero(t, reportElems["validFromTimestamp"].(uint32))
- assert.GreaterOrEqual(t, reportElems["observationsTimestamp"].(uint32), reportElems["validFromTimestamp"].(uint32))
- assert.Equal(t, expectedExpiresAt, reportElems["expiresAt"].(uint32))
- assert.Equal(t, expectedFee, reportElems["linkFee"].(*big.Int))
- assert.Equal(t, expectedFee, reportElems["nativeFee"].(*big.Int))
- assert.Equal(t, feed.baseMarketStatus, reportElems["marketStatus"].(uint32))
-
- t.Logf("oracle %x reported for feed %s (0x%x)", req.pk, feed.name, feed.id)
-
- seen[feedID][req.pk] = struct{}{}
- if len(seen[feedID]) == n {
- t.Logf("all oracles reported for feed %s (0x%x)", feed.name, feed.id)
- delete(seen, feedID)
- if len(seen) == 0 {
- break // saw all oracles; success!
- }
- }
- }
- }
-
- t.Run("receives at least one report per feed for every server from each oracle when EAs are at 100% reliability", func(t *testing.T) {
- for i := 0; i < nSrvs; i++ {
- reqs := reqChs[i]
- runTestSetup(reqs)
- }
- })
-}
diff --git a/core/services/ocr2/plugins/mercury/plugin.go b/core/services/ocr2/plugins/mercury/plugin.go
deleted file mode 100644
index b0983e55c89..00000000000
--- a/core/services/ocr2/plugins/mercury/plugin.go
+++ /dev/null
@@ -1,394 +0,0 @@
-package mercury
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "os/exec"
-
- "github.com/pkg/errors"
-
- libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types"
-
- relaymercuryv1 "github.com/smartcontractkit/chainlink-data-streams/mercury/v1"
- relaymercuryv2 "github.com/smartcontractkit/chainlink-data-streams/mercury/v2"
- relaymercuryv3 "github.com/smartcontractkit/chainlink-data-streams/mercury/v3"
- relaymercuryv4 "github.com/smartcontractkit/chainlink-data-streams/mercury/v4"
-
- "github.com/smartcontractkit/chainlink-common/pkg/loop"
- commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
-
- "github.com/smartcontractkit/chainlink/v2/core/config/env"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- mercuryv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1"
- mercuryv2 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2"
- mercuryv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3"
- mercuryv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4"
- "github.com/smartcontractkit/chainlink/v2/plugins"
-)
-
-type Config interface {
- MaxSuccessfulRuns() uint64
- ResultWriteQueueDepth() uint64
- plugins.RegistrarConfig
-}
-
-// concrete implementation of MercuryConfig
-type mercuryConfig struct {
- jobPipelineMaxSuccessfulRuns uint64
- jobPipelineResultWriteQueueDepth uint64
- plugins.RegistrarConfig
-}
-
-func NewMercuryConfig(jobPipelineMaxSuccessfulRuns uint64, jobPipelineResultWriteQueueDepth uint64, pluginProcessCfg plugins.RegistrarConfig) Config {
- return &mercuryConfig{
- jobPipelineMaxSuccessfulRuns: jobPipelineMaxSuccessfulRuns,
- jobPipelineResultWriteQueueDepth: jobPipelineResultWriteQueueDepth,
- RegistrarConfig: pluginProcessCfg,
- }
-}
-
-func (m *mercuryConfig) MaxSuccessfulRuns() uint64 {
- return m.jobPipelineMaxSuccessfulRuns
-}
-
-func (m *mercuryConfig) ResultWriteQueueDepth() uint64 {
- return m.jobPipelineResultWriteQueueDepth
-}
-
-func NewServices(
- jb job.Job,
- ocr2Provider commontypes.MercuryProvider,
- pipelineRunner pipeline.Runner,
- lggr logger.Logger,
- argsNoPlugin libocr2.MercuryOracleArgs,
- cfg Config,
- chEnhancedTelem chan ocrcommon.EnhancedTelemetryMercuryData,
- orm types.DataSourceORM,
- feedID utils.FeedID,
- enableTriggerCapability bool,
-) ([]job.ServiceCtx, error) {
- if jb.PipelineSpec == nil {
- return nil, errors.New("expected job to have a non-nil PipelineSpec")
- }
-
- var pluginConfig config.PluginConfig
- if len(jb.OCR2OracleSpec.PluginConfig) == 0 {
- if !enableTriggerCapability {
- return nil, fmt.Errorf("at least one transmission option must be configured")
- }
- } else {
- err := json.Unmarshal(jb.OCR2OracleSpec.PluginConfig.Bytes(), &pluginConfig)
- if err != nil {
- return nil, errors.WithStack(err)
- }
- err = config.ValidatePluginConfig(pluginConfig, feedID)
- if err != nil {
- return nil, err
- }
- }
-
- lggr = lggr.Named("MercuryPlugin").With("jobID", jb.ID, "jobName", jb.Name.ValueOrZero())
-
- // encapsulate all the subservices and ensure we close them all if any fail to start
- srvs := []job.ServiceCtx{ocr2Provider}
- abort := func() {
- if cerr := services.MultiCloser(srvs).Close(); cerr != nil {
- lggr.Errorw("Error closing unused services", "err", cerr)
- }
- }
- saver := ocrcommon.NewResultRunSaver(pipelineRunner, lggr, cfg.MaxSuccessfulRuns(), cfg.ResultWriteQueueDepth())
- srvs = append(srvs, saver)
-
- // this is the factory that will be used to create the mercury plugin
- var (
- factory ocr3types.MercuryPluginFactory
- factoryServices []job.ServiceCtx
- fErr error
- )
- fCfg := factoryCfg{
- orm: orm,
- pipelineRunner: pipelineRunner,
- jb: jb,
- lggr: lggr,
- saver: saver,
- chEnhancedTelem: chEnhancedTelem,
- ocr2Provider: ocr2Provider,
- reportingPluginConfig: pluginConfig,
- cfg: cfg,
- feedID: feedID,
- }
- switch feedID.Version() {
- case 1:
- factory, factoryServices, fErr = newv1factory(fCfg)
- if fErr != nil {
- abort()
- return nil, fmt.Errorf("failed to create mercury v1 factory: %w", fErr)
- }
- srvs = append(srvs, factoryServices...)
- case 2:
- factory, factoryServices, fErr = newv2factory(fCfg)
- if fErr != nil {
- abort()
- return nil, fmt.Errorf("failed to create mercury v2 factory: %w", fErr)
- }
- srvs = append(srvs, factoryServices...)
- case 3:
- factory, factoryServices, fErr = newv3factory(fCfg)
- if fErr != nil {
- abort()
- return nil, fmt.Errorf("failed to create mercury v3 factory: %w", fErr)
- }
- srvs = append(srvs, factoryServices...)
- case 4:
- factory, factoryServices, fErr = newv4factory(fCfg)
- if fErr != nil {
- abort()
- return nil, fmt.Errorf("failed to create mercury v4 factory: %w", fErr)
- }
- srvs = append(srvs, factoryServices...)
- default:
- return nil, errors.Errorf("unknown Mercury report schema version: %d", feedID.Version())
- }
- argsNoPlugin.MercuryPluginFactory = factory
- oracle, err := libocr2.NewOracle(argsNoPlugin)
- if err != nil {
- abort()
- return nil, errors.WithStack(err)
- }
- srvs = append(srvs, job.NewServiceAdapter(oracle))
- return srvs, nil
-}
-
-type factoryCfg struct {
- orm types.DataSourceORM
- pipelineRunner pipeline.Runner
- jb job.Job
- lggr logger.Logger
- saver *ocrcommon.RunResultSaver
- chEnhancedTelem chan ocrcommon.EnhancedTelemetryMercuryData
- ocr2Provider commontypes.MercuryProvider
- reportingPluginConfig config.PluginConfig
- cfg Config
- feedID utils.FeedID
-}
-
-func getPluginFeedIDs(pluginConfig config.PluginConfig) (linkFeedID utils.FeedID, nativeFeedID utils.FeedID) {
- if pluginConfig.LinkFeedID != nil {
- linkFeedID = *pluginConfig.LinkFeedID
- }
- if pluginConfig.NativeFeedID != nil {
- nativeFeedID = *pluginConfig.NativeFeedID
- }
- return linkFeedID, nativeFeedID
-}
-
-func newv4factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) {
- var factory ocr3types.MercuryPluginFactory
- srvs := make([]job.ServiceCtx, 0)
-
- linkFeedID, nativeFeedID := getPluginFeedIDs(factoryCfg.reportingPluginConfig)
-
- ds := mercuryv4.NewDataSource(
- factoryCfg.orm,
- factoryCfg.pipelineRunner,
- factoryCfg.jb,
- *factoryCfg.jb.PipelineSpec,
- factoryCfg.feedID,
- factoryCfg.lggr,
- factoryCfg.saver,
- factoryCfg.chEnhancedTelem,
- factoryCfg.ocr2Provider.MercuryServerFetcher(),
- linkFeedID,
- nativeFeedID,
- )
-
- loopCmd := env.MercuryPlugin.Cmd.Get()
- loopEnabled := loopCmd != ""
-
- if loopEnabled {
- cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
- }
- // in loop mode, the factory is grpc server, and we need to handle the server lifecycle
- // and unregistration of the loop
- factoryServer := loop.NewMercuryV4Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer, unregisterer)
- // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
- factory = factoryServer
- } else {
- factory = relaymercuryv4.NewFactory(ds, factoryCfg.lggr, factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV4())
- }
- return factory, srvs, nil
-}
-
-func newv3factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) {
- var factory ocr3types.MercuryPluginFactory
- srvs := make([]job.ServiceCtx, 0)
-
- linkFeedID, nativeFeedID := getPluginFeedIDs(factoryCfg.reportingPluginConfig)
-
- ds := mercuryv3.NewDataSource(
- factoryCfg.orm,
- factoryCfg.pipelineRunner,
- factoryCfg.jb,
- *factoryCfg.jb.PipelineSpec,
- factoryCfg.feedID,
- factoryCfg.lggr,
- factoryCfg.saver,
- factoryCfg.chEnhancedTelem,
- factoryCfg.ocr2Provider.MercuryServerFetcher(),
- linkFeedID,
- nativeFeedID,
- )
-
- loopCmd := env.MercuryPlugin.Cmd.Get()
- loopEnabled := loopCmd != ""
-
- if loopEnabled {
- cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
- }
- // in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
- // and unregistration of the loop
- factoryServer := loop.NewMercuryV3Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer, unregisterer)
- // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
- factory = factoryServer
- } else {
- factory = relaymercuryv3.NewFactory(ds, factoryCfg.lggr, factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV3())
- }
- return factory, srvs, nil
-}
-
-func newv2factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) {
- var factory ocr3types.MercuryPluginFactory
- srvs := make([]job.ServiceCtx, 0)
-
- linkFeedID, nativeFeedID := getPluginFeedIDs(factoryCfg.reportingPluginConfig)
-
- ds := mercuryv2.NewDataSource(
- factoryCfg.orm,
- factoryCfg.pipelineRunner,
- factoryCfg.jb,
- *factoryCfg.jb.PipelineSpec,
- factoryCfg.feedID,
- factoryCfg.lggr,
- factoryCfg.saver,
- factoryCfg.chEnhancedTelem,
- factoryCfg.ocr2Provider.MercuryServerFetcher(),
- linkFeedID,
- nativeFeedID,
- )
-
- loopCmd := env.MercuryPlugin.Cmd.Get()
- loopEnabled := loopCmd != ""
-
- if loopEnabled {
- cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
- }
- // in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
- // and unregistration of the loop
- factoryServer := loop.NewMercuryV2Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer, unregisterer)
- // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
- factory = factoryServer
- } else {
- factory = relaymercuryv2.NewFactory(ds, factoryCfg.lggr, factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV2())
- }
- return factory, srvs, nil
-}
-
-func newv1factory(factoryCfg factoryCfg) (ocr3types.MercuryPluginFactory, []job.ServiceCtx, error) {
- var factory ocr3types.MercuryPluginFactory
- srvs := make([]job.ServiceCtx, 0)
-
- ds := mercuryv1.NewDataSource(
- factoryCfg.orm,
- factoryCfg.pipelineRunner,
- factoryCfg.jb,
- *factoryCfg.jb.PipelineSpec,
- factoryCfg.lggr,
- factoryCfg.saver,
- factoryCfg.chEnhancedTelem,
- factoryCfg.ocr2Provider.MercuryChainReader(),
- factoryCfg.ocr2Provider.MercuryServerFetcher(),
- factoryCfg.reportingPluginConfig.InitialBlockNumber.Ptr(),
- factoryCfg.feedID,
- )
-
- loopCmd := env.MercuryPlugin.Cmd.Get()
- loopEnabled := loopCmd != ""
-
- if loopEnabled {
- cmdFn, unregisterer, opts, mercuryLggr, err := initLoop(loopCmd, factoryCfg.cfg, factoryCfg.feedID, factoryCfg.lggr)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to init loop for feed %s: %w", factoryCfg.feedID, err)
- }
- // in loopp mode, the factory is grpc server, and we need to handle the server lifecycle
- // and unregistration of the loop
- factoryServer := loop.NewMercuryV1Service(mercuryLggr, opts, cmdFn, factoryCfg.ocr2Provider, ds)
- srvs = append(srvs, factoryServer, unregisterer)
- // adapt the grpc server to the vanilla mercury plugin factory interface used by the oracle
- factory = factoryServer
- } else {
- factory = relaymercuryv1.NewFactory(ds, factoryCfg.lggr, factoryCfg.ocr2Provider.OnchainConfigCodec(), factoryCfg.ocr2Provider.ReportCodecV1())
- }
- return factory, srvs, nil
-}
-
-func initLoop(cmd string, cfg Config, feedID utils.FeedID, lggr logger.Logger) (func() *exec.Cmd, *loopUnregisterCloser, loop.GRPCOpts, logger.Logger, error) {
- lggr.Debugw("Initializing Mercury loop", "command", cmd)
- mercuryLggr := lggr.Named(fmt.Sprintf("MercuryV%d", feedID.Version())).Named(feedID.String())
- envVars, err := plugins.ParseEnvFile(env.MercuryPlugin.Env.Get())
- if err != nil {
- return nil, nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to parse mercury env file: %w", err)
- }
- loopID := mercuryLggr.Name()
- cmdFn, opts, err := cfg.RegisterLOOP(plugins.CmdConfig{
- ID: loopID,
- Cmd: cmd,
- Env: envVars,
- })
- if err != nil {
- return nil, nil, loop.GRPCOpts{}, nil, fmt.Errorf("failed to register loop: %w", err)
- }
- return cmdFn, newLoopUnregister(cfg, loopID), opts, mercuryLggr, nil
-}
-
-// loopUnregisterCloser is a helper to unregister a loop
-// as a service
-// TODO BCF-3451 all other jobs that use custom plugin providers that should be refactored to use this pattern
-// perhaps it can be implemented in the delegate on job delete.
-type loopUnregisterCloser struct {
- r plugins.RegistrarConfig
- id string
-}
-
-func (l *loopUnregisterCloser) Close() error {
- l.r.UnregisterLOOP(l.id)
- return nil
-}
-
-func (l *loopUnregisterCloser) Start(ctx context.Context) error {
- return nil
-}
-
-func newLoopUnregister(r plugins.RegistrarConfig, id string) *loopUnregisterCloser {
- return &loopUnregisterCloser{
- r: r,
- id: id,
- }
-}
diff --git a/core/services/ocr2/plugins/mercury/plugin_test.go b/core/services/ocr2/plugins/mercury/plugin_test.go
deleted file mode 100644
index 71cfabce303..00000000000
--- a/core/services/ocr2/plugins/mercury/plugin_test.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package mercury_test
-
-import (
- "context"
- "errors"
- "os/exec"
- "reflect"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/google/uuid"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink/v2/core/config/env"
- "github.com/smartcontractkit/chainlink/v2/core/logger"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay"
-
- "github.com/smartcontractkit/chainlink-common/pkg/loop"
- commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
- v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
- v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
- v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
- "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
-
- mercuryocr2 "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury"
-
- libocr2 "github.com/smartcontractkit/libocr/offchainreporting2plus"
- libocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/plugins"
-)
-
-var (
- v1FeedId = [32]uint8{00, 01, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
- v2FeedId = [32]uint8{00, 02, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
- v3FeedId = [32]uint8{00, 03, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
- v4FeedId = [32]uint8{00, 04, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
-
- testArgsNoPlugin = libocr2.MercuryOracleArgs{
- LocalConfig: libocr2types.LocalConfig{
- DevelopmentMode: libocr2types.EnableDangerousDevelopmentMode,
- },
- }
-
- testCfg = mercuryocr2.NewMercuryConfig(1, 1, &testRegistrarConfig{})
-
- v1jsonCfg = job.JSONConfig{
- "serverURL": "example.com:80",
- "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93",
- "initialBlockNumber": 1234,
- }
-
- v2jsonCfg = job.JSONConfig{
- "serverURL": "example.com:80",
- "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93",
- "linkFeedID": "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
- "nativeFeedID": "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
- }
-
- v3jsonCfg = job.JSONConfig{
- "serverURL": "example.com:80",
- "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93",
- "linkFeedID": "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
- "nativeFeedID": "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
- }
-
- v4jsonCfg = job.JSONConfig{
- "serverURL": "example.com:80",
- "serverPubKey": "724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93",
- "linkFeedID": "0x00026b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
- "nativeFeedID": "0x00036b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472",
- }
-
- testJob = job.Job{
- ID: 1,
- ExternalJobID: uuid.Must(uuid.NewRandom()),
- OCR2OracleSpecID: ptr(int32(7)),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- ID: 7,
- ContractID: "phony",
- FeedID: ptr(common.BytesToHash([]byte{1, 2, 3})),
- Relay: relay.NetworkEVM,
- ChainID: "1",
- },
- PipelineSpec: &pipeline.Spec{},
- PipelineSpecID: int32(1),
- }
-
- // this is kind of gross, but it's the best way to test return values of the services
- expectedEmbeddedServiceCnt = 3
- expectedLoopServiceCnt = expectedEmbeddedServiceCnt + 2 // factory server and loop unregisterer
-)
-
-func TestNewServices(t *testing.T) {
- type args struct {
- pluginConfig job.JSONConfig
- feedID utils.FeedID
- cfg mercuryocr2.Config
- }
- testCases := []struct {
- name string
- args args
- loopMode bool
- wantLoopFactory any
- wantServiceCnt int
- wantErr bool
- wantErrStr string
- }{
- {
- name: "no plugin config error ",
- args: args{
- feedID: v1FeedId,
- },
- wantServiceCnt: 0,
- wantErr: true,
- },
-
- {
- name: "v1 legacy",
- args: args{
- pluginConfig: v1jsonCfg,
- feedID: v1FeedId,
- },
- wantServiceCnt: expectedEmbeddedServiceCnt,
- wantErr: false,
- },
- {
- name: "v2 legacy",
- args: args{
- pluginConfig: v2jsonCfg,
- feedID: v2FeedId,
- },
- wantServiceCnt: expectedEmbeddedServiceCnt,
- wantErr: false,
- },
- {
- name: "v3 legacy",
- args: args{
- pluginConfig: v3jsonCfg,
- feedID: v3FeedId,
- },
- wantServiceCnt: expectedEmbeddedServiceCnt,
- wantErr: false,
- },
- {
- name: "v4 legacy",
- args: args{
- pluginConfig: v4jsonCfg,
- feedID: v4FeedId,
- },
- wantServiceCnt: expectedEmbeddedServiceCnt,
- wantErr: false,
- },
- {
- name: "v1 loop",
- loopMode: true,
- args: args{
- pluginConfig: v1jsonCfg,
- feedID: v1FeedId,
- },
- wantServiceCnt: expectedLoopServiceCnt,
- wantErr: false,
- wantLoopFactory: &loop.MercuryV1Service{},
- },
- {
- name: "v2 loop",
- loopMode: true,
- args: args{
- pluginConfig: v2jsonCfg,
- feedID: v2FeedId,
- },
- wantServiceCnt: expectedLoopServiceCnt,
- wantErr: false,
- wantLoopFactory: &loop.MercuryV2Service{},
- },
- {
- name: "v3 loop",
- loopMode: true,
- args: args{
- pluginConfig: v3jsonCfg,
- feedID: v3FeedId,
- },
- wantServiceCnt: expectedLoopServiceCnt,
- wantErr: false,
- wantLoopFactory: &loop.MercuryV3Service{},
- },
- {
- name: "v3 loop err",
- loopMode: true,
- args: args{
- pluginConfig: v3jsonCfg,
- feedID: v3FeedId,
- cfg: mercuryocr2.NewMercuryConfig(1, 1, &testRegistrarConfig{failRegister: true}),
- },
- wantServiceCnt: expectedLoopServiceCnt,
- wantErr: true,
- wantLoopFactory: &loop.MercuryV3Service{},
- wantErrStr: "failed to init loop for feed",
- },
- {
- name: "v4 loop",
- loopMode: true,
- args: args{
- pluginConfig: v4jsonCfg,
- feedID: v4FeedId,
- },
- wantServiceCnt: expectedLoopServiceCnt,
- wantErr: false,
- wantLoopFactory: &loop.MercuryV4Service{},
- },
- }
- for _, tt := range testCases {
- t.Run(tt.name, func(t *testing.T) {
- if tt.loopMode {
- t.Setenv(string(env.MercuryPlugin.Cmd), "fake_cmd")
- assert.NotEmpty(t, env.MercuryPlugin.Cmd.Get())
- }
- // use default config if not provided
- if tt.args.cfg == nil {
- tt.args.cfg = testCfg
- }
- got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
- if (err != nil) != tt.wantErr {
- t.Errorf("NewServices() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if err != nil {
- if tt.wantErrStr != "" {
- assert.Contains(t, err.Error(), tt.wantErrStr)
- }
- return
- }
- assert.Len(t, got, tt.wantServiceCnt)
- if tt.loopMode {
- foundLoopFactory := false
- for i := 0; i < len(got); i++ {
- if reflect.TypeOf(got[i]) == reflect.TypeOf(tt.wantLoopFactory) {
- foundLoopFactory = true
- break
- }
- }
- assert.True(t, foundLoopFactory)
- }
- })
- }
-
- t.Run("restartable loop", func(t *testing.T) {
- // setup a real loop registry to test restartability
- registry := plugins.NewTestLoopRegistry(logger.TestLogger(t))
- loopRegistrarConfig := plugins.NewRegistrarConfig(loop.GRPCOpts{}, registry.Register, registry.Unregister)
- prodCfg := mercuryocr2.NewMercuryConfig(1, 1, loopRegistrarConfig)
- type args struct {
- pluginConfig job.JSONConfig
- feedID utils.FeedID
- cfg mercuryocr2.Config
- }
- testCases := []struct {
- name string
- args args
- wantErr bool
- }{
- {
- name: "v1 loop",
- args: args{
- pluginConfig: v1jsonCfg,
- feedID: v1FeedId,
- cfg: prodCfg,
- },
- wantErr: false,
- },
- {
- name: "v2 loop",
- args: args{
- pluginConfig: v2jsonCfg,
- feedID: v2FeedId,
- cfg: prodCfg,
- },
- wantErr: false,
- },
- {
- name: "v3 loop",
- args: args{
- pluginConfig: v3jsonCfg,
- feedID: v3FeedId,
- cfg: prodCfg,
- },
- wantErr: false,
- },
- {
- name: "v4 loop",
- args: args{
- pluginConfig: v4jsonCfg,
- feedID: v4FeedId,
- cfg: prodCfg,
- },
- wantErr: false,
- },
- }
-
- for _, tt := range testCases {
- t.Run(tt.name, func(t *testing.T) {
- t.Setenv(string(env.MercuryPlugin.Cmd), "fake_cmd")
- assert.NotEmpty(t, env.MercuryPlugin.Cmd.Get())
-
- got, err := newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
- if (err != nil) != tt.wantErr {
- t.Errorf("NewServices() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- // hack to simulate a restart. we don't have enough boilerplate to start the oracle service
- // only care about the subservices so we start all except the oracle, which happens to be the last one
- for i := 0; i < len(got)-1; i++ {
- require.NoError(t, got[i].Start(tests.Context(t)))
- }
- // if we don't close the services, we get conflicts with the loop registry
- _, err = newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
- require.ErrorContains(t, err, "plugin already registered")
-
- // close all services and try again
- for i := len(got) - 2; i >= 0; i-- {
- require.NoError(t, got[i].Close())
- }
- _, err = newServicesTestWrapper(t, tt.args.pluginConfig, tt.args.feedID, tt.args.cfg)
- require.NoError(t, err)
- })
- }
- })
-}
-
-// we are only varying the version via feedID (and the plugin config)
-// this wrapper supplies dummy values for the rest of the arguments
-func newServicesTestWrapper(t *testing.T, pluginConfig job.JSONConfig, feedID utils.FeedID, cfg mercuryocr2.Config) ([]job.ServiceCtx, error) {
- t.Helper()
- jb := testJob
- jb.OCR2OracleSpec.PluginConfig = pluginConfig
- return mercuryocr2.NewServices(jb, &testProvider{}, nil, logger.TestLogger(t), testArgsNoPlugin, cfg, nil, &testDataSourceORM{}, feedID, false)
-}
-
-type testProvider struct{}
-
-// ChainReader implements types.MercuryProvider.
-func (*testProvider) ContractReader() commontypes.ContractReader { panic("unimplemented") }
-
-// Close implements types.MercuryProvider.
-func (*testProvider) Close() error { return nil }
-
-// Codec implements types.MercuryProvider.
-func (*testProvider) Codec() commontypes.Codec { panic("unimplemented") }
-
-// ContractConfigTracker implements types.MercuryProvider.
-func (*testProvider) ContractConfigTracker() libocr2types.ContractConfigTracker {
- panic("unimplemented")
-}
-
-// ContractTransmitter implements types.MercuryProvider.
-func (*testProvider) ContractTransmitter() libocr2types.ContractTransmitter {
- panic("unimplemented")
-}
-
-// HealthReport implements types.MercuryProvider.
-func (*testProvider) HealthReport() map[string]error { panic("unimplemented") }
-
-// MercuryChainReader implements types.MercuryProvider.
-func (*testProvider) MercuryChainReader() mercury.ChainReader { return nil }
-
-// MercuryServerFetcher implements types.MercuryProvider.
-func (*testProvider) MercuryServerFetcher() mercury.ServerFetcher { return nil }
-
-// Name implements types.MercuryProvider.
-func (*testProvider) Name() string { panic("unimplemented") }
-
-// OffchainConfigDigester implements types.MercuryProvider.
-func (*testProvider) OffchainConfigDigester() libocr2types.OffchainConfigDigester {
- panic("unimplemented")
-}
-
-// OnchainConfigCodec implements types.MercuryProvider.
-func (*testProvider) OnchainConfigCodec() mercury.OnchainConfigCodec {
- return nil
-}
-
-// Ready implements types.MercuryProvider.
-func (*testProvider) Ready() error { panic("unimplemented") }
-
-// ReportCodecV1 implements types.MercuryProvider.
-func (*testProvider) ReportCodecV1() v1.ReportCodec { return nil }
-
-// ReportCodecV2 implements types.MercuryProvider.
-func (*testProvider) ReportCodecV2() v2.ReportCodec { return nil }
-
-// ReportCodecV3 implements types.MercuryProvider.
-func (*testProvider) ReportCodecV3() v3.ReportCodec { return nil }
-
-// ReportCodecV4 implements types.MercuryProvider.
-func (*testProvider) ReportCodecV4() v4.ReportCodec { return nil }
-
-// Start implements types.MercuryProvider.
-func (*testProvider) Start(context.Context) error { return nil }
-
-var _ commontypes.MercuryProvider = (*testProvider)(nil)
-
-type testRegistrarConfig struct {
- failRegister bool
-}
-
-func (c *testRegistrarConfig) UnregisterLOOP(ID string) {}
-
-// RegisterLOOP implements plugins.RegistrarConfig.
-func (c *testRegistrarConfig) RegisterLOOP(config plugins.CmdConfig) (func() *exec.Cmd, loop.GRPCOpts, error) {
- if c.failRegister {
- return nil, loop.GRPCOpts{}, errors.New("failed to register")
- }
- return nil, loop.GRPCOpts{}, nil
-}
-
-var _ plugins.RegistrarConfig = (*testRegistrarConfig)(nil)
-
-type testDataSourceORM struct{}
-
-// LatestReport implements types.DataSourceORM.
-func (*testDataSourceORM) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) {
- return []byte{1, 2, 3}, nil
-}
-
-var _ types.DataSourceORM = (*testDataSourceORM)(nil)
diff --git a/core/services/ocr2/validate/validate.go b/core/services/ocr2/validate/validate.go
index 27a5a885369..acb16777f06 100644
--- a/core/services/ocr2/validate/validate.go
+++ b/core/services/ocr2/validate/validate.go
@@ -22,11 +22,9 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/config"
lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config"
- mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config"
"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
"github.com/smartcontractkit/chainlink/v2/core/services/relay"
- evmtypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
"github.com/smartcontractkit/chainlink/v2/plugins"
)
@@ -116,8 +114,6 @@ func validateSpec(ctx context.Context, tree *toml.Tree, spec job.Job, rc plugins
case types.Functions:
// TODO validator for DR-OCR spec: https://smartcontract-it.atlassian.net/browse/FUN-112
return nil
- case types.Mercury:
- return validateOCR2MercurySpec(spec.OCR2OracleSpec, *spec.OCR2OracleSpec.FeedID)
case types.CCIPExecution:
return validateOCR2CCIPExecutionSpec(spec.OCR2OracleSpec.PluginConfig)
case types.CCIPCommit:
@@ -297,28 +293,6 @@ func validateOCR2KeeperSpec(jsonConfig job.JSONConfig) error {
return nil
}
-func validateOCR2MercurySpec(spec *job.OCR2OracleSpec, feedID [32]byte) error {
- var relayConfig evmtypes.RelayConfig
- err := json.Unmarshal(spec.RelayConfig.Bytes(), &relayConfig)
- if err != nil {
- return pkgerrors.Wrap(err, "error while unmarshalling relay config")
- }
-
- if len(spec.PluginConfig) == 0 {
- if !relayConfig.EnableTriggerCapability {
- return pkgerrors.Wrap(err, "at least one transmission option must be configured")
- }
- return nil
- }
-
- var pluginConfig mercuryconfig.PluginConfig
- err = json.Unmarshal(spec.PluginConfig.Bytes(), &pluginConfig)
- if err != nil {
- return pkgerrors.Wrap(err, "error while unmarshalling plugin config")
- }
- return pkgerrors.Wrap(mercuryconfig.ValidatePluginConfig(pluginConfig, feedID), "Mercury PluginConfig is invalid")
-}
-
func validateOCR2CCIPExecutionSpec(jsonConfig job.JSONConfig) error {
if jsonConfig == nil {
return errors.New("pluginConfig is empty")
diff --git a/core/services/ocrbootstrap/delegate.go b/core/services/ocrbootstrap/delegate.go
index ad3a602d0bb..f2c547424be 100644
--- a/core/services/ocrbootstrap/delegate.go
+++ b/core/services/ocrbootstrap/delegate.go
@@ -102,16 +102,12 @@ func (d *Delegate) ServicesForSpec(ctx context.Context, jb job.Job) (services []
if err != nil {
return nil, fmt.Errorf("ServiceForSpec: failed to get relay %s is it enabled?: %w", rid.Name(), err)
}
- if spec.FeedID != nil {
- spec.RelayConfig["feedID"] = *spec.FeedID
- }
spec.RelayConfig.ApplyDefaultsOCR2(d.ocr2Cfg)
ctxVals := loop.ContextValues{
JobID: jb.ID,
JobName: jb.Name.ValueOrZero(),
ContractID: spec.ContractID,
- FeedID: spec.FeedID,
}
ctx = ctxVals.ContextWithValues(ctx)
diff --git a/core/services/ocrcommon/telemetry.go b/core/services/ocrcommon/telemetry.go
index 50b99adf8b9..762ed862321 100644
--- a/core/services/ocrcommon/telemetry.go
+++ b/core/services/ocrcommon/telemetry.go
@@ -7,18 +7,12 @@ import (
"math/big"
"strings"
- "github.com/ethereum/go-ethereum/common"
"github.com/shopspring/decimal"
"github.com/smartcontractkit/libocr/commontypes"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"google.golang.org/protobuf/proto"
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
- v1types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
- v2types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
- v3types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
- v4types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
"github.com/smartcontractkit/chainlink/v2/core/services/job"
"github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
@@ -49,21 +43,7 @@ type EnhancedTelemetryData struct {
RepTimestamp ObservationTimestamp
}
-type EnhancedTelemetryMercuryData struct {
- V1Observation *v1types.Observation
- V2Observation *v2types.Observation
- V3Observation *v3types.Observation
- V4Observation *v4types.Observation
- TaskRunResults pipeline.TaskRunResults
- RepTimestamp ocrtypes.ReportTimestamp
- FeedVersion mercuryutils.FeedVersion
- FetchMaxFinalizedTimestamp bool
- IsLinkFeed bool
- IsNativeFeed bool
- DpInvariantViolationDetected bool
-}
-
-type EnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData] struct {
+type EnhancedTelemetryService[T EnhancedTelemetryData] struct {
services.StateMachine
chTelem <-chan T
@@ -73,7 +53,7 @@ type EnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercury
lggr logger.Logger
}
-func NewEnhancedTelemetryService[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](job *job.Job, chTelem <-chan T, done chan struct{}, me commontypes.MonitoringEndpoint, lggr logger.Logger) *EnhancedTelemetryService[T] {
+func NewEnhancedTelemetryService[T EnhancedTelemetryData](job *job.Job, chTelem <-chan T, done chan struct{}, me commontypes.MonitoringEndpoint, lggr logger.Logger) *EnhancedTelemetryService[T] {
return &EnhancedTelemetryService[T]{
chTelem: chTelem,
chDone: done,
@@ -94,8 +74,6 @@ func (e *EnhancedTelemetryService[T]) Start(context.Context) error {
switch v := any(t).(type) {
case EnhancedTelemetryData:
e.collectEATelemetry(v.TaskRunResults, v.FinalResults, v.RepTimestamp)
- case EnhancedTelemetryMercuryData:
- e.collectMercuryEnhancedTelemetry(v)
default:
e.lggr.Errorf("unrecognised telemetry data type: %T", t)
}
@@ -340,149 +318,6 @@ func (e *EnhancedTelemetryService[T]) collectAndSend(trrs *pipeline.TaskRunResul
}
}
-// collectMercuryEnhancedTelemetry checks if enhanced telemetry should be collected, fetches the information needed and
-// sends the telemetry
-func (e *EnhancedTelemetryService[T]) collectMercuryEnhancedTelemetry(d EnhancedTelemetryMercuryData) {
- if e.monitoringEndpoint == nil {
- return
- }
-
- // v1 fields
- var bn int64
- var bh string
- var bt uint64
- // v1+v2+v3+v4 fields
- bp := big.NewInt(0)
- // v1+v3 fields
- bid := big.NewInt(0)
- ask := big.NewInt(0)
- // v2+v3 fields
- var mfts, lp, np int64
- // v4 fields
- var marketStatus telem.MarketStatus
-
- switch {
- case d.V1Observation != nil:
- obs := *d.V1Observation
- if obs.CurrentBlockNum.Err == nil {
- bn = obs.CurrentBlockNum.Val
- }
- if obs.CurrentBlockHash.Err == nil {
- bh = common.BytesToHash(obs.CurrentBlockHash.Val).Hex()
- }
- if obs.CurrentBlockTimestamp.Err == nil {
- bt = obs.CurrentBlockTimestamp.Val
- }
- if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil {
- bp = obs.BenchmarkPrice.Val
- }
- if obs.Bid.Err == nil && obs.Bid.Val != nil {
- bid = obs.Bid.Val
- }
- if obs.Ask.Err == nil && obs.Ask.Val != nil {
- ask = obs.Ask.Val
- }
- case d.V2Observation != nil:
- obs := *d.V2Observation
- if obs.MaxFinalizedTimestamp.Err == nil {
- mfts = obs.MaxFinalizedTimestamp.Val
- }
- if obs.LinkPrice.Err == nil && obs.LinkPrice.Val != nil {
- lp = obs.LinkPrice.Val.Int64()
- }
- if obs.NativePrice.Err == nil && obs.NativePrice.Val != nil {
- np = obs.NativePrice.Val.Int64()
- }
- if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil {
- bp = obs.BenchmarkPrice.Val
- }
- case d.V3Observation != nil:
- obs := *d.V3Observation
- if obs.MaxFinalizedTimestamp.Err == nil {
- mfts = obs.MaxFinalizedTimestamp.Val
- }
- if obs.LinkPrice.Err == nil && obs.LinkPrice.Val != nil {
- lp = obs.LinkPrice.Val.Int64()
- }
- if obs.NativePrice.Err == nil && obs.NativePrice.Val != nil {
- np = obs.NativePrice.Val.Int64()
- }
- if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil {
- bp = obs.BenchmarkPrice.Val
- }
- if obs.Bid.Err == nil && obs.Bid.Val != nil {
- bid = obs.Bid.Val
- }
- if obs.Ask.Err == nil && obs.Ask.Val != nil {
- ask = obs.Ask.Val
- }
- case d.V4Observation != nil:
- obs := *d.V4Observation
- if obs.MaxFinalizedTimestamp.Err == nil {
- mfts = obs.MaxFinalizedTimestamp.Val
- }
- if obs.LinkPrice.Err == nil && obs.LinkPrice.Val != nil {
- lp = obs.LinkPrice.Val.Int64()
- }
- if obs.NativePrice.Err == nil && obs.NativePrice.Val != nil {
- np = obs.NativePrice.Val.Int64()
- }
- if obs.BenchmarkPrice.Err == nil && obs.BenchmarkPrice.Val != nil {
- bp = obs.BenchmarkPrice.Val
- }
- if obs.MarketStatus.Err == nil {
- marketStatus = telem.MarketStatus(obs.MarketStatus.Val)
- }
- }
-
- eaTelemetryValues := ParseMercuryEATelemetry(logger.Sugared(e.lggr).With("jobID", e.job.ID), d.TaskRunResults, d.FeedVersion)
- for _, eaTelem := range eaTelemetryValues {
- t := &telem.EnhancedEAMercury{
- DataSource: eaTelem.DataSource,
- DpBenchmarkPrice: eaTelem.DpBenchmarkPrice,
- DpBid: eaTelem.DpBid,
- DpAsk: eaTelem.DpAsk,
- DpInvariantViolationDetected: d.DpInvariantViolationDetected,
- CurrentBlockNumber: bn,
- CurrentBlockHash: bh,
- CurrentBlockTimestamp: bt,
- FetchMaxFinalizedTimestamp: d.FetchMaxFinalizedTimestamp,
- MaxFinalizedTimestamp: mfts,
- BridgeTaskRunStartedTimestamp: eaTelem.BridgeTaskRunStartedTimestamp,
- BridgeTaskRunEndedTimestamp: eaTelem.BridgeTaskRunEndedTimestamp,
- ProviderRequestedTimestamp: eaTelem.ProviderRequestedTimestamp,
- ProviderReceivedTimestamp: eaTelem.ProviderReceivedTimestamp,
- ProviderDataStreamEstablished: eaTelem.ProviderDataStreamEstablished,
- ProviderIndicatedTime: eaTelem.ProviderIndicatedTime,
- Feed: e.job.OCR2OracleSpec.FeedID.Hex(),
- ObservationBenchmarkPrice: bp.Int64(),
- ObservationBid: bid.Int64(),
- ObservationAsk: ask.Int64(),
- ObservationBenchmarkPriceString: stringOrEmpty(bp),
- ObservationBidString: stringOrEmpty(bid),
- ObservationAskString: stringOrEmpty(ask),
- ObservationMarketStatus: marketStatus,
- IsLinkFeed: d.IsLinkFeed,
- LinkPrice: lp,
- IsNativeFeed: d.IsNativeFeed,
- NativePrice: np,
- ConfigDigest: d.RepTimestamp.ConfigDigest.Hex(),
- Round: int64(d.RepTimestamp.Round),
- Epoch: int64(d.RepTimestamp.Epoch),
- BridgeRequestData: eaTelem.BridgeRequestData,
- AssetSymbol: eaTelem.AssetSymbol,
- Version: uint32(d.FeedVersion),
- }
- bytes, err := proto.Marshal(t)
- if err != nil {
- e.lggr.Warnf("protobuf marshal failed %v", err.Error())
- continue
- }
-
- e.monitoringEndpoint.SendLog(bytes)
- }
-}
-
type telemetryAttributes struct {
PriceType *string `json:"priceType"`
}
@@ -672,15 +507,8 @@ func getPricesFromResultsByOrder(lggr logger.Logger, startTask pipeline.TaskRunR
return benchmarkPrice, bidPrice, askPrice
}
-// MaybeEnqueueEnhancedTelem sends data to the telemetry channel for processing
-func MaybeEnqueueEnhancedTelem(jb job.Job, ch chan<- EnhancedTelemetryMercuryData, data EnhancedTelemetryMercuryData) {
- if ShouldCollectEnhancedTelemetryMercury(jb) {
- EnqueueEnhancedTelem[EnhancedTelemetryMercuryData](ch, data)
- }
-}
-
// EnqueueEnhancedTelem sends data to the telemetry channel for processing
-func EnqueueEnhancedTelem[T EnhancedTelemetryData | EnhancedTelemetryMercuryData](ch chan<- T, data T) {
+func EnqueueEnhancedTelem[T EnhancedTelemetryData](ch chan<- T, data T) {
select {
case ch <- data:
default:
diff --git a/core/services/ocrcommon/telemetry_test.go b/core/services/ocrcommon/telemetry_test.go
index bfc2b1f2314..2e2a411b53a 100644
--- a/core/services/ocrcommon/telemetry_test.go
+++ b/core/services/ocrcommon/telemetry_test.go
@@ -1,14 +1,11 @@
package ocrcommon
import (
- "fmt"
"math/big"
"sync"
"testing"
- "github.com/ethereum/go-ethereum/common"
"github.com/shopspring/decimal"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@@ -16,10 +13,6 @@ import (
"google.golang.org/protobuf/proto"
"github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- mercuryv1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
- mercuryv2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
- mercuryv4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
evmtypes "github.com/smartcontractkit/chainlink-integrations/evm/types"
"github.com/smartcontractkit/chainlink-integrations/evm/utils"
ubig "github.com/smartcontractkit/chainlink-integrations/evm/utils/big"
@@ -815,401 +808,3 @@ func getViewFunctionTaskRunResults() pipeline.TaskRunResults {
},
}
}
-
-func TestCollectMercuryEnhancedTelemetryV1ViewFunction(t *testing.T) {
- wg := sync.WaitGroup{}
- ingressClient := mocks.NewTelemetryService(t)
- ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury)
-
- var sentMessage []byte
- ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
- sentMessage = args[1].([]byte)
- wg.Done()
- })
-
- lggr, _ := logger.TestLoggerObserved(t, zap.WarnLevel)
- chTelem := make(chan EnhancedTelemetryMercuryData, 100)
- chDone := make(chan struct{})
- feedID := common.HexToHash("0x111")
- e := EnhancedTelemetryService[EnhancedTelemetryMercuryData]{
- chDone: chDone,
- chTelem: chTelem,
- job: &job.Job{
- Type: job.Type(pipeline.OffchainReporting2JobType),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- CaptureEATelemetry: true,
- FeedID: &feedID,
- },
- },
- lggr: lggr,
- monitoringEndpoint: monitoringEndpoint,
- }
- servicetest.Run(t, &e)
-
- wg.Add(1)
-
- taskRunResults := getViewFunctionTaskRunResults()
-
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: taskRunResults,
- V1Observation: &mercuryv1.Observation{
- BenchmarkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(111111)},
- Bid: mercury.ObsResult[*big.Int]{Val: big.NewInt(222222)},
- Ask: mercury.ObsResult[*big.Int]{Val: big.NewInt(333333)},
- CurrentBlockNum: mercury.ObsResult[int64]{Val: 123456789},
- CurrentBlockHash: mercury.ObsResult[[]byte]{Val: common.HexToHash("0x123321").Bytes()},
- CurrentBlockTimestamp: mercury.ObsResult[uint64]{Val: 987654321},
- },
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
-
- expectedTelemetry := telem.EnhancedEAMercury{
- DataSource: "VIEW_FUNCTION",
- DpBenchmarkPrice: 1178718957397490400,
- DpBid: 1178718957397490400,
- DpAsk: 1178718957397490400,
- CurrentBlockNumber: 123456789,
- CurrentBlockHash: common.HexToHash("0x123321").String(),
- CurrentBlockTimestamp: 987654321,
- BridgeTaskRunStartedTimestamp: taskRunResults[0].CreatedAt.UnixMilli(),
- BridgeTaskRunEndedTimestamp: taskRunResults[0].FinishedAt.Time.UnixMilli(),
- ProviderRequestedTimestamp: 1726243598046,
- ProviderReceivedTimestamp: 1726243598341,
- ProviderDataStreamEstablished: 0,
- ProviderIndicatedTime: 0,
- Feed: common.HexToHash("0x111").String(),
- ObservationBenchmarkPrice: 111111,
- ObservationBid: 222222,
- ObservationAsk: 333333,
- ConfigDigest: "0200000000000000000000000000000000000000000000000000000000000000",
- Round: 22,
- Epoch: 11,
- BridgeRequestData: `{"data":{"address":"0x1234","signature":"function stEthPerToken() external view returns (uint256)"}}`,
- AssetSymbol: "0x1234",
- ObservationBenchmarkPriceString: "111111",
- ObservationBidString: "222222",
- ObservationAskString: "333333",
- }
-
- expectedMessage, _ := proto.Marshal(&expectedTelemetry)
- wg.Wait()
- require.Equal(t, expectedMessage, sentMessage)
-
- chDone <- struct{}{}
-}
-
-func TestCollectMercuryEnhancedTelemetryV1(t *testing.T) {
- wg := sync.WaitGroup{}
- ingressClient := mocks.NewTelemetryService(t)
- ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury)
-
- var sentMessage []byte
- ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
- sentMessage = args[1].([]byte)
- wg.Done()
- })
-
- lggr, logs := logger.TestLoggerObserved(t, zap.WarnLevel)
- chTelem := make(chan EnhancedTelemetryMercuryData, 100)
- chDone := make(chan struct{})
- feedID := common.HexToHash("0x111")
- e := EnhancedTelemetryService[EnhancedTelemetryMercuryData]{
- chDone: chDone,
- chTelem: chTelem,
- job: &job.Job{
- Type: job.Type(pipeline.OffchainReporting2JobType),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- CaptureEATelemetry: true,
- FeedID: &feedID,
- },
- },
- lggr: lggr,
- monitoringEndpoint: monitoringEndpoint,
- }
- servicetest.Run(t, &e)
-
- wg.Add(1)
-
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: trrsMercuryV1,
- V1Observation: &mercuryv1.Observation{
- BenchmarkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(111111)},
- Bid: mercury.ObsResult[*big.Int]{Val: big.NewInt(222222)},
- Ask: mercury.ObsResult[*big.Int]{Val: big.NewInt(333333)},
- CurrentBlockNum: mercury.ObsResult[int64]{Val: 123456789},
- CurrentBlockHash: mercury.ObsResult[[]byte]{Val: common.HexToHash("0x123321").Bytes()},
- CurrentBlockTimestamp: mercury.ObsResult[uint64]{Val: 987654321},
- },
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
-
- expectedTelemetry := telem.EnhancedEAMercury{
- DataSource: "data-source-name",
- DpBenchmarkPrice: 123456.123456,
- DpBid: 1234567.1234567,
- DpAsk: 321123,
- CurrentBlockNumber: 123456789,
- CurrentBlockHash: common.HexToHash("0x123321").String(),
- CurrentBlockTimestamp: 987654321,
- BridgeTaskRunStartedTimestamp: trrsMercuryV1[0].CreatedAt.UnixMilli(),
- BridgeTaskRunEndedTimestamp: trrsMercuryV1[0].FinishedAt.Time.UnixMilli(),
- ProviderRequestedTimestamp: 92233720368547760,
- ProviderReceivedTimestamp: -92233720368547760,
- ProviderDataStreamEstablished: 1,
- ProviderIndicatedTime: -123456789,
- Feed: common.HexToHash("0x111").String(),
- ObservationBenchmarkPrice: 111111,
- ObservationBid: 222222,
- ObservationAsk: 333333,
- ConfigDigest: "0200000000000000000000000000000000000000000000000000000000000000",
- Round: 22,
- Epoch: 11,
- BridgeRequestData: `{"data":{"to":"LINK","from":"USD"}}`,
- AssetSymbol: "USD/LINK",
- ObservationBenchmarkPriceString: "111111",
- ObservationBidString: "222222",
- ObservationAskString: "333333",
- }
-
- expectedMessage, _ := proto.Marshal(&expectedTelemetry)
- wg.Wait()
- require.Equal(t, expectedMessage, sentMessage)
-
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: pipeline.TaskRunResults{
- pipeline.TaskRunResult{Task: &pipeline.BridgeTask{
- Name: "test-mercury-bridge-1",
- BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0),
- },
- Result: pipeline.Result{
- Value: nil,
- }},
- },
- V1Observation: &mercuryv1.Observation{},
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
- wg.Add(1)
- trrsMercuryV1[0].Result.Value = ""
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: trrsMercuryV1,
- V1Observation: &mercuryv1.Observation{},
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
-
- wg.Wait()
- require.Equal(t, 1, logs.Len())
- require.Contains(t, logs.All()[0].Message, "cannot parse EA telemetry")
- chDone <- struct{}{}
-}
-
-func TestCollectMercuryEnhancedTelemetryV2(t *testing.T) {
- wg := sync.WaitGroup{}
- ingressClient := mocks.NewTelemetryService(t)
- ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury)
-
- var sentMessage []byte
- ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
- sentMessage = args[1].([]byte)
- wg.Done()
- })
-
- lggr, logs := logger.TestLoggerObserved(t, zap.WarnLevel)
- chTelem := make(chan EnhancedTelemetryMercuryData, 100)
- chDone := make(chan struct{})
- feedID := common.HexToHash("0x111")
- e := EnhancedTelemetryService[EnhancedTelemetryMercuryData]{
- chDone: chDone,
- chTelem: chTelem,
- job: &job.Job{
- Type: job.Type(pipeline.OffchainReporting2JobType),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- CaptureEATelemetry: true,
- FeedID: &feedID,
- },
- },
- lggr: lggr,
- monitoringEndpoint: monitoringEndpoint,
- }
- servicetest.Run(t, &e)
-
- wg.Add(1)
-
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: trrsMercuryV2,
- V2Observation: &mercuryv2.Observation{
- BenchmarkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(111111)},
- MaxFinalizedTimestamp: mercury.ObsResult[int64]{Val: 321},
- LinkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(4321)},
- NativePrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(54321)},
- },
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
-
- expectedTelemetry := telem.EnhancedEAMercury{
- DataSource: "data-source-name",
- DpBenchmarkPrice: 123456.123456,
- CurrentBlockNumber: 0,
- CurrentBlockHash: "",
- CurrentBlockTimestamp: 0,
- BridgeTaskRunStartedTimestamp: trrsMercuryV1[0].CreatedAt.UnixMilli(),
- BridgeTaskRunEndedTimestamp: trrsMercuryV1[0].FinishedAt.Time.UnixMilli(),
- ProviderRequestedTimestamp: 92233720368547760,
- ProviderReceivedTimestamp: -92233720368547760,
- ProviderDataStreamEstablished: 1,
- ProviderIndicatedTime: -123456789,
- Feed: common.HexToHash("0x111").String(),
- ObservationBenchmarkPrice: 111111,
- ObservationBid: 0,
- ObservationAsk: 0,
- ConfigDigest: "0200000000000000000000000000000000000000000000000000000000000000",
- Round: 22,
- Epoch: 11,
- BridgeRequestData: `{"data":{"to":"LINK","from":"USD"}}`,
- AssetSymbol: "USD/LINK",
- ObservationBenchmarkPriceString: "111111",
- MaxFinalizedTimestamp: 321,
- LinkPrice: 4321,
- NativePrice: 54321,
- }
-
- expectedMessage, _ := proto.Marshal(&expectedTelemetry)
- wg.Wait()
-
- require.Equal(t, expectedMessage, sentMessage)
-
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: pipeline.TaskRunResults{
- pipeline.TaskRunResult{Task: &pipeline.BridgeTask{
- Name: "test-mercury-bridge-2",
- BaseTask: pipeline.NewBaseTask(0, "ds1", nil, nil, 0),
- },
- Result: pipeline.Result{
- Value: nil,
- }},
- },
- V2Observation: &mercuryv2.Observation{},
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
- wg.Add(1)
- trrsMercuryV2[0].Result.Value = ""
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: trrsMercuryV2,
- V2Observation: &mercuryv2.Observation{},
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
-
- wg.Wait()
- require.Equal(t, 1, logs.Len())
- fmt.Println(logs.All())
- require.Contains(t, logs.All()[0].Message, "cannot parse EA telemetry")
- chDone <- struct{}{}
-}
-
-func TestCollectMercuryEnhancedTelemetryV4(t *testing.T) {
- ingressClient := mocks.NewTelemetryService(t)
- ingressAgent := telemetry.NewIngressAgentWrapper(ingressClient)
- monitoringEndpoint := ingressAgent.GenMonitoringEndpoint("test-network", "test-chainID", "0xa", synchronization.EnhancedEAMercury)
-
- sentMessageCh := make(chan []byte)
- ingressClient.On("Send", mock.Anything, mock.AnythingOfType("[]uint8"), mock.AnythingOfType("string"), mock.AnythingOfType("TelemetryType")).Return().Run(func(args mock.Arguments) {
- sentMessageCh <- args[1].([]byte)
- })
-
- lggr, _ := logger.TestLoggerObserved(t, zap.WarnLevel)
- chTelem := make(chan EnhancedTelemetryMercuryData, 100)
- chDone := make(chan struct{})
- feedID := common.HexToHash("0x0004")
- e := EnhancedTelemetryService[EnhancedTelemetryMercuryData]{
- chDone: chDone,
- chTelem: chTelem,
- job: &job.Job{
- Type: job.Type(pipeline.OffchainReporting2JobType),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- CaptureEATelemetry: true,
- FeedID: &feedID,
- },
- },
- lggr: lggr,
- monitoringEndpoint: monitoringEndpoint,
- }
- servicetest.Run(t, &e)
-
- chTelem <- EnhancedTelemetryMercuryData{
- TaskRunResults: trrsMercuryV4,
- FeedVersion: 4,
- V4Observation: &mercuryv4.Observation{
- BenchmarkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(111111)},
- MarketStatus: mercury.ObsResult[uint32]{Val: 2},
- MaxFinalizedTimestamp: mercury.ObsResult[int64]{Val: 321},
- LinkPrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(4321)},
- NativePrice: mercury.ObsResult[*big.Int]{Val: big.NewInt(54321)},
- },
- RepTimestamp: types.ReportTimestamp{
- ConfigDigest: types.ConfigDigest{2},
- Epoch: 11,
- Round: 22,
- },
- }
-
- expectedPricingTelemetry := telem.EnhancedEAMercury{
- DataSource: "data-source-name",
- DpBenchmarkPrice: 123456.123456,
- BridgeTaskRunStartedTimestamp: trrsMercuryV4[0].CreatedAt.UnixMilli(),
- BridgeTaskRunEndedTimestamp: trrsMercuryV4[0].FinishedAt.Time.UnixMilli(),
- ProviderRequestedTimestamp: 92233720368547760,
- ProviderReceivedTimestamp: -92233720368547760,
- ProviderDataStreamEstablished: 1,
- ProviderIndicatedTime: -123456789,
- Feed: common.HexToHash("0x0004").String(),
- ObservationBenchmarkPrice: 111111,
- ObservationMarketStatus: 2,
- ConfigDigest: "0200000000000000000000000000000000000000000000000000000000000000",
- Round: 22,
- Epoch: 11,
- AssetSymbol: "USD/LINK",
- ObservationBenchmarkPriceString: "111111",
- MaxFinalizedTimestamp: 321,
- LinkPrice: 4321,
- NativePrice: 54321,
- Version: 4,
- BridgeRequestData: `{"data":{"to":"LINK","from":"USD"}}`,
- }
- expectedPricingMessage, _ := proto.Marshal(&expectedPricingTelemetry)
- require.Equal(t, expectedPricingMessage, <-sentMessageCh)
-
- chDone <- struct{}{}
-
- // Verify that no other telemetry is sent.
- require.Len(t, sentMessageCh, 0)
-}
diff --git a/core/services/pg/connection.go b/core/services/pg/connection.go
index b099bbb2f35..93c161b4c0d 100644
--- a/core/services/pg/connection.go
+++ b/core/services/pg/connection.go
@@ -2,13 +2,11 @@ package pg
import (
"context"
- "errors"
"fmt"
"log"
"os"
"time"
- "github.com/jackc/pgconn"
_ "github.com/jackc/pgx/v4/stdlib" // need to make sure pgx driver is registered before opening connection
"github.com/jmoiron/sqlx"
@@ -56,7 +54,6 @@ func NewConnection(ctx context.Context, uri string, driverName string, config Co
if err != nil {
return nil, err
}
- setMaxMercuryConns(db, config)
if os.Getenv("SKIP_PG_VERSION_CHECK") != "true" {
if err = checkVersion(db, MinRequiredPGVersion); err != nil {
@@ -67,38 +64,6 @@ func NewConnection(ctx context.Context, uri string, driverName string, config Co
return db, nil
}
-func setMaxMercuryConns(db *sqlx.DB, config ConnectionConfig) {
- // HACK: In the case of mercury jobs, one conn is needed per job for good
- // performance. Most nops will forget to increase the defaults to account
- // for this so we detect it here instead.
- //
- // This problem will be solved by replacing mercury with parallel
- // compositions (llo plugin).
- //
- // See: https://smartcontract-it.atlassian.net/browse/MERC-3654
- var cnt int
- if err := db.Get(&cnt, `SELECT COUNT(*) FROM ocr2_oracle_specs WHERE plugin_type = 'mercury'`); err != nil {
- const errUndefinedTable = "42P01"
- var pqerr *pgconn.PgError
- if errors.As(err, &pqerr) {
- if pqerr.Code == errUndefinedTable {
- // no mercury jobs defined
- return
- }
- }
- log.Printf("Error checking mercury jobs: %s", err.Error())
- return
- }
- if cnt > config.MaxOpenConns() {
- log.Printf("Detected %d mercury jobs, increasing max open connections from %d to %d", cnt, config.MaxOpenConns(), cnt)
- db.SetMaxOpenConns(cnt)
- }
- if cnt > config.MaxIdleConns() {
- log.Printf("Detected %d mercury jobs, increasing max idle connections from %d to %d", cnt, config.MaxIdleConns(), cnt)
- db.SetMaxIdleConns(cnt)
- }
-}
-
type Getter interface {
Get(dest interface{}, query string, args ...interface{}) error
}
diff --git a/core/services/relay/evm/evm.go b/core/services/relay/evm/evm.go
index 059157a2ca4..103873641bc 100644
--- a/core/services/relay/evm/evm.go
+++ b/core/services/relay/evm/evm.go
@@ -13,7 +13,6 @@ import (
"net/http"
"strings"
"sync"
- "time"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
@@ -30,7 +29,6 @@ import (
chainselectors "github.com/smartcontractkit/chain-selectors"
ocr3capability "github.com/smartcontractkit/chainlink-common/pkg/capabilities/consensus/ocr3"
- "github.com/smartcontractkit/chainlink-common/pkg/capabilities/triggers"
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
"github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
@@ -56,18 +54,10 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/estimatorconfig"
cciptransmitter "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ccip/transmitter"
lloconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/llo/config"
- mercuryconfig "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/mercury/config"
"github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/codec"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/functions"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/interceptors/mantle"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reportcodecv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/reportcodec"
- reportcodecv2 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2/reportcodec"
- reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec"
- reportcodecv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
)
@@ -152,16 +142,11 @@ type Relayer struct {
lggr logger.SugaredLogger
registerer prometheus.Registerer
ks CSAETHKeystore
- mercuryPool wsrpc.Pool
codec commontypes.Codec
capabilitiesRegistry coretypes.CapabilitiesRegistry
- // Mercury
- mercuryORM mercury.ORM
- mercuryCfg MercuryConfig
- triggerCapability *triggers.MercuryTriggerService
-
// LLO/data streams
+ mercuryCfg MercuryConfig
cdcFactory func() (llo.ChannelDefinitionCacheFactory, error)
retirementReportCache llo.RetirementReportCache
}
@@ -180,7 +165,6 @@ type RelayerOpts struct {
DS sqlutil.DataSource
Registerer prometheus.Registerer
CSAETHKeystore
- MercuryPool wsrpc.Pool
RetirementReportCache llo.RetirementReportCache
MercuryConfig
CapabilitiesRegistry coretypes.CapabilitiesRegistry
@@ -209,8 +193,7 @@ func NewRelayer(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain,
if err != nil {
return nil, fmt.Errorf("cannot create evm relayer: %w", err)
}
- sugared := logger.Sugared(lggr).Named("Relayer").With("evmChainID", chain.ID())
- mercuryORM := mercury.NewORM(opts.DS)
+ sugared := logger.Sugared(lggr).Named("Relayer")
cdcFactory := sync.OnceValues(func() (llo.ChannelDefinitionCacheFactory, error) {
chainSelector, err := chainselectors.SelectorFromChainId(chain.ID().Uint64())
if err != nil {
@@ -225,10 +208,8 @@ func NewRelayer(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain,
lggr: logger.Sugared(sugared),
registerer: opts.Registerer,
ks: opts.CSAETHKeystore,
- mercuryPool: opts.MercuryPool,
cdcFactory: cdcFactory,
retirementReportCache: opts.RetirementReportCache,
- mercuryORM: mercuryORM,
mercuryCfg: opts.MercuryConfig,
capabilitiesRegistry: opts.CapabilitiesRegistry,
}
@@ -263,17 +244,6 @@ func (r *Relayer) Start(ctx context.Context) error {
func (r *Relayer) Close() error {
cs := make([]io.Closer, 0, 2)
- if r.triggerCapability != nil {
- cs = append(cs, r.triggerCapability)
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
-
- err := r.capabilitiesRegistry.Remove(ctx, r.triggerCapability.ID)
- if err != nil {
- return err
- }
- }
cs = append(cs, r.chain)
return services.MultiCloser(cs).Close()
}
@@ -410,104 +380,7 @@ func (r *Relayer) NewPluginProvider(ctx context.Context, rargs commontypes.Relay
}
func (r *Relayer) NewMercuryProvider(ctx context.Context, rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.MercuryProvider, error) {
- lggr := logger.Sugared(r.lggr).Named("MercuryProvider").Named(rargs.ExternalJobID.String())
- relayOpts := types.NewRelayOpts(rargs)
- relayConfig, err := relayOpts.RelayConfig()
- if err != nil {
- return nil, fmt.Errorf("failed to get relay config: %w", err)
- }
-
- var mercuryConfig mercuryconfig.PluginConfig
- if err = json.Unmarshal(pargs.PluginConfig, &mercuryConfig); err != nil {
- return nil, pkgerrors.WithStack(err)
- }
-
- if relayConfig.FeedID == nil {
- return nil, pkgerrors.New("FeedID must be specified")
- }
-
- if relayConfig.ChainID.String() != r.chain.ID().String() {
- return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String())
- }
- cp, err := newMercuryConfigProvider(ctx, lggr, r.chain, relayOpts)
- if err != nil {
- return nil, pkgerrors.WithStack(err)
- }
-
- if !relayConfig.EffectiveTransmitterID.Valid {
- return nil, pkgerrors.New("EffectiveTransmitterID must be specified")
- }
- privKey, err := r.ks.CSA().Get(relayConfig.EffectiveTransmitterID.String)
- if err != nil {
- return nil, pkgerrors.Wrap(err, "failed to get CSA key for mercury connection")
- }
-
- clients := make(map[string]wsrpc.Client)
- for _, server := range mercuryConfig.GetServers() {
- clients[server.URL], err = r.mercuryPool.Checkout(ctx, privKey, server.PubKey, server.URL)
- if err != nil {
- return nil, err
- }
- }
-
- // initialize trigger capability service lazily
- if relayConfig.EnableTriggerCapability && r.triggerCapability == nil {
- if r.capabilitiesRegistry == nil {
- lggr.Errorw("trigger capability is enabled but capabilities registry is not set")
- } else {
- var err2 error
- r.triggerCapability, err2 = triggers.NewMercuryTriggerService(0, relayConfig.TriggerCapabilityName, relayConfig.TriggerCapabilityVersion, lggr)
- if err2 != nil {
- return nil, fmt.Errorf("failed to start required trigger service: %w", err2)
- }
- if err2 = r.triggerCapability.Start(ctx); err2 != nil {
- return nil, err2
- }
- if err2 = r.capabilitiesRegistry.Add(ctx, r.triggerCapability); err2 != nil {
- return nil, err2
- }
- lggr.Infow("successfully added trigger service to the Registry")
- }
- }
-
- reportCodecV1 := reportcodecv1.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV1"))
- reportCodecV2 := reportcodecv2.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV2"))
- reportCodecV3 := reportcodecv3.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV3"))
- reportCodecV4 := reportcodecv4.NewReportCodec(*relayConfig.FeedID, lggr.Named("ReportCodecV4"))
-
- getCodecForFeed := func(feedID mercuryutils.FeedID) (mercury.TransmitterReportDecoder, error) {
- var transmitterCodec mercury.TransmitterReportDecoder
- switch feedID.Version() {
- case 1:
- transmitterCodec = reportCodecV1
- case 2:
- transmitterCodec = reportCodecV2
- case 3:
- transmitterCodec = reportCodecV3
- case 4:
- transmitterCodec = reportCodecV4
- default:
- return nil, fmt.Errorf("invalid feed version %d", feedID.Version())
- }
- return transmitterCodec, nil
- }
-
- benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) {
- benchmarkPriceCodec, benchmarkPriceErr := getCodecForFeed(feedID)
- if benchmarkPriceErr != nil {
- return nil, benchmarkPriceErr
- }
- return benchmarkPriceCodec.BenchmarkPriceFromReport(ctx, report)
- }
-
- transmitterCodec, err := getCodecForFeed(mercuryutils.FeedID(*relayConfig.FeedID))
- if err != nil {
- return nil, err
- }
-
- transmitter := mercury.NewTransmitter(lggr, r.mercuryCfg.Transmitter(), clients, privKey.PublicKey, rargs.JobID, *relayConfig.FeedID, r.mercuryORM, transmitterCodec, benchmarkPriceDecoder, r.triggerCapability)
-
- return NewMercuryProvider(cp, r.codec, NewMercuryChainReader(r.chain.HeadTracker()), transmitter, reportCodecV1, reportCodecV2, reportCodecV3, reportCodecV4, lggr), nil
+ return nil, errors.New("mercury jobs are no longer supported")
}
func chainToUUID(chainID *big.Int) uuid.UUID {
@@ -691,12 +564,31 @@ func (r *Relayer) NewCCIPExecProvider(ctx context.Context, rargs commontypes.Rel
func (r *Relayer) NewLLOProvider(ctx context.Context, rargs commontypes.RelayArgs, pargs commontypes.PluginArgs) (commontypes.LLOProvider, error) {
relayOpts := types.NewRelayOpts(rargs)
+ var relayConfig types.RelayConfig
+ {
+ var err error
+ relayConfig, err = relayOpts.RelayConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get relay config: %w", err)
+ }
+ }
+ if relayConfig.LLODONID == 0 {
+ return nil, errors.New("donID must be specified in relayConfig for LLO jobs")
+ }
+
+ var lloCfg lloconfig.PluginConfig
+ if err := json.Unmarshal(pargs.PluginConfig, &lloCfg); err != nil {
+ return nil, pkgerrors.WithStack(err)
+ }
+ if err := lloCfg.Validate(); err != nil {
+ return nil, err
+ }
relayConfig, err := relayOpts.RelayConfig()
if err != nil {
return nil, fmt.Errorf("failed to get relay config: %w", err)
}
- if relayConfig.LLOConfigMode == "" {
- return nil, fmt.Errorf("LLOConfigMode must be specified in relayConfig for LLO jobs (can be either: %q or %q)", types.LLOConfigModeMercury, types.LLOConfigModeBlueGreen)
+ if relayConfig.LLODONID == 0 {
+ return nil, errors.New("donID must be specified in relayConfig for LLO jobs")
}
if relayConfig.ChainID.String() != r.chain.ID().String() {
return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String())
@@ -711,18 +603,11 @@ func (r *Relayer) NewLLOProvider(ctx context.Context, rargs commontypes.RelayArg
lggr := r.lggr.Named(fmt.Sprintf("job-%d", rargs.JobID)).With("donID", relayConfig.LLODONID, "transmitterID", relayConfig.EffectiveTransmitterID.String)
switch relayConfig.LLOConfigMode {
- case types.LLOConfigModeMercury, types.LLOConfigModeBlueGreen:
+ case types.LLOConfigModeBlueGreen:
default:
- return nil, fmt.Errorf("LLOConfigMode must be specified in relayConfig for LLO jobs (only %q or %q is currently supported)", types.LLOConfigModeMercury, types.LLOConfigModeBlueGreen)
+ return nil, fmt.Errorf("LLOConfigMode must be specified in relayConfig for LLO jobs (only %q is currently supported)", types.LLOConfigModeBlueGreen)
}
- var lloCfg lloconfig.PluginConfig
- if err = json.Unmarshal(pargs.PluginConfig, &lloCfg); err != nil {
- return nil, pkgerrors.WithStack(err)
- }
- if err = lloCfg.Validate(); err != nil {
- return nil, err
- }
privKey, err := r.ks.CSA().Get(relayConfig.EffectiveTransmitterID.String)
if err != nil {
return nil, pkgerrors.Wrap(err, "failed to get CSA key for mercury connection")
@@ -744,12 +629,6 @@ func (r *Relayer) NewLLOProvider(ctx context.Context, rargs commontypes.RelayArg
ServerPubKey: ed25519.PublicKey(server.PubKey),
ServerURL: server.URL,
})
- case config.MercuryTransmitterProtocolWSRPC:
- wsrpcClient, checkoutErr := r.mercuryPool.Checkout(ctx, privKey, server.PubKey, server.URL)
- if checkoutErr != nil {
- return nil, checkoutErr
- }
- client = wsrpc.GRPCCompatibilityWrapper{Client: wsrpcClient}
default:
return nil, fmt.Errorf("unsupported protocol %q", r.mercuryCfg.Transmitter().Protocol())
}
@@ -813,23 +692,17 @@ func (r *Relayer) NewConfigProvider(ctx context.Context, args commontypes.RelayA
return nil, fmt.Errorf("internal error: chain id in spec does not match this relayer's chain: have %s expected %s", relayConfig.ChainID.String(), r.chain.ID().String())
}
- // Handle legacy jobs which did not yet specify provider type and
- // switched between median/mercury based on presence of feed ID
if args.ProviderType == "" {
- if relayConfig.FeedID == nil {
- args.ProviderType = "median"
- } else if relayConfig.LLODONID > 0 {
+ if relayConfig.LLODONID > 0 {
args.ProviderType = "llo"
} else {
- args.ProviderType = "mercury"
+ args.ProviderType = "median"
}
}
switch args.ProviderType {
case "median":
configProvider, err = newStandardConfigProvider(ctx, lggr, r.chain, relayOpts)
- case "mercury":
- configProvider, err = newMercuryConfigProvider(ctx, lggr, r.chain, relayOpts)
case "llo":
// Use NullRetirementReportCache since we never run LLO jobs on
// bootstrap nodes, and there's no need to introduce a failure mode or
@@ -858,11 +731,7 @@ func FilterNamesFromRelayArgs(args commontypes.RelayArgs) (filterNames []string,
return nil, pkgerrors.WithStack(err)
}
- if relayConfig.FeedID != nil {
- filterNames = []string{mercury.FilterName(addr.Address(), *relayConfig.FeedID)}
- } else {
- filterNames = []string{configPollerFilterName(addr.Address()), transmitterFilterName(addr.Address())}
- }
+ filterNames = []string{configPollerFilterName(addr.Address()), transmitterFilterName(addr.Address())}
return filterNames, err
}
diff --git a/core/services/relay/evm/llo_provider.go b/core/services/relay/evm/llo_provider.go
index 83e62f75f33..7fd474649be 100644
--- a/core/services/relay/evm/llo_provider.go
+++ b/core/services/relay/evm/llo_provider.go
@@ -9,6 +9,11 @@ import (
"github.com/ethereum/go-ethereum/common"
ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller"
+ "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/llo"
+ "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
+
"github.com/smartcontractkit/chainlink-common/pkg/logger"
"github.com/smartcontractkit/chainlink-common/pkg/services"
commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
@@ -19,7 +24,6 @@ import (
"github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/llo"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
"github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
)
@@ -181,64 +185,10 @@ func (p *lloProvider) ShouldRetireCache() llotypes.ShouldRetireCache {
return p.shouldRetireCache
}
-// wrapper is needed to turn mercury config poller into a service
-type mercuryConfigPollerWrapper struct {
- *mercury.ConfigPoller
- services.Service
- eng *services.Engine
-
- runReplay bool
- fromBlock uint64
-}
-
-func newMercuryConfigPollerWrapper(lggr logger.Logger, cp *mercury.ConfigPoller, fromBlock uint64, runReplay bool) *mercuryConfigPollerWrapper {
- w := &mercuryConfigPollerWrapper{cp, nil, nil, runReplay, fromBlock}
- w.Service, w.eng = services.Config{
- Name: "LLOMercuryConfigWrapper",
- Start: w.start,
- Close: w.close,
- }.NewServiceEngine(lggr)
- return w
-}
-
-func (w *mercuryConfigPollerWrapper) Start(ctx context.Context) error {
- return w.Service.Start(ctx)
-}
-
-func (w *mercuryConfigPollerWrapper) start(ctx context.Context) error {
- w.ConfigPoller.Start()
- return nil
-}
-
-func (w *mercuryConfigPollerWrapper) Close() error {
- return w.Service.Close()
-}
-
-func (w *mercuryConfigPollerWrapper) close() error {
- return w.ConfigPoller.Close()
-}
-
func newLLOConfigPollers(ctx context.Context, lggr logger.Logger, cc llo.ConfigCache, lp logpoller.LogPoller, chainID *big.Int, configuratorAddress common.Address, relayConfig types.RelayConfig) (cps []llo.ConfigPollerService, configDigester ocrtypes.OffchainConfigDigester, err error) {
donID := relayConfig.LLODONID
donIDHash := llo.DonIDToBytes32(donID)
switch relayConfig.LLOConfigMode {
- case types.LLOConfigModeMercury:
- // NOTE: This uses the old config digest prefix for compatibility with legacy contracts
- configDigester = mercury.NewOffchainConfigDigester(donIDHash, chainID, configuratorAddress, ocrtypes.ConfigDigestPrefixMercuryV02)
- // Mercury config poller will register its own filter
- mcp, err := mercury.NewConfigPoller(
- ctx,
- lggr,
- lp,
- configuratorAddress,
- llo.DonIDToBytes32(donID),
- )
- if err != nil {
- return nil, nil, err
- }
- // don't need to replay in the wrapper since the provider will handle it
- w := newMercuryConfigPollerWrapper(lggr, mcp, relayConfig.FromBlock, false)
- cps = []llo.ConfigPollerService{w}
case types.LLOConfigModeBlueGreen:
// NOTE: Register filter here because the config poller doesn't do it on its own
err := lp.RegisterFilter(ctx, logpoller.Filter{Name: lloProviderConfiguratorFilterName(configuratorAddress, donID), EventSigs: []common.Hash{llo.ProductionConfigSet, llo.StagingConfigSet, llo.PromoteStagingConfig}, Topic2: []common.Hash{donIDHash}, Addresses: []common.Address{configuratorAddress}})
diff --git a/core/services/relay/evm/mercury/config_digest.go b/core/services/relay/evm/mercury/config_digest.go
deleted file mode 100644
index c7a6076c886..00000000000
--- a/core/services/relay/evm/mercury/config_digest.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package mercury
-
-import (
- "encoding/binary"
- "fmt"
- "math/big"
- "strings"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/wsrpc/credentials"
-
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/exposed_verifier"
-)
-
-func makeConfigDigestArgs() abi.Arguments {
- abi, err := abi.JSON(strings.NewReader(exposed_verifier.ExposedVerifierABI))
- if err != nil {
- // assertion
- panic(fmt.Sprintf("could not parse aggregator ABI: %s", err.Error()))
- }
- return abi.Methods["exposedConfigDigestFromConfigData"].Inputs
-}
-
-var configDigestArgs = makeConfigDigestArgs()
-
-func configDigest(
- feedID common.Hash,
- chainID *big.Int,
- contractAddress common.Address,
- configCount uint64,
- oracles []common.Address,
- transmitters []credentials.StaticSizedPublicKey,
- f uint8,
- onchainConfig []byte,
- offchainConfigVersion uint64,
- offchainConfig []byte,
- prefix types.ConfigDigestPrefix,
-) types.ConfigDigest {
- msg, err := configDigestArgs.Pack(
- feedID,
- chainID,
- contractAddress,
- configCount,
- oracles,
- transmitters,
- f,
- onchainConfig,
- offchainConfigVersion,
- offchainConfig,
- )
- if err != nil {
- // assertion
- panic(err)
- }
- rawHash := crypto.Keccak256(msg)
- configDigest := types.ConfigDigest{}
- if n := copy(configDigest[:], rawHash); n != len(configDigest) {
- // assertion
- panic("copy too little data")
- }
- binary.BigEndian.PutUint16(configDigest[:2], uint16(prefix))
- return configDigest
-}
diff --git a/core/services/relay/evm/mercury/config_digest_test.go b/core/services/relay/evm/mercury/config_digest_test.go
deleted file mode 100644
index 600eb8c88d5..00000000000
--- a/core/services/relay/evm/mercury/config_digest_test.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package mercury
-
-import (
- "math/big"
- "reflect"
- "testing"
- "unsafe"
-
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/ethclient/simulated"
- "github.com/leanovate/gopter"
- "github.com/leanovate/gopter/gen"
- "github.com/leanovate/gopter/prop"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/wsrpc/credentials"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/exposed_verifier"
-)
-
-// Adapted from: https://github.com/smartcontractkit/offchain-reporting/blob/991ebe1462fd56826a1ddfb34287d542acb2baee/lib/offchainreporting2/chains/evmutil/config_digest_test.go
-
-func TestConfigCalculationMatches(t *testing.T) {
- key, err := crypto.GenerateKey()
- require.NoError(t, err, "could not make private key for EOA owner")
- owner, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- require.NoError(t, err)
- backend := simulated.NewBackend(
- types.GenesisAlloc{owner.From: {Balance: new(big.Int).Lsh(big.NewInt(1), 60)}},
- simulated.WithBlockGasLimit(ethconfig.Defaults.Miner.GasCeil),
- )
- _, _, eoa, err := exposed_verifier.DeployExposedVerifier(
- owner, backend.Client(),
- )
- backend.Commit()
- require.NoError(t, err, "could not deploy test EOA")
- p := gopter.NewProperties(nil)
- p.Property("onchain/offchain config digests match", prop.ForAll(
- func(
- feedID [32]byte,
- chainID uint64,
- contractAddress common.Address,
- configCount uint64,
- oracles []common.Address,
- transmitters [][32]byte,
- f uint8,
- onchainConfig []byte,
- offchainConfigVersion uint64,
- offchainConfig []byte,
- ) bool {
- chainIDBig := new(big.Int).SetUint64(chainID)
- golangDigest := configDigest(
- feedID,
- chainIDBig,
- contractAddress,
- configCount,
- oracles,
- *(*[]credentials.StaticSizedPublicKey)(unsafe.Pointer(&transmitters)),
- f,
- onchainConfig,
- offchainConfigVersion,
- offchainConfig,
- ocrtypes.ConfigDigestPrefixMercuryV02,
- )
-
- bigChainID := new(big.Int)
- bigChainID.SetUint64(chainID)
-
- solidityDigest, err := eoa.ExposedConfigDigestFromConfigData(nil,
- feedID,
- bigChainID,
- contractAddress,
- configCount,
- oracles,
- transmitters,
- f,
- onchainConfig,
- offchainConfigVersion,
- offchainConfig,
- )
- require.NoError(t, err, "could not compute solidity version of config digest")
- return golangDigest == solidityDigest
- },
- GenHash(t),
- gen.UInt64(),
- GenAddress(t),
- gen.UInt64(),
- GenAddressArray(t),
- GenClientPubKeyArray(t),
- gen.UInt8(),
- GenBytes(t),
- gen.UInt64(),
- GenBytes(t),
- ))
- p.TestingRun(t)
-}
-
-func GenHash(t *testing.T) gopter.Gen {
- var byteGens []gopter.Gen
- for i := 0; i < 32; i++ {
- byteGens = append(byteGens, gen.UInt8())
- }
- return gopter.CombineGens(byteGens...).Map(
- func(byteArray interface{}) (rv common.Hash) {
- array, ok := byteArray.(*gopter.GenResult).Retrieve()
- require.True(t, ok, "failed to retrieve gen result")
- for i, byteVal := range array.([]interface{}) {
- rv[i] = byteVal.(uint8)
- }
- return rv
- },
- )
-}
-
-func GenHashArray(t *testing.T) gopter.Gen {
- return gen.UInt8Range(0, 31).FlatMap(
- func(length interface{}) gopter.Gen {
- var hashGens []gopter.Gen
- for i := uint8(0); i < length.(uint8); i++ {
- hashGens = append(hashGens, GenHash(t))
- }
- return gopter.CombineGens(hashGens...).Map(
- func(hashArray interface{}) (rv []common.Hash) {
- array, ok := hashArray.(*gopter.GenResult).Retrieve()
- require.True(t, ok, "could not extract hash array")
- for _, hashVal := range array.([]interface{}) {
- rv = append(rv, hashVal.(common.Hash))
- }
- return rv
- },
- )
- },
- reflect.ValueOf([]common.Hash{}).Type(),
- )
-}
-
-func GenAddress(t *testing.T) gopter.Gen {
- return GenHash(t).Map(
- func(hash interface{}) common.Address {
- iHash, ok := hash.(*gopter.GenResult).Retrieve()
- require.True(t, ok, "failed to retrieve hash")
- return common.BytesToAddress(iHash.(common.Hash).Bytes())
- },
- )
-}
-
-func GenAddressArray(t *testing.T) gopter.Gen {
- return GenHashArray(t).Map(
- func(hashes interface{}) (rv []common.Address) {
- hashArray, ok := hashes.(*gopter.GenResult).Retrieve()
- require.True(t, ok, "failed to retrieve hashes")
- for _, hash := range hashArray.([]common.Hash) {
- rv = append(rv, common.BytesToAddress(hash.Bytes()))
- }
- return rv
- },
- )
-}
-
-func GenClientPubKey(t *testing.T) gopter.Gen {
- return GenHash(t).Map(
- func(hash interface{}) (pk [32]byte) {
- iHash, ok := hash.(*gopter.GenResult).Retrieve()
- require.True(t, ok, "failed to retrieve hash")
- copy(pk[:], (iHash.(common.Hash).Bytes()))
- return
- },
- )
-}
-
-func GenClientPubKeyArray(t *testing.T) gopter.Gen {
- return GenHashArray(t).Map(
- func(hashes interface{}) (rv [][32]byte) {
- hashArray, ok := hashes.(*gopter.GenResult).Retrieve()
- require.True(t, ok, "failed to retrieve hashes")
- for _, hash := range hashArray.([]common.Hash) {
- pk := [32]byte{}
- copy(pk[:], hash.Bytes())
- rv = append(rv, pk)
- }
- return rv
- },
- )
-}
-
-func GenBytes(t *testing.T) gopter.Gen {
- return gen.UInt16Range(0, 2000).FlatMap(
- func(length interface{}) gopter.Gen {
- var byteGens []gopter.Gen
- for i := uint16(0); i < length.(uint16); i++ {
- byteGens = append(byteGens, gen.UInt8())
- }
- return gopter.CombineGens(byteGens...).Map(
- func(byteArray interface{}) []byte {
- array, ok := byteArray.(*gopter.GenResult).Retrieve()
- require.True(t, ok, "failed to retrieve gen result")
- iArray := array.([]interface{})
- rv := make([]byte, len(iArray))
- for i, byteVal := range iArray {
- rv[i] = byteVal.(uint8)
- }
- return rv
- },
- )
- },
- reflect.ValueOf([]byte{}).Type(),
- )
-}
diff --git a/core/services/relay/evm/mercury/config_poller.go b/core/services/relay/evm/mercury/config_poller.go
deleted file mode 100644
index 8074a8202ba..00000000000
--- a/core/services/relay/evm/mercury/config_poller.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package mercury
-
-import (
- "context"
- "database/sql"
- "fmt"
- "strings"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
- "github.com/ethereum/go-ethereum/common"
- "github.com/pkg/errors"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
-
- "github.com/smartcontractkit/chainlink-integrations/evm/logpoller"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
-)
-
-// FeedScopedConfigSet ConfigSet with FeedID for use with mercury (and multi-config DON)
-var FeedScopedConfigSet common.Hash
-
-var verifierABI abi.ABI
-
-const (
- configSetEventName = "ConfigSet"
- feedIdTopicIndex = 1
-)
-
-func init() {
- var err error
- verifierABI, err = abi.JSON(strings.NewReader(verifier.VerifierABI))
- if err != nil {
- panic(err)
- }
- FeedScopedConfigSet = verifierABI.Events[configSetEventName].ID
-}
-
-// FullConfigFromLog defines the contract config with the feedID
-type FullConfigFromLog struct {
- ocrtypes.ContractConfig
- feedID utils.FeedID
-}
-
-func unpackLogData(d []byte) (*verifier.VerifierConfigSet, error) {
- unpacked := new(verifier.VerifierConfigSet)
-
- err := verifierABI.UnpackIntoInterface(unpacked, configSetEventName, d)
- if err != nil {
- return nil, errors.Wrap(err, "failed to unpack log data")
- }
-
- return unpacked, nil
-}
-
-func ConfigFromLog(logData []byte) (FullConfigFromLog, error) {
- unpacked, err := unpackLogData(logData)
- if err != nil {
- return FullConfigFromLog{}, err
- }
-
- var transmitAccounts []ocrtypes.Account
- for _, addr := range unpacked.OffchainTransmitters {
- transmitAccounts = append(transmitAccounts, ocrtypes.Account(fmt.Sprintf("%x", addr)))
- }
- var signers []ocrtypes.OnchainPublicKey
- for _, addr := range unpacked.Signers {
- addr := addr
- signers = append(signers, addr[:])
- }
-
- return FullConfigFromLog{
- feedID: unpacked.FeedId,
- ContractConfig: ocrtypes.ContractConfig{
- ConfigDigest: unpacked.ConfigDigest,
- ConfigCount: unpacked.ConfigCount,
- Signers: signers,
- Transmitters: transmitAccounts,
- F: unpacked.F,
- OnchainConfig: unpacked.OnchainConfig,
- OffchainConfigVersion: unpacked.OffchainConfigVersion,
- OffchainConfig: unpacked.OffchainConfig,
- },
- }, nil
-}
-
-// ConfigPoller defines the Mercury Config Poller
-type ConfigPoller struct {
- lggr logger.Logger
- destChainLogPoller logpoller.LogPoller
- addr common.Address
- feedId common.Hash
-}
-
-func FilterName(addr common.Address, feedID common.Hash) string {
- return logpoller.FilterName("OCR3 Mercury ConfigPoller", addr.String(), feedID.Hex())
-}
-
-// NewConfigPoller creates a new Mercury ConfigPoller
-func NewConfigPoller(ctx context.Context, lggr logger.Logger, destChainPoller logpoller.LogPoller, addr common.Address, feedId common.Hash) (*ConfigPoller, error) {
- err := destChainPoller.RegisterFilter(ctx, logpoller.Filter{Name: FilterName(addr, feedId), EventSigs: []common.Hash{FeedScopedConfigSet}, Addresses: []common.Address{addr}})
- if err != nil {
- return nil, err
- }
-
- cp := &ConfigPoller{
- lggr: lggr,
- destChainLogPoller: destChainPoller,
- addr: addr,
- feedId: feedId,
- }
-
- return cp, nil
-}
-
-func (cp *ConfigPoller) Start() {}
-
-func (cp *ConfigPoller) Close() error {
- return nil
-}
-
-func (cp *ConfigPoller) Notify() <-chan struct{} {
- return nil // rely on libocr's builtin config polling
-}
-
-// Replay abstracts the logpoller.LogPoller Replay() implementation
-func (cp *ConfigPoller) Replay(ctx context.Context, fromBlock int64) error {
- return cp.destChainLogPoller.Replay(ctx, fromBlock)
-}
-
-// LatestConfigDetails returns the latest config details from the logs
-func (cp *ConfigPoller) LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error) {
- cp.lggr.Debugw("LatestConfigDetails", "eventSig", FeedScopedConfigSet, "addr", cp.addr, "topicIndex", feedIdTopicIndex, "feedID", cp.feedId)
- logs, err := cp.destChainLogPoller.IndexedLogs(ctx, FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId}, 1)
- if err != nil {
- return 0, ocrtypes.ConfigDigest{}, err
- }
- if len(logs) == 0 {
- return 0, ocrtypes.ConfigDigest{}, nil
- }
- latest := logs[len(logs)-1]
- latestConfigSet, err := ConfigFromLog(latest.Data)
- if err != nil {
- return 0, ocrtypes.ConfigDigest{}, err
- }
- return uint64(latest.BlockNumber), latestConfigSet.ConfigDigest, nil
-}
-
-// LatestConfig returns the latest config from the logs on a certain block
-func (cp *ConfigPoller) LatestConfig(ctx context.Context, changedInBlock uint64) (ocrtypes.ContractConfig, error) {
- lgs, err := cp.destChainLogPoller.IndexedLogsByBlockRange(ctx, int64(changedInBlock), int64(changedInBlock), FeedScopedConfigSet, cp.addr, feedIdTopicIndex, []common.Hash{cp.feedId})
- if err != nil {
- return ocrtypes.ContractConfig{}, err
- }
- if len(lgs) == 0 {
- return ocrtypes.ContractConfig{}, nil
- }
- latestConfigSet, err := ConfigFromLog(lgs[len(lgs)-1].Data)
- if err != nil {
- return ocrtypes.ContractConfig{}, err
- }
- cp.lggr.Infow("LatestConfig", "latestConfig", latestConfigSet)
- return latestConfigSet.ContractConfig, nil
-}
-
-// LatestBlockHeight returns the latest block height from the logs
-func (cp *ConfigPoller) LatestBlockHeight(ctx context.Context) (blockHeight uint64, err error) {
- latest, err := cp.destChainLogPoller.LatestBlock(ctx)
- if err != nil {
- if errors.Is(err, sql.ErrNoRows) {
- return 0, nil
- }
- return 0, err
- }
- return uint64(latest.BlockNumber), nil
-}
diff --git a/core/services/relay/evm/mercury/config_poller_test.go b/core/services/relay/evm/mercury/config_poller_test.go
deleted file mode 100644
index 91baa4acddb..00000000000
--- a/core/services/relay/evm/mercury/config_poller_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package mercury
-
-import (
- "fmt"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/onsi/gomega"
- "github.com/pkg/errors"
- confighelper2 "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- ocrtypes2 "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "github.com/umbracle/ethgo/abi"
-
- evmutils "github.com/smartcontractkit/chainlink-integrations/evm/utils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-func TestMercuryConfigPoller(t *testing.T) {
- feedID := evmutils.NewHash()
- feedIDBytes := [32]byte(feedID)
-
- th := SetupTH(t, feedID)
-
- notify := th.configPoller.Notify()
- assert.Empty(t, notify)
-
- // Should have no config to begin with.
- _, config, err := th.configPoller.LatestConfigDetails(testutils.Context(t))
- require.NoError(t, err)
- require.Equal(t, ocrtypes2.ConfigDigest{}, config)
-
- // Create minimum number of nodes.
- n := 4
- var oracles []confighelper2.OracleIdentityExtra
- for i := 0; i < n; i++ {
- oracles = append(oracles, confighelper2.OracleIdentityExtra{
- OracleIdentity: confighelper2.OracleIdentity{
- OnchainPublicKey: evmutils.RandomAddress().Bytes(),
- TransmitAccount: ocrtypes2.Account(evmutils.RandomAddress().String()),
- OffchainPublicKey: evmutils.RandomBytes32(),
- PeerID: utils.MustNewPeerID(),
- },
- ConfigEncryptionPublicKey: evmutils.RandomBytes32(),
- })
- }
- f := uint8(1)
- // Setup config on contract
- configType := abi.MustNewType("tuple()")
- onchainConfigVal, err := abi.Encode(map[string]interface{}{}, configType)
- require.NoError(t, err)
- signers, _, threshold, onchainConfig, offchainConfigVersion, offchainConfig, err := confighelper2.ContractSetConfigArgsForTests(
- 2*time.Second, // DeltaProgress
- 20*time.Second, // DeltaResend
- 100*time.Millisecond, // DeltaRound
- 0, // DeltaGrace
- 1*time.Minute, // DeltaStage
- 100, // rMax
- []int{len(oracles)}, // S
- oracles,
- []byte{}, // reportingPluginConfig []byte,
- nil,
- 0, // Max duration query
- 250*time.Millisecond, // Max duration observation
- 250*time.Millisecond, // MaxDurationReport
- 250*time.Millisecond, // MaxDurationShouldAcceptFinalizedReport
- 250*time.Millisecond, // MaxDurationShouldTransmitAcceptedReport
- int(f), // f
- onchainConfigVal,
- )
- require.NoError(t, err)
- signerAddresses, err := onchainPublicKeyToAddress(signers)
- require.NoError(t, err)
- offchainTransmitters := make([][32]byte, n)
- encodedTransmitter := make([]ocrtypes2.Account, n)
- for i := 0; i < n; i++ {
- offchainTransmitters[i] = oracles[i].OffchainPublicKey
- encodedTransmitter[i] = ocrtypes2.Account(fmt.Sprintf("%x", oracles[i].OffchainPublicKey[:]))
- }
-
- _, err = th.verifierContract.SetConfig(th.user, feedIDBytes, signerAddresses, offchainTransmitters, f, onchainConfig, offchainConfigVersion, offchainConfig, nil)
- require.NoError(t, err, "failed to setConfig with feed ID")
- th.backend.Commit()
-
- latest, err := th.backend.Client().BlockByNumber(testutils.Context(t), nil)
- require.NoError(t, err)
- // Ensure we capture this config set log.
- require.NoError(t, th.logPoller.Replay(testutils.Context(t), latest.Number().Int64()-1))
-
- // Send blocks until we see the config updated.
- var configBlock uint64
- var digest [32]byte
- gomega.NewGomegaWithT(t).Eventually(func() bool {
- th.backend.Commit()
- configBlock, digest, err = th.configPoller.LatestConfigDetails(testutils.Context(t))
- require.NoError(t, err)
- return ocrtypes2.ConfigDigest{} != digest
- }, testutils.WaitTimeout(t), 100*time.Millisecond).Should(gomega.BeTrue())
-
- // Assert the config returned is the one we configured.
- newConfig, err := th.configPoller.LatestConfig(testutils.Context(t), configBlock)
- require.NoError(t, err)
- // Note we don't check onchainConfig, as that is populated in the contract itself.
- assert.Equal(t, digest, [32]byte(newConfig.ConfigDigest))
- assert.Equal(t, signers, newConfig.Signers)
- assert.Equal(t, threshold, newConfig.F)
- assert.Equal(t, encodedTransmitter, newConfig.Transmitters)
- assert.Equal(t, offchainConfigVersion, newConfig.OffchainConfigVersion)
- assert.Equal(t, offchainConfig, newConfig.OffchainConfig)
-}
-
-func onchainPublicKeyToAddress(publicKeys []types.OnchainPublicKey) (addresses []common.Address, err error) {
- for _, signer := range publicKeys {
- if len(signer) != 20 {
- return []common.Address{}, errors.Errorf("address is not 20 bytes %s", signer)
- }
- addresses = append(addresses, common.BytesToAddress(signer))
- }
- return addresses, nil
-}
diff --git a/core/services/relay/evm/mercury/helpers_test.go b/core/services/relay/evm/mercury/helpers_test.go
deleted file mode 100644
index f44594514d7..00000000000
--- a/core/services/relay/evm/mercury/helpers_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package mercury
-
-import (
- "math/big"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/ethclient/simulated"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
-
- evmclient "github.com/smartcontractkit/chainlink-integrations/evm/client"
- "github.com/smartcontractkit/chainlink-integrations/evm/heads/headstest"
- "github.com/smartcontractkit/chainlink-integrations/evm/logpoller"
- "github.com/smartcontractkit/chainlink-integrations/evm/utils"
-
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier"
- "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- reportcodecv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/reportcodec"
- reportcodecv2 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2/reportcodec"
- reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec"
-)
-
-var (
- sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
- sampleClientPubKey = hexutil.MustDecode("0x724ff6eae9e900270edfff233e16322a70ec06e1a6e62a81ef13921f398f6c93")
-)
-
-var sampleReports [][]byte
-
-var (
- sampleV1Report = buildSampleV1Report(242)
- sampleV2Report = buildSampleV2Report(242)
- sampleV3Report = buildSampleV3Report(242)
- sig2 = ocrtypes.AttributedOnchainSignature{Signature: testutils.MustDecodeBase64("kbeuRczizOJCxBzj7MUAFpz3yl2WRM6K/f0ieEBvA+oTFUaKslbQey10krumVjzAvlvKxMfyZo0WkOgNyfF6xwE="), Signer: 2}
- sig3 = ocrtypes.AttributedOnchainSignature{Signature: testutils.MustDecodeBase64("9jz4b6Dh2WhXxQ97a6/S9UNjSfrEi9016XKTrfN0mLQFDiNuws23x7Z4n+6g0sqKH/hnxx1VukWUH/ohtw83/wE="), Signer: 3}
- sampleSigs = []ocrtypes.AttributedOnchainSignature{sig2, sig3}
- sampleReportContext = ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- ConfigDigest: MustHexToConfigDigest("0x0006fc30092226b37f6924b464e16a54a7978a9a524519a73403af64d487dc45"),
- Epoch: 6,
- Round: 28,
- },
- ExtraHash: [32]uint8{27, 144, 106, 73, 166, 228, 123, 166, 179, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114},
- }
-)
-
-func init() {
- sampleReports = make([][]byte, 4)
- for i := 0; i < len(sampleReports); i++ {
- sampleReports[i] = buildSampleV1Report(int64(i))
- }
-}
-
-func buildSampleV1Report(p int64) []byte {
- feedID := sampleFeedID
- timestamp := uint32(42)
- bp := big.NewInt(p)
- bid := big.NewInt(243)
- ask := big.NewInt(244)
- currentBlockNumber := uint64(143)
- currentBlockHash := utils.NewHash()
- currentBlockTimestamp := uint64(123)
- validFromBlockNum := uint64(142)
-
- b, err := reportcodecv1.ReportTypes.Pack(feedID, timestamp, bp, bid, ask, currentBlockNumber, currentBlockHash, currentBlockTimestamp, validFromBlockNum)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-func buildSampleV2Report(ts int64) []byte {
- feedID := sampleFeedID
- timestamp := uint32(ts)
- bp := big.NewInt(242)
- validFromTimestamp := uint32(123)
- expiresAt := uint32(456)
- linkFee := big.NewInt(3334455)
- nativeFee := big.NewInt(556677)
-
- b, err := reportcodecv2.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-func buildSampleV3Report(ts int64) []byte {
- feedID := sampleFeedID
- timestamp := uint32(ts)
- bp := big.NewInt(242)
- bid := big.NewInt(243)
- ask := big.NewInt(244)
- validFromTimestamp := uint32(123)
- expiresAt := uint32(456)
- linkFee := big.NewInt(3334455)
- nativeFee := big.NewInt(556677)
-
- b, err := reportcodecv3.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-func buildSamplePayload(report []byte) []byte {
- var rs [][32]byte
- var ss [][32]byte
- var vs [32]byte
- for i, as := range sampleSigs {
- r, s, v, err := evmutil.SplitSignature(as.Signature)
- if err != nil {
- panic("eventTransmit(ev): error in SplitSignature")
- }
- rs = append(rs, r)
- ss = append(ss, s)
- vs[i] = v
- }
- rawReportCtx := evmutil.RawReportContext(sampleReportContext)
- payload, err := PayloadTypes.Pack(rawReportCtx, report, rs, ss, vs)
- if err != nil {
- panic(err)
- }
- return payload
-}
-
-type TestHarness struct {
- configPoller *ConfigPoller
- user *bind.TransactOpts
- backend *simulated.Backend
- verifierAddress common.Address
- verifierContract *verifier.Verifier
- logPoller logpoller.LogPoller
-}
-
-func SetupTH(t *testing.T, feedID common.Hash) TestHarness {
- key, err := crypto.GenerateKey()
- require.NoError(t, err)
- user, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- require.NoError(t, err)
- b := simulated.NewBackend(types.GenesisAlloc{
- user.From: {Balance: big.NewInt(1000000000000000000)}},
- simulated.WithBlockGasLimit(5*ethconfig.Defaults.Miner.GasCeil))
-
- proxyAddress, _, verifierProxy, err := verifier_proxy.DeployVerifierProxy(user, b.Client(), common.Address{})
- require.NoError(t, err, "failed to deploy test mercury verifier proxy contract")
- b.Commit()
- verifierAddress, _, verifierContract, err := verifier.DeployVerifier(user, b.Client(), proxyAddress)
- require.NoError(t, err, "failed to deploy test mercury verifier contract")
- b.Commit()
- _, err = verifierProxy.InitializeVerifier(user, verifierAddress)
- require.NoError(t, err)
- b.Commit()
-
- db := pgtest.NewSqlxDB(t)
- ethClient := evmclient.NewSimulatedBackendClient(t, b, big.NewInt(1337))
- lggr := logger.Test(t)
- lorm := logpoller.NewORM(big.NewInt(1337), db, lggr)
-
- lpOpts := logpoller.Opts{
- PollPeriod: 100 * time.Millisecond,
- FinalityDepth: 1,
- BackfillBatchSize: 2,
- RPCBatchSize: 2,
- KeepFinalizedBlocksDepth: 1000,
- }
- ht := headstest.NewSimulatedHeadTracker(ethClient, lpOpts.UseFinalityTag, lpOpts.FinalityDepth)
- lp := logpoller.NewLogPoller(lorm, ethClient, lggr, ht, lpOpts)
- servicetest.Run(t, lp)
-
- configPoller, err := NewConfigPoller(testutils.Context(t), lggr, lp, verifierAddress, feedID)
- require.NoError(t, err)
-
- configPoller.Start()
- t.Cleanup(func() {
- assert.NoError(t, configPoller.Close())
- })
-
- return TestHarness{
- configPoller: configPoller,
- user: user,
- backend: b,
- verifierAddress: verifierAddress,
- verifierContract: verifierContract,
- logPoller: lp,
- }
-}
diff --git a/core/services/relay/evm/mercury/mocks/async_deleter.go b/core/services/relay/evm/mercury/mocks/async_deleter.go
deleted file mode 100644
index a1d52ecac5e..00000000000
--- a/core/services/relay/evm/mercury/mocks/async_deleter.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Code generated by mockery v2.53.0. DO NOT EDIT.
-
-package mocks
-
-import (
- pb "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
- mock "github.com/stretchr/testify/mock"
-)
-
-// AsyncDeleter is an autogenerated mock type for the asyncDeleter type
-type AsyncDeleter struct {
- mock.Mock
-}
-
-type AsyncDeleter_Expecter struct {
- mock *mock.Mock
-}
-
-func (_m *AsyncDeleter) EXPECT() *AsyncDeleter_Expecter {
- return &AsyncDeleter_Expecter{mock: &_m.Mock}
-}
-
-// AsyncDelete provides a mock function with given fields: req
-func (_m *AsyncDeleter) AsyncDelete(req *pb.TransmitRequest) {
- _m.Called(req)
-}
-
-// AsyncDeleter_AsyncDelete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsyncDelete'
-type AsyncDeleter_AsyncDelete_Call struct {
- *mock.Call
-}
-
-// AsyncDelete is a helper method to define mock.On call
-// - req *pb.TransmitRequest
-func (_e *AsyncDeleter_Expecter) AsyncDelete(req interface{}) *AsyncDeleter_AsyncDelete_Call {
- return &AsyncDeleter_AsyncDelete_Call{Call: _e.mock.On("AsyncDelete", req)}
-}
-
-func (_c *AsyncDeleter_AsyncDelete_Call) Run(run func(req *pb.TransmitRequest)) *AsyncDeleter_AsyncDelete_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(*pb.TransmitRequest))
- })
- return _c
-}
-
-func (_c *AsyncDeleter_AsyncDelete_Call) Return() *AsyncDeleter_AsyncDelete_Call {
- _c.Call.Return()
- return _c
-}
-
-func (_c *AsyncDeleter_AsyncDelete_Call) RunAndReturn(run func(*pb.TransmitRequest)) *AsyncDeleter_AsyncDelete_Call {
- _c.Run(run)
- return _c
-}
-
-// NewAsyncDeleter creates a new instance of AsyncDeleter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
-// The first argument is typically a *testing.T value.
-func NewAsyncDeleter(t interface {
- mock.TestingT
- Cleanup(func())
-}) *AsyncDeleter {
- mock := &AsyncDeleter{}
- mock.Mock.Test(t)
-
- t.Cleanup(func() { mock.AssertExpectations(t) })
-
- return mock
-}
diff --git a/core/services/relay/evm/mercury/mocks/pipeline.go b/core/services/relay/evm/mercury/mocks/pipeline.go
deleted file mode 100644
index a6947c42a12..00000000000
--- a/core/services/relay/evm/mercury/mocks/pipeline.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package mocks
-
-import (
- "context"
- "time"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
-)
-
-type MockRunner struct {
- Trrs pipeline.TaskRunResults
- Err error
-}
-
-func (m *MockRunner) ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error) {
- return &pipeline.Run{ID: 42}, m.Trrs, m.Err
-}
-
-var _ pipeline.Task = &MockTask{}
-
-type MockTask struct {
- result pipeline.Result
-}
-
-func (m *MockTask) GetDescendantTasks() []pipeline.Task { return nil }
-
-func (m *MockTask) TaskTags() string { return "{\"anything\": \"here\"}" }
-
-func (m *MockTask) Type() pipeline.TaskType { return "MockTask" }
-func (m *MockTask) ID() int { return 0 }
-func (m *MockTask) DotID() string { return "" }
-func (m *MockTask) Run(ctx context.Context, lggr logger.Logger, vars pipeline.Vars, inputs []pipeline.Result) (pipeline.Result, pipeline.RunInfo) {
- return m.result, pipeline.RunInfo{}
-}
-func (m *MockTask) Base() *pipeline.BaseTask { return nil }
-func (m *MockTask) Outputs() []pipeline.Task { return nil }
-func (m *MockTask) Inputs() []pipeline.TaskDependency { return nil }
-func (m *MockTask) OutputIndex() int32 { return 0 }
-func (m *MockTask) TaskTimeout() (time.Duration, bool) { return 0, false }
-func (m *MockTask) TaskRetries() uint32 { return 0 }
-func (m *MockTask) TaskMinBackoff() time.Duration { return 0 }
-func (m *MockTask) TaskMaxBackoff() time.Duration { return 0 }
-func (m *MockTask) TaskStreamID() *uint32 { return nil }
diff --git a/core/services/relay/evm/mercury/offchain_config_digester.go b/core/services/relay/evm/mercury/offchain_config_digester.go
deleted file mode 100644
index e771053c37b..00000000000
--- a/core/services/relay/evm/mercury/offchain_config_digester.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package mercury
-
-import (
- "context"
- "crypto/ed25519"
- "encoding/hex"
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/pkg/errors"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/smartcontractkit/wsrpc/credentials"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
-)
-
-// Originally sourced from: https://github.com/smartcontractkit/offchain-reporting/blob/991ebe1462fd56826a1ddfb34287d542acb2baee/lib/offchainreporting2/chains/evmutil/offchain_config_digester.go
-
-var _ ocrtypes.OffchainConfigDigester = OffchainConfigDigester{}
-
-func NewOffchainConfigDigester(feedID [32]byte, chainID *big.Int, contractAddress common.Address, prefix ocrtypes.ConfigDigestPrefix) OffchainConfigDigester {
- return OffchainConfigDigester{feedID, chainID, contractAddress, prefix}
-}
-
-type OffchainConfigDigester struct {
- FeedID utils.FeedID
- ChainID *big.Int
- ContractAddress common.Address
- Prefix ocrtypes.ConfigDigestPrefix
-}
-
-func (d OffchainConfigDigester) ConfigDigest(ctx context.Context, cc ocrtypes.ContractConfig) (ocrtypes.ConfigDigest, error) {
- signers := []common.Address{}
- for i, signer := range cc.Signers {
- if len(signer) != 20 {
- return ocrtypes.ConfigDigest{}, errors.Errorf("%v-th evm signer should be a 20 byte address, but got %x", i, signer)
- }
- a := common.BytesToAddress(signer)
- signers = append(signers, a)
- }
- transmitters := []credentials.StaticSizedPublicKey{}
- for i, transmitter := range cc.Transmitters {
- if len(transmitter) != 2*ed25519.PublicKeySize {
- return ocrtypes.ConfigDigest{}, errors.Errorf("%v-th evm transmitter should be a 64 character hex-encoded ed25519 public key, but got '%v' (%d chars)", i, transmitter, len(transmitter))
- }
- var t credentials.StaticSizedPublicKey
- b, err := hex.DecodeString(string(transmitter))
- if err != nil {
- return ocrtypes.ConfigDigest{}, errors.Wrapf(err, "%v-th evm transmitter is not valid hex, got: %q", i, transmitter)
- }
- copy(t[:], b)
-
- transmitters = append(transmitters, t)
- }
-
- return configDigest(
- common.Hash(d.FeedID),
- d.ChainID,
- d.ContractAddress,
- cc.ConfigCount,
- signers,
- transmitters,
- cc.F,
- cc.OnchainConfig,
- cc.OffchainConfigVersion,
- cc.OffchainConfig,
- d.Prefix,
- ), nil
-}
-
-func (d OffchainConfigDigester) ConfigDigestPrefix(ctx context.Context) (ocrtypes.ConfigDigestPrefix, error) {
- return d.Prefix, nil
-}
diff --git a/core/services/relay/evm/mercury/offchain_config_digester_test.go b/core/services/relay/evm/mercury/offchain_config_digester_test.go
deleted file mode 100644
index 62869cf6f3d..00000000000
--- a/core/services/relay/evm/mercury/offchain_config_digester_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package mercury
-
-import (
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
-)
-
-func Test_OffchainConfigDigester_ConfigDigest(t *testing.T) {
- ctx := tests.Context(t)
- // ChainID and ContractAddress are taken into account for computation
- cd1, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(ctx, types.ContractConfig{})
- require.NoError(t, err)
- cd2, err := OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(ctx, types.ContractConfig{})
- require.NoError(t, err)
- cd3, err := OffchainConfigDigester{ChainID: big.NewInt(1)}.ConfigDigest(ctx, types.ContractConfig{})
- require.NoError(t, err)
- cd4, err := OffchainConfigDigester{ChainID: big.NewInt(1), ContractAddress: common.Address{1}}.ConfigDigest(ctx, types.ContractConfig{})
- require.NoError(t, err)
-
- require.Equal(t, cd1, cd2)
- require.NotEqual(t, cd2, cd3)
- require.NotEqual(t, cd2, cd4)
- require.NotEqual(t, cd3, cd4)
-
- // malformed signers
- _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{
- Signers: []types.OnchainPublicKey{{1, 2}},
- })
- require.Error(t, err)
-
- // malformed transmitters
- _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{
- Transmitters: []types.Account{"0x"},
- })
- require.Error(t, err)
-
- _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{
- Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353"},
- })
- require.Error(t, err)
-
- _, err = OffchainConfigDigester{}.ConfigDigest(ctx, types.ContractConfig{
- Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaz"},
- })
- require.Error(t, err)
-
- // well-formed transmitters
- _, err = OffchainConfigDigester{ChainID: big.NewInt(0)}.ConfigDigest(ctx, types.ContractConfig{
- Transmitters: []types.Account{"7343581f55146951b0f678dc6cfa8fd360e2f353aabbccddeeffaaccddeeffaa"},
- })
- require.NoError(t, err)
-}
diff --git a/core/services/relay/evm/mercury/orm.go b/core/services/relay/evm/mercury/orm.go
deleted file mode 100644
index 65df9ab4cc6..00000000000
--- a/core/services/relay/evm/mercury/orm.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package mercury
-
-import (
- "context"
- "crypto/sha256"
- "database/sql"
- "errors"
- "fmt"
- "strings"
- "sync"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/lib/pq"
- pkgerrors "github.com/pkg/errors"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-type ORM interface {
- InsertTransmitRequest(ctx context.Context, serverURLs []string, req *pb.TransmitRequest, jobID int32, reportCtx ocrtypes.ReportContext) error
- DeleteTransmitRequests(ctx context.Context, serverURL string, reqs []*pb.TransmitRequest) error
- GetTransmitRequests(ctx context.Context, serverURL string, jobID int32) ([]*Transmission, error)
- PruneTransmitRequests(ctx context.Context, serverURL string, jobID int32, maxSize int) error
- LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error)
-}
-
-func FeedIDFromReport(report ocrtypes.Report) (feedID utils.FeedID, err error) {
- if n := copy(feedID[:], report); n != 32 {
- return feedID, pkgerrors.Errorf("invalid length for report: %d", len(report))
- }
- return feedID, nil
-}
-
-type orm struct {
- ds sqlutil.DataSource
-}
-
-func NewORM(ds sqlutil.DataSource) ORM {
- return &orm{ds: ds}
-}
-
-// InsertTransmitRequest inserts one transmit request if the payload does not exist already.
-func (o *orm) InsertTransmitRequest(ctx context.Context, serverURLs []string, req *pb.TransmitRequest, jobID int32, reportCtx ocrtypes.ReportContext) error {
- feedID, err := FeedIDFromReport(req.Payload)
- if err != nil {
- return err
- }
- if len(serverURLs) == 0 {
- return errors.New("no server URLs provided")
- }
-
- var wg sync.WaitGroup
- wg.Add(2)
- var err1, err2 error
-
- go func() {
- defer wg.Done()
-
- values := make([]string, len(serverURLs))
- args := []interface{}{
- req.Payload,
- hashPayload(req.Payload),
- reportCtx.ConfigDigest[:],
- reportCtx.Epoch,
- reportCtx.Round,
- reportCtx.ExtraHash[:],
- jobID,
- feedID[:],
- }
- for i, serverURL := range serverURLs {
- // server url is the only thing that changes, might as well re-use
- // the same parameters for each insert
- values[i] = fmt.Sprintf("($1, $2, $3, $4, $5, $6, $7, $8, $%d)", i+9)
- args = append(args, serverURL)
- }
-
- _, err1 = o.ds.ExecContext(ctx, fmt.Sprintf(`
- INSERT INTO mercury_transmit_requests (payload, payload_hash, config_digest, epoch, round, extra_hash, job_id, feed_id, server_url)
- VALUES %s
- ON CONFLICT (server_url, payload_hash) DO NOTHING
- `, strings.Join(values, ",")), args...)
- }()
-
- go func() {
- defer wg.Done()
- _, err2 = o.ds.ExecContext(ctx, `
- INSERT INTO feed_latest_reports (feed_id, report, epoch, round, updated_at, job_id)
- VALUES ($1, $2, $3, $4, NOW(), $5)
- ON CONFLICT (feed_id) DO UPDATE
- SET feed_id=$1, report=$2, epoch=$3, round=$4, updated_at=NOW()
- WHERE excluded.epoch > feed_latest_reports.epoch OR (excluded.epoch = feed_latest_reports.epoch AND excluded.round > feed_latest_reports.round)
- `, feedID[:], req.Payload, reportCtx.Epoch, reportCtx.Round, jobID)
- }()
- wg.Wait()
- return errors.Join(err1, err2)
-}
-
-// DeleteTransmitRequest deletes the given transmit requests if they exist.
-func (o *orm) DeleteTransmitRequests(ctx context.Context, serverURL string, reqs []*pb.TransmitRequest) error {
- if len(reqs) == 0 {
- return nil
- }
-
- var hashes pq.ByteaArray
- for _, req := range reqs {
- hashes = append(hashes, hashPayload(req.Payload))
- }
-
- _, err := o.ds.ExecContext(ctx, `
- DELETE FROM mercury_transmit_requests
- WHERE server_url = $1 AND payload_hash = ANY($2)
- `, serverURL, hashes)
- return err
-}
-
-// GetTransmitRequests returns all transmit requests in chronologically descending order.
-func (o *orm) GetTransmitRequests(ctx context.Context, serverURL string, jobID int32) ([]*Transmission, error) {
- // The priority queue uses epoch and round to sort transmissions so order by
- // the same fields here for optimal insertion into the pq.
- rows, err := o.ds.QueryContext(ctx, `
- SELECT payload, config_digest, epoch, round, extra_hash
- FROM mercury_transmit_requests
- WHERE job_id = $1 AND server_url = $2
- ORDER BY epoch DESC, round DESC
- `, jobID, serverURL)
- if err != nil {
- return nil, err
- }
- defer rows.Close()
-
- var transmissions []*Transmission
- for rows.Next() {
- transmission := &Transmission{Req: &pb.TransmitRequest{}}
- var digest, extraHash common.Hash
-
- err := rows.Scan(
- &transmission.Req.Payload,
- &digest,
- &transmission.ReportCtx.Epoch,
- &transmission.ReportCtx.Round,
- &extraHash,
- )
- if err != nil {
- return nil, err
- }
- transmission.ReportCtx.ConfigDigest = ocrtypes.ConfigDigest(digest)
- transmission.ReportCtx.ExtraHash = extraHash
-
- transmissions = append(transmissions, transmission)
- }
- if err := rows.Err(); err != nil {
- return nil, err
- }
-
- return transmissions, nil
-}
-
-// PruneTransmitRequests keeps at most maxSize rows for the given job ID,
-// deleting the oldest transactions.
-func (o *orm) PruneTransmitRequests(ctx context.Context, serverURL string, jobID int32, maxSize int) error {
- // Prune the oldest requests by epoch and round.
- _, err := o.ds.ExecContext(ctx, `
- DELETE FROM mercury_transmit_requests
- WHERE job_id = $1 AND server_url = $2 AND
- payload_hash NOT IN (
- SELECT payload_hash
- FROM mercury_transmit_requests
- WHERE job_id = $1 AND server_url = $2
- ORDER BY epoch DESC, round DESC
- LIMIT $3
- )
- `, jobID, serverURL, maxSize)
- return err
-}
-
-func (o *orm) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) {
- err = o.ds.GetContext(ctx, &report, `SELECT report FROM feed_latest_reports WHERE feed_id = $1`, feedID[:])
- if errors.Is(err, sql.ErrNoRows) {
- return nil, nil
- }
- return report, err
-}
-
-func hashPayload(payload []byte) []byte {
- checksum := sha256.Sum256(payload)
- return checksum[:]
-}
diff --git a/core/services/relay/evm/mercury/orm_test.go b/core/services/relay/evm/mercury/orm_test.go
deleted file mode 100644
index 3cdf734874f..00000000000
--- a/core/services/relay/evm/mercury/orm_test.go
+++ /dev/null
@@ -1,379 +0,0 @@
-package mercury
-
-import (
- "math/rand/v2"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-var (
- sURL = "wss://example.com/mercury"
- sURL2 = "wss://mercuryserver.test"
- sURL3 = "wss://mercuryserver.example/foo"
-)
-
-func TestORM(t *testing.T) {
- ctx := testutils.Context(t)
- db := pgtest.NewSqlxDB(t)
-
- jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
- orm := NewORM(db)
- feedID := sampleFeedID
-
- reports := sampleReports
- reportContexts := make([]ocrtypes.ReportContext, 4)
- for i := range reportContexts {
- reportContexts[i] = ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- ConfigDigest: ocrtypes.ConfigDigest{'1'},
- Epoch: 10,
- Round: uint8(i),
- },
- ExtraHash: [32]byte{'2'},
- }
- }
-
- l, err := orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Nil(t, l)
-
- // Test insert and get requests.
- // s1
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, reportContexts[0])
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[1]}, jobID, reportContexts[1])
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[2]}, jobID, reportContexts[2])
- require.NoError(t, err)
-
- // s2
- err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[0])
- require.NoError(t, err)
-
- transmissions, err := orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]},
- {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: reportContexts[1]},
- {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]},
- })
- transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[0]},
- })
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.NotEqual(t, reports[0], l)
- assert.Equal(t, reports[2], l)
-
- // Test requests can be deleted.
- err = orm.DeleteTransmitRequests(ctx, sURL, []*pb.TransmitRequest{{Payload: reports[1]}})
- require.NoError(t, err)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]},
- {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]},
- })
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[2], l)
-
- // Test deleting non-existent requests does not error.
- err = orm.DeleteTransmitRequests(ctx, sURL, []*pb.TransmitRequest{{Payload: []byte("does-not-exist")}})
- require.NoError(t, err)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: reportContexts[2]},
- {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]},
- })
-
- // Test deleting multiple requests.
- err = orm.DeleteTransmitRequests(ctx, sURL, []*pb.TransmitRequest{
- {Payload: reports[0]},
- {Payload: reports[2]},
- })
- require.NoError(t, err)
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[2], l)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Empty(t, transmissions)
-
- // More inserts.
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3])
- require.NoError(t, err)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[3]},
- })
-
- // Duplicate requests are ignored.
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3])
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, jobID, reportContexts[3])
- require.NoError(t, err)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: reportContexts[3]},
- })
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[3], l)
-
- // s2 not affected by deletion
- transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID)
- require.NoError(t, err)
- require.Len(t, transmissions, 1)
-}
-
-func TestORM_InsertTransmitRequest_MultipleServerURLs(t *testing.T) {
- ctx := testutils.Context(t)
- db := pgtest.NewSqlxDB(t)
-
- jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
- orm := NewORM(db)
- feedID := sampleFeedID
-
- reports := sampleReports
- reportContexts := make([]ocrtypes.ReportContext, 4)
- for i := range reportContexts {
- reportContexts[i] = ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- ConfigDigest: ocrtypes.ConfigDigest{'1'},
- Epoch: 10,
- Round: uint8(i),
- },
- ExtraHash: [32]byte{'2'},
- }
- }
- err := orm.InsertTransmitRequest(ctx, []string{sURL, sURL2, sURL3}, &pb.TransmitRequest{Payload: reports[0]}, jobID, reportContexts[0])
- require.NoError(t, err)
-
- transmissions, err := orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Len(t, transmissions, 1)
- assert.Equal(t, &Transmission{Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, transmissions[0])
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID)
- require.NoError(t, err)
- require.Len(t, transmissions, 1)
- assert.Equal(t, &Transmission{Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, transmissions[0])
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL3, jobID)
- require.NoError(t, err)
- require.Len(t, transmissions, 1)
- assert.Equal(t, &Transmission{Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: reportContexts[0]}, transmissions[0])
-
- l, err := orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[0], l)
-}
-
-func TestORM_PruneTransmitRequests(t *testing.T) {
- ctx := testutils.Context(t)
- db := pgtest.NewSqlxDB(t)
- jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
-
- orm := NewORM(db)
-
- reports := sampleReports
-
- makeReportContext := func(epoch uint32, round uint8) ocrtypes.ReportContext {
- return ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- ConfigDigest: ocrtypes.ConfigDigest{'1'},
- Epoch: epoch,
- Round: round,
- },
- ExtraHash: [32]byte{'2'},
- }
- }
-
- // s1
- err := orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(1, 1))
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 2))
- require.NoError(t, err)
- // s2 - should not be touched
- err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(1, 0))
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(1, 1))
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 2))
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[2]}, jobID, makeReportContext(1, 3))
- require.NoError(t, err)
-
- // Max size greater than number of records, expect no-op
- err = orm.PruneTransmitRequests(ctx, sURL, jobID, 5)
- require.NoError(t, err)
-
- transmissions, err := orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)},
- {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)},
- })
-
- // Max size equal to number of records, expect no-op
- err = orm.PruneTransmitRequests(ctx, sURL, jobID, 2)
- require.NoError(t, err)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, transmissions, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)},
- {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)},
- })
-
- // Max size is number of records + 1, but jobID differs, expect no-op
- err = orm.PruneTransmitRequests(ctx, sURL, -1, 2)
- require.NoError(t, err)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)},
- {Req: &pb.TransmitRequest{Payload: reports[0]}, ReportCtx: makeReportContext(1, 1)},
- }, transmissions)
-
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[2]}, jobID, makeReportContext(2, 1))
- require.NoError(t, err)
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[3]}, jobID, makeReportContext(2, 2))
- require.NoError(t, err)
-
- // Max size is table size - 1, expect the oldest row to be pruned.
- err = orm.PruneTransmitRequests(ctx, sURL, jobID, 3)
- require.NoError(t, err)
-
- transmissions, err = orm.GetTransmitRequests(ctx, sURL, jobID)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[3]}, ReportCtx: makeReportContext(2, 2)},
- {Req: &pb.TransmitRequest{Payload: reports[2]}, ReportCtx: makeReportContext(2, 1)},
- {Req: &pb.TransmitRequest{Payload: reports[1]}, ReportCtx: makeReportContext(1, 2)},
- }, transmissions)
-
- // s2 not touched
- transmissions, err = orm.GetTransmitRequests(ctx, sURL2, jobID)
- require.NoError(t, err)
- assert.Len(t, transmissions, 3)
-}
-
-func TestORM_InsertTransmitRequest_LatestReport(t *testing.T) {
- ctx := testutils.Context(t)
- db := pgtest.NewSqlxDB(t)
- jobID := rand.Int32() // foreign key constraints disabled so value doesn't matter
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
-
- orm := NewORM(db)
- feedID := sampleFeedID
-
- reports := sampleReports
-
- makeReportContext := func(epoch uint32, round uint8) ocrtypes.ReportContext {
- return ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- ConfigDigest: ocrtypes.ConfigDigest{'1'},
- Epoch: epoch,
- Round: round,
- },
- ExtraHash: [32]byte{'2'},
- }
- }
-
- err := orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(
- 0, 0,
- ))
- require.NoError(t, err)
-
- // this should be ignored, because report context is the same
- err = orm.InsertTransmitRequest(ctx, []string{sURL2}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(
- 0, 0,
- ))
- require.NoError(t, err)
-
- l, err := orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[0], l)
-
- t.Run("replaces if epoch and round are larger", func(t *testing.T) {
- err = orm.InsertTransmitRequest(ctx, []string{"foo"}, &pb.TransmitRequest{Payload: reports[1]}, jobID, makeReportContext(1, 1))
- require.NoError(t, err)
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[1], l)
- })
- t.Run("replaces if epoch is the same but round is greater", func(t *testing.T) {
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[2]}, jobID, makeReportContext(1, 2))
- require.NoError(t, err)
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[2], l)
- })
- t.Run("replaces if epoch is larger but round is smaller", func(t *testing.T) {
- err = orm.InsertTransmitRequest(ctx, []string{"bar"}, &pb.TransmitRequest{Payload: reports[3]}, jobID, makeReportContext(2, 1))
- require.NoError(t, err)
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[3], l)
- })
- t.Run("does not overwrite if epoch/round is the same", func(t *testing.T) {
- err = orm.InsertTransmitRequest(ctx, []string{sURL}, &pb.TransmitRequest{Payload: reports[0]}, jobID, makeReportContext(2, 1))
- require.NoError(t, err)
-
- l, err = orm.LatestReport(testutils.Context(t), feedID)
- require.NoError(t, err)
- assert.Equal(t, reports[3], l)
- })
-}
-
-func Test_ReportCodec_FeedIDFromReport(t *testing.T) {
- t.Run("FeedIDFromReport extracts the current block number from a valid report", func(t *testing.T) {
- report := buildSampleV1Report(42)
-
- f, err := FeedIDFromReport(report)
- require.NoError(t, err)
-
- assert.Equal(t, sampleFeedID[:], f[:])
- })
- t.Run("FeedIDFromReport returns error if report is invalid", func(t *testing.T) {
- report := []byte{1}
-
- _, err := FeedIDFromReport(report)
- assert.EqualError(t, err, "invalid length for report: 1")
- })
-}
diff --git a/core/services/relay/evm/mercury/payload_types.go b/core/services/relay/evm/mercury/payload_types.go
new file mode 100644
index 00000000000..ef4c48f55f5
--- /dev/null
+++ b/core/services/relay/evm/mercury/payload_types.go
@@ -0,0 +1,26 @@
+package mercury
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/accounts/abi"
+)
+
+var PayloadTypes = getPayloadTypes()
+
+func getPayloadTypes() abi.Arguments {
+ mustNewType := func(t string) abi.Type {
+ result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{})
+ if err != nil {
+ panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err))
+ }
+ return result
+ }
+ return abi.Arguments([]abi.Argument{
+ {Name: "reportContext", Type: mustNewType("bytes32[3]")},
+ {Name: "report", Type: mustNewType("bytes")},
+ {Name: "rawRs", Type: mustNewType("bytes32[]")},
+ {Name: "rawSs", Type: mustNewType("bytes32[]")},
+ {Name: "rawVs", Type: mustNewType("bytes32")},
+ })
+}
diff --git a/core/services/relay/evm/mercury/persistence_manager.go b/core/services/relay/evm/mercury/persistence_manager.go
deleted file mode 100644
index 68137d04c14..00000000000
--- a/core/services/relay/evm/mercury/persistence_manager.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package mercury
-
-import (
- "context"
- "sync"
- "time"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
- "github.com/smartcontractkit/chainlink-common/pkg/sqlutil"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-var (
- flushDeletesFrequency = time.Second
- pruneFrequency = time.Hour
-)
-
-type PersistenceManager struct {
- lggr logger.Logger
- orm ORM
- serverURL string
-
- once services.StateMachine
- stopCh services.StopChan
- wg sync.WaitGroup
-
- deleteMu sync.Mutex
- deleteQueue []*pb.TransmitRequest
-
- jobID int32
-
- maxTransmitQueueSize int
- flushDeletesFrequency time.Duration
- pruneFrequency time.Duration
-}
-
-func NewPersistenceManager(lggr logger.Logger, serverURL string, orm ORM, jobID int32, maxTransmitQueueSize int, flushDeletesFrequency, pruneFrequency time.Duration) *PersistenceManager {
- return &PersistenceManager{
- lggr: logger.Sugared(lggr).Named("MercuryPersistenceManager").With("serverURL", serverURL),
- orm: orm,
- serverURL: serverURL,
- stopCh: make(services.StopChan),
- jobID: jobID,
- maxTransmitQueueSize: maxTransmitQueueSize,
- flushDeletesFrequency: flushDeletesFrequency,
- pruneFrequency: pruneFrequency,
- }
-}
-
-func (pm *PersistenceManager) Start(ctx context.Context) error {
- return pm.once.StartOnce("MercuryPersistenceManager", func() error {
- pm.wg.Add(2)
- go pm.runFlushDeletesLoop()
- go pm.runPruneLoop()
- return nil
- })
-}
-
-func (pm *PersistenceManager) Close() error {
- return pm.once.StopOnce("MercuryPersistenceManager", func() error {
- close(pm.stopCh)
- pm.wg.Wait()
- return nil
- })
-}
-
-func (pm *PersistenceManager) Insert(ctx context.Context, req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) error {
- return pm.orm.InsertTransmitRequest(ctx, []string{pm.serverURL}, req, pm.jobID, reportCtx)
-}
-
-func (pm *PersistenceManager) Delete(ctx context.Context, req *pb.TransmitRequest) error {
- return pm.orm.DeleteTransmitRequests(ctx, pm.serverURL, []*pb.TransmitRequest{req})
-}
-
-func (pm *PersistenceManager) AsyncDelete(req *pb.TransmitRequest) {
- pm.addToDeleteQueue(req)
-}
-
-func (pm *PersistenceManager) Load(ctx context.Context) ([]*Transmission, error) {
- return pm.orm.GetTransmitRequests(ctx, pm.serverURL, pm.jobID)
-}
-
-func (pm *PersistenceManager) runFlushDeletesLoop() {
- defer pm.wg.Done()
-
- ctx, cancel := pm.stopCh.NewCtx()
- defer cancel()
-
- ticker := services.NewTicker(pm.flushDeletesFrequency)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- queuedReqs := pm.resetDeleteQueue()
- if err := pm.orm.DeleteTransmitRequests(ctx, pm.serverURL, queuedReqs); err != nil {
- pm.lggr.Errorw("Failed to delete queued transmit requests", "err", err)
- pm.addToDeleteQueue(queuedReqs...)
- } else {
- pm.lggr.Debugw("Deleted queued transmit requests")
- }
- }
- }
-}
-
-func (pm *PersistenceManager) runPruneLoop() {
- defer pm.wg.Done()
-
- ctx, cancel := pm.stopCh.NewCtx()
- defer cancel()
-
- ticker := services.NewTicker(pm.pruneFrequency)
- defer ticker.Stop()
- for {
- select {
- case <-ctx.Done():
- return
- case <-ticker.C:
- func(ctx context.Context) {
- ctx, cancelPrune := context.WithTimeout(sqlutil.WithoutDefaultTimeout(ctx), time.Minute)
- defer cancelPrune()
- if err := pm.orm.PruneTransmitRequests(ctx, pm.serverURL, pm.jobID, pm.maxTransmitQueueSize); err != nil {
- pm.lggr.Errorw("Failed to prune transmit requests table", "err", err)
- } else {
- pm.lggr.Debugw("Pruned transmit requests table")
- }
- }(ctx)
- }
- }
-}
-
-func (pm *PersistenceManager) addToDeleteQueue(reqs ...*pb.TransmitRequest) {
- pm.deleteMu.Lock()
- defer pm.deleteMu.Unlock()
- pm.deleteQueue = append(pm.deleteQueue, reqs...)
-}
-
-func (pm *PersistenceManager) resetDeleteQueue() []*pb.TransmitRequest {
- pm.deleteMu.Lock()
- defer pm.deleteMu.Unlock()
- queue := pm.deleteQueue
- pm.deleteQueue = nil
- return queue
-}
diff --git a/core/services/relay/evm/mercury/persistence_manager_test.go b/core/services/relay/evm/mercury/persistence_manager_test.go
deleted file mode 100644
index fb73ad2cf5a..00000000000
--- a/core/services/relay/evm/mercury/persistence_manager_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package mercury
-
-import (
- "math/rand/v2"
- "testing"
- "time"
-
- "github.com/jmoiron/sqlx"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zaptest/observer"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-func bootstrapPersistenceManager(t *testing.T, jobID int32, db *sqlx.DB) (*PersistenceManager, *observer.ObservedLogs) {
- t.Helper()
- lggr, observedLogs := logger.TestObserved(t, zapcore.DebugLevel)
- orm := NewORM(db)
- return NewPersistenceManager(lggr, "mercuryserver.example", orm, jobID, 2, 5*time.Millisecond, 5*time.Millisecond), observedLogs
-}
-
-func TestPersistenceManager(t *testing.T) {
- jobID1 := rand.Int32()
- jobID2 := jobID1 + 1
-
- ctx := testutils.Context(t)
- db := pgtest.NewSqlxDB(t)
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
- pm, _ := bootstrapPersistenceManager(t, jobID1, db)
-
- reports := sampleReports
-
- err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[0]}, ocrtypes.ReportContext{})
- require.NoError(t, err)
- err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[1]}, ocrtypes.ReportContext{})
- require.NoError(t, err)
-
- transmissions, err := pm.Load(ctx)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[0]}},
- {Req: &pb.TransmitRequest{Payload: reports[1]}},
- }, transmissions)
-
- err = pm.Delete(ctx, &pb.TransmitRequest{Payload: reports[0]})
- require.NoError(t, err)
-
- transmissions, err = pm.Load(ctx)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[1]}},
- }, transmissions)
-
- t.Run("scopes load to only transmissions with matching job ID", func(t *testing.T) {
- pm2, _ := bootstrapPersistenceManager(t, jobID2, db)
- transmissions, err = pm2.Load(ctx)
- require.NoError(t, err)
-
- assert.Len(t, transmissions, 0)
- })
-}
-
-func TestPersistenceManagerAsyncDelete(t *testing.T) {
- ctx := testutils.Context(t)
- jobID := rand.Int32()
- db := pgtest.NewSqlxDB(t)
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
- pm, observedLogs := bootstrapPersistenceManager(t, jobID, db)
-
- reports := sampleReports
-
- err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[0]}, ocrtypes.ReportContext{})
- require.NoError(t, err)
- err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[1]}, ocrtypes.ReportContext{})
- require.NoError(t, err)
-
- err = pm.Start(ctx)
- require.NoError(t, err)
-
- pm.AsyncDelete(&pb.TransmitRequest{Payload: reports[0]})
-
- // Wait for next poll.
- observedLogs.TakeAll()
- testutils.WaitForLogMessage(t, observedLogs, "Deleted queued transmit requests")
-
- transmissions, err := pm.Load(ctx)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[1]}},
- }, transmissions)
-
- // Test AsyncDelete is a no-op after Close.
- err = pm.Close()
- require.NoError(t, err)
-
- pm.AsyncDelete(&pb.TransmitRequest{Payload: reports[1]})
-
- time.Sleep(15 * time.Millisecond)
-
- transmissions, err = pm.Load(ctx)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[1]}},
- }, transmissions)
-}
-
-func TestPersistenceManagerPrune(t *testing.T) {
- jobID1 := rand.Int32()
- jobID2 := jobID1 + 1
- db := pgtest.NewSqlxDB(t)
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
-
- ctx := testutils.Context(t)
-
- reports := make([][]byte, 25)
- for i := 0; i < 25; i++ {
- reports[i] = buildSampleV1Report(int64(i))
- }
-
- pm2, _ := bootstrapPersistenceManager(t, jobID2, db)
- for i := 0; i < 20; i++ {
- err := pm2.Insert(ctx, &pb.TransmitRequest{Payload: reports[i]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: uint32(i)}}) //nolint:gosec // G115
- require.NoError(t, err)
- }
-
- pm, observedLogs := bootstrapPersistenceManager(t, jobID1, db)
-
- err := pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[21]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 21}})
- require.NoError(t, err)
- err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[22]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}})
- require.NoError(t, err)
- err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[23]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}})
- require.NoError(t, err)
-
- err = pm.Start(ctx)
- require.NoError(t, err)
-
- // Wait for next poll.
- observedLogs.TakeAll()
- testutils.WaitForLogMessage(t, observedLogs, "Pruned transmit requests table")
-
- transmissions, err := pm.Load(ctx)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[23]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}},
- {Req: &pb.TransmitRequest{Payload: reports[22]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}}},
- }, transmissions)
-
- // Test pruning stops after Close.
- err = pm.Close()
- require.NoError(t, err)
-
- err = pm.Insert(ctx, &pb.TransmitRequest{Payload: reports[24]}, ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 24}})
- require.NoError(t, err)
-
- transmissions, err = pm.Load(ctx)
- require.NoError(t, err)
- require.Equal(t, []*Transmission{
- {Req: &pb.TransmitRequest{Payload: reports[24]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 24}}},
- {Req: &pb.TransmitRequest{Payload: reports[23]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 23}}},
- {Req: &pb.TransmitRequest{Payload: reports[22]}, ReportCtx: ocrtypes.ReportContext{ReportTimestamp: ocrtypes.ReportTimestamp{Epoch: 22}}},
- }, transmissions)
-
- t.Run("prune was scoped to job ID", func(t *testing.T) {
- transmissions, err = pm2.Load(ctx)
- require.NoError(t, err)
- assert.Len(t, transmissions, 20)
- })
-}
diff --git a/core/services/relay/evm/mercury/queue.go b/core/services/relay/evm/mercury/queue.go
deleted file mode 100644
index a450d21af6e..00000000000
--- a/core/services/relay/evm/mercury/queue.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package mercury
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- heap "github.com/esote/minmaxheap"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-type asyncDeleter interface {
- AsyncDelete(req *pb.TransmitRequest)
-}
-
-var _ services.Service = (*transmitQueue)(nil)
-
-var transmitQueueLoad = promauto.NewGaugeVec(prometheus.GaugeOpts{
- Name: "mercury_transmit_queue_load",
- Help: "Current count of items in the transmit queue",
-},
- []string{"feedID", "serverURL", "capacity"},
-)
-
-// Prometheus' default interval is 15s, set this to under 7.5s to avoid
-// aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency)
-const promInterval = 6500 * time.Millisecond
-
-// TransmitQueue is the high-level package that everything outside of this file should be using
-// It stores pending transmissions, yielding the latest (highest priority) first to the caller
-type transmitQueue struct {
- services.StateMachine
-
- cond sync.Cond
- lggr logger.SugaredLogger
- asyncDeleter asyncDeleter
- mu *sync.RWMutex
-
- pq *priorityQueue
- maxlen int
- closed bool
-
- // monitor loop
- stopMonitor func()
- transmitQueueLoad prometheus.Gauge
-}
-
-type Transmission struct {
- Req *pb.TransmitRequest // the payload to transmit
- ReportCtx ocrtypes.ReportContext // contains priority information (latest epoch/round wins)
-}
-
-type TransmitQueue interface {
- services.Service
-
- BlockingPop() (t *Transmission)
- Push(req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) (ok bool)
- Init(transmissions []*Transmission)
- IsEmpty() bool
-}
-
-// maxlen controls how many items will be stored in the queue
-// 0 means unlimited - be careful, this can cause memory leaks
-func NewTransmitQueue(lggr logger.Logger, serverURL, feedID string, maxlen int, asyncDeleter asyncDeleter) TransmitQueue {
- mu := new(sync.RWMutex)
- return &transmitQueue{
- services.StateMachine{},
- sync.Cond{L: mu},
- logger.Sugared(lggr).Named("TransmitQueue"),
- asyncDeleter,
- mu,
- nil, // pq needs to be initialized by calling tq.Init before use
- maxlen,
- false,
- nil,
- transmitQueueLoad.WithLabelValues(feedID, serverURL, fmt.Sprintf("%d", maxlen)),
- }
-}
-
-func (tq *transmitQueue) Init(transmissions []*Transmission) {
- pq := priorityQueue(transmissions)
- heap.Init(&pq) // ensure the heap is ordered
- tq.pq = &pq
-}
-
-func (tq *transmitQueue) Push(req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) (ok bool) {
- tq.cond.L.Lock()
- defer tq.cond.L.Unlock()
-
- if tq.closed {
- return false
- }
-
- if tq.maxlen != 0 && tq.pq.Len() == tq.maxlen {
- // evict oldest entry to make room
- tq.lggr.Criticalf("Transmit queue is full; dropping oldest transmission (reached max length of %d)", tq.maxlen)
- removed := heap.PopMax(tq.pq)
- if transmission, ok := removed.(*Transmission); ok {
- tq.asyncDeleter.AsyncDelete(transmission.Req)
- }
- }
-
- heap.Push(tq.pq, &Transmission{req, reportCtx})
- tq.cond.Signal()
-
- return true
-}
-
-// BlockingPop will block until at least one item is in the heap, and then return it
-// If the queue is closed, it will immediately return nil
-func (tq *transmitQueue) BlockingPop() (t *Transmission) {
- tq.cond.L.Lock()
- defer tq.cond.L.Unlock()
- if tq.closed {
- return nil
- }
- for t = tq.pop(); t == nil; t = tq.pop() {
- tq.cond.Wait()
- if tq.closed {
- return nil
- }
- }
- return t
-}
-
-func (tq *transmitQueue) IsEmpty() bool {
- tq.mu.RLock()
- defer tq.mu.RUnlock()
- return tq.pq.Len() == 0
-}
-
-func (tq *transmitQueue) Start(context.Context) error {
- return tq.StartOnce("TransmitQueue", func() error {
- t := services.NewTicker(promInterval)
- wg := new(sync.WaitGroup)
- chStop := make(chan struct{})
- tq.stopMonitor = func() {
- t.Stop()
- close(chStop)
- wg.Wait()
- }
- wg.Add(1)
- go tq.monitorLoop(t.C, chStop, wg)
- return nil
- })
-}
-
-func (tq *transmitQueue) Close() error {
- return tq.StopOnce("TransmitQueue", func() error {
- tq.cond.L.Lock()
- tq.closed = true
- tq.cond.L.Unlock()
- tq.cond.Broadcast()
- tq.stopMonitor()
- return nil
- })
-}
-
-func (tq *transmitQueue) monitorLoop(c <-chan time.Time, chStop <-chan struct{}, wg *sync.WaitGroup) {
- defer wg.Done()
-
- for {
- select {
- case <-c:
- tq.report()
- case <-chStop:
- return
- }
- }
-}
-
-func (tq *transmitQueue) report() {
- tq.mu.RLock()
- length := tq.pq.Len()
- tq.mu.RUnlock()
- tq.transmitQueueLoad.Set(float64(length))
-}
-
-func (tq *transmitQueue) Ready() error {
- return nil
-}
-func (tq *transmitQueue) Name() string { return tq.lggr.Name() }
-func (tq *transmitQueue) HealthReport() map[string]error {
- report := map[string]error{tq.Name(): errors.Join(
- tq.status(),
- )}
- return report
-}
-
-func (tq *transmitQueue) status() (merr error) {
- tq.mu.RLock()
- length := tq.pq.Len()
- closed := tq.closed
- tq.mu.RUnlock()
- if tq.maxlen != 0 && length > (tq.maxlen/2) {
- merr = errors.Join(merr, fmt.Errorf("transmit priority queue is greater than 50%% full (%d/%d)", length, tq.maxlen))
- }
- if closed {
- merr = errors.New("transmit queue is closed")
- }
- return merr
-}
-
-// pop latest Transmission from the heap
-// Not thread-safe
-func (tq *transmitQueue) pop() *Transmission {
- if tq.pq.Len() == 0 {
- return nil
- }
- return heap.Pop(tq.pq).(*Transmission)
-}
-
-// HEAP
-// Adapted from https://pkg.go.dev/container/heap#example-package-PriorityQueue
-
-// WARNING: None of these methods are thread-safe, caller must synchronize
-
-var _ heap.Interface = &priorityQueue{}
-
-type priorityQueue []*Transmission
-
-func (pq priorityQueue) Len() int { return len(pq) }
-
-func (pq priorityQueue) Less(i, j int) bool {
- // We want Pop to give us the latest round, so we use greater than here
- // i.e. a later epoch/round is "less" than an earlier one
- return pq[i].ReportCtx.ReportTimestamp.Epoch > pq[j].ReportCtx.ReportTimestamp.Epoch &&
- pq[i].ReportCtx.ReportTimestamp.Round > pq[j].ReportCtx.ReportTimestamp.Round
-}
-
-func (pq priorityQueue) Swap(i, j int) {
- pq[i], pq[j] = pq[j], pq[i]
-}
-
-func (pq *priorityQueue) Pop() any {
- n := len(*pq)
- if n == 0 {
- return nil
- }
- old := *pq
- item := old[n-1]
- old[n-1] = nil // avoid memory leak
- *pq = old[0 : n-1]
- return item
-}
-
-func (pq *priorityQueue) Push(x any) {
- *pq = append(*pq, x.(*Transmission))
-}
diff --git a/core/services/relay/evm/mercury/queue_test.go b/core/services/relay/evm/mercury/queue_test.go
deleted file mode 100644
index ddaad5c835a..00000000000
--- a/core/services/relay/evm/mercury/queue_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package mercury
-
-import (
- "sync"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zapcore"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-type TestTransmissionWithReport struct {
- tr *pb.TransmitRequest
- ctx ocrtypes.ReportContext
-}
-
-func createTestTransmissions(t *testing.T) []TestTransmissionWithReport {
- t.Helper()
- return []TestTransmissionWithReport{
- {
- tr: &pb.TransmitRequest{
- Payload: []byte("test1"),
- },
- ctx: ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- Epoch: 1,
- Round: 1,
- ConfigDigest: ocrtypes.ConfigDigest{},
- },
- },
- },
- {
- tr: &pb.TransmitRequest{
- Payload: []byte("test2"),
- },
- ctx: ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- Epoch: 2,
- Round: 2,
- ConfigDigest: ocrtypes.ConfigDigest{},
- },
- },
- },
- {
- tr: &pb.TransmitRequest{
- Payload: []byte("test3"),
- },
- ctx: ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- Epoch: 3,
- Round: 3,
- ConfigDigest: ocrtypes.ConfigDigest{},
- },
- },
- },
- }
-}
-
-func Test_Queue(t *testing.T) {
- t.Parallel()
- lggr, observedLogs := logger.TestObserved(t, zapcore.ErrorLevel)
- testTransmissions := createTestTransmissions(t)
- deleter := mocks.NewAsyncDeleter(t)
- transmitQueue := NewTransmitQueue(lggr, sURL, "foo feed ID", 7, deleter)
- transmitQueue.Init([]*Transmission{})
-
- t.Run("successfully add transmissions to transmit queue", func(t *testing.T) {
- for _, tt := range testTransmissions {
- ok := transmitQueue.Push(tt.tr, tt.ctx)
- require.True(t, ok)
- }
- report := transmitQueue.HealthReport()
- assert.Nil(t, report[transmitQueue.Name()])
- })
-
- t.Run("transmit queue is more than 50% full", func(t *testing.T) {
- transmitQueue.Push(testTransmissions[2].tr, testTransmissions[2].ctx)
- report := transmitQueue.HealthReport()
- assert.Equal(t, report[transmitQueue.Name()].Error(), "transmit priority queue is greater than 50% full (4/7)")
- })
-
- t.Run("transmit queue pops the highest priority transmission", func(t *testing.T) {
- tr := transmitQueue.BlockingPop()
- assert.Equal(t, testTransmissions[2].tr, tr.Req)
- })
-
- t.Run("transmit queue is full and evicts the oldest transmission", func(t *testing.T) {
- deleter.On("AsyncDelete", testTransmissions[0].tr).Once()
-
- // add 5 more transmissions to overflow the queue by 1
- for i := 0; i < 5; i++ {
- transmitQueue.Push(testTransmissions[1].tr, testTransmissions[1].ctx)
- }
-
- // expecting testTransmissions[0] to get evicted and not present in the queue anymore
- testutils.WaitForLogMessage(t, observedLogs, "Transmit queue is full; dropping oldest transmission (reached max length of 7)")
- for i := 0; i < 7; i++ {
- tr := transmitQueue.BlockingPop()
- assert.NotEqual(t, tr.Req, testTransmissions[0].tr)
- }
- })
-
- t.Run("transmit queue blocks when empty and resumes when tranmission available", func(t *testing.T) {
- assert.True(t, transmitQueue.IsEmpty())
-
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- defer wg.Done()
- tr := transmitQueue.BlockingPop()
- assert.Equal(t, tr.Req, testTransmissions[0].tr)
- }()
- go func() {
- defer wg.Done()
- transmitQueue.Push(testTransmissions[0].tr, testTransmissions[0].ctx)
- }()
- wg.Wait()
- })
-
- t.Run("initializes transmissions", func(t *testing.T) {
- transmissions := []*Transmission{
- {
- Req: &pb.TransmitRequest{
- Payload: []byte("new1"),
- },
- ReportCtx: ocrtypes.ReportContext{
- ReportTimestamp: ocrtypes.ReportTimestamp{
- Epoch: 1,
- Round: 1,
- ConfigDigest: ocrtypes.ConfigDigest{},
- },
- },
- },
- }
- transmitQueue := NewTransmitQueue(lggr, sURL, "foo feed ID", 7, deleter)
- transmitQueue.Init(transmissions)
-
- transmission := transmitQueue.BlockingPop()
- assert.Equal(t, transmission.Req.Payload, []byte("new1"))
- assert.True(t, transmitQueue.IsEmpty())
- })
-}
diff --git a/core/services/relay/evm/mercury/test_helpers.go b/core/services/relay/evm/mercury/test_helpers.go
deleted file mode 100644
index 27093268b3e..00000000000
--- a/core/services/relay/evm/mercury/test_helpers.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package mercury
-
-import (
- "github.com/ethereum/go-ethereum/common/hexutil"
-
- "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-)
-
-func BuildSamplePayload(report []byte, reportCtx ocrtypes.ReportContext, sigs []ocrtypes.AttributedOnchainSignature) []byte {
- var rs [][32]byte
- var ss [][32]byte
- var vs [32]byte
- for i, as := range sigs {
- r, s, v, err := evmutil.SplitSignature(as.Signature)
- if err != nil {
- panic("eventTransmit(ev): error in SplitSignature")
- }
- rs = append(rs, r)
- ss = append(ss, s)
- vs[i] = v
- }
- rawReportCtx := evmutil.RawReportContext(reportCtx)
- payload, err := PayloadTypes.Pack(rawReportCtx, report, rs, ss, vs)
- if err != nil {
- panic(err)
- }
- return payload
-}
-
-func MustHexToConfigDigest(s string) (cd ocrtypes.ConfigDigest) {
- b := hexutil.MustDecode(s)
- var err error
- cd, err = ocrtypes.BytesToConfigDigest(b)
- if err != nil {
- panic(err)
- }
- return
-}
diff --git a/core/services/relay/evm/mercury/transmitter.go b/core/services/relay/evm/mercury/transmitter.go
deleted file mode 100644
index be500593bf3..00000000000
--- a/core/services/relay/evm/mercury/transmitter.go
+++ /dev/null
@@ -1,613 +0,0 @@
-package mercury
-
-import (
- "bytes"
- "context"
- "crypto/ed25519"
- "errors"
- "fmt"
- "io"
- "math/big"
- "sort"
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/jpillora/backoff"
- pkgerrors "github.com/pkg/errors"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
- "golang.org/x/exp/maps"
- "golang.org/x/sync/errgroup"
-
- "github.com/smartcontractkit/libocr/offchainreporting2plus/chains/evmutil"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- capStreams "github.com/smartcontractkit/chainlink-common/pkg/capabilities/datastreams"
- "github.com/smartcontractkit/chainlink-common/pkg/capabilities/triggers"
- commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
-
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-const (
- // Mercury server error codes
- DuplicateReport = 2
-)
-
-var (
- transmitSuccessCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_success_count",
- Help: "Number of successful transmissions (duplicates are counted as success)",
- },
- []string{"feedID", "serverURL"},
- )
- transmitDuplicateCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_duplicate_count",
- Help: "Number of transmissions where the server told us it was a duplicate",
- },
- []string{"feedID", "serverURL"},
- )
- transmitConnectionErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_connection_error_count",
- Help: "Number of errored transmissions that failed due to problem with the connection",
- },
- []string{"feedID", "serverURL"},
- )
- transmitQueueDeleteErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_queue_delete_error_count",
- Help: "Running count of DB errors when trying to delete an item from the queue DB",
- },
- []string{"feedID", "serverURL"},
- )
- transmitQueueInsertErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_queue_insert_error_count",
- Help: "Running count of DB errors when trying to insert an item into the queue DB",
- },
- []string{"feedID", "serverURL"},
- )
- transmitQueuePushErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_queue_push_error_count",
- Help: "Running count of DB errors when trying to push an item onto the queue",
- },
- []string{"feedID", "serverURL"},
- )
- transmitServerErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_server_error_count",
- Help: "Number of errored transmissions that failed due to an error returned by the mercury server",
- },
- []string{"feedID", "serverURL", "code"},
- )
-)
-
-type Transmitter interface {
- mercury.Transmitter
- services.Service
-}
-
-type ConfigTracker interface {
- LatestConfigDetails(ctx context.Context) (changedInBlock uint64, configDigest ocrtypes.ConfigDigest, err error)
-}
-
-type TransmitterReportDecoder interface {
- BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error)
- ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error)
-}
-
-type BenchmarkPriceDecoder func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error)
-
-var _ Transmitter = (*mercuryTransmitter)(nil)
-
-type TransmitterConfig interface {
- TransmitTimeout() commonconfig.Duration
-}
-
-type mercuryTransmitter struct {
- services.StateMachine
- lggr logger.SugaredLogger
- cfg TransmitterConfig
-
- orm ORM
- servers map[string]*server
-
- codec TransmitterReportDecoder
- benchmarkPriceDecoder BenchmarkPriceDecoder
- triggerCapability *triggers.MercuryTriggerService
-
- feedID mercuryutils.FeedID
- jobID int32
- fromAccount string
-
- stopCh services.StopChan
- wg *sync.WaitGroup
-}
-
-var PayloadTypes = getPayloadTypes()
-
-func getPayloadTypes() abi.Arguments {
- mustNewType := func(t string) abi.Type {
- result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{})
- if err != nil {
- panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err))
- }
- return result
- }
- return abi.Arguments([]abi.Argument{
- {Name: "reportContext", Type: mustNewType("bytes32[3]")},
- {Name: "report", Type: mustNewType("bytes")},
- {Name: "rawRs", Type: mustNewType("bytes32[]")},
- {Name: "rawSs", Type: mustNewType("bytes32[]")},
- {Name: "rawVs", Type: mustNewType("bytes32")},
- })
-}
-
-type server struct {
- lggr logger.SugaredLogger
-
- transmitTimeout time.Duration
-
- c wsrpc.Client
- pm *PersistenceManager
- q TransmitQueue
-
- deleteQueue chan *pb.TransmitRequest
-
- url string
-
- transmitSuccessCount prometheus.Counter
- transmitDuplicateCount prometheus.Counter
- transmitConnectionErrorCount prometheus.Counter
- transmitQueueDeleteErrorCount prometheus.Counter
- transmitQueueInsertErrorCount prometheus.Counter
- transmitQueuePushErrorCount prometheus.Counter
-}
-
-func (s *server) HealthReport() map[string]error {
- report := map[string]error{}
- services.CopyHealth(report, s.c.HealthReport())
- services.CopyHealth(report, s.q.HealthReport())
- return report
-}
-
-func (s *server) runDeleteQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup) {
- defer wg.Done()
- ctx, cancel := stopCh.NewCtx()
- defer cancel()
-
- // Exponential backoff for very rarely occurring errors (DB disconnect etc)
- b := backoff.Backoff{
- Min: 1 * time.Second,
- Max: 120 * time.Second,
- Factor: 2,
- Jitter: true,
- }
-
- for {
- select {
- case req := <-s.deleteQueue:
- for {
- if err := s.pm.Delete(ctx, req); err != nil {
- s.lggr.Errorw("Failed to delete transmit request record", "err", err, "req.Payload", req.Payload)
- s.transmitQueueDeleteErrorCount.Inc()
- select {
- case <-time.After(b.Duration()):
- // Wait a backoff duration before trying to delete again
- continue
- case <-stopCh:
- // abort and return immediately on stop even if items remain in queue
- return
- }
- }
- break
- }
- // success
- b.Reset()
- case <-stopCh:
- // abort and return immediately on stop even if items remain in queue
- return
- }
- }
-}
-
-func (s *server) runQueueLoop(stopCh services.StopChan, wg *sync.WaitGroup, feedIDHex string) {
- defer wg.Done()
- // Exponential backoff with very short retry interval (since latency is a priority)
- // 5ms, 10ms, 20ms, 40ms etc
- b := backoff.Backoff{
- Min: 5 * time.Millisecond,
- Max: 1 * time.Second,
- Factor: 2,
- Jitter: true,
- }
- ctx, cancel := stopCh.NewCtx()
- defer cancel()
- for {
- t := s.q.BlockingPop()
- if t == nil {
- // queue was closed
- return
- }
- res, err := func(ctx context.Context) (*pb.TransmitResponse, error) {
- ctx, cancel := context.WithTimeout(ctx, utils.WithJitter(s.transmitTimeout))
- defer cancel()
- return s.c.Transmit(ctx, t.Req)
- }(ctx)
- if ctx.Err() != nil {
- // only canceled on transmitter close so we can exit
- return
- } else if err != nil {
- s.transmitConnectionErrorCount.Inc()
- s.lggr.Errorw("Transmit report failed", "err", err, "reportCtx", t.ReportCtx)
- if ok := s.q.Push(t.Req, t.ReportCtx); !ok {
- s.lggr.Error("Failed to push report to transmit queue; queue is closed")
- return
- }
- // Wait a backoff duration before pulling the most recent transmission
- // the heap
- select {
- case <-time.After(b.Duration()):
- continue
- case <-stopCh:
- return
- }
- }
-
- b.Reset()
- if res.Error == "" {
- s.transmitSuccessCount.Inc()
- s.lggr.Debugw("Transmit report success", "payload", hexutil.Encode(t.Req.Payload), "response", res, "repts", t.ReportCtx.ReportTimestamp)
- } else {
- // We don't need to retry here because the mercury server
- // has confirmed it received the report. We only need to retry
- // on networking/unknown errors
- switch res.Code {
- case DuplicateReport:
- s.transmitSuccessCount.Inc()
- s.transmitDuplicateCount.Inc()
- s.lggr.Debugw("Transmit report success; duplicate report", "payload", hexutil.Encode(t.Req.Payload), "response", res, "repts", t.ReportCtx.ReportTimestamp)
- default:
- transmitServerErrorCount.WithLabelValues(feedIDHex, s.url, fmt.Sprintf("%d", res.Code)).Inc()
- s.lggr.Errorw("Transmit report failed; mercury server returned error", "response", res, "reportCtx", t.ReportCtx, "err", res.Error, "code", res.Code)
- }
- }
-
- select {
- case s.deleteQueue <- t.Req:
- default:
- s.lggr.Criticalw("Delete queue is full", "reportCtx", t.ReportCtx)
- }
- }
-}
-
-const TransmitQueueMaxSize = 10_000 // hardcode this for legacy transmitter since we want the config var to apply only to LLO
-
-func newServer(lggr logger.Logger, cfg TransmitterConfig, client wsrpc.Client, pm *PersistenceManager, serverURL, feedIDHex string) *server {
- return &server{
- logger.Sugared(lggr),
- cfg.TransmitTimeout().Duration(),
- client,
- pm,
- NewTransmitQueue(lggr, serverURL, feedIDHex, TransmitQueueMaxSize, pm),
- make(chan *pb.TransmitRequest, TransmitQueueMaxSize),
- serverURL,
- transmitSuccessCount.WithLabelValues(feedIDHex, serverURL),
- transmitDuplicateCount.WithLabelValues(feedIDHex, serverURL),
- transmitConnectionErrorCount.WithLabelValues(feedIDHex, serverURL),
- transmitQueueDeleteErrorCount.WithLabelValues(feedIDHex, serverURL),
- transmitQueueInsertErrorCount.WithLabelValues(feedIDHex, serverURL),
- transmitQueuePushErrorCount.WithLabelValues(feedIDHex, serverURL),
- }
-}
-
-func NewTransmitter(lggr logger.Logger, cfg TransmitterConfig, clients map[string]wsrpc.Client, fromAccount ed25519.PublicKey, jobID int32, feedID [32]byte, orm ORM, codec TransmitterReportDecoder, benchmarkPriceDecoder BenchmarkPriceDecoder, triggerCapability *triggers.MercuryTriggerService) *mercuryTransmitter {
- sugared := logger.Sugared(lggr)
- feedIDHex := fmt.Sprintf("0x%x", feedID[:])
- servers := make(map[string]*server, len(clients))
- for serverURL, client := range clients {
- cLggr := sugared.Named(serverURL).With("serverURL", serverURL)
- pm := NewPersistenceManager(cLggr, serverURL, orm, jobID, TransmitQueueMaxSize, flushDeletesFrequency, pruneFrequency)
- servers[serverURL] = newServer(cLggr, cfg, client, pm, serverURL, feedIDHex)
- }
- return &mercuryTransmitter{
- services.StateMachine{},
- sugared.Named("MercuryTransmitter").With("feedID", feedIDHex),
- cfg,
- orm,
- servers,
- codec,
- benchmarkPriceDecoder,
- triggerCapability,
- feedID,
- jobID,
- fmt.Sprintf("%x", fromAccount),
- make(services.StopChan),
- &sync.WaitGroup{},
- }
-}
-
-func (mt *mercuryTransmitter) Start(ctx context.Context) (err error) {
- return mt.StartOnce("MercuryTransmitter", func() error {
- mt.lggr.Debugw("Loading transmit requests from database")
-
- {
- var startClosers []services.StartClose
- for _, s := range mt.servers {
- transmissions, err := s.pm.Load(ctx)
- if err != nil {
- return err
- }
- s.q.Init(transmissions)
- // starting pm after loading from it is fine because it simply spawns some garbage collection/prune goroutines
- startClosers = append(startClosers, s.c, s.q, s.pm)
-
- mt.wg.Add(2)
- go s.runDeleteQueueLoop(mt.stopCh, mt.wg)
- go s.runQueueLoop(mt.stopCh, mt.wg, mt.feedID.Hex())
- }
- if err := (&services.MultiStart{}).Start(ctx, startClosers...); err != nil {
- return err
- }
- }
-
- return nil
- })
-}
-
-func (mt *mercuryTransmitter) Close() error {
- return mt.StopOnce("MercuryTransmitter", func() error {
- // Drain all the queues first
- var qs []io.Closer
- for _, s := range mt.servers {
- qs = append(qs, s.q)
- }
- if err := services.CloseAll(qs...); err != nil {
- return err
- }
-
- close(mt.stopCh)
- mt.wg.Wait()
-
- // Close all the persistence managers
- // Close all the clients
- var closers []io.Closer
- for _, s := range mt.servers {
- closers = append(closers, s.pm)
- closers = append(closers, s.c)
- }
- return services.CloseAll(closers...)
- })
-}
-
-func (mt *mercuryTransmitter) Name() string { return mt.lggr.Name() }
-
-func (mt *mercuryTransmitter) HealthReport() map[string]error {
- report := map[string]error{mt.Name(): mt.Healthy()}
- for _, s := range mt.servers {
- services.CopyHealth(report, s.HealthReport())
- }
- return report
-}
-
-func (mt *mercuryTransmitter) sendToTrigger(report ocrtypes.Report, rawReportCtx [3][32]byte, signatures []ocrtypes.AttributedOnchainSignature) error {
- rawSignatures := [][]byte{}
- for _, sig := range signatures {
- rawSignatures = append(rawSignatures, sig.Signature)
- }
-
- reportContextFlat := []byte{}
- reportContextFlat = append(reportContextFlat, rawReportCtx[0][:]...)
- reportContextFlat = append(reportContextFlat, rawReportCtx[1][:]...)
- reportContextFlat = append(reportContextFlat, rawReportCtx[2][:]...)
-
- converted := capStreams.FeedReport{
- FeedID: mt.feedID.Hex(),
- FullReport: report,
- ReportContext: reportContextFlat,
- Signatures: rawSignatures,
- // NOTE: Skipping fields derived from FullReport, they will be filled out at a later stage
- // after decoding and validating signatures.
- }
- return mt.triggerCapability.ProcessReport([]capStreams.FeedReport{converted})
-}
-
-// Transmit sends the report to the on-chain smart contract's Transmit method.
-func (mt *mercuryTransmitter) Transmit(ctx context.Context, reportCtx ocrtypes.ReportContext, report ocrtypes.Report, signatures []ocrtypes.AttributedOnchainSignature) error {
- rawReportCtx := evmutil.RawReportContext(reportCtx)
- if mt.triggerCapability != nil {
- // Acting as a Capability - send report to trigger service and exit.
- return mt.sendToTrigger(report, rawReportCtx, signatures)
- }
-
- var rs [][32]byte
- var ss [][32]byte
- var vs [32]byte
- for i, as := range signatures {
- r, s, v, err := evmutil.SplitSignature(as.Signature)
- if err != nil {
- panic("eventTransmit(ev): error in SplitSignature")
- }
- rs = append(rs, r)
- ss = append(ss, s)
- vs[i] = v
- }
-
- payload, err := PayloadTypes.Pack(rawReportCtx, []byte(report), rs, ss, vs)
- if err != nil {
- return pkgerrors.Wrap(err, "abi.Pack failed")
- }
-
- req := &pb.TransmitRequest{
- Payload: payload,
- }
-
- ts, err := mt.codec.ObservationTimestampFromReport(ctx, report)
- if err != nil {
- mt.lggr.Warnw("Failed to get observation timestamp from report", "err", err)
- }
- mt.lggr.Debugw("Transmit enqueue", "req.Payload", hexutil.Encode(req.Payload), "report", report, "repts", reportCtx.ReportTimestamp, "signatures", signatures, "observationsTimestamp", ts)
-
- if err := mt.orm.InsertTransmitRequest(ctx, maps.Keys(mt.servers), req, mt.jobID, reportCtx); err != nil {
- return err
- }
-
- g := new(errgroup.Group)
- for _, s := range mt.servers {
- s := s // https://golang.org/doc/faq#closures_and_goroutines
- g.Go(func() error {
- if ok := s.q.Push(req, reportCtx); !ok {
- s.transmitQueuePushErrorCount.Inc()
- return errors.New("transmit queue is closed")
- }
- return nil
- })
- }
-
- return g.Wait()
-}
-
-// FromAccount returns the stringified (hex) CSA public key
-func (mt *mercuryTransmitter) FromAccount(ctx context.Context) (ocrtypes.Account, error) {
- return ocrtypes.Account(mt.fromAccount), nil
-}
-
-// LatestConfigDigestAndEpoch retrieves the latest config digest and epoch from the OCR2 contract.
-func (mt *mercuryTransmitter) LatestConfigDigestAndEpoch(ctx context.Context) (cd ocrtypes.ConfigDigest, epoch uint32, err error) {
- panic("not needed for OCR3")
-}
-
-func (mt *mercuryTransmitter) FetchInitialMaxFinalizedBlockNumber(ctx context.Context) (*int64, error) {
- mt.lggr.Trace("FetchInitialMaxFinalizedBlockNumber")
-
- report, err := mt.latestReport(ctx, mt.feedID)
- if err != nil {
- return nil, err
- }
-
- if report == nil {
- mt.lggr.Debugw("FetchInitialMaxFinalizedBlockNumber success; got nil report")
- return nil, nil
- }
-
- mt.lggr.Debugw("FetchInitialMaxFinalizedBlockNumber success", "currentBlockNum", report.CurrentBlockNumber)
-
- return &report.CurrentBlockNumber, nil
-}
-
-func (mt *mercuryTransmitter) LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error) {
- mt.lggr.Trace("LatestPrice")
-
- fullReport, err := mt.latestReport(ctx, feedID)
- if err != nil {
- return nil, err
- }
- if fullReport == nil {
- return nil, nil
- }
- payload := fullReport.Payload
- m := make(map[string]interface{})
- if err := PayloadTypes.UnpackIntoMap(m, payload); err != nil {
- return nil, err
- }
- report, is := m["report"].([]byte)
- if !is {
- return nil, fmt.Errorf("expected report to be []byte, but it was %T", m["report"])
- }
- return mt.benchmarkPriceDecoder(ctx, feedID, report)
-}
-
-// LatestTimestamp will return -1, nil if the feed is missing
-func (mt *mercuryTransmitter) LatestTimestamp(ctx context.Context) (int64, error) {
- mt.lggr.Trace("LatestTimestamp")
-
- report, err := mt.latestReport(ctx, mt.feedID)
- if err != nil {
- return 0, err
- }
-
- if report == nil {
- mt.lggr.Debugw("LatestTimestamp success; got nil report")
- return -1, nil
- }
-
- mt.lggr.Debugw("LatestTimestamp success", "timestamp", report.ObservationsTimestamp)
-
- return report.ObservationsTimestamp, nil
-}
-
-func (mt *mercuryTransmitter) latestReport(ctx context.Context, feedID [32]byte) (*pb.Report, error) {
- mt.lggr.Trace("latestReport")
-
- req := &pb.LatestReportRequest{
- FeedId: feedID[:],
- }
-
- var reports []*pb.Report
- mu := sync.Mutex{}
- var g errgroup.Group
- for _, s := range mt.servers {
- s := s
- g.Go(func() error {
- resp, err := s.c.LatestReport(ctx, req)
- if err != nil {
- s.lggr.Warnw("latestReport failed", "err", err)
- return err
- }
- if resp == nil {
- err = errors.New("latestReport expected non-nil response from server")
- s.lggr.Warn(err.Error())
- return err
- }
- if resp.Error != "" {
- err = errors.New(resp.Error)
- s.lggr.Warnw("latestReport failed; mercury server returned error", "err", err)
- return fmt.Errorf("latestReport failed; mercury server returned error: %s", resp.Error)
- }
- if resp.Report == nil {
- s.lggr.Tracew("latestReport success: returned nil")
- } else if !bytes.Equal(resp.Report.FeedId, feedID[:]) {
- err = fmt.Errorf("latestReport failed; mismatched feed IDs, expected: 0x%x, got: 0x%x", mt.feedID[:], resp.Report.FeedId)
- s.lggr.Errorw("latestReport failed", "err", err)
- return err
- } else {
- s.lggr.Tracew("latestReport success", "observationsTimestamp", resp.Report.ObservationsTimestamp, "currentBlockNum", resp.Report.CurrentBlockNumber)
- }
- mu.Lock()
- defer mu.Unlock()
- reports = append(reports, resp.Report)
- return nil
- })
- }
- err := g.Wait()
-
- if len(reports) == 0 {
- return nil, fmt.Errorf("latestReport failed; all servers returned an error: %w", err)
- }
-
- sortReportsLatestFirst(reports)
-
- return reports[0], nil
-}
-
-func sortReportsLatestFirst(reports []*pb.Report) {
- sort.Slice(reports, func(i, j int) bool {
- // nils are "earliest" so they go to the end
- if reports[i] == nil {
- return false
- } else if reports[j] == nil {
- return true
- }
- // Handle block number case
- if reports[i].ObservationsTimestamp == reports[j].ObservationsTimestamp {
- return reports[i].CurrentBlockNumber > reports[j].CurrentBlockNumber
- }
- // Timestamp case
- return reports[i].ObservationsTimestamp > reports[j].ObservationsTimestamp
- })
-}
diff --git a/core/services/relay/evm/mercury/transmitter_test.go b/core/services/relay/evm/mercury/transmitter_test.go
deleted file mode 100644
index 7c2cf063ea1..00000000000
--- a/core/services/relay/evm/mercury/transmitter_test.go
+++ /dev/null
@@ -1,599 +0,0 @@
-package mercury
-
-import (
- "context"
- "math/big"
- "sync"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/pkg/errors"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink-common/pkg/capabilities/triggers"
- commonconfig "github.com/smartcontractkit/chainlink-common/pkg/config"
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
-
- "github.com/smartcontractkit/chainlink-integrations/evm/utils"
- "github.com/smartcontractkit/chainlink/v2/core/config"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest"
- mercurytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-type mockCfg struct{}
-
-func (m mockCfg) Protocol() config.MercuryTransmitterProtocol {
- return config.MercuryTransmitterProtocolGRPC
-}
-
-func (m mockCfg) TransmitQueueMaxSize() uint32 {
- return 100_000
-}
-
-func (m mockCfg) TransmitTimeout() commonconfig.Duration {
- return *commonconfig.MustNewDuration(1 * time.Hour)
-}
-
-func Test_MercuryTransmitter_Transmit(t *testing.T) {
- lggr := logger.Test(t)
- db := pgtest.NewSqlxDB(t)
- var jobID int32
- pgtest.MustExec(t, db, `SET CONSTRAINTS mercury_transmit_requests_job_id_fkey DEFERRED`)
- pgtest.MustExec(t, db, `SET CONSTRAINTS feed_latest_reports_job_id_fkey DEFERRED`)
- codec := new(mockCodec)
- benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) {
- return codec.BenchmarkPriceFromReport(ctx, report)
- }
- orm := NewORM(db)
- clients := map[string]wsrpc.Client{}
-
- t.Run("with one mercury server", func(t *testing.T) {
- t.Run("v1 report transmission successfully enqueued", func(t *testing.T) {
- report := sampleV1Report
- c := &mocks.MockWSRPCClient{}
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- // init the queue since we skipped starting transmitter
- mt.servers[sURL].q.Init([]*Transmission{})
- err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs)
- require.NoError(t, err)
-
- // ensure it was added to the queue
- require.Equal(t, mt.servers[sURL].q.(*transmitQueue).pq.Len(), 1)
- assert.Subset(t, mt.servers[sURL].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report)
- })
- t.Run("v2 report transmission successfully enqueued", func(t *testing.T) {
- report := sampleV2Report
- c := &mocks.MockWSRPCClient{}
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- // init the queue since we skipped starting transmitter
- mt.servers[sURL].q.Init([]*Transmission{})
- err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs)
- require.NoError(t, err)
-
- // ensure it was added to the queue
- require.Equal(t, mt.servers[sURL].q.(*transmitQueue).pq.Len(), 1)
- assert.Subset(t, mt.servers[sURL].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report)
- })
- t.Run("v3 report transmission successfully enqueued", func(t *testing.T) {
- report := sampleV3Report
- c := &mocks.MockWSRPCClient{}
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- // init the queue since we skipped starting transmitter
- mt.servers[sURL].q.Init([]*Transmission{})
- err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs)
- require.NoError(t, err)
-
- // ensure it was added to the queue
- require.Equal(t, mt.servers[sURL].q.(*transmitQueue).pq.Len(), 1)
- assert.Subset(t, mt.servers[sURL].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report)
- })
- t.Run("v3 report transmission sent only to trigger service", func(t *testing.T) {
- report := sampleV3Report
- c := &mocks.MockWSRPCClient{}
- clients[sURL] = c
- triggerService, err := triggers.NewMercuryTriggerService(0, "", "", lggr)
- require.NoError(t, err)
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, triggerService)
- // init the queue since we skipped starting transmitter
- mt.servers[sURL].q.Init([]*Transmission{})
- err = mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs)
- require.NoError(t, err)
- // queue is empty
- require.Equal(t, mt.servers[sURL].q.(*transmitQueue).pq.Len(), 0)
- })
- })
-
- t.Run("with multiple mercury servers", func(t *testing.T) {
- report := sampleV3Report
- c := &mocks.MockWSRPCClient{}
- clients[sURL] = c
- clients[sURL2] = c
- clients[sURL3] = c
-
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- // init the queue since we skipped starting transmitter
- mt.servers[sURL].q.Init([]*Transmission{})
- mt.servers[sURL2].q.Init([]*Transmission{})
- mt.servers[sURL3].q.Init([]*Transmission{})
-
- err := mt.Transmit(testutils.Context(t), sampleReportContext, report, sampleSigs)
- require.NoError(t, err)
-
- // ensure it was added to the queue
- require.Equal(t, mt.servers[sURL].q.(*transmitQueue).pq.Len(), 1)
- assert.Subset(t, mt.servers[sURL].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report)
- require.Equal(t, mt.servers[sURL2].q.(*transmitQueue).pq.Len(), 1)
- assert.Subset(t, mt.servers[sURL2].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report)
- require.Equal(t, mt.servers[sURL3].q.(*transmitQueue).pq.Len(), 1)
- assert.Subset(t, mt.servers[sURL3].q.(*transmitQueue).pq.Pop().(*Transmission).Req.Payload, report)
- })
-}
-
-func Test_MercuryTransmitter_LatestTimestamp(t *testing.T) {
- t.Parallel()
- lggr := logger.Test(t)
- db := pgtest.NewSqlxDB(t)
- var jobID int32
- codec := new(mockCodec)
- benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) {
- return codec.BenchmarkPriceFromReport(ctx, report)
- }
-
- orm := NewORM(db)
- clients := map[string]wsrpc.Client{}
-
- t.Run("successful query", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- require.NotNil(t, in)
- assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId))
- out = new(pb.LatestReportResponse)
- out.Report = new(pb.Report)
- out.Report.FeedId = sampleFeedID[:]
- out.Report.ObservationsTimestamp = 42
- return out, nil
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- ts, err := mt.LatestTimestamp(testutils.Context(t))
- require.NoError(t, err)
-
- assert.Equal(t, int64(42), ts)
- })
-
- t.Run("successful query returning nil report (new feed) gives latest timestamp = -1", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- out = new(pb.LatestReportResponse)
- out.Report = nil
- return out, nil
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- ts, err := mt.LatestTimestamp(testutils.Context(t))
- require.NoError(t, err)
-
- assert.Equal(t, int64(-1), ts)
- })
-
- t.Run("failing query", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- return nil, errors.New("something exploded")
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- _, err := mt.LatestTimestamp(testutils.Context(t))
- require.Error(t, err)
- assert.Contains(t, err.Error(), "something exploded")
- })
-
- t.Run("with multiple servers, uses latest", func(t *testing.T) {
- clients[sURL] = &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- return nil, errors.New("something exploded")
- },
- }
- clients[sURL2] = &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- out = new(pb.LatestReportResponse)
- out.Report = new(pb.Report)
- out.Report.FeedId = sampleFeedID[:]
- out.Report.ObservationsTimestamp = 42
- return out, nil
- },
- }
- clients[sURL3] = &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- out = new(pb.LatestReportResponse)
- out.Report = new(pb.Report)
- out.Report.FeedId = sampleFeedID[:]
- out.Report.ObservationsTimestamp = 41
- return out, nil
- },
- }
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- ts, err := mt.LatestTimestamp(testutils.Context(t))
- require.NoError(t, err)
-
- assert.Equal(t, int64(42), ts)
- })
-}
-
-type mockCodec struct {
- val *big.Int
- err error
-}
-
-var _ mercurytypes.ReportCodec = &mockCodec{}
-
-func (m *mockCodec) BenchmarkPriceFromReport(ctx context.Context, _ ocrtypes.Report) (*big.Int, error) {
- return m.val, m.err
-}
-
-func (m *mockCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) {
- return 0, nil
-}
-
-func Test_MercuryTransmitter_LatestPrice(t *testing.T) {
- t.Parallel()
- lggr := logger.Test(t)
- db := pgtest.NewSqlxDB(t)
- var jobID int32
-
- codec := new(mockCodec)
- benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) {
- return codec.BenchmarkPriceFromReport(ctx, report)
- }
- orm := NewORM(db)
- clients := map[string]wsrpc.Client{}
-
- t.Run("successful query", func(t *testing.T) {
- originalPrice := big.NewInt(123456789)
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- require.NotNil(t, in)
- assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId))
- out = new(pb.LatestReportResponse)
- out.Report = new(pb.Report)
- out.Report.FeedId = sampleFeedID[:]
- out.Report.Payload = buildSamplePayload([]byte("doesn't matter"))
- return out, nil
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
-
- t.Run("BenchmarkPriceFromReport succeeds", func(t *testing.T) {
- codec.val = originalPrice
- codec.err = nil
-
- price, err := mt.LatestPrice(testutils.Context(t), sampleFeedID)
- require.NoError(t, err)
-
- assert.Equal(t, originalPrice, price)
- })
- t.Run("BenchmarkPriceFromReport fails", func(t *testing.T) {
- codec.val = nil
- codec.err = errors.New("something exploded")
-
- _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID)
- require.Error(t, err)
-
- assert.EqualError(t, err, "something exploded")
- })
- })
-
- t.Run("successful query returning nil report (new feed)", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- out = new(pb.LatestReportResponse)
- out.Report = nil
- return out, nil
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- price, err := mt.LatestPrice(testutils.Context(t), sampleFeedID)
- require.NoError(t, err)
-
- assert.Nil(t, price)
- })
-
- t.Run("failing query", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- return nil, errors.New("something exploded")
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- _, err := mt.LatestPrice(testutils.Context(t), sampleFeedID)
- require.Error(t, err)
- assert.Contains(t, err.Error(), "something exploded")
- })
-}
-
-func Test_MercuryTransmitter_FetchInitialMaxFinalizedBlockNumber(t *testing.T) {
- t.Parallel()
-
- lggr := logger.Test(t)
- db := pgtest.NewSqlxDB(t)
- var jobID int32
- codec := new(mockCodec)
- benchmarkPriceDecoder := func(ctx context.Context, feedID mercuryutils.FeedID, report ocrtypes.Report) (*big.Int, error) {
- return codec.BenchmarkPriceFromReport(ctx, report)
- }
- orm := NewORM(db)
- clients := map[string]wsrpc.Client{}
-
- t.Run("successful query", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- require.NotNil(t, in)
- assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId))
- out = new(pb.LatestReportResponse)
- out.Report = new(pb.Report)
- out.Report.FeedId = sampleFeedID[:]
- out.Report.CurrentBlockNumber = 42
- return out, nil
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t))
- require.NoError(t, err)
-
- require.NotNil(t, bn)
- assert.Equal(t, 42, int(*bn))
- })
- t.Run("successful query returning nil report (new feed)", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- out = new(pb.LatestReportResponse)
- out.Report = nil
- return out, nil
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- bn, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t))
- require.NoError(t, err)
-
- assert.Nil(t, bn)
- })
- t.Run("failing query", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- return nil, errors.New("something exploded")
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t))
- require.Error(t, err)
- assert.Contains(t, err.Error(), "something exploded")
- })
- t.Run("return feed ID is wrong", func(t *testing.T) {
- c := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- require.NotNil(t, in)
- assert.Equal(t, hexutil.Encode(sampleFeedID[:]), hexutil.Encode(in.FeedId))
- out = new(pb.LatestReportResponse)
- out.Report = new(pb.Report)
- out.Report.CurrentBlockNumber = 42
- out.Report.FeedId = []byte{1, 2}
- return out, nil
- },
- }
- clients[sURL] = c
- mt := NewTransmitter(lggr, mockCfg{}, clients, sampleClientPubKey, jobID, sampleFeedID, orm, codec, benchmarkPriceDecoder, nil)
- _, err := mt.FetchInitialMaxFinalizedBlockNumber(testutils.Context(t))
- require.Error(t, err)
- assert.Contains(t, err.Error(), "latestReport failed; mismatched feed IDs, expected: 0x1c916b4aa7e57ca7b68ae1bf45653f56b656fd3aa335ef7fae696b663f1b8472, got: 0x")
- })
-}
-
-func Test_sortReportsLatestFirst(t *testing.T) {
- reports := []*pb.Report{
- nil,
- {ObservationsTimestamp: 1},
- {ObservationsTimestamp: 1},
- {ObservationsTimestamp: 2},
- {CurrentBlockNumber: 1},
- nil,
- {CurrentBlockNumber: 2},
- {},
- }
-
- sortReportsLatestFirst(reports)
-
- assert.Equal(t, int64(2), reports[0].ObservationsTimestamp)
- assert.Equal(t, int64(1), reports[1].ObservationsTimestamp)
- assert.Equal(t, int64(1), reports[2].ObservationsTimestamp)
- assert.Equal(t, int64(0), reports[3].ObservationsTimestamp)
- assert.Equal(t, int64(2), reports[3].CurrentBlockNumber)
- assert.Equal(t, int64(0), reports[4].ObservationsTimestamp)
- assert.Equal(t, int64(1), reports[4].CurrentBlockNumber)
- assert.Equal(t, int64(0), reports[5].ObservationsTimestamp)
- assert.Equal(t, int64(0), reports[5].CurrentBlockNumber)
- assert.Nil(t, reports[6])
- assert.Nil(t, reports[7])
-}
-
-type mockQ struct {
- ch chan *Transmission
-}
-
-func newMockQ() *mockQ {
- return &mockQ{make(chan *Transmission, 100)}
-}
-
-func (m *mockQ) Start(context.Context) error { return nil }
-func (m *mockQ) Close() error {
- m.ch <- nil
- return nil
-}
-func (m *mockQ) Ready() error { return nil }
-func (m *mockQ) HealthReport() map[string]error { return nil }
-func (m *mockQ) Name() string { return "" }
-func (m *mockQ) BlockingPop() (t *Transmission) {
- val := <-m.ch
- return val
-}
-func (m *mockQ) Push(req *pb.TransmitRequest, reportCtx ocrtypes.ReportContext) (ok bool) {
- m.ch <- &Transmission{Req: req, ReportCtx: reportCtx}
- return true
-}
-func (m *mockQ) Init(transmissions []*Transmission) {}
-func (m *mockQ) IsEmpty() bool { return false }
-
-func Test_MercuryTransmitter_runQueueLoop(t *testing.T) {
- feedIDHex := utils.NewHash().Hex()
- lggr := logger.Test(t)
- c := &mocks.MockWSRPCClient{}
- db := pgtest.NewSqlxDB(t)
- orm := NewORM(db)
- pm := NewPersistenceManager(lggr, sURL, orm, 0, 0, 0, 0)
- cfg := mockCfg{}
-
- s := newServer(lggr, cfg, c, pm, sURL, feedIDHex)
-
- req := &pb.TransmitRequest{
- Payload: []byte{1, 2, 3},
- ReportFormat: 32,
- }
-
- t.Run("pulls from queue and transmits successfully", func(t *testing.T) {
- transmit := make(chan *pb.TransmitRequest, 1)
- c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- transmit <- in
- return &pb.TransmitResponse{Code: 0, Error: ""}, nil
- }
- q := newMockQ()
- s.q = q
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go s.runQueueLoop(nil, wg, feedIDHex)
-
- q.Push(req, sampleReportContext)
-
- select {
- case tr := <-transmit:
- assert.Equal(t, []byte{1, 2, 3}, tr.Payload)
- assert.Equal(t, 32, int(tr.ReportFormat))
- // case <-time.After(testutils.WaitTimeout(t)):
- case <-time.After(1 * time.Second):
- t.Fatal("expected a transmit request to be sent")
- }
-
- q.Close()
- wg.Wait()
- })
-
- t.Run("on duplicate, success", func(t *testing.T) {
- transmit := make(chan *pb.TransmitRequest, 1)
- c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- transmit <- in
- return &pb.TransmitResponse{Code: DuplicateReport, Error: ""}, nil
- }
- q := newMockQ()
- s.q = q
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go s.runQueueLoop(nil, wg, feedIDHex)
-
- q.Push(req, sampleReportContext)
-
- select {
- case tr := <-transmit:
- assert.Equal(t, []byte{1, 2, 3}, tr.Payload)
- assert.Equal(t, 32, int(tr.ReportFormat))
- // case <-time.After(testutils.WaitTimeout(t)):
- case <-time.After(1 * time.Second):
- t.Fatal("expected a transmit request to be sent")
- }
-
- q.Close()
- wg.Wait()
- })
- t.Run("on server-side error, does not retry", func(t *testing.T) {
- transmit := make(chan *pb.TransmitRequest, 1)
- c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- transmit <- in
- return &pb.TransmitResponse{Code: DuplicateReport, Error: ""}, nil
- }
- q := newMockQ()
- s.q = q
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go s.runQueueLoop(nil, wg, feedIDHex)
-
- q.Push(req, sampleReportContext)
-
- select {
- case tr := <-transmit:
- assert.Equal(t, []byte{1, 2, 3}, tr.Payload)
- assert.Equal(t, 32, int(tr.ReportFormat))
- // case <-time.After(testutils.WaitTimeout(t)):
- case <-time.After(1 * time.Second):
- t.Fatal("expected a transmit request to be sent")
- }
-
- q.Close()
- wg.Wait()
- })
- t.Run("on transmit error, retries", func(t *testing.T) {
- transmit := make(chan *pb.TransmitRequest, 1)
- c.TransmitF = func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- transmit <- in
- return &pb.TransmitResponse{}, errors.New("transmission error")
- }
- q := newMockQ()
- s.q = q
- wg := &sync.WaitGroup{}
- wg.Add(1)
- stopCh := make(chan struct{}, 1)
-
- go s.runQueueLoop(stopCh, wg, feedIDHex)
-
- q.Push(req, sampleReportContext)
-
- cnt := 0
- Loop:
- for {
- select {
- case tr := <-transmit:
- assert.Equal(t, []byte{1, 2, 3}, tr.Payload)
- assert.Equal(t, 32, int(tr.ReportFormat))
- if cnt > 2 {
- break Loop
- }
- cnt++
- // case <-time.After(testutils.WaitTimeout(t)):
- case <-time.After(1 * time.Second):
- t.Fatal("expected 3 transmit requests to be sent")
- }
- }
-
- close(stopCh)
- wg.Wait()
- })
-}
diff --git a/core/services/relay/evm/mercury/types/types.go b/core/services/relay/evm/mercury/types/types.go
deleted file mode 100644
index 98910887111..00000000000
--- a/core/services/relay/evm/mercury/types/types.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package types
-
-import (
- "context"
- "math/big"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-)
-
-type DataSourceORM interface {
- LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error)
-}
-
-type ReportCodec interface {
- BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error)
-}
-
-var (
- PriceFeedMissingCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_price_feed_missing",
- Help: "Running count of times mercury tried to query a price feed for billing from mercury server, but it was missing",
- },
- []string{"queriedFeedID"},
- )
- PriceFeedErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_price_feed_errors",
- Help: "Running count of times mercury tried to query a price feed for billing from mercury server, but got an error",
- },
- []string{"queriedFeedID"},
- )
-)
diff --git a/core/services/relay/evm/mercury/v1/data_source.go b/core/services/relay/evm/mercury/v1/data_source.go
deleted file mode 100644
index 372e5af71dc..00000000000
--- a/core/services/relay/evm/mercury/v1/data_source.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package v1
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
- "sync"
-
- pkgerrors "github.com/pkg/errors"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v1types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
- v1 "github.com/smartcontractkit/chainlink-data-streams/mercury/v1"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/reportcodec"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-var (
- insufficientBlocksCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_insufficient_blocks_count",
- Help: fmt.Sprintf("Count of times that there were not enough blocks in the chain during observation (need: %d)", nBlocksObservation),
- },
- []string{"feedID"},
- )
- zeroBlocksCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_zero_blocks_count",
- Help: "Count of times that there were zero blocks in the chain during observation",
- },
- []string{"feedID"},
- )
-)
-
-const nBlocksObservation int = v1.MaxAllowedBlocks
-
-type Runner interface {
- ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error)
-}
-
-// Fetcher fetcher data from Mercury server
-type Fetcher interface {
- // FetchInitialMaxFinalizedBlockNumber should fetch the initial max finalized block number
- FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error)
-}
-
-type datasource struct {
- pipelineRunner Runner
- jb job.Job
- spec pipeline.Spec
- lggr logger.Logger
- saver ocrcommon.Saver
- orm types.DataSourceORM
- codec reportcodec.ReportCodec
- feedID [32]byte
-
- mu sync.RWMutex
-
- chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData
- mercuryChainReader mercury.ChainReader
- fetcher Fetcher
- initialBlockNumber *int64
-
- insufficientBlocksCounter prometheus.Counter
- zeroBlocksCounter prometheus.Counter
-}
-
-var _ v1.DataSource = &datasource{}
-
-func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, lggr logger.Logger, s ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, mercuryChainReader mercury.ChainReader, fetcher Fetcher, initialBlockNumber *int64, feedID mercuryutils.FeedID) *datasource {
- return &datasource{pr, jb, spec, lggr, s, orm, reportcodec.ReportCodec{}, feedID, sync.RWMutex{}, enhancedTelemChan, mercuryChainReader, fetcher, initialBlockNumber, insufficientBlocksCount.WithLabelValues(feedID.String()), zeroBlocksCount.WithLabelValues(feedID.String())}
-}
-
-type ErrEmptyLatestReport struct {
- Err error
-}
-
-func (e ErrEmptyLatestReport) Unwrap() error { return e.Err }
-
-func (e ErrEmptyLatestReport) Error() string {
- return fmt.Sprintf("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: %v", e.Err)
-}
-
-func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedBlockNum bool) (obs v1types.Observation, pipelineExecutionErr error) {
- // setLatestBlocks must come chronologically before observations, along
- // with observationTimestamp, to avoid front-running
-
- // Errors are not expected when reading from the underlying ChainReader
- if err := ds.setLatestBlocks(ctx, &obs); err != nil {
- return obs, err
- }
-
- var wg sync.WaitGroup
- if fetchMaxFinalizedBlockNum {
- wg.Add(1)
- go func() {
- defer wg.Done()
- latest, dbErr := ds.orm.LatestReport(ctx, ds.feedID)
- if dbErr != nil {
- obs.MaxFinalizedBlockNumber.Err = dbErr
- return
- }
- if latest != nil {
- obs.MaxFinalizedBlockNumber.Val, obs.MaxFinalizedBlockNumber.Err = ds.codec.CurrentBlockNumFromReport(ctx, latest)
- return
- }
- val, fetchErr := ds.fetcher.FetchInitialMaxFinalizedBlockNumber(ctx)
- if fetchErr != nil {
- obs.MaxFinalizedBlockNumber.Err = fetchErr
- return
- }
- if val != nil {
- obs.MaxFinalizedBlockNumber.Val = *val
- return
- }
- if ds.initialBlockNumber == nil {
- if obs.CurrentBlockNum.Err != nil {
- obs.MaxFinalizedBlockNumber.Err = ErrEmptyLatestReport{Err: obs.CurrentBlockNum.Err}
- } else {
- // Subract 1 here because we will later add 1 to the
- // maxFinalizedBlockNumber to get the first validFromBlockNum, which
- // ought to be the same as current block num.
- obs.MaxFinalizedBlockNumber.Val = obs.CurrentBlockNum.Val - 1
- ds.lggr.Infof("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed so maxFinalizedBlockNumber=%d (initialBlockNumber unset, using currentBlockNum=%d-1)", obs.MaxFinalizedBlockNumber.Val, obs.CurrentBlockNum.Val)
- }
- } else {
- // NOTE: It's important to subtract 1 if the server is missing any past
- // report (brand new feed) since we will add 1 to the
- // maxFinalizedBlockNumber to get the first validFromBlockNum, which
- // ought to be zero.
- //
- // If "initialBlockNumber" is set to zero, this will give a starting block of zero.
- obs.MaxFinalizedBlockNumber.Val = *ds.initialBlockNumber - 1
- ds.lggr.Infof("FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed so maxFinalizedBlockNumber=%d (initialBlockNumber=%d)", obs.MaxFinalizedBlockNumber.Val, *ds.initialBlockNumber)
- }
- }()
- } else {
- obs.MaxFinalizedBlockNumber.Err = errors.New("fetchMaxFinalizedBlockNum=false")
- }
- var trrs pipeline.TaskRunResults
- wg.Add(1)
- go func() {
- defer wg.Done()
- var run *pipeline.Run
- run, trrs, pipelineExecutionErr = ds.executeRun(ctx)
- if pipelineExecutionErr != nil {
- pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr)
- return
- }
-
- ds.saver.Save(run)
-
- // NOTE: trrs comes back as _all_ tasks, but we only want the terminal ones
- // They are guaranteed to be sorted by index asc so should be in the correct order
- var finaltrrs []pipeline.TaskRunResult
- for _, trr := range trrs {
- if trr.IsTerminal() {
- finaltrrs = append(finaltrrs, trr)
- }
- }
-
- var parsed parseOutput
- parsed, pipelineExecutionErr = ds.parse(finaltrrs)
- if pipelineExecutionErr != nil {
- pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr)
- return
- }
- obs.BenchmarkPrice = parsed.benchmarkPrice
- obs.Bid = parsed.bid
- obs.Ask = parsed.ask
- }()
-
- wg.Wait()
-
- if pipelineExecutionErr != nil {
- return
- }
-
- ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{
- V1Observation: &obs,
- TaskRunResults: trrs,
- RepTimestamp: repts,
- FeedVersion: mercuryutils.REPORT_V1,
- })
-
- return obs, nil
-}
-
-func toBigInt(val interface{}) (*big.Int, error) {
- dec, err := utils.ToDecimal(val)
- if err != nil {
- return nil, err
- }
- return dec.BigInt(), nil
-}
-
-type parseOutput struct {
- benchmarkPrice mercury.ObsResult[*big.Int]
- bid mercury.ObsResult[*big.Int]
- ask mercury.ObsResult[*big.Int]
-}
-
-// parse expects the output of observe to be three values, in the following order:
-// 1. benchmark price
-// 2. bid
-// 3. ask
-//
-// returns error on parse errors: if something is the wrong type
-func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, merr error) {
- var finaltrrs []pipeline.TaskRunResult
- for _, trr := range trrs {
- // only return terminal trrs from executeRun
- if trr.IsTerminal() {
- finaltrrs = append(finaltrrs, trr)
- }
- }
-
- // pipeline.TaskRunResults comes ordered asc by index, this is guaranteed
- // by the pipeline executor
- if len(finaltrrs) != 3 {
- return o, fmt.Errorf("invalid number of results, expected: 3, got: %d", len(finaltrrs))
- }
- merr = errors.Join(
- setBenchmarkPrice(&o, finaltrrs[0].Result),
- setBid(&o, finaltrrs[1].Result),
- setAsk(&o, finaltrrs[2].Result),
- )
-
- return o, merr
-}
-
-func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.benchmarkPrice.Err = res.Error
- } else if val, err := toBigInt(res.Value); err != nil {
- return fmt.Errorf("failed to parse BenchmarkPrice: %w", err)
- } else {
- o.benchmarkPrice.Val = val
- }
- return nil
-}
-
-func setBid(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.bid.Err = res.Error
- } else if val, err := toBigInt(res.Value); err != nil {
- return fmt.Errorf("failed to parse Bid: %w", err)
- } else {
- o.bid.Val = val
- }
- return nil
-}
-
-func setAsk(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.ask.Err = res.Error
- } else if val, err := toBigInt(res.Value); err != nil {
- return fmt.Errorf("failed to parse Ask: %w", err)
- } else {
- o.ask.Val = val
- }
- return nil
-}
-
-// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod).
-// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod.
-func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) {
- vars := pipeline.NewVarsFrom(map[string]interface{}{
- "jb": map[string]interface{}{
- "databaseID": ds.jb.ID,
- "externalJobID": ds.jb.ExternalJobID,
- "name": ds.jb.Name.ValueOrZero(),
- },
- })
-
- run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars)
- if err != nil {
- return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID)
- }
-
- return run, trrs, err
-}
-
-func (ds *datasource) setLatestBlocks(ctx context.Context, obs *v1types.Observation) error {
- latestBlocks, err := ds.mercuryChainReader.LatestHeads(ctx, nBlocksObservation)
-
- if err != nil {
- ds.lggr.Errorw("failed to read latest blocks", "err", err)
- return err
- }
-
- if len(latestBlocks) < nBlocksObservation {
- ds.insufficientBlocksCounter.Inc()
- ds.lggr.Warnw("Insufficient blocks", "latestBlocks", latestBlocks, "lenLatestBlocks", len(latestBlocks), "nBlocksObservation", nBlocksObservation)
- }
-
- // TODO: remove with https://smartcontract-it.atlassian.net/browse/BCF-2209
- if len(latestBlocks) == 0 {
- obsErr := fmt.Errorf("no blocks available")
- ds.zeroBlocksCounter.Inc()
- obs.CurrentBlockNum.Err = obsErr
- obs.CurrentBlockHash.Err = obsErr
- obs.CurrentBlockTimestamp.Err = obsErr
- } else {
- obs.CurrentBlockNum.Val = int64(latestBlocks[0].Number)
- obs.CurrentBlockHash.Val = latestBlocks[0].Hash
- obs.CurrentBlockTimestamp.Val = latestBlocks[0].Timestamp
- }
-
- for _, block := range latestBlocks {
- obs.LatestBlocks = append(
- obs.LatestBlocks,
- v1types.NewBlock(int64(block.Number), block.Hash, block.Timestamp))
- }
-
- return nil
-}
diff --git a/core/services/relay/evm/mercury/v1/data_source_test.go b/core/services/relay/evm/mercury/v1/data_source_test.go
deleted file mode 100644
index 9200d8e3abc..00000000000
--- a/core/services/relay/evm/mercury/v1/data_source_test.go
+++ /dev/null
@@ -1,469 +0,0 @@
-package v1
-
-import (
- "context"
- "fmt"
- "io"
- "math/big"
- "math/rand"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/pkg/errors"
- "github.com/stretchr/testify/assert"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
-
- "github.com/smartcontractkit/chainlink-integrations/evm/assets"
- "github.com/smartcontractkit/chainlink-integrations/evm/heads/headstest"
- evmtypes "github.com/smartcontractkit/chainlink-integrations/evm/types"
- "github.com/smartcontractkit/chainlink-integrations/evm/utils"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm"
- mercurymocks "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/mocks"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reportcodecv1 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/reportcodec"
-)
-
-var _ mercurytypes.ServerFetcher = &mockFetcher{}
-
-type mockFetcher struct {
- num *int64
- err error
-}
-
-func (m *mockFetcher) FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) {
- return m.num, m.err
-}
-
-func (m *mockFetcher) LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error) {
- return nil, nil
-}
-
-func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) {
- return 0, nil
-}
-
-type mockSaver struct {
- r *pipeline.Run
-}
-
-func (ms *mockSaver) Save(r *pipeline.Run) {
- ms.r = r
-}
-
-type mockORM struct {
- report []byte
- err error
-}
-
-func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) {
- return m.report, m.err
-}
-
-type mockChainReader struct {
- err error
- obs []mercurytypes.Head
-}
-
-func (m *mockChainReader) LatestHeads(context.Context, int) ([]mercurytypes.Head, error) {
- return m.obs, m.err
-}
-
-func TestMercury_Observe(t *testing.T) {
- orm := &mockORM{}
- lggr := logger.Test(t)
- ds := NewDataSource(orm, nil, job.Job{}, pipeline.Spec{}, lggr, nil, nil, nil, nil, nil, mercuryutils.FeedID{})
- ctx := testutils.Context(t)
- repts := ocrtypes.ReportTimestamp{}
-
- fetcher := &mockFetcher{}
- ds.fetcher = fetcher
-
- saver := &mockSaver{}
- ds.saver = saver
-
- trrs := []pipeline.TaskRunResult{
- {
- // benchmark price
- Result: pipeline.Result{Value: "122.345"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // bid
- Result: pipeline.Result{Value: "121.993"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // ask
- Result: pipeline.Result{Value: "123.111"},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- runner := &mercurymocks.MockRunner{
- Trrs: trrs,
- }
- ds.pipelineRunner = runner
-
- spec := pipeline.Spec{}
- ds.spec = spec
-
- h := headstest.NewTracker[*evmtypes.Head, common.Hash](t)
- ds.mercuryChainReader = evm.NewMercuryChainReader(h)
-
- head := &evmtypes.Head{
- Number: int64(rand.Int31()),
- Hash: utils.NewHash(),
- Timestamp: time.Now(),
- }
- h.On("LatestChain").Return(head)
-
- t.Run("when fetchMaxFinalizedBlockNum=true", func(t *testing.T) {
- t.Run("with latest report in database", func(t *testing.T) {
- orm.report = buildSampleV1Report()
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedBlockNumber.Err)
- assert.Equal(t, int64(143), obs.MaxFinalizedBlockNumber.Val)
- })
- t.Run("if querying latest report fails", func(t *testing.T) {
- orm.report = nil
- orm.err = errors.New("something exploded")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "something exploded")
- assert.Zero(t, obs.MaxFinalizedBlockNumber.Val)
- })
- t.Run("if decoding latest report fails", func(t *testing.T) {
- orm.report = []byte{1, 2, 3}
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- assert.Zero(t, obs.MaxFinalizedBlockNumber.Val)
- })
-
- orm.report = nil
- orm.err = nil
-
- t.Run("without latest report in database", func(t *testing.T) {
- t.Run("if FetchInitialMaxFinalizedBlockNumber returns error", func(t *testing.T) {
- fetcher.err = errors.New("mock fetcher error")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "mock fetcher error")
- assert.Zero(t, obs.MaxFinalizedBlockNumber.Val)
- })
- t.Run("if FetchInitialMaxFinalizedBlockNumber succeeds", func(t *testing.T) {
- fetcher.err = nil
- var num int64 = 32
- fetcher.num = &num
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedBlockNumber.Err)
- assert.Equal(t, int64(32), obs.MaxFinalizedBlockNumber.Val)
- })
- t.Run("if FetchInitialMaxFinalizedBlockNumber returns nil (new feed) and initialBlockNumber is set", func(t *testing.T) {
- var initialBlockNumber int64 = 50
- ds.initialBlockNumber = &initialBlockNumber
- fetcher.err = nil
- fetcher.num = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedBlockNumber.Err)
- assert.Equal(t, int64(49), obs.MaxFinalizedBlockNumber.Val)
- })
- t.Run("if FetchInitialMaxFinalizedBlockNumber returns nil (new feed) and initialBlockNumber is not set", func(t *testing.T) {
- ds.initialBlockNumber = nil
- t.Run("if current block num is valid", func(t *testing.T) {
- fetcher.err = nil
- fetcher.num = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedBlockNumber.Err)
- assert.Equal(t, head.Number-1, obs.MaxFinalizedBlockNumber.Val)
- })
- t.Run("if no current block available", func(t *testing.T) {
- h2 := headstest.NewTracker[*evmtypes.Head, common.Hash](t)
- h2.On("LatestChain").Return((*evmtypes.Head)(nil))
- ds.mercuryChainReader = evm.NewMercuryChainReader(h2)
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "FetchInitialMaxFinalizedBlockNumber returned empty LatestReport; this is a new feed. No initialBlockNumber was set, tried to use current block number to determine maxFinalizedBlockNumber but got error: no blocks available")
- })
- })
- })
- })
-
- ds.mercuryChainReader = evm.NewMercuryChainReader(h)
-
- t.Run("when fetchMaxFinalizedBlockNum=false", func(t *testing.T) {
- t.Run("when run execution fails, returns error", func(t *testing.T) {
- t.Cleanup(func() {
- runner.Err = nil
- })
- runner.Err = errors.New("run execution failed")
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed")
- })
- t.Run("makes observation using pipeline, when all tasks succeed", func(t *testing.T) {
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- assert.NoError(t, obs.BenchmarkPrice.Err)
- assert.Equal(t, big.NewInt(121), obs.Bid.Val)
- assert.NoError(t, obs.Bid.Err)
- assert.Equal(t, big.NewInt(123), obs.Ask.Val)
- assert.NoError(t, obs.Ask.Err)
- assert.Equal(t, head.Number, obs.CurrentBlockNum.Val)
- assert.NoError(t, obs.CurrentBlockNum.Err)
- assert.Equal(t, fmt.Sprintf("%x", head.Hash), fmt.Sprintf("%x", obs.CurrentBlockHash.Val))
- assert.NoError(t, obs.CurrentBlockHash.Err)
- assert.Equal(t, uint64(head.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val)
- assert.NoError(t, obs.CurrentBlockTimestamp.Err)
-
- assert.Zero(t, obs.MaxFinalizedBlockNumber.Val)
- assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "fetchMaxFinalizedBlockNum=false")
- })
- t.Run("makes observation using pipeline, with erroring tasks", func(t *testing.T) {
- for i := range trrs {
- trrs[i].Result.Error = fmt.Errorf("task error %d", i)
- }
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Zero(t, obs.BenchmarkPrice.Val)
- assert.EqualError(t, obs.BenchmarkPrice.Err, "task error 0")
- assert.Zero(t, obs.Bid.Val)
- assert.EqualError(t, obs.Bid.Err, "task error 1")
- assert.Zero(t, obs.Ask.Val)
- assert.EqualError(t, obs.Ask.Err, "task error 2")
- assert.Equal(t, head.Number, obs.CurrentBlockNum.Val)
- assert.NoError(t, obs.CurrentBlockNum.Err)
- assert.Equal(t, fmt.Sprintf("%x", head.Hash), fmt.Sprintf("%x", obs.CurrentBlockHash.Val))
- assert.NoError(t, obs.CurrentBlockHash.Err)
- assert.Equal(t, uint64(head.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val)
- assert.NoError(t, obs.CurrentBlockTimestamp.Err)
-
- assert.Zero(t, obs.MaxFinalizedBlockNumber.Val)
- assert.EqualError(t, obs.MaxFinalizedBlockNumber.Err, "fetchMaxFinalizedBlockNum=false")
- })
- t.Run("makes partial observation using pipeline, if only some results have errored", func(t *testing.T) {
- trrs[0].Result.Error = fmt.Errorf("task failed")
- trrs[1].Result.Value = "33"
- trrs[1].Result.Error = nil
- trrs[2].Result.Value = nil
- trrs[2].Result.Error = fmt.Errorf("task failed")
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Zero(t, obs.BenchmarkPrice.Val)
- assert.EqualError(t, obs.BenchmarkPrice.Err, "task failed")
- assert.Equal(t, big.NewInt(33), obs.Bid.Val)
- assert.NoError(t, obs.Bid.Err)
- assert.Zero(t, obs.Ask.Val)
- assert.EqualError(t, obs.Ask.Err, "task failed")
- })
- t.Run("returns error if at least one result is unparseable", func(t *testing.T) {
- trrs[0].Result.Error = fmt.Errorf("task failed")
- trrs[1].Result.Value = "foo"
- trrs[1].Result.Error = nil
- trrs[2].Result.Value = "123456"
- trrs[2].Result.Error = nil
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while parsing run results: failed to parse Bid: can't convert foo to decimal")
- })
- t.Run("saves run", func(t *testing.T) {
- for i := range trrs {
- trrs[i].Result.Value = "123"
- trrs[i].Result.Error = nil
- }
-
- _, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, int64(42), saver.r.ID)
- })
- })
-
- t.Run("LatestBlocks is populated correctly", func(t *testing.T) {
- t.Run("when chain length is zero", func(t *testing.T) {
- ht2 := headstest.NewTracker[*evmtypes.Head, common.Hash](t)
- ht2.On("LatestChain").Return((*evmtypes.Head)(nil))
- ds.mercuryChainReader = evm.NewMercuryChainReader(ht2)
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Len(t, obs.LatestBlocks, 0)
-
- ht2.AssertExpectations(t)
- })
- t.Run("when chain is too short", func(t *testing.T) {
- h4 := &evmtypes.Head{
- Number: 4,
- }
- h5 := &evmtypes.Head{
- Number: 5,
- }
- h5.Parent.Store(h4)
- h6 := &evmtypes.Head{
- Number: 6,
- }
- h6.Parent.Store(h5)
-
- ht2 := headstest.NewTracker[*evmtypes.Head, common.Hash](t)
- ht2.On("LatestChain").Return(h6)
- ds.mercuryChainReader = evm.NewMercuryChainReader(ht2)
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Len(t, obs.LatestBlocks, 3)
- assert.Equal(t, 6, int(obs.LatestBlocks[0].Num))
- assert.Equal(t, 5, int(obs.LatestBlocks[1].Num))
- assert.Equal(t, 4, int(obs.LatestBlocks[2].Num))
-
- ht2.AssertExpectations(t)
- })
- t.Run("when chain is long enough", func(t *testing.T) {
- heads := make([]*evmtypes.Head, nBlocksObservation+5)
- for i := range heads {
- heads[i] = &evmtypes.Head{Number: int64(i)}
- if i > 0 {
- heads[i].Parent.Store(heads[i-1])
- }
- }
-
- ht2 := headstest.NewTracker[*evmtypes.Head, common.Hash](t)
- ht2.On("LatestChain").Return(heads[len(heads)-1])
- ds.mercuryChainReader = evm.NewMercuryChainReader(ht2)
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Len(t, obs.LatestBlocks, nBlocksObservation)
- highestBlockNum := heads[len(heads)-1].Number
- for i := range obs.LatestBlocks {
- assert.Equal(t, int(highestBlockNum)-i, int(obs.LatestBlocks[i].Num))
- }
-
- ht2.AssertExpectations(t)
- })
-
- t.Run("when chain reader returns an error", func(t *testing.T) {
- ds.mercuryChainReader = &mockChainReader{
- err: io.EOF,
- obs: nil,
- }
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.Error(t, err)
- assert.Equal(t, obs, v1.Observation{})
- })
- })
-}
-
-func TestMercury_SetLatestBlocks(t *testing.T) {
- lggr := logger.Test(t)
- ds := NewDataSource(nil, nil, job.Job{}, pipeline.Spec{}, lggr, nil, nil, nil, nil, nil, mercuryutils.FeedID{})
-
- h := evmtypes.Head{
- Number: testutils.NewRandomPositiveInt64(),
- Hash: utils.NewHash(),
- ParentHash: utils.NewHash(),
- Timestamp: time.Now(),
- BaseFeePerGas: assets.NewWeiI(testutils.NewRandomPositiveInt64()),
- ReceiptsRoot: utils.NewHash(),
- TransactionsRoot: utils.NewHash(),
- StateRoot: utils.NewHash(),
- }
-
- t.Run("returns head from headtracker if present", func(t *testing.T) {
- headTracker := headstest.NewTracker[*evmtypes.Head, common.Hash](t)
- headTracker.On("LatestChain").Return(&h, nil)
- ds.mercuryChainReader = evm.NewMercuryChainReader(headTracker)
-
- obs := v1.Observation{}
- err := ds.setLatestBlocks(testutils.Context(t), &obs)
-
- assert.NoError(t, err)
- assert.Equal(t, h.Number, obs.CurrentBlockNum.Val)
- assert.Equal(t, h.Hash.Bytes(), obs.CurrentBlockHash.Val)
- assert.Equal(t, uint64(h.Timestamp.Unix()), obs.CurrentBlockTimestamp.Val)
-
- assert.Len(t, obs.LatestBlocks, 1)
- headTracker.AssertExpectations(t)
- })
-
- t.Run("if headtracker returns nil head", func(t *testing.T) {
- headTracker := headstest.NewTracker[*evmtypes.Head, common.Hash](t)
- // This can happen in some cases e.g. RPC node is offline
- headTracker.On("LatestChain").Return((*evmtypes.Head)(nil))
- ds.mercuryChainReader = evm.NewChainReader(headTracker)
- obs := v1.Observation{}
- err := ds.setLatestBlocks(testutils.Context(t), &obs)
-
- assert.NoError(t, err)
- assert.Zero(t, obs.CurrentBlockNum.Val)
- assert.Zero(t, obs.CurrentBlockHash.Val)
- assert.Zero(t, obs.CurrentBlockTimestamp.Val)
- assert.EqualError(t, obs.CurrentBlockNum.Err, "no blocks available")
- assert.EqualError(t, obs.CurrentBlockHash.Err, "no blocks available")
- assert.EqualError(t, obs.CurrentBlockTimestamp.Err, "no blocks available")
-
- assert.Len(t, obs.LatestBlocks, 0)
- headTracker.AssertExpectations(t)
- })
-}
-
-var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
-
-func buildSampleV1Report() []byte {
- feedID := sampleFeedID
- timestamp := uint32(42)
- bp := big.NewInt(242)
- bid := big.NewInt(243)
- ask := big.NewInt(244)
- currentBlockNumber := uint64(143)
- currentBlockHash := utils.NewHash()
- currentBlockTimestamp := uint64(123)
- validFromBlockNum := uint64(142)
-
- b, err := reportcodecv1.ReportTypes.Pack(feedID, timestamp, bp, bid, ask, currentBlockNumber, currentBlockHash, currentBlockTimestamp, validFromBlockNum)
- if err != nil {
- panic(err)
- }
- return b
-}
diff --git a/core/services/relay/evm/mercury/v1/reportcodec/report_codec.go b/core/services/relay/evm/mercury/v1/reportcodec/report_codec.go
deleted file mode 100644
index 52cdeff96cb..00000000000
--- a/core/services/relay/evm/mercury/v1/reportcodec/report_codec.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package reportcodec
-
-import (
- "context"
- "errors"
- "fmt"
- "math"
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
- pkgerrors "github.com/pkg/errors"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reporttypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v1/types"
-)
-
-// NOTE:
-// This report codec is based on the original median evmreportcodec
-// here:
-// https://github.com/smartcontractkit/offchain-reporting/blob/master/lib/offchainreporting2/reportingplugin/median/evmreportcodec/reportcodec.go
-var ReportTypes = reporttypes.GetSchema()
-var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word
-
-var _ v1.ReportCodec = &ReportCodec{}
-
-type ReportCodec struct {
- logger logger.Logger
- feedID utils.FeedID
-}
-
-func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec {
- return &ReportCodec{lggr, feedID}
-}
-
-func (r *ReportCodec) BuildReport(ctx context.Context, rf v1.ReportFields) (ocrtypes.Report, error) {
- var merr error
- if rf.BenchmarkPrice == nil {
- merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil"))
- }
- if rf.Bid == nil {
- merr = errors.Join(merr, errors.New("bid may not be nil"))
- }
- if rf.Ask == nil {
- merr = errors.Join(merr, errors.New("ask may not be nil"))
- }
- if len(rf.CurrentBlockHash) != 32 {
- merr = errors.Join(merr, fmt.Errorf("invalid length for currentBlockHash, expected: 32, got: %d", len(rf.CurrentBlockHash)))
- }
- if merr != nil {
- return nil, merr
- }
- var currentBlockHash common.Hash
- copy(currentBlockHash[:], rf.CurrentBlockHash)
-
- reportBytes, err := ReportTypes.Pack(r.feedID, rf.Timestamp, rf.BenchmarkPrice, rf.Bid, rf.Ask, uint64(rf.CurrentBlockNum), currentBlockHash, uint64(rf.ValidFromBlockNum), rf.CurrentBlockTimestamp)
- return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob")
-}
-
-// Maximum length in bytes of Report returned by BuildReport. Used for
-// defending against spam attacks.
-func (r *ReportCodec) MaxReportLength(ctx context.Context, n int) (int, error) {
- return maxReportLength, nil
-}
-
-func (r *ReportCodec) CurrentBlockNumFromReport(ctx context.Context, report ocrtypes.Report) (int64, error) {
- decoded, err := r.Decode(report)
- if err != nil {
- return 0, err
- }
- if decoded.CurrentBlockNum > math.MaxInt64 {
- return 0, fmt.Errorf("CurrentBlockNum=%d overflows max int64", decoded.CurrentBlockNum)
- }
- return int64(decoded.CurrentBlockNum), nil
-}
-
-func (r *ReportCodec) Decode(report ocrtypes.Report) (*reporttypes.Report, error) {
- return reporttypes.Decode(report)
-}
-
-func (r *ReportCodec) BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) {
- decoded, err := r.Decode(report)
- if err != nil {
- return nil, err
- }
- return decoded.BenchmarkPrice, nil
-}
-
-func (r *ReportCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) {
- decoded, err := r.Decode(report)
- if err != nil {
- return 0, err
- }
- return decoded.ObservationsTimestamp, nil
-}
diff --git a/core/services/relay/evm/mercury/v1/reportcodec/report_codec_test.go b/core/services/relay/evm/mercury/v1/reportcodec/report_codec_test.go
deleted file mode 100644
index 249091d7fbc..00000000000
--- a/core/services/relay/evm/mercury/v1/reportcodec/report_codec_test.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package reportcodec
-
-import (
- "fmt"
- "math"
- "math/big"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2/types"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
- "github.com/smartcontractkit/chainlink-integrations/evm/utils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
-)
-
-var hash = hexutil.MustDecode("0x552c2cea3ab43bae137d89ee6142a01db3ae2b5678bc3c9bd5f509f537bea57b")
-
-func newValidReportFields() v1.ReportFields {
- return v1.ReportFields{
- Timestamp: 242,
- BenchmarkPrice: big.NewInt(243),
- Bid: big.NewInt(244),
- Ask: big.NewInt(245),
- CurrentBlockNum: 248,
- CurrentBlockHash: hash,
- ValidFromBlockNum: 46,
- CurrentBlockTimestamp: 123,
- }
-}
-
-func Test_ReportCodec(t *testing.T) {
- r := ReportCodec{}
-
- t.Run("BuildReport errors on zero fields", func(t *testing.T) {
- ctx := testutils.Context(t)
- _, err := r.BuildReport(ctx, v1.ReportFields{})
- require.Error(t, err)
- assert.Contains(t, err.Error(), "benchmarkPrice may not be nil")
- assert.Contains(t, err.Error(), "bid may not be nil")
- assert.Contains(t, err.Error(), "ask may not be nil")
- assert.Contains(t, err.Error(), "invalid length for currentBlockHash, expected: 32, got: 0")
- })
-
- t.Run("BuildReport constructs a report from observations", func(t *testing.T) {
- ctx := testutils.Context(t)
- rf := newValidReportFields()
- // only need to test happy path since validations are done in relaymercury
-
- report, err := r.BuildReport(ctx, rf)
- require.NoError(t, err)
-
- reportElems := make(map[string]interface{})
- err = ReportTypes.UnpackIntoMap(reportElems, report)
- require.NoError(t, err)
-
- assert.Equal(t, int(reportElems["observationsTimestamp"].(uint32)), 242)
- assert.Equal(t, reportElems["benchmarkPrice"].(*big.Int).Int64(), int64(243))
- assert.Equal(t, reportElems["bid"].(*big.Int).Int64(), int64(244))
- assert.Equal(t, reportElems["ask"].(*big.Int).Int64(), int64(245))
- assert.Equal(t, reportElems["currentBlockNum"].(uint64), uint64(248))
- assert.Equal(t, common.Hash(reportElems["currentBlockHash"].([32]byte)), common.BytesToHash(hash))
- assert.Equal(t, reportElems["currentBlockTimestamp"].(uint64), uint64(123))
- assert.Equal(t, reportElems["validFromBlockNum"].(uint64), uint64(46))
-
- assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf8, 0x55, 0x2c, 0x2c, 0xea, 0x3a, 0xb4, 0x3b, 0xae, 0x13, 0x7d, 0x89, 0xee, 0x61, 0x42, 0xa0, 0x1d, 0xb3, 0xae, 0x2b, 0x56, 0x78, 0xbc, 0x3c, 0x9b, 0xd5, 0xf5, 0x9, 0xf5, 0x37, 0xbe, 0xa5, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b}, report)
-
- max, err := r.MaxReportLength(ctx, 4)
- require.NoError(t, err)
- assert.LessOrEqual(t, len(report), max)
-
- t.Run("Decode decodes the report", func(t *testing.T) {
- decoded, err := r.Decode(report)
- require.NoError(t, err)
-
- require.NotNil(t, decoded)
-
- assert.Equal(t, uint32(242), decoded.ObservationsTimestamp)
- assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice)
- assert.Equal(t, big.NewInt(244), decoded.Bid)
- assert.Equal(t, big.NewInt(245), decoded.Ask)
- assert.Equal(t, uint64(248), decoded.CurrentBlockNum)
- assert.Equal(t, [32]byte(common.BytesToHash(hash)), decoded.CurrentBlockHash)
- assert.Equal(t, uint64(123), decoded.CurrentBlockTimestamp)
- assert.Equal(t, uint64(46), decoded.ValidFromBlockNum)
- })
- })
-
- t.Run("Decode errors on invalid report", func(t *testing.T) {
- _, err := r.Decode([]byte{1, 2, 3})
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
-
- longBad := make([]byte, 64)
- for i := 0; i < len(longBad); i++ {
- longBad[i] = byte(i)
- }
- _, err = r.Decode(longBad)
- assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value")
- })
-}
-
-func buildSampleReport(bn, validFromBn int64, feedID [32]byte) []byte {
- timestamp := uint32(42)
- bp := big.NewInt(242)
- bid := big.NewInt(243)
- ask := big.NewInt(244)
- currentBlockNumber := uint64(bn)
- currentBlockHash := utils.NewHash()
- currentBlockTimestamp := uint64(123)
- validFromBlockNum := uint64(validFromBn)
-
- b, err := ReportTypes.Pack(feedID, timestamp, bp, bid, ask, currentBlockNumber, currentBlockHash, validFromBlockNum, currentBlockTimestamp)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-func Test_ReportCodec_CurrentBlockNumFromReport(t *testing.T) {
- r := ReportCodec{}
- feedID := utils.NewHash()
-
- var validBn int64 = 42
- var invalidBn int64 = -1
-
- t.Run("CurrentBlockNumFromReport extracts the current block number from a valid report", func(t *testing.T) {
- report := buildSampleReport(validBn, 143, feedID)
-
- ctx := testutils.Context(t)
- bn, err := r.CurrentBlockNumFromReport(ctx, report)
- require.NoError(t, err)
-
- assert.Equal(t, validBn, bn)
- })
- t.Run("CurrentBlockNumFromReport returns error if block num is too large", func(t *testing.T) {
- report := buildSampleReport(invalidBn, 143, feedID)
-
- ctx := testutils.Context(t)
- _, err := r.CurrentBlockNumFromReport(ctx, report)
- require.Error(t, err)
-
- assert.Contains(t, err.Error(), "CurrentBlockNum=18446744073709551615 overflows max int64")
- })
-}
-
-func (r *ReportCodec) ValidFromBlockNumFromReport(report ocrtypes.Report) (int64, error) {
- decoded, err := r.Decode(report)
- if err != nil {
- return 0, err
- }
- n := decoded.ValidFromBlockNum
- if n > math.MaxInt64 {
- return 0, fmt.Errorf("ValidFromBlockNum=%d overflows max int64", n)
- }
- return int64(n), nil
-}
-
-func Test_ReportCodec_ValidFromBlockNumFromReport(t *testing.T) {
- r := ReportCodec{}
- feedID := utils.NewHash()
-
- t.Run("ValidFromBlockNumFromReport extracts the valid from block number from a valid report", func(t *testing.T) {
- report := buildSampleReport(42, 999, feedID)
-
- bn, err := r.ValidFromBlockNumFromReport(report)
- require.NoError(t, err)
-
- assert.Equal(t, int64(999), bn)
- })
- t.Run("ValidFromBlockNumFromReport returns error if valid from block number is too large", func(t *testing.T) {
- report := buildSampleReport(42, -1, feedID)
-
- _, err := r.ValidFromBlockNumFromReport(report)
- require.Error(t, err)
-
- assert.Contains(t, err.Error(), "ValidFromBlockNum=18446744073709551615 overflows max int64")
- })
-}
-
-func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) {
- r := ReportCodec{}
- feedID := utils.NewHash()
-
- t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) {
- ctx := testutils.Context(t)
- report := buildSampleReport(42, 999, feedID)
-
- bp, err := r.BenchmarkPriceFromReport(ctx, report)
- require.NoError(t, err)
-
- assert.Equal(t, big.NewInt(242), bp)
- })
- t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) {
- ctx := testutils.Context(t)
- _, err := r.BenchmarkPriceFromReport(ctx, []byte{1, 2, 3})
- require.Error(t, err)
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- })
-}
diff --git a/core/services/relay/evm/mercury/v1/types/types.go b/core/services/relay/evm/mercury/v1/types/types.go
deleted file mode 100644
index 709fd856a21..00000000000
--- a/core/services/relay/evm/mercury/v1/types/types.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package reporttypes
-
-import (
- "fmt"
- "math/big"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
-)
-
-var schema = GetSchema()
-
-func GetSchema() abi.Arguments {
- mustNewType := func(t string) abi.Type {
- result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{})
- if err != nil {
- panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err))
- }
- return result
- }
- return abi.Arguments([]abi.Argument{
- {Name: "feedId", Type: mustNewType("bytes32")},
- {Name: "observationsTimestamp", Type: mustNewType("uint32")},
- {Name: "benchmarkPrice", Type: mustNewType("int192")},
- {Name: "bid", Type: mustNewType("int192")},
- {Name: "ask", Type: mustNewType("int192")},
- {Name: "currentBlockNum", Type: mustNewType("uint64")},
- {Name: "currentBlockHash", Type: mustNewType("bytes32")},
- {Name: "validFromBlockNum", Type: mustNewType("uint64")},
- {Name: "currentBlockTimestamp", Type: mustNewType("uint64")},
- })
-}
-
-type Report struct {
- FeedId [32]byte
- ObservationsTimestamp uint32
- BenchmarkPrice *big.Int
- Bid *big.Int
- Ask *big.Int
- CurrentBlockNum uint64
- CurrentBlockHash [32]byte
- ValidFromBlockNum uint64
- CurrentBlockTimestamp uint64
-}
-
-// Decode is made available to external users (i.e. mercury server)
-func Decode(report []byte) (*Report, error) {
- values, err := schema.Unpack(report)
- if err != nil {
- return nil, fmt.Errorf("failed to decode report: %w", err)
- }
- decoded := new(Report)
- if err = schema.Copy(decoded, values); err != nil {
- return nil, fmt.Errorf("failed to copy report values to struct: %w", err)
- }
- return decoded, nil
-}
diff --git a/core/services/relay/evm/mercury/v2/data_source.go b/core/services/relay/evm/mercury/v2/data_source.go
deleted file mode 100644
index dc74b59645c..00000000000
--- a/core/services/relay/evm/mercury/v2/data_source.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package v2
-
-import (
- "context"
- "fmt"
- "math/big"
- "sync"
-
- pkgerrors "github.com/pkg/errors"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v2types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
- v2 "github.com/smartcontractkit/chainlink-data-streams/mercury/v2"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercurytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2/reportcodec"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-type Runner interface {
- ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error)
-}
-
-type LatestReportFetcher interface {
- LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error)
- LatestTimestamp(context.Context) (int64, error)
-}
-
-type datasource struct {
- pipelineRunner Runner
- jb job.Job
- spec pipeline.Spec
- feedID mercuryutils.FeedID
- lggr logger.Logger
- saver ocrcommon.Saver
- orm types.DataSourceORM
- codec reportcodec.ReportCodec
-
- fetcher LatestReportFetcher
- linkFeedID mercuryutils.FeedID
- nativeFeedID mercuryutils.FeedID
-
- mu sync.RWMutex
-
- chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData
-}
-
-var _ v2.DataSource = &datasource{}
-
-func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, s ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource {
- return &datasource{pr, jb, spec, feedID, lggr, s, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan}
-}
-
-func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedTimestamp bool) (obs v2types.Observation, pipelineExecutionErr error) {
- var wg sync.WaitGroup
- ctx, cancel := context.WithCancel(ctx)
-
- if fetchMaxFinalizedTimestamp {
- wg.Add(1)
- go func() {
- defer wg.Done()
- latest, dbErr := ds.orm.LatestReport(ctx, ds.feedID)
- if dbErr != nil {
- obs.MaxFinalizedTimestamp.Err = dbErr
- return
- }
- if latest != nil {
- maxFinalizedBlockNumber, decodeErr := ds.codec.ObservationTimestampFromReport(ctx, latest)
- obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = int64(maxFinalizedBlockNumber), decodeErr
- return
- }
- obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = ds.fetcher.LatestTimestamp(ctx)
- }()
- }
-
- var trrs pipeline.TaskRunResults
- wg.Add(1)
- go func() {
- defer wg.Done()
- var run *pipeline.Run
- run, trrs, pipelineExecutionErr = ds.executeRun(ctx)
- if pipelineExecutionErr != nil {
- cancel()
- pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr)
- return
- }
-
- ds.saver.Save(run)
-
- var parsed parseOutput
- parsed, pipelineExecutionErr = ds.parse(trrs)
- if pipelineExecutionErr != nil {
- cancel()
- // This is not expected under normal circumstances
- ds.lggr.Errorw("Observe failed while parsing run results", "err", pipelineExecutionErr)
- pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr)
- return
- }
- obs.BenchmarkPrice = parsed.benchmarkPrice
- }()
-
- var isLink, isNative bool
- if len(ds.jb.OCR2OracleSpec.PluginConfig) == 0 {
- obs.LinkPrice.Val = v2.MissingPrice
- } else if ds.feedID == ds.linkFeedID {
- isLink = true
- } else {
- wg.Add(1)
- go func() {
- defer wg.Done()
- obs.LinkPrice.Val, obs.LinkPrice.Err = ds.fetcher.LatestPrice(ctx, ds.linkFeedID)
- if obs.LinkPrice.Val == nil && obs.LinkPrice.Err == nil {
- mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.linkFeedID.String()).Inc()
- ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing LINK feed, using sentinel value of %s", v2.MissingPrice), "linkFeedID", ds.linkFeedID)
- obs.LinkPrice.Val = v2.MissingPrice
- } else if obs.LinkPrice.Err != nil {
- mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.linkFeedID.String()).Inc()
- ds.lggr.Errorw("Mercury server returned error querying LINK price feed", "err", obs.LinkPrice.Err, "linkFeedID", ds.linkFeedID)
- }
- }()
- }
-
- if len(ds.jb.OCR2OracleSpec.PluginConfig) == 0 {
- obs.NativePrice.Val = v2.MissingPrice
- } else if ds.feedID == ds.nativeFeedID {
- isNative = true
- } else {
- wg.Add(1)
- go func() {
- defer wg.Done()
- obs.NativePrice.Val, obs.NativePrice.Err = ds.fetcher.LatestPrice(ctx, ds.nativeFeedID)
- if obs.NativePrice.Val == nil && obs.NativePrice.Err == nil {
- mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
- ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing native feed, using sentinel value of %s", v2.MissingPrice), "nativeFeedID", ds.nativeFeedID)
- obs.NativePrice.Val = v2.MissingPrice
- } else if obs.NativePrice.Err != nil {
- mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
- ds.lggr.Errorw("Mercury server returned error querying native price feed", "err", obs.NativePrice.Err, "nativeFeedID", ds.nativeFeedID)
- }
- }()
- }
-
- wg.Wait()
- cancel()
-
- if pipelineExecutionErr != nil {
- return
- }
-
- if isLink || isNative {
- // run has now completed so it is safe to use benchmark price
- if isLink {
- // This IS the LINK feed, use our observed price
- obs.LinkPrice.Val, obs.LinkPrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
- }
- if isNative {
- // This IS the native feed, use our observed price
- obs.NativePrice.Val, obs.NativePrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
- }
- }
-
- ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{
- V2Observation: &obs,
- TaskRunResults: trrs,
- RepTimestamp: repts,
- FeedVersion: mercuryutils.REPORT_V2,
- FetchMaxFinalizedTimestamp: fetchMaxFinalizedTimestamp,
- IsLinkFeed: isLink,
- IsNativeFeed: isNative,
- })
-
- return obs, nil
-}
-
-func toBigInt(val interface{}) (*big.Int, error) {
- dec, err := utils.ToDecimal(val)
- if err != nil {
- return nil, err
- }
- return dec.BigInt(), nil
-}
-
-type parseOutput struct {
- benchmarkPrice mercury.ObsResult[*big.Int]
-}
-
-func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, merr error) {
- var finaltrrs []pipeline.TaskRunResult
- for _, trr := range trrs {
- // only return terminal trrs from executeRun
- if trr.IsTerminal() {
- finaltrrs = append(finaltrrs, trr)
- }
- }
-
- if len(finaltrrs) != 1 {
- return o, fmt.Errorf("invalid number of results, expected: 1, got: %d", len(finaltrrs))
- }
-
- return o, setBenchmarkPrice(&o, finaltrrs[0].Result)
-}
-
-func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.benchmarkPrice.Err = res.Error
- return res.Error
- }
- val, err := toBigInt(res.Value)
- if err != nil {
- return fmt.Errorf("failed to parse BenchmarkPrice: %w", err)
- }
- o.benchmarkPrice.Val = val
- return nil
-}
-
-// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod).
-// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod.
-func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) {
- vars := pipeline.NewVarsFrom(map[string]interface{}{
- "jb": map[string]interface{}{
- "databaseID": ds.jb.ID,
- "externalJobID": ds.jb.ExternalJobID,
- "name": ds.jb.Name.ValueOrZero(),
- },
- })
-
- run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars)
- if err != nil {
- return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID)
- }
-
- return run, trrs, err
-}
diff --git a/core/services/relay/evm/mercury/v2/data_source_test.go b/core/services/relay/evm/mercury/v2/data_source_test.go
deleted file mode 100644
index ae598937d90..00000000000
--- a/core/services/relay/evm/mercury/v2/data_source_test.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package v2
-
-import (
- "context"
- "math/big"
- "testing"
-
- "github.com/pkg/errors"
- "github.com/stretchr/testify/assert"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v2 "github.com/smartcontractkit/chainlink-data-streams/mercury/v2"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- mercurymocks "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reportcodecv2 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2/reportcodec"
-)
-
-var _ mercury.ServerFetcher = &mockFetcher{}
-
-type mockFetcher struct {
- ts int64
- tsErr error
- linkPrice *big.Int
- linkPriceErr error
- nativePrice *big.Int
- nativePriceErr error
-}
-
-var feedId utils.FeedID = [32]byte{1}
-var linkFeedId utils.FeedID = [32]byte{2}
-var nativeFeedId utils.FeedID = [32]byte{3}
-
-func (m *mockFetcher) FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) {
- return nil, nil
-}
-
-func (m *mockFetcher) LatestPrice(ctx context.Context, fId [32]byte) (*big.Int, error) {
- if fId == linkFeedId {
- return m.linkPrice, m.linkPriceErr
- } else if fId == nativeFeedId {
- return m.nativePrice, m.nativePriceErr
- }
- return nil, nil
-}
-
-func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) {
- return m.ts, m.tsErr
-}
-
-type mockORM struct {
- report []byte
- err error
-}
-
-func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) {
- return m.report, m.err
-}
-
-type mockSaver struct {
- r *pipeline.Run
-}
-
-func (ms *mockSaver) Save(r *pipeline.Run) {
- ms.r = r
-}
-
-func Test_Datasource(t *testing.T) {
- orm := &mockORM{}
- jb := job.Job{
- Type: job.Type(pipeline.OffchainReporting2JobType),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- CaptureEATelemetry: true,
- PluginConfig: map[string]interface{}{
- "serverURL": "a",
- },
- },
- }
- ds := &datasource{orm: orm, lggr: logger.Test(t), jb: jb}
- ctx := testutils.Context(t)
- repts := ocrtypes.ReportTimestamp{}
-
- fetcher := &mockFetcher{}
- ds.fetcher = fetcher
-
- saver := &mockSaver{}
- ds.saver = saver
-
- goodTrrs := []pipeline.TaskRunResult{
- {
- // bp
- Result: pipeline.Result{Value: "122.345"},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- }
-
- spec := pipeline.Spec{}
- ds.spec = spec
-
- t.Run("when fetchMaxFinalizedTimestamp=true", func(t *testing.T) {
- t.Run("with latest report in database", func(t *testing.T) {
- orm.report = buildSampleV2Report()
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, int64(124), obs.MaxFinalizedTimestamp.Val)
- })
- t.Run("if querying latest report fails", func(t *testing.T) {
- orm.report = nil
- orm.err = errors.New("something exploded")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "something exploded")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
- t.Run("if codec fails to decode", func(t *testing.T) {
- orm.report = []byte{1, 2, 3}
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- orm.report = nil
- orm.err = nil
-
- t.Run("if LatestTimestamp returns error", func(t *testing.T) {
- fetcher.tsErr = errors.New("some error")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "some error")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- t.Run("if LatestTimestamp succeeds", func(t *testing.T) {
- fetcher.tsErr = nil
- fetcher.ts = 123
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Equal(t, int64(123), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- })
-
- t.Run("if LatestTimestamp succeeds but ts=0 (new feed)", func(t *testing.T) {
- fetcher.tsErr = nil
- fetcher.ts = 0
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- t.Run("when run execution succeeded", func(t *testing.T) {
- t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
- t.Cleanup(func() {
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
- })
-
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
-
- fetcher.ts = 123123
- fetcher.tsErr = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- assert.NoError(t, obs.BenchmarkPrice.Err)
- assert.Equal(t, int64(123123), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
- assert.NoError(t, obs.LinkPrice.Err)
- assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
- assert.NoError(t, obs.NativePrice.Err)
- })
- })
- })
-
- t.Run("when fetchMaxFinalizedTimestamp=false", func(t *testing.T) {
- t.Run("when run execution fails, returns error", func(t *testing.T) {
- t.Cleanup(func() {
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: nil,
- }
- })
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: errors.New("run execution failed"),
- }
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed")
- })
-
- t.Run("when parsing run results fails, return error", func(t *testing.T) {
- t.Cleanup(func() {
- runner := &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: nil,
- }
- ds.pipelineRunner = runner
- })
-
- badTrrs := []pipeline.TaskRunResult{
- {
- // benchmark price
- Result: pipeline.Result{Error: errors.New("some error with bp")},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: badTrrs,
- Err: nil,
- }
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while parsing run results: some error with bp")
- })
-
- t.Run("when run execution succeeded", func(t *testing.T) {
- t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
- t.Cleanup(func() {
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
- })
-
- var feedId utils.FeedID = [32]byte{1}
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- assert.NoError(t, obs.BenchmarkPrice.Err)
- assert.Equal(t, int64(0), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
- assert.NoError(t, obs.LinkPrice.Err)
- assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
- assert.NoError(t, obs.NativePrice.Err)
- })
-
- t.Run("when fails to fetch linkPrice or nativePrice", func(t *testing.T) {
- t.Cleanup(func() {
- fetcher.linkPriceErr = nil
- fetcher.nativePriceErr = nil
- })
-
- fetcher.linkPriceErr = errors.New("some error fetching link price")
- fetcher.nativePriceErr = errors.New("some error fetching native price")
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Nil(t, obs.LinkPrice.Val)
- assert.EqualError(t, obs.LinkPrice.Err, "some error fetching link price")
- assert.Nil(t, obs.NativePrice.Val)
- assert.EqualError(t, obs.NativePrice.Err, "some error fetching native price")
- })
-
- t.Run("when PluginConfig is empty", func(t *testing.T) {
- t.Cleanup(func() {
- ds.jb = jb
- })
-
- fetcher.linkPriceErr = errors.New("some error fetching link price")
- fetcher.nativePriceErr = errors.New("some error fetching native price")
-
- ds.jb.OCR2OracleSpec.PluginConfig = job.JSONConfig{}
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
- assert.Nil(t, obs.LinkPrice.Err)
- assert.Equal(t, obs.LinkPrice.Val, v2.MissingPrice)
- assert.Nil(t, obs.NativePrice.Err)
- assert.Equal(t, obs.NativePrice.Val, v2.MissingPrice)
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- })
-
- t.Run("when succeeds to fetch linkPrice or nativePrice but got nil (new feed)", func(t *testing.T) {
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, obs.LinkPrice.Val, v2.MissingPrice)
- assert.Nil(t, obs.LinkPrice.Err)
- assert.Equal(t, obs.NativePrice.Val, v2.MissingPrice)
- assert.Nil(t, obs.NativePrice.Err)
- })
- })
- })
-}
-
-var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
-
-func buildSampleV2Report() []byte {
- feedID := sampleFeedID
- timestamp := uint32(124)
- bp := big.NewInt(242)
- validFromTimestamp := uint32(123)
- expiresAt := uint32(456)
- linkFee := big.NewInt(3334455)
- nativeFee := big.NewInt(556677)
-
- b, err := reportcodecv2.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp)
- if err != nil {
- panic(err)
- }
- return b
-}
diff --git a/core/services/relay/evm/mercury/v2/reportcodec/report_codec.go b/core/services/relay/evm/mercury/v2/reportcodec/report_codec.go
deleted file mode 100644
index d35621da01b..00000000000
--- a/core/services/relay/evm/mercury/v2/reportcodec/report_codec.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package reportcodec
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
-
- pkgerrors "github.com/pkg/errors"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reporttypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v2/types"
-)
-
-var ReportTypes = reporttypes.GetSchema()
-var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word
-var zero = big.NewInt(0)
-
-var _ v2.ReportCodec = &ReportCodec{}
-
-type ReportCodec struct {
- logger logger.Logger
- feedID utils.FeedID
-}
-
-func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec {
- return &ReportCodec{lggr, feedID}
-}
-
-func (r *ReportCodec) BuildReport(ctx context.Context, rf v2.ReportFields) (ocrtypes.Report, error) {
- var merr error
- if rf.BenchmarkPrice == nil {
- merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil"))
- }
- if rf.LinkFee == nil {
- merr = errors.Join(merr, errors.New("linkFee may not be nil"))
- } else if rf.LinkFee.Cmp(zero) < 0 {
- merr = errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee))
- }
- if rf.NativeFee == nil {
- merr = errors.Join(merr, errors.New("nativeFee may not be nil"))
- } else if rf.NativeFee.Cmp(zero) < 0 {
- merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee))
- }
- if merr != nil {
- return nil, merr
- }
- reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice)
- return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob")
-}
-
-func (r *ReportCodec) MaxReportLength(ctx context.Context, n int) (int, error) {
- return maxReportLength, nil
-}
-
-func (r *ReportCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) {
- decoded, err := r.Decode(ctx, report)
- if err != nil {
- return 0, err
- }
- return decoded.ObservationsTimestamp, nil
-}
-
-func (r *ReportCodec) Decode(ctx context.Context, report ocrtypes.Report) (*reporttypes.Report, error) {
- return reporttypes.Decode(report)
-}
-
-func (r *ReportCodec) BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) {
- decoded, err := r.Decode(ctx, report)
- if err != nil {
- return nil, err
- }
- return decoded.BenchmarkPrice, nil
-}
diff --git a/core/services/relay/evm/mercury/v2/reportcodec/report_codec_test.go b/core/services/relay/evm/mercury/v2/reportcodec/report_codec_test.go
deleted file mode 100644
index 809869282b7..00000000000
--- a/core/services/relay/evm/mercury/v2/reportcodec/report_codec_test.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package reportcodec
-
-import (
- "math/big"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
-)
-
-func newValidReportFields() v2.ReportFields {
- return v2.ReportFields{
- Timestamp: 242,
- BenchmarkPrice: big.NewInt(243),
- ValidFromTimestamp: 123,
- ExpiresAt: 20,
- LinkFee: big.NewInt(456),
- NativeFee: big.NewInt(457),
- }
-}
-
-func Test_ReportCodec_BuildReport(t *testing.T) {
- r := ReportCodec{}
-
- t.Run("BuildReport errors on zero values", func(t *testing.T) {
- ctx := testutils.Context(t)
- _, err := r.BuildReport(ctx, v2.ReportFields{})
- require.Error(t, err)
- assert.Contains(t, err.Error(), "benchmarkPrice may not be nil")
- assert.Contains(t, err.Error(), "linkFee may not be nil")
- assert.Contains(t, err.Error(), "nativeFee may not be nil")
- })
-
- t.Run("BuildReport constructs a report from observations", func(t *testing.T) {
- ctx := testutils.Context(t)
- rf := newValidReportFields()
- // only need to test happy path since validations are done in relaymercury
-
- report, err := r.BuildReport(ctx, rf)
- require.NoError(t, err)
-
- reportElems := make(map[string]interface{})
- err = ReportTypes.UnpackIntoMap(reportElems, report)
- require.NoError(t, err)
-
- assert.Equal(t, int(reportElems["observationsTimestamp"].(uint32)), 242)
- assert.Equal(t, reportElems["benchmarkPrice"].(*big.Int).Int64(), int64(243))
- assert.Equal(t, reportElems["validFromTimestamp"].(uint32), uint32(123))
- assert.Equal(t, reportElems["expiresAt"].(uint32), uint32(20))
- assert.Equal(t, reportElems["linkFee"].(*big.Int).Int64(), int64(456))
- assert.Equal(t, reportElems["nativeFee"].(*big.Int).Int64(), int64(457))
-
- assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3}, report)
- max, err := r.MaxReportLength(ctx, 4)
- require.NoError(t, err)
- assert.LessOrEqual(t, len(report), max)
-
- t.Run("Decode decodes the report", func(t *testing.T) {
- ctx := testutils.Context(t)
- decoded, err := r.Decode(ctx, report)
- require.NoError(t, err)
-
- require.NotNil(t, decoded)
-
- assert.Equal(t, uint32(242), decoded.ObservationsTimestamp)
- assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice)
- assert.Equal(t, uint32(123), decoded.ValidFromTimestamp)
- assert.Equal(t, uint32(20), decoded.ExpiresAt)
- assert.Equal(t, big.NewInt(456), decoded.LinkFee)
- assert.Equal(t, big.NewInt(457), decoded.NativeFee)
- })
- })
-
- t.Run("errors on negative fee", func(t *testing.T) {
- rf := newValidReportFields()
- rf.LinkFee = big.NewInt(-1)
- rf.NativeFee = big.NewInt(-1)
- ctx := testutils.Context(t)
- _, err := r.BuildReport(ctx, rf)
- require.Error(t, err)
-
- assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)")
- assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)")
- })
-
- t.Run("Decode errors on invalid report", func(t *testing.T) {
- ctx := testutils.Context(t)
- _, err := r.Decode(ctx, []byte{1, 2, 3})
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
-
- longBad := make([]byte, 64)
- for i := 0; i < len(longBad); i++ {
- longBad[i] = byte(i)
- }
- _, err = r.Decode(ctx, longBad)
- assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value")
- })
-}
-
-func buildSampleReport(ts int64) []byte {
- feedID := [32]byte{'f', 'o', 'o'}
- timestamp := uint32(ts)
- bp := big.NewInt(242)
- validFromTimestamp := uint32(123)
- expiresAt := uint32(456)
- linkFee := big.NewInt(3334455)
- nativeFee := big.NewInt(556677)
-
- b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) {
- r := ReportCodec{}
-
- t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) {
- report := buildSampleReport(123)
-
- ctx := testutils.Context(t)
- ts, err := r.ObservationTimestampFromReport(ctx, report)
- require.NoError(t, err)
-
- assert.Equal(t, ts, uint32(123))
- })
- t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) {
- report := []byte{1, 2, 3}
-
- ctx := testutils.Context(t)
- _, err := r.ObservationTimestampFromReport(ctx, report)
- require.Error(t, err)
-
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- })
-}
-
-func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) {
- r := ReportCodec{}
-
- t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) {
- ctx := testutils.Context(t)
- report := buildSampleReport(123)
-
- bp, err := r.BenchmarkPriceFromReport(ctx, report)
- require.NoError(t, err)
-
- assert.Equal(t, big.NewInt(242), bp)
- })
- t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) {
- ctx := testutils.Context(t)
- _, err := r.BenchmarkPriceFromReport(ctx, []byte{1, 2, 3})
- require.Error(t, err)
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- })
-}
diff --git a/core/services/relay/evm/mercury/v2/types/types.go b/core/services/relay/evm/mercury/v2/types/types.go
deleted file mode 100644
index 3c1df286d14..00000000000
--- a/core/services/relay/evm/mercury/v2/types/types.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package reporttypes
-
-import (
- "fmt"
- "math/big"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
-)
-
-var schema = GetSchema()
-
-func GetSchema() abi.Arguments {
- mustNewType := func(t string) abi.Type {
- result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{})
- if err != nil {
- panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err))
- }
- return result
- }
- return abi.Arguments([]abi.Argument{
- {Name: "feedId", Type: mustNewType("bytes32")},
- {Name: "validFromTimestamp", Type: mustNewType("uint32")},
- {Name: "observationsTimestamp", Type: mustNewType("uint32")},
- {Name: "nativeFee", Type: mustNewType("uint192")},
- {Name: "linkFee", Type: mustNewType("uint192")},
- {Name: "expiresAt", Type: mustNewType("uint32")},
- {Name: "benchmarkPrice", Type: mustNewType("int192")},
- })
-}
-
-type Report struct {
- FeedId [32]byte
- ObservationsTimestamp uint32
- BenchmarkPrice *big.Int
- ValidFromTimestamp uint32
- ExpiresAt uint32
- LinkFee *big.Int
- NativeFee *big.Int
-}
-
-// Decode is made available to external users (i.e. mercury server)
-func Decode(report []byte) (*Report, error) {
- values, err := schema.Unpack(report)
- if err != nil {
- return nil, fmt.Errorf("failed to decode report: %w", err)
- }
- decoded := new(Report)
- if err = schema.Copy(decoded, values); err != nil {
- return nil, fmt.Errorf("failed to copy report values to struct: %w", err)
- }
- return decoded, nil
-}
diff --git a/core/services/relay/evm/mercury/v3/data_source.go b/core/services/relay/evm/mercury/v3/data_source.go
deleted file mode 100644
index 540006d1535..00000000000
--- a/core/services/relay/evm/mercury/v3/data_source.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package v3
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
- "sync"
-
- pkgerrors "github.com/pkg/errors"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v3types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
- v3 "github.com/smartcontractkit/chainlink-data-streams/mercury/v3"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline/eautils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercurytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-const adapterLWBAErrorName = "AdapterLWBAError"
-
-type Runner interface {
- ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error)
-}
-
-type LatestReportFetcher interface {
- LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error)
- LatestTimestamp(context.Context) (int64, error)
-}
-
-type datasource struct {
- pipelineRunner Runner
- jb job.Job
- spec pipeline.Spec
- feedID mercuryutils.FeedID
- lggr logger.Logger
- saver ocrcommon.Saver
- orm types.DataSourceORM
- codec reportcodec.ReportCodec
-
- fetcher LatestReportFetcher
- linkFeedID mercuryutils.FeedID
- nativeFeedID mercuryutils.FeedID
-
- mu sync.RWMutex
-
- chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData
-}
-
-var _ v3.DataSource = &datasource{}
-
-func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, s ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource {
- return &datasource{pr, jb, spec, feedID, lggr, s, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan}
-}
-
-func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedTimestamp bool) (obs v3types.Observation, pipelineExecutionErr error) {
- var wg sync.WaitGroup
- ctx, cancel := context.WithCancel(ctx)
-
- if fetchMaxFinalizedTimestamp {
- wg.Add(1)
- go func() {
- defer wg.Done()
- latest, dbErr := ds.orm.LatestReport(ctx, ds.feedID)
- if dbErr != nil {
- obs.MaxFinalizedTimestamp.Err = dbErr
- return
- }
- if latest != nil {
- maxFinalizedBlockNumber, decodeErr := ds.codec.ObservationTimestampFromReport(ctx, latest)
- obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = int64(maxFinalizedBlockNumber), decodeErr
- return
- }
- obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = ds.fetcher.LatestTimestamp(ctx)
- }()
- }
-
- var trrs pipeline.TaskRunResults
- wg.Add(1)
- go func() {
- defer wg.Done()
- var run *pipeline.Run
- run, trrs, pipelineExecutionErr = ds.executeRun(ctx)
- if pipelineExecutionErr != nil {
- cancel()
- pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr)
- return
- }
-
- ds.saver.Save(run)
-
- var parsed parseOutput
- parsed, pipelineExecutionErr = ds.parse(trrs)
- if pipelineExecutionErr != nil {
- cancel()
- // This is not expected under normal circumstances
- ds.lggr.Errorw("Observe failed while parsing run results", "err", pipelineExecutionErr)
- pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr)
- return
- }
- obs.BenchmarkPrice = parsed.benchmarkPrice
- obs.Bid = parsed.bid
- obs.Ask = parsed.ask
- }()
-
- var isLink, isNative bool
- if len(ds.jb.OCR2OracleSpec.PluginConfig) == 0 {
- obs.LinkPrice.Val = v3.MissingPrice
- } else if ds.feedID == ds.linkFeedID {
- isLink = true
- } else {
- wg.Add(1)
- go func() {
- defer wg.Done()
- obs.LinkPrice.Val, obs.LinkPrice.Err = ds.fetcher.LatestPrice(ctx, ds.linkFeedID)
- if obs.LinkPrice.Val == nil && obs.LinkPrice.Err == nil {
- mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.linkFeedID.String()).Inc()
- ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing LINK feed, using sentinel value of %s", v3.MissingPrice), "linkFeedID", ds.linkFeedID)
- obs.LinkPrice.Val = v3.MissingPrice
- } else if obs.LinkPrice.Err != nil {
- mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.linkFeedID.String()).Inc()
- ds.lggr.Errorw("Mercury server returned error querying LINK price feed", "err", obs.LinkPrice.Err, "linkFeedID", ds.linkFeedID)
- }
- }()
- }
-
- if len(ds.jb.OCR2OracleSpec.PluginConfig) == 0 {
- obs.NativePrice.Val = v3.MissingPrice
- } else if ds.feedID == ds.nativeFeedID {
- isNative = true
- } else {
- wg.Add(1)
- go func() {
- defer wg.Done()
- obs.NativePrice.Val, obs.NativePrice.Err = ds.fetcher.LatestPrice(ctx, ds.nativeFeedID)
- if obs.NativePrice.Val == nil && obs.NativePrice.Err == nil {
- mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
- ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing native feed, using sentinel value of %s", v3.MissingPrice), "nativeFeedID", ds.nativeFeedID)
- obs.NativePrice.Val = v3.MissingPrice
- } else if obs.NativePrice.Err != nil {
- mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
- ds.lggr.Errorw("Mercury server returned error querying native price feed", "err", obs.NativePrice.Err, "nativeFeedID", ds.nativeFeedID)
- }
- }()
- }
-
- wg.Wait()
- cancel()
-
- if pipelineExecutionErr != nil {
- var adapterError *eautils.AdapterError
- if errors.As(pipelineExecutionErr, &adapterError) && adapterError.Name == adapterLWBAErrorName {
- ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{
- V3Observation: &obs,
- TaskRunResults: trrs,
- RepTimestamp: repts,
- FeedVersion: mercuryutils.REPORT_V3,
- FetchMaxFinalizedTimestamp: fetchMaxFinalizedTimestamp,
- IsLinkFeed: isLink,
- IsNativeFeed: isNative,
- DpInvariantViolationDetected: true,
- })
- }
- return
- }
-
- if isLink || isNative {
- // run has now completed so it is safe to use benchmark price
- if isLink {
- // This IS the LINK feed, use our observed price
- obs.LinkPrice.Val, obs.LinkPrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
- }
- if isNative {
- // This IS the native feed, use our observed price
- obs.NativePrice.Val, obs.NativePrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
- }
- }
-
- ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{
- V3Observation: &obs,
- TaskRunResults: trrs,
- RepTimestamp: repts,
- FeedVersion: mercuryutils.REPORT_V3,
- FetchMaxFinalizedTimestamp: fetchMaxFinalizedTimestamp,
- IsLinkFeed: isLink,
- IsNativeFeed: isNative,
- })
-
- return obs, nil
-}
-
-func toBigInt(val interface{}) (*big.Int, error) {
- dec, err := utils.ToDecimal(val)
- if err != nil {
- return nil, err
- }
- return dec.BigInt(), nil
-}
-
-type parseOutput struct {
- benchmarkPrice mercury.ObsResult[*big.Int]
- bid mercury.ObsResult[*big.Int]
- ask mercury.ObsResult[*big.Int]
-}
-
-func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, merr error) {
- var finaltrrs []pipeline.TaskRunResult
- for _, trr := range trrs {
- // only return terminal trrs from executeRun
- if trr.IsTerminal() {
- finaltrrs = append(finaltrrs, trr)
- }
- }
-
- // pipeline.TaskRunResults comes ordered asc by index, this is guaranteed
- // by the pipeline executor
- if len(finaltrrs) != 3 {
- return o, fmt.Errorf("invalid number of results, expected: 3, got: %d", len(finaltrrs))
- }
-
- merr = errors.Join(
- setBenchmarkPrice(&o, finaltrrs[0].Result),
- setBid(&o, finaltrrs[1].Result),
- setAsk(&o, finaltrrs[2].Result),
- )
-
- return o, merr
-}
-
-func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.benchmarkPrice.Err = res.Error
- return res.Error
- }
- val, err := toBigInt(res.Value)
- if err != nil {
- return fmt.Errorf("failed to parse BenchmarkPrice: %w", err)
- }
- o.benchmarkPrice.Val = val
- return nil
-}
-
-func setBid(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.bid.Err = res.Error
- return res.Error
- }
- val, err := toBigInt(res.Value)
- if err != nil {
- return fmt.Errorf("failed to parse Bid: %w", err)
- }
- o.bid.Val = val
- return nil
-}
-
-func setAsk(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.ask.Err = res.Error
- return res.Error
- }
- val, err := toBigInt(res.Value)
- if err != nil {
- return fmt.Errorf("failed to parse Ask: %w", err)
- }
- o.ask.Val = val
- return nil
-}
-
-// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod).
-// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod.
-func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) {
- vars := pipeline.NewVarsFrom(map[string]interface{}{
- "jb": map[string]interface{}{
- "databaseID": ds.jb.ID,
- "externalJobID": ds.jb.ExternalJobID,
- "name": ds.jb.Name.ValueOrZero(),
- },
- })
-
- run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars)
- if err != nil {
- return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID)
- }
-
- return run, trrs, err
-}
diff --git a/core/services/relay/evm/mercury/v3/data_source_test.go b/core/services/relay/evm/mercury/v3/data_source_test.go
deleted file mode 100644
index 42f79785ba8..00000000000
--- a/core/services/relay/evm/mercury/v3/data_source_test.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package v3
-
-import (
- "context"
- "math/big"
- "testing"
-
- relaymercuryv3 "github.com/smartcontractkit/chainlink-data-streams/mercury/v3"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline/eautils"
-
- "github.com/pkg/errors"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/stretchr/testify/assert"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- mercurymocks "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reportcodecv3 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v3/reportcodec"
-)
-
-var _ mercurytypes.ServerFetcher = &mockFetcher{}
-
-type mockFetcher struct {
- ts int64
- tsErr error
- linkPrice *big.Int
- linkPriceErr error
- nativePrice *big.Int
- nativePriceErr error
-}
-
-var feedId utils.FeedID = [32]byte{1}
-var linkFeedId utils.FeedID = [32]byte{2}
-var nativeFeedId utils.FeedID = [32]byte{3}
-
-func (m *mockFetcher) FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) {
- return nil, nil
-}
-
-func (m *mockFetcher) LatestPrice(ctx context.Context, fId [32]byte) (*big.Int, error) {
- if fId == linkFeedId {
- return m.linkPrice, m.linkPriceErr
- } else if fId == nativeFeedId {
- return m.nativePrice, m.nativePriceErr
- }
- return nil, nil
-}
-
-func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) {
- return m.ts, m.tsErr
-}
-
-type mockORM struct {
- report []byte
- err error
-}
-
-func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) {
- return m.report, m.err
-}
-
-type mockSaver struct {
- r *pipeline.Run
-}
-
-func (ms *mockSaver) Save(r *pipeline.Run) {
- ms.r = r
-}
-
-func Test_Datasource(t *testing.T) {
- orm := &mockORM{}
- jb := job.Job{
- Type: job.Type(pipeline.OffchainReporting2JobType),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- CaptureEATelemetry: true,
- PluginConfig: map[string]interface{}{
- "serverURL": "a",
- },
- },
- }
- ds := &datasource{orm: orm, lggr: logger.Test(t), jb: jb}
- ctx := testutils.Context(t)
- repts := ocrtypes.ReportTimestamp{}
-
- fetcher := &mockFetcher{}
- ds.fetcher = fetcher
-
- saver := &mockSaver{}
- ds.saver = saver
-
- goodTrrs := []pipeline.TaskRunResult{
- {
- // bp
- Result: pipeline.Result{Value: "122.345"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // bid
- Result: pipeline.Result{Value: "121.993"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // ask
- Result: pipeline.Result{Value: "123.111"},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- }
-
- spec := pipeline.Spec{}
- ds.spec = spec
-
- t.Run("when fetchMaxFinalizedTimestamp=true", func(t *testing.T) {
- t.Run("with latest report in database", func(t *testing.T) {
- orm.report = buildSampleV3Report()
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, int64(124), obs.MaxFinalizedTimestamp.Val)
- })
- t.Run("if querying latest report fails", func(t *testing.T) {
- orm.report = nil
- orm.err = errors.New("something exploded")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "something exploded")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
- t.Run("if codec fails to decode", func(t *testing.T) {
- orm.report = []byte{1, 2, 3}
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- orm.report = nil
- orm.err = nil
-
- t.Run("if LatestTimestamp returns error", func(t *testing.T) {
- fetcher.tsErr = errors.New("some error")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "some error")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- t.Run("if LatestTimestamp succeeds", func(t *testing.T) {
- fetcher.tsErr = nil
- fetcher.ts = 123
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Equal(t, int64(123), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- })
-
- t.Run("if LatestTimestamp succeeds but ts=0 (new feed)", func(t *testing.T) {
- fetcher.tsErr = nil
- fetcher.ts = 0
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- t.Run("when run execution succeeded", func(t *testing.T) {
- t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
- t.Cleanup(func() {
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
- })
-
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
-
- fetcher.ts = 123123
- fetcher.tsErr = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- assert.NoError(t, obs.BenchmarkPrice.Err)
- assert.Equal(t, big.NewInt(121), obs.Bid.Val)
- assert.NoError(t, obs.Bid.Err)
- assert.Equal(t, big.NewInt(123), obs.Ask.Val)
- assert.NoError(t, obs.Ask.Err)
- assert.Equal(t, int64(123123), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
- assert.NoError(t, obs.LinkPrice.Err)
- assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
- assert.NoError(t, obs.NativePrice.Err)
- })
- })
- })
-
- t.Run("when fetchMaxFinalizedTimestamp=false", func(t *testing.T) {
- t.Run("when run execution fails, returns error", func(t *testing.T) {
- t.Cleanup(func() {
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: nil,
- }
- })
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: errors.New("run execution failed"),
- }
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed")
- })
-
- t.Run("when parsing run results fails, return error", func(t *testing.T) {
- t.Cleanup(func() {
- runner := &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: nil,
- }
- ds.pipelineRunner = runner
- })
-
- badTrrs := []pipeline.TaskRunResult{
- {
- // benchmark price
- Result: pipeline.Result{Value: "122.345"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // bid
- Result: pipeline.Result{Value: "121.993"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // ask
- Result: pipeline.Result{Error: errors.New("some error with ask")},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: badTrrs,
- Err: nil,
- }
-
- chEnhancedTelem := make(chan ocrcommon.EnhancedTelemetryMercuryData, 1)
- ds.chEnhancedTelem = chEnhancedTelem
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while parsing run results: some error with ask")
-
- select {
- case <-chEnhancedTelem:
- assert.Fail(t, "did not expect to receive telemetry")
- default:
- }
- })
-
- t.Run("when run results fails with a bid ask violation", func(t *testing.T) {
- t.Cleanup(func() {
- runner := &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: nil,
- }
- ds.pipelineRunner = runner
- })
-
- badTrrs := []pipeline.TaskRunResult{
- {
- // benchmark price
- Result: pipeline.Result{Value: "122.345"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // bid
- Result: pipeline.Result{Value: "121.993"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // ask
- Result: pipeline.Result{Error: &eautils.AdapterError{Name: adapterLWBAErrorName, Message: "bid ask violation"}},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: badTrrs,
- Err: nil,
- }
-
- chEnhancedTelem := make(chan ocrcommon.EnhancedTelemetryMercuryData, 1)
- ds.chEnhancedTelem = chEnhancedTelem
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while parsing run results: AdapterLWBAError: bid ask violation")
-
- telem := <-chEnhancedTelem
- assert.True(t, telem.DpInvariantViolationDetected)
- })
-
- t.Run("when run execution succeeded", func(t *testing.T) {
- t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
- t.Cleanup(func() {
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
- })
-
- var feedId utils.FeedID = [32]byte{1}
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- assert.NoError(t, obs.BenchmarkPrice.Err)
- assert.Equal(t, big.NewInt(121), obs.Bid.Val)
- assert.NoError(t, obs.Bid.Err)
- assert.Equal(t, big.NewInt(123), obs.Ask.Val)
- assert.NoError(t, obs.Ask.Err)
- assert.Equal(t, int64(0), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
- assert.NoError(t, obs.LinkPrice.Err)
- assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
- assert.NoError(t, obs.NativePrice.Err)
- })
-
- t.Run("when fails to fetch linkPrice or nativePrice", func(t *testing.T) {
- t.Cleanup(func() {
- fetcher.linkPriceErr = nil
- fetcher.nativePriceErr = nil
- })
-
- fetcher.linkPriceErr = errors.New("some error fetching link price")
- fetcher.nativePriceErr = errors.New("some error fetching native price")
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Nil(t, obs.LinkPrice.Val)
- assert.EqualError(t, obs.LinkPrice.Err, "some error fetching link price")
- assert.Nil(t, obs.NativePrice.Val)
- assert.EqualError(t, obs.NativePrice.Err, "some error fetching native price")
- })
-
- t.Run("when PluginConfig is empty", func(t *testing.T) {
- t.Cleanup(func() {
- ds.jb = jb
- })
-
- fetcher.linkPriceErr = errors.New("some error fetching link price")
- fetcher.nativePriceErr = errors.New("some error fetching native price")
-
- ds.jb.OCR2OracleSpec.PluginConfig = job.JSONConfig{}
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
- assert.Nil(t, obs.LinkPrice.Err)
- assert.Equal(t, obs.LinkPrice.Val, relaymercuryv3.MissingPrice)
- assert.Nil(t, obs.NativePrice.Err)
- assert.Equal(t, obs.NativePrice.Val, relaymercuryv3.MissingPrice)
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- })
-
- t.Run("when succeeds to fetch linkPrice or nativePrice but got nil (new feed)", func(t *testing.T) {
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, obs.LinkPrice.Val, relaymercuryv3.MissingPrice)
- assert.Nil(t, obs.LinkPrice.Err)
- assert.Equal(t, obs.NativePrice.Val, relaymercuryv3.MissingPrice)
- assert.Nil(t, obs.NativePrice.Err)
- })
- })
- })
-}
-
-var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
-
-func buildSampleV3Report() []byte {
- feedID := sampleFeedID
- timestamp := uint32(124)
- bp := big.NewInt(242)
- bid := big.NewInt(243)
- ask := big.NewInt(244)
- validFromTimestamp := uint32(123)
- expiresAt := uint32(456)
- linkFee := big.NewInt(3334455)
- nativeFee := big.NewInt(556677)
-
- b, err := reportcodecv3.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, bid, ask)
- if err != nil {
- panic(err)
- }
- return b
-}
diff --git a/core/services/relay/evm/mercury/v4/data_source.go b/core/services/relay/evm/mercury/v4/data_source.go
deleted file mode 100644
index 26fe3379a0a..00000000000
--- a/core/services/relay/evm/mercury/v4/data_source.go
+++ /dev/null
@@ -1,263 +0,0 @@
-package v4
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
- "sync"
-
- pkgerrors "github.com/pkg/errors"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v4types "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
- v4 "github.com/smartcontractkit/chainlink-data-streams/mercury/v4"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercurytypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/types"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-type Runner interface {
- ExecuteRun(ctx context.Context, spec pipeline.Spec, vars pipeline.Vars) (run *pipeline.Run, trrs pipeline.TaskRunResults, err error)
-}
-
-type LatestReportFetcher interface {
- LatestPrice(ctx context.Context, feedID [32]byte) (*big.Int, error)
- LatestTimestamp(context.Context) (int64, error)
-}
-
-type datasource struct {
- pipelineRunner Runner
- jb job.Job
- spec pipeline.Spec
- feedID mercuryutils.FeedID
- lggr logger.Logger
- saver ocrcommon.Saver
- orm types.DataSourceORM
- codec reportcodec.ReportCodec
-
- fetcher LatestReportFetcher
- linkFeedID mercuryutils.FeedID
- nativeFeedID mercuryutils.FeedID
-
- mu sync.RWMutex
-
- chEnhancedTelem chan<- ocrcommon.EnhancedTelemetryMercuryData
-}
-
-var _ v4.DataSource = &datasource{}
-
-func NewDataSource(orm types.DataSourceORM, pr pipeline.Runner, jb job.Job, spec pipeline.Spec, feedID mercuryutils.FeedID, lggr logger.Logger, s ocrcommon.Saver, enhancedTelemChan chan ocrcommon.EnhancedTelemetryMercuryData, fetcher LatestReportFetcher, linkFeedID, nativeFeedID mercuryutils.FeedID) *datasource {
- return &datasource{pr, jb, spec, feedID, lggr, s, orm, reportcodec.ReportCodec{}, fetcher, linkFeedID, nativeFeedID, sync.RWMutex{}, enhancedTelemChan}
-}
-
-func (ds *datasource) Observe(ctx context.Context, repts ocrtypes.ReportTimestamp, fetchMaxFinalizedTimestamp bool) (obs v4types.Observation, pipelineExecutionErr error) {
- var wg sync.WaitGroup
- ctx, cancel := context.WithCancel(ctx)
-
- if fetchMaxFinalizedTimestamp {
- wg.Add(1)
- go func() {
- defer wg.Done()
- latest, dbErr := ds.orm.LatestReport(ctx, ds.feedID)
- if dbErr != nil {
- obs.MaxFinalizedTimestamp.Err = dbErr
- return
- }
- if latest != nil {
- maxFinalizedBlockNumber, decodeErr := ds.codec.ObservationTimestampFromReport(ctx, latest)
- obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = int64(maxFinalizedBlockNumber), decodeErr
- return
- }
- obs.MaxFinalizedTimestamp.Val, obs.MaxFinalizedTimestamp.Err = ds.fetcher.LatestTimestamp(ctx)
- }()
- }
-
- var trrs pipeline.TaskRunResults
- wg.Add(1)
- go func() {
- defer wg.Done()
- var run *pipeline.Run
- run, trrs, pipelineExecutionErr = ds.executeRun(ctx)
- if pipelineExecutionErr != nil {
- cancel()
- pipelineExecutionErr = fmt.Errorf("Observe failed while executing run: %w", pipelineExecutionErr)
- return
- }
-
- ds.saver.Save(run)
-
- var parsed parseOutput
- parsed, pipelineExecutionErr = ds.parse(trrs)
- if pipelineExecutionErr != nil {
- cancel()
- // This is not expected under normal circumstances
- ds.lggr.Errorw("Observe failed while parsing run results", "err", pipelineExecutionErr)
- pipelineExecutionErr = fmt.Errorf("Observe failed while parsing run results: %w", pipelineExecutionErr)
- return
- }
- obs.BenchmarkPrice = parsed.benchmarkPrice
- obs.MarketStatus = parsed.marketStatus
- }()
-
- var isLink, isNative bool
- if len(ds.jb.OCR2OracleSpec.PluginConfig) == 0 {
- obs.LinkPrice.Val = v4.MissingPrice
- } else if ds.feedID == ds.linkFeedID {
- isLink = true
- } else {
- wg.Add(1)
- go func() {
- defer wg.Done()
- obs.LinkPrice.Val, obs.LinkPrice.Err = ds.fetcher.LatestPrice(ctx, ds.linkFeedID)
- if obs.LinkPrice.Val == nil && obs.LinkPrice.Err == nil {
- mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.linkFeedID.String()).Inc()
- ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing LINK feed, using sentinel value of %s", v4.MissingPrice), "linkFeedID", ds.linkFeedID)
- obs.LinkPrice.Val = v4.MissingPrice
- } else if obs.LinkPrice.Err != nil {
- mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.linkFeedID.String()).Inc()
- ds.lggr.Errorw("Mercury server returned error querying LINK price feed", "err", obs.LinkPrice.Err, "linkFeedID", ds.linkFeedID)
- }
- }()
- }
-
- if len(ds.jb.OCR2OracleSpec.PluginConfig) == 0 {
- obs.NativePrice.Val = v4.MissingPrice
- } else if ds.feedID == ds.nativeFeedID {
- isNative = true
- } else {
- wg.Add(1)
- go func() {
- defer wg.Done()
- obs.NativePrice.Val, obs.NativePrice.Err = ds.fetcher.LatestPrice(ctx, ds.nativeFeedID)
- if obs.NativePrice.Val == nil && obs.NativePrice.Err == nil {
- mercurytypes.PriceFeedMissingCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
- ds.lggr.Warnw(fmt.Sprintf("Mercury server was missing native feed, using sentinel value of %s", v4.MissingPrice), "nativeFeedID", ds.nativeFeedID)
- obs.NativePrice.Val = v4.MissingPrice
- } else if obs.NativePrice.Err != nil {
- mercurytypes.PriceFeedErrorCount.WithLabelValues(ds.nativeFeedID.String()).Inc()
- ds.lggr.Errorw("Mercury server returned error querying native price feed", "err", obs.NativePrice.Err, "nativeFeedID", ds.nativeFeedID)
- }
- }()
- }
-
- wg.Wait()
- cancel()
-
- if pipelineExecutionErr != nil {
- return
- }
-
- if isLink || isNative {
- // run has now completed so it is safe to use benchmark price
- if isLink {
- // This IS the LINK feed, use our observed price
- obs.LinkPrice.Val, obs.LinkPrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
- }
- if isNative {
- // This IS the native feed, use our observed price
- obs.NativePrice.Val, obs.NativePrice.Err = obs.BenchmarkPrice.Val, obs.BenchmarkPrice.Err
- }
- }
-
- ocrcommon.MaybeEnqueueEnhancedTelem(ds.jb, ds.chEnhancedTelem, ocrcommon.EnhancedTelemetryMercuryData{
- V4Observation: &obs,
- TaskRunResults: trrs,
- RepTimestamp: repts,
- FeedVersion: mercuryutils.REPORT_V4,
- FetchMaxFinalizedTimestamp: fetchMaxFinalizedTimestamp,
- IsLinkFeed: isLink,
- IsNativeFeed: isNative,
- })
-
- return obs, nil
-}
-
-func toBigInt(val interface{}) (*big.Int, error) {
- dec, err := utils.ToDecimal(val)
- if err != nil {
- return nil, err
- }
- return dec.BigInt(), nil
-}
-
-type parseOutput struct {
- benchmarkPrice mercury.ObsResult[*big.Int]
- marketStatus mercury.ObsResult[uint32]
-}
-
-func (ds *datasource) parse(trrs pipeline.TaskRunResults) (o parseOutput, merr error) {
- var finaltrrs []pipeline.TaskRunResult
- for _, trr := range trrs {
- // only return terminal trrs from executeRun
- if trr.IsTerminal() {
- finaltrrs = append(finaltrrs, trr)
- }
- }
-
- // pipeline.TaskRunResults comes ordered asc by index, this is guaranteed
- // by the pipeline executor
- if len(finaltrrs) != 2 {
- return o, fmt.Errorf("invalid number of results, expected: 2, got: %d", len(finaltrrs))
- }
-
- merr = errors.Join(
- setBenchmarkPrice(&o, finaltrrs[0].Result),
- setMarketStatus(&o, finaltrrs[1].Result),
- )
-
- return o, merr
-}
-
-func setBenchmarkPrice(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.benchmarkPrice.Err = res.Error
- return res.Error
- }
- val, err := toBigInt(res.Value)
- if err != nil {
- return fmt.Errorf("failed to parse BenchmarkPrice: %w", err)
- }
- o.benchmarkPrice.Val = val
- return nil
-}
-
-func setMarketStatus(o *parseOutput, res pipeline.Result) error {
- if res.Error != nil {
- o.marketStatus.Err = res.Error
- return res.Error
- }
- val, err := toBigInt(res.Value)
- if err != nil {
- return fmt.Errorf("failed to parse MarketStatus: %w", err)
- }
- o.marketStatus.Val = uint32(val.Int64())
- return nil
-}
-
-// The context passed in here has a timeout of (ObservationTimeout + ObservationGracePeriod).
-// Upon context cancellation, its expected that we return any usable values within ObservationGracePeriod.
-func (ds *datasource) executeRun(ctx context.Context) (*pipeline.Run, pipeline.TaskRunResults, error) {
- vars := pipeline.NewVarsFrom(map[string]interface{}{
- "jb": map[string]interface{}{
- "databaseID": ds.jb.ID,
- "externalJobID": ds.jb.ExternalJobID,
- "name": ds.jb.Name.ValueOrZero(),
- },
- })
-
- run, trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, vars)
- if err != nil {
- return nil, nil, pkgerrors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID)
- }
-
- return run, trrs, err
-}
diff --git a/core/services/relay/evm/mercury/v4/data_source_test.go b/core/services/relay/evm/mercury/v4/data_source_test.go
deleted file mode 100644
index d6af158ac4f..00000000000
--- a/core/services/relay/evm/mercury/v4/data_source_test.go
+++ /dev/null
@@ -1,348 +0,0 @@
-package v4
-
-import (
- "context"
- "math/big"
- "testing"
-
- "github.com/pkg/errors"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/stretchr/testify/assert"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- relaymercuryv4 "github.com/smartcontractkit/chainlink-data-streams/mercury/v4"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/services/job"
- "github.com/smartcontractkit/chainlink/v2/core/services/pipeline"
- mercurymocks "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reportcodecv4 "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/reportcodec"
-)
-
-var _ mercurytypes.ServerFetcher = &mockFetcher{}
-
-type mockFetcher struct {
- ts int64
- tsErr error
- linkPrice *big.Int
- linkPriceErr error
- nativePrice *big.Int
- nativePriceErr error
-}
-
-var feedId utils.FeedID = [32]byte{1}
-var linkFeedId utils.FeedID = [32]byte{2}
-var nativeFeedId utils.FeedID = [32]byte{3}
-
-func (m *mockFetcher) FetchInitialMaxFinalizedBlockNumber(context.Context) (*int64, error) {
- return nil, nil
-}
-
-func (m *mockFetcher) LatestPrice(ctx context.Context, fId [32]byte) (*big.Int, error) {
- if fId == linkFeedId {
- return m.linkPrice, m.linkPriceErr
- } else if fId == nativeFeedId {
- return m.nativePrice, m.nativePriceErr
- }
- return nil, nil
-}
-
-func (m *mockFetcher) LatestTimestamp(context.Context) (int64, error) {
- return m.ts, m.tsErr
-}
-
-type mockORM struct {
- report []byte
- err error
-}
-
-func (m *mockORM) LatestReport(ctx context.Context, feedID [32]byte) (report []byte, err error) {
- return m.report, m.err
-}
-
-type mockSaver struct {
- r *pipeline.Run
-}
-
-func (ms *mockSaver) Save(r *pipeline.Run) {
- ms.r = r
-}
-
-func Test_Datasource(t *testing.T) {
- orm := &mockORM{}
- jb := job.Job{
- Type: job.Type(pipeline.OffchainReporting2JobType),
- OCR2OracleSpec: &job.OCR2OracleSpec{
- CaptureEATelemetry: true,
- PluginConfig: map[string]interface{}{
- "serverURL": "a",
- },
- },
- }
- ds := &datasource{orm: orm, lggr: logger.Test(t), jb: jb}
- ctx := testutils.Context(t)
- repts := ocrtypes.ReportTimestamp{}
-
- fetcher := &mockFetcher{}
- ds.fetcher = fetcher
-
- saver := &mockSaver{}
- ds.saver = saver
-
- goodTrrs := []pipeline.TaskRunResult{
- {
- // bp
- Result: pipeline.Result{Value: "122.345"},
- Task: &mercurymocks.MockTask{},
- },
- {
- // marketStatus
- Result: pipeline.Result{Value: "1"},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- }
-
- spec := pipeline.Spec{}
- ds.spec = spec
-
- t.Run("when fetchMaxFinalizedTimestamp=true", func(t *testing.T) {
- t.Run("with latest report in database", func(t *testing.T) {
- orm.report = buildSamplev4Report()
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, int64(124), obs.MaxFinalizedTimestamp.Val)
- })
- t.Run("if querying latest report fails", func(t *testing.T) {
- orm.report = nil
- orm.err = errors.New("something exploded")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "something exploded")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
- t.Run("if codec fails to decode", func(t *testing.T) {
- orm.report = []byte{1, 2, 3}
- orm.err = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- orm.report = nil
- orm.err = nil
-
- t.Run("if LatestTimestamp returns error", func(t *testing.T) {
- fetcher.tsErr = errors.New("some error")
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.EqualError(t, obs.MaxFinalizedTimestamp.Err, "some error")
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- t.Run("if LatestTimestamp succeeds", func(t *testing.T) {
- fetcher.tsErr = nil
- fetcher.ts = 123
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Equal(t, int64(123), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- })
-
- t.Run("if LatestTimestamp succeeds but ts=0 (new feed)", func(t *testing.T) {
- fetcher.tsErr = nil
- fetcher.ts = 0
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Zero(t, obs.MaxFinalizedTimestamp.Val)
- })
-
- t.Run("when run execution succeeded", func(t *testing.T) {
- t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
- t.Cleanup(func() {
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
- })
-
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
-
- fetcher.ts = 123123
- fetcher.tsErr = nil
-
- obs, err := ds.Observe(ctx, repts, true)
- assert.NoError(t, err)
-
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- assert.NoError(t, obs.BenchmarkPrice.Err)
- assert.Equal(t, int64(123123), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
- assert.NoError(t, obs.LinkPrice.Err)
- assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
- assert.NoError(t, obs.NativePrice.Err)
- assert.Equal(t, uint32(1), obs.MarketStatus.Val)
- assert.NoError(t, obs.MarketStatus.Err)
- })
- })
- })
-
- t.Run("when fetchMaxFinalizedTimestamp=false", func(t *testing.T) {
- t.Run("when run execution fails, returns error", func(t *testing.T) {
- t.Cleanup(func() {
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: nil,
- }
- })
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: errors.New("run execution failed"),
- }
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while executing run: error executing run for spec ID 0: run execution failed")
- })
-
- t.Run("when parsing run results fails, return error", func(t *testing.T) {
- t.Cleanup(func() {
- runner := &mercurymocks.MockRunner{
- Trrs: goodTrrs,
- Err: nil,
- }
- ds.pipelineRunner = runner
- })
-
- badTrrs := []pipeline.TaskRunResult{
- {
- // benchmark price
- Result: pipeline.Result{Error: errors.New("some error with bp")},
- Task: &mercurymocks.MockTask{},
- },
- {
- // marketStatus
- Result: pipeline.Result{Value: "1"},
- Task: &mercurymocks.MockTask{},
- },
- }
-
- ds.pipelineRunner = &mercurymocks.MockRunner{
- Trrs: badTrrs,
- Err: nil,
- }
-
- _, err := ds.Observe(ctx, repts, false)
- assert.EqualError(t, err, "Observe failed while parsing run results: some error with bp")
- })
-
- t.Run("when run execution succeeded", func(t *testing.T) {
- t.Run("when feedId=linkFeedID=nativeFeedId", func(t *testing.T) {
- t.Cleanup(func() {
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, linkFeedId, nativeFeedId
- })
-
- var feedId utils.FeedID = [32]byte{1}
- ds.feedID, ds.linkFeedID, ds.nativeFeedID = feedId, feedId, feedId
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- assert.NoError(t, obs.BenchmarkPrice.Err)
- assert.Equal(t, int64(0), obs.MaxFinalizedTimestamp.Val)
- assert.NoError(t, obs.MaxFinalizedTimestamp.Err)
- assert.Equal(t, big.NewInt(122), obs.LinkPrice.Val)
- assert.NoError(t, obs.LinkPrice.Err)
- assert.Equal(t, big.NewInt(122), obs.NativePrice.Val)
- assert.NoError(t, obs.NativePrice.Err)
- assert.Equal(t, uint32(1), obs.MarketStatus.Val)
- assert.NoError(t, obs.MarketStatus.Err)
- })
-
- t.Run("when fails to fetch linkPrice or nativePrice", func(t *testing.T) {
- t.Cleanup(func() {
- fetcher.linkPriceErr = nil
- fetcher.nativePriceErr = nil
- })
-
- fetcher.linkPriceErr = errors.New("some error fetching link price")
- fetcher.nativePriceErr = errors.New("some error fetching native price")
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Nil(t, obs.LinkPrice.Val)
- assert.EqualError(t, obs.LinkPrice.Err, "some error fetching link price")
- assert.Nil(t, obs.NativePrice.Val)
- assert.EqualError(t, obs.NativePrice.Err, "some error fetching native price")
- })
-
- t.Run("when PluginConfig is empty", func(t *testing.T) {
- t.Cleanup(func() {
- ds.jb = jb
- })
-
- fetcher.linkPriceErr = errors.New("some error fetching link price")
- fetcher.nativePriceErr = errors.New("some error fetching native price")
-
- ds.jb.OCR2OracleSpec.PluginConfig = job.JSONConfig{}
-
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
- assert.Nil(t, obs.LinkPrice.Err)
- assert.Equal(t, obs.LinkPrice.Val, relaymercuryv4.MissingPrice)
- assert.Nil(t, obs.NativePrice.Err)
- assert.Equal(t, obs.NativePrice.Val, relaymercuryv4.MissingPrice)
- assert.Equal(t, big.NewInt(122), obs.BenchmarkPrice.Val)
- })
-
- t.Run("when succeeds to fetch linkPrice or nativePrice but got nil (new feed)", func(t *testing.T) {
- obs, err := ds.Observe(ctx, repts, false)
- assert.NoError(t, err)
-
- assert.Equal(t, obs.LinkPrice.Val, relaymercuryv4.MissingPrice)
- assert.Nil(t, obs.LinkPrice.Err)
- assert.Equal(t, obs.NativePrice.Val, relaymercuryv4.MissingPrice)
- assert.Nil(t, obs.NativePrice.Err)
- })
- })
- })
-}
-
-var sampleFeedID = [32]uint8{28, 145, 107, 74, 167, 229, 124, 167, 182, 138, 225, 191, 69, 101, 63, 86, 182, 86, 253, 58, 163, 53, 239, 127, 174, 105, 107, 102, 63, 27, 132, 114}
-
-func buildSamplev4Report() []byte {
- feedID := sampleFeedID
- timestamp := uint32(124)
- bp := big.NewInt(242)
- validFromTimestamp := uint32(123)
- expiresAt := uint32(456)
- linkFee := big.NewInt(3334455)
- nativeFee := big.NewInt(556677)
- marketStatus := uint32(1)
-
- b, err := reportcodecv4.ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, marketStatus)
- if err != nil {
- panic(err)
- }
- return b
-}
diff --git a/core/services/relay/evm/mercury/v4/reportcodec/report_codec.go b/core/services/relay/evm/mercury/v4/reportcodec/report_codec.go
deleted file mode 100644
index c5d32c02ed4..00000000000
--- a/core/services/relay/evm/mercury/v4/reportcodec/report_codec.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package reportcodec
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
-
- pkgerrors "github.com/pkg/errors"
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- reporttypes "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/v4/types"
-)
-
-var ReportTypes = reporttypes.GetSchema()
-var maxReportLength = 32 * len(ReportTypes) // each arg is 256 bit EVM word
-var zero = big.NewInt(0)
-
-var _ v4.ReportCodec = &ReportCodec{}
-
-type ReportCodec struct {
- logger logger.Logger
- feedID utils.FeedID
-}
-
-func NewReportCodec(feedID [32]byte, lggr logger.Logger) *ReportCodec {
- return &ReportCodec{lggr, feedID}
-}
-
-func (r *ReportCodec) BuildReport(ctx context.Context, rf v4.ReportFields) (ocrtypes.Report, error) {
- var merr error
- if rf.BenchmarkPrice == nil {
- merr = errors.Join(merr, errors.New("benchmarkPrice may not be nil"))
- }
- if rf.LinkFee == nil {
- merr = errors.Join(merr, errors.New("linkFee may not be nil"))
- } else if rf.LinkFee.Cmp(zero) < 0 {
- merr = errors.Join(merr, fmt.Errorf("linkFee may not be negative (got: %s)", rf.LinkFee))
- }
- if rf.NativeFee == nil {
- merr = errors.Join(merr, errors.New("nativeFee may not be nil"))
- } else if rf.NativeFee.Cmp(zero) < 0 {
- merr = errors.Join(merr, fmt.Errorf("nativeFee may not be negative (got: %s)", rf.NativeFee))
- }
- if merr != nil {
- return nil, merr
- }
- reportBytes, err := ReportTypes.Pack(r.feedID, rf.ValidFromTimestamp, rf.Timestamp, rf.NativeFee, rf.LinkFee, rf.ExpiresAt, rf.BenchmarkPrice, rf.MarketStatus)
- return ocrtypes.Report(reportBytes), pkgerrors.Wrap(err, "failed to pack report blob")
-}
-
-func (r *ReportCodec) MaxReportLength(ctx context.Context, n int) (int, error) {
- return maxReportLength, nil
-}
-
-func (r *ReportCodec) ObservationTimestampFromReport(ctx context.Context, report ocrtypes.Report) (uint32, error) {
- decoded, err := r.Decode(ctx, report)
- if err != nil {
- return 0, err
- }
- return decoded.ObservationsTimestamp, nil
-}
-
-func (r *ReportCodec) Decode(ctx context.Context, report ocrtypes.Report) (*reporttypes.Report, error) {
- return reporttypes.Decode(report)
-}
-
-func (r *ReportCodec) BenchmarkPriceFromReport(ctx context.Context, report ocrtypes.Report) (*big.Int, error) {
- decoded, err := r.Decode(ctx, report)
- if err != nil {
- return nil, err
- }
- return decoded.BenchmarkPrice, nil
-}
diff --git a/core/services/relay/evm/mercury/v4/reportcodec/report_codec_test.go b/core/services/relay/evm/mercury/v4/reportcodec/report_codec_test.go
deleted file mode 100644
index 9813d422cc1..00000000000
--- a/core/services/relay/evm/mercury/v4/reportcodec/report_codec_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package reportcodec
-
-import (
- "math/big"
- "testing"
-
- "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
- "github.com/smartcontractkit/chainlink-common/pkg/utils/tests"
-)
-
-func newValidReportFields() v4.ReportFields {
- return v4.ReportFields{
- Timestamp: 242,
- BenchmarkPrice: big.NewInt(243),
- ValidFromTimestamp: 123,
- ExpiresAt: 20,
- LinkFee: big.NewInt(456),
- NativeFee: big.NewInt(457),
- MarketStatus: 1,
- }
-}
-
-func Test_ReportCodec_BuildReport(t *testing.T) {
- r := ReportCodec{}
-
- t.Run("BuildReport errors on zero values", func(t *testing.T) {
- ctx := tests.Context(t)
- _, err := r.BuildReport(ctx, v4.ReportFields{})
- require.Error(t, err)
- assert.Contains(t, err.Error(), "benchmarkPrice may not be nil")
- assert.Contains(t, err.Error(), "linkFee may not be nil")
- assert.Contains(t, err.Error(), "nativeFee may not be nil")
- })
-
- t.Run("BuildReport constructs a report from observations", func(t *testing.T) {
- ctx := tests.Context(t)
- rf := newValidReportFields()
- // only need to test happy path since validations are done in relaymercury
-
- report, err := r.BuildReport(ctx, rf)
- require.NoError(t, err)
-
- reportElems := make(map[string]interface{})
- err = ReportTypes.UnpackIntoMap(reportElems, report)
- require.NoError(t, err)
-
- assert.Equal(t, int(reportElems["observationsTimestamp"].(uint32)), 242)
- assert.Equal(t, reportElems["benchmarkPrice"].(*big.Int).Int64(), int64(243))
- assert.Equal(t, reportElems["validFromTimestamp"].(uint32), uint32(123))
- assert.Equal(t, reportElems["expiresAt"].(uint32), uint32(20))
- assert.Equal(t, reportElems["linkFee"].(*big.Int).Int64(), int64(456))
- assert.Equal(t, reportElems["nativeFee"].(*big.Int).Int64(), int64(457))
- assert.Equal(t, reportElems["marketStatus"].(uint32), uint32(1))
-
- assert.Equal(t, types.Report{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, report)
- max, err := r.MaxReportLength(ctx, 4)
- require.NoError(t, err)
- assert.LessOrEqual(t, len(report), max)
-
- t.Run("Decode decodes the report", func(t *testing.T) {
- ctx := tests.Context(t)
- decoded, err := r.Decode(ctx, report)
- require.NoError(t, err)
-
- require.NotNil(t, decoded)
-
- assert.Equal(t, uint32(242), decoded.ObservationsTimestamp)
- assert.Equal(t, big.NewInt(243), decoded.BenchmarkPrice)
- assert.Equal(t, uint32(123), decoded.ValidFromTimestamp)
- assert.Equal(t, uint32(20), decoded.ExpiresAt)
- assert.Equal(t, big.NewInt(456), decoded.LinkFee)
- assert.Equal(t, big.NewInt(457), decoded.NativeFee)
- assert.Equal(t, uint32(1), decoded.MarketStatus)
- })
- })
-
- t.Run("errors on negative fee", func(t *testing.T) {
- ctx := tests.Context(t)
- rf := newValidReportFields()
- rf.LinkFee = big.NewInt(-1)
- rf.NativeFee = big.NewInt(-1)
- _, err := r.BuildReport(ctx, rf)
- require.Error(t, err)
-
- assert.Contains(t, err.Error(), "linkFee may not be negative (got: -1)")
- assert.Contains(t, err.Error(), "nativeFee may not be negative (got: -1)")
- })
-
- t.Run("Decode errors on invalid report", func(t *testing.T) {
- ctx := tests.Context(t)
- _, err := r.Decode(ctx, []byte{1, 2, 3})
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
-
- longBad := make([]byte, 64)
- for i := 0; i < len(longBad); i++ {
- longBad[i] = byte(i)
- }
- _, err = r.Decode(ctx, longBad)
- assert.EqualError(t, err, "failed to decode report: abi: improperly encoded uint32 value")
- })
-}
-
-func buildSampleReport(ts int64) []byte {
- feedID := [32]byte{'f', 'o', 'o'}
- timestamp := uint32(ts)
- bp := big.NewInt(242)
- validFromTimestamp := uint32(123)
- expiresAt := uint32(456)
- linkFee := big.NewInt(3334455)
- nativeFee := big.NewInt(556677)
- marketStatus := uint32(1)
-
- b, err := ReportTypes.Pack(feedID, validFromTimestamp, timestamp, nativeFee, linkFee, expiresAt, bp, marketStatus)
- if err != nil {
- panic(err)
- }
- return b
-}
-
-func Test_ReportCodec_ObservationTimestampFromReport(t *testing.T) {
- r := ReportCodec{}
-
- t.Run("ObservationTimestampFromReport extracts observation timestamp from a valid report", func(t *testing.T) {
- ctx := tests.Context(t)
- report := buildSampleReport(123)
-
- ts, err := r.ObservationTimestampFromReport(ctx, report)
- require.NoError(t, err)
-
- assert.Equal(t, ts, uint32(123))
- })
- t.Run("ObservationTimestampFromReport returns error when report is invalid", func(t *testing.T) {
- ctx := tests.Context(t)
- report := []byte{1, 2, 3}
-
- _, err := r.ObservationTimestampFromReport(ctx, report)
- require.Error(t, err)
-
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- })
-}
-
-func Test_ReportCodec_BenchmarkPriceFromReport(t *testing.T) {
- r := ReportCodec{}
-
- t.Run("BenchmarkPriceFromReport extracts the benchmark price from valid report", func(t *testing.T) {
- ctx := tests.Context(t)
- report := buildSampleReport(123)
-
- bp, err := r.BenchmarkPriceFromReport(ctx, report)
- require.NoError(t, err)
-
- assert.Equal(t, big.NewInt(242), bp)
- })
- t.Run("BenchmarkPriceFromReport errors on invalid report", func(t *testing.T) {
- ctx := tests.Context(t)
- _, err := r.BenchmarkPriceFromReport(ctx, []byte{1, 2, 3})
- require.Error(t, err)
- assert.EqualError(t, err, "failed to decode report: abi: cannot marshal in to go type: length insufficient 3 require 32")
- })
-}
diff --git a/core/services/relay/evm/mercury/v4/types/types.go b/core/services/relay/evm/mercury/v4/types/types.go
deleted file mode 100644
index 584836c1e9b..00000000000
--- a/core/services/relay/evm/mercury/v4/types/types.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package reporttypes
-
-import (
- "fmt"
- "math/big"
-
- "github.com/ethereum/go-ethereum/accounts/abi"
-)
-
-var schema = GetSchema()
-
-func GetSchema() abi.Arguments {
- mustNewType := func(t string) abi.Type {
- result, err := abi.NewType(t, "", []abi.ArgumentMarshaling{})
- if err != nil {
- panic(fmt.Sprintf("Unexpected error during abi.NewType: %s", err))
- }
- return result
- }
- return abi.Arguments([]abi.Argument{
- {Name: "feedId", Type: mustNewType("bytes32")},
- {Name: "validFromTimestamp", Type: mustNewType("uint32")},
- {Name: "observationsTimestamp", Type: mustNewType("uint32")},
- {Name: "nativeFee", Type: mustNewType("uint192")},
- {Name: "linkFee", Type: mustNewType("uint192")},
- {Name: "expiresAt", Type: mustNewType("uint32")},
- {Name: "benchmarkPrice", Type: mustNewType("int192")},
- {Name: "marketStatus", Type: mustNewType("uint32")},
- })
-}
-
-type Report struct {
- FeedId [32]byte
- ObservationsTimestamp uint32
- BenchmarkPrice *big.Int
- ValidFromTimestamp uint32
- ExpiresAt uint32
- LinkFee *big.Int
- NativeFee *big.Int
- MarketStatus uint32
-}
-
-// Decode is made available to external users (i.e. mercury server)
-func Decode(report []byte) (*Report, error) {
- values, err := schema.Unpack(report)
- if err != nil {
- return nil, fmt.Errorf("failed to decode report: %w", err)
- }
- decoded := new(Report)
- if err = schema.Copy(decoded, values); err != nil {
- return nil, fmt.Errorf("failed to copy report values to struct: %w", err)
- }
- return decoded, nil
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache.go b/core/services/relay/evm/mercury/wsrpc/cache/cache.go
deleted file mode 100644
index 0e2a2d3215a..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/cache/cache.go
+++ /dev/null
@@ -1,399 +0,0 @@
-package cache
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "github.com/jpillora/backoff"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-var (
- promFetchFailedCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_cache_fetch_failure_count",
- Help: "Number of times we tried to call LatestReport from the mercury server, but some kind of error occurred",
- },
- []string{"serverURL", "feedID"},
- )
- promCacheHitCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_cache_hit_count",
- Help: "Running count of cache hits",
- },
- []string{"serverURL", "feedID"},
- )
- promCacheWaitCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_cache_wait_count",
- Help: "Running count of times that we had to wait for a fetch to complete before reading from cache",
- },
- []string{"serverURL", "feedID"},
- )
- promCacheMissCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_cache_miss_count",
- Help: "Running count of cache misses",
- },
- []string{"serverURL", "feedID"},
- )
-)
-
-type Fetcher interface {
- LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error)
-}
-
-type Client interface {
- Fetcher
- ServerURL() string
- RawClient() pb.MercuryClient
-}
-
-// Cache is scoped to one particular mercury server
-// Use CacheSet to hold lookups for multiple servers
-type Cache interface {
- Fetcher
- services.Service
-}
-
-type Config struct {
- // LatestReportTTL controls how "stale" we will allow a price to be e.g. if
- // set to 1s, a new price will always be fetched if the last result was
- // from more than 1 second ago.
- //
- // Another way of looking at it is such: the cache will _never_ return a
- // price that was queried from before now-LatestReportTTL.
- //
- // Setting to zero disables caching entirely.
- LatestReportTTL time.Duration
- // MaxStaleAge is that maximum amount of time that a value can be stale
- // before it is deleted from the cache (a form of garbage collection).
- //
- // This should generally be set to something much larger than
- // LatestReportTTL. Setting to zero disables garbage collection.
- MaxStaleAge time.Duration
- // LatestReportDeadline controls how long to wait for a response before
- // retrying. Setting this to zero will wait indefinitely.
- LatestReportDeadline time.Duration
-}
-
-func NewCache(lggr logger.Logger, client Client, cfg Config) Cache {
- return newMemCache(lggr, client, cfg)
-}
-
-type cacheVal struct {
- sync.RWMutex
-
- fetching bool
- fetchCh chan (struct{})
-
- val *pb.LatestReportResponse
- err error
-
- expiresAt time.Time
-}
-
-func (v *cacheVal) read() (*pb.LatestReportResponse, error) {
- v.RLock()
- defer v.RUnlock()
- return v.val, v.err
-}
-
-// caller expected to hold lock
-func (v *cacheVal) initiateFetch() <-chan struct{} {
- if v.fetching {
- panic("cannot initiateFetch on cache val that is already fetching")
- }
- v.fetching = true
- v.fetchCh = make(chan struct{})
- return v.fetchCh
-}
-
-func (v *cacheVal) setError(err error) {
- v.Lock()
- defer v.Unlock()
- v.err = err
-}
-
-func (v *cacheVal) completeFetch(val *pb.LatestReportResponse, err error, expiresAt time.Time) {
- v.Lock()
- defer v.Unlock()
- if !v.fetching {
- panic("can only completeFetch on cache val that is fetching")
- }
- v.val = val
- v.err = err
- if err == nil {
- v.expiresAt = expiresAt
- }
- close(v.fetchCh)
- v.fetchCh = nil
- v.fetching = false
-}
-
-func (v *cacheVal) abandonFetch(err error) {
- v.completeFetch(nil, err, time.Now())
-}
-
-func (v *cacheVal) waitForResult(ctx context.Context, chResult <-chan struct{}, chStop <-chan struct{}) (*pb.LatestReportResponse, error) {
- select {
- case <-ctx.Done():
- _, err := v.read()
- return nil, errors.Join(err, ctx.Err())
- case <-chStop:
- return nil, errors.New("stopped")
- case <-chResult:
- return v.read()
- }
-}
-
-// memCache stores values in memory
-// it will never return a stale value older than latestPriceTTL, instead
-// waiting for a successful fetch or caller context cancels, whichever comes
-// first
-type memCache struct {
- services.StateMachine
- lggr logger.SugaredLogger
-
- client Client
-
- cfg Config
-
- cache sync.Map
-
- wg sync.WaitGroup
- chStop services.StopChan
-}
-
-func newMemCache(lggr logger.Logger, client Client, cfg Config) *memCache {
- return &memCache{
- services.StateMachine{},
- logger.Sugared(lggr).Named("MemCache").Named(client.ServerURL()),
- client,
- cfg,
- sync.Map{},
- sync.WaitGroup{},
- make(chan (struct{})),
- }
-}
-
-// LatestReport
-// NOTE: This will actually block on all types of errors, even non-timeouts.
-// Context should be set carefully and timed to be the maximum time we are
-// willing to wait for a result, the background thread will keep re-querying
-// until it gets one even on networking errors etc.
-func (m *memCache) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) {
- if req == nil {
- return nil, errors.New("req must not be nil")
- }
- feedIDHex := mercuryutils.BytesToFeedID(req.FeedId).String()
- if m.cfg.LatestReportTTL <= 0 {
- return m.client.RawClient().LatestReport(ctx, req)
- }
- vi, loaded := m.cache.LoadOrStore(feedIDHex, &cacheVal{
- sync.RWMutex{},
- false,
- nil,
- nil,
- nil,
- time.Now(), // first result is always "expired" and requires fetch
- })
- v := vi.(*cacheVal)
-
- m.lggr.Tracew("LatestReport", "feedID", feedIDHex, "loaded", loaded)
-
- // HOT PATH
- v.RLock()
- if time.Now().Before(v.expiresAt) {
- // CACHE HIT
- promCacheHitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
- m.lggr.Tracew("LatestReport CACHE HIT (hot path)", "feedID", feedIDHex)
-
- defer v.RUnlock()
- return v.val, nil
- } else if v.fetching {
- // CACHE WAIT
- promCacheWaitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
- m.lggr.Tracew("LatestReport CACHE WAIT (hot path)", "feedID", feedIDHex)
- // if someone else is fetching then wait for the fetch to complete
- ch := v.fetchCh
- v.RUnlock()
- return v.waitForResult(ctx, ch, m.chStop)
- }
- // CACHE MISS
- promCacheMissCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
- // fallthrough to cold path and fetch
- v.RUnlock()
-
- // COLD PATH
- v.Lock()
- if time.Now().Before(v.expiresAt) {
- // CACHE HIT
- promCacheHitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
- m.lggr.Tracew("LatestReport CACHE HIT (cold path)", "feedID", feedIDHex)
- defer v.Unlock()
- return v.val, nil
- } else if v.fetching {
- // CACHE WAIT
- promCacheWaitCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
- m.lggr.Tracew("LatestReport CACHE WAIT (cold path)", "feedID", feedIDHex)
- // if someone else is fetching then wait for the fetch to complete
- ch := v.fetchCh
- v.Unlock()
- return v.waitForResult(ctx, ch, m.chStop)
- }
- // CACHE MISS
- promCacheMissCount.WithLabelValues(m.client.ServerURL(), feedIDHex).Inc()
- m.lggr.Tracew("LatestReport CACHE MISS (cold path)", "feedID", feedIDHex)
- // initiate the fetch and wait for result
- ch := v.initiateFetch()
- v.Unlock()
-
- ok := m.IfStarted(func() {
- m.wg.Add(1)
- go m.fetch(req, v)
- })
- if !ok {
- err := fmt.Errorf("memCache must be started, but is: %v", m.State())
- v.abandonFetch(err)
- return nil, err
- }
- return v.waitForResult(ctx, ch, m.chStop)
-}
-
-const minBackoffRetryInterval = 50 * time.Millisecond
-
-// newBackoff creates a backoff for retrying
-func (m *memCache) newBackoff() backoff.Backoff {
- min := minBackoffRetryInterval
- max := m.cfg.LatestReportTTL / 2
- if min > max {
- // avoid setting a min that is greater than max
- min = max
- }
- return backoff.Backoff{
- Min: min,
- Max: max,
- Factor: 2,
- Jitter: true,
- }
-}
-
-// fetch continually tries to call FetchLatestReport and write the result to v
-// it writes errors as they come up
-func (m *memCache) fetch(req *pb.LatestReportRequest, v *cacheVal) {
- defer m.wg.Done()
- b := m.newBackoff()
- memcacheCtx, cancel := m.chStop.NewCtx()
- defer cancel()
- var t time.Time
- var val *pb.LatestReportResponse
- var err error
- defer func() {
- v.completeFetch(val, err, t.Add(m.cfg.LatestReportTTL))
- }()
-
- for {
- t = time.Now()
-
- ctx := memcacheCtx
- cancel := func() {}
- if m.cfg.LatestReportDeadline > 0 {
- ctx, cancel = context.WithTimeoutCause(memcacheCtx, m.cfg.LatestReportDeadline, errors.New("latest report fetch deadline exceeded"))
- }
-
- // NOTE: must drop down to RawClient here otherwise we enter an
- // infinite loop of calling a client that calls back to this same cache
- // and on and on
- val, err = m.client.RawClient().LatestReport(ctx, req)
- cancel()
- v.setError(err)
- if memcacheCtx.Err() != nil {
- // stopped
- return
- } else if err != nil {
- m.lggr.Warnw("FetchLatestReport failed", "err", err)
- promFetchFailedCount.WithLabelValues(m.client.ServerURL(), mercuryutils.BytesToFeedID(req.FeedId).String()).Inc()
- select {
- case <-m.chStop:
- return
- case <-time.After(b.Duration()):
- continue
- }
- }
- return
- }
-}
-
-func (m *memCache) Start(context.Context) error {
- return m.StartOnce(m.Name(), func() error {
- m.lggr.Debugw("MemCache starting", "config", m.cfg, "serverURL", m.client.ServerURL())
- m.wg.Add(1)
- go m.runloop()
- return nil
- })
-}
-
-func (m *memCache) runloop() {
- defer m.wg.Done()
-
- if m.cfg.MaxStaleAge == 0 {
- return
- }
- t := services.NewTicker(m.cfg.MaxStaleAge)
- defer t.Stop()
-
- for {
- select {
- case <-t.C:
- m.cleanup()
- t.Reset()
- case <-m.chStop:
- return
- }
- }
-}
-
-// remove anything that has been stale for longer than maxStaleAge so that
-// cache doesn't grow forever and cause memory leaks
-//
-// NOTE: This should be concurrent-safe with LatestReport. The only time they
-// can race is if the cache item has expired past the stale age between
-// creation of the cache item and start of fetch. This is unlikely, and even if
-// it does occur, the worst case is that we discard a cache item early and
-// double fetch, which isn't bad at all.
-func (m *memCache) cleanup() {
- m.cache.Range(func(k, vi any) bool {
- v := vi.(*cacheVal)
- v.RLock()
- defer v.RUnlock()
- if v.fetching {
- // skip cleanup if fetching
- return true
- }
- if time.Now().After(v.expiresAt.Add(m.cfg.MaxStaleAge)) {
- // garbage collection
- m.cache.Delete(k)
- }
- return true
- })
-}
-
-func (m *memCache) Close() error {
- return m.StopOnce(m.Name(), func() error {
- close(m.chStop)
- m.wg.Wait()
- return nil
- })
-}
-func (m *memCache) HealthReport() map[string]error {
- return map[string]error{
- m.Name(): m.Ready(),
- }
-}
-func (m *memCache) Name() string { return m.lggr.Name() }
diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache_set.go b/core/services/relay/evm/mercury/wsrpc/cache/cache_set.go
deleted file mode 100644
index 689bfc87987..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/cache/cache_set.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package cache
-
-import (
- "context"
- "fmt"
- "sync"
-
- "golang.org/x/exp/maps"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
-)
-
-// CacheSet holds a set of mercury caches keyed by server URL
-type CacheSet interface {
- services.Service
- Get(ctx context.Context, client Client) (Fetcher, error)
-}
-
-var _ CacheSet = (*cacheSet)(nil)
-
-type cacheSet struct {
- sync.RWMutex
- services.StateMachine
-
- lggr logger.SugaredLogger
- caches map[string]Cache
-
- cfg Config
-}
-
-func NewCacheSet(lggr logger.Logger, cfg Config) CacheSet {
- return newCacheSet(lggr, cfg)
-}
-
-func newCacheSet(lggr logger.Logger, cfg Config) *cacheSet {
- return &cacheSet{
- sync.RWMutex{},
- services.StateMachine{},
- logger.Sugared(lggr).Named("CacheSet"),
- make(map[string]Cache),
- cfg,
- }
-}
-
-func (cs *cacheSet) Start(context.Context) error {
- return cs.StartOnce("CacheSet", func() error {
- cs.lggr.Debugw("CacheSet starting", "config", cs.cfg, "cachingEnabled", cs.cfg.LatestReportTTL > 0)
- return nil
- })
-}
-
-func (cs *cacheSet) Close() error {
- return cs.StopOnce("CacheSet", func() error {
- cs.Lock()
- defer cs.Unlock()
- caches := maps.Values(cs.caches)
- if err := services.MultiCloser(caches).Close(); err != nil {
- return err
- }
- cs.caches = nil
- return nil
- })
-}
-
-func (cs *cacheSet) Get(ctx context.Context, client Client) (f Fetcher, err error) {
- if cs.cfg.LatestReportTTL == 0 {
- // caching disabled
- return nil, nil
- }
- ok := cs.IfStarted(func() {
- f, err = cs.get(ctx, client)
- })
- if !ok {
- return nil, fmt.Errorf("cacheSet must be started, but is: %v", cs.State())
- }
- return
-}
-
-func (cs *cacheSet) get(ctx context.Context, client Client) (Fetcher, error) {
- sURL := client.ServerURL()
- // HOT PATH
- cs.RLock()
- c, exists := cs.caches[sURL]
- cs.RUnlock()
- if exists {
- return c, nil
- }
-
- // COLD PATH
- cs.Lock()
- defer cs.Unlock()
- c, exists = cs.caches[sURL]
- if exists {
- return c, nil
- }
- c = newMemCache(cs.lggr, client, cs.cfg)
- if err := c.Start(ctx); err != nil {
- return nil, err
- }
- cs.caches[sURL] = c
- return c, nil
-}
-
-func (cs *cacheSet) HealthReport() map[string]error {
- report := map[string]error{
- cs.Name(): cs.Ready(),
- }
- cs.RLock()
- caches := maps.Values(cs.caches)
- cs.RUnlock()
- for _, c := range caches {
- services.CopyHealth(report, c.HealthReport())
- }
- return report
-}
-func (cs *cacheSet) Name() string { return cs.lggr.Name() }
diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache_set_test.go b/core/services/relay/evm/mercury/wsrpc/cache/cache_set_test.go
deleted file mode 100644
index f0085a2036f..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/cache/cache_set_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package cache
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
-)
-
-func Test_CacheSet(t *testing.T) {
- lggr := logger.Test(t)
- cs := newCacheSet(lggr, Config{LatestReportTTL: 1})
- disabledCs := newCacheSet(lggr, Config{LatestReportTTL: 0})
- ctx := testutils.Context(t)
- servicetest.Run(t, cs)
-
- t.Run("Get", func(t *testing.T) {
- c := &mockClient{}
-
- var err error
- var f Fetcher
- t.Run("with caching disabled, returns nil, nil", func(t *testing.T) {
- assert.Len(t, disabledCs.caches, 0)
-
- f, err = disabledCs.Get(ctx, c)
- require.NoError(t, err)
-
- assert.Nil(t, f)
- assert.Len(t, disabledCs.caches, 0)
- })
-
- t.Run("with virgin cacheset, makes new entry and returns it", func(t *testing.T) {
- assert.Len(t, cs.caches, 0)
-
- f, err = cs.Get(ctx, c)
- require.NoError(t, err)
-
- assert.IsType(t, f, &memCache{})
- assert.Len(t, cs.caches, 1)
- })
- t.Run("with existing cache for value, returns that", func(t *testing.T) {
- var f2 Fetcher
- assert.Len(t, cs.caches, 1)
-
- f2, err = cs.Get(ctx, c)
- require.NoError(t, err)
-
- assert.IsType(t, f, &memCache{})
- assert.Equal(t, f, f2)
- assert.Len(t, cs.caches, 1)
- })
- })
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/cache/cache_test.go b/core/services/relay/evm/mercury/wsrpc/cache/cache_test.go
deleted file mode 100644
index 61311e2e81a..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/cache/cache_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package cache
-
-import (
- "context"
- "errors"
- "strconv"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
-
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- mercuryutils "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/utils"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-const neverExpireTTL = 1000 * time.Hour // some massive value that will never expire during a test
-
-func Test_Cache(t *testing.T) {
- lggr := logger.Test(t)
- client := &mockClient{}
- cfg := Config{}
- ctx := testutils.Context(t)
-
- req1 := &pb.LatestReportRequest{FeedId: []byte{1}}
- req2 := &pb.LatestReportRequest{FeedId: []byte{2}}
- req3 := &pb.LatestReportRequest{FeedId: []byte{3}}
-
- feedID1Hex := mercuryutils.BytesToFeedID(req1.FeedId).String()
-
- t.Run("errors with nil req", func(t *testing.T) {
- c := newMemCache(lggr, client, cfg)
-
- _, err := c.LatestReport(ctx, nil)
- assert.EqualError(t, err, "req must not be nil")
- })
-
- t.Run("with LatestReportTTL=0 does no caching", func(t *testing.T) {
- c := newMemCache(lggr, client, cfg)
-
- req := &pb.LatestReportRequest{}
- for i := 0; i < 5; i++ {
- client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}
-
- resp, err := c.LatestReport(ctx, req)
- require.NoError(t, err)
- assert.Equal(t, client.resp, resp)
- }
-
- client.resp = nil
- client.err = errors.New("something exploded")
-
- resp, err := c.LatestReport(ctx, req)
- assert.EqualError(t, err, "something exploded")
- assert.Nil(t, resp)
- })
-
- t.Run("caches repeated calls to LatestReport, keyed by request", func(t *testing.T) {
- cfg.LatestReportTTL = neverExpireTTL
- client.err = nil
- c := newMemCache(lggr, client, cfg)
-
- t.Run("if cache is unstarted, returns error", func(t *testing.T) {
- // starting the cache is required for state management if we
- // actually cache results, since fetches are initiated async and
- // need to be cleaned up properly on close
- _, err := c.LatestReport(ctx, &pb.LatestReportRequest{})
- assert.EqualError(t, err, "memCache must be started, but is: Unstarted")
- })
-
- err := c.StartOnce("test start", func() error { return nil })
- require.NoError(t, err)
-
- t.Run("returns cached value for key", func(t *testing.T) {
- var firstResp *pb.LatestReportResponse
- for i := 0; i < 5; i++ {
- client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}
- if firstResp == nil {
- firstResp = client.resp
- }
-
- resp, err := c.LatestReport(ctx, req1)
- require.NoError(t, err)
- assert.Equal(t, firstResp, resp)
- }
- })
-
- t.Run("cache keys do not conflict", func(t *testing.T) {
- var firstResp1 *pb.LatestReportResponse
- for i := 5; i < 10; i++ {
- client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}
- if firstResp1 == nil {
- firstResp1 = client.resp
- }
-
- resp, err := c.LatestReport(ctx, req2)
- require.NoError(t, err)
- assert.Equal(t, firstResp1, resp)
- }
-
- var firstResp2 *pb.LatestReportResponse
- for i := 10; i < 15; i++ {
- client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(i))}}
- if firstResp2 == nil {
- firstResp2 = client.resp
- }
-
- resp, err := c.LatestReport(ctx, req3)
- require.NoError(t, err)
- assert.Equal(t, firstResp2, resp)
- }
-
- // req1 key still has same value
- resp, err := c.LatestReport(ctx, req1)
- require.NoError(t, err)
- assert.Equal(t, []byte(strconv.Itoa(0)), resp.Report.Price)
-
- // req2 key still has same value
- resp, err = c.LatestReport(ctx, req2)
- require.NoError(t, err)
- assert.Equal(t, []byte(strconv.Itoa(5)), resp.Report.Price)
- })
-
- t.Run("re-queries when a cache item has expired", func(t *testing.T) {
- vi, exists := c.cache.Load(feedID1Hex)
- require.True(t, exists)
- v := vi.(*cacheVal)
- v.expiresAt = time.Now().Add(-1 * time.Second)
-
- client.resp = &pb.LatestReportResponse{Report: &pb.Report{Price: []byte(strconv.Itoa(15))}}
-
- resp, err := c.LatestReport(ctx, req1)
- require.NoError(t, err)
- assert.Equal(t, client.resp, resp)
-
- // querying again yields the same cached item
- resp, err = c.LatestReport(ctx, req1)
- require.NoError(t, err)
- assert.Equal(t, client.resp, resp)
- })
- })
-
- t.Run("complete fetch", func(t *testing.T) {
- t.Run("does not change expiry if fetch returns error", func(t *testing.T) {
- expires := time.Now().Add(-1 * time.Second)
- v := &cacheVal{
- fetching: true,
- fetchCh: make(chan (struct{})),
- val: nil,
- err: nil,
- expiresAt: expires,
- }
- v.completeFetch(nil, errors.New("foo"), time.Now().Add(neverExpireTTL))
- assert.Equal(t, expires, v.expiresAt)
-
- v = &cacheVal{
- fetching: true,
- fetchCh: make(chan (struct{})),
- val: nil,
- err: nil,
- expiresAt: expires,
- }
- expires = time.Now().Add(neverExpireTTL)
- v.completeFetch(nil, nil, expires)
- assert.Equal(t, expires, v.expiresAt)
- })
- })
-
- t.Run("timeouts", func(t *testing.T) {
- c := newMemCache(lggr, client, cfg)
- // simulate fetch already executing in background
- v := &cacheVal{
- fetching: true,
- fetchCh: make(chan (struct{})),
- val: nil,
- err: nil,
- expiresAt: time.Now().Add(-1 * time.Second),
- }
- c.cache.Store(feedID1Hex, v)
-
- canceledCtx, cancel := context.WithCancel(testutils.Context(t))
- cancel()
-
- t.Run("returns context deadline exceeded error if fetch takes too long", func(t *testing.T) {
- _, err := c.LatestReport(canceledCtx, req1)
- require.Error(t, err)
- assert.True(t, errors.Is(err, context.Canceled))
- assert.EqualError(t, err, "context canceled")
- })
- t.Run("returns wrapped context deadline exceeded error if fetch has errored and is in the retry loop", func(t *testing.T) {
- v.err = errors.New("some background fetch error")
-
- _, err := c.LatestReport(canceledCtx, req1)
- require.Error(t, err)
- assert.True(t, errors.Is(err, context.Canceled))
- assert.EqualError(t, err, "some background fetch error\ncontext canceled")
- })
- })
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/cache/helpers_test.go b/core/services/relay/evm/mercury/wsrpc/cache/helpers_test.go
deleted file mode 100644
index 4cc08bdd52e..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/cache/helpers_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package cache
-
-import (
- "context"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-var _ Client = &mockClient{}
-
-type mockClient struct {
- resp *pb.LatestReportResponse
- err error
-}
-
-func (m *mockClient) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) {
- return m.resp, m.err
-}
-
-func (m *mockClient) ServerURL() string {
- return "mock client url"
-}
-
-func (m *mockClient) RawClient() pb.MercuryClient {
- return &mockRawClient{m.resp, m.err}
-}
-
-type mockRawClient struct {
- resp *pb.LatestReportResponse
- err error
-}
-
-func (m *mockRawClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- return nil, nil
-}
-func (m *mockRawClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) {
- return m.resp, m.err
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/client.go b/core/services/relay/evm/mercury/wsrpc/client.go
deleted file mode 100644
index 69db5508d77..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/client.go
+++ /dev/null
@@ -1,422 +0,0 @@
-package wsrpc
-
-import (
- "context"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/pkg/errors"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
- grpc_connectivity "google.golang.org/grpc/connectivity"
-
- "github.com/smartcontractkit/wsrpc"
- "github.com/smartcontractkit/wsrpc/connectivity"
-
- "github.com/smartcontractkit/chainlink-data-streams/rpc"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
- "github.com/smartcontractkit/chainlink/v2/core/services/llo/grpc"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/cache"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-// MaxConsecutiveRequestFailures controls how many consecutive requests are
-// allowed to time out before we reset the connection
-const MaxConsecutiveRequestFailures = 10
-
-var (
- timeoutCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_transmit_timeout_count",
- Help: "Running count of transmit timeouts",
- },
- []string{"serverURL"},
- )
- dialCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_dial_count",
- Help: "Running count of dials to mercury server",
- },
- []string{"serverURL"},
- )
- dialSuccessCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_dial_success_count",
- Help: "Running count of successful dials to mercury server",
- },
- []string{"serverURL"},
- )
- dialErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_dial_error_count",
- Help: "Running count of errored dials to mercury server",
- },
- []string{"serverURL"},
- )
- connectionResetCount = promauto.NewCounterVec(prometheus.CounterOpts{
- Name: "mercury_connection_reset_count",
- Help: fmt.Sprintf("Running count of times connection to mercury server has been reset (connection reset happens automatically after %d consecutive request failures)", MaxConsecutiveRequestFailures),
- },
- []string{"serverURL"},
- )
-)
-
-type Client interface {
- services.Service
- pb.MercuryClient
- ServerURL() string
- RawClient() pb.MercuryClient
-}
-
-type Conn interface {
- wsrpc.ClientInterface
- WaitForReady(ctx context.Context) bool
- GetState() grpc_connectivity.State
- Close() error
-}
-
-type DialWithContextFunc func(ctxCaller context.Context, target string, opts ...wsrpc.DialOption) (Conn, error)
-
-type client struct {
- services.StateMachine
-
- csaKey csakey.KeyV2
- serverPubKey []byte
- serverURL string
-
- dialWithContext DialWithContextFunc
-
- logger logger.SugaredLogger
- conn Conn
- rawClient pb.MercuryClient
- mu sync.RWMutex
-
- consecutiveTimeoutCnt atomic.Int32
- wg sync.WaitGroup
- chStop services.StopChan
- chResetTransport chan struct{}
-
- cacheSet cache.CacheSet
- cache cache.Fetcher
-
- timeoutCountMetric prometheus.Counter
- dialCountMetric prometheus.Counter
- dialSuccessCountMetric prometheus.Counter
- dialErrorCountMetric prometheus.Counter
- connectionResetCountMetric prometheus.Counter
-}
-
-type ClientOpts struct {
- Logger logger.SugaredLogger
- ClientPrivKey csakey.KeyV2
- ServerPubKey []byte
- ServerURL string
- CacheSet cache.CacheSet
-
- // DialWithContext allows optional dependency injection for testing
- DialWithContext DialWithContextFunc
-}
-
-// Consumers of wsrpc package should not usually call NewClient directly, but instead use the Pool
-func NewClient(opts ClientOpts) Client {
- return newClient(opts)
-}
-
-func newClient(opts ClientOpts) *client {
- var dialWithContext DialWithContextFunc
- if opts.DialWithContext != nil {
- dialWithContext = opts.DialWithContext
- } else {
- // NOTE: Wrap here since wsrpc.DialWithContext returns a concrete *wsrpc.Conn, not an interface
- dialWithContext = func(ctxCaller context.Context, target string, opts ...wsrpc.DialOption) (Conn, error) {
- conn, err := wsrpc.DialWithContext(ctxCaller, target, opts...)
- return conn, err
- }
- }
- return &client{
- dialWithContext: dialWithContext,
- csaKey: opts.ClientPrivKey,
- serverPubKey: opts.ServerPubKey,
- serverURL: opts.ServerURL,
- logger: opts.Logger.Named("WSRPC").Named(opts.ServerURL).With("serverURL", opts.ServerURL),
- chResetTransport: make(chan struct{}, 1),
- cacheSet: opts.CacheSet,
- chStop: make(services.StopChan),
- timeoutCountMetric: timeoutCount.WithLabelValues(opts.ServerURL),
- dialCountMetric: dialCount.WithLabelValues(opts.ServerURL),
- dialSuccessCountMetric: dialSuccessCount.WithLabelValues(opts.ServerURL),
- dialErrorCountMetric: dialErrorCount.WithLabelValues(opts.ServerURL),
- connectionResetCountMetric: connectionResetCount.WithLabelValues(opts.ServerURL),
- }
-}
-
-func (w *client) Start(ctx context.Context) error {
- return w.StartOnce("WSRPC Client", func() (err error) {
- // NOTE: This is not a mistake, dial is non-blocking so it should use a
- // background context, not the Start context
- if err = w.dial(context.Background()); err != nil {
- return err
- }
- w.cache, err = w.cacheSet.Get(ctx, w)
- if err != nil {
- return err
- }
- w.wg.Add(1)
- go w.runloop()
- return nil
- })
-}
-
-// NOTE: Dial is non-blocking, and will retry on an exponential backoff
-// in the background until close is called, or context is cancelled.
-// This is why we use the background context, not the start context here.
-//
-// Any transmits made while client is still trying to dial will fail
-// with error.
-func (w *client) dial(ctx context.Context, opts ...wsrpc.DialOption) error {
- w.dialCountMetric.Inc()
- conn, err := w.dialWithContext(ctx, w.serverURL,
- append(opts,
- wsrpc.WithTransportCreds(w.csaKey.Raw().Bytes(), w.serverPubKey),
- wsrpc.WithLogger(w.logger),
- )...,
- )
- if err != nil {
- w.dialErrorCountMetric.Inc()
- setLivenessMetric(false)
- return errors.Wrap(err, "failed to dial wsrpc client")
- }
- w.dialSuccessCountMetric.Inc()
- setLivenessMetric(true)
- w.mu.Lock()
- w.conn = conn
- w.rawClient = pb.NewMercuryClient(conn)
- w.mu.Unlock()
- return nil
-}
-
-func (w *client) runloop() {
- defer w.wg.Done()
- for {
- select {
- case <-w.chStop:
- return
- case <-w.chResetTransport:
- // Using channel here ensures we only have one reset in process at
- // any given time
- w.resetTransport()
- }
- }
-}
-
-// resetTransport disconnects and reconnects to the mercury server
-func (w *client) resetTransport() {
- w.connectionResetCountMetric.Inc()
- ok := w.IfStarted(func() {
- w.mu.RLock()
- defer w.mu.RUnlock()
- w.conn.Close() // Close is safe to call multiple times
- })
- if !ok {
- panic("resetTransport should never be called unless client is in 'started' state")
- }
- ctx, cancel := w.chStop.NewCtx()
- defer cancel()
- b := utils.NewRedialBackoff()
- for {
- // Will block until successful dial, or context is canceled (i.e. on close)
- err := w.dial(ctx, wsrpc.WithBlock())
- if err == nil {
- break
- }
- if ctx.Err() != nil {
- w.logger.Debugw("ResetTransport exiting due to client Close", "err", err)
- return
- }
- w.logger.Errorw("ResetTransport failed to redial", "err", err)
- time.Sleep(b.Duration())
- }
- w.logger.Info("ResetTransport successfully redialled")
-}
-
-func (w *client) Close() error {
- return w.StopOnce("WSRPC Client", func() error {
- close(w.chStop)
- w.mu.RLock()
- w.conn.Close()
- w.mu.RUnlock()
- w.wg.Wait()
- return nil
- })
-}
-
-func (w *client) Name() string {
- return w.logger.Name()
-}
-
-func (w *client) HealthReport() map[string]error {
- return map[string]error{w.Name(): w.Healthy()}
-}
-
-// Healthy if connected
-func (w *client) Healthy() (err error) {
- if err = w.StateMachine.Healthy(); err != nil {
- return err
- }
- state := w.conn.GetState()
- if state != grpc_connectivity.Ready {
- return errors.Errorf("client state should be %s; got %s", connectivity.Ready, state)
- }
- return nil
-}
-
-func (w *client) waitForReady(ctx context.Context) (err error) {
- ok := w.IfStarted(func() {
- if ready := w.conn.WaitForReady(ctx); !ready {
- err = errors.Errorf("websocket client not ready; got state: %v", w.conn.GetState())
- return
- }
- })
- if !ok {
- return errors.New("client is not started")
- }
- return
-}
-
-func (w *client) Transmit(ctx context.Context, req *pb.TransmitRequest) (resp *pb.TransmitResponse, err error) {
- ok := w.IfStarted(func() {
- w.logger.Trace("Transmit")
- start := time.Now()
- if err = w.waitForReady(ctx); err != nil {
- err = errors.Wrap(err, "Transmit call failed")
- return
- }
- w.mu.RLock()
- rc := w.rawClient
- w.mu.RUnlock()
- resp, err = rc.Transmit(ctx, req)
- w.handleTimeout(err)
- if err != nil {
- w.logger.Warnw("Transmit call failed due to networking error", "err", err, "resp", resp)
- incRequestStatusMetric(statusFailed)
- } else {
- w.logger.Tracew("Transmit call succeeded", "resp", resp)
- incRequestStatusMetric(statusSuccess)
- setRequestLatencyMetric(float64(time.Since(start).Milliseconds()))
- }
- })
- if !ok {
- err = errors.New("client is not started")
- }
- return
-}
-
-// hacky workaround to trap panics from buggy underlying wsrpc lib and restart
-// the connection from a known good state
-func (w *client) handlePanic(r interface{}) {
- w.chResetTransport <- struct{}{}
-}
-
-func (w *client) handleTimeout(err error) {
- if errors.Is(err, context.DeadlineExceeded) {
- w.timeoutCountMetric.Inc()
- cnt := w.consecutiveTimeoutCnt.Add(1)
- if cnt == MaxConsecutiveRequestFailures {
- w.logger.Errorf("Timed out on %d consecutive transmits, resetting transport", cnt)
- // NOTE: If we get at least MaxConsecutiveRequestFailures request
- // timeouts in a row, close and re-open the websocket connection.
- //
- // This *shouldn't* be necessary in theory (ideally, wsrpc would
- // handle it for us) but it acts as a "belts and braces" approach
- // to ensure we get a websocket connection back up and running
- // again if it gets itself into a bad state.
- select {
- case w.chResetTransport <- struct{}{}:
- default:
- // This can happen if we had MaxConsecutiveRequestFailures
- // consecutive timeouts, already sent a reset signal, then the
- // connection started working again (resetting the count) then
- // we got MaxConsecutiveRequestFailures additional failures
- // before the runloop was able to close the bad connection.
- //
- // It should be safe to just ignore in this case.
- //
- // Debug log in case my reasoning is wrong.
- w.logger.Debugf("Transport is resetting, cnt=%d", cnt)
- }
- }
- } else {
- w.consecutiveTimeoutCnt.Store(0)
- }
-}
-
-func (w *client) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) {
- ok := w.IfStarted(func() {
- lggr := w.logger.With("req.FeedId", hexutil.Encode(req.FeedId))
- lggr.Trace("LatestReport")
- if err = w.waitForReady(ctx); err != nil {
- err = errors.Wrap(err, "LatestReport failed")
- return
- }
- var cached bool
- if w.cache == nil {
- w.mu.RLock()
- rc := w.rawClient
- w.mu.RUnlock()
- resp, err = rc.LatestReport(ctx, req)
- w.handleTimeout(err)
- } else {
- cached = true
- resp, err = w.cache.LatestReport(ctx, req)
- }
- switch {
- case err != nil:
- lggr.Errorw("LatestReport failed", "err", err, "resp", resp, "cached", cached)
- case resp.Error != "":
- lggr.Errorw("LatestReport failed; mercury server returned error", "err", resp.Error, "resp", resp, "cached", cached)
- case !cached:
- lggr.Debugw("LatestReport succeeded", "resp", resp, "cached", cached)
- default:
- lggr.Tracew("LatestReport succeeded", "resp", resp, "cached", cached)
- }
- })
- if !ok {
- err = errors.New("client is not started")
- }
- return
-}
-
-func (w *client) ServerURL() string {
- return w.serverURL
-}
-
-func (w *client) RawClient() pb.MercuryClient {
- w.mu.RLock()
- defer w.mu.RUnlock()
- return w.rawClient
-}
-
-var _ grpc.Client = GRPCCompatibilityWrapper{}
-
-type GRPCCompatibilityWrapper struct {
- Client
-}
-
-func (w GRPCCompatibilityWrapper) Transmit(ctx context.Context, in *rpc.TransmitRequest) (*rpc.TransmitResponse, error) {
- req := &pb.TransmitRequest{
- Payload: in.Payload,
- ReportFormat: in.ReportFormat,
- }
- resp, err := w.Client.Transmit(ctx, req)
- if err != nil {
- return nil, err
- }
- return &rpc.TransmitResponse{
- Code: resp.Code,
- Error: resp.Error,
- }, nil
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/client_test.go b/core/services/relay/evm/mercury/wsrpc/client_test.go
deleted file mode 100644
index 2df752730d0..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/client_test.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package wsrpc
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/cache"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/mocks"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-// simulate start without dialling
-func simulateStart(ctx context.Context, t *testing.T, c *client) {
- require.NoError(t, c.StartOnce("Mock WSRPC Client", func() (err error) {
- c.cache, err = c.cacheSet.Get(ctx, c)
- return err
- }))
-}
-
-var _ cache.CacheSet = &mockCacheSet{}
-
-type mockCacheSet struct{}
-
-func (m *mockCacheSet) Get(ctx context.Context, client cache.Client) (cache.Fetcher, error) {
- return nil, nil
-}
-func (m *mockCacheSet) Start(context.Context) error { return nil }
-func (m *mockCacheSet) Ready() error { return nil }
-func (m *mockCacheSet) HealthReport() map[string]error { return nil }
-func (m *mockCacheSet) Name() string { return "" }
-func (m *mockCacheSet) Close() error { return nil }
-
-var _ cache.Cache = &mockCache{}
-
-type mockCache struct{}
-
-func (m *mockCache) LatestReport(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error) {
- return nil, nil
-}
-func (m *mockCache) Start(context.Context) error { return nil }
-func (m *mockCache) Ready() error { return nil }
-func (m *mockCache) HealthReport() map[string]error { return nil }
-func (m *mockCache) Name() string { return "" }
-func (m *mockCache) Close() error { return nil }
-
-func newNoopCacheSet() cache.CacheSet {
- return &mockCacheSet{}
-}
-
-func Test_Client_Transmit(t *testing.T) {
- lggr := logger.Test(t)
- ctx := testutils.Context(t)
- req := &pb.TransmitRequest{}
-
- noopCacheSet := newNoopCacheSet()
-
- t.Run("sends on reset channel after MaxConsecutiveRequestFailures timed out transmits", func(t *testing.T) {
- calls := 0
- transmitErr := context.DeadlineExceeded
- wsrpcClient := &mocks.MockWSRPCClient{
- TransmitF: func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- calls++
- return nil, transmitErr
- },
- }
- conn := &mocks.MockConn{
- Ready: true,
- }
- opts := ClientOpts{
- logger.Sugared(lggr),
- csakey.KeyV2{},
- nil,
- "",
- noopCacheSet,
- nil,
- }
- c := newClient(opts)
- c.conn = conn
- c.rawClient = wsrpcClient
- require.NoError(t, c.StartOnce("Mock WSRPC Client", func() error { return nil }))
- for i := 1; i < MaxConsecutiveRequestFailures; i++ {
- _, err := c.Transmit(ctx, req)
- require.EqualError(t, err, "context deadline exceeded")
- }
- assert.Equal(t, MaxConsecutiveRequestFailures-1, calls)
- select {
- case <-c.chResetTransport:
- t.Fatal("unexpected send on chResetTransport")
- default:
- }
- _, err := c.Transmit(ctx, req)
- require.EqualError(t, err, "context deadline exceeded")
- assert.Equal(t, MaxConsecutiveRequestFailures, calls)
- select {
- case <-c.chResetTransport:
- default:
- t.Fatal("expected send on chResetTransport")
- }
-
- t.Run("successful transmit resets the counter", func(t *testing.T) {
- transmitErr = nil
- // working transmit to reset counter
- _, err = c.Transmit(ctx, req)
- require.NoError(t, err)
- assert.Equal(t, MaxConsecutiveRequestFailures+1, calls)
- assert.Equal(t, 0, int(c.consecutiveTimeoutCnt.Load()))
- })
-
- t.Run("doesn't block in case channel is full", func(t *testing.T) {
- transmitErr = context.DeadlineExceeded
- c.chResetTransport = nil // simulate full channel
- for i := 0; i < MaxConsecutiveRequestFailures; i++ {
- _, err := c.Transmit(ctx, req)
- require.EqualError(t, err, "context deadline exceeded")
- }
- })
- })
-}
-
-func Test_Client_LatestReport(t *testing.T) {
- lggr := logger.Test(t)
- ctx := testutils.Context(t)
- cacheReads := 5
-
- tests := []struct {
- name string
- ttl time.Duration
- expectedCalls int
- }{
- {
- name: "with cache disabled",
- ttl: 0,
- expectedCalls: 5,
- },
- {
- name: "with cache enabled",
- ttl: 1000 * time.Hour, // some large value that will never expire during a test
- expectedCalls: 1,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- req := &pb.LatestReportRequest{}
-
- cacheSet := cache.NewCacheSet(lggr, cache.Config{LatestReportTTL: tt.ttl})
-
- resp := &pb.LatestReportResponse{}
-
- var calls int
- wsrpcClient := &mocks.MockWSRPCClient{
- LatestReportF: func(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) {
- calls++
- assert.Equal(t, req, in)
- return resp, nil
- },
- }
-
- conn := &mocks.MockConn{
- Ready: true,
- }
- c := newClient(ClientOpts{logger.Sugared(lggr), csakey.KeyV2{}, nil, "", cacheSet, nil})
- c.conn = conn
- c.rawClient = wsrpcClient
-
- servicetest.Run(t, cacheSet)
- simulateStart(ctx, t, c)
-
- for i := 0; i < cacheReads; i++ {
- r, err := c.LatestReport(ctx, req)
-
- require.NoError(t, err)
- assert.Equal(t, resp, r)
- }
- assert.Equal(t, tt.expectedCalls, calls, "expected %d calls to LatestReport but it was called %d times", tt.expectedCalls, calls)
- })
- }
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/metrics.go b/core/services/relay/evm/mercury/wsrpc/metrics.go
deleted file mode 100644
index 8c12184cd85..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/metrics.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package wsrpc
-
-import (
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-type reqStatus string
-
-const (
- statusSuccess reqStatus = "success"
- statusFailed reqStatus = "failed"
-)
-
-var (
- aliveMetric = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "mercury",
- Name: "wsrpc_connection_alive",
- Help: "Total time spent connected to the Mercury WSRPC server",
- })
- requestsStatusMetric = promauto.NewCounterVec(prometheus.CounterOpts{
- Namespace: "mercury",
- Name: "wsrpc_requests_status_count",
- Help: "Number of request status made to the Mercury WSRPC server",
- }, []string{"status"})
-
- requestLatencyMetric = promauto.NewHistogram(prometheus.HistogramOpts{
- Namespace: "mercury",
- Name: "wsrpc_request_latency",
- Help: "Latency of requests made to the Mercury WSRPC server",
- Buckets: []float64{10, 30, 100, 200, 250, 300, 350, 400, 500, 750, 1000, 3000, 10000},
- })
-)
-
-func setLivenessMetric(live bool) {
- if live {
- aliveMetric.Set(1)
- } else {
- aliveMetric.Set(0)
- }
-}
-
-func incRequestStatusMetric(status reqStatus) {
- requestsStatusMetric.WithLabelValues(string(status)).Inc()
-}
-
-func setRequestLatencyMetric(latency float64) {
- requestLatencyMetric.Observe(latency)
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/mocks/mocks.go b/core/services/relay/evm/mercury/wsrpc/mocks/mocks.go
deleted file mode 100644
index 199e0b49fa8..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/mocks/mocks.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package mocks
-
-import (
- "context"
-
- grpc_connectivity "google.golang.org/grpc/connectivity"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-type MockWSRPCClient struct {
- TransmitF func(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error)
- LatestReportF func(ctx context.Context, req *pb.LatestReportRequest) (resp *pb.LatestReportResponse, err error)
-}
-
-func (m *MockWSRPCClient) Name() string { return "" }
-func (m *MockWSRPCClient) Start(context.Context) error { return nil }
-func (m *MockWSRPCClient) Close() error { return nil }
-func (m *MockWSRPCClient) HealthReport() map[string]error { return map[string]error{} }
-func (m *MockWSRPCClient) Ready() error { return nil }
-func (m *MockWSRPCClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (*pb.TransmitResponse, error) {
- return m.TransmitF(ctx, in)
-}
-func (m *MockWSRPCClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (*pb.LatestReportResponse, error) {
- return m.LatestReportF(ctx, in)
-}
-func (m *MockWSRPCClient) ServerURL() string { return "mock server url" }
-
-func (m *MockWSRPCClient) RawClient() pb.MercuryClient { return nil }
-
-type MockConn struct {
- State grpc_connectivity.State
- Ready bool
- Closed bool
- InvokeF func(ctx context.Context, method string, args interface{}, reply interface{}) error
-}
-
-func (m *MockConn) Close() error {
- m.Closed = true
- return nil
-}
-func (m MockConn) WaitForReady(ctx context.Context) bool {
- return m.Ready
-}
-func (m MockConn) GetState() grpc_connectivity.State { return m.State }
-
-func (m MockConn) Invoke(ctx context.Context, method string, args interface{}, reply interface{}) error {
- return m.InvokeF(ctx, method, args, reply)
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/pb/generate.go b/core/services/relay/evm/mercury/wsrpc/pb/generate.go
deleted file mode 100644
index 2bb95012d1c..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/pb/generate.go
+++ /dev/null
@@ -1,2 +0,0 @@
-//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-wsrpc_out=. --go-wsrpc_opt=paths=source_relative mercury.proto
-package pb
diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go b/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go
deleted file mode 100644
index 4d7e6d5551d..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/pb/mercury.pb.go
+++ /dev/null
@@ -1,565 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.36.5
-// protoc v5.29.3
-// source: mercury.proto
-
-package pb
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
- unsafe "unsafe"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type TransmitRequest struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
- ReportFormat uint32 `protobuf:"varint,2,opt,name=reportFormat,proto3" json:"reportFormat,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *TransmitRequest) Reset() {
- *x = TransmitRequest{}
- mi := &file_mercury_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *TransmitRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TransmitRequest) ProtoMessage() {}
-
-func (x *TransmitRequest) ProtoReflect() protoreflect.Message {
- mi := &file_mercury_proto_msgTypes[0]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TransmitRequest.ProtoReflect.Descriptor instead.
-func (*TransmitRequest) Descriptor() ([]byte, []int) {
- return file_mercury_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *TransmitRequest) GetPayload() []byte {
- if x != nil {
- return x.Payload
- }
- return nil
-}
-
-func (x *TransmitRequest) GetReportFormat() uint32 {
- if x != nil {
- return x.ReportFormat
- }
- return 0
-}
-
-type TransmitResponse struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *TransmitResponse) Reset() {
- *x = TransmitResponse{}
- mi := &file_mercury_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *TransmitResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TransmitResponse) ProtoMessage() {}
-
-func (x *TransmitResponse) ProtoReflect() protoreflect.Message {
- mi := &file_mercury_proto_msgTypes[1]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TransmitResponse.ProtoReflect.Descriptor instead.
-func (*TransmitResponse) Descriptor() ([]byte, []int) {
- return file_mercury_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *TransmitResponse) GetCode() int32 {
- if x != nil {
- return x.Code
- }
- return 0
-}
-
-func (x *TransmitResponse) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-
-type LatestReportRequest struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- FeedId []byte `protobuf:"bytes,1,opt,name=feedId,proto3" json:"feedId,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *LatestReportRequest) Reset() {
- *x = LatestReportRequest{}
- mi := &file_mercury_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *LatestReportRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*LatestReportRequest) ProtoMessage() {}
-
-func (x *LatestReportRequest) ProtoReflect() protoreflect.Message {
- mi := &file_mercury_proto_msgTypes[2]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use LatestReportRequest.ProtoReflect.Descriptor instead.
-func (*LatestReportRequest) Descriptor() ([]byte, []int) {
- return file_mercury_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *LatestReportRequest) GetFeedId() []byte {
- if x != nil {
- return x.FeedId
- }
- return nil
-}
-
-type LatestReportResponse struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
- Report *Report `protobuf:"bytes,2,opt,name=report,proto3" json:"report,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *LatestReportResponse) Reset() {
- *x = LatestReportResponse{}
- mi := &file_mercury_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *LatestReportResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*LatestReportResponse) ProtoMessage() {}
-
-func (x *LatestReportResponse) ProtoReflect() protoreflect.Message {
- mi := &file_mercury_proto_msgTypes[3]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use LatestReportResponse.ProtoReflect.Descriptor instead.
-func (*LatestReportResponse) Descriptor() ([]byte, []int) {
- return file_mercury_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *LatestReportResponse) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-
-func (x *LatestReportResponse) GetReport() *Report {
- if x != nil {
- return x.Report
- }
- return nil
-}
-
-type Report struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- FeedId []byte `protobuf:"bytes,1,opt,name=feedId,proto3" json:"feedId,omitempty"`
- Price []byte `protobuf:"bytes,2,opt,name=price,proto3" json:"price,omitempty"`
- Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
- ValidFromBlockNumber int64 `protobuf:"varint,4,opt,name=validFromBlockNumber,proto3" json:"validFromBlockNumber,omitempty"`
- CurrentBlockNumber int64 `protobuf:"varint,5,opt,name=currentBlockNumber,proto3" json:"currentBlockNumber,omitempty"`
- CurrentBlockHash []byte `protobuf:"bytes,6,opt,name=currentBlockHash,proto3" json:"currentBlockHash,omitempty"`
- CurrentBlockTimestamp uint64 `protobuf:"varint,7,opt,name=currentBlockTimestamp,proto3" json:"currentBlockTimestamp,omitempty"`
- ObservationsTimestamp int64 `protobuf:"varint,8,opt,name=observationsTimestamp,proto3" json:"observationsTimestamp,omitempty"`
- ConfigDigest []byte `protobuf:"bytes,9,opt,name=configDigest,proto3" json:"configDigest,omitempty"`
- Epoch uint32 `protobuf:"varint,10,opt,name=epoch,proto3" json:"epoch,omitempty"`
- Round uint32 `protobuf:"varint,11,opt,name=round,proto3" json:"round,omitempty"`
- OperatorName string `protobuf:"bytes,12,opt,name=operatorName,proto3" json:"operatorName,omitempty"`
- TransmittingOperator []byte `protobuf:"bytes,13,opt,name=transmittingOperator,proto3" json:"transmittingOperator,omitempty"`
- CreatedAt *Timestamp `protobuf:"bytes,14,opt,name=createdAt,proto3" json:"createdAt,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *Report) Reset() {
- *x = Report{}
- mi := &file_mercury_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Report) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Report) ProtoMessage() {}
-
-func (x *Report) ProtoReflect() protoreflect.Message {
- mi := &file_mercury_proto_msgTypes[4]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Report.ProtoReflect.Descriptor instead.
-func (*Report) Descriptor() ([]byte, []int) {
- return file_mercury_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *Report) GetFeedId() []byte {
- if x != nil {
- return x.FeedId
- }
- return nil
-}
-
-func (x *Report) GetPrice() []byte {
- if x != nil {
- return x.Price
- }
- return nil
-}
-
-func (x *Report) GetPayload() []byte {
- if x != nil {
- return x.Payload
- }
- return nil
-}
-
-func (x *Report) GetValidFromBlockNumber() int64 {
- if x != nil {
- return x.ValidFromBlockNumber
- }
- return 0
-}
-
-func (x *Report) GetCurrentBlockNumber() int64 {
- if x != nil {
- return x.CurrentBlockNumber
- }
- return 0
-}
-
-func (x *Report) GetCurrentBlockHash() []byte {
- if x != nil {
- return x.CurrentBlockHash
- }
- return nil
-}
-
-func (x *Report) GetCurrentBlockTimestamp() uint64 {
- if x != nil {
- return x.CurrentBlockTimestamp
- }
- return 0
-}
-
-func (x *Report) GetObservationsTimestamp() int64 {
- if x != nil {
- return x.ObservationsTimestamp
- }
- return 0
-}
-
-func (x *Report) GetConfigDigest() []byte {
- if x != nil {
- return x.ConfigDigest
- }
- return nil
-}
-
-func (x *Report) GetEpoch() uint32 {
- if x != nil {
- return x.Epoch
- }
- return 0
-}
-
-func (x *Report) GetRound() uint32 {
- if x != nil {
- return x.Round
- }
- return 0
-}
-
-func (x *Report) GetOperatorName() string {
- if x != nil {
- return x.OperatorName
- }
- return ""
-}
-
-func (x *Report) GetTransmittingOperator() []byte {
- if x != nil {
- return x.TransmittingOperator
- }
- return nil
-}
-
-func (x *Report) GetCreatedAt() *Timestamp {
- if x != nil {
- return x.CreatedAt
- }
- return nil
-}
-
-// Taken from: https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/timestamp.proto
-type Timestamp struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-func (x *Timestamp) Reset() {
- *x = Timestamp{}
- mi := &file_mercury_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *Timestamp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Timestamp) ProtoMessage() {}
-
-func (x *Timestamp) ProtoReflect() protoreflect.Message {
- mi := &file_mercury_proto_msgTypes[5]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead.
-func (*Timestamp) Descriptor() ([]byte, []int) {
- return file_mercury_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *Timestamp) GetSeconds() int64 {
- if x != nil {
- return x.Seconds
- }
- return 0
-}
-
-func (x *Timestamp) GetNanos() int32 {
- if x != nil {
- return x.Nanos
- }
- return 0
-}
-
-var File_mercury_proto protoreflect.FileDescriptor
-
-var file_mercury_proto_rawDesc = string([]byte{
- 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x02, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
- 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x6f,
- 0x72, 0x6d, 0x61, 0x74, 0x22, 0x3c, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x22, 0x2d, 0x0a, 0x13, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f,
- 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x65, 0x65,
- 0x64, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x65, 0x65, 0x64, 0x49,
- 0x64, 0x22, 0x50, 0x0a, 0x14, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12,
- 0x22, 0x0a, 0x06, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x06, 0x72, 0x65, 0x70,
- 0x6f, 0x72, 0x74, 0x22, 0xa1, 0x04, 0x0a, 0x06, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x16,
- 0x0a, 0x06, 0x66, 0x65, 0x65, 0x64, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06,
- 0x66, 0x65, 0x65, 0x64, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x69, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x69, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07,
- 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70,
- 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x32, 0x0a, 0x14, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46,
- 0x72, 0x6f, 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x12, 0x63, 0x75,
- 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x75,
- 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x6f,
- 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x34, 0x0a, 0x15, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
- 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x15,
- 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6f, 0x62, 0x73,
- 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x69, 0x67, 0x65,
- 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18,
- 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x14, 0x0a, 0x05,
- 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x6f, 0x75,
- 0x6e, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x61,
- 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6d,
- 0x69, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x0d,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x63, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
- 0x70, 0x62, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14,
- 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e,
- 0x61, 0x6e, 0x6f, 0x73, 0x32, 0x83, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x72, 0x63, 0x75, 0x72, 0x79,
- 0x12, 0x35, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x12, 0x13, 0x2e, 0x70,
- 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6d, 0x69, 0x74, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x4c, 0x61, 0x74, 0x65, 0x73,
- 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x61, 0x74,
- 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6f,
- 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69,
- 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6d, 0x61, 0x72, 0x74, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6b, 0x69, 0x74, 0x2f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x6c,
- 0x69, 0x6e, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
- 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2f, 0x65, 0x76, 0x6d, 0x2f, 0x6d, 0x65, 0x72, 0x63, 0x75, 0x72,
- 0x79, 0x2f, 0x77, 0x73, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-})
-
-var (
- file_mercury_proto_rawDescOnce sync.Once
- file_mercury_proto_rawDescData []byte
-)
-
-func file_mercury_proto_rawDescGZIP() []byte {
- file_mercury_proto_rawDescOnce.Do(func() {
- file_mercury_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mercury_proto_rawDesc), len(file_mercury_proto_rawDesc)))
- })
- return file_mercury_proto_rawDescData
-}
-
-var file_mercury_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
-var file_mercury_proto_goTypes = []any{
- (*TransmitRequest)(nil), // 0: pb.TransmitRequest
- (*TransmitResponse)(nil), // 1: pb.TransmitResponse
- (*LatestReportRequest)(nil), // 2: pb.LatestReportRequest
- (*LatestReportResponse)(nil), // 3: pb.LatestReportResponse
- (*Report)(nil), // 4: pb.Report
- (*Timestamp)(nil), // 5: pb.Timestamp
-}
-var file_mercury_proto_depIdxs = []int32{
- 4, // 0: pb.LatestReportResponse.report:type_name -> pb.Report
- 5, // 1: pb.Report.createdAt:type_name -> pb.Timestamp
- 0, // 2: pb.Mercury.Transmit:input_type -> pb.TransmitRequest
- 2, // 3: pb.Mercury.LatestReport:input_type -> pb.LatestReportRequest
- 1, // 4: pb.Mercury.Transmit:output_type -> pb.TransmitResponse
- 3, // 5: pb.Mercury.LatestReport:output_type -> pb.LatestReportResponse
- 4, // [4:6] is the sub-list for method output_type
- 2, // [2:4] is the sub-list for method input_type
- 2, // [2:2] is the sub-list for extension type_name
- 2, // [2:2] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
-}
-
-func init() { file_mercury_proto_init() }
-func file_mercury_proto_init() {
- if File_mercury_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: unsafe.Slice(unsafe.StringData(file_mercury_proto_rawDesc), len(file_mercury_proto_rawDesc)),
- NumEnums: 0,
- NumMessages: 6,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_mercury_proto_goTypes,
- DependencyIndexes: file_mercury_proto_depIdxs,
- MessageInfos: file_mercury_proto_msgTypes,
- }.Build()
- File_mercury_proto = out.File
- file_mercury_proto_goTypes = nil
- file_mercury_proto_depIdxs = nil
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto b/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto
deleted file mode 100644
index 6b71404a6a6..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/pb/mercury.proto
+++ /dev/null
@@ -1,60 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/smartcontractkit/chainlink/v2/services/relay/evm/mercury/wsrpc/pb";
-
-package pb;
-
-service Mercury {
- rpc Transmit(TransmitRequest) returns (TransmitResponse);
- rpc LatestReport(LatestReportRequest) returns (LatestReportResponse);
-}
-
-message TransmitRequest {
- bytes payload = 1;
- uint32 reportFormat = 2;
-}
-
-message TransmitResponse {
- int32 code = 1;
- string error = 2;
-}
-
-message LatestReportRequest {
- bytes feedId = 1;
-}
-
-message LatestReportResponse {
- string error = 1;
- Report report = 2;
-}
-
-message Report {
- bytes feedId = 1;
- bytes price = 2;
- bytes payload = 3;
- int64 validFromBlockNumber = 4;
- int64 currentBlockNumber = 5;
- bytes currentBlockHash = 6;
- uint64 currentBlockTimestamp = 7;
- int64 observationsTimestamp = 8;
- bytes configDigest = 9;
- uint32 epoch = 10;
- uint32 round = 11;
- string operatorName = 12;
- bytes transmittingOperator = 13;
- Timestamp createdAt = 14;
-}
-
-// Taken from: https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/timestamp.proto
-message Timestamp {
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
- int64 seconds = 1;
-
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
- // inclusive.
- int32 nanos = 2;
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go b/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go
deleted file mode 100644
index 1e0a862f487..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/pb/mercury_wsrpc.pb.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Code generated by protoc-gen-go-wsrpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-wsrpc v0.0.1
-// - protoc v5.29.3
-
-package pb
-
-import (
- context "context"
- wsrpc "github.com/smartcontractkit/wsrpc"
-)
-
-// MercuryClient is the client API for Mercury service.
-type MercuryClient interface {
- Transmit(ctx context.Context, in *TransmitRequest) (*TransmitResponse, error)
- LatestReport(ctx context.Context, in *LatestReportRequest) (*LatestReportResponse, error)
-}
-
-type mercuryClient struct {
- cc wsrpc.ClientInterface
-}
-
-func NewMercuryClient(cc wsrpc.ClientInterface) MercuryClient {
- return &mercuryClient{cc}
-}
-
-func (c *mercuryClient) Transmit(ctx context.Context, in *TransmitRequest) (*TransmitResponse, error) {
- out := new(TransmitResponse)
- err := c.cc.Invoke(ctx, "Transmit", in, out)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *mercuryClient) LatestReport(ctx context.Context, in *LatestReportRequest) (*LatestReportResponse, error) {
- out := new(LatestReportResponse)
- err := c.cc.Invoke(ctx, "LatestReport", in, out)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// MercuryServer is the server API for Mercury service.
-type MercuryServer interface {
- Transmit(context.Context, *TransmitRequest) (*TransmitResponse, error)
- LatestReport(context.Context, *LatestReportRequest) (*LatestReportResponse, error)
-}
-
-func RegisterMercuryServer(s wsrpc.ServiceRegistrar, srv MercuryServer) {
- s.RegisterService(&Mercury_ServiceDesc, srv)
-}
-
-func _Mercury_Transmit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
- in := new(TransmitRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- return srv.(MercuryServer).Transmit(ctx, in)
-}
-
-func _Mercury_LatestReport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
- in := new(LatestReportRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- return srv.(MercuryServer).LatestReport(ctx, in)
-}
-
-// Mercury_ServiceDesc is the wsrpc.ServiceDesc for Mercury service.
-// It's only intended for direct use with wsrpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Mercury_ServiceDesc = wsrpc.ServiceDesc{
- ServiceName: "pb.Mercury",
- HandlerType: (*MercuryServer)(nil),
- Methods: []wsrpc.MethodDesc{
- {
- MethodName: "Transmit",
- Handler: _Mercury_Transmit_Handler,
- },
- {
- MethodName: "LatestReport",
- Handler: _Mercury_LatestReport_Handler,
- },
- },
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/pool.go b/core/services/relay/evm/mercury/wsrpc/pool.go
deleted file mode 100644
index 4cd3648e25e..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/pool.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package wsrpc
-
-import (
- "context"
- "errors"
- "sync"
-
- "github.com/smartcontractkit/wsrpc/credentials"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
-
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/cache"
- "github.com/smartcontractkit/chainlink/v2/core/utils"
-)
-
-var _ Client = &clientCheckout{}
-
-type clientCheckout struct {
- *connection // inherit all methods from client, with override on Start/Close
-}
-
-func (cco *clientCheckout) Start(_ context.Context) error {
- return nil
-}
-
-func (cco *clientCheckout) Close() error {
- cco.connection.checkin(cco)
- return nil
-}
-
-type connection struct {
- // Client will be nil when checkouts is empty, if len(checkouts) > 0 then it is expected to be a non-nil, started client
- Client
-
- lggr logger.Logger
- clientPrivKey csakey.KeyV2
- serverPubKey []byte
- serverURL string
-
- pool *pool
-
- checkouts []*clientCheckout // reference count, if this goes to zero the connection should be closed and *client nilified
-
- mu sync.Mutex
-}
-
-func (conn *connection) checkout(ctx context.Context) (cco *clientCheckout, err error) {
- conn.mu.Lock()
- defer conn.mu.Unlock()
- if err = conn.ensureStartedClient(ctx); err != nil {
- return nil, err
- }
- cco = &clientCheckout{conn}
- conn.checkouts = append(conn.checkouts, cco)
- return cco, nil
-}
-
-// not thread-safe, access must be serialized
-func (conn *connection) ensureStartedClient(ctx context.Context) error {
- if len(conn.checkouts) == 0 {
- conn.Client = conn.pool.newClient(ClientOpts{logger.Sugared(conn.lggr), conn.clientPrivKey, conn.serverPubKey, conn.serverURL, conn.pool.cacheSet, nil})
- return conn.Client.Start(ctx)
- }
- return nil
-}
-
-func (conn *connection) checkin(checkinCco *clientCheckout) {
- conn.mu.Lock()
- defer conn.mu.Unlock()
- var removed bool
- for i, cco := range conn.checkouts {
- if cco == checkinCco {
- conn.checkouts = utils.DeleteUnstable(conn.checkouts, i)
- removed = true
- break
- }
- }
- if !removed {
- panic("tried to check in client that was never checked out")
- }
- if len(conn.checkouts) == 0 {
- if err := conn.Client.Close(); err != nil {
- // programming error if we hit this
- panic(err)
- }
- conn.Client = nil
- conn.pool.remove(conn.serverURL, conn.clientPrivKey.StaticSizedPublicKey())
- }
-}
-
-func (conn *connection) forceCloseAll() (err error) {
- conn.mu.Lock()
- defer conn.mu.Unlock()
- if conn.Client != nil {
- err = conn.Client.Close()
- if errors.Is(err, utils.ErrAlreadyStopped) {
- // ignore error if it has already been stopped; no problem
- err = nil
- }
- conn.Client = nil
- conn.checkouts = nil
- }
- return
-}
-
-type Pool interface {
- services.Service
- // Checkout gets a wsrpc.Client for the given arguments
- // The same underlying client can be checked out multiple times, the pool
- // handles lifecycle management. The consumer can treat it as if it were
- // its own unique client.
- Checkout(ctx context.Context, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) (client Client, err error)
-}
-
-// WSRPC allows only one connection per client key per server
-type pool struct {
- lggr logger.Logger
- // server url => client public key => connection
- connections map[string]map[credentials.StaticSizedPublicKey]*connection
-
- // embedding newClient makes testing/mocking easier
- newClient func(opts ClientOpts) Client
-
- mu sync.RWMutex
-
- cacheSet cache.CacheSet
-
- closed bool
-}
-
-func NewPool(lggr logger.Logger, cacheCfg cache.Config) Pool {
- lggr = logger.Sugared(lggr).Named("Mercury.WSRPCPool")
- p := newPool(lggr)
- p.newClient = NewClient
- p.cacheSet = cache.NewCacheSet(lggr, cacheCfg)
- return p
-}
-
-func newPool(lggr logger.Logger) *pool {
- return &pool{
- lggr: lggr,
- connections: make(map[string]map[credentials.StaticSizedPublicKey]*connection),
- }
-}
-
-func (p *pool) Checkout(ctx context.Context, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) (client Client, err error) {
- clientPubKey := clientPrivKey.StaticSizedPublicKey()
-
- p.mu.Lock()
-
- if p.closed {
- p.mu.Unlock()
- return nil, errors.New("pool is closed")
- }
-
- server, exists := p.connections[serverURL]
- if !exists {
- server = make(map[credentials.StaticSizedPublicKey]*connection)
- p.connections[serverURL] = server
- }
- conn, exists := server[clientPubKey]
- if !exists {
- conn = p.newConnection(p.lggr, clientPrivKey, serverPubKey, serverURL)
- server[clientPubKey] = conn
- }
- p.mu.Unlock()
-
- // checkout outside of pool lock since it might take non-trivial time
- // the clientCheckout will be checked in again when its Close() method is called
- // this also should avoid deadlocks between conn.mu and pool.mu
- return conn.checkout(ctx)
-}
-
-// remove performs garbage collection on the connections map after connections are no longer used
-func (p *pool) remove(serverURL string, clientPubKey credentials.StaticSizedPublicKey) {
- p.mu.Lock()
- defer p.mu.Unlock()
- delete(p.connections[serverURL], clientPubKey)
- if len(p.connections[serverURL]) == 0 {
- delete(p.connections, serverURL)
- }
-}
-
-func (p *pool) newConnection(lggr logger.Logger, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) *connection {
- return &connection{
- lggr: lggr,
- clientPrivKey: clientPrivKey,
- serverPubKey: serverPubKey,
- serverURL: serverURL,
- pool: p,
- }
-}
-
-func (p *pool) Start(ctx context.Context) error {
- return p.cacheSet.Start(ctx)
-}
-
-func (p *pool) Close() (merr error) {
- p.mu.Lock()
- defer p.mu.Unlock()
- p.closed = true
- for _, clientPubKeys := range p.connections {
- for _, conn := range clientPubKeys {
- merr = errors.Join(merr, conn.forceCloseAll())
- }
- }
- merr = errors.Join(merr, p.cacheSet.Close())
- return
-}
-
-func (p *pool) Name() string {
- return p.lggr.Name()
-}
-
-func (p *pool) Ready() error {
- p.mu.RLock()
- defer p.mu.RUnlock()
- if p.closed {
- return errors.New("pool is closed")
- }
- return nil
-}
-
-func (p *pool) HealthReport() map[string]error {
- hp := map[string]error{p.Name(): p.Ready()}
- services.CopyHealth(hp, p.cacheSet.HealthReport())
- return hp
-}
diff --git a/core/services/relay/evm/mercury/wsrpc/pool_test.go b/core/services/relay/evm/mercury/wsrpc/pool_test.go
deleted file mode 100644
index a7015ed412e..00000000000
--- a/core/services/relay/evm/mercury/wsrpc/pool_test.go
+++ /dev/null
@@ -1,266 +0,0 @@
-package wsrpc
-
-import (
- "context"
- "math/big"
- "math/rand"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
-
- "github.com/smartcontractkit/chainlink-integrations/evm/utils"
- "github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
- "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/csakey"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury/wsrpc/pb"
-)
-
-var _ Client = &mockClient{}
-
-type mockClient struct {
- started bool
- closed bool
- rawClient pb.MercuryClient
-}
-
-func (c *mockClient) Transmit(ctx context.Context, in *pb.TransmitRequest) (out *pb.TransmitResponse, err error) {
- return
-}
-func (c *mockClient) LatestReport(ctx context.Context, in *pb.LatestReportRequest) (out *pb.LatestReportResponse, err error) {
- return
-}
-func (c *mockClient) Start(context.Context) error {
- c.started = true
- return nil
-}
-func (c *mockClient) Close() error {
- c.closed = true
- return nil
-}
-func (c *mockClient) Name() string { return "mock client" }
-func (c *mockClient) Ready() error { return nil }
-func (c *mockClient) HealthReport() map[string]error { return nil }
-func (c *mockClient) ServerURL() string { return "mock client url" }
-func (c *mockClient) RawClient() pb.MercuryClient { return c.rawClient }
-
-func newMockClient(lggr logger.Logger) *mockClient {
- return &mockClient{}
-}
-
-func Test_Pool(t *testing.T) {
- lggr := logger.Sugared(logger.Test(t)).Named("PoolTestLogger")
-
- ctx := testutils.Context(t)
-
- t.Run("Checkout", func(t *testing.T) {
- p := newPool(lggr)
- p.cacheSet = &mockCacheSet{}
-
- t.Run("checks out one started client", func(t *testing.T) {
- clientPrivKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63()))
- serverPubKey := utils.NewHash().Bytes()
- serverURL := "example.com:443/ws"
-
- client := newMockClient(lggr)
- p.newClient = func(opts ClientOpts) Client {
- assert.Equal(t, clientPrivKey, opts.ClientPrivKey)
- assert.Equal(t, serverPubKey, opts.ServerPubKey)
- assert.Equal(t, serverURL, opts.ServerURL)
- return client
- }
-
- c, err := p.Checkout(ctx, clientPrivKey, serverPubKey, serverURL)
- require.NoError(t, err)
-
- assert.True(t, client.started)
-
- require.IsType(t, &clientCheckout{}, c)
-
- conn := c.(*clientCheckout).connection
- require.Equal(t, conn.Client, client)
-
- assert.Len(t, conn.checkouts, 1)
- assert.Same(t, lggr, conn.lggr)
- assert.Equal(t, clientPrivKey, conn.clientPrivKey)
- assert.Equal(t, serverPubKey, conn.serverPubKey)
- assert.Equal(t, serverURL, conn.serverURL)
- assert.Same(t, p, conn.pool)
-
- t.Run("checks in the clientCheckout when Close is called", func(t *testing.T) {
- err := c.Close()
- require.NoError(t, err)
-
- assert.Len(t, conn.checkouts, 0)
- require.IsType(t, nil, conn.Client)
- assert.Nil(t, conn.Client)
- assert.True(t, client.closed)
- })
- })
-
- t.Run("checks out multiple started clients and only closes if all of the clients for a given pk/server pair are checked back in", func(t *testing.T) {
- clientPrivKeys := []csakey.KeyV2{
- csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())),
- csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())),
- }
- serverPubKey := utils.NewHash().Bytes()
- serverURLs := []string{
- "example.com:443/ws",
- "example.invalid:8000/ws",
- }
-
- p.newClient = func(opts ClientOpts) Client {
- return newMockClient(opts.Logger)
- }
-
- // conn 1
- c1 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0])
- c2 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0])
- c3 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0])
- assert.Len(t, p.connections, 1)
- assert.Len(t, p.connections[serverURLs[0]], 1)
- assert.Len(t, p.connections[serverURLs[1]], 0)
-
- // conn 2
- c4 := mustCheckout(t, p, clientPrivKeys[1], serverPubKey, serverURLs[0])
- assert.Len(t, p.connections, 1)
- assert.Len(t, p.connections[serverURLs[0]], 2)
- assert.Len(t, p.connections[serverURLs[1]], 0)
-
- // conn 3
- c5 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1])
- c6 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1])
- assert.Len(t, p.connections, 2)
- assert.Len(t, p.connections[serverURLs[0]], 2)
- assert.Len(t, p.connections[serverURLs[1]], 1)
-
- conn1 := c1.(*clientCheckout).connection
- assert.Same(t, conn1, c2.(*clientCheckout).connection)
- assert.Same(t, conn1, c3.(*clientCheckout).connection)
- assert.Len(t, conn1.checkouts, 3)
- assert.True(t, conn1.Client.(*mockClient).started)
-
- conn2 := c4.(*clientCheckout).connection
- assert.NotEqual(t, conn1, conn2)
- assert.Len(t, conn2.checkouts, 1)
- assert.True(t, conn2.Client.(*mockClient).started)
-
- conn3 := c5.(*clientCheckout).connection
- assert.NotEqual(t, conn1, conn3)
- assert.NotEqual(t, conn2, conn3)
- assert.Same(t, conn3, c6.(*clientCheckout).connection)
- assert.Len(t, conn3.checkouts, 2)
- assert.True(t, conn3.Client.(*mockClient).started)
-
- require.NoError(t, c1.Close())
- assert.Len(t, conn1.checkouts, 2)
- assert.NotNil(t, conn1.Client)
- assert.Len(t, p.connections, 2)
- assert.Len(t, p.connections[serverURLs[0]], 2)
- assert.Len(t, p.connections[serverURLs[1]], 1)
-
- require.NoError(t, c2.Close())
- assert.Len(t, conn1.checkouts, 1)
- assert.NotNil(t, conn1.Client)
- assert.Len(t, p.connections, 2)
- assert.Len(t, p.connections[serverURLs[0]], 2)
- assert.Len(t, p.connections[serverURLs[1]], 1)
-
- require.NoError(t, c3.Close())
- assert.Len(t, conn1.checkouts, 0)
- assert.Nil(t, conn1.Client)
- assert.Len(t, p.connections, 2)
- assert.Len(t, p.connections[serverURLs[0]], 1)
- assert.Len(t, p.connections[serverURLs[1]], 1)
-
- c7 := mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0])
- // Not the same one, since previously all checkouts were checked in, the original connection was deleted from the map and a new one created
- assert.NotSame(t, conn1, c7.(*clientCheckout).connection)
- assert.Len(t, conn1.checkouts, 0) // actually, conn1 has already been removed from the map and will be garbage collected
- conn4 := c7.(*clientCheckout).connection
- assert.Len(t, conn4.checkouts, 1)
- assert.NotNil(t, conn4.Client)
- assert.Len(t, p.connections, 2)
- assert.Len(t, p.connections[serverURLs[0]], 2)
- assert.Len(t, p.connections[serverURLs[1]], 1)
-
- require.NoError(t, c7.Close())
- assert.Len(t, p.connections, 2)
- assert.Len(t, p.connections[serverURLs[0]], 1)
- assert.Len(t, p.connections[serverURLs[1]], 1)
-
- require.NoError(t, c4.Close())
- assert.Len(t, p.connections, 1)
- assert.Len(t, p.connections[serverURLs[0]], 0)
- assert.Len(t, p.connections[serverURLs[1]], 1)
-
- require.NoError(t, c5.Close())
- require.NoError(t, c6.Close())
- assert.Len(t, p.connections, 0)
-
- require.NoError(t, p.Close())
- })
- })
-
- p := newPool(lggr)
- p.cacheSet = &mockCacheSet{}
-
- t.Run("Name", func(t *testing.T) {
- assert.Equal(t, "PoolTestLogger", p.Name())
- })
- t.Run("Start", func(t *testing.T) {
- require.NoError(t, p.Start(ctx))
- assert.Nil(t, p.Ready())
- assert.Nil(t, p.HealthReport()["PoolTestLogger"])
- })
- t.Run("Close force closes all connections", func(t *testing.T) {
- clientPrivKeys := []csakey.KeyV2{
- csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())),
- csakey.MustNewV2XXXTestingOnly(big.NewInt(rand.Int63())),
- }
- serverPubKey := utils.NewHash().Bytes()
- serverURLs := []string{
- "example.com:443/ws",
- "example.invalid:8000/ws",
- }
-
- var clients []*mockClient
- p.newClient = func(opts ClientOpts) Client {
- c := newMockClient(opts.Logger)
- clients = append(clients, c)
- return c
- }
-
- // conn 1
- mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0])
- mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0])
- mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[0])
-
- // conn 2
- mustCheckout(t, p, clientPrivKeys[1], serverPubKey, serverURLs[0])
-
- // conn 3
- mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1])
- mustCheckout(t, p, clientPrivKeys[0], serverPubKey, serverURLs[1])
-
- for _, c := range clients {
- assert.True(t, c.started)
- assert.False(t, c.closed)
- }
-
- require.NoError(t, p.Close())
- assert.EqualError(t, p.Ready(), "pool is closed")
- assert.EqualError(t, p.HealthReport()["PoolTestLogger"], "pool is closed")
-
- for _, c := range clients {
- assert.True(t, c.closed)
- }
- })
-}
-
-func mustCheckout(t *testing.T, p *pool, clientPrivKey csakey.KeyV2, serverPubKey []byte, serverURL string) Client {
- c, err := p.Checkout(testutils.Context(t), clientPrivKey, serverPubKey, serverURL)
- require.NoError(t, err)
- return c
-}
diff --git a/core/services/relay/evm/mercury_config_provider.go b/core/services/relay/evm/mercury_config_provider.go
deleted file mode 100644
index 53bf8e22d24..00000000000
--- a/core/services/relay/evm/mercury_config_provider.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package evm
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/ethereum/go-ethereum/common"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
-
- "github.com/smartcontractkit/chainlink/v2/core/chains/legacyevm"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
- "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/types"
-)
-
-func newMercuryConfigProvider(ctx context.Context, lggr logger.Logger, chain legacyevm.Chain, opts *types.RelayOpts) (commontypes.ConfigProvider, error) {
- if !common.IsHexAddress(opts.ContractID) {
- return nil, errors.New("invalid contractID, expected hex address")
- }
-
- aggregatorAddress := common.HexToAddress(opts.ContractID)
-
- relayConfig, err := opts.RelayConfig()
- if err != nil {
- return nil, fmt.Errorf("failed to get relay config: %w", err)
- }
- if relayConfig.FeedID == nil {
- return nil, errors.New("feed ID is required for tracking config on mercury contracts")
- }
- cp, err := mercury.NewConfigPoller(
- ctx,
- logger.Named(lggr, relayConfig.FeedID.String()),
- chain.LogPoller(),
- aggregatorAddress,
- *relayConfig.FeedID,
- // TODO: Does mercury need to support config contract? DF-19182
- )
- if err != nil {
- return nil, err
- }
-
- offchainConfigDigester := mercury.NewOffchainConfigDigester(*relayConfig.FeedID, chain.Config().EVM().ChainID(), aggregatorAddress, ocrtypes.ConfigDigestPrefixMercuryV02)
- return newConfigWatcher(lggr, aggregatorAddress, offchainConfigDigester, cp, chain, relayConfig.FromBlock, opts.New), nil
-}
diff --git a/core/services/relay/evm/mercury_provider.go b/core/services/relay/evm/mercury_provider.go
deleted file mode 100644
index 07e421c2136..00000000000
--- a/core/services/relay/evm/mercury_provider.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package evm
-
-import (
- "context"
- "errors"
-
- ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types"
-
- "github.com/smartcontractkit/chainlink-common/pkg/logger"
- "github.com/smartcontractkit/chainlink-common/pkg/services"
- commontypes "github.com/smartcontractkit/chainlink-common/pkg/types"
- mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury"
- v1 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v1"
- v2 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v2"
- v3 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v3"
- v4 "github.com/smartcontractkit/chainlink-common/pkg/types/mercury/v4"
-
- "github.com/smartcontractkit/chainlink-data-streams/mercury"
-
- "github.com/smartcontractkit/chainlink-integrations/evm/heads"
-
- evmmercury "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury"
-)
-
-var _ commontypes.MercuryProvider = (*mercuryProvider)(nil)
-
-type mercuryProvider struct {
- cp commontypes.ConfigProvider
- codec commontypes.Codec
- transmitter evmmercury.Transmitter
- reportCodecV1 v1.ReportCodec
- reportCodecV2 v2.ReportCodec
- reportCodecV3 v3.ReportCodec
- reportCodecV4 v4.ReportCodec
- mercuryChainReader mercurytypes.ChainReader
- logger logger.Logger
- ms services.MultiStart
-}
-
-func NewMercuryProvider(
- cp commontypes.ConfigProvider,
- codec commontypes.Codec,
- mercuryChainReader mercurytypes.ChainReader,
- transmitter evmmercury.Transmitter,
- reportCodecV1 v1.ReportCodec,
- reportCodecV2 v2.ReportCodec,
- reportCodecV3 v3.ReportCodec,
- reportCodecV4 v4.ReportCodec,
- lggr logger.Logger,
-) *mercuryProvider {
- return &mercuryProvider{
- cp,
- codec,
- transmitter,
- reportCodecV1,
- reportCodecV2,
- reportCodecV3,
- reportCodecV4,
- mercuryChainReader,
- lggr,
- services.MultiStart{},
- }
-}
-
-func (p *mercuryProvider) Start(ctx context.Context) error {
- return p.ms.Start(ctx, p.cp, p.transmitter)
-}
-
-func (p *mercuryProvider) Close() error {
- return p.ms.Close()
-}
-
-func (p *mercuryProvider) Ready() error {
- return errors.Join(p.cp.Ready(), p.transmitter.Ready())
-}
-
-func (p *mercuryProvider) Name() string {
- return p.logger.Name()
-}
-
-func (p *mercuryProvider) HealthReport() map[string]error {
- report := map[string]error{}
- services.CopyHealth(report, p.cp.HealthReport())
- services.CopyHealth(report, p.transmitter.HealthReport())
- return report
-}
-
-func (p *mercuryProvider) MercuryChainReader() mercurytypes.ChainReader {
- return p.mercuryChainReader
-}
-
-func (p *mercuryProvider) Codec() commontypes.Codec {
- return p.codec
-}
-
-func (p *mercuryProvider) ContractConfigTracker() ocrtypes.ContractConfigTracker {
- return p.cp.ContractConfigTracker()
-}
-
-func (p *mercuryProvider) OffchainConfigDigester() ocrtypes.OffchainConfigDigester {
- return p.cp.OffchainConfigDigester()
-}
-
-func (p *mercuryProvider) OnchainConfigCodec() mercurytypes.OnchainConfigCodec {
- return mercury.StandardOnchainConfigCodec{}
-}
-
-func (p *mercuryProvider) ReportCodecV1() v1.ReportCodec {
- return p.reportCodecV1
-}
-
-func (p *mercuryProvider) ReportCodecV2() v2.ReportCodec {
- return p.reportCodecV2
-}
-
-func (p *mercuryProvider) ReportCodecV3() v3.ReportCodec {
- return p.reportCodecV3
-}
-
-func (p *mercuryProvider) ReportCodecV4() v4.ReportCodec {
- return p.reportCodecV4
-}
-
-func (p *mercuryProvider) ContractTransmitter() ocrtypes.ContractTransmitter {
- return p.transmitter
-}
-
-func (p *mercuryProvider) MercuryServerFetcher() mercurytypes.ServerFetcher {
- return p.transmitter
-}
-
-func (p *mercuryProvider) ContractReader() commontypes.ContractReader {
- return nil
-}
-
-var _ mercurytypes.ChainReader = (*mercuryChainReader)(nil)
-
-type mercuryChainReader struct {
- tracker heads.Tracker
-}
-
-func NewChainReader(h heads.Tracker) mercurytypes.ChainReader {
- return &mercuryChainReader{h}
-}
-
-func NewMercuryChainReader(h heads.Tracker) mercurytypes.ChainReader {
- return &mercuryChainReader{
- tracker: h,
- }
-}
-
-func (r *mercuryChainReader) LatestHeads(ctx context.Context, k int) ([]mercurytypes.Head, error) {
- evmBlocks := r.tracker.LatestChain().AsSlice(k)
- if len(evmBlocks) == 0 {
- return nil, nil
- }
-
- blocks := make([]mercurytypes.Head, len(evmBlocks))
- for x := 0; x < len(evmBlocks); x++ {
- blocks[x] = mercurytypes.Head{
- Number: uint64(evmBlocks[x].BlockNumber()),
- Hash: evmBlocks[x].Hash.Bytes(),
- Timestamp: uint64(evmBlocks[x].Timestamp.Unix()),
- }
- }
-
- return blocks, nil
-}
diff --git a/core/services/relay/evm/types/types.go b/core/services/relay/evm/types/types.go
index f03008f1c14..b38fdeb7646 100644
--- a/core/services/relay/evm/types/types.go
+++ b/core/services/relay/evm/types/types.go
@@ -184,7 +184,6 @@ func (r *ReadType) UnmarshalText(text []byte) error {
type LLOConfigMode string
const (
- LLOConfigModeMercury LLOConfigMode = "mercury"
LLOConfigModeBlueGreen LLOConfigMode = "bluegreen"
)
@@ -192,6 +191,16 @@ func (c LLOConfigMode) String() string {
return string(c)
}
+func (c *LLOConfigMode) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "", "bluegreen":
+ *c = LLOConfigModeBlueGreen
+ default:
+ return fmt.Errorf("unrecognized LLOConfigMode: %s", string(text))
+ }
+ return nil
+}
+
type DualTransmissionConfig struct {
ContractAddress common.Address `json:"contractAddress" toml:"contractAddress"`
TransmitterAddress common.Address `json:"transmitterAddress" toml:"transmitterAddress"`
@@ -212,12 +221,6 @@ type RelayConfig struct {
// Contract-specific
SendingKeys pq.StringArray `json:"sendingKeys"`
- // Mercury-specific
- FeedID *common.Hash `json:"feedID"`
- EnableTriggerCapability bool `json:"enableTriggerCapability"`
- TriggerCapabilityName string `json:"triggerCapabilityName"`
- TriggerCapabilityVersion string `json:"triggerCapabilityVersion"`
-
// LLO-specific
LLODONID uint32 `json:"lloDonID" toml:"lloDonID"`
LLOConfigMode LLOConfigMode `json:"lloConfigMode" toml:"lloConfigMode"`
diff --git a/core/services/relay/evm/types/types_test.go b/core/services/relay/evm/types/types_test.go
index c8bda7e355f..28861b22cde 100644
--- a/core/services/relay/evm/types/types_test.go
+++ b/core/services/relay/evm/types/types_test.go
@@ -13,7 +13,6 @@ import (
"github.com/smartcontractkit/chainlink-common/pkg/codec"
evmtypes "github.com/smartcontractkit/chainlink-integrations/evm/types"
- "github.com/smartcontractkit/chainlink-integrations/evm/utils"
"github.com/smartcontractkit/chainlink/v2/core/internal/testutils"
"github.com/smartcontractkit/chainlink/v2/core/store/models"
)
@@ -24,18 +23,13 @@ import (
// // Contract-specific
// EffectiveTransmitterAddress null.String `json:"effectiveTransmitterAddress"`
// SendingKeys pq.StringArray `json:"sendingKeys"`
-
-// // Mercury-specific
-// FeedID *common.Hash `json:"feedID"`
func Test_RelayConfig(t *testing.T) {
cid := testutils.NewRandomEVMChainID()
fromBlock := uint64(2222)
- feedID := utils.NewHash()
rawToml := fmt.Sprintf(`
ChainID = "%s"
FromBlock = %d
-FeedID = "0x%x"
-`, cid, fromBlock, feedID[:])
+`, cid, fromBlock)
var rc RelayConfig
err := toml.Unmarshal([]byte(rawToml), &rc)
@@ -43,7 +37,6 @@ FeedID = "0x%x"
assert.Equal(t, cid.String(), rc.ChainID.String())
assert.Equal(t, fromBlock, rc.FromBlock)
- assert.Equal(t, feedID.Hex(), rc.FeedID.Hex())
}
func Test_ChainReaderConfig(t *testing.T) {
diff --git a/core/services/relay/evm/write_target_test.go b/core/services/relay/evm/write_target_test.go
index c4712d69970..5c6e4add81a 100644
--- a/core/services/relay/evm/write_target_test.go
+++ b/core/services/relay/evm/write_target_test.go
@@ -270,7 +270,7 @@ func TestEvmWrite(t *testing.T) {
require.NoError(t, err2)
c.EVM[0].Workflow.ForwarderAddress = &forwarderAddr
})
- testChain.On("ID").Return(big.NewInt(11155111))
+ testChain.On("ID").Return(big.NewInt(11155111)).Maybe()
testChain.On("Config").Return(evmtest.NewChainScopedConfig(t, testCfg))
capabilityRegistry := evmcapabilities.NewRegistry(lggr)
diff --git a/core/store/migrate/migrations/0266_remove_feed_id.sql b/core/store/migrate/migrations/0266_remove_feed_id.sql
new file mode 100644
index 00000000000..98ccef8dee3
--- /dev/null
+++ b/core/store/migrate/migrations/0266_remove_feed_id.sql
@@ -0,0 +1,12 @@
+-- +goose Up
+-- +goose StatementBegin
+ALTER TABLE ocr2_oracle_specs DROP COLUMN feed_id;
+ALTER TABLE bootstrap_specs DROP COLUMN feed_id;
+-- +goose StatementEnd
+
+
+-- +goose Down
+-- +goose StatementBegin
+ALTER TABLE ocr2_oracle_specs ADD COLUMN feed_id bytea CHECK (feed_id IS NULL OR octet_length(feed_id) = 32);
+ALTER TABLE bootstrap_specs ADD COLUMN feed_id bytea CHECK (feed_id IS NULL OR octet_length(feed_id) = 32);
+-- +goose StatementEnd
diff --git a/core/web/resolver/spec.go b/core/web/resolver/spec.go
index 3d9be365f92..ef31ac0cf92 100644
--- a/core/web/resolver/spec.go
+++ b/core/web/resolver/spec.go
@@ -600,12 +600,10 @@ func (r *OCR2SpecResolver) TransmitterID() *string {
}
// FeedID resolves the spec's feed ID
+// Deprecated: FeedID is no longer used for anything, but continue to return a
+// nil value in graphQL for compatibility with operator UI
func (r *OCR2SpecResolver) FeedID() *string {
- if r.spec.FeedID == nil {
- return nil
- }
- feedID := r.spec.FeedID.String()
- return &feedID
+ return nil
}
func (r *OCR2SpecResolver) AllowNoBootstrappers() bool {
diff --git a/core/web/resolver/testdata/config-empty-effective.toml b/core/web/resolver/testdata/config-empty-effective.toml
index 50c53108afe..dc15e2a315d 100644
--- a/core/web/resolver/testdata/config-empty-effective.toml
+++ b/core/web/resolver/testdata/config-empty-effective.toml
@@ -227,11 +227,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/core/web/resolver/testdata/config-full.toml b/core/web/resolver/testdata/config-full.toml
index fd50a37db62..cf0163b3525 100644
--- a/core/web/resolver/testdata/config-full.toml
+++ b/core/web/resolver/testdata/config-full.toml
@@ -237,11 +237,6 @@ test = 'load'
[Mercury]
VerboseLogging = true
-[Mercury.Cache]
-LatestReportTTL = '1m40s'
-MaxStaleAge = '1m41s'
-LatestReportDeadline = '1m42s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/core/web/resolver/testdata/config-multi-chain-effective.toml b/core/web/resolver/testdata/config-multi-chain-effective.toml
index 75cbfa5d6f8..d56f781af80 100644
--- a/core/web/resolver/testdata/config-multi-chain-effective.toml
+++ b/core/web/resolver/testdata/config-multi-chain-effective.toml
@@ -227,11 +227,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/core/web/testdata/body/health.html b/core/web/testdata/body/health.html
index b8f8cd43028..6fb1c08d98a 100644
--- a/core/web/testdata/body/health.html
+++ b/core/web/testdata/body/health.html
@@ -93,15 +93,6 @@
Monitor
-
- Mercury
-
- WSRPCPool
-
- CacheSet
-
-
-
PipelineORM
diff --git a/core/web/testdata/body/health.json b/core/web/testdata/body/health.json
index 39aa690219a..b6365b31916 100644
--- a/core/web/testdata/body/health.json
+++ b/core/web/testdata/body/health.json
@@ -162,24 +162,6 @@
"output": ""
}
},
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool",
- "attributes": {
- "name": "Mercury.WSRPCPool",
- "status": "passing",
- "output": ""
- }
- },
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool.CacheSet",
- "attributes": {
- "name": "Mercury.WSRPCPool.CacheSet",
- "status": "passing",
- "output": ""
- }
- },
{
"type": "checks",
"id": "PipelineORM",
diff --git a/core/web/testdata/body/health.txt b/core/web/testdata/body/health.txt
index 3b0da89f6fb..0d2663066fa 100644
--- a/core/web/testdata/body/health.txt
+++ b/core/web/testdata/body/health.txt
@@ -17,8 +17,6 @@ ok Heartbeat
ok JobSpawner
ok LLOTransmissionReaper
ok Mailbox.Monitor
-ok Mercury.WSRPCPool
-ok Mercury.WSRPCPool.CacheSet
ok PipelineORM
ok PipelineRunner
ok PipelineRunner.BridgeCache
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index 2f6b5e9b51e..77cc98f5f24 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -1933,45 +1933,6 @@ VerboseLogging enables detailed logging of mercury/LLO operations. These logs
can be expensive since they may serialize large structs, so they are disabled
by default.
-## Mercury.Cache
-```toml
-[Mercury.Cache]
-LatestReportTTL = "1s" # Default
-MaxStaleAge = "1h" # Default
-LatestReportDeadline = "5s" # Default
-```
-Mercury.Cache controls settings for the price retrieval cache querying a mercury server
-
-### LatestReportTTL
-```toml
-LatestReportTTL = "1s" # Default
-```
-LatestReportTTL controls how "stale" we will allow a price to be e.g. if
-set to 1s, a new price will always be fetched if the last result was
-from 1 second ago or older.
-
-Another way of looking at it is such: the cache will _never_ return a
-price that was queried from now-LatestReportTTL or before.
-
-Setting to zero disables caching entirely.
-
-### MaxStaleAge
-```toml
-MaxStaleAge = "1h" # Default
-```
-MaxStaleAge is that maximum amount of time that a value can be stale
-before it is deleted from the cache (a form of garbage collection).
-
-This should generally be set to something much larger than
-LatestReportTTL. Setting to zero disables garbage collection.
-
-### LatestReportDeadline
-```toml
-LatestReportDeadline = "5s" # Default
-```
-LatestReportDeadline controls how long to wait for a response from the
-mercury server before retrying. Setting this to zero will wait indefinitely.
-
## Mercury.TLS
```toml
[Mercury.TLS]
@@ -2003,8 +1964,7 @@ Protocol = "grpc" # Default
```
Protocol is the protocol to use for the transmitter.
-Options are either:
-- "wsrpc" for the legacy websocket protocol
+Options are currently:
- "grpc" for the gRPC protocol
### TransmitQueueMaxSize
diff --git a/testdata/scripts/config/merge_raw_configs.txtar b/testdata/scripts/config/merge_raw_configs.txtar
index 2d762e7357e..0ff085aa758 100644
--- a/testdata/scripts/config/merge_raw_configs.txtar
+++ b/testdata/scripts/config/merge_raw_configs.txtar
@@ -374,11 +374,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/health/default.txtar b/testdata/scripts/health/default.txtar
index 7a935fa84a9..b9983d54467 100644
--- a/testdata/scripts/health/default.txtar
+++ b/testdata/scripts/health/default.txtar
@@ -36,8 +36,6 @@ ok Heartbeat
ok JobSpawner
ok LLOTransmissionReaper
ok Mailbox.Monitor
-ok Mercury.WSRPCPool
-ok Mercury.WSRPCPool.CacheSet
ok PipelineORM
ok PipelineRunner
ok PipelineRunner.BridgeCache
@@ -93,24 +91,6 @@ ok WorkflowDBStore
"output": ""
}
},
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool",
- "attributes": {
- "name": "Mercury.WSRPCPool",
- "status": "passing",
- "output": ""
- }
- },
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool.CacheSet",
- "attributes": {
- "name": "Mercury.WSRPCPool.CacheSet",
- "status": "passing",
- "output": ""
- }
- },
{
"type": "checks",
"id": "PipelineORM",
diff --git a/testdata/scripts/health/multi-chain-loopp.txtar b/testdata/scripts/health/multi-chain-loopp.txtar
index 1bcccad9af8..4ca99de134e 100644
--- a/testdata/scripts/health/multi-chain-loopp.txtar
+++ b/testdata/scripts/health/multi-chain-loopp.txtar
@@ -96,8 +96,6 @@ ok Heartbeat
ok JobSpawner
ok LLOTransmissionReaper
ok Mailbox.Monitor
-ok Mercury.WSRPCPool
-ok Mercury.WSRPCPool.CacheSet
ok PipelineORM
ok PipelineRunner
ok PipelineRunner.BridgeCache
@@ -341,24 +339,6 @@ ok WorkflowDBStore
"output": ""
}
},
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool",
- "attributes": {
- "name": "Mercury.WSRPCPool",
- "status": "passing",
- "output": ""
- }
- },
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool.CacheSet",
- "attributes": {
- "name": "Mercury.WSRPCPool.CacheSet",
- "status": "passing",
- "output": ""
- }
- },
{
"type": "checks",
"id": "PipelineORM",
diff --git a/testdata/scripts/health/multi-chain.txtar b/testdata/scripts/health/multi-chain.txtar
index f4867602710..03d233fc831 100644
--- a/testdata/scripts/health/multi-chain.txtar
+++ b/testdata/scripts/health/multi-chain.txtar
@@ -73,8 +73,6 @@ ok Heartbeat
ok JobSpawner
ok LLOTransmissionReaper
ok Mailbox.Monitor
-ok Mercury.WSRPCPool
-ok Mercury.WSRPCPool.CacheSet
ok PipelineORM
ok PipelineRunner
ok PipelineRunner.BridgeCache
@@ -255,24 +253,6 @@ ok WorkflowDBStore
"output": ""
}
},
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool",
- "attributes": {
- "name": "Mercury.WSRPCPool",
- "status": "passing",
- "output": ""
- }
- },
- {
- "type": "checks",
- "id": "Mercury.WSRPCPool.CacheSet",
- "attributes": {
- "name": "Mercury.WSRPCPool.CacheSet",
- "status": "passing",
- "output": ""
- }
- },
{
"type": "checks",
"id": "PipelineORM",
diff --git a/testdata/scripts/node/validate/default.txtar b/testdata/scripts/node/validate/default.txtar
index 656fbcbcdca..f999a29ef29 100644
--- a/testdata/scripts/node/validate/default.txtar
+++ b/testdata/scripts/node/validate/default.txtar
@@ -239,11 +239,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/defaults-override.txtar b/testdata/scripts/node/validate/defaults-override.txtar
index 9f31a151c7f..ce20db9a99b 100644
--- a/testdata/scripts/node/validate/defaults-override.txtar
+++ b/testdata/scripts/node/validate/defaults-override.txtar
@@ -300,11 +300,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
index bb61ee2c053..075c9aa6791 100644
--- a/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-disabled.txtar
@@ -283,11 +283,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
index c4ba69f183c..a5b8603b090 100644
--- a/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging-no-dir.txtar
@@ -283,11 +283,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/disk-based-logging.txtar b/testdata/scripts/node/validate/disk-based-logging.txtar
index a2c96187680..b90cf37b28b 100644
--- a/testdata/scripts/node/validate/disk-based-logging.txtar
+++ b/testdata/scripts/node/validate/disk-based-logging.txtar
@@ -283,11 +283,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/fallback-override.txtar b/testdata/scripts/node/validate/fallback-override.txtar
index eba33a12164..579babc6ba7 100644
--- a/testdata/scripts/node/validate/fallback-override.txtar
+++ b/testdata/scripts/node/validate/fallback-override.txtar
@@ -377,11 +377,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar
index 6282757bd1e..7b757a31d59 100644
--- a/testdata/scripts/node/validate/invalid-ocr-p2p.txtar
+++ b/testdata/scripts/node/validate/invalid-ocr-p2p.txtar
@@ -268,11 +268,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/invalid.txtar b/testdata/scripts/node/validate/invalid.txtar
index fee1a7469be..a41d62932a3 100644
--- a/testdata/scripts/node/validate/invalid.txtar
+++ b/testdata/scripts/node/validate/invalid.txtar
@@ -273,11 +273,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/valid.txtar b/testdata/scripts/node/validate/valid.txtar
index 66e57c5b8b9..1e84b046619 100644
--- a/testdata/scripts/node/validate/valid.txtar
+++ b/testdata/scripts/node/validate/valid.txtar
@@ -280,11 +280,6 @@ TLSCertPath = ''
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''
diff --git a/testdata/scripts/node/validate/warnings.txtar b/testdata/scripts/node/validate/warnings.txtar
index 59156b25ba3..ad5b7a7cb28 100644
--- a/testdata/scripts/node/validate/warnings.txtar
+++ b/testdata/scripts/node/validate/warnings.txtar
@@ -262,11 +262,6 @@ TLSCertPath = 'something'
[Mercury]
VerboseLogging = false
-[Mercury.Cache]
-LatestReportTTL = '1s'
-MaxStaleAge = '1h0m0s'
-LatestReportDeadline = '5s'
-
[Mercury.TLS]
CertFile = ''