Merge pull request #47 from Rocket-Rescue-Node/jms/gardening
Add EL tests.
jshufro authored Nov 23, 2023
2 parents cdb15db + 7e8e18e commit 2bf5471
Showing 20 changed files with 1,939 additions and 37 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/golangci-lint.yml
@@ -17,7 +17,7 @@ jobs:
    name: lint
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
        with:
          go-version: 1.20.8
      - uses: actions/checkout@v3
6 changes: 3 additions & 3 deletions .github/workflows/tests.yml
@@ -14,14 +14,14 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@v4
        with:
          go-version: 1.20.8
-      - uses: arduino/setup-protoc@v1
+      - uses: arduino/setup-protoc@v2
      - run: |
          go install google.golang.org/protobuf/cmd/protoc-gen-go
-          go get google.golang.org/grpc/cmd/protoc-gen-go-grpc
+          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc
      - run: |
          make
-      - run: go test -v ./...
+      - run: go test ./...
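(Note: the `protoc-gen-go-grpc` line switches from `go get` to `go install` because, since Go 1.18, `go get` no longer builds or installs binaries in module mode; `go install` is the supported command for installing tools.)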
2 changes: 1 addition & 1 deletion LICENSE
@@ -1,4 +1,4 @@
-A reverse-proxy for the Rocket Pool Rescue Node
+A reverse-proxy for the Rocket Rescue Node
Copyright (C) 2022 Jacob Shufro and João Poupino

This program is free software: you can redistribute it and/or modify
16 changes: 12 additions & 4 deletions README.md
@@ -2,13 +2,16 @@

# Rescue-Proxy

-Rocket Pool Rescue Node's Rescue-Proxy is a custom reverse proxy meant to sit between a shared beacon node and its downstream users. It behaves like a normal reverse proxy with the following added features and protections:
+[Rocket Rescue Node](https://rescuenode.com)'s Rescue-Proxy is a custom reverse proxy meant to sit between a shared beacon node and its downstream users. It behaves like a normal reverse proxy with the following added features and protections:

1. HMAC authentication via HTTP Basic Auth / GRPC headers
1. Fee Recipient validation for Rocket Pool validator clients
1. Credential expiration
-1. Robust caching for frequently accessed immutable chain data
-1. GRPC support for Prysm; HTTP support for Nimbus, Lighthouse, and Teku
+1. Robust caching for frequently accessed immutable and mutable chain data
+1. GRPC support for Prysm; HTTP support for Nimbus, Lighthouse, Lodestar, and Teku
+1. Fee Recipient validation for Solo staker validator clients
+1. Detailed logging and [metrics](https://status.rescuenode.com).
+1. A gRPC API allowing [rescue-api](https://github.com/Rocket-Rescue-Node/rescue-api) to see views of currently active solo and Rocket Pool node operators

## Usage

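For context on the first feature in the list above, here is a minimal sketch of how a client might derive an HMAC credential and present it via HTTP Basic Auth. The message format, secret, username, and endpoint are hypothetical illustrations for the pattern, not the proxy's actual credential scheme.

```go
// Hypothetical sketch: HMAC-SHA256 credential presented over Basic Auth.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"net/http"
)

// signCredential derives a password by signing the username with a shared
// secret (cf. the -hmac-secret flag in the usage text below).
func signCredential(secret, username string) string {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(username))
	return base64.RawURLEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	req, _ := http.NewRequest("GET", "http://localhost:8080/eth/v1/node/version", nil)
	// The proxy would recompute the HMAC server-side and compare with
	// hmac.Equal before forwarding the request upstream.
	req.SetBasicAuth("node-operator", signCredential("test-secret", "node-operator"))
}
```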
@@ -30,15 +33,20 @@ Usage of ./rescue-proxy:
Whether to enable verbose logging
-ec-url string
URL to the execution client to use, eg, http://localhost:8545
+  -enable-solo-validators
+        Whether or not to allow solo validators access. (default true)
-grpc-addr string
Address on which to reply to gRPC requests
-grpc-beacon-addr string
Address to the beacon node to proxy for gRPC, eg, localhost:4000
-grpc-tls-cert-file string
Optional TLS Certificate for the gRPC host
-grpc-tls-key-file string
Optional TLS Key for the gRPC host
-hmac-secret string
The secret to use for HMAC (default "test-secret")
+  -rocketstorage-addr string
+        Address of the Rocket Storage contract. Defaults to mainnet (default "0x1d8f8f00cfa6758d7bE78336684788Fb0ee0Fa46")
```

* The `-grpc` flags should only be used with a Prysm beacon node.
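As a hedged illustration of the TLS options above: a client connecting to the proxy's gRPC listener might look like the sketch below. The endpoint, certificate path, and use of `grpc.Dial` are assumptions for the example, not the project's documented client API.

```go
// Hypothetical client-side sketch for the -grpc-addr / -grpc-tls-* options.
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Trust the certificate the server was started with (path invented).
	creds, err := credentials.NewClientTLSFromFile("server.crt", "")
	if err != nil {
		panic(err)
	}
	conn, err := grpc.Dial("rescue-proxy.example:8080", grpc.WithTransportCredentials(creds))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```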
61 changes: 39 additions & 22 deletions executionlayer/execution-layer.go
@@ -21,6 +21,7 @@ import (
"github.com/rocket-pool/rocketpool-go/rocketpool"
rptypes "github.com/rocket-pool/rocketpool-go/types"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)

type ForEachNodeClosure func(common.Address) bool
@@ -98,6 +99,8 @@ type CachingExecutionLayer struct {
shutdown func()

m *metrics.MetricsRegistry
+
+	connected chan bool
}

func (e *CachingExecutionLayer) setECShutdownCb(cb func()) {
@@ -191,19 +194,19 @@ func (e *CachingExecutionLayer) handleMinipoolEvent(event types.Log) {

// Grab its minipool (contract) address and use that to find its public key
minipoolAddr := common.BytesToAddress(event.Topics[1].Bytes())
-	minipoolDetails, err := minipool.GetMinipoolDetails(e.rp, minipoolAddr, nil)
+	pubkey, err := minipool.GetMinipoolPubkey(e.rp, minipoolAddr, nil)
if err != nil {
e.Logger.Warn("Error fetching minipool details for new minipools", zap.String("minipool", minipoolAddr.String()), zap.Error(err))
e.Logger.Warn("Error fetching minipool pubkey for new minipool", zap.String("minipool", minipoolAddr.String()), zap.Error(err))
return
}

// Finally, update the minipool index
-	err = e.cache.addMinipoolNode(minipoolDetails.Pubkey, nodeAddr)
+	err = e.cache.addMinipoolNode(pubkey, nodeAddr)
if err != nil {
e.Logger.Warn("Error updating minipool cache", zap.Error(err))
}
e.m.Counter("minipool_launch_received").Inc()
e.Logger.Info("Added new minipool", zap.String("pubkey", minipoolDetails.Pubkey.String()), zap.String("node", nodeAddr.String()))
e.Logger.Info("Added new minipool", zap.String("pubkey", pubkey.String()), zap.String("node", nodeAddr.String()))
}

func (e *CachingExecutionLayer) handleOdaoEvent(event types.Log) {
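The handler above locates the minipool via the event's indexed topic before fetching only its pubkey. A standalone sketch of that topic decoding, with hypothetical names and an invented example value:

```go
// Sketch: recovering an indexed address argument from an Ethereum log.
// Indexed event parameters are stored as 32-byte topics; an address
// occupies the low 20 bytes. Topics[0] is the event signature hash.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func minipoolFromEvent(event types.Log) (common.Address, error) {
	if len(event.Topics) < 2 {
		return common.Address{}, fmt.Errorf("expected an indexed address in topic 1, got %d topics", len(event.Topics))
	}
	// BytesToAddress keeps the trailing 20 bytes of the 32-byte topic.
	return common.BytesToAddress(event.Topics[1].Bytes()), nil
}

func main() {
	log := types.Log{Topics: []common.Hash{
		{}, // event signature hash (zeroed for the sketch)
		common.HexToHash("0x0000000000000000000000001d8f8f00cfa6758d7be78336684788fb0ee0fa46"),
	}}
	addr, err := minipoolFromEvent(log)
	fmt.Println(addr, err)
}
```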
@@ -441,6 +444,7 @@ func (e *CachingExecutionLayer) ecEventsConnect(opts *bind.CallOpts) error {
newHeadSub.Unsubscribe()
})

+	e.connected <- true
{
var noMoreEvents bool
var noMoreHeaders bool
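Taken together across this diff, `Init` allocates `connected` with capacity 1, `ecEventsConnect` sends on it once subscriptions are live, and `Stop` closes it. A sketch of that readiness-signal pattern, with hypothetical names and a timeout added for illustration:

```go
// Sketch of the readiness-signal pattern (names hypothetical). A buffered
// channel of capacity 1 lets the connecting goroutine announce readiness
// without blocking, while consumers can wait with a timeout.
package main

import (
	"errors"
	"time"
)

type service struct {
	connected chan bool
}

func newService() *service {
	return &service{connected: make(chan bool, 1)} // capacity 1: send never blocks
}

func (s *service) connectLoop() {
	// ... dial the execution client, establish subscriptions ...
	s.connected <- true // announce the first successful connection
}

// waitConnected blocks until connectLoop signals, the channel is closed
// on shutdown, or the timeout lapses.
func (s *service) waitConnected(timeout time.Duration) error {
	select {
	case <-s.connected:
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for connection")
	}
}

func main() {
	s := newService()
	go s.connectLoop()
	if err := s.waitConnected(5 * time.Second); err != nil {
		panic(err)
	}
}
```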
@@ -494,6 +498,7 @@ func (e *CachingExecutionLayer) Init() error {
var err error

e.m = metrics.NewMetricsRegistry("execution_layer")
+	e.connected = make(chan bool, 1)

// Pick a cache
if e.CachePath == "" {
@@ -581,61 +586,72 @@ func (e *CachingExecutionLayer) Init() error {
e.Logger.Info("Warming up the cache")

// Get all nodes at the given block
-	nodes, err := node.GetNodes(e.rp, opts)
+	nodes, err := node.GetNodeAddresses(e.rp, opts)
if err != nil {
return err
}
e.Logger.Info("Found nodes to preload", zap.Int("count", len(nodes)), zap.Int64("block", opts.BlockNumber.Int64()))

minipoolCount := 0
-	for _, n := range nodes {
+	for _, addr := range nodes {
// Allocate a pointer for this node
nodeInfo := &nodeInfo{}
// Determine their smoothing pool status
-		nodeInfo.inSmoothingPool, err = node.GetSmoothingPoolRegistrationState(e.rp, n.Address, opts)
+		nodeInfo.inSmoothingPool, err = node.GetSmoothingPoolRegistrationState(e.rp, addr, opts)
if err != nil {
return err
}

// Get their fee distributor address
-		nodeInfo.feeDistributor, err = node.GetDistributorAddress(e.rp, n.Address, opts)
+		nodeInfo.feeDistributor, err = node.GetDistributorAddress(e.rp, addr, opts)
if err != nil {
return err
}

// Store the smoothing pool state / fee distributor in the node index
-		err = e.cache.addNodeInfo(n.Address, nodeInfo)
+		err = e.cache.addNodeInfo(addr, nodeInfo)
if err != nil {
return err
}

// Also grab their minipools
-		minipools, err := minipool.GetNodeMinipools(e.rp, n.Address, opts)
+		minipoolAddresses, err := minipool.GetNodeMinipoolAddresses(e.rp, addr, opts)
if err != nil {
return err
}

-		minipoolCount += len(minipools)
-		for _, minipool := range minipools {
-			err = e.cache.addMinipoolNode(minipool.Pubkey, n.Address)
-			if err != nil {
-				return err
-			}
+		minipoolCount += len(minipoolAddresses)
+		var wg errgroup.Group
+		wg.SetLimit(64)
+		for _, m := range minipoolAddresses {
+			m := m
+			wg.Go(func() error {
+				pubkey, err := minipool.GetMinipoolPubkey(e.rp, m, opts)
+				if err != nil {
+					return err
+				}
+				err = e.cache.addMinipoolNode(pubkey, addr)
+				if err != nil {
+					return err
+				}
+				return nil
+			})
		}
+		err = wg.Wait()
+		if err != nil {
+			return err
+		}
}

// Get all odao nodes at the given block
-	odaoNodes, err := trustednode.GetMembers(e.rp, opts)
+	odaoNodes, err := trustednode.GetMemberAddresses(e.rp, opts)
if err != nil {
return err
}

for _, member := range odaoNodes {
-		if !member.Exists {
-			continue
-		}
-
-		err = e.cache.addOdaoNode(member.Address)
+		err = e.cache.addOdaoNode(member)
if err != nil {
return err
}
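The warm-up loop above now fans out the per-minipool pubkey lookups concurrently, capping in-flight calls at 64 via `errgroup.SetLimit`. A self-contained sketch of that bounded-concurrency pattern, with `fetchPubkey` standing in for the per-minipool RPC call:

```go
// Sketch of bounded concurrency with errgroup: SetLimit caps in-flight
// goroutines, and Wait returns the first error encountered, if any.
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func fetchPubkey(addr string) error {
	// ... one remote call per item ...
	return nil
}

func main() {
	addrs := []string{"0x01", "0x02", "0x03"}

	var wg errgroup.Group
	wg.SetLimit(64) // at most 64 lookups in flight at once
	for _, a := range addrs {
		a := a // capture the loop variable (required before Go 1.22)
		wg.Go(func() error {
			return fetchPubkey(a)
		})
	}
	if err := wg.Wait(); err != nil {
		fmt.Println("warm-up failed:", err)
	}
}
```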
@@ -680,6 +696,7 @@ func (e *CachingExecutionLayer) Stop() {
if err != nil {
e.Logger.Error("error while stopping the cache", zap.Error(err))
}
+	close(e.connected)
}

// ForEachNode calls the provided closure with the address of every rocket pool node the ExecutionLayer has observed
@@ -724,7 +741,7 @@ func (e *CachingExecutionLayer) GetRPInfo(pubkey rptypes.ValidatorPubkey) (*RPInfo, error) {
e.Logger.Error("Validator was in the minipool index, but not the node index",
zap.String("pubkey", pubkey.String()),
zap.String("node", nodeAddr.String()))
return nil, fmt.Errorf("node %x not found in cache despite pubkey %x being present", nodeAddr, pubkey)
return nil, fmt.Errorf("node %s not found in cache despite pubkey %s being present", nodeAddr.String(), pubkey.String())
}

if nodeInfo.inSmoothingPool {
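The final change swaps `%x` for `%s` with explicit `.String()` calls when formatting the address and pubkey in the error. With go-ethereum's `common.Address`, the two verbs render differently; a sketch, assuming the formatting behavior of recent go-ethereum releases:

```go
// common.Address implements fmt.Formatter: %x prints bare lowercase hex,
// while String() yields the 0x-prefixed, EIP-55 checksummed form.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	addr := common.HexToAddress("0x1d8f8f00cfa6758d7bE78336684788Fb0ee0Fa46")
	fmt.Printf("%x\n", addr)   // 1d8f8f00cfa6758d7be78336684788fb0ee0fa46
	fmt.Println(addr.String()) // 0x1d8f8f00cfa6758d7bE78336684788Fb0ee0Fa46
}
```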
