-
Notifications
You must be signed in to change notification settings - Fork 5
/
tree-gen.go
816 lines (692 loc) · 27.2 KB
/
tree-gen.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
package main
import (
"context"
"fmt"
"math/big"
"net/http"
"os"
"path/filepath"
"time"
"github.com/goccy/go-json"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/fatih/color"
"github.com/rocket-pool/rocketpool-go/rewards"
"github.com/rocket-pool/rocketpool-go/rocketpool"
"github.com/rocket-pool/rocketpool-go/utils/eth"
"github.com/rocket-pool/smartnode/shared/services/beacon"
"github.com/rocket-pool/smartnode/shared/services/beacon/client"
"github.com/rocket-pool/smartnode/shared/services/config"
rprewards "github.com/rocket-pool/smartnode/shared/services/rewards"
"github.com/rocket-pool/smartnode/shared/services/state"
cfgtypes "github.com/rocket-pool/smartnode/shared/types/config"
"github.com/rocket-pool/smartnode/shared/utils/log"
"github.com/urfave/cli/v2"
)
const (
	// Maximum number of concurrent RPC requests made to the Eth1 client;
	// also used by configureHTTP to size the transport's idle-connection pool.
	MaxConcurrentEth1Requests = 200
)
// Details about the snapshot block / timestamp for a treegen target.
// Populated by getSnapshotDetails when generating a partial interval.
type snapshotDetails struct {
	index                 uint64        // Reward interval index read from the contracts at the snapshot block
	startTime             time.Time     // Claim interval start time read from the contracts
	endTime               time.Time     // Timestamp of the snapshot Beacon slot
	startSlot             uint64        // First slot of the interval (0 for interval 0)
	snapshotBeaconBlock   uint64        // Slot of the snapshot Beacon block
	snapshotElBlockHeader *types.Header // Header of the EL block matching the snapshot Beacon block
	intervalsPassed       uint64        // Whole interval durations elapsed at the snapshot EL block
}
// targets holds information about the span we wish to gather data for.
// It could be an entire interval, or it could be a portion of one.
// Exactly one of rewardsEvent / snapshotDetails is set (see setTargets).
type targets struct {
	// If generating a whole interval, we use the rewardsEvent
	rewardsEvent *rewards.RewardsEvent
	// For preview / partial-interval functions, we use snapshotDetails
	snapshotDetails *snapshotDetails
	// Cached beacon block - not necessarily the last block in the last epoch,
	// as it may not have been proposed
	block *beacon.BeaconBlock
}
// Arguments that will be passed down to the actual treegen routine.
// Assembled by getTreegenArgs from either a rewards event (full interval)
// or snapshot details (partial interval).
type treegenArguments struct {
	// Header of the snapshot (end) EL block
	elBlockHeader *types.Header
	// Number of intervals elapsed
	intervalsPassed uint64
	// End time for the rewards tree (may not align on a full interval)
	endTime time.Time
	// Start time for the rewards tree
	startTime time.Time
	// Index of the rewards period
	index uint64
	// The first slot in the period (0 for interval 0)
	startSlot uint64
	// Consensus end block
	block *beacon.BeaconBlock
	// Network State at end EL block
	state *state.NetworkState
}
// Treegen holder for the requested execution metadata and necessary artifacts.
type treeGenerator struct {
	log               *log.ColorLogger                // Standard logger
	errLog            *log.ColorLogger                // Error logger
	rp                *rocketpool.RocketPool          // Rocket Pool contract bindings
	cfg               *config.RocketPoolConfig        // Smartnode config for the detected network
	mgr               *state.NetworkStateManager      // Network state manager
	recordMgr         *rprewards.RollingRecordManager // Rolling record manager; nil until prepareRecordManager builds it
	bn                beacon.Client                   // Beacon Node client
	beaconConfig      beacon.Eth2Config               // Beacon chain configuration
	targets           targets                         // Span to generate a tree for (set by setTargets)
	outputDir         string                          // Directory the JSON files are written to
	prettyPrint       bool                            // Emit human-readable (indented) JSON
	ruleset           uint64                          // Explicit ruleset version to use (0 = automatic)
	useRollingRecords bool                            // Whether rolling records are enabled
}
// GenerateTree generates a new rewards tree based on the command line flags.
//
// Depending on the flags it either runs the full tree generation, approximates
// the rETH stakers' share of the Smoothing Pool (approximate-only), or prints
// network/interval information (network-info).
func GenerateTree(c *cli.Context) error {
	// Configure
	configureHTTP()

	// Initialization
	interval := c.Int64("interval")
	targetEpoch := c.Uint64("target-epoch")
	logger := log.NewColorLogger(color.FgHiWhite)
	errLogger := log.NewColorLogger(color.FgRed)

	// URL acquisition
	ecUrl := c.String("ec-endpoint")
	if ecUrl == "" {
		return fmt.Errorf("ec-endpoint must be provided")
	}
	bnUrl := c.String("bn-endpoint")
	// BUGFIX: this previously re-checked ecUrl, so an empty bn-endpoint
	// slipped through and only failed later with a confusing connection error.
	if bnUrl == "" {
		return fmt.Errorf("bn-endpoint must be provided")
	}

	// Create the EC and BN clients
	ec, err := ethclient.Dial(ecUrl)
	if err != nil {
		return fmt.Errorf("error connecting to the EC: %w", err)
	}
	bn := client.NewStandardHttpClient(bnUrl)
	beaconConfig, err := bn.GetEth2Config()
	if err != nil {
		return fmt.Errorf("error getting beacon config from the BN at %s - %w", bnUrl, err)
	}

	// Check which network we're on via the BN
	depositContract, err := bn.GetEth2DepositContract()
	if err != nil {
		return fmt.Errorf("error getting deposit contract from the BN: %w", err)
	}
	var network cfgtypes.Network
	switch depositContract.ChainID {
	case 1:
		network = cfgtypes.Network_Mainnet
		logger.Printlnf("Beacon node is configured for Mainnet.")
	default:
		return fmt.Errorf("your Beacon node is configured for an unknown network with Chain ID [%d]", depositContract.ChainID)
	}

	// Create a new config on the proper network
	cfg := config.NewRocketPoolConfig("", true)
	cfg.Smartnode.Network.Value = network

	// Create the RP wrapper
	storageContract := cfg.Smartnode.GetStorageAddress()
	rp, err := rocketpool.NewRocketPool(ec, common.HexToAddress(storageContract))
	if err != nil {
		return fmt.Errorf("error creating Rocket Pool wrapper: %w", err)
	}

	// Create the NetworkStateManager
	mgr, err := state.NewNetworkStateManager(rp, cfg, rp.Client, bn, &logger)
	if err != nil {
		return err
	}

	// Create the generator
	generator := treeGenerator{
		log:               &logger,
		errLog:            &errLogger,
		rp:                rp,
		cfg:               cfg,
		bn:                bn,
		mgr:               mgr,
		beaconConfig:      beaconConfig,
		outputDir:         c.String("output-dir"),
		prettyPrint:       c.Bool("pretty-print"),
		ruleset:           c.Uint64("ruleset"),
		useRollingRecords: c.Bool("use-rolling-records"),
	}

	// Initialize the generator targets
	if err := generator.setTargets(interval, targetEpoch); err != nil {
		return fmt.Errorf("error setting the targeted consensus epoch and block: %w", err)
	}

	// Run the rETH SP approximation if requested
	if c.Bool("approximate-only") {
		return generator.approximateRethSpRewards()
	}

	// Print the network info and exit if requested
	if c.Bool("network-info") {
		return generator.printNetworkInfo()
	}

	// Run the tree generation
	return generator.generateTree()
}
// getTreegenArgs assembles the arguments for the tree generation routine from
// the previously-selected targets (see setTargets). It fetches the network
// state at the target slot, then fills in the interval metadata either from
// the on-chain rewards event (full interval) or from the computed snapshot
// details (partial interval).
func (g *treeGenerator) getTreegenArgs() (*treegenArguments, error) {
	// Cache the network state at the time of the targeted epoch for later use
	state, err := g.mgr.GetStateForSlot(g.targets.block.Slot)
	if err != nil {
		return nil, fmt.Errorf("unable to get state at slot %d: %w", g.targets.block.Slot, err)
	}
	// If we have a rewardsEvent, we're generating a full interval
	if g.targets.rewardsEvent != nil {
		index := g.targets.rewardsEvent.Index.Uint64()
		// Interval 0 has no predecessor event, so its start slot stays 0
		startSlot := uint64(0)
		if index > 0 {
			// Derive the start slot for this interval from the previous interval's event
			previousRewardsEvent, err := rprewards.GetRewardSnapshotEvent(g.rp, g.cfg, uint64(index-1), nil)
			if err != nil {
				return nil, fmt.Errorf("error getting event for interval %d: %w", index-1, err)
			}
			startSlot, err = getStartSlotForInterval(previousRewardsEvent, g.bn, g.beaconConfig)
			if err != nil {
				return nil, fmt.Errorf("error getting start slot for interval %d: %w", index, err)
			}
		}
		// Resolve the EL block header recorded in the rewards event
		elBlockHeader, err := g.rp.Client.HeaderByNumber(context.Background(), g.targets.rewardsEvent.ExecutionBlock)
		if err != nil {
			return nil, fmt.Errorf("error getting el block header %d: %w", g.targets.rewardsEvent.ExecutionBlock.Uint64(), err)
		}
		return &treegenArguments{
			startTime:       g.targets.rewardsEvent.IntervalStartTime,
			endTime:         g.targets.rewardsEvent.IntervalEndTime,
			index:           index,
			intervalsPassed: g.targets.rewardsEvent.IntervalsPassed.Uint64(),
			startSlot:       startSlot,
			block:           g.targets.block,
			elBlockHeader:   elBlockHeader,
			state:           state,
		}, nil
	}
	// Partial interval: everything comes from the snapshot details
	return &treegenArguments{
		startTime:       g.targets.snapshotDetails.startTime,
		endTime:         g.targets.snapshotDetails.endTime,
		index:           g.targets.snapshotDetails.index,
		intervalsPassed: g.targets.snapshotDetails.intervalsPassed,
		startSlot:       g.targets.snapshotDetails.startSlot,
		block:           g.targets.block,
		elBlockHeader:   g.targets.snapshotDetails.snapshotElBlockHeader,
		state:           state,
	}, nil
}
// log prints a human-readable summary of the snapshot's blocks and time range.
func (d *snapshotDetails) log(l *log.ColorLogger) {
	elBlock := d.snapshotElBlockHeader.Number.Uint64()
	l.Printlnf("Snapshot Beacon block = %d, EL block = %d, running from %s to %s\n",
		d.snapshotBeaconBlock, elBlock, d.startTime, d.endTime)
}
// lastBlockInEpoch returns the last block proposed in the targeted epoch.
// If the targeted epoch has no proposals, it returns (nil, nil).
func (g *treeGenerator) lastBlockInEpoch(epoch uint64) (*beacon.BeaconBlock, error) {
	firstSlot := epoch * g.beaconConfig.SlotsPerEpoch
	lastSlot := firstSlot + g.beaconConfig.SlotsPerEpoch - 1

	// Walk backwards from the last slot of the epoch until a proposed block
	// is found. BUGFIX: the loop is structured with an explicit bottom check
	// because the previous `for slot := start; slot >= end; slot--` form
	// never terminates for epoch 0 — `slot >= 0` is always true for uint64,
	// and the decrement wraps around to MaxUint64.
	for slot := lastSlot; ; slot-- {
		block, exists, err := g.bn.GetBeaconBlock(fmt.Sprint(slot))
		if err != nil {
			return nil, err
		}
		if exists {
			// Found the last proposed block in the epoch
			return &block, nil
		}
		if slot == firstSlot {
			break
		}
	}
	return nil, nil
}
// setTargets initializes g.targets from the requested interval and epoch.
//
// Behavior matrix:
//   - interval < 0, targetEpoch == 0: preview the current interval up to the
//     latest finalized block.
//   - interval < 0, targetEpoch > 0:  preview the current interval up to the
//     last block of the target epoch.
//   - interval >= 0, targetEpoch == 0: regenerate the full past interval.
//   - interval >= 0, targetEpoch > 0:  regenerate a portion of a past interval.
func (g *treeGenerator) setTargets(interval int64, targetEpoch uint64) error {
	var err error

	// Validate that the target epoch is finalized
	if targetEpoch > 0 {
		beaconHead, err := g.bn.GetBeaconHead()
		if err != nil {
			return fmt.Errorf("unable to query beacon head: %w", err)
		}
		if targetEpoch > beaconHead.FinalizedEpoch {
			return fmt.Errorf("targeted epoch has not yet been finalized")
		}
	}

	// If interval isn't set, we're generating a preview of the current interval
	if interval < 0 {
		var block *beacon.BeaconBlock
		if targetEpoch == 0 {
			// No targetEpoch was passed, so set it to the latest finalized epoch
			b, err := g.mgr.GetLatestFinalizedBeaconBlock()
			if err != nil {
				return err
			}
			block = &b
		} else {
			// A target epoch was passed, so find its last block
			block, err = g.lastBlockInEpoch(targetEpoch)
			if err != nil {
				return err
			}
			if block == nil {
				return fmt.Errorf("Unable to find any valid blocks in epoch %d. Was your BN checkpoint synced against a slot that occurred after this epoch?", targetEpoch)
			}
		}
		g.targets.block = block
		g.targets.snapshotDetails, err = g.getSnapshotDetails()
		if err != nil {
			return err
		}
		// Ensure the target block is in the current interval
		if g.slotToTime(block.Slot).Before(g.targets.snapshotDetails.startTime) {
			return fmt.Errorf("selected epoch precedes current interval. use -i to generate previous intervals")
		}
		// Inform the user of the range they're querying
		g.log.Printlnf("Targeting a portion of the current interval (%d)", g.targets.snapshotDetails.index)
		g.targets.snapshotDetails.log(g.log)
		return nil
	}

	// We're generating a previous interval (full or partial).
	// Get the corresponding rewards event for that interval.
	rewardsEvent, err := rprewards.GetRewardSnapshotEvent(g.rp, g.cfg, uint64(interval), nil)
	if err != nil {
		return err
	}

	// If targetEpoch isn't set, we're generating a full interval
	if targetEpoch == 0 {
		g.log.Printlnf("Targeting full interval %d", interval)
		g.targets.rewardsEvent = &rewardsEvent
		// Cache the last block of the rewards period
		endEpoch := rewardsEvent.ConsensusBlock.Uint64() / g.beaconConfig.SlotsPerEpoch
		g.targets.block, err = g.lastBlockInEpoch(endEpoch)
		if err != nil {
			return err
		}
		if g.targets.block == nil {
			// BUGFIX: this message previously printed targetEpoch, which is
			// always 0 on this path; report the epoch actually searched.
			return fmt.Errorf("unable to find any valid blocks in epoch %d. Was your BN checkpoint synced against a slot that occurred after this epoch?", endEpoch)
		}
		return nil
	}

	// We're generating a partial interval.
	// Ensure the target slot happens *before* the end of the interval.
	eventBlock := rewardsEvent.ConsensusBlock.Uint64()
	finalEpochOfInterval := eventBlock / g.beaconConfig.SlotsPerEpoch
	if targetEpoch == finalEpochOfInterval {
		return fmt.Errorf("target epoch %d was the end of the targeted interval %d.\nRerun without -t", targetEpoch, interval)
	}
	if targetEpoch > finalEpochOfInterval {
		return fmt.Errorf("target epoch %d was after targeted interval %d", targetEpoch, interval)
	}

	// Ensure the target epoch started *after* the start of the interval, which should land on the start of an epoch boundary
	epochStartTime := g.slotToTime(targetEpoch * g.beaconConfig.SlotsPerEpoch)
	if epochStartTime.Before(rewardsEvent.IntervalStartTime) {
		return fmt.Errorf("target epoch %d was before targeted interval %d", targetEpoch, interval)
	}

	// Cache the target block for later use.
	// NOTE(review): this fetches the block at eventBlock (the *end* of the
	// interval), not the last block of targetEpoch as the preview path does —
	// confirm this is intended before changing it.
	block, found, err := g.bn.GetBeaconBlock(fmt.Sprint(eventBlock))
	if err != nil {
		return err
	}
	if !found {
		return fmt.Errorf("Unable to find the ending block for interval %d (slot %d). Was your BN checkpoint synced against a slot that occurred after this epoch?", interval, eventBlock)
	}
	g.targets.block = &block
	g.targets.snapshotDetails, err = g.getSnapshotDetails()
	if err != nil {
		return err
	}

	// Inform the user of the range they're querying
	g.log.Printlnf("Targeting a portion of a previous interval (%d)", g.targets.snapshotDetails.index)
	g.targets.snapshotDetails.log(g.log)
	return nil
}
// slotToTime converts a Beacon chain slot number into its wall-clock timestamp.
func (g *treeGenerator) slotToTime(slot uint64) time.Time {
	offset := time.Duration(slot*g.beaconConfig.SecondsPerSlot) * time.Second
	return time.Unix(int64(g.beaconConfig.GenesisTime), 0).Add(offset)
}
// generateRewardsFile runs tree generation, honoring an explicitly-requested
// ruleset when one was given (g.ruleset != 0).
func (g *treeGenerator) generateRewardsFile(treegen *rprewards.TreeGenerator) (rprewards.IRewardsFile, error) {
	if g.ruleset != 0 {
		return treegen.GenerateTreeWithRuleset(g.ruleset)
	}
	return treegen.GenerateTree()
}
// serializeMinipoolPerformance serializes the minipool performance file into
// JSON, using the human-readable form when pretty-printing is enabled.
func (g *treeGenerator) serializeMinipoolPerformance(rewardsFile rprewards.IRewardsFile) ([]byte, error) {
	performance := rewardsFile.GetMinipoolPerformanceFile()
	if !g.prettyPrint {
		return performance.Serialize()
	}
	return performance.SerializeHuman()
}
// serializeRewardsTree serializes the rewards tree file into JSON, indenting
// with tabs when pretty-printing is enabled.
func (g *treeGenerator) serializeRewardsTree(rewardsFile rprewards.IRewardsFile) ([]byte, error) {
	if !g.prettyPrint {
		return json.Marshal(rewardsFile)
	}
	return json.MarshalIndent(rewardsFile, "", "\t")
}
// writeFiles writes both the minipool performance file and the rewards tree
// file to the configured output directory.
func (g *treeGenerator) writeFiles(rewardsFile rprewards.IRewardsFile) error {
	g.log.Printlnf("Saving JSON files...")
	index := rewardsFile.GetHeader().Index
	network := string(g.cfg.Smartnode.Network.Value.(cfgtypes.Network))

	// Build the output paths
	rewardsTreePath := filepath.Join(g.outputDir, fmt.Sprintf(config.RewardsTreeFilenameFormat, network, index))
	minipoolPerformancePath := filepath.Join(g.outputDir, fmt.Sprintf(config.MinipoolPerformanceFilenameFormat, network, index))

	// Serialize and write the minipool performance file
	performanceBytes, err := g.serializeMinipoolPerformance(rewardsFile)
	if err != nil {
		return fmt.Errorf("error serializing minipool performance file into JSON: %w", err)
	}
	if err := os.WriteFile(minipoolPerformancePath, performanceBytes, 0644); err != nil {
		return fmt.Errorf("error saving minipool performance file to %s: %w", minipoolPerformancePath, err)
	}
	g.log.Printlnf("Saved minipool performance file to %s", minipoolPerformancePath)

	// The performance file's CID is unknown offline; use a placeholder
	rewardsFile.SetMinipoolPerformanceFileCID("---")

	// Serialize and write the rewards tree file
	treeBytes, err := g.serializeRewardsTree(rewardsFile)
	if err != nil {
		return fmt.Errorf("error serializing proof wrapper into JSON: %w", err)
	}
	g.log.Printlnf("Generation complete! Saving tree...")
	if err := os.WriteFile(rewardsTreePath, treeBytes, 0644); err != nil {
		return fmt.Errorf("error saving rewards tree file to %s: %w", rewardsTreePath, err)
	}
	g.log.Printlnf("Saved rewards snapshot file to %s", rewardsTreePath)
	g.log.Printlnf("Successfully generated rewards snapshot for interval %d", index)
	return nil
}
// prepareRecordManager creates the manager for rolling records (if applicable)
// and generates a rolling record for the target state.
//
// It returns nil without setting g.recordMgr when rolling records don't apply:
//   - an explicit ruleset older than v6 was requested,
//   - the target is interval 0 (no previous event marks the record start), or
//   - no ruleset was given and the interval predates v6 on this network.
func (g *treeGenerator) prepareRecordManager(args *treegenArguments) error {
	// Ignore this on old rulesets without rolling records
	if g.ruleset < 6 && g.ruleset > 0 {
		g.log.Printlnf("Ruleset %d does not use rolling records, ignoring them.", g.ruleset)
		return nil
	}
	// Get the target index from whichever target kind is populated
	ignoreRollingRecords := false
	var index uint64
	if g.targets.rewardsEvent != nil {
		index = g.targets.rewardsEvent.Index.Uint64()
	} else {
		index = g.targets.snapshotDetails.index
	}
	// Ignore rolling records for the first interval
	if index == 0 {
		g.log.Println("Interval 0 cannot use rolling records because there was no previous event to indicate when to start collecting records, ignoring them.")
		return nil
	}
	// If a ruleset isn't specified, check if the interval is before v6
	if g.ruleset == 0 {
		network := g.cfg.Smartnode.Network.Value.(cfgtypes.Network)
		switch network {
		case cfgtypes.Network_Mainnet:
			ignoreRollingRecords = (index < rprewards.MainnetV6Interval)
		default:
			return fmt.Errorf("unknown network [%v]", network)
		}
	}
	// Ignore this on old intervals without rolling records
	if ignoreRollingRecords {
		g.log.Printlnf("Rewards interval %d cannot use rolling records because it used an older ruleset, ignoring them.", index)
		return nil
	}
	// Rolling records are supported, build up the manager
	var err error
	g.recordMgr, err = rprewards.NewRollingRecordManager(g.log, g.errLog, g.cfg, g.rp, g.bn, g.mgr, args.startSlot, g.beaconConfig, index)
	if err != nil {
		return fmt.Errorf("error creating rolling record manager: %w", err)
	}
	// Determine the target slot for tree generation.
	// NOTE(review): targetSlot is currently only used in the error message
	// below; the record itself is generated from args.state.
	var targetSlot uint64
	if g.targets.rewardsEvent != nil {
		targetSlot = g.targets.rewardsEvent.ConsensusBlock.Uint64()
	} else {
		targetSlot = g.targets.snapshotDetails.snapshotBeaconBlock
	}
	// Create the record for the target state and store it on the manager
	g.log.Printlnf("Generation supports rolling records - creating a new record manager.")
	record, err := g.recordMgr.GenerateRecordForState(args.state)
	if err != nil {
		return fmt.Errorf("error creating record for slot %d: %w", targetSlot, err)
	}
	g.recordMgr.Record = record
	return nil
}
// getGenerator creates a tree generator using the provided arguments,
// preparing the rolling record manager first when rolling records are enabled.
func (g *treeGenerator) getGenerator(args *treegenArguments) (*rprewards.TreeGenerator, error) {
	if !g.useRollingRecords {
		g.log.Println("Rolling records are not enabled, ignoring them.")
	} else {
		g.log.Println("Rolling records are enabled, preparing rolling manager.")
		if err := g.prepareRecordManager(args); err != nil {
			return nil, fmt.Errorf("error preparing rolling record: %w", err)
		}
	}

	// Use the rolling record only if the manager was actually built (it may
	// have been skipped for old rulesets or interval 0)
	var record *rprewards.RollingRecord
	if g.recordMgr != nil {
		record = g.recordMgr.Record
	}

	// Create the tree generator
	treegen, err := rprewards.NewTreeGenerator(
		g.log, "", g.rp, g.cfg, g.bn, args.index,
		args.startTime, args.endTime, args.block.Slot, args.elBlockHeader,
		args.intervalsPassed, args.state, record)
	if err != nil {
		return nil, fmt.Errorf("error creating tree generator: %w", err)
	}
	return treegen, nil
}
// approximateRethSpRewards approximates the rETH stakers' share of the
// Smoothing Pool's balance at the snapshot block and logs the result.
func (g *treeGenerator) approximateRethSpRewards() error {
	args, err := g.getTreegenArgs()
	if err != nil {
		return fmt.Errorf("error compiling treegen arguments: %w", err)
	}
	// Pin all contract calls to the snapshot EL block
	opts := &bind.CallOpts{
		BlockNumber: args.elBlockHeader.Number,
	}
	// Log the range being approximated
	g.log.Printlnf("Approximating rETH rewards for the current interval (%d)", args.index)
	g.log.Printlnf("Snapshot Beacon block = %d, EL block = %d, running from %s to %s\n",
		args.block.Slot, opts.BlockNumber.Uint64(), args.startTime, args.endTime)
	// Get the Smoothing Pool contract's balance at the snapshot block
	smoothingPoolContract, err := g.rp.GetContract("rocketSmoothingPool", opts)
	if err != nil {
		return fmt.Errorf("error getting smoothing pool contract: %w", err)
	}
	smoothingPoolBalance, err := g.rp.Client.BalanceAt(context.Background(), *smoothingPoolContract.Address, opts.BlockNumber)
	if err != nil {
		return fmt.Errorf("error getting smoothing pool balance: %w", err)
	}
	// Create the tree generator
	treegen, err := g.getGenerator(args)
	if err != nil {
		return err
	}
	// Approximate the balance, honoring an explicitly-requested ruleset
	var rETHShare *big.Int
	if g.ruleset == 0 {
		rETHShare, err = treegen.ApproximateStakerShareOfSmoothingPool()
	} else {
		rETHShare, err = treegen.ApproximateStakerShareOfSmoothingPoolWithRuleset(g.ruleset)
	}
	if err != nil {
		return fmt.Errorf("error approximating rETH stakers' share of the Smoothing Pool: %w", err)
	}
	g.log.Printlnf("Total ETH in the Smoothing Pool: %s wei (%.6f ETH)", smoothingPoolBalance.String(), eth.WeiToEth(smoothingPoolBalance))
	g.log.Printlnf("rETH stakers's share: %s wei (%.6f ETH)", rETHShare.String(), eth.WeiToEth(rETHShare))
	return nil
}
// generateTree generates a complete rewards tree, validates its Merkle root
// against the canonical one (when regenerating a finished interval), and
// writes the result to disk.
func (g *treeGenerator) generateTree() error {
	args, err := g.getTreegenArgs()
	if err != nil {
		return fmt.Errorf("error compiling treegen arguments: %w", err)
	}

	// Create the tree generator
	treegen, err := g.getGenerator(args)
	if err != nil {
		return err
	}

	// Generate the rewards file, timing the run
	start := time.Now()
	rewardsFile, err := g.generateRewardsFile(treegen)
	if err != nil {
		return fmt.Errorf("error generating Merkle tree: %w", err)
	}

	// Warn about any nodes that had an invalid network assigned
	header := rewardsFile.GetHeader()
	for address, network := range header.InvalidNetworkNodes {
		g.log.Printlnf("WARNING: Node %s has invalid network %d assigned! Using 0 (mainnet) instead.", address.Hex(), network)
	}
	g.log.Printlnf("Finished in %s", time.Since(start).String())

	// When regenerating a full past interval, compare our root to the canonical one
	if g.targets.rewardsEvent != nil {
		root := common.BytesToHash(header.MerkleTree.Root())
		if root == g.targets.rewardsEvent.MerkleRoot {
			g.log.Printlnf("Your Merkle tree's root of %s matches the canonical root! You will be able to use this file for claiming rewards.", header.MerkleRoot)
		} else {
			g.log.Printlnf("WARNING: your Merkle tree had a root of %s, but the canonical Merkle tree's root was %s. This file will not be usable for claiming rewards.", root.Hex(), g.targets.rewardsEvent.MerkleRoot.Hex())
		}
	}

	return g.writeFiles(rewardsFile)
}
// getSnapshotDetails builds a rewards snapshot at the target block
// (g.targets.block), resolving the matching EL block, the current reward
// index, the interval start slot/time, and the number of intervals passed.
func (g *treeGenerator) getSnapshotDetails() (*snapshotDetails, error) {
	var err error
	var opts bind.CallOpts
	endTime := g.slotToTime(g.targets.block.Slot)
	// Get the number of the EL block matching the CL snapshot block
	var snapshotElBlockHeader *types.Header
	if g.targets.block.ExecutionBlockNumber == 0 {
		// No EL data so the Merge hasn't happened yet, figure out the EL block based on the Epoch ending time
		snapshotElBlockHeader, err = rprewards.GetELBlockHeaderForTime(endTime, g.rp)
		if err != nil {
			return nil, fmt.Errorf("error getting EL block for time %s: %w", endTime, err)
		}
		opts.BlockNumber = snapshotElBlockHeader.Number
	} else {
		// Post-Merge: the CL block already references its EL block number
		opts.BlockNumber = big.NewInt(0).SetUint64(g.targets.block.ExecutionBlockNumber)
		snapshotElBlockHeader, err = g.rp.Client.HeaderByNumber(context.Background(), opts.BlockNumber)
		if err != nil {
			return nil, fmt.Errorf("error getting EL block %d: %w", opts.BlockNumber.Uint64(), err)
		}
	}
	// Get the interval index (contract state read at the snapshot EL block)
	indexBig, err := rewards.GetRewardIndex(g.rp, &opts)
	if err != nil {
		return nil, fmt.Errorf("error getting current reward index: %w", err)
	}
	index := indexBig.Uint64()
	// Get the start slot; interval 0 has no predecessor event, so it stays 0
	startSlot := uint64(0)
	if index > 0 {
		// Derive the start slot for this interval from the previous interval's event
		previousRewardsEvent, err := rprewards.GetRewardSnapshotEvent(g.rp, g.cfg, uint64(index-1), nil)
		if err != nil {
			return nil, fmt.Errorf("error getting event for interval %d: %w", index-1, err)
		}
		startSlot, err = getStartSlotForInterval(previousRewardsEvent, g.bn, g.beaconConfig)
		if err != nil {
			return nil, fmt.Errorf("error getting start slot for interval %d: %w", index, err)
		}
	}
	// Get the start time for the interval, and how long an interval is supposed to take
	startTime, err := rewards.GetClaimIntervalTimeStart(g.rp, &opts)
	if err != nil {
		return nil, fmt.Errorf("error getting claim interval start time: %w", err)
	}
	intervalTime, err := rewards.GetClaimIntervalTime(g.rp, &opts)
	if err != nil {
		return nil, fmt.Errorf("error getting claim interval time: %w", err)
	}
	// Calculate the intervals passed: whole interval durations elapsed between
	// the interval start and the snapshot EL block's timestamp
	blockTime := time.Unix(int64(snapshotElBlockHeader.Time), 0)
	timeSinceStart := blockTime.Sub(startTime)
	intervalsPassed := uint64(timeSinceStart / intervalTime)
	return &snapshotDetails{
		index:                 index,
		startTime:             startTime,
		endTime:               endTime,
		startSlot:             startSlot,
		snapshotBeaconBlock:   g.targets.block.Slot,
		snapshotElBlockHeader: snapshotElBlockHeader,
		intervalsPassed:       intervalsPassed,
	}, nil
}
// getStartSlotForInterval determines the first slot of the interval following
// previousIntervalEvent: the first non-missed slot at or after the start of
// the epoch after the previous interval's consensus block.
func getStartSlotForInterval(previousIntervalEvent rewards.RewardsEvent, bc beacon.Client, beaconConfig beacon.Eth2Config) (uint64, error) {
	// Sanity check: confirm the BN can still serve the previous interval's block
	_, exists, err := bc.GetBeaconBlock(previousIntervalEvent.ConsensusBlock.String())
	if err != nil {
		return 0, fmt.Errorf("error verifying block from previous interval: %w", err)
	}
	if !exists {
		return 0, fmt.Errorf("couldn't retrieve CL block from previous interval (slot %d); this likely means you checkpoint sync'd your Beacon Node and it has not backfilled to the previous interval yet so it cannot be used for tree generation", previousIntervalEvent.ConsensusBlock.Uint64())
	}

	// The new interval starts at the first slot of the next epoch
	previousEpoch := previousIntervalEvent.ConsensusBlock.Uint64() / beaconConfig.SlotsPerEpoch
	slot := (previousEpoch + 1) * beaconConfig.SlotsPerEpoch

	// Advance past any missed slots until a proposed block is found
	for {
		_, exists, err := bc.GetBeaconBlock(fmt.Sprint(slot))
		if err != nil {
			return 0, fmt.Errorf("error getting EL data for BC slot %d: %w", slot, err)
		}
		if exists {
			return slot, nil
		}
		slot++
	}
}
// printNetworkInfo prints information about the current network and interval.
func (g *treeGenerator) printNetworkInfo() error {
	args, err := g.getTreegenArgs()
	if err != nil {
		return fmt.Errorf("error compiling treegen arguments: %w", err)
	}

	// A generator is needed to report which ruleset versions would be used
	generator, err := g.getGenerator(args)
	if err != nil {
		return err
	}

	g.log.Println()
	g.log.Println("=== Network Details ===")
	g.log.Printlnf("Current index: %d", args.index)
	g.log.Printlnf("Start Time: %s", args.startTime)

	// The interval starts one slot / block after the previous interval's event
	if args.index > 0 {
		prevEvent, err := rprewards.GetRewardSnapshotEvent(g.rp, g.cfg, args.index-1, nil)
		if err != nil {
			return fmt.Errorf("error getting rewards submission event for previous interval (%d): %w", args.index-1, err)
		}
		g.log.Printlnf("Start Beacon Slot: %d", prevEvent.ConsensusBlock.Uint64()+1)
		g.log.Printlnf("Start EL Block: %d", prevEvent.ExecutionBlock.Uint64()+1)
	}

	g.log.Printlnf("End Time: %s", args.endTime)
	g.log.Printlnf("Snapshot Beacon Slot: %d", args.block.Slot)
	g.log.Printlnf("Snapshot EL Block: %s", args.elBlockHeader.Number.String())
	g.log.Printlnf("Intervals Passed: %d", args.intervalsPassed)
	g.log.Printlnf("Tree Ruleset: v%d", generator.GetGeneratorRulesetVersion())
	g.log.Printlnf("Approximator Ruleset: v%d", generator.GetApproximatorRulesetVersion())
	return nil
}
// configureHTTP tunes the default HTTP transport for heavy Eth1 RPC usage.
//
// Treegen makes a large number of concurrent RPC requests to the Eth1 client.
// Raising MaxIdleConnsPerHost lets the transport cache and re-use connections
// up to the maximum expected concurrency, preventing the memory consumption
// and address-allowance issues caused by repeatedly opening and closing
// connections.
func configureHTTP() {
	// Guard the type assertion: if something replaced http.DefaultTransport
	// with a non-*http.Transport, skip the tuning instead of panicking.
	if transport, ok := http.DefaultTransport.(*http.Transport); ok {
		transport.MaxIdleConnsPerHost = MaxConcurrentEth1Requests
	}
}