diff --git a/node/pkg/watchers/evm/connectors/batch_poller.go b/node/pkg/watchers/evm/connectors/batch_poller.go
index adabc05d26..1262350cac 100644
--- a/node/pkg/watchers/evm/connectors/batch_poller.go
+++ b/node/pkg/watchers/evm/connectors/batch_poller.go
@@ -82,6 +82,17 @@ func (b *BatchPollConnector) SubscribeForBlocks(ctx context.Context, errC chan e
 
 	errCount := 0
 
+	// Publish the initial finalized and safe blocks so we have a starting point for reobservation requests.
+	for idx, block := range lastBlocks {
+		b.logger.Info(fmt.Sprintf("publishing initial %s block", b.batchData[idx].finality), zap.Uint64("initial_block", block.Number.Uint64()))
+		sink <- block
+		if b.generateSafe && b.batchData[idx].finality == Finalized {
+			safe := block.Copy(Safe)
+			b.logger.Info("publishing generated initial safe block", zap.Uint64("initial_block", safe.Number.Uint64()))
+			sink <- safe
+		}
+	}
+
 	common.RunWithScissors(ctx, errC, "block_poll_subscribe_for_blocks", func(ctx context.Context) error {
 		timer := time.NewTimer(b.Delay)
 		defer timer.Stop()
diff --git a/node/pkg/watchers/evm/connectors/batch_poller_test.go b/node/pkg/watchers/evm/connectors/batch_poller_test.go
index 8f39495fd3..8cf7052290 100644
--- a/node/pkg/watchers/evm/connectors/batch_poller_test.go
+++ b/node/pkg/watchers/evm/connectors/batch_poller_test.go
@@ -281,12 +281,13 @@ func TestBatchPoller(t *testing.T) {
 		}
 	}()
 
-	// First sleep a bit and make sure there were no start up errors and no blocks got published.
+	// First sleep a bit and make sure there were no start up errors and the initial blocks were published.
 	time.Sleep(10 * time.Millisecond)
 	mutex.Lock()
 	require.NoError(t, publishedErr)
 	require.NoError(t, publishedSubErr)
-	assert.Nil(t, block)
+	batchShouldHaveSafeAndFinalizedButNotLatest(t, block, 0x309a0c, baseConnector.expectedHash())
+	block = nil
 	mutex.Unlock()
 
 	// Post the first new block and verify we get it.