diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go
index fec5c884856..65f856dc3f4 100644
--- a/turbo/snapshotsync/snapshotsync.go
+++ b/turbo/snapshotsync/snapshotsync.go
@@ -270,6 +270,7 @@ func computeBlocksToPrune(blockReader services.FullBlockReader, p prune.Mode) (b
 // for MVP we sync with Downloader only once, in future will send new snapshots also
 func WaitForDownloader(ctx context.Context, logPrefix string, dirs datadir.Dirs, headerchain, blobs bool, prune prune.Mode, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error {
 	snapshots := blockReader.Snapshots()
+	bscSnapshots := blockReader.BscSnapshots()
 	borSnapshots := blockReader.BorSnapshots()
 
 	// Find minimum block to download.
@@ -282,6 +283,11 @@ func WaitForDownloader(ctx context.Context, logPrefix string, dirs datadir.Dirs,
 			return err
 		}
 	}
+	if cc.Parlia != nil {
+		if err := bscSnapshots.ReopenFolder(); err != nil {
+			return err
+		}
+	}
 
 	return nil
 }
@@ -314,9 +320,13 @@ func WaitForDownloader(ctx context.Context, logPrefix string, dirs datadir.Dirs,
 		}
 	}
 
+	if caplin == NoCaplin {
+		blobs = !blobs
+	}
+
 	// build all download requests
 	for _, p := range preverifiedBlockSnapshots {
-		if caplin == NoCaplin && (strings.Contains(p.Name, "beaconblocks") || strings.Contains(p.Name, "blobsidecars")) {
+		if caplin == NoCaplin && (strings.Contains(p.Name, "beaconblocks") || strings.Contains(p.Name, "blobsidecars")) && !strings.Contains(p.Name, "bscblobsiders") {
 			continue
 		}
 		if caplin == OnlyCaplin && !strings.Contains(p.Name, "beaconblocks") && !strings.Contains(p.Name, "blobsidecars") {
@@ -416,6 +426,12 @@ func WaitForDownloader(ctx context.Context, logPrefix string, dirs datadir.Dirs,
 		}
 	}
 
+	if cc.Parlia != nil {
+		if err := bscSnapshots.ReopenFolder(); err != nil {
+			return err
+		}
+	}
+
 	if err := agg.OpenFolder(); err != nil {
 		return err
 	}
diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go
index c0d99628232..4b44c41730a 100644
--- a/turbo/stages/stageloop.go
+++ b/turbo/stages/stageloop.go
@@ -617,7 +617,7 @@ func NewDefaultStages(ctx context.Context,
 	runInTestMode := cfg.ImportMode
 
 	return stagedsync.DefaultStages(ctx,
-		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
+		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.BlobPrune, silkworm, cfg.Prune),
 		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications),
 		stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, recents, signatures, cfg.WithHeimdallWaypointRecording, nil),
 		stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
@@ -660,7 +660,7 @@ func NewPipelineStages(ctx context.Context,
 
 	if len(cfg.Sync.UploadLocation) == 0 {
 		return stagedsync.PipelineStages(ctx,
-			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
+			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.BlobPrune, silkworm, cfg.Prune),
 			stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 			stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
 			stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications.Accumulator, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg)),
@@ -669,7 +669,7 @@ func NewPipelineStages(ctx context.Context,
 	}
 
 	return stagedsync.UploaderPipelineStages(ctx,
-		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
+		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, engine, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling || cfg.BlobPrune, silkworm, cfg.Prune),
 		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications),
 		stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 		stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
@@ -723,7 +723,7 @@ func NewPolygonSyncStages(
 			consensusEngine,
 			agg,
 			config.InternalCL && config.CaplinConfig.Backfilling,
-			config.CaplinConfig.BlobBackfilling,
+			config.CaplinConfig.BlobBackfilling || config.BlobPrune,
 			silkworm,
 			config.Prune,
 		),
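
Note on the download-filter change: the first `continue` branch in `WaitForDownloader` now carries three clauses, which is easy to misread. Below is a minimal sketch, not part of the patch, that extracts the condition into a standalone predicate so it can be unit-tested; the `CaplinMode` values and the name markers ("beaconblocks", "blobsidecars", "bscblobsiders") are copied from the diff, while the concrete enum values and the file names in `main` are assumptions for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// CaplinMode mirrors the mode enum referenced in the diff; the
// concrete values here are assumptions for this sketch.
type CaplinMode int

const (
	NoCaplin CaplinMode = iota
	OnlyCaplin
)

// shouldSkip reports whether a preverified snapshot name would hit one
// of the two `continue` branches in the patched loop.
func shouldSkip(name string, caplin CaplinMode) bool {
	isCL := strings.Contains(name, "beaconblocks") || strings.Contains(name, "blobsidecars")
	// NoCaplin: skip CL snapshots, except names carrying the BSC
	// marker "bscblobsiders" used by the patch.
	if caplin == NoCaplin && isCL && !strings.Contains(name, "bscblobsiders") {
		return true
	}
	// OnlyCaplin: skip everything that is not a CL snapshot.
	if caplin == OnlyCaplin && !isCL {
		return true
	}
	return false
}

func main() {
	// Hypothetical file names, for illustration only.
	for _, name := range []string{
		"v1-000000-000500-beaconblocks.seg",
		"v1-000000-000500-headers.seg",
		"v1-000000-000500-bscblobsiders.seg",
	} {
		fmt.Printf("%-40s skip(NoCaplin)=%v\n", name, shouldSkip(name, NoCaplin))
	}
}
```

One thing worth double-checking against the actual snapshot naming: if the intended BSC blob files contain "bscblobsidecars", the guard as written never matches them, since "bscblobsiders" is not a substring of "bscblobsidecars".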
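The patch also adds the same Parlia-gated `ReopenFolder` call in two places in `WaitForDownloader`. The following sketch factors that pattern into a helper; `folderReopener`, `chainConfig`, and `reopenSnapshotFolders` are hypothetical names introduced here, not erigon APIs, while the `Parlia != nil` gate itself follows the diff (a non-nil `Parlia` section in `chain.Config` identifies a BSC chain).

```go
package main

import "fmt"

// folderReopener is a hypothetical stand-in for the snapshot
// collections in the diff (snapshots, borSnapshots, bscSnapshots),
// which all expose ReopenFolder.
type folderReopener interface {
	ReopenFolder() error
}

// chainConfig is a minimal stand-in for erigon's chain.Config: a
// non-nil Parlia section means the chain is BSC.
type chainConfig struct {
	Parlia *struct{}
}

// reopenSnapshotFolders mirrors the control flow added by the patch:
// base snapshots are always reopened, BSC snapshots only on Parlia
// chains, and the first error aborts.
func reopenSnapshotFolders(cc *chainConfig, base, bsc folderReopener) error {
	if err := base.ReopenFolder(); err != nil {
		return err
	}
	if cc.Parlia != nil {
		if err := bsc.ReopenFolder(); err != nil {
			return err
		}
	}
	return nil
}

// noopSnapshots is a trivial folderReopener for the demo below.
type noopSnapshots struct{ name string }

func (n noopSnapshots) ReopenFolder() error {
	fmt.Println("reopened", n.name)
	return nil
}

func main() {
	bscChain := &chainConfig{Parlia: &struct{}{}}
	_ = reopenSnapshotFolders(bscChain, noopSnapshots{"base"}, noopSnapshots{"bsc"})
}
```

A helper like this would keep the two call sites in the diff from drifting apart, at the cost of one extra indirection; the patch as written simply repeats the five-line block, which is also a defensible choice for a two-site pattern.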