From 6045429a0b2c8b2636648cc29b775cbb7c458ff0 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Fri, 26 Aug 2022 07:58:07 +0200 Subject: [PATCH 01/26] fix: Content-Length header in HEAD request for CAR file (#725) --- cmd/booster-http/server.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cmd/booster-http/server.go b/cmd/booster-http/server.go index 1c49b25e4..f1f684e65 100644 --- a/cmd/booster-http/server.go +++ b/cmd/booster-http/server.go @@ -184,9 +184,7 @@ func (s *HttpServer) handleByPayloadCid(w http.ResponseWriter, r *http.Request) pieceCid := pieces[0] ctx := r.Context() content, err := s.getPieceContent(ctx, pieceCid) - if err == nil && isCar && r.Method != "HEAD" { - // Note: Getting the CAR content out of the piece is non-trivial, so - // we don't do it for HEAD requests + if err == nil && isCar { content, err = s.getCarContent(pieceCid, content) } if err != nil { @@ -233,9 +231,7 @@ func (s *HttpServer) handleByPieceCid(w http.ResponseWriter, r *http.Request) { // Get a reader over the the piece ctx := r.Context() content, err := s.getPieceContent(ctx, pieceCid) - if err == nil && isCar && r.Method != "HEAD" { - // Note: Getting the CAR content out of the piece is non-trivial, so - // we don't do it for HEAD requests + if err == nil && isCar { content, err = s.getCarContent(pieceCid, content) } if err != nil { @@ -272,7 +268,7 @@ func serveContent(w http.ResponseWriter, r *http.Request, content io.ReadSeeker, w.Header().Set("Content-Type", contentType) if r.Method == "HEAD" { - // For an HTTP HEAD request we don't send any data (just headers) + // For an HTTP HEAD request ServeContent doesn't send any data (just headers) http.ServeContent(w, r, "", time.Time{}, content) alog("%s\tHEAD %s", color.New(color.FgGreen).Sprintf("%d", http.StatusOK), r.URL) return From 6d1354daf6fd2a76b77247de87bfc2ed2d10cb0b Mon Sep 17 00:00:00 2001 From: dirkmc Date: Fri, 26 Aug 2022 15:30:05 +0200 Subject: [PATCH 02/26] refactor: rename booster-http flag --sealing-api to --storage-api (#734) --- cmd/booster-http/run.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/cmd/booster-http/run.go b/cmd/booster-http/run.go index f2ed270c9..4d8800780 100644 --- a/cmd/booster-http/run.go +++ b/cmd/booster-http/run.go @@ -67,8 +67,8 @@ var runCmd = &cli.Command{ Required: true, }, &cli.StringFlag{ - Name: "api-sealer", - Usage: "the endpoint for the sealer API", + Name: "api-storage", + Usage: "the endpoint for the storage node API", Required: true, }, }, @@ -99,19 +99,19 @@ var runCmd = &cli.Command{ } defer ncloser() - // Connect to the sealing API - sealingApiInfo := cctx.String("api-sealer") - sauth, err := storageAuthWithURL(sealingApiInfo) + // Connect to the storage API + storageApiInfo := cctx.String("api-storage") + sauth, err := storageAuthWithURL(storageApiInfo) if err != nil { - return fmt.Errorf("parsing sealing API endpoint: %w", err) + return fmt.Errorf("parsing storage API endpoint: %w", err) } - sealingService, sealerCloser, err := getMinerApi(ctx, sealingApiInfo) + storageService, storageCloser, err := getMinerApi(ctx, storageApiInfo) if err != nil { return fmt.Errorf("getting miner API: %w", err) } - defer sealerCloser() + defer storageCloser() - maddr, err := sealingService.ActorAddress(ctx) + maddr, err := storageService.ActorAddress(ctx) if err != nil { return fmt.Errorf("getting miner actor address: %w", err) } @@ -129,17 +129,17 @@ var runCmd = &cli.Command{ // Create the store interface var urls []string - 
lstor, err := paths.NewLocal(ctx, lr, sealingService, urls) + lstor, err := paths.NewLocal(ctx, lr, storageService, urls) if err != nil { return fmt.Errorf("creating new local store: %w", err) } - storage := lotus_modules.RemoteStorage(lstor, sealingService, sauth, sealer.Config{ + storage := lotus_modules.RemoteStorage(lstor, storageService, sauth, sealer.Config{ // TODO: Not sure if I need this, or any of the other fields in this struct ParallelFetchLimit: 1, }) // Create the piece provider and sector accessors - pp := sealer.NewPieceProvider(storage, sealingService, sealingService) - sa := sectoraccessor.NewSectorAccessor(dtypes.MinerAddress(maddr), sealingService, pp, fullnodeApi) + pp := sealer.NewPieceProvider(storage, storageService, storageService) + sa := sectoraccessor.NewSectorAccessor(dtypes.MinerAddress(maddr), storageService, pp, fullnodeApi) allowIndexing := cctx.Bool("allow-indexing") // Create the server API sapi := serverApi{ctx: ctx, bapi: bapi, sa: sa} @@ -269,7 +269,7 @@ func getMinerApi(ctx context.Context, ai string) (v0api.StorageMiner, jsonrpc.Cl return nil, nil, fmt.Errorf("could not get DialArgs: %w", err) } - log.Infof("Using sealing API at %s", addr) + log.Infof("Using storage API at %s", addr) api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader()) if err != nil { return nil, nil, fmt.Errorf("creating miner service API: %w", err) From 02ffef7921c997e9e3e9985300846451afd67412 Mon Sep 17 00:00:00 2001 From: LexLuthr <88259624+LexLuthr@users.noreply.github.com> Date: Fri, 26 Aug 2022 20:55:50 +0530 Subject: [PATCH 03/26] Update config version to 3 (#735) * update config * minor fixes * fix comment --- cmd/boostd/init.go | 2 -- node/config/def.go | 3 --- node/config/doc_gen.go | 12 ------------ node/config/migrate.go | 7 ++++--- node/config/types.go | 4 ---- node/config/v2_to_v3.go | 28 ++++++++++++++++++++++++++++ 6 files changed, 32 insertions(+), 24 deletions(-) create mode 100644 node/config/v2_to_v3.go diff --git a/cmd/boostd/init.go b/cmd/boostd/init.go index dd173b740..f32374f11 100644 --- a/cmd/boostd/init.go +++ b/cmd/boostd/init.go @@ -700,8 +700,6 @@ func setBoostDealMakingCfg(bdm *config.DealmakingConfig, mktsCfg *lotus_config.S bdm.MaxDealStartDelay = config.Duration(ldm.MaxDealStartDelay) bdm.MaxProviderCollateralMultiplier = ldm.MaxProviderCollateralMultiplier bdm.MaxStagingDealsBytes = ldm.MaxStagingDealsBytes - bdm.SimultaneousTransfersForStorage = ldm.SimultaneousTransfersForStorage - bdm.SimultaneousTransfersForRetrieval = ldm.SimultaneousTransfersForRetrieval bdm.StartEpochSealingBuffer = ldm.StartEpochSealingBuffer bdm.Filter = ldm.Filter bdm.RetrievalFilter = ldm.RetrievalFilter diff --git a/node/config/def.go b/node/config/def.go index 9ae9048fd..1a912ae83 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -79,9 +79,6 @@ func DefaultBoost() *Boost { ExpectedSealDuration: Duration(time.Hour * 24), MaxProviderCollateralMultiplier: 2, - SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, - SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, - StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed DealProposalLogDuration: Duration(time.Hour * 24), diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 8a29b89df..331350a60 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -183,18 +183,6 @@ as a multiplier of the minimum collateral bound`, Comment: `The maximum allowed disk usage size in bytes of staging 
deals not yet passed to the sealing node by the markets service. 0 is unlimited.`, }, - { - Name: "SimultaneousTransfersForStorage", - Type: "uint64", - - Comment: `The maximum number of parallel online data transfers for storage deals`, - }, - { - Name: "SimultaneousTransfersForRetrieval", - Type: "uint64", - - Comment: `The maximum number of parallel online data transfers for retrieval deals`, - }, { Name: "StartEpochSealingBuffer", Type: "uint64", diff --git a/node/config/migrate.go b/node/config/migrate.go index fb7138057..42e5f98fb 100644 --- a/node/config/migrate.go +++ b/node/config/migrate.go @@ -15,13 +15,14 @@ var log = logging.Logger("cfg") // CurrentVersion is the config version expected by Boost. // We need to migrate the config file to this version. -const CurrentVersion = 2 +const CurrentVersion = 3 type migrateUpFn = func(cfgPath string) (string, error) var migrations = []migrateUpFn{ - v0Tov1, // index 0 => version 0 - v1Tov2, // index 1 => version 1 + v0Tov1, // index 0 => version 1 + v1Tov2, // index 1 => version 2 + v2Tov3, // index 2 => version 3 } // This struct is used to get the config file version diff --git a/node/config/types.go b/node/config/types.go index 7ff9c3a2d..522fde165 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -147,10 +147,6 @@ type DealmakingConfig struct { // The maximum allowed disk usage size in bytes of staging deals not yet // passed to the sealing node by the markets service. 0 is unlimited. MaxStagingDealsBytes int64 - // The maximum number of parallel online data transfers for storage deals - SimultaneousTransfersForStorage uint64 - // The maximum number of parallel online data transfers for retrieval deals - SimultaneousTransfersForRetrieval uint64 // Minimum start epoch buffer to give time for sealing of sector with deal. StartEpochSealingBuffer uint64 // The amount of time to keep deal proposal logs for before cleaning them up. diff --git a/node/config/v2_to_v3.go b/node/config/v2_to_v3.go new file mode 100644 index 000000000..dbc069ebc --- /dev/null +++ b/node/config/v2_to_v3.go @@ -0,0 +1,28 @@ +package config + +import ( + "fmt" +) + +// Migrate from config version 2 to version 3 (i.e. 
remove a few fields and add recently added fields) +func v2Tov3(cfgPath string) (string, error) { + cfg, err := FromFile(cfgPath, DefaultBoost()) + if err != nil { + return "", fmt.Errorf("parsing config file %s: %w", cfgPath, err) + } + + boostCfg, ok := cfg.(*Boost) + if !ok { + return "", fmt.Errorf("unexpected config type %T: expected *config.Boost", cfg) + } + + // Update the Boost config version + boostCfg.ConfigVersion = 3 + + bz, err := ConfigUpdate(boostCfg, DefaultBoost(), true) + if err != nil { + return "", fmt.Errorf("applying configuration: %w", err) + } + + return string(bz), nil +} From e5b59d21eb1b7a3c72d99100d7da81c42c711087 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Tue, 30 Aug 2022 13:13:11 +0200 Subject: [PATCH 04/26] Transfer limiter (#710) * feat: transfer limiter * fix: dont open new transfers to stalled peers beyond the soft limit * refactor: transfer limiter - avoid locking contention * refactor: transfer limiter - distinguish peers by host (not peer ID) (#736) * fix: flaky test --- go.mod | 1 + node/config/def.go | 7 +- node/config/doc_gen.go | 21 + node/config/types.go | 11 + node/modules/storageminer.go | 5 + storagemarket/deal_execution.go | 29 +- storagemarket/provider.go | 13 +- storagemarket/provider_test.go | 19 +- storagemarket/transfer_limiter.go | 281 +++++++++++++ storagemarket/transfer_limiter_test.go | 382 ++++++++++++++++++ storagemarket/types/types.go | 42 ++ storagemarket/types/types_test.go | 49 +++ transport/httptransport/http_transport.go | 23 +- transport/httptransport/{ => util}/util.go | 37 +- .../httptransport/{ => util}/util_test.go | 52 ++- 15 files changed, 914 insertions(+), 58 deletions(-) create mode 100644 storagemarket/transfer_limiter.go create mode 100644 storagemarket/transfer_limiter_test.go create mode 100644 storagemarket/types/types_test.go rename transport/httptransport/{ => util}/util.go (60%) rename transport/httptransport/{ => util}/util_test.go (53%) diff --git a/go.mod b/go.mod index 33f06aaf2..02e3eb68b 100644 --- a/go.mod +++ b/go.mod @@ -99,6 +99,7 @@ require ( go.uber.org/atomic v1.9.0 go.uber.org/fx v1.15.0 go.uber.org/multierr v1.8.0 + golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/tools v0.1.11 golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f diff --git a/node/config/def.go b/node/config/def.go index 1a912ae83..7997c4b64 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -94,7 +94,12 @@ func DefaultBoost() *Boost { }, MaxTransferDuration: Duration(24 * 3600 * time.Second), - RemoteCommp: false, + + RemoteCommp: false, + + HttpTransferMaxConcurrentDownloads: 20, + HttpTransferStallTimeout: Duration(5 * time.Minute), + HttpTransferStallCheckPeriod: Duration(30 * time.Second), }, LotusDealmaking: lotus_config.DealmakingConfig{ diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 331350a60..569a088d5 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -234,6 +234,27 @@ see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-f Comment: `The public multi-address for retrieving deals with booster-http. Note: Must be in multiaddr format, eg /dns/foo.com/tcp/443/https`, }, + { + Name: "HttpTransferMaxConcurrentDownloads", + Type: "uint64", + + Comment: `The maximum number of concurrent storage deal HTTP downloads. 
+Note that this is a soft maximum; if some downloads stall, +more downloads are allowed to start.`, + }, + { + Name: "HttpTransferStallCheckPeriod", + Type: "Duration", + + Comment: `The period between checking if downloads have stalled.`, + }, + { + Name: "HttpTransferStallTimeout", + Type: "Duration", + + Comment: `The time that can elapse before a download is considered stalled (and +another concurrent download is allowed to start).`, + }, }, "FeeConfig": []DocField{ { diff --git a/node/config/types.go b/node/config/types.go index 522fde165..3a2df4190 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -163,12 +163,23 @@ type DealmakingConfig struct { // The maximum amount of time a transfer can take before it fails MaxTransferDuration Duration + // Whether to do commp on the Boost node (local) or on the Sealer (remote) RemoteCommp bool // The public multi-address for retrieving deals with booster-http. // Note: Must be in multiaddr format, eg /dns/foo.com/tcp/443/https HTTPRetrievalMultiaddr string + + // The maximum number of concurrent storage deal HTTP downloads. + // Note that this is a soft maximum; if some downloads stall, + // more downloads are allowed to start. + HttpTransferMaxConcurrentDownloads uint64 + // The period between checking if downloads have stalled. + HttpTransferStallCheckPeriod Duration + // The time that can elapse before a download is considered stalled (and + // another concurrent download is allowed to start). + HttpTransferStallTimeout Duration } type FeeConfig struct { diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 08a9ce240..a192d3a67 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -404,6 +404,11 @@ func NewStorageMarketProvider(provAddr address.Address, cfg *config.Boost) func( prvCfg := storagemarket.Config{ MaxTransferDuration: time.Duration(cfg.Dealmaking.MaxTransferDuration), RemoteCommp: cfg.Dealmaking.RemoteCommp, + TransferLimiter: storagemarket.TransferLimiterConfig{ + MaxConcurrent: cfg.Dealmaking.HttpTransferMaxConcurrentDownloads, + StallCheckPeriod: time.Duration(cfg.Dealmaking.HttpTransferStallCheckPeriod), + StallTimeout: time.Duration(cfg.Dealmaking.HttpTransferStallTimeout), + }, } dl := logs.NewDealLogger(logsDB) tspt := httptransport.New(h, dl) diff --git a/storagemarket/deal_execution.go b/storagemarket/deal_execution.go index 4bac3b7aa..f07d2d210 100644 --- a/storagemarket/deal_execution.go +++ b/storagemarket/deal_execution.go @@ -287,8 +287,27 @@ func (p *Provider) untagFundsAfterPublish(ctx context.Context, deal *types.Provi } func (p *Provider) transferAndVerify(ctx context.Context, pub event.Emitter, deal *types.ProviderDealState) *dealMakingError { - p.dealLogger.Infow(deal.DealUuid, "transferring deal data", "transfer client id", deal.Transfer.ClientID) + p.dealLogger.Infow(deal.DealUuid, "deal queued for transfer", "transfer client id", deal.Transfer.ClientID) + // Wait for a spot in the transfer queue + err := p.xferLimiter.waitInQueue(ctx, deal) + if err != nil { + // If boost was shutdown while waiting for the transfer to start, + // automatically retry on restart. 
+ if errors.Is(err, context.Canceled) { + return &dealMakingError{ + retry: smtypes.DealRetryAuto, + error: fmt.Errorf("boost shutdown while waiting to start transfer for deal %s: %w", deal.DealUuid, err), + } + } + return &dealMakingError{ + retry: smtypes.DealRetryFatal, + error: fmt.Errorf("queued transfer failed to start for deal %s: %w", deal.DealUuid, err), + } + } + defer p.xferLimiter.complete(deal.DealUuid) + + p.dealLogger.Infow(deal.DealUuid, "start deal data transfer", "transfer client id", deal.Transfer.ClientID) tctx, cancel := context.WithDeadline(ctx, time.Now().Add(p.config.MaxTransferDuration)) defer cancel() @@ -316,6 +335,10 @@ func (p *Provider) transferAndVerify(ctx context.Context, pub event.Emitter, dea error: fmt.Errorf("data-transfer failed: %w", err), } } + + // Make room in the transfer queue for the next transfer + p.xferLimiter.complete(deal.DealUuid) + p.dealLogger.Infow(deal.DealUuid, "deal data-transfer completed successfully", "bytes received", deal.NBytesReceived, "time taken", time.Since(st).String()) @@ -332,8 +355,9 @@ func (p *Provider) transferAndVerify(ctx context.Context, pub event.Emitter, dea func (p *Provider) waitForTransferFinish(ctx context.Context, handler transport.Handler, pub event.Emitter, deal *types.ProviderDealState) error { defer handler.Close() defer p.transfers.complete(deal.DealUuid) - var lastOutputPct int64 + // log transfer progress to the deal log every 10% + var lastOutputPct int64 logTransferProgress := func(received int64) { pct := (100 * received) / int64(deal.Transfer.Size) outputPct := pct / 10 @@ -355,6 +379,7 @@ func (p *Provider) waitForTransferFinish(ctx context.Context, handler transport. } deal.NBytesReceived = evt.NBytesReceived p.transfers.setBytes(deal.DealUuid, uint64(evt.NBytesReceived)) + p.xferLimiter.setBytes(deal.DealUuid, uint64(evt.NBytesReceived)) p.fireEventDealUpdate(pub, deal) logTransferProgress(deal.NBytesReceived) diff --git a/storagemarket/provider.go b/storagemarket/provider.go index c53267c7c..c8530c144 100644 --- a/storagemarket/provider.go +++ b/storagemarket/provider.go @@ -54,7 +54,8 @@ type Config struct { // The maximum amount of time a transfer can take before it fails MaxTransferDuration time.Duration // Whether to do commp on the Boost node (local) or the sealing node (remote) - RemoteCommp bool + RemoteCommp bool + TransferLimiter TransferLimiterConfig } var log = logging.Logger("boost-provider") @@ -91,6 +92,7 @@ type Provider struct { logsDB *db.LogsDB Transport transport.Transport + xferLimiter *transferLimiter fundManager *fundmanager.FundManager storageManager *storagemanager.StorageManager dealPublisher types.DealPublisher @@ -122,6 +124,11 @@ func NewProvider(cfg Config, sqldb *sql.DB, dealsDB *db.DealsDB, fundMgr *fundma dagst stores.DAGStoreWrapper, ps piecestore.PieceStore, ip types.IndexProvider, askGetter types.AskGetter, sigVerifier types.SignatureVerifier, dl *logs.DealLogger, tspt transport.Transport) (*Provider, error) { + xferLimiter, err := newTransferLimiter(cfg.TransferLimiter) + if err != nil { + return nil, err + } + newDealPS, err := newDealPubsub() if err != nil { return nil, err @@ -147,6 +154,7 @@ func NewProvider(cfg Config, sqldb *sql.DB, dealsDB *db.DealsDB, fundMgr *fundma storageSpaceChan: make(chan storageSpaceDealReq), Transport: tspt, + xferLimiter: xferLimiter, fundManager: fundMgr, storageManager: storageMgr, @@ -421,6 +429,9 @@ func (p *Provider) Start() error { // Start sampling transfer data rate go p.transfers.start(p.ctx) + // Start the 
transfer limiter + go p.xferLimiter.run(p.ctx) + log.Infow("storage provider: started") return nil } diff --git a/storagemarket/provider_test.go b/storagemarket/provider_test.go index 1543616d9..3db5c6fd5 100644 --- a/storagemarket/provider_test.go +++ b/storagemarket/provider_test.go @@ -1356,7 +1356,15 @@ func NewHarness(t *testing.T, opts ...harnessOpt) *ProviderHarness { askStore := &mockAskStore{} askStore.SetAsk(pc.price, pc.verifiedPrice, pc.minPieceSize, pc.maxPieceSize) - prvCfg := Config{MaxTransferDuration: time.Hour, RemoteCommp: !pc.localCommp} + prvCfg := Config{ + MaxTransferDuration: time.Hour, + RemoteCommp: !pc.localCommp, + TransferLimiter: TransferLimiterConfig{ + MaxConcurrent: 10, + StallCheckPeriod: time.Millisecond, + StallTimeout: time.Hour, + }, + } prov, err := NewProvider(prvCfg, sqldb, dealsDB, fm, sm, fn, minerStub, minerAddr, minerStub, minerStub, sps, minerStub, df, sqldb, logsDB, dagStore, ps, &NoOpIndexProvider{}, askStore, &mockSignatureVerifier{true, nil}, dl, tspt) require.NoError(t, err) @@ -1611,6 +1619,10 @@ func (ph *ProviderHarness) newDealBuilder(t *testing.T, seed int, opts ...dealPr require.NoError(tbuilder.t, err) name := carv2Fileinfo.Name() + req := tspttypes.HttpRequest{URL: "http://foo.bar"} + xferParams, err := json.Marshal(req) + require.NoError(t, err) + // assemble the final deal params to send to the provider dealParams := &types.DealParams{ DealUUID: uuid.New(), @@ -1624,8 +1636,9 @@ func (ph *ProviderHarness) newDealBuilder(t *testing.T, seed int, opts ...dealPr }, DealDataRoot: rootCid, Transfer: types.Transfer{ - Type: "http", - Size: uint64(carv2Fileinfo.Size()), + Type: "http", + Params: xferParams, + Size: uint64(carv2Fileinfo.Size()), }, } diff --git a/storagemarket/transfer_limiter.go b/storagemarket/transfer_limiter.go new file mode 100644 index 000000000..29098e8ce --- /dev/null +++ b/storagemarket/transfer_limiter.go @@ -0,0 +1,281 @@ +package storagemarket + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + smtypes "github.com/filecoin-project/boost/storagemarket/types" + "github.com/google/uuid" +) + +type transfer struct { + started chan struct{} + deal *smtypes.ProviderDealState + host string + updatedAt time.Time + bytes uint64 +} + +func (t *transfer) isStarted() bool { + select { + case <-t.started: + return true + default: + return false + } +} + +type TransferLimiterConfig struct { + // The maximum number of concurrent transfers (soft limit - see comment below) + MaxConcurrent uint64 + // The period between checking if a connection has stalled + StallCheckPeriod time.Duration + // The time that can elapse before a download is considered stalled + StallTimeout time.Duration +} + +// +// transferLimiter maintains a queue of transfers with a soft upper limit on +// the number of concurrent transfers. +// +// To prevent slow or stalled transfers from blocking up the queue there are +// a couple of mitigations: +// +// The queue is ordered such that we +// - start transferring data for the oldest deal first +// - prefer to start transfers with peers that don't have any ongoing transfer +// - once the soft limit is reached, don't allow any new transfers with peers +// that have existing stalled transfers +// +// Note that peers are distinguished by their host (eg foo.bar:8080) not by +// libp2p peer ID. 
+// +// For example, if there is +// - one active transfer with peer A +// - one pending transfer (peer A) +// - one pending transfer (peer B) +// the algorithm will prefer to start a transfer with peer B than peer A. +// +// This helps to ensure that slow peers don't block the transfer queue. +// +// The limit on the number of concurrent transfers is soft: +// eg if there is a limit of 5 concurrent transfers and there are +// - three active transfers +// - two stalled transfers +// then two more transfers are permitted to start (as long as they're not with +// one of the stalled peers) +// +type transferLimiter struct { + cfg TransferLimiterConfig + + lk sync.RWMutex + xfers map[uuid.UUID]*transfer +} + +func newTransferLimiter(cfg TransferLimiterConfig) (*transferLimiter, error) { + if cfg.MaxConcurrent == 0 { + return nil, fmt.Errorf("maximum active concurrent transfers must be > 0") + } + if cfg.StallCheckPeriod == 0 { + return nil, fmt.Errorf("transfer stall check period must be > 0") + } + if cfg.StallTimeout == 0 { + return nil, fmt.Errorf("transfer stall timeout must be > 0") + } + + return &transferLimiter{ + cfg: cfg, + xfers: make(map[uuid.UUID]*transfer), + }, nil +} + +func (tl *transferLimiter) run(ctx context.Context) { + // Periodically check for stalled transfers + ticker := time.NewTicker(tl.cfg.StallCheckPeriod) + defer ticker.Stop() + + // Note: The first tick will occur after one stall check period (not + // immediately). + for { + select { + case t := <-ticker.C: + tl.check(t) + + case <-ctx.Done(): + return + } + } +} + +func (tl *transferLimiter) check(now time.Time) { + // Take a copy of the transfers map. + // We do this to avoid lock contention with the SetBytes message which + // is called with high frequency when there are a lot of concurrent + // transfers (every time data is received). + tl.lk.Lock() + xfers := make(map[uuid.UUID]*transfer, len(tl.xfers)) + for id, xfer := range tl.xfers { + cp := *xfer + xfers[id] = &cp + } + tl.lk.Unlock() + + // Count how many transfers are active (not stalled) + var activeCount uint64 + transferringPeers := make(map[string]struct{}, len(xfers)) + stalledPeers := make(map[string]struct{}, len(xfers)) + unstartedXfers := make([]*transfer, 0, len(xfers)) + for _, xfer := range xfers { + if !xfer.isStarted() { + // Build a list of unstarted transfers (needed later) + unstartedXfers = append(unstartedXfers, xfer) + + // Skip transfers that haven't started + continue + } + + // Build the set of peers that have an ongoing transfer (needed later) + transferringPeers[xfer.host] = struct{}{} + + // Check each transfer to see if it has stalled + if now.Sub(xfer.updatedAt) < tl.cfg.StallTimeout { + activeCount++ + } else { + stalledPeers[xfer.host] = struct{}{} + } + } + + // Check if there are already enough active transfers + if activeCount >= tl.cfg.MaxConcurrent { + return + } + + // Sort unstarted transfers by creation date (oldest first) + sort.Slice(unstartedXfers, func(i, j int) bool { + return unstartedXfers[i].deal.CreatedAt.Before(unstartedXfers[j].deal.CreatedAt) + }) + + // Gets the next transfer that should be started + nextTransfer := func() *transfer { + var next *transfer + + // Iterate over unstarted transfers from oldest to newest + startedCount := tl.startedCount(xfers) + for _, xfer := range unstartedXfers { + // Skip transfers that have already been started. + // Note: A previous call to nextTransfer may have started the + // transfer. 
+ if xfer.isStarted() { + continue + } + + // If there is already a transfer to the same peer and it's stalled, + // allow a new transfer with that peer, but only up to the soft + // limit + _, isStalledPeer := stalledPeers[xfer.host] + if isStalledPeer && startedCount >= tl.cfg.MaxConcurrent { + continue + } + + // Default to choosing the oldest unstarted transfer + if next == nil { + next = xfer + } + + // If there are no transfers with the peer that sent the storage deal, + // start a transfer. + // This helps ensure that a slow peer doesn't block up the transfer + // queue, because we'll favour opening a transfer to a new peer + // over a peer that already has a transfer (which may be slow). + if _, ok := transferringPeers[xfer.host]; !ok { + return xfer + } + } + + return next + } + + // Start new transfers until we reach the limit + for i := activeCount; i < tl.cfg.MaxConcurrent; i++ { + next := nextTransfer() + if next == nil { + return + } + + // Update the list of peers with active transfers + transferringPeers[next.host] = struct{}{} + + // Signal that the transfer has started + next.updatedAt = time.Now() + close(next.started) + } +} + +// Count how many transfers have been started but not completed +func (tl *transferLimiter) startedCount(xfers map[uuid.UUID]*transfer) uint64 { + var count uint64 + for _, xfer := range xfers { + if xfer.isStarted() { + count++ + } + } + return count +} + +// Count how many transfers there are in total +func (tl *transferLimiter) transfersCount() int { + tl.lk.RLock() + defer tl.lk.RUnlock() + + return len(tl.xfers) +} + +// Wait for the next open spot in the transfer queue +func (tl *transferLimiter) waitInQueue(ctx context.Context, deal *smtypes.ProviderDealState) error { + host, err := deal.Transfer.Host() + if err != nil { + return fmt.Errorf("getting host from Transfer params for deal %s: %w", deal.DealUuid, err) + } + xfer := &transfer{ + deal: deal, + host: host, + started: make(chan struct{}), + } + + // Add the transfer to the queue + tl.lk.Lock() + tl.xfers[deal.DealUuid] = xfer + tl.lk.Unlock() + + // Wait for the signal that the transfer can start + select { + case <-xfer.started: + return nil + case <-ctx.Done(): + tl.complete(deal.DealUuid) + return ctx.Err() + } +} + +// Called when a transfer has completed (or errored out) +func (tl *transferLimiter) complete(dealUuid uuid.UUID) { + tl.lk.Lock() + defer tl.lk.Unlock() + + delete(tl.xfers, dealUuid) +} + +// Called each time the transfer progresses +func (tl *transferLimiter) setBytes(dealUuid uuid.UUID, bytes uint64) { + tl.lk.Lock() + defer tl.lk.Unlock() + + xfer, ok := tl.xfers[dealUuid] + if ok { + xfer.updatedAt = time.Now() + xfer.bytes = bytes + } +} diff --git a/storagemarket/transfer_limiter_test.go b/storagemarket/transfer_limiter_test.go new file mode 100644 index 000000000..d4159de68 --- /dev/null +++ b/storagemarket/transfer_limiter_test.go @@ -0,0 +1,382 @@ +package storagemarket + +import ( + "context" + "encoding/json" + "fmt" + "golang.org/x/exp/rand" + "testing" + "time" + + smtypes "github.com/filecoin-project/boost/storagemarket/types" + "github.com/filecoin-project/boost/testutil" + "github.com/filecoin-project/boost/transport/types" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func generateDeal() *smtypes.ProviderDealState { + return generateDealWithHost(fmt.Sprintf("foo.bar:%d", rand.Uint64())) +} + +func generateDealWithHost(host string) *smtypes.ProviderDealState { + transferParams := types.HttpRequest{ + URL: 
fmt.Sprintf("http://%s/piece/%s", host, uuid.New()), + } + json, err := json.Marshal(transferParams) + if err != nil { + panic(err) + } + return &smtypes.ProviderDealState{ + DealUuid: uuid.New(), + CreatedAt: time.Now(), + Transfer: smtypes.Transfer{ + Type: "http", + Params: json, + }, + } +} + +func TestTransferLimiterBasic(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Set up the transfer limiter + tl, err := newTransferLimiter(TransferLimiterConfig{ + MaxConcurrent: 1, + StallCheckPeriod: time.Millisecond, + StallTimeout: 30 * time.Second, + }) + require.NoError(t, err) + + go tl.run(ctx) + + // Add a deal to the transfer queue + deal1 := generateDeal() + err = tl.waitInQueue(ctx, deal1) + require.NoError(t, err) + + // Remove the deal from the transfer queue + tl.complete(deal1.DealUuid) +} + +// Verifies that a new transfer is blocked until the number of ongoing +// transfers falls below the limit +func TestTransferLimiterQueueSize(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + tl, err := newTransferLimiter(TransferLimiterConfig{ + MaxConcurrent: 1, + StallCheckPeriod: time.Millisecond, + StallTimeout: 30 * time.Second, + }) + require.NoError(t, err) + + // Generate two deals and add them to the transfer queue + deal1 := generateDeal() + deal2 := generateDeal() + + started := make(chan struct{}, 2) + go func() { + err := tl.waitInQueue(ctx, deal1) + require.NoError(t, err) + started <- struct{}{} + }() + + go func() { + err := tl.waitInQueue(ctx, deal2) + require.NoError(t, err) + started <- struct{}{} + }() + + // Wait till both go-routines call waitInQueue + require.Eventually(t, func() bool { return tl.transfersCount() == 2 }, time.Second, time.Millisecond) + + // Expect the first transfer to start + tl.check(time.Now()) + <-started + + // Expect the second transfer not to start yet, because the transfer queue + // has a limit of 1 + select { + case <-started: + require.Fail(t, "expected second transfer not to start yet") + default: + } + + // Complete the first transfer + tl.complete(deal1.DealUuid) + + // Expect the second transfer to start now that the first has completed + tl.check(time.Now()) + <-started +} + +// Verifies that if a transfer stalls, another transfer is allowed to start, +// even if that means the total number of transfers breaks the soft limit +func TestTransferLimiterStalledTransfer(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cfg := TransferLimiterConfig{ + MaxConcurrent: 1, + StallCheckPeriod: time.Millisecond, + StallTimeout: time.Second, + } + tl, err := newTransferLimiter(cfg) + require.NoError(t, err) + + // Generate two deals and add them to the transfer queue + deal1 := generateDeal() + deal2 := generateDeal() + + started := make(chan struct{}, 2) + go func() { + err := tl.waitInQueue(ctx, deal1) + require.NoError(t, err) + started <- struct{}{} + }() + + go func() { + err := tl.waitInQueue(ctx, deal2) + require.NoError(t, err) + started <- struct{}{} + }() + + // Wait till both go-routines call waitInQueue + require.Eventually(t, func() bool { return tl.transfersCount() == 2 }, time.Second, time.Millisecond) + + // Expect the first transfer to start + tl.check(time.Now()) + <-started + + // Expect the second transfer not to start yet, because the transfer queue + // has a limit of 1 + select { + case <-started: + require.Fail(t, "expected second transfer 
not to start yet") + default: + } + + // Tell the transfer limiter that some progress has been made in the + // transfer for deal 1 + tl.setBytes(deal1.DealUuid, 1) + + // Simulate a check after half of the stall timeout has elapsed + tl.check(time.Now().Add(cfg.StallTimeout / 2)) + + // Expect the second transfer not to start yet, because the stall timeout + // has not expired + select { + case <-started: + require.Fail(t, "expected second transfer not to start yet") + default: + } + + // Once the stall duration has elapsed with no updates to the first + // transfer, expect the second transfer to be allowed to start + tl.check(time.Now().Add(cfg.StallTimeout)) + <-started +} + +// Verifies that transfers are prioritized in order from oldest to newest +func TestTransferLimiterPriorityOldestFirst(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cfg := TransferLimiterConfig{ + MaxConcurrent: 1, + StallCheckPeriod: time.Millisecond, + StallTimeout: 30 * time.Second, + } + tl, err := newTransferLimiter(cfg) + require.NoError(t, err) + + // Generate deals and add them to the transfer queue in the reverse order + // than they were generated + dealCount := 100 + deals := make([]*smtypes.ProviderDealState, 0, dealCount) + for i := 0; i < dealCount; i++ { + dl := generateDeal() + dl.CreatedAt = time.Now().Add(-time.Hour).Round(time.Minute).Add(time.Duration(i) * time.Second) + deals = append(deals, dl) + } + dealsReversed := make(chan *smtypes.ProviderDealState, len(deals)) + for i := len(deals) - 1; i >= 0; i-- { + dealsReversed <- deals[i] + } + + started := make(chan *smtypes.ProviderDealState) + for i := 0; i < len(deals); i++ { + // The order in which the go routines run is non-deterministic, + // but by adding deals in newest-to-oldest order, we can assume that + // it's extremely unlikely the deals will be started in + // oldest-to-newest order + go func() { + dl := <-dealsReversed + err := tl.waitInQueue(ctx, dl) + require.NoError(t, err) + started <- dl + }() + } + + // Wait for all the deals to be added to the transfer queue + require.Eventually(t, func() bool { return tl.transfersCount() == dealCount }, time.Second, time.Millisecond) + + // Expect the deals to be started in order from oldest to newest + for i := 0; i < len(deals); i++ { + go tl.check(time.Now()) + + dl := <-started + require.Equal(t, deals[i].DealUuid, dl.DealUuid) + + // Make space in the queue for the next deal to be started + tl.complete(dl.DealUuid) + } +} + +// Verifies that the prioritization favours transfers to peers that don't +// already have an ongoing transfer. 
+// eg there is +// - an ongoing transfer to peer A +// - a queued transfer to peer A +// - a queued transfer to peer B +// The next transfer should be the one to peer B +func TestTransferLimiterPriorityNoExistingTransferToPeerFirst(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cfg := TransferLimiterConfig{ + MaxConcurrent: 2, + StallCheckPeriod: time.Millisecond, + StallTimeout: 30 * time.Second, + } + tl, err := newTransferLimiter(cfg) + require.NoError(t, err) + + // Generate three deals, where the first two have the same peer + p := testutil.GeneratePeer() + h := "myhost.com" + deal1 := generateDealWithHost(h) + deal1.ClientPeerID = p + deal2 := generateDealWithHost(h) + deal2.ClientPeerID = p + deal3 := generateDeal() + + deals := make(chan *smtypes.ProviderDealState, 3) + deals <- deal1 + deals <- deal2 + deals <- deal3 + + started := make(chan *smtypes.ProviderDealState, len(deals)) + for i := 0; i < 3; i++ { + go func() { + dl := <-deals + err := tl.waitInQueue(ctx, dl) + require.NoError(t, err) + started <- dl + }() + } + + // Wait for all the deals to be added to the transfer queue + require.Eventually(t, func() bool { return len(deals) == 0 }, time.Second, time.Millisecond) + + // The queue size limit is 2. + // The oldest to newest order is deal1, deal2, deal3 + // However we expect deal1 and deal3 to be started first. + // This is because deal1 and deal2 have the same peer ID, and + // the algorithm should favour deal3 because it has a different + // peer ID. + tl.check(time.Now()) + dl := <-started + require.True(t, dl.DealUuid == deal1.DealUuid || dl.DealUuid == deal3.DealUuid) + dl = <-started + require.True(t, dl.DealUuid == deal1.DealUuid || dl.DealUuid == deal3.DealUuid) + + // Complete the first deal transfer to make space for another transfer + tl.complete(deal1.DealUuid) + + // Expect deal2 to start last + tl.check(time.Now()) + dl = <-started + require.Equal(t, deal2.DealUuid, dl.DealUuid) +} + +// Verifies that a new transfer will not be started if there are already +// transfers to that same peer that are stalled, and the soft limit has +// been reached +func TestTransferLimiterStalledTransferHardLimited(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cfg := TransferLimiterConfig{ + MaxConcurrent: 2, + StallCheckPeriod: time.Millisecond, + StallTimeout: time.Second, + } + tl, err := newTransferLimiter(cfg) + require.NoError(t, err) + + // Generate a deal and add to the transfer queue + h := "myhost.com" + deal1 := generateDealWithHost(h) + + started := make(chan struct{}, 3) + go func() { + err := tl.waitInQueue(ctx, deal1) + require.NoError(t, err) + started <- struct{}{} + }() + + // Wait till go-routine calls waitInQueue + require.Eventually(t, func() bool { return tl.transfersCount() == 1 }, time.Second, time.Millisecond) + + // Expect the first transfer to start + tl.check(time.Now()) + <-started + + // Mark the first transfer as stalled + tl.check(time.Now().Add(cfg.StallTimeout)) + + // Generate a second deal to the same peer + deal2 := generateDealWithHost(h) + deal2.ClientPeerID = deal1.ClientPeerID + + go func() { + err := tl.waitInQueue(ctx, deal2) + require.NoError(t, err) + started <- struct{}{} + }() + + // Wait till go-routine calls waitInQueue + require.Eventually(t, func() bool { return tl.transfersCount() == 2 }, time.Second, time.Millisecond) + + // It should be allowed to start, because even though there's a stalled + 
// transfer to the peer, we're still below the soft limit + tl.check(time.Now().Add(cfg.StallTimeout)) + <-started + + // Generate a third deal to the same peer + deal3 := generateDealWithHost(h) + deal3.ClientPeerID = deal1.ClientPeerID + + go func() { + err := tl.waitInQueue(ctx, deal3) + require.NoError(t, err) + started <- struct{}{} + }() + + // Wait till go-routine calls waitInQueue + require.Eventually(t, func() bool { return tl.transfersCount() == 3 }, time.Second, time.Millisecond) + + tl.check(time.Now().Add(cfg.StallTimeout)) + + // Expect the third transfer not to start yet, because there's a stalled + // transfer to the same peer in the queue and we've reached the soft limit + select { + case <-started: + require.Fail(t, "expected third transfer not to start yet") + default: + } +} diff --git a/storagemarket/types/types.go b/storagemarket/types/types.go index f59acb30b..28972f219 100644 --- a/storagemarket/types/types.go +++ b/storagemarket/types/types.go @@ -2,12 +2,18 @@ package types import ( "context" + "encoding/json" + "fmt" "io" + "net/url" "github.com/filecoin-project/boost/sealingpipeline" + "github.com/filecoin-project/boost/transport/httptransport/util" + "github.com/filecoin-project/boost/transport/types" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-fil-markets/shared" "github.com/filecoin-project/go-fil-markets/storagemarket" + multiaddrutil "github.com/filecoin-project/go-legs/httpsync/multiaddr" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v8/market" "github.com/filecoin-project/go-state-types/crypto" @@ -97,6 +103,42 @@ type Transfer struct { Size uint64 } +func (t *Transfer) Host() (string, error) { + if t.Type != "http" && t.Type != "libp2p" { + return "", fmt.Errorf("cannot parse params for unrecognized transfer type '%s'", t.Type) + } + + // de-serialize transport opaque token + tInfo := &types.HttpRequest{} + if err := json.Unmarshal(t.Params, tInfo); err != nil { + return "", fmt.Errorf("failed to de-serialize transport params bytes '%s': %w", string(t.Params), err) + } + + // Parse http / multiaddr url + u, err := util.ParseUrl(tInfo.URL) + if err != nil { + return "", fmt.Errorf("cannot parse url '%s': %w", tInfo.URL, err) + } + + // If the url is in libp2p format + if u.Scheme == util.Libp2pScheme { + // Get the host from the multiaddr + mahttp, err := multiaddrutil.ToURL(u.Multiaddr) + if err != nil { + return "", err + } + return mahttp.Host, nil + } + + // Otherwise parse as an http url + httpUrl, err := url.Parse(u.Url) + if err != nil { + return "", fmt.Errorf("cannot parse url '%s' from '%s': %w", u.Url, tInfo.URL, err) + } + + return httpUrl.Host, nil +} + type DealResponse struct { Accepted bool // Message is the reason the deal proposal was rejected. 
It is empty if diff --git a/storagemarket/types/types_test.go b/storagemarket/types/types_test.go new file mode 100644 index 000000000..4ecfc223f --- /dev/null +++ b/storagemarket/types/types_test.go @@ -0,0 +1,49 @@ +package types + +import ( + "encoding/json" + "github.com/filecoin-project/boost/transport/types" + "github.com/stretchr/testify/require" + "testing" +) + +func TestTransferHost(t *testing.T) { + testCases := []struct { + name string + xferType string + url string + expected string + }{{ + name: "http", + xferType: "http", + url: "http://foo.bar:1234", + expected: "foo.bar:1234", + }, { + name: "libp2p http", + xferType: "libp2p", + url: "libp2p:///ip4/1.2.3.4/tcp/5678/p2p/Qma9T5YraSnpRDZqRR4krcSJabThc8nwZuJV3LercPHufi", + expected: "1.2.3.4:5678", + }, { + name: "libp2p quic", + xferType: "libp2p", + url: "libp2p:///ip4/1.2.3.4/udp/5678/quic/p2p/Qma9T5YraSnpRDZqRR4krcSJabThc8nwZuJV3LercPHufi", + expected: "1.2.3.4:5678", + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := types.HttpRequest{URL: tc.url} + res, err := json.Marshal(req) + require.NoError(t, err) + + xfer := Transfer{ + Type: tc.xferType, + Params: res, + } + h, err := xfer.Host() + require.NoError(t, err) + + require.Equal(t, tc.expected, h) + }) + } +} diff --git a/transport/httptransport/http_transport.go b/transport/httptransport/http_transport.go index 7f9c69663..02529ccd0 100644 --- a/transport/httptransport/http_transport.go +++ b/transport/httptransport/http_transport.go @@ -12,8 +12,8 @@ import ( "time" "github.com/filecoin-project/boost/storagemarket/logs" - "github.com/filecoin-project/boost/transport" + "github.com/filecoin-project/boost/transport/httptransport/util" "github.com/filecoin-project/boost/transport/types" "github.com/google/uuid" "github.com/jpillora/backoff" @@ -30,10 +30,13 @@ const ( maxBackOff = 10 * time.Minute factor = 1.5 maxReconnectAttempts = 15 - - libp2pScheme = "libp2p" ) +type httpError struct { + error + code int +} + var _ transport.Transport = (*httpTransport)(nil) type Option func(*httpTransport) @@ -98,11 +101,11 @@ func (h *httpTransport) Execute(ctx context.Context, transportInfo []byte, dealI } // parse request URL - u, err := parseUrl(tInfo.URL) + u, err := util.ParseUrl(tInfo.URL) if err != nil { return nil, fmt.Errorf("failed to parse request url: %w", err) } - tInfo.URL = u.url + tInfo.URL = u.Url // check that the outputFile exists fi, err := os.Stat(dealInfo.OutputFile) @@ -146,8 +149,8 @@ func (h *httpTransport) Execute(ctx context.Context, transportInfo []byte, dealI } // If this is a libp2p URL - if u.scheme == libp2pScheme { - h.dl.Infow(duuid, "libp2p-http url", "url", tInfo.URL, "peer id", u.peerID, "multiaddr", u.multiaddr) + if u.Scheme == util.Libp2pScheme { + h.dl.Infow(duuid, "libp2p-http url", "url", tInfo.URL, "peer id", u.PeerID, "multiaddr", u.Multiaddr) // Use the libp2p client t.client = h.libp2pClient @@ -157,13 +160,13 @@ func (h *httpTransport) Execute(ctx context.Context, transportInfo []byte, dealI if deadline, ok := ctx.Deadline(); ok { addrTtl = time.Until(deadline) } - h.libp2pHost.Peerstore().AddAddr(u.peerID, u.multiaddr, addrTtl) + h.libp2pHost.Peerstore().AddAddr(u.PeerID, u.Multiaddr, addrTtl) // Protect the connection for the lifetime of the data transfer tag := uuid.New().String() - h.libp2pHost.ConnManager().Protect(u.peerID, tag) + h.libp2pHost.ConnManager().Protect(u.PeerID, tag) cleanupFns = append(cleanupFns, func() { - h.libp2pHost.ConnManager().Unprotect(u.peerID, tag) + 
h.libp2pHost.ConnManager().Unprotect(u.PeerID, tag) }) } else { t.client = http.DefaultClient diff --git a/transport/httptransport/util.go b/transport/httptransport/util/util.go similarity index 60% rename from transport/httptransport/util.go rename to transport/httptransport/util/util.go index f48f4fbbd..db7b813df 100644 --- a/transport/httptransport/util.go +++ b/transport/httptransport/util/util.go @@ -1,4 +1,4 @@ -package httptransport +package util import ( "fmt" @@ -9,19 +9,16 @@ import ( "github.com/multiformats/go-multiaddr" ) -type httpError struct { - error - code int -} +const Libp2pScheme = "libp2p" -type transportUrl struct { - scheme string - url string - peerID peer.ID - multiaddr multiaddr.Multiaddr +type TransportUrl struct { + Scheme string + Url string + PeerID peer.ID + Multiaddr multiaddr.Multiaddr } -func parseUrl(urlStr string) (*transportUrl, error) { +func ParseUrl(urlStr string) (*TransportUrl, error) { u, err := url.Parse(urlStr) if err != nil { return nil, fmt.Errorf("parsing url '%s': %w", urlStr, err) @@ -29,15 +26,15 @@ func parseUrl(urlStr string) (*transportUrl, error) { if u.Scheme == "" { return nil, fmt.Errorf("parsing url '%s': could not parse scheme", urlStr) } - if u.Scheme == libp2pScheme { + if u.Scheme == Libp2pScheme { return parseLibp2pUrl(urlStr) } - return &transportUrl{scheme: u.Scheme, url: urlStr}, nil + return &TransportUrl{Scheme: u.Scheme, Url: urlStr}, nil } -func parseLibp2pUrl(urlStr string) (*transportUrl, error) { +func parseLibp2pUrl(urlStr string) (*TransportUrl, error) { // Remove libp2p prefix - prefix := libp2pScheme + "://" + prefix := Libp2pScheme + "://" if !strings.HasPrefix(urlStr, prefix) { return nil, fmt.Errorf("libp2p URL '%s' must start with prefix '%s'", urlStr, prefix) } @@ -53,10 +50,10 @@ func parseLibp2pUrl(urlStr string) (*transportUrl, error) { return nil, fmt.Errorf("expected only one address in url '%s'", urlStr) } - return &transportUrl{ - scheme: libp2pScheme, - url: libp2pScheme + "://" + addrInfo.ID.String(), - peerID: addrInfo.ID, - multiaddr: addrInfo.Addrs[0], + return &TransportUrl{ + Scheme: Libp2pScheme, + Url: Libp2pScheme + "://" + addrInfo.ID.String(), + PeerID: addrInfo.ID, + Multiaddr: addrInfo.Addrs[0], }, nil } diff --git a/transport/httptransport/util_test.go b/transport/httptransport/util/util_test.go similarity index 53% rename from transport/httptransport/util_test.go rename to transport/httptransport/util/util_test.go index 71a1477b5..ddfd11193 100644 --- a/transport/httptransport/util_test.go +++ b/transport/httptransport/util/util_test.go @@ -1,4 +1,4 @@ -package httptransport +package util import ( "testing" @@ -12,21 +12,21 @@ func TestParseUrl(t *testing.T) { tests := []struct { name string url string - expect *transportUrl + expect *TransportUrl expectError bool }{{ name: "http url", url: "http://www.test.com/path", - expect: &transportUrl{ - url: "http://www.test.com/path", - scheme: "http", + expect: &TransportUrl{ + Url: "http://www.test.com/path", + Scheme: "http", }, }, { name: "https url", url: "https://www.test.com/path", - expect: &transportUrl{ - url: "https://www.test.com/path", - scheme: "https", + expect: &TransportUrl{ + Url: "https://www.test.com/path", + Scheme: "https", }, }, { name: "bad url", @@ -35,37 +35,47 @@ func TestParseUrl(t *testing.T) { }, { name: "ip4 libp2p url", url: "libp2p:///ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - expect: &transportUrl{ - scheme: libp2pScheme, - url: libp2pScheme + 
"://QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - peerID: peerMustDecode(t, "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"), - multiaddr: multiAddrMustParse(t, "/ip4/104.131.131.82/tcp/4001"), + expect: &TransportUrl{ + Scheme: Libp2pScheme, + Url: Libp2pScheme + "://QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", + PeerID: peerMustDecode(t, "QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"), + Multiaddr: multiAddrMustParse(t, "/ip4/104.131.131.82/tcp/4001"), }, }, { name: "dns libp2p url", url: "libp2p:///dnsaddr/bootstrap.libp2p.io/ipfs/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", - expect: &transportUrl{ - scheme: libp2pScheme, - url: libp2pScheme + "://QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", - peerID: peerMustDecode(t, "QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb"), - multiaddr: multiAddrMustParse(t, "/dnsaddr/bootstrap.libp2p.io"), + expect: &TransportUrl{ + Scheme: Libp2pScheme, + Url: Libp2pScheme + "://QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + PeerID: peerMustDecode(t, "QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb"), + Multiaddr: multiAddrMustParse(t, "/dnsaddr/bootstrap.libp2p.io"), + }, + }, { + name: "quic libp2p url", + url: "libp2p:///ip4/1.2.3.4/udp/5678/quic/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + expect: &TransportUrl{ + Scheme: Libp2pScheme, + Url: Libp2pScheme + "://QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb", + PeerID: peerMustDecode(t, "QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb"), + Multiaddr: multiAddrMustParse(t, "/ip4/1.2.3.4/udp/5678/quic"), }, }, { name: "libp2p url no peer ID", url: "libp2p:///ip4/104.131.131.82/tcp/4001", expectError: true, }} + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := parseUrl(tt.url) + got, err := ParseUrl(tt.url) if tt.expectError { require.Error(t, err) return } else { require.NoError(t, err) } - if tt.expect.multiaddr != nil { - require.Equal(t, tt.expect.multiaddr.String(), got.multiaddr.String()) + if tt.expect.Multiaddr != nil { + require.Equal(t, tt.expect.Multiaddr.String(), got.Multiaddr.String()) } require.Equal(t, tt.expect, got) }) From 3bc2b7df05edac2ca1c1774e75d5acde5cf204fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Airenas=20Vai=C4=8Di=C5=ABnas?= Date: Tue, 30 Aug 2022 16:58:39 +0300 Subject: [PATCH 05/26] Docker files (#713) * Add docker build scripts Prepare containers to run lotus, boost in debug mode * Fix lotus version in dockerfile * Add lotus, lotus-miner to boost image * Make clearer var names * Fix nginx proxy * Add sample script for making a demo deal * Add sample docker-compose for starting devnet * Decorate output of sample script * Add task to push all docker containers * Add info about building docker images * Add info on how to inspect devnet in docker * Fix port extract command * Fix typos * Update scripts for docker compose v2 * Fix typo * Expose default ports * Add resolver for nginx docker dns Boost container's IP can change and gui can't deal with it * Fix new lines * Update readme for devnet And some other minor text cleaning * Add script on timer to reload nginx conf Fix problem when nginx caches boost IP and GUI is not functioning after `docker compose up` finishes * Fix problem with failing publishing Local address of a miner was passed to the boost container * Mimic devnet params from boost sample From pkg/devnet/devnet.go * Build boost from git source not from a local one * Add publish and retrieve tasks into demo script * Update examples/devnet/docker-compose.yaml Co-authored-by: Anton Evangelatov * Update 
filecoin proofs dir for lotus-miner * Reduce WaitDealsDelay Co-authored-by: Anton Evangelatov --- .dockerignore | 1 + build/devnet/Makefile | 34 ++++++ build/devnet/README.md | 30 +++++ build/devnet/boost-gui/Dockerfile | 39 +++++++ build/devnet/boost-gui/Makefile | 17 +++ build/devnet/boost-gui/entrypoint.sh | 8 ++ build/devnet/boost-gui/nginx.conf.in | 28 +++++ build/devnet/boost/Dockerfile | 64 +++++++++++ build/devnet/boost/Makefile | 18 +++ build/devnet/boost/entrypoint.sh | 74 ++++++++++++ build/devnet/boost/sample/make-a-deal.sh | 138 +++++++++++++++++++++++ build/devnet/lotus-miner/Dockerfile | 32 ++++++ build/devnet/lotus-miner/Makefile | 16 +++ build/devnet/lotus-miner/entrypoint.sh | 16 +++ build/devnet/lotus/Dockerfile | 32 ++++++ build/devnet/lotus/Makefile | 16 +++ build/devnet/lotus/entrypoint.sh | 22 ++++ examples/devnet/.env | 5 + examples/devnet/.gitignore | 1 + examples/devnet/Makefile | 20 ++++ examples/devnet/README.md | 45 ++++++++ examples/devnet/docker-compose.yaml | 82 ++++++++++++++ 22 files changed, 738 insertions(+) create mode 100644 .dockerignore create mode 100644 build/devnet/Makefile create mode 100644 build/devnet/README.md create mode 100644 build/devnet/boost-gui/Dockerfile create mode 100644 build/devnet/boost-gui/Makefile create mode 100755 build/devnet/boost-gui/entrypoint.sh create mode 100644 build/devnet/boost-gui/nginx.conf.in create mode 100644 build/devnet/boost/Dockerfile create mode 100644 build/devnet/boost/Makefile create mode 100755 build/devnet/boost/entrypoint.sh create mode 100755 build/devnet/boost/sample/make-a-deal.sh create mode 100644 build/devnet/lotus-miner/Dockerfile create mode 100644 build/devnet/lotus-miner/Makefile create mode 100755 build/devnet/lotus-miner/entrypoint.sh create mode 100644 build/devnet/lotus/Dockerfile create mode 100644 build/devnet/lotus/Makefile create mode 100755 build/devnet/lotus/entrypoint.sh create mode 100644 examples/devnet/.env create mode 100644 examples/devnet/.gitignore create mode 100644 examples/devnet/Makefile create mode 100644 examples/devnet/README.md create mode 100644 examples/devnet/docker-compose.yaml diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..1e107f52e --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +examples diff --git a/build/devnet/Makefile b/build/devnet/Makefile new file mode 100644 index 000000000..98b860e99 --- /dev/null +++ b/build/devnet/Makefile @@ -0,0 +1,34 @@ +################################################################################## +lotus_version?=1.17.1-rc2 +boost_version?=1.3.0-rc1 +docker_user?=filecoin + +lotus_test_image=$(docker_user)/lotus-test:$(lotus_version) +################################################################################## +lotus-$(lotus_version): + git clone --depth 1 --branch v$(lotus_version) https://github.com/filecoin-project/lotus $@ + +prepare/lotus-test: | lotus-$(lotus_version) + cd lotus-$(lotus_version) && docker build -f Dockerfile.lotus --target lotus-test -t $(lotus_test_image) . 
+.PHONY: prepare/lotus-test +################################################################################## +build/%: prepare/lotus-test + cd $* && make dbuild +push/%: prepare/lotus-test + cd $* && make dpush +################################################################################## +build/all: build/lotus build/lotus-miner build/boost build/boost-gui +.PHONY: build/all +################################################################################## +push/all: push/lotus push/lotus-miner push/boost push/boost-gui +.PHONY: push/all +################################################################################## +clean: clean/lotus-test +.PHONY: clean + +clean/lotus-test: + rm -rf lotus-$(lotus_version) +.PHONY: clean/lotus-test + +.EXPORT_ALL_VARIABLES: +################################################################################## diff --git a/build/devnet/README.md b/build/devnet/README.md new file mode 100644 index 000000000..a116f1d91 --- /dev/null +++ b/build/devnet/README.md @@ -0,0 +1,30 @@ +# Devnet docker images for lotus and boost + +This dir contains scripts for building docker images that are required to start the lotus devnet with the boost as a storage provider. It is a realization of [devnet guide](../../documentation/devnet.md) in docker containers. `lotus` and `lotus-miner` images are based on the [official lotus image file](https://github.com/filecoin-project/lotus/blob/master/Dockerfile.lotus). Because there is no image with lotus in debug mode published on Dockerhub so we rebuild lotus containers locally. + +NOTE: These docker images are for demo and devs ONLY. They MUST NOT/CAN NOT be used in production environments. + +## Building images: + +1. Select lotus version, for example: `lotus_version=1.17.1-rc2`. It must be the tag name of [the lotus git repo](https://github.com/filecoin-project/lotus/tags) without `v` prefix. +2. Select boost version, for example: `boost_version=1.3.0-rc1`. Docker images for the boost will be built on the current code base. The `boost_version` is just used to tag images. If you want to build images for a specific boost version then you have to checkout that version first. +3. Build images + +``` +make build/all lotus_version=1.17.1-rc2 boost_version=1.3.0-rc1 +``` +## Publishing images: + +1. Log in to docker with the `filecoin` user. +2. Publish +``` +make push/all lotus_version=1.17.1-rc2 boost_version=1.3.0-rc1 +``` +3. 
If you want to publish using a non `filecoin` account (for some testing purposes) + +``` +make push/all lotus_version=1.17.1-rc2 boost_version=1.3.0-rc1 docker_user= +``` +## How to run devnet in docker: + +Follow the instructions in the [docker-compose devnet guide](../../examples/devnet/README.md) diff --git a/build/devnet/boost-gui/Dockerfile b/build/devnet/boost-gui/Dockerfile new file mode 100644 index 000000000..32d90e087 --- /dev/null +++ b/build/devnet/boost-gui/Dockerfile @@ -0,0 +1,39 @@ +######################################################################################### +######################################################################################### +FROM node:16.16-alpine3.15 AS builder + +RUN apk --no-cache --update add git + +ARG BUILD_VERSION=0.1 +WORKDIR /src +RUN git clone --depth 1 --branch v${BUILD_VERSION} https://github.com/filecoin-project/boost + +WORKDIR /src/boost/react + +#TODO remove force after fixing npm dependencies +RUN npm install --force + +RUN npm run build +##################################################################################### +FROM nginx:1.23-alpine + +ARG BUILD_VERSION=0.1 + +LABEL org.opencontainers.image.version=$BUILD_VERSION \ + org.opencontainers.image.authors="Boost Dev Team" \ + name="boost-gui" \ + maintainer="Boost Dev Team" \ + vendor="Boost Dev Team" \ + version=$BUILD_VERSION \ + release=$BUILD_VERSION \ + summary="This image is used to host the boost-gui service" \ + description="This image is used to host the boost-gui service" + +EXPOSE 8000 +ENV BOOST_URL=http://boost:8080 + +COPY --from=builder /src/boost/react/build usr/share/nginx/html +COPY nginx.conf.in /app/nginx.conf.in +COPY entrypoint.sh /app/entrypoint.sh + +ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/build/devnet/boost-gui/Makefile b/build/devnet/boost-gui/Makefile new file mode 100644 index 000000000..cdf63504f --- /dev/null +++ b/build/devnet/boost-gui/Makefile @@ -0,0 +1,17 @@ +##################################################################################### +service=$(docker_user)/boost-gui +version=$(boost_version) +########### DOCKER ################################################################## +tag=$(service):$(version) + +dbuild: + docker build -t $(tag) --build-arg BUILD_VERSION=$(version) . 
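+# BUILD_VERSION is passed through to the Dockerfile, which clones the boost repo at tag
+# v${BUILD_VERSION} and builds the GUI from it.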
+ +dpush: dbuild + docker push $(tag) + +dscan: dbuild + docker scan --accept-license $(tag) +##################################################################################### +.PHONY: + dbuild dpush dscan diff --git a/build/devnet/boost-gui/entrypoint.sh b/build/devnet/boost-gui/entrypoint.sh new file mode 100755 index 000000000..8d7ad27f0 --- /dev/null +++ b/build/devnet/boost-gui/entrypoint.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env sh +set -e + +echo Preparing config with BOOST_URL=${BOOST_URL} +cat /app/nginx.conf.in | envsubst '$BOOST_URL' > /etc/nginx/conf.d/default.conf + +echo Starting nginx +exec nginx -g 'daemon off;' diff --git a/build/devnet/boost-gui/nginx.conf.in b/build/devnet/boost-gui/nginx.conf.in new file mode 100644 index 000000000..9b8c2f654 --- /dev/null +++ b/build/devnet/boost-gui/nginx.conf.in @@ -0,0 +1,28 @@ +server { + listen 8000; + charset utf-8; + sendfile on; + root /usr/share/nginx/html; + + location / { + expires -1; + add_header Pragma "no-cache"; + add_header Cache-Control "no-store, no-cache, must-revalidate, post-check=0, pre-check=0"; + + try_files $uri $uri/ /index.html; + } + + location /graphql { + resolver 127.0.0.11 valid=30s; + proxy_pass ${BOOST_URL}/graphql; + } + + location /graphql/subscription { + resolver 127.0.0.11 valid=30s; + proxy_pass ${BOOST_URL}/graphql/subscription; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + } +} diff --git a/build/devnet/boost/Dockerfile b/build/devnet/boost/Dockerfile new file mode 100644 index 000000000..186e2c034 --- /dev/null +++ b/build/devnet/boost/Dockerfile @@ -0,0 +1,64 @@ +######################################################################################### +######################################################################################### +ARG LOTUS_TEST_IMAGE=filecoin/lotus-test:latest +FROM ${LOTUS_TEST_IMAGE} as lotus-dev +######################################################################################### +FROM golang:1.18-bullseye as builder + +RUN apt update && apt install -y \ + build-essential \ + bzr pkg-config \ + clang \ + curl \ + gcc git \ + hwloc \ + jq \ + libhwloc-dev wget \ + mesa-opencl-icd \ + ocl-icd-opencl-dev + +WORKDIR /go/src/ + +ARG BUILD_VERSION=0.1 +RUN git clone --depth 1 --branch v${BUILD_VERSION} https://github.com/filecoin-project/boost + +RUN cd boost && make debug +######################################################################################### +FROM ubuntu:20.04 as runner + +RUN apt update && apt install -y \ + curl \ + hwloc \ + jq + +ARG BUILD_VERSION=0.1 + +LABEL org.opencontainers.image.version=$BUILD_VERSION \ + org.opencontainers.image.authors="Boost Dev Team" \ + name="boost-dev" \ + maintainer="Boost Dev Team" \ + vendor="Boost Dev Team" \ + version=$BUILD_VERSION \ + release=$BUILD_VERSION \ + summary="This image is used to host the boost-dev storage provider" \ + description="This image is used to host the boost-dev storage provider" + +WORKDIR /app +ENV BOOST_PATH /var/lib/boost +VOLUME /var/lib/boost +EXPOSE 8080 + +COPY --from=builder /go/src/boost/boostd /usr/local/bin/ +COPY --from=builder /go/src/boost/boost /usr/local/bin/ +COPY --from=builder /go/src/boost/boostx /usr/local/bin/ +COPY --from=lotus-dev /usr/local/bin/lotus /usr/local/bin/ +COPY --from=lotus-dev /usr/local/bin/lotus-miner /usr/local/bin/ +## Fix missing lib libhwloc.so.5 +RUN ls -1 /lib/x86_64-linux-gnu/libhwloc.so.* | head -n 1 | xargs -n1 -I {} ln -s {} 
/lib/x86_64-linux-gnu/libhwloc.so.5 +## Smoke test for the boost and lotus +RUN lotus -v && boost -v + +COPY entrypoint.sh /app/ +COPY sample/* /app/sample/ + +ENTRYPOINT ["./entrypoint.sh"] diff --git a/build/devnet/boost/Makefile b/build/devnet/boost/Makefile new file mode 100644 index 000000000..4dc33870d --- /dev/null +++ b/build/devnet/boost/Makefile @@ -0,0 +1,18 @@ +##################################################################################### +service=$(docker_user)/boost-dev +version=$(boost_version) +########### DOCKER ################################################################## +tag=$(service):$(version) + +dbuild: + docker build --build-arg LOTUS_TEST_IMAGE=$(lotus_test_image) --build-arg BUILD_VERSION=$(version) \ + -t $(tag) . + +dpush: dbuild + docker push $(tag) + +dscan: dbuild + docker scan --accept-license $(tag) +##################################################################################### +.PHONY: + dbuild dpush dscan diff --git a/build/devnet/boost/entrypoint.sh b/build/devnet/boost/entrypoint.sh new file mode 100755 index 000000000..57d29ae0e --- /dev/null +++ b/build/devnet/boost/entrypoint.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +set -e + +echo Wait for lotus is ready ... +lotus wait-api +echo Wait for lotus-miner is ready ... +lotus-miner wait-api +echo BOOST_PATH=$BOOST_PATH +export DEFAULT_WALLET=`lotus wallet default` +export FULLNODE_API_INFO=`lotus auth api-info --perm=admin | cut -f2 -d=` +export MINER_API_INFO=`lotus-miner auth api-info --perm=admin | cut -f2 -d=` + +if [ ! -f $BOOST_PATH/.init.boost ]; then + echo Init wallets ... + export COLLAT_WALLET=`lotus wallet new bls` + export PUBMSG_WALLET=`lotus wallet new bls` + export CLIENT_WALLET=`lotus wallet new bls` + echo MINER_API_INFO=$MINER_API_INFO + echo FULLNODE_API_INFO=$FULLNODE_API_INFO + echo PUBMSG_WALLET=$PUBMSG_WALLET + echo COLLAT_WALLET=$COLLAT_WALLET + + lotus send --from $DEFAULT_WALLET $COLLAT_WALLET 10 + lotus send --from $DEFAULT_WALLET $PUBMSG_WALLET 10 + lotus send --from $DEFAULT_WALLET $CLIENT_WALLET 10 + lotus wallet market add --from $DEFAULT_WALLET --address $CLIENT_WALLET 5 + lotus wallet market add --address $COLLAT_WALLET 5 + + until lotus-miner actor control set --really-do-it ${PUBMSG_WALLET}; do echo Waiting for storage miner API ready ...; sleep 1; done + + echo Init boost on first run ... + + boostd -vv --boost-repo $BOOST_PATH init --api-sealer=$MINER_API_INFO \ + --api-sector-index=$MINER_API_INFO \ + --wallet-publish-storage-deals=$PUBMSG_WALLET \ + --wallet-deal-collateral=$COLLAT_WALLET \ + --max-staging-deals-bytes=2000000000 + + # echo exit code: $? + + echo Setting port in boost config... + sed -i 's|ip4/0.0.0.0/tcp/0|ip4/0.0.0.0/tcp/50000|g' $BOOST_PATH/config.toml + + echo Done + touch $BOOST_PATH/.init.boost +fi + +if [ ! -f $BOOST_PATH/.register.boost ]; then + echo Temporary starting boost to get maddr... + + boostd -vv run &> $BOOST_PATH/boostd.log & + BOOST_PID=`echo $!` + echo Got boost PID = $BOOST_PID + + until cat $BOOST_PATH/boostd.log | grep maddr; do echo "Waiting for boost..."; sleep 1; done + echo Looks like boost started and initialized... + + echo Registering to lotus-miner... + MADDR=`cat $BOOST_PATH/boostd.log | grep maddr | cut -f3 -d"{" | cut -f1 -d:` + echo Got maddr=${MADDR} + + lotus-miner actor set-peer-id ${MADDR} + lotus-miner actor set-addrs /dns/boost/tcp/50000 + echo Registered + + touch $BOOST_PATH/.register.boost + echo Try to stop boost... 
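+    # Try a graceful shutdown first (SIGTERM); fall back to SIGKILL if boostd is still running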
+ kill -15 $BOOST_PID || kill -9 $BOOST_PID + rm -f $BOOST_PATH/boostd.log + echo Super. DONE! Boostd is now configured and will be started soon +fi + +echo Starting boost in dev mode... +exec boostd -vv run diff --git a/build/devnet/boost/sample/make-a-deal.sh b/build/devnet/boost/sample/make-a-deal.sh new file mode 100755 index 000000000..e95277d76 --- /dev/null +++ b/build/devnet/boost/sample/make-a-deal.sh @@ -0,0 +1,138 @@ +#!/usr/bin/env bash +################################################################################### +# sample demo script for making a deal with boost client +################################################################################### +set -e +# colors +cb="\e[1m" +ci="\e[3m" +cn="\e[0m" +################################################################################### +printf "\n +###################################################################################\n \ +Hello to the demo script that makes a storage deal using the boost client\n \ +###################################################################################\n \ +1. The boost client needs to know how to connect to the lotus instance. \ +We need to set ${cb}FULLNODE_API_INFO${cn} env var. We have the lotus client here that will provide a connection token.\n \ + : ${ci}lotus auth api-info --perm=admin${cn} - returns lotus connection token \n\n" +read -rsp $'Press any key to export variable...\n' -n1 key +export `lotus auth api-info --perm=admin` + +printf "\nExported FULLNODE_API_INFO=$FULLNODE_API_INFO\n \ +###################################################################################\n" +################################################################################### +printf "2. The boost client needs to be initialized by calling \n${ci}boost init${cn} \n\n" +read -rsp $'Press any key to execute it...\n\n' -n1 key + +boost init + +printf "\n\nGreat. Boost client has been initialized.\n \ +###################################################################################\n" +################################################################################### +printf "3. Now add some funds from lotus to boost wallet. We will use the lotus client:\n\n \ + : ${ci}lotus wallet default${cn} - returns default lotus wallet\n \ + : ${ci}boost wallet default${cn} - returns default wallet for the current boost client actor\n \ + : ${ci}lotus send --from=`lotus wallet default` `boost wallet default` 10${cn} - sends 10 FIL\n" +read -rsp $'Press any key to execute it...\n\n' -n1 key + +lotus send --from=`lotus wallet default` `boost wallet default` 10 + +printf "\n\nDone. Funds transfer was initiated\n \ +###################################################################################\n" +################################################################################### +printf "4. Now add some funds to the market actor\n \ + : ${ci}boostx market-add 1${cn}\n\n" +read -rsp $'Press any key to execute it...\n' -n1 key + +until boostx market-add 1; do printf "\nOpps, maybe funds not added yet.\nNeed to wait some time. \n"; read -rsp $'Press any key to try again...\n' -n1 key; done + +printf "\n\nYes. We can make a deal now.\n \ +###################################################################################\n" +################################################################################### +printf "5. Let's generate a sample file in ${ci}/app/public/sample.txt${cn}. 
We will use it as a demo file.\n\n" +read -rsp $'Press any key to generate it...\n\n' -n1 key +rm -f /app/public/sample.txt +for i in {1..57}; do echo "Hi Boost, $i times" >> /app/public/sample.txt; done + +printf "\n\nFile content:\n\n" +cat /app/public/sample.txt +printf "\n\n \ +###################################################################################\n" +################################################################################### + +printf "6. After that, you need to generate a car file for data you want to store on Filecoin (${ci}/app/public/sample.txt${cn}), \ +and note down its ${ci}payload-cid${cn}. \ +We will use the ${ci}boostx${cn} utility\n \ + : ${ci}boostx generate-car /app/public/sample.txt /app/public/sample.car${cn}\n\n" +read -rsp $'Press any key to execute it...\n\n' -n1 key + +boostx generate-car /app/public/sample.txt /app/public/sample.car + +PAYLOAD_CID=`boostx generate-car /app/public/sample.txt /app/public/sample.car | grep CID | cut -d: -f2 | xargs` +printf "\n\nDone. We noted payload-cid = ${ci}$PAYLOAD_CID${cn}\n \ +###################################################################################\n" +################################################################################### +printf "7. Then you need to calculate the commp and piece size for the generated car file:\n \ + : ${ci}boostx commp /app/public/sample.car${cn}\n\n" +read -rsp $'Press any key to execute it...\n\n' -n1 key + +boostx commp /app/public/sample.car + +COMMP_CID=`boostx commp /app/public/sample.car 2> /dev/null | grep CID | cut -d: -f2 | xargs` +PIECE=`boostx commp /app/public/sample.car 2> /dev/null | grep Piece | cut -d: -f2 | xargs` +CAR=`boostx commp /app/public/sample.car 2> /dev/null | grep Car | cut -d: -f2 | xargs` +printf "\n\nYes. We also have remembered these values:\n \ +Commp-cid = $COMMP_CID \n \ +Piece size = $PIECE \n \ +Car size = $CAR \n \ +###################################################################################\n" +################################################################################### +printf "8. That's it. We are ready to make the deal. \n \ + : ${ci}boost deal --verified=false --provider=t01000 \ +--http-url=http://demo-http-server/sample.car \ +--commp=$COMMP_CID --car-size=$CAR --piece-size=$PIECE \ +--payload-cid=$PAYLOAD_CID --storage-price 20000000000\n\n${cn}" +read -rsp $'Press any key to make the deal...\n\n' -n1 key + +until boost deal --verified=false \ + --provider=t01000 \ + --http-url=http://demo-http-server/sample.car \ + --commp=$COMMP_CID \ + --car-size=$CAR \ + --piece-size=$PIECE \ + --payload-cid=$PAYLOAD_CID --storage-price 20000000000 +do + printf "\nThe error has occured. Perhaps we should wait some time for funds to arrive into the market account.\n\n" + read -rsp $'Press any key to check the boost wallet...\n\n' -n1 key + boost init + read -rsp $'\n\nPress any key to try making the deal again...\n' -n1 key +done + +printf "\n\n ${cb}Congrats! You have made it.${cn}\n\n \ +###################################################################################\n" +###################################################################################" +printf "9. 
Deal has been made, and it will be published automatically after some time, but you can do it manually using boost's graphql API\n \ +: ${ci}curl -X POST -H \"Content-Type: application/json\" -d '{\"query\":\"mutation { dealPublishNow }\"}' http://localhost:8080/graphql/query ${cn}\n\n" +read -rsp $'Press any key to publish the deal...\n\n' -n1 key + +curl -X POST -H "Content-Type: application/json" -d '{"query":"mutation { dealPublishNow }"}' http://localhost:8080/graphql/query | jq +printf "\nDone.\n\n \ +###################################################################################\n" +################################################################################### +printf "10. To retrieve the file from the ${cb}lotus${cn} system you can use \n\ +${ci}lotus client retrieve${cn} or ${ci}lotus client cat${cn} commands.\n\ +: ${ci}lotus client cat --miner t01000 $PAYLOAD_CID ${cn}\n\n" + +read -rsp $'Press any key to show the file content...\n\n' -n1 key +until lotus client cat --miner t01000 $PAYLOAD_CID +do + printf "\nFile publishing may take time, please wait some time until the deal is finished and try again.\n\n" + read -rsp $'Press any key to try again...\n' -n1 key +done + +printf "\n\nIf you see a file content you have just completed the demo. You have succesfully:\n\n\ + 1) initiated the boost client\n\ + 2) prepared sample file\n\ + 3) sent the sample file to the Filecoin devnet\n\ + 4) retrieved the content of the file from it.\n\n\ +More info at ${cb}https://boost.filecoin.io${cn} or ${cb}https://github.com/filecoin-project/boost${cn}.\n\n\n" diff --git a/build/devnet/lotus-miner/Dockerfile b/build/devnet/lotus-miner/Dockerfile new file mode 100644 index 000000000..4507e492d --- /dev/null +++ b/build/devnet/lotus-miner/Dockerfile @@ -0,0 +1,32 @@ +ARG LOTUS_TEST_IMAGE=filecoin/lotus-test:latest +############################################################################# +FROM ${LOTUS_TEST_IMAGE} + +ARG BUILD_VERSION=0.1 + +LABEL org.opencontainers.image.version=$BUILD_VERSION \ + org.opencontainers.image.authors="Boost Dev Team" \ + name="lotus-miner-dev" \ + maintainer="Boost Dev Team" \ + vendor="Boost Dev Team" \ + version=$BUILD_VERSION \ + release=$BUILD_VERSION \ + summary="This image is used to host the lotus-miner dev service" \ + description="This image is used to host the lotus-miner dev service" + +EXPOSE 2345 +ENV LOTUS_SKIP_GENESIS_CHECK=_yes_ +ENV GENESIS_PATH=/var/lib/genesis +ENV SECTOR_SIZE=8388608 + +VOLUME /var/tmp/filecoin-proof-parameters +VOLUME /var/lib/genesis + +WORKDIR /app +RUN mkdir -p /app + +COPY entrypoint.sh /app + +USER root + +ENTRYPOINT ["./entrypoint.sh"] diff --git a/build/devnet/lotus-miner/Makefile b/build/devnet/lotus-miner/Makefile new file mode 100644 index 000000000..74f8bd1ee --- /dev/null +++ b/build/devnet/lotus-miner/Makefile @@ -0,0 +1,16 @@ +##################################################################################### +service=$(docker_user)/lotus-miner-dev +version?=$(lotus_version) +########### DOCKER ################################################################## +tag=$(service):$(version) +dbuild: + docker build -t $(tag) --build-arg LOTUS_TEST_IMAGE=$(lotus_test_image) --build-arg BUILD_VERSION=$(version) . 
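+# LOTUS_TEST_IMAGE must point at the locally built lotus-test image (see ../Makefile);
+# the lotus-miner image is layered on top of it.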
+ +dpush: dbuild + docker push $(tag) + +dscan: dbuild + docker scan --accept-license $(tag) +##################################################################################### +.PHONY: + dbuild dpush dscan diff --git a/build/devnet/lotus-miner/entrypoint.sh b/build/devnet/lotus-miner/entrypoint.sh new file mode 100755 index 000000000..e1041ebaa --- /dev/null +++ b/build/devnet/lotus-miner/entrypoint.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +set -e +echo Wait for lotus is ready ... +lotus wait-api +echo Lotus ready. Lets go +if [ ! -f $LOTUS_MINER_PATH/.init.miner ]; then + echo Import the genesis miner key ... + lotus wallet import --as-default $GENESIS_PATH/pre-seal-t01000.key + echo Set up the genesis miner ... + lotus-miner init --genesis-miner --actor=t01000 --sector-size=$SECTOR_SIZE --pre-sealed-sectors=$GENESIS_PATH --pre-sealed-metadata=$GENESIS_PATH/pre-seal-t01000.json --nosync + touch $LOTUS_MINER_PATH/.init.miner + echo Done +fi + +echo Starting lotus miner ... +exec lotus-miner run --nosync diff --git a/build/devnet/lotus/Dockerfile b/build/devnet/lotus/Dockerfile new file mode 100644 index 000000000..94fc9037d --- /dev/null +++ b/build/devnet/lotus/Dockerfile @@ -0,0 +1,32 @@ +ARG LOTUS_TEST_IMAGE=filecoin/lotus-test:latest +############################################################################# +FROM ${LOTUS_TEST_IMAGE} + +ARG BUILD_VERSION=0.1 + +LABEL org.opencontainers.image.version=$BUILD_VERSION \ + org.opencontainers.image.authors="Boost Dev Team" \ + name="lotus-dev" \ + maintainer="Boost Dev Team" \ + vendor="Boost Dev Team" \ + version=$BUILD_VERSION \ + release=$BUILD_VERSION \ + summary="This image is used to host the lotus dev service" \ + description="This image is used to host the lotus dev service" + +EXPOSE 1234 +ENV LOTUS_SKIP_GENESIS_CHECK=_yes_ +ENV GENESIS_PATH=/var/lib/genesis +ENV SECTOR_SIZE=8388608 + +VOLUME /var/tmp/filecoin-proof-parameters +VOLUME /var/lib/genesis + +WORKDIR /app +RUN mkdir -p /app + +COPY entrypoint.sh /app + +USER root + +ENTRYPOINT ["./entrypoint.sh"] diff --git a/build/devnet/lotus/Makefile b/build/devnet/lotus/Makefile new file mode 100644 index 000000000..8ac9eb49a --- /dev/null +++ b/build/devnet/lotus/Makefile @@ -0,0 +1,16 @@ +##################################################################################### +service=$(docker_user)/lotus-dev +version?=$(lotus_version) +########### DOCKER ################################################################## +tag=$(service):$(version) +dbuild: + docker build -t $(tag) --build-arg LOTUS_TEST_IMAGE=$(lotus_test_image) --build-arg BUILD_VERSION=$(version) . + +dpush: dbuild + docker push $(tag) + +dscan: dbuild + docker scan --accept-license $(tag) +##################################################################################### +.PHONY: + dbuild dpush dscan diff --git a/build/devnet/lotus/entrypoint.sh b/build/devnet/lotus/entrypoint.sh new file mode 100755 index 000000000..8c2552275 --- /dev/null +++ b/build/devnet/lotus/entrypoint.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +set -e +if [ ! -f $LOTUS_PATH/.init.params ]; then + echo Initializing fetch params ... + lotus fetch-params $SECTOR_SIZE + touch $LOTUS_PATH/.init.params + echo Done +fi + +if [ ! -f $LOTUS_PATH/.init.genesis ]; then + echo Initializing pre seal ... + lotus-seed --sector-dir $GENESIS_PATH pre-seal --sector-size $SECTOR_SIZE --num-sectors 1 + echo Initializing genesis ... + lotus-seed --sector-dir $GENESIS_PATH genesis new $LOTUS_PATH/localnet.json + echo Initializing address ... 
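+    # Add the pre-sealed genesis miner (t01000) to the genesis template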
+ lotus-seed --sector-dir $GENESIS_PATH genesis add-miner $LOTUS_PATH/localnet.json $GENESIS_PATH/pre-seal-t01000.json + touch $LOTUS_PATH/.init.genesis + echo Done +fi + +echo Starting lotus deamon ... +exec lotus daemon --lotus-make-genesis=$LOTUS_PATH/devgen.car --genesis-template=$LOTUS_PATH/localnet.json --bootstrap=false diff --git a/examples/devnet/.env b/examples/devnet/.env new file mode 100644 index 000000000..3c060d4a5 --- /dev/null +++ b/examples/devnet/.env @@ -0,0 +1,5 @@ +DOCKER_USER=filecoin +LOTUS_IMAGE=${DOCKER_USER}/lotus-dev:1.17.1-rc2 +LOTUS_MINER_IMAGE=${DOCKER_USER}/lotus-miner-dev:1.17.1-rc2 +BOOST_IMAGE=${DOCKER_USER}/boost-dev:1.3.0-rc1 +BOOST_GUI_IMAGE=${DOCKER_USER}/boost-gui:1.3.0-rc1 \ No newline at end of file diff --git a/examples/devnet/.gitignore b/examples/devnet/.gitignore new file mode 100644 index 000000000..1269488f7 --- /dev/null +++ b/examples/devnet/.gitignore @@ -0,0 +1 @@ +data diff --git a/examples/devnet/Makefile b/examples/devnet/Makefile new file mode 100644 index 000000000..c1c66a960 --- /dev/null +++ b/examples/devnet/Makefile @@ -0,0 +1,20 @@ +################################################################################## +start: + docker compose up -d + docker compose logs -f +.PHONY: start +################################################################################## +ssh/boost: + docker compose exec boost /bin/bash +.PHONY: ssh/boost +################################################################################## +clean: clean/docker + rm -rf data +.PHONY: clean +clean/all: clean + rm -rf /var/tmp/filecoin-proof-parameters +.PHONY: clean/all +clean/docker: + docker compose down +.PHONY: clean/docker +################################################################################## diff --git a/examples/devnet/README.md b/examples/devnet/README.md new file mode 100644 index 000000000..35ad7a08b --- /dev/null +++ b/examples/devnet/README.md @@ -0,0 +1,45 @@ +# Devnet in docker + +The docker-compose file contains a realization of the [devnet guide](../../documentation/devnet.md) in docker containers. + +## To start devnet: + +1. Run +``` +docker compose up -d +``` +It will spin up `lotus`, `lotus-miner`, `boost`, `boost-gui` and `demo-http-server` containers. All temporary data will be saved in `./data` folder. +The initial setup could take up to 20 min or more (it takes time to download filecoin proof parameters). During the initial setup, it is normal to see error messages in the log. Containers are waiting for the lotus to be ready. It may timeout several times. Restart is expected to be managed by `docker`. + +2. Try opening the boost GUI http://localhost:8000 with a browser. Devnet is ready to operate when the URL opens and indicates no errors on the startup page. +Also, you can try to inspect the status using `docker compose logs -f`. + +## Making a deal + +The `boost` container is packed with `boost` and `lotus` clients. You can connect to the container with the command `docker compose exec boost /bin/bash` and follow instructions for [storing files with Boost guide](https://boost.filecoin.io/tutorials/how-to-store-files-with-boost-on-filecoin). 
But the recommended startup is to follow the semi-interactive demo first: +``` +# attach to a running boost container +docker compose exec boost /bin/bash + +# execute the demo script /app/sample/make-a-deal.sh +root@83260455bbd2:/app# ./sample/make-a-deal.sh +``` +## Accessing lotus from localhost + +By default the [docker-compose.yaml](./docker-compose.yaml) does not expose any port of the `lotus` container. To access the `lotus` from a local machine: +1. You can either expose `1234` in [docker-compose.yaml](./docker-compose.yaml) or find the IP of the `lotus` container using `docker inspect lotus | grep IPAddress` command. +2. Get the `FULLNODE_API_INFO` +``` +docker exec -it lotus lotus auth api-info --perm=admin +FULLNODE_API_INFO=eyJ...ms4:/dns/lotus/tcp/1234/http +``` +3. Change the `dns/lotus/tcp/1234/http` to `ip4/<127.0.0.1 or container's IP>/tcp/1234/http` for the use in `FULLNODE_API_INFO`. + +## Cleaning devnet + +To stop containers and drop everything: +``` +docker compose down --rmi all + +sudo rm -rf ./data +``` diff --git a/examples/devnet/docker-compose.yaml b/examples/devnet/docker-compose.yaml new file mode 100644 index 000000000..87a2b908b --- /dev/null +++ b/examples/devnet/docker-compose.yaml @@ -0,0 +1,82 @@ +version: '3.8' + +x-logging: + &default-logging + options: + max-size: '20m' + max-file: '3' + driver: json-file + +services: + lotus: + container_name: lotus + image: ${LOTUS_IMAGE} + # ports: + # - "1234:1234" + environment: + - LOTUS_API_LISTENADDRESS=/dns/lotus/tcp/1234/http + restart: unless-stopped + logging: *default-logging + volumes: + - ./data/lotus:/var/lib/lotus:rw + - ./data/genesis:/var/lib/genesis:rw + - /var/tmp/filecoin-proof-parameters:/var/tmp/filecoin-proof-parameters:rw + + lotus-miner: + container_name: lotus-miner + image: ${LOTUS_MINER_IMAGE} + # ports: + # - "2345:2345" + environment: + - LOTUS_API_LISTENADDRESS=/dns/lotus-miner/tcp/2345/http + - LOTUS_API_REMOTELISTENADDRESS=lotus-miner:2345 + - LOTUS_SEALING_BATCHPRECOMMITS=false + - LOTUS_SEALING_AGGREGATECOMMITS=false + - LOTUS_SUBSYSTEMS_ENABLEMARKETS=false + - LOTUS_SEALING_WAITDEALSDELAY=1h + restart: unless-stopped + logging: *default-logging + volumes: + - ./data/lotus-miner:/var/lib/lotus-miner:rw + - ./data/lotus:/var/lib/lotus:ro + - ./data/genesis:/var/lib/genesis:ro + - /var/tmp/filecoin-proof-parameters:/var/tmp/filecoin-proof-parameters:rw + + boost: + container_name: boost + image: ${BOOST_IMAGE} + # ports: + # - "8080:8080" + environment: + - LOTUS_PATH=/var/lib/lotus + - LOTUS_MINER_PATH=/var/lib/lotus-miner + restart: unless-stopped + logging: *default-logging + volumes: + - ./data/boost:/var/lib/boost:rw + - ./data/lotus:/var/lib/lotus:ro + - ./data/lotus-miner:/var/lib/lotus-miner:ro + - ./data/sample:/app/public:rw + + boost-gui: + container_name: boost-gui + image: ${BOOST_GUI_IMAGE} + ports: + - "8000:8000" + environment: + - BOOST_URL=http://boost:8080 + healthcheck: # try reloading nginx configuration if IP of the boost container changes + test: "nc -zv boost 8080 &> curr.ip && ( cmp curr.ip prev.ip || ( cp curr.ip prev.ip && kill -1 1 ))" + interval: "20s" + restart: unless-stopped + logging: *default-logging + + demo-http-server: + container_name: demo-http-server + image: nginx:1.23-alpine + # ports: + # - "8001:80" + restart: unless-stopped + logging: *default-logging + volumes: + - ./data/sample:/usr/share/nginx/html:ro From 57e5a9f9cc1094b38d6bd4d31dd9f8c0f9cecd9b Mon Sep 17 00:00:00 2001 From: LexLuthr <88259624+LexLuthr@users.noreply.github.com> Date: 
Wed, 31 Aug 2022 12:50:30 +0530 Subject: [PATCH 06/26] remove failing check (#744) --- node/impl/boost.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/node/impl/boost.go b/node/impl/boost.go index a5b9fbf4c..386e0a94e 100644 --- a/node/impl/boost.go +++ b/node/impl/boost.go @@ -459,10 +459,6 @@ func (sm *BoostAPI) BoostDagstoreDestroyShard(ctx context.Context, key string) e if err != nil { return fmt.Errorf("unable to query dagstore for shard info: %w", err) } - // If the shard is not registered we would expect ErrShardUnknown - if !errors.Is(err, dagstore.ErrShardUnknown) { - return fmt.Errorf("shard not found in the dagstore: %w", err) - } pieceCid, err := cid.Parse(key) if err != nil { From 42fb5c050735f03bcc2dae693e217171c04ff544 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 31 Aug 2022 12:46:34 +0200 Subject: [PATCH 07/26] consolidate markdown documentation (#743) * move all readmes under README.md * simplify README.md * fixup * fixup * updated docs; moved build/devnet to docker/devnet * move examples under docker * fixup * revert .env * fixup --- README.md | 262 +++++++++++++++--- build/devnet/README.md | 30 -- {examples => docker}/devnet/.env | 2 +- {build => docker}/devnet/Makefile | 19 ++ {build => docker}/devnet/boost-gui/Dockerfile | 0 {build => docker}/devnet/boost-gui/Makefile | 0 .../devnet/boost-gui/entrypoint.sh | 0 .../devnet/boost-gui/nginx.conf.in | 0 {build => docker}/devnet/boost/Dockerfile | 0 {build => docker}/devnet/boost/Makefile | 0 {build => docker}/devnet/boost/entrypoint.sh | 0 .../devnet/boost/sample/make-a-deal.sh | 0 .../devnet/docker-compose.yaml | 0 .../devnet/lotus-miner/Dockerfile | 0 {build => docker}/devnet/lotus-miner/Makefile | 0 .../devnet/lotus-miner/entrypoint.sh | 0 {build => docker}/devnet/lotus/Dockerfile | 0 {build => docker}/devnet/lotus/Makefile | 0 {build => docker}/devnet/lotus/entrypoint.sh | 0 documentation/devnet.md | 181 ------------ examples/devnet/.gitignore | 1 - examples/devnet/Makefile | 20 -- examples/devnet/README.md | 45 --- 23 files changed, 247 insertions(+), 313 deletions(-) delete mode 100644 build/devnet/README.md rename {examples => docker}/devnet/.env (77%) rename {build => docker}/devnet/Makefile (71%) rename {build => docker}/devnet/boost-gui/Dockerfile (100%) rename {build => docker}/devnet/boost-gui/Makefile (100%) rename {build => docker}/devnet/boost-gui/entrypoint.sh (100%) rename {build => docker}/devnet/boost-gui/nginx.conf.in (100%) rename {build => docker}/devnet/boost/Dockerfile (100%) rename {build => docker}/devnet/boost/Makefile (100%) rename {build => docker}/devnet/boost/entrypoint.sh (100%) rename {build => docker}/devnet/boost/sample/make-a-deal.sh (100%) rename {examples => docker}/devnet/docker-compose.yaml (100%) rename {build => docker}/devnet/lotus-miner/Dockerfile (100%) rename {build => docker}/devnet/lotus-miner/Makefile (100%) rename {build => docker}/devnet/lotus-miner/entrypoint.sh (100%) rename {build => docker}/devnet/lotus/Dockerfile (100%) rename {build => docker}/devnet/lotus/Makefile (100%) rename {build => docker}/devnet/lotus/entrypoint.sh (100%) delete mode 100644 documentation/devnet.md delete mode 100644 examples/devnet/.gitignore delete mode 100644 examples/devnet/Makefile delete mode 100644 examples/devnet/README.md diff --git a/README.md b/README.md index 06e6460a3..181f029fd 100644 --- a/README.md +++ b/README.md @@ -4,79 +4,271 @@ Boost is a tool for Filecoin storage providers to manage data storage and retrie See the docs at 
[https://boost.filecoin.io](https://boost.filecoin.io/getting-started) to get started. -## For development: +## Table of Contents +- [Building and Installing Boost](#building-and-installing-boost) +- [Running Boost for development](#running-boost-for-development) +- [Running Boost devnet in Docker](#running-boost-devnet-in-docker) +- [License](#license) -1. Install using instructions in the building and installation section in [the docs](https://boost.filecoin.io/getting-started#building-and-installing). +## Building and Installing Boost -2. Make sure you have a local Lotus fullnode and miner running and listening to `localhost:1234` and `localhost:2345` respectively, for example with a devnet: +Compile and install using the instructions at the `Building and installing` section in [the docs](https://boost.filecoin.io/getting-started#building-and-installing). +## Running Boost for development + +To run Boost on your development machine, you will need to set up a devnet: + +1. Remove any existing Lotus and Boost repositories +``` +rm -rf ~/.lotusmarkets ~/.lotus ~/.lotusminer ~/.genesis_sectors +rm -rf ~/.boost +``` + +2. Build Lotus in debug mode + +The version of lotus needs to match the version in Boost's go.mod +``` +cd lotus +git checkout +make debug +``` + +3. Install Lotus + +The devnet script uses the installed `lotus` and `lotus-miner` binaries to run the miner and daemon. +``` +make install +install -C ./lotus-seed /usr/local/bin/lotus-seed +``` + +4. Build Boost in debug mode + +Double check if environment variables are set: +``` +export LIBRARY_PATH=/opt/homebrew/lib +export PATH="$(brew --prefix coreutils)/libexec/gnubin:/usr/local/bin:$PATH" +``` + +Build and install +``` +cd boost +make debug +make install +``` + +5. Start the devnet + +The following command will use the binaries that you built and installed above, and will run `lotus`, `lotus-miner` and `lotus-seed`. The `lotus` version must match the version in Boost's go.mod. +``` +cd boost +./devnet +``` + +The first time you run it, it will download the Filecoin proof parameters. It will take at least 10 minutes depending on your connection speed. You may need to restart the command multiple times as your terminal will probably timeout before it finishes downloading everything. + +The devnet isn't designed to be restartable. After it has been successfully run once, you'll have to clear out the previous data before re-running `./devnet`: +``` +rm -rf ~/.lotusmarkets && rm -rf ~/.lotus && rm -rf ~/.lotusminer && rm -rf ~/.genesis_sectors +``` + +6. Wait for `lotus-miner` to come up (through the command above) + +Unset these variables as they interfere with the `lotus-miner` command. +``` +unset MINER_API_INFO +unset FULLNODE_API_INFO +``` + +Then repeatedly run this command until it succeeds: +``` +lotus-miner auth api-info --perm=admin +``` + +7. Get the authentication tokens to connect to the lotus daemon and miner: + +``` +export ENV_MINER_API_INFO=`lotus-miner auth api-info --perm=admin` +export ENV_FULLNODE_API_INFO=`lotus auth api-info --perm=admin` + +export MINER_API_INFO=`echo $ENV_MINER_API_INFO | awk '{split($0,a,"="); print a[2]}'` +export FULLNODE_API_INFO=`echo $ENV_FULLNODE_API_INFO | awk '{split($0,a,"="); print a[2]}'` + +echo MINER_API_INFO=$MINER_API_INFO +echo FULLNODE_API_INFO=$FULLNODE_API_INFO +``` + +8. 
Create the wallets needed for Boost + +``` +export DEFAULT_WALLET=`lotus wallet list | tail -1 | awk '{print $1}'` +export COLLAT_WALLET=`lotus wallet new bls` +export PUBMSG_WALLET=`lotus wallet new bls` +export CLIENT_WALLET=`lotus wallet new bls` +``` + +9. Add funds to the wallets + +``` +lotus send --from $DEFAULT_WALLET $COLLAT_WALLET 10 +lotus send --from $DEFAULT_WALLET $PUBMSG_WALLET 10 +lotus send --from $DEFAULT_WALLET $CLIENT_WALLET 10 ``` -devnet + +Run this command repeatedly until each wallet you created has 10 FIL: +``` +lotus wallet list ``` -Note that currently `devnet` is using the default paths that `lotus` and `lotus-miner` use for their repositories, and you should make sure these directories are empty: +This should take about 10 seconds. + +10. Set the Publish Message Wallet as a control address on the miner ``` -LOTUS_PATH=~/.lotus -LOTUS_MINER_PATH=~/.lotusminer +lotus-miner actor control set --really-do-it $PUBMSG_WALLET +``` + +11. Add funds into the Market Actor escrow for the client and Collateral wallets -rm -rf ~/.lotus ~/.lotusminer +``` +lotus wallet market add --from $DEFAULT_WALLET --address $CLIENT_WALLET 5 +lotus wallet market add --address $COLLAT_WALLET 5 ``` +12. Initialize Boost / Create Boost repository -3. Create Boost repository +``` +boostd -vv init \ + --api-sealer=$MINER_API_INFO \ + --api-sector-index=$MINER_API_INFO \ + --wallet-publish-storage-deals=$PUBMSG_WALLET \ + --wallet-deal-collateral=$COLLAT_WALLET \ + --max-staging-deals-bytes=2000000000 +``` +13. Build the Web UI +``` +make react ``` -export $(lotus auth api-info --perm=admin) -export $(lotus-miner auth api-info --perm=admin) -boostd --vv init \ - --api-sealer=`lotus-miner auth api-info --perm=admin` \ - --api-sector-index=`lotus-miner auth api-info --perm=admin` \ - --wallet-publish-storage-deals=`lotus wallet new bls` \ - --wallet-deal-collateral=`lotus wallet new bls` \ - --max-staging-deals-bytes=50000000000 +14. Edit config to set a fixed listen address + +Edit `~/.boost/config.toml` + +Set the port in the `ListenAddresses` key to `50000` +``` +[Libp2p] + ListenAddresses = ["/ip4/0.0.0.0/tcp/50000", "/ip6/::/tcp/0"] ``` -4. Run the Boost daemon service +15. Run Boost +``` +boostd -vv run +``` +Note the peer ID of the boost instance: +``` +2022-06-10T09:32:28.819Z INFO boostd boostd/run.go:114 Boost libp2p node listening {"maddr": "{12D3KooWQNNWNiJ1mieEk9EHjDVF2qBc1FSjJGEzwjnMJzteApaW: [/ip4/172.17.0.2/tcp/50000 /ip4/127.0.0.1/tcp/50000]}"} ``` -export $(lotus auth api-info --perm=admin) +In this example: `12D3KooWQNNWNiJ1mieEk9EHjDVF2qBc1FSjJGEzwjnMJzteApaW` -boostd --vv run +14. Set the peer ID and multi-address of the miner on chain ``` +lotus-miner actor set-peer-id +lotus-miner actor set-addrs /ip4/127.0.0.1/tcp/50000 +``` + +16. Open the Web UI + +Open http://localhost:8080 to see the Boost UI -5. Interact with Boost +### Make a deal with Boost -Pass the client address (wallet) and the provider address to the `dummydeal` command. -Note that -- the client address is the address of a wallet with funds in `lotus wallet list` -- you can find the provider address in `~/.boost/config.toml` under the config key `Wallets.Miner` +1. Initialize the Boost client +``` +boost init +``` + +This will output the address of the wallet (it's safe to run the init command repeatedly). +2. Send funds to the client wallet ``` -boostd dummydeal +lotus send --from=$DEFAULT_WALLET 10 ``` -## Running the UI in Development Mode: +3. 
Follow the guide at https://boost.filecoin.io/tutorials/how-to-store-files-with-boost-on-filecoin + +Note that above you already ran a command to export FULLNODE_API (and point it to your local devnet lotus daemon). + +Note also that the provider address is `t01000` and you will need to supply an appropriate `--storage-price` when using `boost deal` since the devnet has a minimum price. Alternatively, using "Settings" in the Boost web UI to set the deal price to zero. + +## Running Boost devnet in Docker -1. Run the server +### Building Docker images + +1. Select Lotus version, for example: `lotus_version=1.17.1-rc2`. It must be the tag name of [the Lotus git repo](https://github.com/filecoin-project/lotus/tags) without `v` prefix. + +2. Select Boost version, for example: `boost_version=1.3.0-rc1`. + +3. Build images ``` -cd react -npm install -npm start +cd docker/devnet +make build/all ``` -2. Open UI +If you need to build a different version, edit the `.env` file. + +### Start devnet docker stack + +1. Run ``` -http://localhost:3000 +cd docker/devnet +docker compose up -d ``` -## Running a devnet: +It will spin up `lotus`, `lotus-miner`, `boost`, `boost-gui` and `demo-http-server` containers. All temporary data will be saved in `./data` folder. + +The initial setup could take up to 20 min or more as it needs to download Filecoin proof parameters. During the initial setup, it is normal to see error messages in the log. Containers are waiting for the lotus to be ready. It may timeout several times. Restart is expected to be managed by `docker`. + +2. Try opening the Boost GUI http://localhost:8000 . Devnet is ready to operate when the URL opens and indicates no errors on the startup page. + +You can inspect the status using `docker compose logs -f`. + +### Making a deal + +The `boost` container is packed with `boost` and `lotus` clients. You can connect to the container with the command `docker compose exec boost /bin/bash` and follow instructions for [storing files with Boost guide](https://boost.filecoin.io/tutorials/how-to-store-files-with-boost-on-filecoin). But the recommended startup is to follow the semi-interactive demo first: + +``` +# Attach to a running boost container +docker compose exec boost /bin/bash -Follow the instructions in the [devnet guide](./documentation/devnet.md) +# Execute the demo script /app/sample/make-a-deal.sh +root@83260455bbd2:/app# ./sample/make-a-deal.sh +``` + +### Accessing Lotus from localhost + +By default the [docker-compose.yaml](./docker-compose.yaml) does not expose any port of the `lotus` container. To access the `lotus` from a local machine: +1. You can either expose `1234` in [docker-compose.yaml](./docker-compose.yaml) or find the IP of the `lotus` container using `docker inspect lotus | grep IPAddress` command. +2. Get the `FULLNODE_API_INFO` +``` +docker exec -it lotus lotus auth api-info --perm=admin +FULLNODE_API_INFO=eyJ...ms4:/dns/lotus/tcp/1234/http +``` +3. Change the `dns/lotus/tcp/1234/http` to `ip4/<127.0.0.1 or container's IP>/tcp/1234/http` for the use in `FULLNODE_API_INFO`. 
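+
+For example, assuming you exposed port `1234` and rewrote the multiaddress as above, a quick
+connectivity check from the host might look like this (the token is the placeholder from step 2):
+
+```
+export FULLNODE_API_INFO=eyJ...ms4:/ip4/127.0.0.1/tcp/1234/http
+lotus chain head
+```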
+ +### Cleaning up + +To stop containers and drop everything: +``` +docker compose down --rmi local + +rm -rf ./data + +rm -rf /var/tmp/filecoin-proof-parameters +``` ## License diff --git a/build/devnet/README.md b/build/devnet/README.md deleted file mode 100644 index a116f1d91..000000000 --- a/build/devnet/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Devnet docker images for lotus and boost - -This dir contains scripts for building docker images that are required to start the lotus devnet with the boost as a storage provider. It is a realization of [devnet guide](../../documentation/devnet.md) in docker containers. `lotus` and `lotus-miner` images are based on the [official lotus image file](https://github.com/filecoin-project/lotus/blob/master/Dockerfile.lotus). Because there is no image with lotus in debug mode published on Dockerhub so we rebuild lotus containers locally. - -NOTE: These docker images are for demo and devs ONLY. They MUST NOT/CAN NOT be used in production environments. - -## Building images: - -1. Select lotus version, for example: `lotus_version=1.17.1-rc2`. It must be the tag name of [the lotus git repo](https://github.com/filecoin-project/lotus/tags) without `v` prefix. -2. Select boost version, for example: `boost_version=1.3.0-rc1`. Docker images for the boost will be built on the current code base. The `boost_version` is just used to tag images. If you want to build images for a specific boost version then you have to checkout that version first. -3. Build images - -``` -make build/all lotus_version=1.17.1-rc2 boost_version=1.3.0-rc1 -``` -## Publishing images: - -1. Log in to docker with the `filecoin` user. -2. Publish -``` -make push/all lotus_version=1.17.1-rc2 boost_version=1.3.0-rc1 -``` -3. If you want to publish using a non `filecoin` account (for some testing purposes) - -``` -make push/all lotus_version=1.17.1-rc2 boost_version=1.3.0-rc1 docker_user= -``` -## How to run devnet in docker: - -Follow the instructions in the [docker-compose devnet guide](../../examples/devnet/README.md) diff --git a/examples/devnet/.env b/docker/devnet/.env similarity index 77% rename from examples/devnet/.env rename to docker/devnet/.env index 3c060d4a5..6db690f48 100644 --- a/examples/devnet/.env +++ b/docker/devnet/.env @@ -2,4 +2,4 @@ DOCKER_USER=filecoin LOTUS_IMAGE=${DOCKER_USER}/lotus-dev:1.17.1-rc2 LOTUS_MINER_IMAGE=${DOCKER_USER}/lotus-miner-dev:1.17.1-rc2 BOOST_IMAGE=${DOCKER_USER}/boost-dev:1.3.0-rc1 -BOOST_GUI_IMAGE=${DOCKER_USER}/boost-gui:1.3.0-rc1 \ No newline at end of file +BOOST_GUI_IMAGE=${DOCKER_USER}/boost-gui:1.3.0-rc1 diff --git a/build/devnet/Makefile b/docker/devnet/Makefile similarity index 71% rename from build/devnet/Makefile rename to docker/devnet/Makefile index 98b860e99..55b9d443b 100644 --- a/build/devnet/Makefile +++ b/docker/devnet/Makefile @@ -32,3 +32,22 @@ clean/lotus-test: .EXPORT_ALL_VARIABLES: ################################################################################## +start: + docker compose up -d + docker compose logs -f +.PHONY: start +################################################################################## +ssh/boost: + docker compose exec boost /bin/bash +.PHONY: ssh/boost +################################################################################## +clean-stack: clean/docker + rm -rf data +.PHONY: clean +clean/all: clean + rm -rf /var/tmp/filecoin-proof-parameters +.PHONY: clean/all +clean/docker: + docker compose down +.PHONY: clean/docker 
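+# clean-stack tears down the compose stack and removes ./data; clean/all runs clean (dropping the
+# lotus checkout) and additionally deletes the cached proof parameters in /var/tmp/filecoin-proof-parameters.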
+################################################################################## diff --git a/build/devnet/boost-gui/Dockerfile b/docker/devnet/boost-gui/Dockerfile similarity index 100% rename from build/devnet/boost-gui/Dockerfile rename to docker/devnet/boost-gui/Dockerfile diff --git a/build/devnet/boost-gui/Makefile b/docker/devnet/boost-gui/Makefile similarity index 100% rename from build/devnet/boost-gui/Makefile rename to docker/devnet/boost-gui/Makefile diff --git a/build/devnet/boost-gui/entrypoint.sh b/docker/devnet/boost-gui/entrypoint.sh similarity index 100% rename from build/devnet/boost-gui/entrypoint.sh rename to docker/devnet/boost-gui/entrypoint.sh diff --git a/build/devnet/boost-gui/nginx.conf.in b/docker/devnet/boost-gui/nginx.conf.in similarity index 100% rename from build/devnet/boost-gui/nginx.conf.in rename to docker/devnet/boost-gui/nginx.conf.in diff --git a/build/devnet/boost/Dockerfile b/docker/devnet/boost/Dockerfile similarity index 100% rename from build/devnet/boost/Dockerfile rename to docker/devnet/boost/Dockerfile diff --git a/build/devnet/boost/Makefile b/docker/devnet/boost/Makefile similarity index 100% rename from build/devnet/boost/Makefile rename to docker/devnet/boost/Makefile diff --git a/build/devnet/boost/entrypoint.sh b/docker/devnet/boost/entrypoint.sh similarity index 100% rename from build/devnet/boost/entrypoint.sh rename to docker/devnet/boost/entrypoint.sh diff --git a/build/devnet/boost/sample/make-a-deal.sh b/docker/devnet/boost/sample/make-a-deal.sh similarity index 100% rename from build/devnet/boost/sample/make-a-deal.sh rename to docker/devnet/boost/sample/make-a-deal.sh diff --git a/examples/devnet/docker-compose.yaml b/docker/devnet/docker-compose.yaml similarity index 100% rename from examples/devnet/docker-compose.yaml rename to docker/devnet/docker-compose.yaml diff --git a/build/devnet/lotus-miner/Dockerfile b/docker/devnet/lotus-miner/Dockerfile similarity index 100% rename from build/devnet/lotus-miner/Dockerfile rename to docker/devnet/lotus-miner/Dockerfile diff --git a/build/devnet/lotus-miner/Makefile b/docker/devnet/lotus-miner/Makefile similarity index 100% rename from build/devnet/lotus-miner/Makefile rename to docker/devnet/lotus-miner/Makefile diff --git a/build/devnet/lotus-miner/entrypoint.sh b/docker/devnet/lotus-miner/entrypoint.sh similarity index 100% rename from build/devnet/lotus-miner/entrypoint.sh rename to docker/devnet/lotus-miner/entrypoint.sh diff --git a/build/devnet/lotus/Dockerfile b/docker/devnet/lotus/Dockerfile similarity index 100% rename from build/devnet/lotus/Dockerfile rename to docker/devnet/lotus/Dockerfile diff --git a/build/devnet/lotus/Makefile b/docker/devnet/lotus/Makefile similarity index 100% rename from build/devnet/lotus/Makefile rename to docker/devnet/lotus/Makefile diff --git a/build/devnet/lotus/entrypoint.sh b/docker/devnet/lotus/entrypoint.sh similarity index 100% rename from build/devnet/lotus/entrypoint.sh rename to docker/devnet/lotus/entrypoint.sh diff --git a/documentation/devnet.md b/documentation/devnet.md deleted file mode 100644 index aedea9424..000000000 --- a/documentation/devnet.md +++ /dev/null @@ -1,181 +0,0 @@ -# Devnet - -To run Boost on your development machine, you will need to set up a devnet: - -1. Remove any existing lotus repo and boost repo -``` -rm -rf ~/.lotusmarkets && rm -rf ~/.lotus && rm -rf ~/.lotusminer && rm -rf ~/.genesis_sectors -rm -rf ~/.boost -``` - -2. 
Build lotus in debug mode - -The version of lotus needs to match the version in boost's go.mod -``` -cd lotus -git checkout -make debug -``` - -3. Install lotus - -The devnet script uses the installed lotus to run the miner and daemon. -``` -make install -install -C ./lotus-seed /usr/local/bin/lotus-seed -``` - -4. Build boost in debug mode - -Double check if environment variables are set: -``` -export LIBRARY_PATH=/opt/homebrew/lib -export PATH="$(brew --prefix coreutils)/libexec/gnubin:/usr/local/bin:$PATH" -``` - -Build & Install -``` -cd boost -make debug -make install -``` - -3. Start the devnet - -The following command will use the lotus binaries that you built and installed above, and especially it will run `lotus`, `lotus-miner` and `lotus-seed`. So the lotus version must match the version in boost's go.mod. -``` -cd boost -./devnet -``` - -The first time you run it, it will install a lot of metadata-related proofs. It will take at least 30 minutes depending on your connection speed. You may need to restart the command multiple times as your terminal will probably timeout before it finishes downloading everything. - -You can also use `./devnet &` instead of `./devnet` to run the process in the background - that would prevent you from having to manually restart the command when your terminal times out. - -NOTE: -The devnet isn't designed to be restartable unfortunately. After it has been successfully run once, you'll have to clear out the previous data before re-running `./devnet`: -``` -rm -rf ~/.lotusmarkets && rm -rf ~/.lotus && rm -rf ~/.lotusminer && rm -rf ~/.genesis_sectors -``` - -4. Wait for lotus-miner to come up (through the command above) - -Unset these variables as they interfere with the `lotus-miner` command. -``` -unset MINER_API_INFO -unset FULLNODE_API_INFO -``` -Then repeatedly run this command until it succeeds: -``` -lotus-miner auth api-info --perm=admin -``` - -5. Get the auth tokens to connect to the lotus daemon and miner: -``` -export ENV_MINER_API_INFO=`lotus-miner auth api-info --perm=admin` -export ENV_FULLNODE_API_INFO=`lotus auth api-info --perm=admin` - -export MINER_API_INFO=`echo $ENV_MINER_API_INFO | awk '{split($0,a,"="); print a[2]}'` -export FULLNODE_API_INFO=`echo $ENV_FULLNODE_API_INFO | awk '{split($0,a,"="); print a[2]}'` - -echo MINER_API_INFO=$MINER_API_INFO -echo FULLNODE_API_INFO=$FULLNODE_API_INFO -``` - -6. Create the wallets needed for boost -``` -export DEFAULT_WALLET=`lotus wallet list | tail -1 | awk '{print $1}'` -export COLLAT_WALLET=`lotus wallet new bls` -export PUBMSG_WALLET=`lotus wallet new bls` -export CLIENT_WALLET=`lotus wallet new bls` -``` - -7. Add funds to the wallets -``` -lotus send --from $DEFAULT_WALLET $COLLAT_WALLET 10 -lotus send --from $DEFAULT_WALLET $PUBMSG_WALLET 10 -lotus send --from $DEFAULT_WALLET $CLIENT_WALLET 10 -``` - -Run this command repeatedly until each wallet you created has 10 FIL: -``` -lotus wallet list -``` -This should take about 10 seconds. - -8. Set the Publish Message Wallet as a control address on the miner -``` -lotus-miner actor control set --really-do-it $PUBMSG_WALLET -``` - -9. Add funds into the Market Actor escrow for the client and Collateral wallets -``` -lotus wallet market add --from $DEFAULT_WALLET --address $CLIENT_WALLET 5 -lotus wallet market add --address $COLLAT_WALLET 5 -``` - -10. 
Initialize boost -``` -boostd -vv init \ - --api-sealer=$MINER_API_INFO \ - --api-sector-index=$MINER_API_INFO \ - --wallet-publish-storage-deals=$PUBMSG_WALLET \ - --wallet-deal-collateral=$COLLAT_WALLET \ - --max-staging-deals-bytes=2000000000 -``` - -11. Build the Web UI -``` -make react -``` - -12. Edit config to set a fixed listen address - -Open `~/.boost/config.toml` - -Set the port in the `ListenAddresses` key to `50000` -``` -[Libp2p] - ListenAddresses = ["/ip4/0.0.0.0/tcp/50000", "/ip6/::/tcp/0"] -``` - -13. Run boost -``` -boostd -vv run -``` - -Note down the peer ID of the boost instance: -``` -2022-06-10T09:32:28.819Z INFO boostd boostd/run.go:114 Boost libp2p node listening {"maddr": "{12D3KooWQNNWNiJ1mieEk9EHjDVF2qBc1FSjJGEzwjnMJzteApaW: [/ip4/172.17.0.2/tcp/50000 /ip4/127.0.0.1/tcp/50000]}"} -``` -In this example: `12D3KooWQNNWNiJ1mieEk9EHjDVF2qBc1FSjJGEzwjnMJzteApaW` - -14. Set the peer ID and multi-address of the miner on chain -``` -lotus-miner actor set-peer-id -lotus-miner actor set-addrs /ip4/127.0.0.1/tcp/50000 -``` - -15. Open the Web UI - -Open http://localhost:8080 to see the Boost UI - -## Make a deal with Boost - -2. Initialize the boost client -``` -boost init -``` - -This will output the address of the wallet (it's safe to run the init command repeatedly). - -2. Send funds to the client wallet -``` -lotus send --from=$DEFAULT_WALLET 10 -``` - -3. Follow the guide at https://boost.filecoin.io/tutorials/how-to-store-files-with-boost-on-filecoin - -Note that above you already ran a command to export FULLNODE_API (and point it to your local devnet lotus daemon). - -Note also that the provider address is `t01000` and you will need to supply an appropriate `--storage-price` when using `boost deal` since the devnet has a minimum price. Alternatively, using "Settings" in the Boost web UI to set the deal price to zero. diff --git a/examples/devnet/.gitignore b/examples/devnet/.gitignore deleted file mode 100644 index 1269488f7..000000000 --- a/examples/devnet/.gitignore +++ /dev/null @@ -1 +0,0 @@ -data diff --git a/examples/devnet/Makefile b/examples/devnet/Makefile deleted file mode 100644 index c1c66a960..000000000 --- a/examples/devnet/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -################################################################################## -start: - docker compose up -d - docker compose logs -f -.PHONY: start -################################################################################## -ssh/boost: - docker compose exec boost /bin/bash -.PHONY: ssh/boost -################################################################################## -clean: clean/docker - rm -rf data -.PHONY: clean -clean/all: clean - rm -rf /var/tmp/filecoin-proof-parameters -.PHONY: clean/all -clean/docker: - docker compose down -.PHONY: clean/docker -################################################################################## diff --git a/examples/devnet/README.md b/examples/devnet/README.md deleted file mode 100644 index 35ad7a08b..000000000 --- a/examples/devnet/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# Devnet in docker - -The docker-compose file contains a realization of the [devnet guide](../../documentation/devnet.md) in docker containers. - -## To start devnet: - -1. Run -``` -docker compose up -d -``` -It will spin up `lotus`, `lotus-miner`, `boost`, `boost-gui` and `demo-http-server` containers. All temporary data will be saved in `./data` folder. -The initial setup could take up to 20 min or more (it takes time to download filecoin proof parameters). 
During the initial setup, it is normal to see error messages in the log. Containers are waiting for the lotus to be ready. It may timeout several times. Restart is expected to be managed by `docker`. - -2. Try opening the boost GUI http://localhost:8000 with a browser. Devnet is ready to operate when the URL opens and indicates no errors on the startup page. -Also, you can try to inspect the status using `docker compose logs -f`. - -## Making a deal - -The `boost` container is packed with `boost` and `lotus` clients. You can connect to the container with the command `docker compose exec boost /bin/bash` and follow instructions for [storing files with Boost guide](https://boost.filecoin.io/tutorials/how-to-store-files-with-boost-on-filecoin). But the recommended startup is to follow the semi-interactive demo first: -``` -# attach to a running boost container -docker compose exec boost /bin/bash - -# execute the demo script /app/sample/make-a-deal.sh -root@83260455bbd2:/app# ./sample/make-a-deal.sh -``` -## Accessing lotus from localhost - -By default the [docker-compose.yaml](./docker-compose.yaml) does not expose any port of the `lotus` container. To access the `lotus` from a local machine: -1. You can either expose `1234` in [docker-compose.yaml](./docker-compose.yaml) or find the IP of the `lotus` container using `docker inspect lotus | grep IPAddress` command. -2. Get the `FULLNODE_API_INFO` -``` -docker exec -it lotus lotus auth api-info --perm=admin -FULLNODE_API_INFO=eyJ...ms4:/dns/lotus/tcp/1234/http -``` -3. Change the `dns/lotus/tcp/1234/http` to `ip4/<127.0.0.1 or container's IP>/tcp/1234/http` for the use in `FULLNODE_API_INFO`. - -## Cleaning devnet - -To stop containers and drop everything: -``` -docker compose down --rmi all - -sudo rm -rf ./data -``` From badb6034e5f59a68ec6bc8ae27f66e669a85b907 Mon Sep 17 00:00:00 2001 From: Anton Evangelatov Date: Wed, 31 Aug 2022 13:08:03 +0200 Subject: [PATCH 08/26] change network name for devnet (#747) --- docker/devnet/docker-compose.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/devnet/docker-compose.yaml b/docker/devnet/docker-compose.yaml index 87a2b908b..0086b39df 100644 --- a/docker/devnet/docker-compose.yaml +++ b/docker/devnet/docker-compose.yaml @@ -7,6 +7,10 @@ x-logging: max-file: '3' driver: json-file +networks: + default: + name: devnet + services: lotus: container_name: lotus From 85347fc1a5fb1331830185b5b2f691f00fc62627 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Wed, 31 Aug 2022 16:26:26 +0200 Subject: [PATCH 09/26] feat: transfer limiter web UI updates (#740) --- gql/resolver.go | 51 +++++++++---- gql/resolver_transfers.go | 57 ++++++++++++++- gql/schema.graphql | 18 +++++ react/src/DealTransfers.css | 23 ++++++ react/src/DealTransfers.js | 117 +++++++++++++++++++++++++++--- react/src/Deals.css | 5 ++ react/src/Deals.js | 38 ++++++++++ react/src/gql.js | 25 +++++++ storagemarket/provider.go | 4 - storagemarket/transfer_limiter.go | 63 ++++++++++++++++ storagemarket/transfers.go | 67 ++++++++++++----- 11 files changed, 418 insertions(+), 50 deletions(-) create mode 100644 react/src/DealTransfers.css diff --git a/gql/resolver.go b/gql/resolver.go index 4884fda73..2cabdbbec 100644 --- a/gql/resolver.go +++ b/gql/resolver.go @@ -102,7 +102,7 @@ func (r *resolver) Deal(ctx context.Context, args struct{ ID graphql.ID }) (*dea return nil, err } - return newDealResolver(deal, r.dealsDB, r.logsDB, r.spApi), nil + return newDealResolver(deal, r.provider, r.dealsDB, r.logsDB, r.spApi), nil } type dealsArgs 
struct { @@ -135,7 +135,7 @@ func (r *resolver) Deals(ctx context.Context, args dealsArgs) (*dealListResolver resolvers := make([]*dealResolver, 0, len(deals)) for _, deal := range deals { - resolvers = append(resolvers, newDealResolver(&deal, r.dealsDB, r.logsDB, r.spApi)) + resolvers = append(resolvers, newDealResolver(&deal, r.provider, r.dealsDB, r.logsDB, r.spApi)) } return &dealListResolver{ @@ -168,7 +168,7 @@ func (r *resolver) DealUpdate(ctx context.Context, args struct{ ID graphql.ID }) } net := make(chan *dealResolver, 1) - net <- newDealResolver(deal, r.dealsDB, r.logsDB, r.spApi) + net <- newDealResolver(deal, r.provider, r.dealsDB, r.logsDB, r.spApi) // Updates to deal state are broadcast on pubsub. Pipe these updates to the // client @@ -180,7 +180,7 @@ func (r *resolver) DealUpdate(ctx context.Context, args struct{ ID graphql.ID }) } return nil, fmt.Errorf("%s: subscribing to deal updates: %w", args.ID, err) } - sub := &subLastUpdate{sub: dealUpdatesSub, dealsDB: r.dealsDB, logsDB: r.logsDB, spApi: r.spApi} + sub := &subLastUpdate{sub: dealUpdatesSub, provider: r.provider, dealsDB: r.dealsDB, logsDB: r.logsDB, spApi: r.spApi} go func() { sub.Pipe(ctx, net) // blocks until connection is closed close(net) @@ -219,7 +219,7 @@ func (r *resolver) DealNew(ctx context.Context) (<-chan *dealNewResolver, error) case evti := <-sub.Out(): // Pipe the deal to the new deal channel di := evti.(types.ProviderDealState) - rsv := newDealResolver(&di, r.dealsDB, r.logsDB, r.spApi) + rsv := newDealResolver(&di, r.provider, r.dealsDB, r.logsDB, r.spApi) totalCount, err := r.dealsDB.Count(ctx, "") if err != nil { log.Errorf("getting total deal count: %w", err) @@ -330,15 +330,17 @@ func (r *resolver) dealList(ctx context.Context, query string, cursor *graphql.I type dealResolver struct { types.ProviderDealState + provider *storagemarket.Provider transferred uint64 dealsDB *db.DealsDB logsDB *db.LogsDB spApi sealingpipeline.API } -func newDealResolver(deal *types.ProviderDealState, dealsDB *db.DealsDB, logsDB *db.LogsDB, spApi sealingpipeline.API) *dealResolver { +func newDealResolver(deal *types.ProviderDealState, provider *storagemarket.Provider, dealsDB *db.DealsDB, logsDB *db.LogsDB, spApi sealingpipeline.API) *dealResolver { return &dealResolver{ ProviderDealState: *deal, + provider: provider, transferred: uint64(deal.NBytesReceived), dealsDB: dealsDB, logsDB: logsDB, @@ -502,13 +504,17 @@ func (dr *dealResolver) message(ctx context.Context, checkpoint dealcheckpoints. if dr.IsOffline { return "Awaiting Offline Data Import" } - switch dr.transferred { - case 0: + switch { + case dr.transferred == 0 && !dr.provider.IsTransferStalled(dr.DealUuid): return "Transfer Queued" - case 100: + case dr.transferred == 100: return "Transfer Complete" default: pct := (100 * dr.transferred) / dr.ProviderDealState.Transfer.Size + isStalled := dr.provider.IsTransferStalled(dr.DealUuid) + if isStalled { + return fmt.Sprintf("Transfer stalled at %d%% ", pct) + } return fmt.Sprintf("Transferring %d%%", pct) } case dealcheckpoints.Transferred: @@ -533,6 +539,22 @@ func (dr *dealResolver) message(ctx context.Context, checkpoint dealcheckpoints. 
return checkpoint.String() } +func (dr *dealResolver) TransferSamples() []*transferPoint { + points := dr.provider.Transfer(dr.DealUuid) + pts := make([]*transferPoint, 0, len(points)) + for _, pt := range points { + pts = append(pts, &transferPoint{ + At: graphql.Time{Time: pt.At}, + Bytes: gqltypes.Uint64(pt.Bytes), + }) + } + return pts +} + +func (dr *dealResolver) IsTransferStalled() bool { + return dr.provider.IsTransferStalled(dr.DealUuid) +} + func (dr *dealResolver) sealingState(ctx context.Context) string { si, err := dr.spApi.SectorsStatus(ctx, dr.SectorID, false) if err != nil { @@ -594,10 +616,11 @@ func toUuid(id graphql.ID) (uuid.UUID, error) { } type subLastUpdate struct { - sub event.Subscription - dealsDB *db.DealsDB - logsDB *db.LogsDB - spApi sealingpipeline.API + sub event.Subscription + provider *storagemarket.Provider + dealsDB *db.DealsDB + logsDB *db.LogsDB + spApi sealingpipeline.API } func (s *subLastUpdate) Pipe(ctx context.Context, net chan *dealResolver) { @@ -636,7 +659,7 @@ func (s *subLastUpdate) Pipe(ctx context.Context, net chan *dealResolver) { loop: for { di := lastUpdate.(types.ProviderDealState) - rsv := newDealResolver(&di, s.dealsDB, s.logsDB, s.spApi) + rsv := newDealResolver(&di, s.provider, s.dealsDB, s.logsDB, s.spApi) select { case <-ctx.Done(): diff --git a/gql/resolver_transfers.go b/gql/resolver_transfers.go index a4c7091a3..e7a20f306 100644 --- a/gql/resolver_transfers.go +++ b/gql/resolver_transfers.go @@ -6,6 +6,8 @@ import ( "time" gqltypes "github.com/filecoin-project/boost/gql/types" + "github.com/filecoin-project/boost/storagemarket" + "github.com/google/uuid" "github.com/graph-gophers/graphql-go" ) @@ -15,15 +17,62 @@ type transferPoint struct { } // query: transfers: [TransferPoint] -func (r *resolver) Transfers(_ context.Context) ([]*transferPoint, error) { - deals := r.provider.Transfers() +func (r *resolver) Transfers(_ context.Context) []*transferPoint { + return r.getTransferSamples(r.provider.Transfers(), nil) +} + +type transferStats struct { + HttpMaxConcurrentDownloads int32 + Stats []*hostTransferStats +} + +type hostTransferStats struct { + Host string + Total int32 + Started int32 + Stalled int32 + TransferSamples []*transferPoint +} + +// query: transferStats: TransferStats +func (r *resolver) TransferStats(_ context.Context) *transferStats { + transfersByDeal := r.provider.Transfers() + stats := r.provider.TransferStats() + gqlStats := make([]*hostTransferStats, 0, len(stats)) + for _, s := range stats { + gqlStats = append(gqlStats, &hostTransferStats{ + Host: s.Host, + Total: int32(s.Total), + Started: int32(s.Started), + Stalled: int32(s.Stalled), + TransferSamples: r.getTransferSamples(transfersByDeal, s.DealUuids), + }) + } + return &transferStats{ + HttpMaxConcurrentDownloads: int32(r.cfg.Dealmaking.HttpTransferMaxConcurrentDownloads), + Stats: gqlStats, + } +} + +func (r *resolver) getTransferSamples(deals map[uuid.UUID][]storagemarket.TransferPoint, filter []uuid.UUID) []*transferPoint { + // If filter is nil, include all deals + if filter == nil { + for dealUuid := range deals { + filter = append(filter, dealUuid) + } + } // We have // dealUUID -> [At: