From fc8ea49959730abb4076cf89654c004482393153 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?= <magik6k@gmail.com>
Date: Wed, 3 Apr 2019 03:44:32 +0200
Subject: [PATCH 01/15] Cleanup core package
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

License: MIT
Signed-off-by: Łukasz Magiera <magik6k@gmail.com>


This commit was moved from ipfs/kubo@d35dac70f0b0b47c7813c6380deb8974179bdb32
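
For reference, a minimal, hypothetical usage sketch of the Bootstrap
entry point this patch introduces (not part of the change itself; it
assumes a libp2p host `h` and routing system `rt` constructed
elsewhere):

    // Hypothetical caller wiring up periodic bootstrapping.
    func startBootstrap(h host.Host, rt routing.IpfsRouting, pis []peerstore.PeerInfo) (io.Closer, error) {
        cfg := bootstrap.BootstrapConfigWithPeers(pis)
        // The returned io.Closer stops the periodic bootstrap process.
        return bootstrap.Bootstrap(h.ID(), h, rt, cfg)
    }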
---
 core/bootstrap/bootstrap.go      | 243 +++++++++++++++++++++++++++++++
 core/bootstrap/bootstrap_test.go |  56 +++++++
 2 files changed, 299 insertions(+)
 create mode 100644 core/bootstrap/bootstrap.go
 create mode 100644 core/bootstrap/bootstrap_test.go

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
new file mode 100644
index 000000000..e6b4f826d
--- /dev/null
+++ b/core/bootstrap/bootstrap.go
@@ -0,0 +1,243 @@
+package bootstrap
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"sync"
+	"time"
+
+	config "github.com/ipfs/go-ipfs-config"
+	logging "github.com/ipfs/go-log"
+	"github.com/jbenet/goprocess"
+	"github.com/jbenet/goprocess/context"
+	"github.com/jbenet/goprocess/periodic"
+	"github.com/libp2p/go-libp2p-host"
+	"github.com/libp2p/go-libp2p-loggables"
+	"github.com/libp2p/go-libp2p-net"
+	"github.com/libp2p/go-libp2p-peer"
+	"github.com/libp2p/go-libp2p-peerstore"
+	"github.com/libp2p/go-libp2p-routing"
+
+	"github.com/ipfs/go-ipfs/thirdparty/math2"
+)
+
+var log = logging.Logger("bootstrap")
+
+// ErrNotEnoughBootstrapPeers signals that we do not have enough bootstrap
+// peers to bootstrap correctly.
+var ErrNotEnoughBootstrapPeers = errors.New("not enough bootstrap peers to bootstrap")
+
+// BootstrapConfig specifies parameters used in an IpfsNode's network
+// bootstrapping process.
+type BootstrapConfig struct {
+	// MinPeerThreshold governs whether to bootstrap more connections. If the
+	// node has fewer open connections than this number, it will open connections
+	// to the bootstrap nodes. From there, the routing system should be able
+	// to use the connections to the bootstrap nodes to connect to even more
+	// peers. Routing systems like the IpfsDHT do so in their own Bootstrap
+	// process, which issues random queries to find more peers.
+	MinPeerThreshold int
+
+	// Period governs the periodic interval at which the node will
+	// attempt to bootstrap. The bootstrap process is not very expensive, so
+	// this threshold can afford to be small (<=30s).
+	Period time.Duration
+
+	// ConnectionTimeout determines how long to wait for a bootstrap
+	// connection attempt before cancelling it.
+	ConnectionTimeout time.Duration
+
+	// BootstrapPeers is a function that returns a set of bootstrap peers
+	// for the bootstrap process to use. This makes it possible for clients
+	// to control the peers the process uses at any moment.
+	BootstrapPeers func() []peerstore.PeerInfo
+}
+
+// DefaultBootstrapConfig specifies default sane parameters for bootstrapping.
+var DefaultBootstrapConfig = BootstrapConfig{
+	MinPeerThreshold:  4,
+	Period:            30 * time.Second,
+	ConnectionTimeout: (30 * time.Second) / 3, // Perod / 3
+}
+
+func BootstrapConfigWithPeers(pis []peerstore.PeerInfo) BootstrapConfig {
+	cfg := DefaultBootstrapConfig
+	cfg.BootstrapPeers = func() []peerstore.PeerInfo {
+		return pis
+	}
+	return cfg
+}
+
+// Bootstrap kicks off IpfsNode bootstrapping. This function will periodically
+// check the number of open connections and -- if there are too few -- initiate
+// connections to well-known bootstrap peers. It also kicks off subsystem
+// bootstrapping (i.e. routing).
+func Bootstrap(id peer.ID, host host.Host, rt routing.IpfsRouting, cfg BootstrapConfig) (io.Closer, error) {
+
+	// make a signal to wait for one bootstrap round to complete.
+	doneWithRound := make(chan struct{})
+
+	if len(cfg.BootstrapPeers()) == 0 {
+		// We *need* to bootstrap but we have no bootstrap peers
+		// configured *at all*, inform the user.
+		log.Warning("no bootstrap nodes configured: go-ipfs may have difficulty connecting to the network")
+	}
+
+	// the periodic bootstrap function -- the connection supervisor
+	periodic := func(worker goprocess.Process) {
+		ctx := goprocessctx.OnClosingContext(worker)
+		defer log.EventBegin(ctx, "periodicBootstrap", id).Done()
+
+		if err := bootstrapRound(ctx, host, cfg); err != nil {
+			log.Event(ctx, "bootstrapError", id, loggables.Error(err))
+			log.Debugf("%s bootstrap error: %s", id, err)
+		}
+
+		<-doneWithRound
+	}
+
+	// kick off the node's periodic bootstrapping
+	proc := periodicproc.Tick(cfg.Period, periodic)
+	proc.Go(periodic) // run one right now.
+
+	// kick off Routing.Bootstrap
+	if rt != nil {
+		ctx := goprocessctx.OnClosingContext(proc)
+		if err := rt.Bootstrap(ctx); err != nil {
+			proc.Close()
+			return nil, err
+		}
+	}
+
+	doneWithRound <- struct{}{}
+	close(doneWithRound) // it no longer blocks periodic
+	return proc, nil
+}
+
+func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
+
+	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
+	defer cancel()
+	id := host.ID()
+
+	// get bootstrap peers from config. retrieving them here makes
+	// sure we remain observant of changes to client configuration.
+	peers := cfg.BootstrapPeers()
+	// determine how many bootstrap connections to open
+	connected := host.Network().Peers()
+	if len(connected) >= cfg.MinPeerThreshold {
+		log.Event(ctx, "bootstrapSkip", id)
+		log.Debugf("%s core bootstrap skipped -- connected to %d (> %d) nodes",
+			id, len(connected), cfg.MinPeerThreshold)
+		return nil
+	}
+	numToDial := cfg.MinPeerThreshold - len(connected)
+
+	// filter out bootstrap nodes we are already connected to
+	var notConnected []peerstore.PeerInfo
+	for _, p := range peers {
+		if host.Network().Connectedness(p.ID) != net.Connected {
+			notConnected = append(notConnected, p)
+		}
+	}
+
+	// if connected to all bootstrap peer candidates, exit
+	if len(notConnected) < 1 {
+		log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
+		return ErrNotEnoughBootstrapPeers
+	}
+
+	// connect to a random subset of bootstrap candidates
+	randSubset := randomSubsetOfPeers(notConnected, numToDial)
+
+	defer log.EventBegin(ctx, "bootstrapStart", id).Done()
+	log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
+	return bootstrapConnect(ctx, host, randSubset)
+}
+
+func bootstrapConnect(ctx context.Context, ph host.Host, peers []peerstore.PeerInfo) error {
+	if len(peers) < 1 {
+		return ErrNotEnoughBootstrapPeers
+	}
+
+	errs := make(chan error, len(peers))
+	var wg sync.WaitGroup
+	for _, p := range peers {
+
+		// performed asynchronously because when performed synchronously, if
+		// one `Connect` call hangs, subsequent calls are more likely to
+		// fail/abort due to an expiring context.
+		// Also, performed asynchronously for dial speed.
+
+		wg.Add(1)
+		go func(p peerstore.PeerInfo) {
+			defer wg.Done()
+			defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done()
+			log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)
+
+			ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL)
+			if err := ph.Connect(ctx, p); err != nil {
+				log.Event(ctx, "bootstrapDialFailed", p.ID)
+				log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
+				errs <- err
+				return
+			}
+			log.Event(ctx, "bootstrapDialSuccess", p.ID)
+			log.Infof("bootstrapped with %v", p.ID)
+		}(p)
+	}
+	wg.Wait()
+
+	// our failure condition is when no connection attempt succeeded.
+	// So drain the errs channel, counting the results.
+	close(errs)
+	count := 0
+	var err error
+	for err = range errs {
+		if err != nil {
+			count++
+		}
+	}
+	if count == len(peers) {
+		return fmt.Errorf("failed to bootstrap. %s", err)
+	}
+	return nil
+}
+
+func randomSubsetOfPeers(in []peerstore.PeerInfo, max int) []peerstore.PeerInfo {
+	n := math2.IntMin(max, len(in))
+	var out []peerstore.PeerInfo
+	for _, val := range rand.Perm(len(in)) {
+		out = append(out, in[val])
+		if len(out) >= n {
+			break
+		}
+	}
+	return out
+}
+
+type Peers []config.BootstrapPeer
+
+func (bpeers Peers) ToPeerInfos() []peerstore.PeerInfo {
+	pinfos := make(map[peer.ID]*peerstore.PeerInfo)
+	for _, bootstrap := range bpeers {
+		pinfo, ok := pinfos[bootstrap.ID()]
+		if !ok {
+			pinfo = new(peerstore.PeerInfo)
+			pinfos[bootstrap.ID()] = pinfo
+			pinfo.ID = bootstrap.ID()
+		}
+
+		pinfo.Addrs = append(pinfo.Addrs, bootstrap.Transport())
+	}
+
+	var peers []peerstore.PeerInfo
+	for _, pinfo := range pinfos {
+		peers = append(peers, *pinfo)
+	}
+
+	return peers
+}
diff --git a/core/bootstrap/bootstrap_test.go b/core/bootstrap/bootstrap_test.go
new file mode 100644
index 000000000..0c7799858
--- /dev/null
+++ b/core/bootstrap/bootstrap_test.go
@@ -0,0 +1,56 @@
+package bootstrap
+
+import (
+	"fmt"
+	"testing"
+
+	config "github.com/ipfs/go-ipfs-config"
+	pstore "github.com/libp2p/go-libp2p-peerstore"
+	testutil "github.com/libp2p/go-testutil"
+)
+
+func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
+	var ps []pstore.PeerInfo
+	sizeofSlice := 100
+	for i := 0; i < sizeofSlice; i++ {
+		pid, err := testutil.RandPeerID()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		ps = append(ps, pstore.PeerInfo{ID: pid})
+	}
+	out := randomSubsetOfPeers(ps, 2*sizeofSlice)
+	if len(out) != len(ps) {
+		t.Fail()
+	}
+}
+
+func TestMultipleAddrsPerPeer(t *testing.T) {
+	var bsps []config.BootstrapPeer
+	for i := 0; i < 10; i++ {
+		pid, err := testutil.RandPeerID()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/5001/ipfs/%s", pid.Pretty())
+		bsp1, err := config.ParseBootstrapPeer(addr)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		addr = fmt.Sprintf("/ip4/127.0.0.1/udp/5002/utp/ipfs/%s", pid.Pretty())
+		bsp2, err := config.ParseBootstrapPeer(addr)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		bsps = append(bsps, bsp1, bsp2)
+	}
+
+	pinfos := Peers.ToPeerInfos(bsps)
+	if len(pinfos) != len(bsps)/2 {
+		t.Fatal("expected fewer peers")
+	}
+}

From d1e92682dc81cc7e87bfe32262e9644849d02c1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Magiera?= <magik6k@gmail.com>
Date: Mon, 8 Apr 2019 15:36:25 +0200
Subject: [PATCH 02/15] bootstrap: cleanup randomSubsetOfPeers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

License: MIT
Signed-off-by: Łukasz Magiera <magik6k@gmail.com>


This commit was moved from ipfs/kubo@0e6f8d4cc19ef5bb5d9c47e357a3ffb862b8b067
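
The simplified selection pattern, in isolation: take the first `max`
indices of a random permutation. A toy sketch with ints (after `max`
has been clamped to the slice length):

    in := []int{10, 20, 30, 40}
    max := 2
    out := make([]int, max)
    for i, v := range rand.Perm(len(in))[:max] {
        out[i] = in[v]
    }
    // out now holds two distinct random elements of in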
---
 core/bootstrap/bootstrap.go | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index e6b4f826d..d7c107690 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -20,8 +20,6 @@ import (
 	"github.com/libp2p/go-libp2p-peer"
 	"github.com/libp2p/go-libp2p-peerstore"
 	"github.com/libp2p/go-libp2p-routing"
-
-	"github.com/ipfs/go-ipfs/thirdparty/math2"
 )
 
 var log = logging.Logger("bootstrap")
@@ -208,13 +206,13 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []peerstore.PeerI
 }
 
 func randomSubsetOfPeers(in []peerstore.PeerInfo, max int) []peerstore.PeerInfo {
-	n := math2.IntMin(max, len(in))
-	var out []peerstore.PeerInfo
-	for _, val := range rand.Perm(len(in)) {
-		out = append(out, in[val])
-		if len(out) >= n {
-			break
-		}
+	if max > len(in) {
+		max = len(in)
+	}
+
+	out := make([]peerstore.PeerInfo, max)
+	for i, val := range rand.Perm(len(in))[:max] {
+		out[i] = in[val]
 	}
 	return out
 }

From 3f93896efef2bcbaec60933deb6756e26f44f077 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ra=C3=BAl=20Kripalani?= <raul@protocol.ai>
Date: Tue, 28 May 2019 17:21:57 +0100
Subject: [PATCH 03/15] migrate to go-libp2p-core.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

closes #6391

License: MIT
Signed-off-by: Raúl Kripalani <raul@protocol.ai>


This commit was moved from ipfs/kubo@e8c2852179b03a95c8d198895b246b1e3ffaeed8
---
 core/bootstrap/bootstrap.go      | 38 ++++++++++++++++----------------
 core/bootstrap/bootstrap_test.go | 12 +++++-----
 2 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index d7c107690..b2cf1a811 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -14,12 +14,12 @@ import (
 	"github.com/jbenet/goprocess"
 	"github.com/jbenet/goprocess/context"
 	"github.com/jbenet/goprocess/periodic"
-	"github.com/libp2p/go-libp2p-host"
+	"github.com/libp2p/go-libp2p-core/host"
+	"github.com/libp2p/go-libp2p-core/network"
+	"github.com/libp2p/go-libp2p-core/peer"
+	"github.com/libp2p/go-libp2p-core/peerstore"
+	"github.com/libp2p/go-libp2p-core/routing"
 	"github.com/libp2p/go-libp2p-loggables"
-	"github.com/libp2p/go-libp2p-net"
-	"github.com/libp2p/go-libp2p-peer"
-	"github.com/libp2p/go-libp2p-peerstore"
-	"github.com/libp2p/go-libp2p-routing"
 )
 
 var log = logging.Logger("bootstrap")
@@ -51,7 +51,7 @@ type BootstrapConfig struct {
 	// BootstrapPeers is a function that returns a set of bootstrap peers
 	// for the bootstrap process to use. This makes it possible for clients
 	// to control the peers the process uses at any moment.
-	BootstrapPeers func() []peerstore.PeerInfo
+	BootstrapPeers func() []peer.AddrInfo
 }
 
 // DefaultBootstrapConfig specifies default sane parameters for bootstrapping.
@@ -61,9 +61,9 @@ var DefaultBootstrapConfig = BootstrapConfig{
 	ConnectionTimeout: (30 * time.Second) / 3, // Perod / 3
 }
 
-func BootstrapConfigWithPeers(pis []peerstore.PeerInfo) BootstrapConfig {
+func BootstrapConfigWithPeers(pis []peer.AddrInfo) BootstrapConfig {
 	cfg := DefaultBootstrapConfig
-	cfg.BootstrapPeers = func() []peerstore.PeerInfo {
+	cfg.BootstrapPeers = func() []peer.AddrInfo {
 		return pis
 	}
 	return cfg
@@ -73,7 +73,7 @@ func BootstrapConfigWithPeers(pis []peerstore.PeerInfo) BootstrapConfig {
 // check the number of open connections and -- if there are too few -- initiate
 // connections to well-known bootstrap peers. It also kicks off subsystem
 // bootstrapping (i.e. routing).
-func Bootstrap(id peer.ID, host host.Host, rt routing.IpfsRouting, cfg BootstrapConfig) (io.Closer, error) {
+func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConfig) (io.Closer, error) {
 
 	// make a signal to wait for one bootstrap round to complete.
 	doneWithRound := make(chan struct{})
@@ -135,9 +135,9 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er
 	numToDial := cfg.MinPeerThreshold - len(connected)
 
 	// filter out bootstrap nodes we are already connected to
-	var notConnected []peerstore.PeerInfo
+	var notConnected []peer.AddrInfo
 	for _, p := range peers {
-		if host.Network().Connectedness(p.ID) != net.Connected {
+		if host.Network().Connectedness(p.ID) != network.Connected {
 			notConnected = append(notConnected, p)
 		}
 	}
@@ -156,7 +156,7 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er
 	return bootstrapConnect(ctx, host, randSubset)
 }
 
-func bootstrapConnect(ctx context.Context, ph host.Host, peers []peerstore.PeerInfo) error {
+func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.AddrInfo) error {
 	if len(peers) < 1 {
 		return ErrNotEnoughBootstrapPeers
 	}
@@ -171,7 +171,7 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []peerstore.PeerI
 		// Also, performed asynchronously for dial speed.
 
 		wg.Add(1)
-		go func(p peerstore.PeerInfo) {
+		go func(p peer.AddrInfo) {
 			defer wg.Done()
 			defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done()
 			log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)
@@ -205,12 +205,12 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []peerstore.PeerI
 	return nil
 }
 
-func randomSubsetOfPeers(in []peerstore.PeerInfo, max int) []peerstore.PeerInfo {
+func randomSubsetOfPeers(in []peer.AddrInfo, max int) []peer.AddrInfo {
 	if max > len(in) {
 		max = len(in)
 	}
 
-	out := make([]peerstore.PeerInfo, max)
+	out := make([]peer.AddrInfo, max)
 	for i, val := range rand.Perm(len(in))[:max] {
 		out[i] = in[val]
 	}
@@ -219,12 +219,12 @@ func randomSubsetOfPeers(in []peerstore.PeerInfo, max int) []peerstore.PeerInfo
 
 type Peers []config.BootstrapPeer
 
-func (bpeers Peers) ToPeerInfos() []peerstore.PeerInfo {
-	pinfos := make(map[peer.ID]*peerstore.PeerInfo)
+func (bpeers Peers) ToPeerInfos() []peer.AddrInfo {
+	pinfos := make(map[peer.ID]*peer.AddrInfo)
 	for _, bootstrap := range bpeers {
 		pinfo, ok := pinfos[bootstrap.ID()]
 		if !ok {
-			pinfo = new(peerstore.PeerInfo)
+			pinfo = new(peer.AddrInfo)
 			pinfos[bootstrap.ID()] = pinfo
 			pinfo.ID = bootstrap.ID()
 		}
@@ -232,7 +232,7 @@ func (bpeers Peers) ToPeerInfos() []peerstore.PeerInfo {
 		pinfo.Addrs = append(pinfo.Addrs, bootstrap.Transport())
 	}
 
-	var peers []peerstore.PeerInfo
+	var peers []peer.AddrInfo
 	for _, pinfo := range pinfos {
 		peers = append(peers, *pinfo)
 	}
diff --git a/core/bootstrap/bootstrap_test.go b/core/bootstrap/bootstrap_test.go
index 0c7799858..e7e460b87 100644
--- a/core/bootstrap/bootstrap_test.go
+++ b/core/bootstrap/bootstrap_test.go
@@ -5,20 +5,20 @@ import (
 	"testing"
 
 	config "github.com/ipfs/go-ipfs-config"
-	pstore "github.com/libp2p/go-libp2p-peerstore"
-	testutil "github.com/libp2p/go-testutil"
+	"github.com/libp2p/go-libp2p-core/peer"
+	"github.com/libp2p/go-libp2p-core/test"
 )
 
 func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
-	var ps []pstore.PeerInfo
+	var ps []peer.AddrInfo
 	sizeofSlice := 100
 	for i := 0; i < sizeofSlice; i++ {
-		pid, err := testutil.RandPeerID()
+		pid, err := test.RandPeerID()
 		if err != nil {
 			t.Fatal(err)
 		}
 
-		ps = append(ps, pstore.PeerInfo{ID: pid})
+		ps = append(ps, peer.AddrInfo{ID: pid})
 	}
 	out := randomSubsetOfPeers(ps, 2*sizeofSlice)
 	if len(out) != len(ps) {
@@ -29,7 +29,7 @@ func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
 func TestMultipleAddrsPerPeer(t *testing.T) {
 	var bsps []config.BootstrapPeer
 	for i := 0; i < 10; i++ {
-		pid, err := testutil.RandPeerID()
+		pid, err := test.RandPeerID()
 		if err != nil {
 			t.Fatal(err)
 		}

From c73b0a6c3a919490452cc6cb2a52b6d5f9d749fe Mon Sep 17 00:00:00 2001
From: Steven Allen <steven@stebalien.com>
Date: Fri, 31 May 2019 17:06:52 -0700
Subject: [PATCH 04/15] chore: deprecate go-ipfs-addr

This commit was moved from ipfs/kubo@5d468e23ea315811018c3af46e0b29f4355f7d37
---
 core/bootstrap/bootstrap.go      | 24 ------------------------
 core/bootstrap/bootstrap_test.go | 31 -------------------------------
 2 files changed, 55 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index b2cf1a811..026690366 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -9,7 +9,6 @@ import (
 	"sync"
 	"time"
 
-	config "github.com/ipfs/go-ipfs-config"
 	logging "github.com/ipfs/go-log"
 	"github.com/jbenet/goprocess"
 	"github.com/jbenet/goprocess/context"
@@ -216,26 +215,3 @@ func randomSubsetOfPeers(in []peer.AddrInfo, max int) []peer.AddrInfo {
 	}
 	return out
 }
-
-type Peers []config.BootstrapPeer
-
-func (bpeers Peers) ToPeerInfos() []peer.AddrInfo {
-	pinfos := make(map[peer.ID]*peer.AddrInfo)
-	for _, bootstrap := range bpeers {
-		pinfo, ok := pinfos[bootstrap.ID()]
-		if !ok {
-			pinfo = new(peer.AddrInfo)
-			pinfos[bootstrap.ID()] = pinfo
-			pinfo.ID = bootstrap.ID()
-		}
-
-		pinfo.Addrs = append(pinfo.Addrs, bootstrap.Transport())
-	}
-
-	var peers []peer.AddrInfo
-	for _, pinfo := range pinfos {
-		peers = append(peers, *pinfo)
-	}
-
-	return peers
-}
diff --git a/core/bootstrap/bootstrap_test.go b/core/bootstrap/bootstrap_test.go
index e7e460b87..23128c31f 100644
--- a/core/bootstrap/bootstrap_test.go
+++ b/core/bootstrap/bootstrap_test.go
@@ -1,10 +1,8 @@
 package bootstrap
 
 import (
-	"fmt"
 	"testing"
 
-	config "github.com/ipfs/go-ipfs-config"
 	"github.com/libp2p/go-libp2p-core/peer"
 	"github.com/libp2p/go-libp2p-core/test"
 )
@@ -25,32 +23,3 @@ func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
 		t.Fail()
 	}
 }
-
-func TestMultipleAddrsPerPeer(t *testing.T) {
-	var bsps []config.BootstrapPeer
-	for i := 0; i < 10; i++ {
-		pid, err := test.RandPeerID()
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/5001/ipfs/%s", pid.Pretty())
-		bsp1, err := config.ParseBootstrapPeer(addr)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		addr = fmt.Sprintf("/ip4/127.0.0.1/udp/5002/utp/ipfs/%s", pid.Pretty())
-		bsp2, err := config.ParseBootstrapPeer(addr)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		bsps = append(bsps, bsp1, bsp2)
-	}
-
-	pinfos := Peers.ToPeerInfos(bsps)
-	if len(pinfos) != len(bsps)/2 {
-		t.Fatal("expected fewer peers")
-	}
-}

From c794bdba2ab1af0be7e6aa8646121f45435da0ee Mon Sep 17 00:00:00 2001
From: Steven Allen <steven@stebalien.com>
Date: Thu, 16 Jan 2020 15:48:20 -0800
Subject: [PATCH 05/15] fix: migrate from deprecated warning function

This commit was moved from ipfs/kubo@a53d48059bff98e3a48faf79103651ce301a7ab2
---
 core/bootstrap/bootstrap.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index 026690366..e6ec29eea 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -80,7 +80,7 @@ func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConf
 	if len(cfg.BootstrapPeers()) == 0 {
 		// We *need* to bootstrap but we have no bootstrap peers
 		// configured *at all*, inform the user.
-		log.Warning("no bootstrap nodes configured: go-ipfs may have difficulty connecting to the network")
+		log.Warn("no bootstrap nodes configured: go-ipfs may have difficulty connecting to the network")
 	}
 
 	// the periodic bootstrap function -- the connection supervisor

From 1b4de817ffa71a73038020571c68abf6a72ecd84 Mon Sep 17 00:00:00 2001
From: Steven Allen <steven@stebalien.com>
Date: Thu, 16 Jan 2020 16:18:53 -0800
Subject: [PATCH 06/15] fix(tracing): remove event tracing

We've deprecated this system and have yet to move to a new one. We might as
well remove everything, switch to a new system, then deliberately trace the
entire system.


This commit was moved from ipfs/kubo@906f45edd9899352efba710e2f53978fc4b8c6e4
---
 core/bootstrap/bootstrap.go | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index e6ec29eea..4edc5ac84 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -18,7 +18,6 @@ import (
 	"github.com/libp2p/go-libp2p-core/peer"
 	"github.com/libp2p/go-libp2p-core/peerstore"
 	"github.com/libp2p/go-libp2p-core/routing"
-	"github.com/libp2p/go-libp2p-loggables"
 )
 
 var log = logging.Logger("bootstrap")
@@ -86,10 +85,8 @@ func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConf
 	// the periodic bootstrap function -- the connection supervisor
 	periodic := func(worker goprocess.Process) {
 		ctx := goprocessctx.OnClosingContext(worker)
-		defer log.EventBegin(ctx, "periodicBootstrap", id).Done()
 
 		if err := bootstrapRound(ctx, host, cfg); err != nil {
-			log.Event(ctx, "bootstrapError", id, loggables.Error(err))
 			log.Debugf("%s bootstrap error: %s", id, err)
 		}
 
@@ -126,7 +123,6 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er
 	// determine how many bootstrap connections to open
 	connected := host.Network().Peers()
 	if len(connected) >= cfg.MinPeerThreshold {
-		log.Event(ctx, "bootstrapSkip", id)
 		log.Debugf("%s core bootstrap skipped -- connected to %d (> %d) nodes",
 			id, len(connected), cfg.MinPeerThreshold)
 		return nil
@@ -150,7 +146,6 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er
 	// connect to a random subset of bootstrap candidates
 	randSubset := randomSubsetOfPeers(notConnected, numToDial)
 
-	defer log.EventBegin(ctx, "bootstrapStart", id).Done()
 	log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
 	return bootstrapConnect(ctx, host, randSubset)
 }
@@ -172,17 +167,14 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.AddrInfo)
 		wg.Add(1)
 		go func(p peer.AddrInfo) {
 			defer wg.Done()
-			defer log.EventBegin(ctx, "bootstrapDial", ph.ID(), p.ID).Done()
 			log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)
 
 			ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL)
 			if err := ph.Connect(ctx, p); err != nil {
-				log.Event(ctx, "bootstrapDialFailed", p.ID)
 				log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
 				errs <- err
 				return
 			}
-			log.Event(ctx, "bootstrapDialSuccess", p.ID)
 			log.Infof("bootstrapped with %v", p.ID)
 		}(p)
 	}

From 735d89cb31885432ecf5270bedb7aeaf47f3e4fc Mon Sep 17 00:00:00 2001
From: Jorropo <jorropo.pgm@gmail.com>
Date: Mon, 29 Aug 2022 13:55:00 +0200
Subject: [PATCH 07/15] chore: bump go-libp2p v0.22.0 & go1.18&go1.19

Fixes: #9225


This commit was moved from ipfs/kubo@196887cbe5fbcd41243c1dfb0db681a1cc2914ff
---
 core/bootstrap/bootstrap.go      | 10 +++++-----
 core/bootstrap/bootstrap_test.go |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index 4edc5ac84..daa0a44d3 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -13,11 +13,11 @@ import (
 	"github.com/jbenet/goprocess"
 	"github.com/jbenet/goprocess/context"
 	"github.com/jbenet/goprocess/periodic"
-	"github.com/libp2p/go-libp2p-core/host"
-	"github.com/libp2p/go-libp2p-core/network"
-	"github.com/libp2p/go-libp2p-core/peer"
-	"github.com/libp2p/go-libp2p-core/peerstore"
-	"github.com/libp2p/go-libp2p-core/routing"
+	"github.com/libp2p/go-libp2p/core/host"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/peerstore"
+	"github.com/libp2p/go-libp2p/core/routing"
 )
 
 var log = logging.Logger("bootstrap")
diff --git a/core/bootstrap/bootstrap_test.go b/core/bootstrap/bootstrap_test.go
index 23128c31f..98a4a7827 100644
--- a/core/bootstrap/bootstrap_test.go
+++ b/core/bootstrap/bootstrap_test.go
@@ -3,8 +3,8 @@ package bootstrap
 import (
 	"testing"
 
-	"github.com/libp2p/go-libp2p-core/peer"
-	"github.com/libp2p/go-libp2p-core/test"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/test"
 )
 
 func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {

From e1776db498e92e574169d9a731a145954cc88859 Mon Sep 17 00:00:00 2001
From: Lucas Molas <schomatis@gmail.com>
Date: Thu, 25 May 2023 09:39:49 -0300
Subject: [PATCH 08/15] feat(bootstrap): save connected peers as backup
 bootstrap peers (#8856)

* feat(bootstrap): save connected peers as backup temporary bootstrap ones
* fix: do not add duplicated oldSavedPeers, not using tags, reuse
randomizeList
* test: add regression test
* chore: add changelog

---------

Co-authored-by: Henrique Dias <hacdias@gmail.com>
Co-authored-by: Marcin Rataj <lidel@lidel.org>

This commit was moved from ipfs/kubo@63561f3baf63524ce7d147f67c0c4b4e0ddc5bc9
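
A configuration sketch for the new hooks (only the field names come
from this patch; the persistence layer is assumed). A client wires both
functions so the node can fall back to previously connected peers when
the original bootstrappers are unreachable:

    cfg := bootstrap.BootstrapConfigWithPeers(bootstrapPeers)
    cfg.SaveBackupBootstrapPeers = func(ctx context.Context, ps []peer.AddrInfo) {
        // e.g. serialize ps into the node's config or datastore
    }
    cfg.LoadBackupBootstrapPeers = func(ctx context.Context) []peer.AddrInfo {
        // e.g. read the previously saved list back; nil if none saved yet
        return nil
    }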
---
 core/bootstrap/bootstrap.go      | 229 ++++++++++++++++++++++++-------
 core/bootstrap/bootstrap_test.go |   6 +-
 2 files changed, 183 insertions(+), 52 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index daa0a44d3..b566e0e97 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -3,16 +3,16 @@ package bootstrap
 import (
 	"context"
 	"errors"
-	"fmt"
 	"io"
 	"math/rand"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	logging "github.com/ipfs/go-log"
 	"github.com/jbenet/goprocess"
-	"github.com/jbenet/goprocess/context"
-	"github.com/jbenet/goprocess/periodic"
+	goprocessctx "github.com/jbenet/goprocess/context"
+	periodicproc "github.com/jbenet/goprocess/periodic"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/network"
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -50,13 +50,26 @@ type BootstrapConfig struct {
 	// for the bootstrap process to use. This makes it possible for clients
 	// to control the peers the process uses at any moment.
 	BootstrapPeers func() []peer.AddrInfo
+
+	// BackupBootstrapInterval governs the periodic interval at which the node will
+	// attempt to save connected nodes to use as temporary bootstrap peers.
+	BackupBootstrapInterval time.Duration
+
+	// MaxBackupBootstrapSize controls the maximum number of peers we're saving
+	// as backup bootstrap peers.
+	MaxBackupBootstrapSize int
+
+	SaveBackupBootstrapPeers func(context.Context, []peer.AddrInfo)
+	LoadBackupBootstrapPeers func(context.Context) []peer.AddrInfo
 }
 
 // DefaultBootstrapConfig specifies default sane parameters for bootstrapping.
 var DefaultBootstrapConfig = BootstrapConfig{
-	MinPeerThreshold:  4,
-	Period:            30 * time.Second,
-	ConnectionTimeout: (30 * time.Second) / 3, // Perod / 3
+	MinPeerThreshold:        4,
+	Period:                  30 * time.Second,
+	ConnectionTimeout:       (30 * time.Second) / 3, // Perod / 3
+	BackupBootstrapInterval: 1 * time.Hour,
+	MaxBackupBootstrapSize:  20,
 }
 
 func BootstrapConfigWithPeers(pis []peer.AddrInfo) BootstrapConfig {
@@ -90,6 +103,9 @@ func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConf
 			log.Debugf("%s bootstrap error: %s", id, err)
 		}
 
+		// Exit the first call (triggered independently by `proc.Go`, not `Tick`)
+		// only after being done with the *single* Routing.Bootstrap call. Following
+		// periodic calls (`Tick`) will not block on this.
 		<-doneWithRound
 	}
 
@@ -108,9 +124,100 @@ func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConf
 
 	doneWithRound <- struct{}{}
 	close(doneWithRound) // it no longer blocks periodic
+
+	startSavePeersAsTemporaryBootstrapProc(cfg, host, proc)
+
 	return proc, nil
 }
 
+// Aside from the main bootstrap process we also run a secondary one that saves
+// connected peers as a backup measure if we can't connect to the official
+// bootstrap ones. These peers will serve as *temporary* bootstrap nodes.
+func startSavePeersAsTemporaryBootstrapProc(cfg BootstrapConfig, host host.Host, bootstrapProc goprocess.Process) {
+	savePeersFn := func(worker goprocess.Process) {
+		ctx := goprocessctx.OnClosingContext(worker)
+
+		if err := saveConnectedPeersAsTemporaryBootstrap(ctx, host, cfg); err != nil {
+			log.Debugf("saveConnectedPeersAsTemporaryBootstrap error: %s", err)
+		}
+	}
+	savePeersProc := periodicproc.Tick(cfg.BackupBootstrapInterval, savePeersFn)
+
+	// When the main bootstrap process ends also terminate the 'save connected
+	// peers' ones. Coupling the two seems the easiest way to handle this backup
+	// process without additional complexity.
+	go func() {
+		<-bootstrapProc.Closing()
+		savePeersProc.Close()
+	}()
+
+	// Run the first round now (after the first bootstrap process has finished)
+	// as the SavePeersPeriod can be much longer than bootstrap.
+	savePeersProc.Go(savePeersFn)
+}
+
+func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
+	// Randomize the list of connected peers, we don't prioritize anyone.
+	connectedPeers := randomizeList(host.Network().Peers())
+
+	bootstrapPeers := cfg.BootstrapPeers()
+	backupPeers := make([]peer.AddrInfo, 0, cfg.MaxBackupBootstrapSize)
+
+	// Choose peers to save and filter out the ones that are already bootstrap nodes.
+	for _, p := range connectedPeers {
+		found := false
+		for _, bootstrapPeer := range bootstrapPeers {
+			if p == bootstrapPeer.ID {
+				found = true
+				break
+			}
+		}
+		if !found {
+			backupPeers = append(backupPeers, peer.AddrInfo{
+				ID:    p,
+				Addrs: host.Network().Peerstore().Addrs(p),
+			})
+		}
+
+		if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
+			break
+		}
+	}
+
+	// If we didn't reach the target number use previously stored connected peers.
+	if len(backupPeers) < cfg.MaxBackupBootstrapSize {
+		oldSavedPeers := cfg.LoadBackupBootstrapPeers(ctx)
+		log.Debugf("missing %d peers to reach backup bootstrap target of %d, trying from previous list of %d saved peers",
+			cfg.MaxBackupBootstrapSize-len(backupPeers), cfg.MaxBackupBootstrapSize, len(oldSavedPeers))
+
+		// Add some of the old saved peers. Ensure we don't duplicate them.
+		for _, p := range oldSavedPeers {
+			found := false
+			for _, sp := range backupPeers {
+				if p.ID == sp.ID {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				backupPeers = append(backupPeers, p)
+			}
+
+			if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
+				break
+			}
+		}
+	}
+
+	cfg.SaveBackupBootstrapPeers(ctx, backupPeers)
+	log.Debugf("saved %d peers (of %d target) as bootstrap backup in the config", len(backupPeers), cfg.MaxBackupBootstrapSize)
+	return nil
+}
+
+// Connect to as many peers needed to reach the BootstrapConfig.MinPeerThreshold.
+// Peers can be original bootstrap or temporary ones (drawn from a list of
+// persisted previously connected peers).
 func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
 
 	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
@@ -127,35 +234,58 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er
 			id, len(connected), cfg.MinPeerThreshold)
 		return nil
 	}
-	numToDial := cfg.MinPeerThreshold - len(connected)
+	numToDial := cfg.MinPeerThreshold - len(connected) // numToDial > 0
 
-	// filter out bootstrap nodes we are already connected to
-	var notConnected []peer.AddrInfo
-	for _, p := range peers {
-		if host.Network().Connectedness(p.ID) != network.Connected {
-			notConnected = append(notConnected, p)
+	if len(peers) > 0 {
+		numToDial -= int(peersConnect(ctx, host, peers, numToDial, true))
+		if numToDial <= 0 {
+			return nil
 		}
 	}
 
-	// if connected to all bootstrap peer candidates, exit
-	if len(notConnected) < 1 {
-		log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
-		return ErrNotEnoughBootstrapPeers
+	log.Debugf("not enough bootstrap peers to fill the remaining target of %d connections, trying backup list", numToDial)
+
+	tempBootstrapPeers := cfg.LoadBackupBootstrapPeers(ctx)
+	if len(tempBootstrapPeers) > 0 {
+		numToDial -= int(peersConnect(ctx, host, tempBootstrapPeers, numToDial, false))
+		if numToDial <= 0 {
+			return nil
+		}
 	}
 
-	// connect to a random subset of bootstrap candidates
-	randSubset := randomSubsetOfPeers(notConnected, numToDial)
+	log.Debugf("tried both original bootstrap peers and temporary ones but still missing target of %d connections", numToDial)
 
-	log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
-	return bootstrapConnect(ctx, host, randSubset)
+	return ErrNotEnoughBootstrapPeers
 }
 
-func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.AddrInfo) error {
-	if len(peers) < 1 {
-		return ErrNotEnoughBootstrapPeers
-	}
+// Attempt to make `needed` connections from the `availablePeers` list. Mark
+// peers as either `permanent` or temporary when adding them to the Peerstore.
+// Return the number of connections completed. We eagerly over-connect in parallel,
+// so we might connect to more than needed.
+// (We spawn as many goroutines and connection attempts as there are availablePeers,
+// but this list comes from restricted sets of original or temporary bootstrap
+// nodes which will keep it under a sane value.)
+func peersConnect(ctx context.Context, ph host.Host, availablePeers []peer.AddrInfo, needed int, permanent bool) uint64 {
+	peers := randomizeList(availablePeers)
+
+	// Monitor the number of connections and stop if we reach the target.
+	var connected uint64
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-time.After(1 * time.Second):
+				if int(atomic.LoadUint64(&connected)) >= needed {
+					cancel()
+					return
+				}
+			}
+		}
+	}()
 
-	errs := make(chan error, len(peers))
 	var wg sync.WaitGroup
 	for _, p := range peers {
 
@@ -164,45 +294,46 @@ func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.AddrInfo)
 		// fail/abort due to an expiring context.
 		// Also, performed asynchronously for dial speed.
 
+		if int(atomic.LoadUint64(&connected)) >= needed {
+			cancel()
+			break
+		}
+
 		wg.Add(1)
 		go func(p peer.AddrInfo) {
 			defer wg.Done()
+
+			// Skip addresses belonging to a peer we're already connected to.
+			// (Not a guarantee but a best-effort policy.)
+			if ph.Network().Connectedness(p.ID) == network.Connected {
+				return
+			}
 			log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID)
 
-			ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL)
 			if err := ph.Connect(ctx, p); err != nil {
-				log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
-				errs <- err
+				if ctx.Err() != context.Canceled {
+					log.Debugf("failed to bootstrap with %v: %s", p.ID, err)
+				}
 				return
 			}
+			if permanent {
+				// We're connecting to an original bootstrap peer, mark it as
+				// a permanent address (Connect will register it as TempAddrTTL).
+				ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL)
+			}
+
 			log.Infof("bootstrapped with %v", p.ID)
+			atomic.AddUint64(&connected, 1)
 		}(p)
 	}
 	wg.Wait()
 
-	// our failure condition is when no connection attempt succeeded.
-	// So drain the errs channel, counting the results.
-	close(errs)
-	count := 0
-	var err error
-	for err = range errs {
-		if err != nil {
-			count++
-		}
-	}
-	if count == len(peers) {
-		return fmt.Errorf("failed to bootstrap. %s", err)
-	}
-	return nil
+	return connected
 }
 
-func randomSubsetOfPeers(in []peer.AddrInfo, max int) []peer.AddrInfo {
-	if max > len(in) {
-		max = len(in)
-	}
-
-	out := make([]peer.AddrInfo, max)
-	for i, val := range rand.Perm(len(in))[:max] {
+func randomizeList[T any](in []T) []T {
+	out := make([]T, len(in))
+	for i, val := range rand.Perm(len(in)) {
 		out[i] = in[val]
 	}
 	return out
diff --git a/core/bootstrap/bootstrap_test.go b/core/bootstrap/bootstrap_test.go
index 98a4a7827..39490a474 100644
--- a/core/bootstrap/bootstrap_test.go
+++ b/core/bootstrap/bootstrap_test.go
@@ -7,9 +7,9 @@ import (
 	"github.com/libp2p/go-libp2p/core/test"
 )
 
-func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
+func TestRandomizeAddressList(t *testing.T) {
 	var ps []peer.AddrInfo
-	sizeofSlice := 100
+	sizeofSlice := 10
 	for i := 0; i < sizeofSlice; i++ {
 		pid, err := test.RandPeerID()
 		if err != nil {
@@ -18,7 +18,7 @@ func TestSubsetWhenMaxIsGreaterThanLengthOfSlice(t *testing.T) {
 
 		ps = append(ps, peer.AddrInfo{ID: pid})
 	}
-	out := randomSubsetOfPeers(ps, 2*sizeofSlice)
+	out := randomizeList(ps)
 	if len(out) != len(ps) {
 		t.Fail()
 	}
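
A note on the connection strategy introduced above: peersConnect fans
out one dial per candidate, counts successes atomically, and cancels
the remaining dials once the target is met. The shape of that pattern
in isolation (the `dial` helper and `candidates`/`needed` variables are
stand-ins, not part of the patch):

    var connected uint64
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    var wg sync.WaitGroup
    for _, p := range candidates {
        if int(atomic.LoadUint64(&connected)) >= needed {
            cancel()
            break
        }
        wg.Add(1)
        go func(p peer.AddrInfo) {
            defer wg.Done()
            if err := dial(ctx, p); err != nil {
                return // includes dials aborted by the cancellation above
            }
            atomic.AddUint64(&connected, 1)
        }(p)
    }
    wg.Wait()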

From 4ca63d99ca6bd765b33efda62802a2b64664293a Mon Sep 17 00:00:00 2001
From: Kay <kehiiiiya@gmail.com>
Date: Thu, 17 Aug 2023 15:32:08 +0330
Subject: [PATCH 09/15] style: gofumpt and godot [skip changelog] (#10081)

This commit was moved from ipfs/kubo@f12b372af9cc32975ff48397708fac3ec1f9966f
---
 core/bootstrap/bootstrap.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index b566e0e97..ed95d74e1 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -85,7 +85,6 @@ func BootstrapConfigWithPeers(pis []peer.AddrInfo) BootstrapConfig {
 // connections to well-known bootstrap peers. It also kicks off subsystem
 // bootstrapping (i.e. routing).
 func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConfig) (io.Closer, error) {
-
 	// make a signal to wait for one bootstrap round to complete.
 	doneWithRound := make(chan struct{})
 
@@ -219,7 +218,6 @@ func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host,
 // Peers can be original bootstrap or temporary ones (drawn from a list of
 // persisted previously connected peers).
 func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {
-
 	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
 	defer cancel()
 	id := host.ID()

From a775656d698b277fff331547a8cc9165e5c648ef Mon Sep 17 00:00:00 2001
From: Andrew Gillis <gammazero@users.noreply.github.com>
Date: Thu, 21 Sep 2023 09:29:38 -0700
Subject: [PATCH 10/15] core/bootstrap: fix panic without backup bootstrap peer
 functions (#10029)

Fix panic when backup bootstrap peer load and save funcs are nil

A panic occurs when the first bootstrap round runs if these functions are not assigned in the configuration:
- `LoadBackupBootstrapPeers`
- `SaveBackupBootstrapPeers`

This fix assumes that it is acceptable for these functions to be nil, as it may be desirable to disable the backup peer load and save functionality.
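
With this change the backup hooks become an explicit option. A sketch
of the two supported configurations (the `loadFunc`/`saveFunc` bodies
are assumed):

    // Backup peers enabled: load and save must both be non-nil,
    // otherwise WithBackupPeers panics.
    cfg := bootstrap.BootstrapConfigWithPeers(pis, bootstrap.WithBackupPeers(loadFunc, saveFunc))

    // Backup peers disabled: omit the option entirely (or pass nil for
    // both functions) and the save/load machinery is skipped.
    cfg = bootstrap.BootstrapConfigWithPeers(pis)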

This commit was moved from ipfs/kubo@c46cbecb832b9a25f74a275b946b3a0ff3aefaba
---
 core/bootstrap/bootstrap.go      |  50 ++++++++++++--
 core/bootstrap/bootstrap_test.go | 114 +++++++++++++++++++++++++++++++
 2 files changed, 157 insertions(+), 7 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index ed95d74e1..acd7ef672 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -59,8 +59,8 @@ type BootstrapConfig struct {
 	// as backup bootstrap peers.
 	MaxBackupBootstrapSize int
 
-	SaveBackupBootstrapPeers func(context.Context, []peer.AddrInfo)
-	LoadBackupBootstrapPeers func(context.Context) []peer.AddrInfo
+	saveBackupBootstrapPeers func(context.Context, []peer.AddrInfo)
+	loadBackupBootstrapPeers func(context.Context) []peer.AddrInfo
 }
 
 // DefaultBootstrapConfig specifies default sane parameters for bootstrapping.
@@ -72,14 +72,41 @@ var DefaultBootstrapConfig = BootstrapConfig{
 	MaxBackupBootstrapSize:  20,
 }
 
-func BootstrapConfigWithPeers(pis []peer.AddrInfo) BootstrapConfig {
+// BootstrapConfigWithPeers creates a default BootstrapConfig configured with
+// the specified peers, and optional functions to load and save backup peers.
+func BootstrapConfigWithPeers(pis []peer.AddrInfo, options ...func(*BootstrapConfig)) BootstrapConfig {
 	cfg := DefaultBootstrapConfig
 	cfg.BootstrapPeers = func() []peer.AddrInfo {
 		return pis
 	}
+	for _, opt := range options {
+		opt(&cfg)
+	}
 	return cfg
 }
 
+// WithBackupPeers configures functions to load and save backup bootstrap peers.
+func WithBackupPeers(load func(context.Context) []peer.AddrInfo, save func(context.Context, []peer.AddrInfo)) func(*BootstrapConfig) {
+	if save == nil && load != nil || save != nil && load == nil {
+		panic("both load and save backup bootstrap peers functions must be defined")
+	}
+	return func(cfg *BootstrapConfig) {
+		cfg.loadBackupBootstrapPeers = load
+		cfg.saveBackupBootstrapPeers = save
+	}
+}
+
+// BackupPeers returns the load and save backup peers functions.
+func (cfg *BootstrapConfig) BackupPeers() (func(context.Context) []peer.AddrInfo, func(context.Context, []peer.AddrInfo)) {
+	return cfg.loadBackupBootstrapPeers, cfg.saveBackupBootstrapPeers
+}
+
+// SetBackupPeers sets the load and save backup peers functions.
+func (cfg *BootstrapConfig) SetBackupPeers(load func(context.Context) []peer.AddrInfo, save func(context.Context, []peer.AddrInfo)) {
+	opt := WithBackupPeers(load, save)
+	opt(cfg)
+}
+
 // Bootstrap kicks off IpfsNode bootstrapping. This function will periodically
 // check the number of open connections and -- if there are too few -- initiate
 // connections to well-known bootstrap peers. It also kicks off subsystem
@@ -124,7 +151,11 @@ func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConf
 	doneWithRound <- struct{}{}
 	close(doneWithRound) // it no longer blocks periodic
 
-	startSavePeersAsTemporaryBootstrapProc(cfg, host, proc)
+	// If loadBackupBootstrapPeers is not nil then saveBackupBootstrapPeers
+	// must also not be nil.
+	if cfg.loadBackupBootstrapPeers != nil {
+		startSavePeersAsTemporaryBootstrapProc(cfg, host, proc)
+	}
 
 	return proc, nil
 }
@@ -185,7 +216,7 @@ func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host,
 
 	// If we didn't reach the target number use previously stored connected peers.
 	if len(backupPeers) < cfg.MaxBackupBootstrapSize {
-		oldSavedPeers := cfg.LoadBackupBootstrapPeers(ctx)
+		oldSavedPeers := cfg.loadBackupBootstrapPeers(ctx)
 		log.Debugf("missing %d peers to reach backup bootstrap target of %d, trying from previous list of %d saved peers",
 			cfg.MaxBackupBootstrapSize-len(backupPeers), cfg.MaxBackupBootstrapSize, len(oldSavedPeers))
 
@@ -209,7 +240,7 @@ func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host,
 		}
 	}
 
-	cfg.SaveBackupBootstrapPeers(ctx, backupPeers)
+	cfg.saveBackupBootstrapPeers(ctx, backupPeers)
 	log.Debugf("saved %d peers (of %d target) as bootstrap backup in the config", len(backupPeers), cfg.MaxBackupBootstrapSize)
 	return nil
 }
@@ -241,9 +272,14 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er
 		}
 	}
 
+	if cfg.loadBackupBootstrapPeers == nil {
+		log.Debugf("not enough bootstrap peers to fill the remaining target of %d connections", numToDial)
+		return ErrNotEnoughBootstrapPeers
+	}
+
 	log.Debugf("not enough bootstrap peers to fill the remaining target of %d connections, trying backup list", numToDial)
 
-	tempBootstrapPeers := cfg.LoadBackupBootstrapPeers(ctx)
+	tempBootstrapPeers := cfg.loadBackupBootstrapPeers(ctx)
 	if len(tempBootstrapPeers) > 0 {
 		numToDial -= int(peersConnect(ctx, host, tempBootstrapPeers, numToDial, false))
 		if numToDial <= 0 {
diff --git a/core/bootstrap/bootstrap_test.go b/core/bootstrap/bootstrap_test.go
index 39490a474..d933379d4 100644
--- a/core/bootstrap/bootstrap_test.go
+++ b/core/bootstrap/bootstrap_test.go
@@ -1,8 +1,14 @@
 package bootstrap
 
 import (
+	"context"
+	"crypto/rand"
+	"reflect"
 	"testing"
+	"time"
 
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/libp2p/go-libp2p/core/test"
 )
@@ -23,3 +29,111 @@ func TestRandomizeAddressList(t *testing.T) {
 		t.Fail()
 	}
 }
+
+func TestLoadAndSaveOptions(t *testing.T) {
+	loadFunc := func(_ context.Context) []peer.AddrInfo { return nil }
+	saveFunc := func(_ context.Context, _ []peer.AddrInfo) {}
+
+	bootCfg := BootstrapConfigWithPeers(nil, WithBackupPeers(loadFunc, saveFunc))
+	load, save := bootCfg.BackupPeers()
+	if load == nil {
+		t.Fatal("load function not assigned")
+	}
+	if reflect.ValueOf(load).Pointer() != reflect.ValueOf(loadFunc).Pointer() {
+		t.Fatal("load not assigned correct function")
+	}
+	if save == nil {
+		t.Fatal("save function not assigned")
+	}
+	if reflect.ValueOf(save).Pointer() != reflect.ValueOf(saveFunc).Pointer() {
+		t.Fatal("save not assigned correct function")
+	}
+
+	assertPanics(t, "with only load func", func() {
+		BootstrapConfigWithPeers(nil, WithBackupPeers(loadFunc, nil))
+	})
+
+	assertPanics(t, "with only save func", func() {
+		BootstrapConfigWithPeers(nil, WithBackupPeers(nil, saveFunc))
+	})
+
+	bootCfg = BootstrapConfigWithPeers(nil, WithBackupPeers(nil, nil))
+	load, save = bootCfg.BackupPeers()
+	if load != nil || save != nil {
+		t.Fatal("load and save functions should both be nil")
+	}
+}
+
+func TestSetBackupPeers(t *testing.T) {
+	loadFunc := func(_ context.Context) []peer.AddrInfo { return nil }
+	saveFunc := func(_ context.Context, _ []peer.AddrInfo) {}
+
+	bootCfg := DefaultBootstrapConfig
+	bootCfg.SetBackupPeers(loadFunc, saveFunc)
+	load, save := bootCfg.BackupPeers()
+	if load == nil {
+		t.Fatal("load function not assigned")
+	}
+	if reflect.ValueOf(load).Pointer() != reflect.ValueOf(loadFunc).Pointer() {
+		t.Fatal("load not assigned correct function")
+	}
+	if save == nil {
+		t.Fatal("save function not assigned")
+	}
+	if reflect.ValueOf(save).Pointer() != reflect.ValueOf(saveFunc).Pointer() {
+		t.Fatal("save not assigned correct function")
+	}
+
+	assertPanics(t, "with only load func", func() {
+		bootCfg.SetBackupPeers(loadFunc, nil)
+	})
+
+	assertPanics(t, "with only save func", func() {
+		bootCfg.SetBackupPeers(nil, saveFunc)
+	})
+
+	bootCfg.SetBackupPeers(nil, nil)
+	load, save = bootCfg.BackupPeers()
+	if load != nil || save != nil {
+		t.Fatal("load and save functions should both be nil")
+	}
+}
+
+func TestNoTempPeersLoadAndSave(t *testing.T) {
+	period := 500 * time.Millisecond
+	bootCfg := BootstrapConfigWithPeers(nil)
+	bootCfg.MinPeerThreshold = 2
+	bootCfg.Period = period
+
+	priv, pub, err := crypto.GenerateEd25519Key(rand.Reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	peerID, err := peer.IDFromPublicKey(pub)
+	if err != nil {
+		t.Fatal(err)
+	}
+	p2pHost, err := libp2p.New(libp2p.Identity(priv))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	bootstrapper, err := Bootstrap(peerID, p2pHost, nil, bootCfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	time.Sleep(4 * period)
+	bootstrapper.Close()
+
+}
+
+func assertPanics(t *testing.T, name string, f func()) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("%s: did not panic as expected", name)
+		}
+	}()
+
+	f()
+}

From 24ee956efbf5fe037dfa00e5d1c0f9da023885c7 Mon Sep 17 00:00:00 2001
From: Jorropo <jorropo.pgm@gmail.com>
Date: Wed, 6 Sep 2023 00:32:10 +0200
Subject: [PATCH 11/15] perf: make bootstrap saves O(N)

This commit was moved from ipfs/kubo@66590e350f6dbdf9da77e9548a91f9ce96d803dd
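
The pattern applied here, in isolation: a map used as a set of
already-seen peer IDs replaces the former nested scans, so the
filtering becomes a single pass (variable names match the patch):

    foundPeers := make(map[peer.ID]struct{}, cfg.MaxBackupBootstrapSize+len(bootstrapPeers))
    for _, b := range bootstrapPeers {
        foundPeers[b.ID] = struct{}{} // never save a bootstrap peer as backup
    }
    for _, p := range connectedPeers {
        if _, found := foundPeers[p]; found {
            continue
        }
        foundPeers[p] = struct{}{}
        // append p (with its addresses) to backupPeers ...
    }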
---
 core/bootstrap/bootstrap.go | 39 +++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 21 deletions(-)

diff --git a/core/bootstrap/bootstrap.go b/core/bootstrap/bootstrap.go
index acd7ef672..5cde50371 100644
--- a/core/bootstrap/bootstrap.go
+++ b/core/bootstrap/bootstrap.go
@@ -192,22 +192,24 @@ func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host,
 
 	bootstrapPeers := cfg.BootstrapPeers()
 	backupPeers := make([]peer.AddrInfo, 0, cfg.MaxBackupBootstrapSize)
+	foundPeers := make(map[peer.ID]struct{}, cfg.MaxBackupBootstrapSize+len(bootstrapPeers))
+
+	// Don't record bootstrap peers
+	for _, b := range bootstrapPeers {
+		foundPeers[b.ID] = struct{}{}
+	}
 
 	// Choose peers to save and filter out the ones that are already bootstrap nodes.
 	for _, p := range connectedPeers {
-		found := false
-		for _, bootstrapPeer := range bootstrapPeers {
-			if p == bootstrapPeer.ID {
-				found = true
-				break
-			}
-		}
-		if !found {
-			backupPeers = append(backupPeers, peer.AddrInfo{
-				ID:    p,
-				Addrs: host.Network().Peerstore().Addrs(p),
-			})
+		if _, found := foundPeers[p]; found {
+			continue
 		}
+		foundPeers[p] = struct{}{}
+
+		backupPeers = append(backupPeers, peer.AddrInfo{
+			ID:    p,
+			Addrs: host.Network().Peerstore().Addrs(p),
+		})
 
 		if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
 			break
@@ -222,17 +224,12 @@ func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host,
 
 		// Add some of the old saved peers. Ensure we don't duplicate them.
 		for _, p := range oldSavedPeers {
-			found := false
-			for _, sp := range backupPeers {
-				if p.ID == sp.ID {
-					found = true
-					break
-				}
+			if _, found := foundPeers[p.ID]; found {
+				continue
 			}
+			foundPeers[p.ID] = struct{}{}
 
-			if !found {
-				backupPeers = append(backupPeers, p)
-			}
+			backupPeers = append(backupPeers, p)
 
 			if len(backupPeers) >= cfg.MaxBackupBootstrapSize {
 				break

From a29dc5a758a53ad2a172c34c5cfb9cae82d55969 Mon Sep 17 00:00:00 2001
From: gammazero <gammazero@users.noreply.github.com>
Date: Thu, 28 Sep 2023 18:39:02 -0700
Subject: [PATCH 12/15] Move bootstrap from core to top level directory

---
 {core/bootstrap => bootstrap}/bootstrap.go      | 0
 {core/bootstrap => bootstrap}/bootstrap_test.go | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename {core/bootstrap => bootstrap}/bootstrap.go (100%)
 rename {core/bootstrap => bootstrap}/bootstrap_test.go (100%)

diff --git a/core/bootstrap/bootstrap.go b/bootstrap/bootstrap.go
similarity index 100%
rename from core/bootstrap/bootstrap.go
rename to bootstrap/bootstrap.go
diff --git a/core/bootstrap/bootstrap_test.go b/bootstrap/bootstrap_test.go
similarity index 100%
rename from core/bootstrap/bootstrap_test.go
rename to bootstrap/bootstrap_test.go

From 48b732db106798edfc1ee2f55459fd6ad182f668 Mon Sep 17 00:00:00 2001
From: gammazero <gammazero@users.noreply.github.com>
Date: Thu, 28 Sep 2023 18:44:00 -0700
Subject: [PATCH 13/15] Update CHANGELOG and mod tidy

---
 CHANGELOG.md | 1 +
 go.sum       | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c17a9c46e..c4d8baad4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ The following emojis are used to highlight certain changes:
 * ✨ Migrated repositories into Boxo
   * [`github.com/ipfs/kubo/peering`](https://pkg.go.dev/github.com/ipfs/kubo/peering) => [`./peering`](./peering)
    A service which establishes, oversees, and maintains long-lived connections.
+  * github.com/ipfs/kubo/core/bootstrap => ./bootstrap
 
 ### Changed
 
diff --git a/go.sum b/go.sum
index 8beabe300..25af3632d 100644
--- a/go.sum
+++ b/go.sum
@@ -307,6 +307,7 @@ github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOan
 github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo=
 github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
 github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc=
 github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
 github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
 github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=

From 133a4650e25082f5bff7577690800ea5bd5e18e0 Mon Sep 17 00:00:00 2001
From: gammazero <gammazero@users.noreply.github.com>
Date: Wed, 25 Oct 2023 14:55:34 -0700
Subject: [PATCH 14/15] Update CHANGELOG.md

---
 CHANGELOG.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c4d8baad4..f334cca8a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,7 +24,8 @@ The following emojis are used to highlight certain changes:
 * ✨ Migrated repositories into Boxo
   * [`github.com/ipfs/kubo/peering`](https://pkg.go.dev/github.com/ipfs/kubo/peering) => [`./peering`](./peering)
    A service which establishes, oversees, and maintains long-lived connections.
-  * github.com/ipfs/kubo/core/bootstrap => ./bootstrap
+  * [`github.com/ipfs/kubo/core/bootstrap`](https://pkg.go.dev/github.com/ipfs/kubo/core/bootstrap) => [`./bootstrap`](./bootstrap)
+    A service that maintains connections to a number of bootstrap peers.
 
 ### Changed
 

From 45426dbd133ae7eaa473d4bc3e5eff88ea6d113f Mon Sep 17 00:00:00 2001
From: Andrew Gillis <gammazero@users.noreply.github.com>
Date: Wed, 25 Oct 2023 20:55:49 -0700
Subject: [PATCH 15/15] Update bootstrap/bootstrap.go

Co-authored-by: Jorropo <jorropo.pgm@gmail.com>
---
 bootstrap/bootstrap.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bootstrap/bootstrap.go b/bootstrap/bootstrap.go
index 5cde50371..347d98797 100644
--- a/bootstrap/bootstrap.go
+++ b/bootstrap/bootstrap.go
@@ -67,7 +67,7 @@ type BootstrapConfig struct {
 var DefaultBootstrapConfig = BootstrapConfig{
 	MinPeerThreshold:        4,
 	Period:                  30 * time.Second,
-	ConnectionTimeout:       (30 * time.Second) / 3, // Perod / 3
+	ConnectionTimeout:       (30 * time.Second) / 3, // Period / 3
 	BackupBootstrapInterval: 1 * time.Hour,
 	MaxBackupBootstrapSize:  20,
 }