Skip to content

Commit

Permalink
peerstore: fix addressbook benchmark timing
Browse files Browse the repository at this point in the history
The previous code did not accurately account for the time spent
generating test cases. In the benchmark output below, the difference
between the 100-addresses-per-peer suites is huge:

BenchmarkInMemoryPeerstore/SetAddrs-100Addrs-InMem-16              10000	    190198 ns/op
vs
BenchmarkInMemoryPeerstore/SetAddrs-100-16                	   10000	     42710 ns/op

old benchmark:

```
goos: linux
goarch: amd64
pkg: github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem
cpu: AMD Ryzen 7 7840U w/ Radeon  780M Graphics
BenchmarkInMemoryPeerstore
BenchmarkInMemoryPeerstore/AddAddrs-1Addrs-InMem
BenchmarkInMemoryPeerstore/AddAddrs-1Addrs-InMem-16         	   10000	      4158 ns/op
BenchmarkInMemoryPeerstore/AddAddrs-10Addrs-InMem
BenchmarkInMemoryPeerstore/AddAddrs-10Addrs-InMem-16        	   10000	     26364 ns/op
BenchmarkInMemoryPeerstore/AddAddrs-100Addrs-InMem
BenchmarkInMemoryPeerstore/AddAddrs-100Addrs-InMem-16       	   10000	    197692 ns/op
BenchmarkInMemoryPeerstore/AddGetAndClearAddrs-1Addrs-InMem
BenchmarkInMemoryPeerstore/AddGetAndClearAddrs-1Addrs-InMem-16         	   10000	      3692 ns/op
BenchmarkInMemoryPeerstore/AddGetAndClearAddrs-10Addrs-InMem
BenchmarkInMemoryPeerstore/AddGetAndClearAddrs-10Addrs-InMem-16        	   10000	     23182 ns/op
BenchmarkInMemoryPeerstore/AddGetAndClearAddrs-100Addrs-InMem
BenchmarkInMemoryPeerstore/AddGetAndClearAddrs-100Addrs-InMem-16       	   10000	    202959 ns/op
BenchmarkInMemoryPeerstore/Get1000PeersWithAddrs-1Addrs-InMem
BenchmarkInMemoryPeerstore/Get1000PeersWithAddrs-1Addrs-InMem-16       	   10000	     25348 ns/op
BenchmarkInMemoryPeerstore/Get1000PeersWithAddrs-10Addrs-InMem
BenchmarkInMemoryPeerstore/Get1000PeersWithAddrs-10Addrs-InMem-16      	   10000	     20133 ns/op
BenchmarkInMemoryPeerstore/Get1000PeersWithAddrs-100Addrs-InMem
BenchmarkInMemoryPeerstore/Get1000PeersWithAddrs-100Addrs-InMem-16     	   10000	     17525 ns/op
BenchmarkInMemoryPeerstore/GetAddrs-1Addrs-InMem
BenchmarkInMemoryPeerstore/GetAddrs-1Addrs-InMem-16                    	   10000	       163.5 ns/op
BenchmarkInMemoryPeerstore/GetAddrs-10Addrs-InMem
BenchmarkInMemoryPeerstore/GetAddrs-10Addrs-InMem-16                   	   10000	       203.9 ns/op
BenchmarkInMemoryPeerstore/GetAddrs-100Addrs-InMem
BenchmarkInMemoryPeerstore/GetAddrs-100Addrs-InMem-16                  	   10000	      1696 ns/op
BenchmarkInMemoryPeerstore/SetAddrs-1Addrs-InMem
BenchmarkInMemoryPeerstore/SetAddrs-1Addrs-InMem-16                    	   10000	      3431 ns/op
BenchmarkInMemoryPeerstore/SetAddrs-10Addrs-InMem
BenchmarkInMemoryPeerstore/SetAddrs-10Addrs-InMem-16                   	   10000	     21599 ns/op
BenchmarkInMemoryPeerstore/SetAddrs-100Addrs-InMem
BenchmarkInMemoryPeerstore/SetAddrs-100Addrs-InMem-16                  	   10000	    190198 ns/op
```

new benchmark:
```
goos: linux
goarch: amd64
pkg: github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem
cpu: AMD Ryzen 7 7840U w/ Radeon  780M Graphics
BenchmarkInMemoryPeerstore
BenchmarkInMemoryPeerstore/AddAddrs-1
BenchmarkInMemoryPeerstore/AddAddrs-1-16         	   10000	       899.2 ns/op
BenchmarkInMemoryPeerstore/GetAddrs-1
BenchmarkInMemoryPeerstore/GetAddrs-1-16         	   10000	       914.9 ns/op
BenchmarkInMemoryPeerstore/GetAndClearAddrs-1
BenchmarkInMemoryPeerstore/GetAndClearAddrs-1-16 	   10000	       699.5 ns/op
BenchmarkInMemoryPeerstore/PeersWithAddrs-1
BenchmarkInMemoryPeerstore/PeersWithAddrs-1-16   	   10000	    112298 ns/op
BenchmarkInMemoryPeerstore/SetAddrs-1
BenchmarkInMemoryPeerstore/SetAddrs-1-16         	   10000	       764.7 ns/op
BenchmarkInMemoryPeerstore/AddAddrs-10
BenchmarkInMemoryPeerstore/AddAddrs-10-16        	   10000	      4682 ns/op
BenchmarkInMemoryPeerstore/GetAddrs-10
BenchmarkInMemoryPeerstore/GetAddrs-10-16        	   10000	      5310 ns/op
BenchmarkInMemoryPeerstore/GetAndClearAddrs-10
BenchmarkInMemoryPeerstore/GetAndClearAddrs-10-16         	   10000	      4147 ns/op
BenchmarkInMemoryPeerstore/PeersWithAddrs-10
BenchmarkInMemoryPeerstore/PeersWithAddrs-10-16           	   10000	     93408 ns/op
BenchmarkInMemoryPeerstore/SetAddrs-10
BenchmarkInMemoryPeerstore/SetAddrs-10-16                 	   10000	      4412 ns/op
BenchmarkInMemoryPeerstore/AddAddrs-100
BenchmarkInMemoryPeerstore/AddAddrs-100-16                	   10000	     40847 ns/op
BenchmarkInMemoryPeerstore/GetAddrs-100
BenchmarkInMemoryPeerstore/GetAddrs-100-16                	   10000	     40197 ns/op
BenchmarkInMemoryPeerstore/GetAndClearAddrs-100
BenchmarkInMemoryPeerstore/GetAndClearAddrs-100-16        	   10000	     37848 ns/op
BenchmarkInMemoryPeerstore/PeersWithAddrs-100
BenchmarkInMemoryPeerstore/PeersWithAddrs-100-16          	   10000	    111613 ns/op
BenchmarkInMemoryPeerstore/SetAddrs-100
BenchmarkInMemoryPeerstore/SetAddrs-100-16                	   10000	     42710 ns/op
```
  • Loading branch information
sukunrt committed Dec 7, 2024
1 parent b3209ef commit 3b1ef8a
Show file tree
Hide file tree
Showing 2 changed files with 60 additions and 120 deletions.
164 changes: 55 additions & 109 deletions p2p/host/peerstore/test/benchmarks_suite.go
Original file line number Diff line number Diff line change
@@ -1,124 +1,70 @@
package test

import (
"context"
"fmt"
"sort"
"testing"

pstore "github.com/libp2p/go-libp2p/core/peerstore"
)

var peerstoreBenchmarks = map[string]func(pstore.Peerstore, chan *peerpair) func(*testing.B){
"AddAddrs": benchmarkAddAddrs,
"SetAddrs": benchmarkSetAddrs,
"GetAddrs": benchmarkGetAddrs,
// The in-between get allows us to benchmark the read-through cache.
"AddGetAndClearAddrs": benchmarkAddGetAndClearAddrs,
// Calls PeersWithAddr on a peerstore with 1000 peers.
"Get1000PeersWithAddrs": benchmarkGet1000PeersWithAddrs,
}

func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, variant string) {
// Parameterises benchmarks to tackle peers with 1, 10, 100 multiaddrs.
params := []struct {
n int
ch chan *peerpair
}{
{1, make(chan *peerpair, 100)},
{10, make(chan *peerpair, 100)},
{100, make(chan *peerpair, 100)},
}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// Start all test peer producing goroutines, where each produces peers with as many
// multiaddrs as the n field in the param struct.
for _, p := range params {
go AddressProducer(ctx, b, p.ch, p.n)
}

// So tests are always run in the same order.
ordernames := make([]string, 0, len(peerstoreBenchmarks))
for name := range peerstoreBenchmarks {
ordernames = append(ordernames, name)
}
sort.Strings(ordernames)

for _, name := range ordernames {
bench := peerstoreBenchmarks[name]
for _, p := range params {
// Create a new peerstore.
ps, closeFunc := factory()

// Run the test.
b.Run(fmt.Sprintf("%s-%dAddrs-%s", name, p.n, variant), bench(ps, p.ch))

// Cleanup.
if closeFunc != nil {
closeFunc()
for _, sz := range []int{1, 10, 100} {
const N = 10000
peers := getPeerPairs(b, N, sz)

b.Run(fmt.Sprintf("AddAddrs-%d", sz), func(b *testing.B) {
ps, cleanup := factory()
defer cleanup()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pp := peers[i%N]
ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
}
}
}
}

// benchmarkAddAddrs measures the cost of Peerstore.AddAddrs in isolation.
//
// Bug fixed: the previous version received from the producer channel
// inside the timed loop, so peer/multiaddr generation (and any channel
// stall waiting on the producer goroutine) was charged to AddAddrs and
// inflated ns/op. All b.N inputs are now drained before ResetTimer so
// only the AddAddrs call itself is measured.
func benchmarkAddAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
	return func(b *testing.B) {
		// Pre-generate every input outside the measured region.
		pairs := make([]*peerpair, b.N)
		for i := range pairs {
			pairs[i] = <-addrs
		}
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			pp := pairs[i]
			ps.AddAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
		}
	}
}

// benchmarkSetAddrs measures the cost of Peerstore.SetAddrs in isolation.
//
// Bug fixed: the previous version received from the producer channel
// inside the timed loop, so test-case generation time was counted
// against SetAddrs. Inputs are now drained into a slice before
// ResetTimer, leaving only the SetAddrs call in the timed region.
func benchmarkSetAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
	return func(b *testing.B) {
		// Pre-generate every input outside the measured region.
		pairs := make([]*peerpair, b.N)
		for i := range pairs {
			pairs[i] = <-addrs
		}
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			pp := pairs[i]
			ps.SetAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
		}
	}
}

// benchmarkGetAddrs measures Addrs lookups for a single peer whose
// addresses are stored before the timer starts, so only the read path
// is in the timed region.
func benchmarkGetAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
	return func(b *testing.B) {
		// Seed the store once with a generated peer; setup is untimed.
		pair := <-addrs
		ps.SetAddrs(pair.ID, pair.Addr, pstore.PermanentAddrTTL)

		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			_ = ps.Addrs(pair.ID)
		}
	}
}

// benchmarkAddGetAndClearAddrs measures an add/read/clear cycle. The
// in-between Addrs call exercises the read-through cache.
//
// Bug fixed: the previous version received from the producer channel
// inside the timed loop, charging test-case generation to the cycle
// under test. All b.N inputs are now drained before ResetTimer.
func benchmarkAddGetAndClearAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
	return func(b *testing.B) {
		// Pre-generate every input outside the measured region.
		pairs := make([]*peerpair, b.N)
		for i := range pairs {
			pairs[i] = <-addrs
		}
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			pp := pairs[i]
			ps.AddAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
			ps.Addrs(pp.ID)
			ps.ClearAddrs(pp.ID)
		}
	}
}
})

b.Run(fmt.Sprintf("GetAddrs-%d", sz), func(b *testing.B) {
ps, cleanup := factory()
defer cleanup()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pp := peers[i%N]
ps.SetAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
}
})

b.Run(fmt.Sprintf("GetAndClearAddrs-%d", sz), func(b *testing.B) {
ps, cleanup := factory()
defer cleanup()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pp := peers[i%N]
ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
ps.Addrs(pp.ID)
ps.ClearAddrs(pp.ID)
}
})

func benchmarkGet1000PeersWithAddrs(ps pstore.Peerstore, addrs chan *peerpair) func(*testing.B) {
return func(b *testing.B) {
var peers = make([]*peerpair, 1000)
for i := range peers {
pp := <-addrs
ps.AddAddrs(pp.ID, pp.Addr, pstore.PermanentAddrTTL)
peers[i] = pp
}
b.Run(fmt.Sprintf("PeersWithAddrs-%d", sz), func(b *testing.B) {
ps, cleanup := factory()
defer cleanup()
for _, pp := range peers {
ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
}

b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = ps.PeersWithAddrs()
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = ps.PeersWithAddrs()
}
})

b.Run(fmt.Sprintf("SetAddrs-%d", sz), func(b *testing.B) {
ps, cleanup := factory()
defer cleanup()
b.ResetTimer()
for i := 0; i < b.N; i++ {
pp := peers[i%N]
ps.SetAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
}
})
}
}
16 changes: 5 additions & 11 deletions p2p/host/peerstore/test/utils.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package test

import (
"context"
"fmt"
"testing"

Expand Down Expand Up @@ -45,17 +44,12 @@ func RandomPeer(b *testing.B, addrCount int) *peerpair {
return &peerpair{pid, addrs}
}

func AddressProducer(ctx context.Context, b *testing.B, addrs chan *peerpair, addrsPerPeer int) {
b.Helper()
defer close(addrs)
for {
p := RandomPeer(b, addrsPerPeer)
select {
case addrs <- p:
case <-ctx.Done():
return
}
// getPeerPairs builds n random test peers, each carrying addrsPerPeer
// multiaddrs, for use as pre-generated benchmark fixtures.
func getPeerPairs(b *testing.B, n int, addrsPerPeer int) []*peerpair {
	pairs := make([]*peerpair, 0, n)
	for len(pairs) < n {
		pairs = append(pairs, RandomPeer(b, addrsPerPeer))
	}
	return pairs
}

func GenerateAddrs(count int) []ma.Multiaddr {
Expand Down

0 comments on commit 3b1ef8a

Please sign in to comment.