storage: add Benchmarks for ScrapeSwarm, optimize implementations

Leo Balduf 2017-08-24 12:45:17 +02:00
parent d026424038
commit 5400a99b75
5 changed files with 48 additions and 6 deletions

@@ -518,13 +518,14 @@ func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorren
     shard := ps.shards[ps.shardIndex(ih, addressFamily)]
     shard.RLock()
-    if _, ok := shard.swarms[ih]; !ok {
+    swarm, ok := shard.swarms[ih]
+    if !ok {
         shard.RUnlock()
         return
     }
-    resp.Incomplete = uint32(len(shard.swarms[ih].leechers))
-    resp.Complete = uint32(len(shard.swarms[ih].seeders))
+    resp.Incomplete = uint32(len(swarm.leechers))
+    resp.Complete = uint32(len(swarm.seeders))
     shard.RUnlock()
     return
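
A note on this hunk: both before and after the change, the counts are taken under the shard's read lock; what changes is that the swarm is fetched from shard.swarms once and reused, instead of being looked up again for the existence check and for each count. Below is a minimal sketch of how the method reads after the change, assuming the in-memory store's shard/swarm types and a named bittorrent.Scrape return value (suggested by the resp assignments and the bare return in the hunk); any code earlier in the method is omitted.

func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) (resp bittorrent.Scrape) {
    // ... any earlier setup in the method omitted ...
    shard := ps.shards[ps.shardIndex(ih, addressFamily)]
    shard.RLock()

    // One map lookup; the result is reused instead of re-indexing shard.swarms[ih].
    swarm, ok := shard.swarms[ih]
    if !ok {
        shard.RUnlock()
        return
    }

    resp.Incomplete = uint32(len(swarm.leechers))
    resp.Complete = uint32(len(swarm.seeders))
    shard.RUnlock()

    return
}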

@@ -18,6 +18,7 @@ func createNew() s.PeerStore {
 func TestPeerStore(t *testing.T) { s.TestPeerStore(t, createNew()) }
+func BenchmarkNop(b *testing.B) { s.Nop(b, createNew()) }
 func BenchmarkPut(b *testing.B) { s.Put(b, createNew()) }
 func BenchmarkPut1k(b *testing.B) { s.Put1k(b, createNew()) }
 func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, createNew()) }
@@ -42,3 +43,5 @@ func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, cr
 func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, createNew()) }
 func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, createNew()) }
 func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, createNew()) }
+func BenchmarkScrapeSwarm(b *testing.B) { s.ScrapeSwarm(b, createNew()) }
+func BenchmarkScrapeSwarm1kInfohash(b *testing.B) { s.ScrapeSwarm1kInfohash(b, createNew()) }

@@ -643,13 +643,14 @@ func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorren
     shard := ps.shards[ps.shardIndex(ih, addressFamily)]
     shard.RLock()
-    if _, ok := shard.swarms[ih]; !ok {
+    swarm, ok := shard.swarms[ih]
+    if !ok {
         shard.RUnlock()
         return
     }
-    resp.Incomplete = uint32(shard.swarms[ih].lenLeechers())
-    resp.Complete = uint32(shard.swarms[ih].lenSeeders())
+    resp.Incomplete = uint32(swarm.lenLeechers())
+    resp.Complete = uint32(swarm.lenSeeders())
     shard.RUnlock()
     return

@@ -23,6 +23,7 @@ func createNew() s.PeerStore {
 func TestPeerStore(t *testing.T) { s.TestPeerStore(t, createNew()) }
+func BenchmarkNop(b *testing.B) { s.Nop(b, createNew()) }
 func BenchmarkPut(b *testing.B) { s.Put(b, createNew()) }
 func BenchmarkPut1k(b *testing.B) { s.Put1k(b, createNew()) }
 func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, createNew()) }
@@ -47,3 +48,5 @@ func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, cr
 func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, createNew()) }
 func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, createNew()) }
 func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, createNew()) }
+func BenchmarkScrapeSwarm(b *testing.B) { s.ScrapeSwarm(b, createNew()) }
+func BenchmarkScrapeSwarm1kInfohash(b *testing.B) { s.ScrapeSwarm1kInfohash(b, createNew()) }

@@ -95,6 +95,19 @@ func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef ex
     }
 }
+
+// Nop executes a no-op for each iteration.
+// It should produce the same results for each PeerStore.
+// This can be used to get an estimate of the impact of the benchmark harness
+// on benchmark results and an estimate of the general performance of the system
+// benchmarked on.
+//
+// Nop can run in parallel.
+func Nop(b *testing.B, ps PeerStore) {
+    runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
+        return nil
+    })
+}
 
 // Put benchmarks the PutSeeder method of a PeerStore by repeatedly Putting the
 // same Peer for the same InfoHash.
 //
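
A note on reading the Nop benchmark added above (standard Go benchmark tooling; the package path below is an illustration, not taken from this diff): since Nop runs the full harness and setup but performs no storage work per iteration, its ns/op approximates the per-iteration overhead of the harness itself, and subtracting it from another benchmark's ns/op gives a rough estimate of that operation's own cost. For example:

go test -run '^$' -bench 'Nop|ScrapeSwarm' ./storage/memory/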
@@ -415,3 +428,24 @@ func AnnounceSeeder1kInfohash(b *testing.B, ps PeerStore) {
         return err
     })
 }
+
+// ScrapeSwarm benchmarks the ScrapeSwarm method of a PeerStore.
+// The swarm scraped has 500 seeders and 500 leechers.
+//
+// ScrapeSwarm can run in parallel.
+func ScrapeSwarm(b *testing.B, ps PeerStore) {
+    runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
+        ps.ScrapeSwarm(bd.infohashes[0], bittorrent.IPv4)
+        return nil
+    })
+}
+
+// ScrapeSwarm1kInfohash behaves like ScrapeSwarm with one of 1000 infohashes.
+//
+// ScrapeSwarm1kInfohash can run in parallel.
+func ScrapeSwarm1kInfohash(b *testing.B, ps PeerStore) {
+    runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
+        ps.ScrapeSwarm(bd.infohashes[i%1000], bittorrent.IPv4)
+        return nil
+    })
+}
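
For orientation, the two benchmarks above share the putPeers setup and a benchData value whose definitions are not part of this diff. The sketch below is a hypothetical illustration only, consistent with the doc comments above (1000 infohashes, each swarm holding 500 leechers and 500 seeders); it is not the repository's actual putPeers/benchData code, and fillSwarms and its parameters are made-up names.

// Hypothetical sketch, not the real setup code: fill every swarm with 500
// leechers and 500 seeders so that ScrapeSwarm(infohashes[0], ...) and
// ScrapeSwarm(infohashes[i%1000], ...) both scrape a 500/500 swarm.
func fillSwarms(ps PeerStore, infohashes []bittorrent.InfoHash, peers []bittorrent.Peer) error {
    for _, ih := range infohashes { // assumed: 1000 infohashes
        for j, p := range peers { // assumed: 1000 distinct peers
            var err error
            if j < len(peers)/2 {
                err = ps.PutLeecher(ih, p) // first half of the peers leech
            } else {
                err = ps.PutSeeder(ih, p) // second half seed
            }
            if err != nil {
                return err
            }
        }
    }
    return nil
}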