From 7e819dbb0c4f3fadab0d9a3640c9a3b414741eb8 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sat, 18 Jun 2016 15:02:38 -0400 Subject: [PATCH 1/8] pkg: add stopper --- pkg/stopper/stopper.go | 101 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 pkg/stopper/stopper.go diff --git a/pkg/stopper/stopper.go b/pkg/stopper/stopper.go new file mode 100644 index 0000000..ddf1a21 --- /dev/null +++ b/pkg/stopper/stopper.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package stopper + +import ( + "sync" +) + +// AlreadyStopped is a closed error channel to be used by StopperFuncs when +// an element was already stopped. +var AlreadyStopped <-chan error + +// AlreadyStoppedFunc is a StopperFunc that returns AlreadyStopped. +var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped } + +func init() { + closeMe := make(chan error) + close(closeMe) + AlreadyStopped = closeMe +} + +// Stopper is an interface that allows a clean shutdown. +type Stopper interface { + // Stop returns a channel that indicates whether the stop was + // successful. + // The channel can either return one error or be closed. Closing the + // channel signals a clean shutdown. + // The Stop function should return immediately and perform the actual + // shutdown in a separate goroutine. + Stop() <-chan error +} + +// StopGroup is a group that can be stopped. +type StopGroup struct { + stoppables []StopperFunc + stoppablesLock sync.Mutex +} + +// StopperFunc is a function that can be used to provide a clean shutdown. +type StopperFunc func() <-chan error + +// NewStopGroup creates a new StopGroup. +func NewStopGroup() *StopGroup { + return &StopGroup{ + stoppables: make([]StopperFunc, 0), + } +} + +// Add adds a Stopper to the StopGroup. +// On the next call to Stop(), the Stopper will be stopped. 
+func (cg *StopGroup) Add(toAdd Stopper) { + cg.stoppablesLock.Lock() + defer cg.stoppablesLock.Unlock() + + cg.stoppables = append(cg.stoppables, toAdd.Stop) +} + +// AddFunc adds a StopperFunc to the StopGroup. +// On the next call to Stop(), the StopperFunc will be called. +func (cg *StopGroup) AddFunc(toAddFunc StopperFunc) { + cg.stoppablesLock.Lock() + defer cg.stoppablesLock.Unlock() + + cg.stoppables = append(cg.stoppables, toAddFunc) +} + +// Stop stops all members of the StopGroup. +// Stopping will be done in a concurrent fashion. +// The slice of errors returned contains all errors returned by stopping the +// members. +func (cg *StopGroup) Stop() []error { + cg.stoppablesLock.Lock() + defer cg.stoppablesLock.Unlock() + + var errors []error + whenDone := make(chan struct{}) + + waitChannels := make([]<-chan error, 0, len(cg.stoppables)) + for _, toStop := range cg.stoppables { + waitFor := toStop() + if waitFor == nil { + panic("received a nil chan from Stop") + } + waitChannels = append(waitChannels, waitFor) + } + + go func() { + for _, waitForMe := range waitChannels { + err := <-waitForMe + if err != nil { + errors = append(errors, err) + } + } + close(whenDone) + }() + + <-whenDone + return errors +} From f4101f83e0f04e1048924e2f9f0190bd77677c3a Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sun, 19 Jun 2016 12:49:43 -0400 Subject: [PATCH 2/8] store: use stopper, extract StringStore tests --- server/store/ip_store.go | 8 + server/store/memory/ip_store.go | 57 +++++ server/store/memory/ip_store_test.go | 194 ++++++++++-------- server/store/memory/peer_store.go | 84 ++++++++ server/store/memory/peer_store_test.go | 60 +++--- server/store/memory/string_store.go | 32 +++ server/store/memory/string_store_test.go | 61 +----- server/store/middleware/infohash/blacklist.go | 2 +- .../middleware/infohash/blacklist_test.go | 5 + server/store/peer_store.go | 7 + server/store/store.go | 37 ++-- server/store/store_tests.go | 95 +++++++++ 
server/store/string_store.go | 12 +- 13 files changed, 465 insertions(+), 189 deletions(-) create mode 100644 server/store/store_tests.go diff --git a/server/store/ip_store.go b/server/store/ip_store.go index e02afb5..7d7fc93 100644 --- a/server/store/ip_store.go +++ b/server/store/ip_store.go @@ -7,6 +7,8 @@ package store import ( "fmt" "net" + + "github.com/chihaya/chihaya/pkg/stopper" ) var ipStoreDrivers = make(map[string]IPStoreDriver) @@ -52,6 +54,12 @@ type IPStore interface { // Returns ErrResourceDoesNotExist if the given network is not // contained in the store. RemoveNetwork(network string) error + + // Stopper provides the Stop method that stops the IPStore. + // Stop should shut down the IPStore in a separate goroutine and send + // an error to the channel if the shutdown failed. If the shutdown + // was successful, the channel is to be closed. + stopper.Stopper } // IPStoreDriver represents an interface for creating a handle to the diff --git a/server/store/memory/ip_store.go b/server/store/memory/ip_store.go index dc2f4b8..8b01e66 100644 --- a/server/store/memory/ip_store.go +++ b/server/store/memory/ip_store.go @@ -23,6 +23,7 @@ func (d *ipStoreDriver) New(_ *store.DriverConfig) (store.IPStore, error) { return &ipStore{ ips: make(map[[16]byte]struct{}), networks: netmatch.New(), + closed: make(chan struct{}), }, nil } @@ -31,6 +32,7 @@ func (d *ipStoreDriver) New(_ *store.DriverConfig) (store.IPStore, error) { type ipStore struct { ips map[[16]byte]struct{} networks *netmatch.Trie + closed chan struct{} sync.RWMutex } @@ -65,6 +67,12 @@ func (s *ipStore) AddNetwork(network string) error { s.Lock() defer s.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + return s.networks.Add(key, length) } @@ -72,6 +80,12 @@ func (s *ipStore) AddIP(ip net.IP) error { s.Lock() defer s.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + s.ips[key(ip)] = 
struct{}{} return nil @@ -82,6 +96,12 @@ func (s *ipStore) HasIP(ip net.IP) (bool, error) { s.RLock() defer s.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + _, ok := s.ips[key] if ok { return true, nil @@ -99,6 +119,12 @@ func (s *ipStore) HasAnyIP(ips []net.IP) (bool, error) { s.RLock() defer s.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + for _, ip := range ips { key := key(ip) if _, ok := s.ips[key]; ok { @@ -121,6 +147,12 @@ func (s *ipStore) HasAllIPs(ips []net.IP) (bool, error) { s.RLock() defer s.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + for _, ip := range ips { key := key(ip) if _, ok := s.ips[key]; !ok { @@ -142,6 +174,12 @@ func (s *ipStore) RemoveIP(ip net.IP) error { s.Lock() defer s.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + if _, ok := s.ips[key]; !ok { return store.ErrResourceDoesNotExist } @@ -160,9 +198,28 @@ func (s *ipStore) RemoveNetwork(network string) error { s.Lock() defer s.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + err = s.networks.Remove(key, length) if err != nil && err == netmatch.ErrNotContained { return store.ErrResourceDoesNotExist } return err } + +func (s *ipStore) Stop() <-chan error { + toReturn := make(chan error) + go func() { + s.Lock() + defer s.Unlock() + s.ips = make(map[[16]byte]struct{}) + s.networks = netmatch.New() + close(s.closed) + close(toReturn) + }() + return toReturn +} diff --git a/server/store/memory/ip_store_test.go b/server/store/memory/ip_store_test.go index 80f7610..15b5a68 100644 --- a/server/store/memory/ip_store_test.go +++ b/server/store/memory/ip_store_test.go @@ -10,7 +10,7 @@ import ( "github.com/chihaya/chihaya/server/store" - "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" ) var ( @@ -31,7 +31,7 @@ func TestKey(t *testing.T) { for _, tt := range table { got := key(tt.input) - assert.Equal(t, got, tt.expected) + require.Equal(t, got, tt.expected) } } @@ -39,100 +39,108 @@ func TestIPStore(t *testing.T) { var d = &ipStoreDriver{} s, err := d.New(&store.DriverConfig{}) - assert.Nil(t, err) - assert.NotNil(t, s) + require.Nil(t, err) + require.NotNil(t, s) // check default state found, err := s.HasIP(v4) - assert.Nil(t, err) - assert.False(t, found) + require.Nil(t, err) + require.False(t, found) // check IPv4 err = s.AddIP(v4) - assert.Nil(t, err) + require.Nil(t, err) found, err = s.HasIP(v4) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) found, err = s.HasIP(v4s) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) found, err = s.HasIP(v6) - assert.Nil(t, err) - assert.False(t, found) + require.Nil(t, err) + require.False(t, found) // check removes err = s.RemoveIP(v6) - assert.NotNil(t, err) + require.NotNil(t, err) err = s.RemoveIP(v4s) - assert.Nil(t, err) + require.Nil(t, err) found, err = s.HasIP(v4) - assert.Nil(t, err) - assert.False(t, found) + require.Nil(t, err) + require.False(t, found) // check IPv6 err = s.AddIP(v6) - assert.Nil(t, err) + require.Nil(t, err) found, err = s.HasIP(v6) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) err = s.RemoveIP(v6) - assert.Nil(t, err) + require.Nil(t, err) found, err = s.HasIP(v6) - assert.Nil(t, err) - assert.False(t, found) + require.Nil(t, err) + require.False(t, found) + + errChan := s.Stop() + err = <-errChan + require.Nil(t, err, "IPStore shutdown must not fail") } func TestHasAllHasAny(t *testing.T) { var d = &ipStoreDriver{} s, err := d.New(&store.DriverConfig{}) - assert.Nil(t, err) - assert.NotNil(t, s) + require.Nil(t, err) + require.NotNil(t, s) found, err := s.HasAnyIP(nil) - assert.Nil(t, err) - 
assert.False(t, found) + require.Nil(t, err) + require.False(t, found) found, err = s.HasAllIPs(nil) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) found, err = s.HasAllIPs([]net.IP{v4}) - assert.Nil(t, err) - assert.False(t, found) + require.Nil(t, err) + require.False(t, found) err = s.AddIP(v4) - assert.Nil(t, err) + require.Nil(t, err) found, err = s.HasAnyIP([]net.IP{v4, v6}) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) found, err = s.HasAllIPs([]net.IP{v4, v6}) - assert.Nil(t, err) - assert.False(t, found) + require.Nil(t, err) + require.False(t, found) found, err = s.HasAllIPs([]net.IP{v4}) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) err = s.AddIP(v6) - assert.Nil(t, err) + require.Nil(t, err) found, err = s.HasAnyIP([]net.IP{v4, v6}) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) found, err = s.HasAllIPs([]net.IP{v4, v6}) - assert.Nil(t, err) - assert.True(t, found) + require.Nil(t, err) + require.True(t, found) + + errChan := s.Stop() + err = <-errChan + require.Nil(t, err, "IPStore shutdown must not fail") } func TestNetworks(t *testing.T) { @@ -145,46 +153,51 @@ func TestNetworks(t *testing.T) { ) s, err := d.New(&store.DriverConfig{}) - assert.Nil(t, err) + require.Nil(t, err) + require.NotNil(t, s) match, err := s.HasIP(includedIP) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) match, err = s.HasIP(excludedIP) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) err = s.AddNetwork("") - assert.NotNil(t, err) + require.NotNil(t, err) err = s.RemoveNetwork("") - assert.NotNil(t, err) + require.NotNil(t, err) err = s.AddNetwork(net1) - assert.Nil(t, err) + require.Nil(t, err) match, err = s.HasIP(includedIP) - assert.Nil(t, err) - assert.True(t, match) + require.Nil(t, err) + 
require.True(t, match) match, err = s.HasIP(excludedIP) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) err = s.RemoveNetwork(net2) - assert.NotNil(t, err) + require.NotNil(t, err) err = s.RemoveNetwork(net1) - assert.Nil(t, err) + require.Nil(t, err) match, err = s.HasIP(includedIP) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) match, err = s.HasIP(excludedIP) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) + + errChan := s.Stop() + err = <-errChan + require.Nil(t, err, "IPStore shutdown must not fail") } func TestHasAllHasAnyNetworks(t *testing.T) { @@ -197,61 +210,66 @@ func TestHasAllHasAnyNetworks(t *testing.T) { excluded = net.ParseIP("10.154.243.22") ) s, err := d.New(&store.DriverConfig{}) - assert.Nil(t, err) + require.Nil(t, err) + require.NotNil(t, s) match, err := s.HasAnyIP([]net.IP{inNet1, inNet2, excluded}) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) match, err = s.HasAllIPs([]net.IP{inNet1, inNet2, excluded}) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) err = s.AddNetwork(net1) - assert.Nil(t, err) + require.Nil(t, err) match, err = s.HasAnyIP([]net.IP{inNet1, inNet2}) - assert.Nil(t, err) - assert.True(t, match) + require.Nil(t, err) + require.True(t, match) match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) err = s.AddNetwork(net2) - assert.Nil(t, err) + require.Nil(t, err) match, err = s.HasAnyIP([]net.IP{inNet1, inNet2, excluded}) - assert.Nil(t, err) - assert.True(t, match) + require.Nil(t, err) + require.True(t, match) match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - assert.Nil(t, err) - assert.True(t, match) + require.Nil(t, err) + require.True(t, match) match, err = s.HasAllIPs([]net.IP{inNet1, inNet2, 
excluded}) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) err = s.RemoveNetwork(net1) - assert.Nil(t, err) + require.Nil(t, err) match, err = s.HasAnyIP([]net.IP{inNet1, inNet2}) - assert.Nil(t, err) - assert.True(t, match) + require.Nil(t, err) + require.True(t, match) match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) err = s.RemoveNetwork(net2) - assert.Nil(t, err) + require.Nil(t, err) match, err = s.HasAnyIP([]net.IP{inNet1, inNet2}) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - assert.Nil(t, err) - assert.False(t, match) + require.Nil(t, err) + require.False(t, match) + + errChan := s.Stop() + err = <-errChan + require.Nil(t, err, "IPStore shutdown must not fail") } diff --git a/server/store/memory/peer_store.go b/server/store/memory/peer_store.go index 639a89f..9c07deb 100644 --- a/server/store/memory/peer_store.go +++ b/server/store/memory/peer_store.go @@ -35,6 +35,7 @@ func (d *peerStoreDriver) New(storecfg *store.DriverConfig) (store.PeerStore, er } return &peerStore{ shards: shards, + closed: make(chan struct{}), }, nil } @@ -72,6 +73,7 @@ type peerShard struct { type peerStore struct { shards []*peerShard + closed chan struct{} } var _ store.PeerStore = &peerStore{} @@ -100,6 +102,12 @@ func (s *peerStore) PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error { shard.Lock() defer shard.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + if shard.peers[key] == nil { shard.peers[key] = make(map[string]peer) } @@ -118,6 +126,12 @@ func (s *peerStore) DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) erro shard.Lock() defer shard.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + if 
shard.peers[key] == nil { return store.ErrResourceDoesNotExist } @@ -143,6 +157,12 @@ func (s *peerStore) PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error shard.Lock() defer shard.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + if shard.peers[key] == nil { shard.peers[key] = make(map[string]peer) } @@ -161,6 +181,12 @@ func (s *peerStore) DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) err shard.Lock() defer shard.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + if shard.peers[key] == nil { return store.ErrResourceDoesNotExist } @@ -187,6 +213,12 @@ func (s *peerStore) GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) e shard.Lock() defer shard.Unlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + if shard.peers[lkey] != nil { delete(shard.peers[lkey], peerKey(p)) } @@ -243,6 +275,12 @@ func (s *peerStore) AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWan shard.RLock() defer shard.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + if seeder { // Append leechers as possible. 
leechers := shard.peers[lkey] @@ -307,6 +345,12 @@ func (s *peerStore) GetSeeders(infoHash chihaya.InfoHash) (peers, peers6 []chiha shard.RLock() defer shard.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + seeders := shard.peers[key] for _, p := range seeders { if p.IP.To4() == nil { @@ -324,6 +368,12 @@ func (s *peerStore) GetLeechers(infoHash chihaya.InfoHash) (peers, peers6 []chih shard.RLock() defer shard.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + leechers := shard.peers[key] for _, p := range leechers { if p.IP.To4() == nil { @@ -341,6 +391,12 @@ func (s *peerStore) NumSeeders(infoHash chihaya.InfoHash) int { shard.RLock() defer shard.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + return len(shard.peers[key]) } @@ -350,5 +406,33 @@ func (s *peerStore) NumLeechers(infoHash chihaya.InfoHash) int { shard.RLock() defer shard.RUnlock() + select { + case <-s.closed: + panic("attempted to interact with stopped store") + default: + } + return len(shard.peers[key]) } + +func (s *peerStore) Stop() <-chan error { + toReturn := make(chan error) + go func() { + oldshards := s.shards + for _, shard := range oldshards { + shard.Lock() + } + shards := make([]*peerShard, len(oldshards)) + for i := 0; i < len(oldshards); i++ { + shards[i] = &peerShard{} + shards[i].peers = make(map[string]map[string]peer) + } + s.shards = shards + close(s.closed) + for _, shard := range oldshards { + shard.Unlock() + } + close(toReturn) + }() + return toReturn +} diff --git a/server/store/memory/peer_store_test.go b/server/store/memory/peer_store_test.go index 8eb6a0c..42be5ee 100644 --- a/server/store/memory/peer_store_test.go +++ b/server/store/memory/peer_store_test.go @@ -11,7 +11,7 @@ import ( "github.com/chihaya/chihaya" "github.com/chihaya/chihaya/server/store" - "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" ) func peerInSlice(peer chihaya.Peer, peers []chihaya.Peer) bool { @@ -56,8 +56,8 @@ func TestPeerStoreAPI(t *testing.T) { d = &peerStoreDriver{} ) s, err := d.New(&config) - assert.Nil(t, err) - assert.NotNil(t, s) + require.Nil(t, err) + require.NotNil(t, s) for _, p := range peers { // Construct chihaya.Peer from test data. @@ -72,22 +72,22 @@ func TestPeerStoreAPI(t *testing.T) { } else { err = s.PutLeecher(hash, peer) } - assert.Nil(t, err) + require.Nil(t, err) } leechers1, leechers61, err := s.GetLeechers(hash) - assert.Nil(t, err) - assert.NotEmpty(t, leechers1) - assert.NotEmpty(t, leechers61) + require.Nil(t, err) + require.NotEmpty(t, leechers1) + require.NotEmpty(t, leechers61) num := s.NumLeechers(hash) - assert.Equal(t, len(leechers1)+len(leechers61), num) + require.Equal(t, len(leechers1)+len(leechers61), num) seeders1, seeders61, err := s.GetSeeders(hash) - assert.Nil(t, err) - assert.NotEmpty(t, seeders1) - assert.NotEmpty(t, seeders61) + require.Nil(t, err) + require.NotEmpty(t, seeders1) + require.NotEmpty(t, seeders61) num = s.NumSeeders(hash) - assert.Equal(t, len(seeders1)+len(seeders61), num) + require.Equal(t, len(seeders1)+len(seeders61), num) leechers := append(leechers1, leechers61...) seeders := append(seeders1, seeders61...) @@ -101,9 +101,9 @@ func TestPeerStoreAPI(t *testing.T) { } if p.seeder { - assert.True(t, peerInSlice(peer, seeders)) + require.True(t, peerInSlice(peer, seeders)) } else { - assert.True(t, peerInSlice(peer, leechers)) + require.True(t, peerInSlice(peer, leechers)) } if p.seeder { @@ -111,11 +111,11 @@ func TestPeerStoreAPI(t *testing.T) { } else { err = s.DeleteLeecher(hash, peer) } - assert.Nil(t, err) + require.Nil(t, err) } - assert.Zero(t, s.NumLeechers(hash)) - assert.Zero(t, s.NumSeeders(hash)) + require.Zero(t, s.NumLeechers(hash)) + require.Zero(t, s.NumSeeders(hash)) // Re-add all the peers to the peerStore. 
for _, p := range peers { @@ -133,27 +133,31 @@ func TestPeerStoreAPI(t *testing.T) { } // Check that there are 6 seeders, and 4 leechers. - assert.Equal(t, 6, s.NumSeeders(hash)) - assert.Equal(t, 4, s.NumLeechers(hash)) + require.Equal(t, 6, s.NumSeeders(hash)) + require.Equal(t, 4, s.NumLeechers(hash)) peer := chihaya.Peer{ ID: chihaya.PeerIDFromString(peers[0].peerID), IP: net.ParseIP(peers[0].ip), Port: peers[0].port, } err = s.GraduateLeecher(hash, peer) - assert.Nil(t, err) + require.Nil(t, err) // Check that there are 7 seeders, and 3 leechers after graduating a // leecher to a seeder. - assert.Equal(t, 7, s.NumSeeders(hash)) - assert.Equal(t, 3, s.NumLeechers(hash)) + require.Equal(t, 7, s.NumSeeders(hash)) + require.Equal(t, 3, s.NumLeechers(hash)) peers1, peers61, err := s.AnnouncePeers(hash, true, 5, peer, chihaya.Peer{}) - assert.Nil(t, err) - assert.NotNil(t, peers1) - assert.NotNil(t, peers61) + require.Nil(t, err) + require.NotNil(t, peers1) + require.NotNil(t, peers61) err = s.CollectGarbage(time.Now()) - assert.Nil(t, err) - assert.Equal(t, s.NumLeechers(hash), 0) - assert.Equal(t, s.NumSeeders(hash), 0) + require.Nil(t, err) + require.Equal(t, s.NumLeechers(hash), 0) + require.Equal(t, s.NumSeeders(hash), 0) + + errChan := s.Stop() + err = <-errChan + require.Nil(t, err, "PeerStore shutdown must not fail") } diff --git a/server/store/memory/string_store.go b/server/store/memory/string_store.go index 85cc4d4..531e3bb 100644 --- a/server/store/memory/string_store.go +++ b/server/store/memory/string_store.go @@ -19,11 +19,13 @@ type stringStoreDriver struct{} func (d *stringStoreDriver) New(_ *store.DriverConfig) (store.StringStore, error) { return &stringStore{ strings: make(map[string]struct{}), + closed: make(chan struct{}), }, nil } type stringStore struct { strings map[string]struct{} + closed chan struct{} sync.RWMutex } @@ -33,6 +35,12 @@ func (ss *stringStore) PutString(s string) error { ss.Lock() defer ss.Unlock() + select { + case 
<-ss.closed: + panic("attempted to interact with stopped store") + default: + } + ss.strings[s] = struct{}{} return nil @@ -42,6 +50,12 @@ func (ss *stringStore) HasString(s string) (bool, error) { ss.RLock() defer ss.RUnlock() + select { + case <-ss.closed: + panic("attempted to interact with stopped store") + default: + } + _, ok := ss.strings[s] return ok, nil @@ -51,6 +65,12 @@ func (ss *stringStore) RemoveString(s string) error { ss.Lock() defer ss.Unlock() + select { + case <-ss.closed: + panic("attempted to interact with stopped store") + default: + } + if _, ok := ss.strings[s]; !ok { return store.ErrResourceDoesNotExist } @@ -59,3 +79,15 @@ func (ss *stringStore) RemoveString(s string) error { return nil } + +func (ss *stringStore) Stop() <-chan error { + toReturn := make(chan error) + go func() { + ss.Lock() + defer ss.Unlock() + ss.strings = make(map[string]struct{}) + close(ss.closed) + close(toReturn) + }() + return toReturn +} diff --git a/server/store/memory/string_store_test.go b/server/store/memory/string_store_test.go index 32618e4..ccf5065 100644 --- a/server/store/memory/string_store_test.go +++ b/server/store/memory/string_store_test.go @@ -7,69 +7,14 @@ package memory import ( "testing" - "github.com/stretchr/testify/assert" - "github.com/chihaya/chihaya/server/store" ) var ( - driver = &stringStoreDriver{} - s1 = "abc" - s2 = "def" + driver = &stringStoreDriver{} + stringStoreTester = store.PrepareStringStoreTester(driver) ) func TestStringStore(t *testing.T) { - ss, err := driver.New(&store.DriverConfig{}) - assert.Nil(t, err) - assert.NotNil(t, ss) - - has, err := ss.HasString(s1) - assert.Nil(t, err) - assert.False(t, has) - - has, err = ss.HasString(s2) - assert.Nil(t, err) - assert.False(t, has) - - err = ss.RemoveString(s1) - assert.NotNil(t, err) - - err = ss.PutString(s1) - assert.Nil(t, err) - - has, err = ss.HasString(s1) - assert.Nil(t, err) - assert.True(t, has) - - has, err = ss.HasString(s2) - assert.Nil(t, err) - 
assert.False(t, has) - - err = ss.PutString(s1) - assert.Nil(t, err) - - err = ss.PutString(s2) - assert.Nil(t, err) - - has, err = ss.HasString(s1) - assert.Nil(t, err) - assert.True(t, has) - - has, err = ss.HasString(s2) - assert.Nil(t, err) - assert.True(t, has) - - err = ss.RemoveString(s1) - assert.Nil(t, err) - - err = ss.RemoveString(s2) - assert.Nil(t, err) - - has, err = ss.HasString(s1) - assert.Nil(t, err) - assert.False(t, has) - - has, err = ss.HasString(s2) - assert.Nil(t, err) - assert.False(t, has) + stringStoreTester.TestStringStore(t, &store.DriverConfig{}) } diff --git a/server/store/middleware/infohash/blacklist.go b/server/store/middleware/infohash/blacklist.go index 9a24320..ff883b1 100644 --- a/server/store/middleware/infohash/blacklist.go +++ b/server/store/middleware/infohash/blacklist.go @@ -14,7 +14,7 @@ func init() { tracker.RegisterAnnounceMiddleware("infohash_blacklist", blacklistAnnounceInfohash) tracker.RegisterScrapeMiddlewareConstructor("infohash_blacklist", blacklistScrapeInfohash) mustGetStore = func() store.StringStore { - return store.MustGetStore() + return store.MustGetStore().StringStore } } diff --git a/server/store/middleware/infohash/blacklist_test.go b/server/store/middleware/infohash/blacklist_test.go index 804a336..3d06b51 100644 --- a/server/store/middleware/infohash/blacklist_test.go +++ b/server/store/middleware/infohash/blacklist_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/chihaya/chihaya" + "github.com/chihaya/chihaya/pkg/stopper" "github.com/chihaya/chihaya/server/store" "github.com/chihaya/chihaya/tracker" ) @@ -36,6 +37,10 @@ func (ss *storeMock) RemoveString(s string) error { return nil } +func (ss *storeMock) Stop() <-chan error { + return stopper.AlreadyStopped +} + var mock store.StringStore = &storeMock{ strings: make(map[string]struct{}), } diff --git a/server/store/peer_store.go b/server/store/peer_store.go index aada1cd..fff01c3 100644 --- 
a/server/store/peer_store.go +++ b/server/store/peer_store.go @@ -9,6 +9,7 @@ import ( "time" "github.com/chihaya/chihaya" + "github.com/chihaya/chihaya/pkg/stopper" ) var peerStoreDrivers = make(map[string]PeerStoreDriver) @@ -61,6 +62,12 @@ type PeerStore interface { NumSeeders(infoHash chihaya.InfoHash) int // NumLeechers gets the amount of leechers for a particular infoHash. NumLeechers(infoHash chihaya.InfoHash) int + + // Stopper provides the Stop method that stops the PeerStore. + // Stop should shut down the PeerStore in a separate goroutine and send + // an error to the channel if the shutdown failed. If the shutdown + // was successful, the channel is to be closed. + stopper.Stopper } // PeerStoreDriver represents an interface for creating a handle to the storage diff --git a/server/store/store.go b/server/store/store.go index 4625078..e47ec94 100644 --- a/server/store/store.go +++ b/server/store/store.go @@ -7,12 +7,12 @@ package store import ( "errors" "log" - "sync" "time" "gopkg.in/yaml.v2" "github.com/chihaya/chihaya" + "github.com/chihaya/chihaya/pkg/stopper" "github.com/chihaya/chihaya/server" "github.com/chihaya/chihaya/tracker" ) @@ -34,29 +34,34 @@ func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Ser return nil, errors.New("store: invalid store config: " + err.Error()) } + theStore = &Store{ + cfg: cfg, + tkr: tkr, + shutdown: make(chan struct{}), + sg: stopper.NewStopGroup(), + } + ps, err := OpenPeerStore(&cfg.PeerStore) if err != nil { return nil, err } + theStore.sg.Add(ps) ips, err := OpenIPStore(&cfg.IPStore) if err != nil { return nil, err } + theStore.sg.Add(ips) ss, err := OpenStringStore(&cfg.StringStore) if err != nil { return nil, err } + theStore.sg.Add(ss) - theStore = &Store{ - cfg: cfg, - tkr: tkr, - shutdown: make(chan struct{}), - PeerStore: ps, - IPStore: ips, - StringStore: ss, - } + theStore.PeerStore = ps + theStore.IPStore = ips + theStore.StringStore = ss } return theStore, nil } @@ -110,7 
+115,7 @@ type Store struct { cfg *Config tkr *tracker.Tracker shutdown chan struct{} - wg sync.WaitGroup + sg *stopper.StopGroup PeerStore IPStore @@ -120,12 +125,18 @@ type Store struct { // Start starts the store drivers and blocks until all of them exit. func (s *Store) Start() { <-s.shutdown - s.wg.Wait() - log.Println("Store server shut down cleanly") } // Stop stops the store drivers and waits for them to exit. func (s *Store) Stop() { + errors := s.sg.Stop() + if len(errors) == 0 { + log.Println("Store server shut down cleanly") + } else { + log.Println("Store server: failed to shutdown drivers") + for _, err := range errors { + log.Println(err.Error()) + } + } close(s.shutdown) - s.wg.Wait() } diff --git a/server/store/store_tests.go b/server/store/store_tests.go new file mode 100644 index 0000000..5359c41 --- /dev/null +++ b/server/store/store_tests.go @@ -0,0 +1,95 @@ +// Copyright 2016 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package store + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// StringStoreTester is a collection of tests for a StringStore driver. +// Every benchmark expects a new, clean storage. Every benchmark should be +// called with a DriverConfig that ensures this. +type StringStoreTester interface { + TestStringStore(*testing.T, *DriverConfig) +} + +var _ StringStoreTester = &stringStoreTester{} + +type stringStoreTester struct { + s1, s2 string + driver StringStoreDriver +} + +// PrepareStringStoreTester prepares a reusable suite for StringStore driver +// tests. 
+func PrepareStringStoreTester(driver StringStoreDriver) StringStoreTester { + return &stringStoreTester{ + s1: "abc", + s2: "def", + driver: driver, + } +} + +func (s *stringStoreTester) TestStringStore(t *testing.T, cfg *DriverConfig) { + ss, err := s.driver.New(cfg) + require.Nil(t, err) + require.NotNil(t, ss) + + has, err := ss.HasString(s.s1) + require.Nil(t, err) + require.False(t, has) + + has, err = ss.HasString(s.s2) + require.Nil(t, err) + require.False(t, has) + + err = ss.RemoveString(s.s1) + require.NotNil(t, err) + + err = ss.PutString(s.s1) + require.Nil(t, err) + + has, err = ss.HasString(s.s1) + require.Nil(t, err) + require.True(t, has) + + has, err = ss.HasString(s.s2) + require.Nil(t, err) + require.False(t, has) + + err = ss.PutString(s.s1) + require.Nil(t, err) + + err = ss.PutString(s.s2) + require.Nil(t, err) + + has, err = ss.HasString(s.s1) + require.Nil(t, err) + require.True(t, has) + + has, err = ss.HasString(s.s2) + require.Nil(t, err) + require.True(t, has) + + err = ss.RemoveString(s.s1) + require.Nil(t, err) + + err = ss.RemoveString(s.s2) + require.Nil(t, err) + + has, err = ss.HasString(s.s1) + require.Nil(t, err) + require.False(t, has) + + has, err = ss.HasString(s.s2) + require.Nil(t, err) + require.False(t, has) + + errChan := ss.Stop() + err = <-errChan + require.Nil(t, err, "StringStore shutdown must not fail") +} diff --git a/server/store/string_store.go b/server/store/string_store.go index cfaea49..77ce849 100644 --- a/server/store/string_store.go +++ b/server/store/string_store.go @@ -4,7 +4,11 @@ package store -import "fmt" +import ( + "fmt" + + "github.com/chihaya/chihaya/pkg/stopper" +) var stringStoreDrivers = make(map[string]StringStoreDriver) @@ -21,6 +25,12 @@ type StringStore interface { // Returns ErrResourceDoesNotExist if the given string is not contained // in the store. RemoveString(s string) error + + // Stopper provides the Stop method that stops the StringStore. 
+ // Stop should shut down the StringStore in a separate goroutine and send + // an error to the channel if the shutdown failed. If the shutdown + // was successful, the channel is to be closed. + stopper.Stopper } // StringStoreDriver represents an interface for creating a handle to the From c191e04ee7bb4043ab3ccb7ebeab16635be494bc Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sun, 19 Jun 2016 12:50:21 -0400 Subject: [PATCH 3/8] store: add StringStore benchmarks --- server/store/memory/string_store_test.go | 85 ++++++- server/store/store_bench.go | 298 +++++++++++++++++++++++ 2 files changed, 381 insertions(+), 2 deletions(-) create mode 100644 server/store/store_bench.go diff --git a/server/store/memory/string_store_test.go b/server/store/memory/string_store_test.go index ccf5065..46719d2 100644 --- a/server/store/memory/string_store_test.go +++ b/server/store/memory/string_store_test.go @@ -11,10 +11,91 @@ import ( ) var ( - driver = &stringStoreDriver{} - stringStoreTester = store.PrepareStringStoreTester(driver) + driver = &stringStoreDriver{} + stringStoreTester = store.PrepareStringStoreTester(driver) + stringStoreBenchmarker = store.PrepareStringStoreBenchmarker(&stringStoreDriver{}) ) func TestStringStore(t *testing.T) { stringStoreTester.TestStringStore(t, &store.DriverConfig{}) } + +func BenchmarkStringStore_AddShort(b *testing.B) { + stringStoreBenchmarker.AddShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_AddLong(b *testing.B) { + stringStoreBenchmarker.AddLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_LookupShort(b *testing.B) { + stringStoreBenchmarker.LookupShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_LookupLong(b *testing.B) { + stringStoreBenchmarker.LookupLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_AddRemoveShort(b *testing.B) { + stringStoreBenchmarker.AddRemoveShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_AddRemoveLong(b *testing.B) { + 
stringStoreBenchmarker.AddRemoveLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_LookupNonExistShort(b *testing.B) { + stringStoreBenchmarker.LookupNonExistShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_LookupNonExistLong(b *testing.B) { + stringStoreBenchmarker.LookupNonExistLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_RemoveNonExistShort(b *testing.B) { + stringStoreBenchmarker.RemoveNonExistShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_RemoveNonExistLong(b *testing.B) { + stringStoreBenchmarker.RemoveNonExistLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_Add1KShort(b *testing.B) { + stringStoreBenchmarker.Add1KShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_Add1KLong(b *testing.B) { + stringStoreBenchmarker.Add1KLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_Lookup1KShort(b *testing.B) { + stringStoreBenchmarker.Lookup1KShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_Lookup1KLong(b *testing.B) { + stringStoreBenchmarker.Lookup1KLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_AddRemove1KShort(b *testing.B) { + stringStoreBenchmarker.AddRemove1KShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_AddRemove1KLong(b *testing.B) { + stringStoreBenchmarker.AddRemove1KLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_LookupNonExist1KShort(b *testing.B) { + stringStoreBenchmarker.LookupNonExist1KShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_LookupNonExist1KLong(b *testing.B) { + stringStoreBenchmarker.LookupNonExist1KLong(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_RemoveNonExist1KShort(b *testing.B) { + stringStoreBenchmarker.RemoveNonExist1KShort(b, &store.DriverConfig{}) +} + +func BenchmarkStringStore_RemoveNonExist1KLong(b *testing.B) { + stringStoreBenchmarker.RemoveNonExist1KLong(b, &store.DriverConfig{}) +} diff --git a/server/store/store_bench.go 
b/server/store/store_bench.go new file mode 100644 index 0000000..1d752bf --- /dev/null +++ b/server/store/store_bench.go @@ -0,0 +1,298 @@ +// Copyright 2016 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package store + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +const num1KElements = 1000 + +// StringStoreBenchmarker is a collection of benchmarks for StringStore drivers. +// Every benchmark expects a new, clean storage. Every benchmark should be +// called with a DriverConfig that ensures this. +type StringStoreBenchmarker interface { + AddShort(*testing.B, *DriverConfig) + AddLong(*testing.B, *DriverConfig) + LookupShort(*testing.B, *DriverConfig) + LookupLong(*testing.B, *DriverConfig) + AddRemoveShort(*testing.B, *DriverConfig) + AddRemoveLong(*testing.B, *DriverConfig) + LookupNonExistShort(*testing.B, *DriverConfig) + LookupNonExistLong(*testing.B, *DriverConfig) + RemoveNonExistShort(*testing.B, *DriverConfig) + RemoveNonExistLong(*testing.B, *DriverConfig) + + Add1KShort(*testing.B, *DriverConfig) + Add1KLong(*testing.B, *DriverConfig) + Lookup1KShort(*testing.B, *DriverConfig) + Lookup1KLong(*testing.B, *DriverConfig) + AddRemove1KShort(*testing.B, *DriverConfig) + AddRemove1KLong(*testing.B, *DriverConfig) + LookupNonExist1KShort(*testing.B, *DriverConfig) + LookupNonExist1KLong(*testing.B, *DriverConfig) + RemoveNonExist1KShort(*testing.B, *DriverConfig) + RemoveNonExist1KLong(*testing.B, *DriverConfig) +} + +var _ StringStoreBenchmarker = &stringStoreBench{} + +type stringStoreBench struct { + // sShort holds differentStrings unique strings of length 10. + sShort [num1KElements]string + // sLong holds differentStrings unique strings of length 1000. 
+ sLong [num1KElements]string + + driver StringStoreDriver +} + +func generateLongStrings() (a [num1KElements]string) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = strings.Repeat(fmt.Sprintf("%x", b), 250) + } + + return +} + +func generateShortStrings() (a [num1KElements]string) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = strings.Repeat(fmt.Sprintf("%x", b), 3)[:10] + } + + return +} + +// PrepareStringStoreBenchmarker prepares a reusable suite for StringStore driver +// benchmarks. +func PrepareStringStoreBenchmarker(driver StringStoreDriver) StringStoreBenchmarker { + return stringStoreBench{ + sShort: generateShortStrings(), + sLong: generateLongStrings(), + driver: driver, + } +} + +type stringStoreSetupFunc func(StringStore) error + +func stringStoreSetupNOP(StringStore) error { return nil } + +type stringStoreBenchFunc func(StringStore, int) error + +func (sb stringStoreBench) runBenchmark(b *testing.B, cfg *DriverConfig, setup stringStoreSetupFunc, execute stringStoreBenchFunc) { + ss, err := sb.driver.New(cfg) + require.Nil(b, err, "Constructor error must be nil") + require.NotNil(b, ss, "String store must not be nil") + + err = setup(ss) + require.Nil(b, err, "Benchmark setup must not fail") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + execute(ss, i) + } + b.StopTimer() + + errChan := ss.Stop() + err = <-errChan + require.Nil(b, err, "StringStore shutdown must not fail") +} + +func (sb stringStoreBench) AddShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sShort[0]) + return nil + }) +} + +func (sb stringStoreBench) AddLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sLong[0]) + return nil + }) +} + +func (sb stringStoreBench) Add1KShort(b *testing.B, cfg *DriverConfig) 
{ + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sShort[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) Add1KLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sLong[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) LookupShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, + func(ss StringStore) error { + return ss.PutString(sb.sShort[0]) + }, + func(ss StringStore, i int) error { + ss.HasString(sb.sShort[0]) + return nil + }) +} + +func (sb stringStoreBench) LookupLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, + func(ss StringStore) error { + return ss.PutString(sb.sLong[0]) + }, + func(ss StringStore, i int) error { + ss.HasString(sb.sLong[0]) + return nil + }) +} + +func (sb stringStoreBench) Lookup1KShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, + func(ss StringStore) error { + for i := 0; i < num1KElements; i++ { + err := ss.PutString(sb.sShort[i]) + if err != nil { + return err + } + } + return nil + }, + func(ss StringStore, i int) error { + ss.HasString(sb.sShort[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) Lookup1KLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, + func(ss StringStore) error { + for i := 0; i < num1KElements; i++ { + err := ss.PutString(sb.sLong[i]) + if err != nil { + return err + } + } + return nil + }, + func(ss StringStore, i int) error { + ss.HasString(sb.sLong[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) AddRemoveShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sShort[0]) + ss.RemoveString(sb.sShort[0]) + return nil + }) +} + +func (sb stringStoreBench) AddRemoveLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, 
stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sLong[0]) + ss.RemoveString(sb.sLong[0]) + return nil + }) +} + +func (sb stringStoreBench) AddRemove1KShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sShort[i%num1KElements]) + ss.RemoveString(sb.sShort[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) AddRemove1KLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.PutString(sb.sLong[i%num1KElements]) + ss.RemoveString(sb.sLong[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) LookupNonExistShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.HasString(sb.sShort[0]) + return nil + }) +} + +func (sb stringStoreBench) LookupNonExistLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.HasString(sb.sLong[0]) + return nil + }) +} + +func (sb stringStoreBench) LookupNonExist1KShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.HasString(sb.sShort[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) LookupNonExist1KLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.HasString(sb.sLong[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) RemoveNonExistShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.RemoveString(sb.sShort[0]) + return nil + }) +} + +func (sb stringStoreBench) RemoveNonExistLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + 
ss.RemoveString(sb.sLong[0]) + return nil + }) +} + +func (sb stringStoreBench) RemoveNonExist1KShort(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.RemoveString(sb.sShort[i%num1KElements]) + return nil + }) +} + +func (sb stringStoreBench) RemoveNonExist1KLong(b *testing.B, cfg *DriverConfig) { + sb.runBenchmark(b, cfg, stringStoreSetupNOP, + func(ss StringStore, i int) error { + ss.RemoveString(sb.sLong[i%num1KElements]) + return nil + }) +} From a4d808dea98dace3eb5d74c1d6e7bc5da63d6036 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sat, 18 Jun 2016 15:51:10 -0400 Subject: [PATCH 4/8] store: add IPStore tests --- server/store/memory/ip_store_test.go | 233 +----------------------- server/store/store_bench.go | 2 + server/store/store_tests.go | 258 +++++++++++++++++++++++++++ 3 files changed, 266 insertions(+), 227 deletions(-) diff --git a/server/store/memory/ip_store_test.go b/server/store/memory/ip_store_test.go index 15b5a68..908fa88 100644 --- a/server/store/memory/ip_store_test.go +++ b/server/store/memory/ip_store_test.go @@ -17,6 +17,8 @@ var ( v6 = net.ParseIP("0c22:384e:0:0c22:384e::68") v4 = net.ParseIP("12.13.14.15") v4s = net.ParseIP("12.13.14.15").To4() + + ipStoreTester = store.PrepareIPStoreTester(&ipStoreDriver{}) ) func TestKey(t *testing.T) { @@ -36,240 +38,17 @@ func TestKey(t *testing.T) { } func TestIPStore(t *testing.T) { - var d = &ipStoreDriver{} - - s, err := d.New(&store.DriverConfig{}) - require.Nil(t, err) - require.NotNil(t, s) - - // check default state - found, err := s.HasIP(v4) - require.Nil(t, err) - require.False(t, found) - - // check IPv4 - err = s.AddIP(v4) - require.Nil(t, err) - - found, err = s.HasIP(v4) - require.Nil(t, err) - require.True(t, found) - - found, err = s.HasIP(v4s) - require.Nil(t, err) - require.True(t, found) - - found, err = s.HasIP(v6) - require.Nil(t, err) - require.False(t, found) - - // check removes - err = s.RemoveIP(v6) - 
require.NotNil(t, err) - - err = s.RemoveIP(v4s) - require.Nil(t, err) - - found, err = s.HasIP(v4) - require.Nil(t, err) - require.False(t, found) - - // check IPv6 - err = s.AddIP(v6) - require.Nil(t, err) - - found, err = s.HasIP(v6) - require.Nil(t, err) - require.True(t, found) - - err = s.RemoveIP(v6) - require.Nil(t, err) - - found, err = s.HasIP(v6) - require.Nil(t, err) - require.False(t, found) - - errChan := s.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") + ipStoreTester.TestIPStore(t, &store.DriverConfig{}) } func TestHasAllHasAny(t *testing.T) { - var d = &ipStoreDriver{} - s, err := d.New(&store.DriverConfig{}) - require.Nil(t, err) - require.NotNil(t, s) - - found, err := s.HasAnyIP(nil) - require.Nil(t, err) - require.False(t, found) - - found, err = s.HasAllIPs(nil) - require.Nil(t, err) - require.True(t, found) - - found, err = s.HasAllIPs([]net.IP{v4}) - require.Nil(t, err) - require.False(t, found) - - err = s.AddIP(v4) - require.Nil(t, err) - - found, err = s.HasAnyIP([]net.IP{v4, v6}) - require.Nil(t, err) - require.True(t, found) - - found, err = s.HasAllIPs([]net.IP{v4, v6}) - require.Nil(t, err) - require.False(t, found) - - found, err = s.HasAllIPs([]net.IP{v4}) - require.Nil(t, err) - require.True(t, found) - - err = s.AddIP(v6) - require.Nil(t, err) - - found, err = s.HasAnyIP([]net.IP{v4, v6}) - require.Nil(t, err) - require.True(t, found) - - found, err = s.HasAllIPs([]net.IP{v4, v6}) - require.Nil(t, err) - require.True(t, found) - - errChan := s.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") + ipStoreTester.TestHasAllHasAny(t, &store.DriverConfig{}) } func TestNetworks(t *testing.T) { - var ( - d = &ipStoreDriver{} - net1 = "192.168.22.255/24" - net2 = "192.168.23.255/24" - includedIP = net.ParseIP("192.168.22.23") - excludedIP = net.ParseIP("192.168.23.22") - ) - - s, err := d.New(&store.DriverConfig{}) - require.Nil(t, err) - require.NotNil(t, s) - - match, err := 
s.HasIP(includedIP) - require.Nil(t, err) - require.False(t, match) - - match, err = s.HasIP(excludedIP) - require.Nil(t, err) - require.False(t, match) - - err = s.AddNetwork("") - require.NotNil(t, err) - - err = s.RemoveNetwork("") - require.NotNil(t, err) - - err = s.AddNetwork(net1) - require.Nil(t, err) - - match, err = s.HasIP(includedIP) - require.Nil(t, err) - require.True(t, match) - - match, err = s.HasIP(excludedIP) - require.Nil(t, err) - require.False(t, match) - - err = s.RemoveNetwork(net2) - require.NotNil(t, err) - - err = s.RemoveNetwork(net1) - require.Nil(t, err) - - match, err = s.HasIP(includedIP) - require.Nil(t, err) - require.False(t, match) - - match, err = s.HasIP(excludedIP) - require.Nil(t, err) - require.False(t, match) - - errChan := s.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") + ipStoreTester.TestNetworks(t, &store.DriverConfig{}) } func TestHasAllHasAnyNetworks(t *testing.T) { - var ( - d = &ipStoreDriver{} - net1 = "192.168.22.255/24" - net2 = "192.168.23.255/24" - inNet1 = net.ParseIP("192.168.22.234") - inNet2 = net.ParseIP("192.168.23.123") - excluded = net.ParseIP("10.154.243.22") - ) - s, err := d.New(&store.DriverConfig{}) - require.Nil(t, err) - require.NotNil(t, s) - - match, err := s.HasAnyIP([]net.IP{inNet1, inNet2, excluded}) - require.Nil(t, err) - require.False(t, match) - - match, err = s.HasAllIPs([]net.IP{inNet1, inNet2, excluded}) - require.Nil(t, err) - require.False(t, match) - - err = s.AddNetwork(net1) - require.Nil(t, err) - - match, err = s.HasAnyIP([]net.IP{inNet1, inNet2}) - require.Nil(t, err) - require.True(t, match) - - match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - require.Nil(t, err) - require.False(t, match) - - err = s.AddNetwork(net2) - require.Nil(t, err) - - match, err = s.HasAnyIP([]net.IP{inNet1, inNet2, excluded}) - require.Nil(t, err) - require.True(t, match) - - match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - require.Nil(t, err) - 
require.True(t, match) - - match, err = s.HasAllIPs([]net.IP{inNet1, inNet2, excluded}) - require.Nil(t, err) - require.False(t, match) - - err = s.RemoveNetwork(net1) - require.Nil(t, err) - - match, err = s.HasAnyIP([]net.IP{inNet1, inNet2}) - require.Nil(t, err) - require.True(t, match) - - match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - require.Nil(t, err) - require.False(t, match) - - err = s.RemoveNetwork(net2) - require.Nil(t, err) - - match, err = s.HasAnyIP([]net.IP{inNet1, inNet2}) - require.Nil(t, err) - require.False(t, match) - - match, err = s.HasAllIPs([]net.IP{inNet1, inNet2}) - require.Nil(t, err) - require.False(t, match) - - errChan := s.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") + ipStoreTester.TestHasAllHasAnyNetworks(t, &store.DriverConfig{}) } diff --git a/server/store/store_bench.go b/server/store/store_bench.go index 1d752bf..ea52ef9 100644 --- a/server/store/store_bench.go +++ b/server/store/store_bench.go @@ -9,6 +9,8 @@ import ( "strings" "testing" + "net" + "github.com/stretchr/testify/require" ) diff --git a/server/store/store_tests.go b/server/store/store_tests.go index 5359c41..ea0663d 100644 --- a/server/store/store_tests.go +++ b/server/store/store_tests.go @@ -7,6 +7,8 @@ package store import ( "testing" + "net" + "github.com/stretchr/testify/require" ) @@ -93,3 +95,259 @@ func (s *stringStoreTester) TestStringStore(t *testing.T, cfg *DriverConfig) { err = <-errChan require.Nil(t, err, "StringStore shutdown must not fail") } + +// IPStoreTester is a collection of tests for an IPStore driver. +// Every benchmark expects a new, clean storage. Every benchmark should be +// called with a DriverConfig that ensures this. 
+type IPStoreTester interface { + TestIPStore(*testing.T, *DriverConfig) + TestHasAllHasAny(*testing.T, *DriverConfig) + TestNetworks(*testing.T, *DriverConfig) + TestHasAllHasAnyNetworks(*testing.T, *DriverConfig) +} + +var _ IPStoreTester = &ipStoreTester{} + +type ipStoreTester struct { + v6, v4, v4s net.IP + net1, net2 string + inNet1, inNet2 net.IP + excluded net.IP + driver IPStoreDriver +} + +// PrepareIPStoreTester prepares a reusable suite for IPStore driver +// tests. +func PrepareIPStoreTester(driver IPStoreDriver) IPStoreTester { + return &ipStoreTester{ + v6: net.ParseIP("0c22:384e:0:0c22:384e::68"), + v4: net.ParseIP("12.13.14.15"), + v4s: net.ParseIP("12.13.14.15").To4(), + net1: "192.168.22.255/24", + net2: "192.168.23.255/24", + inNet1: net.ParseIP("192.168.22.22"), + inNet2: net.ParseIP("192.168.23.23"), + excluded: net.ParseIP("10.154.243.22"), + driver: driver, + } +} + +func (s *ipStoreTester) TestIPStore(t *testing.T, cfg *DriverConfig) { + is, err := s.driver.New(cfg) + require.Nil(t, err) + require.NotNil(t, is) + + // check default state + found, err := is.HasIP(s.v4) + require.Nil(t, err) + require.False(t, found) + + // check IPv4 + err = is.AddIP(s.v4) + require.Nil(t, err) + + found, err = is.HasIP(s.v4) + require.Nil(t, err) + require.True(t, found) + + found, err = is.HasIP(s.v4s) + require.Nil(t, err) + require.True(t, found) + + found, err = is.HasIP(s.v6) + require.Nil(t, err) + require.False(t, found) + + // check removes + err = is.RemoveIP(s.v6) + require.NotNil(t, err) + + err = is.RemoveIP(s.v4s) + require.Nil(t, err) + + found, err = is.HasIP(s.v4) + require.Nil(t, err) + require.False(t, found) + + // check IPv6 + err = is.AddIP(s.v6) + require.Nil(t, err) + + found, err = is.HasIP(s.v6) + require.Nil(t, err) + require.True(t, found) + + err = is.RemoveIP(s.v6) + require.Nil(t, err) + + found, err = is.HasIP(s.v6) + require.Nil(t, err) + require.False(t, found) + + errChan := is.Stop() + err = <-errChan + require.Nil(t, err, 
"IPStore shutdown must not fail") +} + +func (s *ipStoreTester) TestHasAllHasAny(t *testing.T, cfg *DriverConfig) { + is, err := s.driver.New(cfg) + require.Nil(t, err) + require.NotNil(t, is) + + found, err := is.HasAnyIP(nil) + require.Nil(t, err) + require.False(t, found) + + found, err = is.HasAllIPs(nil) + require.Nil(t, err) + require.True(t, found) + + found, err = is.HasAllIPs([]net.IP{s.v6}) + require.Nil(t, err) + require.False(t, found) + + err = is.AddIP(s.v4) + require.Nil(t, err) + + found, err = is.HasAnyIP([]net.IP{s.v6, s.v4}) + require.Nil(t, err) + require.True(t, found) + + found, err = is.HasAllIPs([]net.IP{s.v6, s.v4}) + require.Nil(t, err) + require.False(t, found) + + found, err = is.HasAllIPs([]net.IP{s.v4}) + require.Nil(t, err) + require.True(t, found) + + err = is.AddIP(s.v6) + require.Nil(t, err) + + found, err = is.HasAnyIP([]net.IP{s.v6, s.v6}) + require.Nil(t, err) + require.True(t, found) + + found, err = is.HasAllIPs([]net.IP{s.v6, s.v6}) + require.Nil(t, err) + require.True(t, found) + + errChan := is.Stop() + err = <-errChan + require.Nil(t, err, "IPStore shutdown must not fail") +} + +func (s *ipStoreTester) TestNetworks(t *testing.T, cfg *DriverConfig) { + is, err := s.driver.New(cfg) + require.Nil(t, err) + require.NotNil(t, is) + + match, err := is.HasIP(s.inNet1) + require.Nil(t, err) + require.False(t, match) + + match, err = is.HasIP(s.inNet2) + require.Nil(t, err) + require.False(t, match) + + err = is.AddNetwork("") + require.NotNil(t, err) + + err = is.RemoveNetwork("") + require.NotNil(t, err) + + err = is.AddNetwork(s.net1) + require.Nil(t, err) + + match, err = is.HasIP(s.inNet1) + require.Nil(t, err) + require.True(t, match) + + match, err = is.HasIP(s.inNet2) + require.Nil(t, err) + require.False(t, match) + + err = is.RemoveNetwork(s.net2) + require.NotNil(t, err) + + err = is.RemoveNetwork(s.net1) + require.Nil(t, err) + + match, err = is.HasIP(s.inNet1) + require.Nil(t, err) + require.False(t, match) + + match, 
err = is.HasIP(s.inNet2) + require.Nil(t, err) + require.False(t, match) + + errChan := is.Stop() + err = <-errChan + require.Nil(t, err, "IPStore shutdown must not fail") +} + +func (s *ipStoreTester) TestHasAllHasAnyNetworks(t *testing.T, cfg *DriverConfig) { + is, err := s.driver.New(cfg) + require.Nil(t, err) + require.NotNil(t, s) + + match, err := is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded}) + require.Nil(t, err) + require.False(t, match) + + match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded}) + require.Nil(t, err) + require.False(t, match) + + err = is.AddNetwork(s.net1) + require.Nil(t, err) + + match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2}) + require.Nil(t, err) + require.True(t, match) + + match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) + require.Nil(t, err) + require.False(t, match) + + err = is.AddNetwork(s.net2) + require.Nil(t, err) + + match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded}) + require.Nil(t, err) + require.True(t, match) + + match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) + require.Nil(t, err) + require.True(t, match) + + match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded}) + require.Nil(t, err) + require.False(t, match) + + err = is.RemoveNetwork(s.net1) + require.Nil(t, err) + + match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2}) + require.Nil(t, err) + require.True(t, match) + + match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) + require.Nil(t, err) + require.False(t, match) + + err = is.RemoveNetwork(s.net2) + require.Nil(t, err) + + match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2}) + require.Nil(t, err) + require.False(t, match) + + match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) + require.Nil(t, err) + require.False(t, match) + + errChan := is.Stop() + err = <-errChan + require.Nil(t, err, "IPStore shutdown must not fail") +} From 5fae38399b87ed068ec91bfd930554de27cc659c Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sat, 18 Jun 
2016 18:21:01 -0400 Subject: [PATCH 5/8] store: add IPStore benchmarks --- server/store/memory/ip_store_test.go | 147 +++++++- server/store/store_bench.go | 483 ++++++++++++++++++++++++++- 2 files changed, 627 insertions(+), 3 deletions(-) diff --git a/server/store/memory/ip_store_test.go b/server/store/memory/ip_store_test.go index 908fa88..552568d 100644 --- a/server/store/memory/ip_store_test.go +++ b/server/store/memory/ip_store_test.go @@ -18,7 +18,8 @@ var ( v4 = net.ParseIP("12.13.14.15") v4s = net.ParseIP("12.13.14.15").To4() - ipStoreTester = store.PrepareIPStoreTester(&ipStoreDriver{}) + ipStoreTester = store.PrepareIPStoreTester(&ipStoreDriver{}) + ipStoreBenchmarker = store.PrepareIPStoreBenchmarker(&ipStoreDriver{}) ) func TestKey(t *testing.T) { @@ -52,3 +53,147 @@ func TestNetworks(t *testing.T) { func TestHasAllHasAnyNetworks(t *testing.T) { ipStoreTester.TestHasAllHasAnyNetworks(t, &store.DriverConfig{}) } + +func BenchmarkIPStore_AddV4(b *testing.B) { + ipStoreBenchmarker.AddV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddV6(b *testing.B) { + ipStoreBenchmarker.AddV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupV4(b *testing.B) { + ipStoreBenchmarker.LookupV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupV6(b *testing.B) { + ipStoreBenchmarker.LookupV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemoveV4(b *testing.B) { + ipStoreBenchmarker.AddRemoveV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemoveV6(b *testing.B) { + ipStoreBenchmarker.AddRemoveV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupNonExistV4(b *testing.B) { + ipStoreBenchmarker.LookupNonExistV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupNonExistV6(b *testing.B) { + ipStoreBenchmarker.LookupNonExistV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_RemoveNonExistV4(b *testing.B) { + ipStoreBenchmarker.RemoveNonExistV4(b, &store.DriverConfig{}) +} + +func 
BenchmarkIPStore_RemoveNonExistV6(b *testing.B) { + ipStoreBenchmarker.RemoveNonExistV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddV4Network(b *testing.B) { + ipStoreBenchmarker.AddV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddV6Network(b *testing.B) { + ipStoreBenchmarker.AddV6Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupV4Network(b *testing.B) { + ipStoreBenchmarker.LookupV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupV6Network(b *testing.B) { + ipStoreBenchmarker.LookupV6Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemoveV4Network(b *testing.B) { + ipStoreBenchmarker.AddRemoveV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemoveV6Network(b *testing.B) { + ipStoreBenchmarker.AddRemoveV6Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_RemoveNonExistV4Network(b *testing.B) { + ipStoreBenchmarker.RemoveNonExistV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_RemoveNonExistV6Network(b *testing.B) { + ipStoreBenchmarker.RemoveNonExistV6Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Add1KV4(b *testing.B) { + ipStoreBenchmarker.Add1KV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Add1KV6(b *testing.B) { + ipStoreBenchmarker.Add1KV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Lookup1KV4(b *testing.B) { + ipStoreBenchmarker.Lookup1KV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Lookup1KV6(b *testing.B) { + ipStoreBenchmarker.Lookup1KV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemove1KV4(b *testing.B) { + ipStoreBenchmarker.AddRemove1KV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemove1KV6(b *testing.B) { + ipStoreBenchmarker.AddRemove1KV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupNonExist1KV4(b *testing.B) { + ipStoreBenchmarker.LookupNonExist1KV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_LookupNonExist1KV6(b *testing.B) { 
+ ipStoreBenchmarker.LookupNonExist1KV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_RemoveNonExist1KV4(b *testing.B) { + ipStoreBenchmarker.RemoveNonExist1KV4(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_RemoveNonExist1KV6(b *testing.B) { + ipStoreBenchmarker.RemoveNonExist1KV6(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Add1KV4Network(b *testing.B) { + ipStoreBenchmarker.Add1KV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Add1KV6Network(b *testing.B) { + ipStoreBenchmarker.Add1KV6Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Lookup1KV4Network(b *testing.B) { + ipStoreBenchmarker.Lookup1KV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_Lookup1KV6Network(b *testing.B) { + ipStoreBenchmarker.Lookup1KV6Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemove1KV4Network(b *testing.B) { + ipStoreBenchmarker.AddRemove1KV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_AddRemove1KV6Network(b *testing.B) { + ipStoreBenchmarker.AddRemove1KV6Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_RemoveNonExist1KV4Network(b *testing.B) { + ipStoreBenchmarker.RemoveNonExist1KV4Network(b, &store.DriverConfig{}) +} + +func BenchmarkIPStore_RemoveNonExist1KV6Network(b *testing.B) { + ipStoreBenchmarker.RemoveNonExist1KV6Network(b, &store.DriverConfig{}) +} diff --git a/server/store/store_bench.go b/server/store/store_bench.go index ea52ef9..c1b56f3 100644 --- a/server/store/store_bench.go +++ b/server/store/store_bench.go @@ -6,11 +6,10 @@ package store import ( "fmt" + "net" "strings" "testing" - "net" - "github.com/stretchr/testify/require" ) @@ -298,3 +297,483 @@ func (sb stringStoreBench) RemoveNonExist1KLong(b *testing.B, cfg *DriverConfig) return nil }) } + +// IPStoreBenchmarker is a collection of benchmarks for IPStore drivers. +// Every benchmark expects a new, clean storage. Every benchmark should be +// called with a DriverConfig that ensures this. 
+type IPStoreBenchmarker interface { + AddV4(*testing.B, *DriverConfig) + AddV6(*testing.B, *DriverConfig) + LookupV4(*testing.B, *DriverConfig) + LookupV6(*testing.B, *DriverConfig) + AddRemoveV4(*testing.B, *DriverConfig) + AddRemoveV6(*testing.B, *DriverConfig) + LookupNonExistV4(*testing.B, *DriverConfig) + LookupNonExistV6(*testing.B, *DriverConfig) + RemoveNonExistV4(*testing.B, *DriverConfig) + RemoveNonExistV6(*testing.B, *DriverConfig) + + AddV4Network(*testing.B, *DriverConfig) + AddV6Network(*testing.B, *DriverConfig) + LookupV4Network(*testing.B, *DriverConfig) + LookupV6Network(*testing.B, *DriverConfig) + AddRemoveV4Network(*testing.B, *DriverConfig) + AddRemoveV6Network(*testing.B, *DriverConfig) + RemoveNonExistV4Network(*testing.B, *DriverConfig) + RemoveNonExistV6Network(*testing.B, *DriverConfig) + + Add1KV4(*testing.B, *DriverConfig) + Add1KV6(*testing.B, *DriverConfig) + Lookup1KV4(*testing.B, *DriverConfig) + Lookup1KV6(*testing.B, *DriverConfig) + AddRemove1KV4(*testing.B, *DriverConfig) + AddRemove1KV6(*testing.B, *DriverConfig) + LookupNonExist1KV4(*testing.B, *DriverConfig) + LookupNonExist1KV6(*testing.B, *DriverConfig) + RemoveNonExist1KV4(*testing.B, *DriverConfig) + RemoveNonExist1KV6(*testing.B, *DriverConfig) + + Add1KV4Network(*testing.B, *DriverConfig) + Add1KV6Network(*testing.B, *DriverConfig) + Lookup1KV4Network(*testing.B, *DriverConfig) + Lookup1KV6Network(*testing.B, *DriverConfig) + AddRemove1KV4Network(*testing.B, *DriverConfig) + AddRemove1KV6Network(*testing.B, *DriverConfig) + RemoveNonExist1KV4Network(*testing.B, *DriverConfig) + RemoveNonExist1KV6Network(*testing.B, *DriverConfig) +} + +func generateV4Networks() (a [num1KElements]string) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = fmt.Sprintf("64.%d.%d.255/24", b[0], b[1]) + } + + return +} + +func generateV6Networks() (a [num1KElements]string) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = 
byte(i >> 8) + a[i] = fmt.Sprintf("6464:6464:6464:%02x%02x:ffff:ffff:ffff:ffff/64", b[0], b[1]) + } + + return +} + +func generateV4IPs() (a [num1KElements]net.IP) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = net.ParseIP(fmt.Sprintf("64.%d.%d.64", b[0], b[1])).To4() + } + + return +} + +func generateV6IPs() (a [num1KElements]net.IP) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = net.ParseIP(fmt.Sprintf("6464:6464:6464:%02x%02x:6464:6464:6464:6464", b[0], b[1])) + } + + return +} + +type ipStoreBench struct { + v4IPs [num1KElements]net.IP + v6IPs [num1KElements]net.IP + + v4Networks [num1KElements]string + v6Networks [num1KElements]string + + driver IPStoreDriver +} + +// PrepareIPStoreBenchmarker prepares a reusable suite for StringStore driver +// benchmarks. +func PrepareIPStoreBenchmarker(driver IPStoreDriver) IPStoreBenchmarker { + return ipStoreBench{ + v4IPs: generateV4IPs(), + v6IPs: generateV6IPs(), + v4Networks: generateV4Networks(), + v6Networks: generateV6Networks(), + driver: driver, + } +} + +type ipStoreSetupFunc func(IPStore) error + +func ipStoreSetupNOP(IPStore) error { return nil } + +type ipStoreBenchFunc func(IPStore, int) error + +func (ib ipStoreBench) runBenchmark(b *testing.B, cfg *DriverConfig, setup ipStoreSetupFunc, execute ipStoreBenchFunc) { + is, err := ib.driver.New(cfg) + require.Nil(b, err, "Constructor error must be nil") + require.NotNil(b, is, "IP store must not be nil") + + err = setup(is) + require.Nil(b, err, "Benchmark setup must not fail") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + execute(is, i) + } + b.StopTimer() + + errChan := is.Stop() + err = <-errChan + require.Nil(b, err, "IPStore shutdown must not fail") +} + +func (ib ipStoreBench) AddV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v4IPs[0]) + return nil + }) +} + +func (ib 
ipStoreBench) AddV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v6IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) LookupV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + return is.AddIP(ib.v4IPs[0]) + }, + func(is IPStore, i int) error { + is.HasIP(ib.v4IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) LookupV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + return is.AddIP(ib.v6IPs[0]) + }, + func(is IPStore, i int) error { + is.HasIP(ib.v6IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) AddRemoveV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v4IPs[0]) + is.RemoveIP(ib.v4IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) AddRemoveV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v6IPs[0]) + is.RemoveIP(ib.v6IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) LookupNonExistV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.HasIP(ib.v4IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) LookupNonExistV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.HasIP(ib.v6IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExistV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.RemoveIP(ib.v4IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExistV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.RemoveIP(ib.v6IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) AddV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, 
cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v4Networks[0]) + return nil + }) +} + +func (ib ipStoreBench) AddV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v6Networks[0]) + return nil + }) +} + +func (ib ipStoreBench) LookupV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + return is.AddNetwork(ib.v4Networks[0]) + }, + func(is IPStore, i int) error { + is.HasIP(ib.v4IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) LookupV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + return is.AddNetwork(ib.v6Networks[0]) + }, + func(is IPStore, i int) error { + is.HasIP(ib.v6IPs[0]) + return nil + }) +} + +func (ib ipStoreBench) AddRemoveV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v4Networks[0]) + is.RemoveNetwork(ib.v4Networks[0]) + return nil + }) +} + +func (ib ipStoreBench) AddRemoveV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v6Networks[0]) + is.RemoveNetwork(ib.v6Networks[0]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExistV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.RemoveNetwork(ib.v4Networks[0]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExistV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.RemoveNetwork(ib.v6Networks[0]) + return nil + }) +} + +func (ib ipStoreBench) Add1KV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v4IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) 
Add1KV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v6IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) Lookup1KV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + for i := 0; i < num1KElements; i++ { + err := is.AddIP(ib.v4IPs[i%num1KElements]) + if err != nil { + return err + } + } + return nil + }, + func(is IPStore, i int) error { + is.HasIP(ib.v4IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) Lookup1KV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + for i := 0; i < num1KElements; i++ { + err := is.AddIP(ib.v6IPs[i%num1KElements]) + if err != nil { + return err + } + } + return nil + }, + func(is IPStore, i int) error { + is.HasIP(ib.v6IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) AddRemove1KV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v4IPs[i%num1KElements]) + is.RemoveIP(ib.v4IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) AddRemove1KV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddIP(ib.v6IPs[i%num1KElements]) + is.RemoveIP(ib.v6IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) LookupNonExist1KV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.HasIP(ib.v4IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) LookupNonExist1KV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.HasIP(ib.v6IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExist1KV4(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + 
is.RemoveIP(ib.v4IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExist1KV6(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.RemoveIP(ib.v6IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) Add1KV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v4Networks[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) Add1KV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v6Networks[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) Lookup1KV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + for i := 0; i < num1KElements; i++ { + err := is.AddNetwork(ib.v4Networks[i%num1KElements]) + if err != nil { + return err + } + } + return nil + }, + func(is IPStore, i int) error { + is.HasIP(ib.v4IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) Lookup1KV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, + func(is IPStore) error { + for i := 0; i < num1KElements; i++ { + err := is.AddNetwork(ib.v6Networks[i%num1KElements]) + if err != nil { + return err + } + } + return nil + }, + func(is IPStore, i int) error { + is.HasIP(ib.v6IPs[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) AddRemove1KV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v4Networks[i%num1KElements]) + is.RemoveNetwork(ib.v4Networks[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) AddRemove1KV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.AddNetwork(ib.v6Networks[i%num1KElements]) + 
is.RemoveNetwork(ib.v6Networks[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExist1KV4Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.RemoveNetwork(ib.v4Networks[i%num1KElements]) + return nil + }) +} + +func (ib ipStoreBench) RemoveNonExist1KV6Network(b *testing.B, cfg *DriverConfig) { + ib.runBenchmark(b, cfg, ipStoreSetupNOP, + func(is IPStore, i int) error { + is.RemoveNetwork(ib.v6Networks[i%num1KElements]) + return nil + }) +} From 69e9401838f2354bd597efd6c51ffc762e81d4f1 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sun, 19 Jun 2016 12:55:20 -0400 Subject: [PATCH 6/8] store: add PeerStore tests --- server/store/memory/peer_store_test.go | 160 ++---------------------- server/store/store_tests.go | 162 +++++++++++++++++++++++++ 2 files changed, 175 insertions(+), 147 deletions(-) diff --git a/server/store/memory/peer_store_test.go b/server/store/memory/peer_store_test.go index 42be5ee..0da3ff6 100644 --- a/server/store/memory/peer_store_test.go +++ b/server/store/memory/peer_store_test.go @@ -5,159 +5,25 @@ package memory import ( - "net" "testing" - "time" - "github.com/chihaya/chihaya" "github.com/chihaya/chihaya/server/store" - "github.com/stretchr/testify/require" ) -func peerInSlice(peer chihaya.Peer, peers []chihaya.Peer) bool { - for _, v := range peers { - if v.Equal(peer) { - return true - } +var ( + peerStoreTester = store.PreparePeerStoreTester(&peerStoreDriver{}) + peerStoreTestConfig = &store.DriverConfig{} +) + +func init() { + unmarshalledConfig := struct { + Shards int + }{ + 1, } - return false + peerStoreTestConfig.Config = unmarshalledConfig } -func TestPeerStoreAPI(t *testing.T) { - var ( - hash = chihaya.InfoHash([20]byte{}) - - peers = []struct { - seeder bool - peerID string - ip string - port uint16 - }{ - {false, "-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720}, - {false, "-AZ3042-6ozMq5q6Q3NX", "38.241.13.19", 4833}, - {false, 
"-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878}, - {false, "-AR6360-6oZyyMWoOOBe", "fd0a:29a8:8445::38", 3167}, - {true, "-AG2083-s1hiF8vGAAg0", "231.231.49.173", 1453}, - {true, "-AG3003-lEl2Mm4NEO4n", "254.99.84.77", 7032}, - {true, "-MR1100-00HS~T7*65rm", "211.229.100.129", 2614}, - {true, "-LK0140-ATIV~nbEQAMr", "fdad:c435:bf79::12", 4114}, - {true, "-KT2210-347143496631", "fdda:1b35:7d6e::9", 6179}, - {true, "-TR0960-6ep6svaa61r4", "fd7f:78f0:4c77::55", 4727}, - } - unmarshalledConfig = struct { - Shards int - }{ - 1, - } - config = store.DriverConfig{ - Name: "memory", - Config: unmarshalledConfig, - } - d = &peerStoreDriver{} - ) - s, err := d.New(&config) - require.Nil(t, err) - require.NotNil(t, s) - - for _, p := range peers { - // Construct chihaya.Peer from test data. - peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(p.peerID), - IP: net.ParseIP(p.ip), - Port: p.port, - } - - if p.seeder { - err = s.PutSeeder(hash, peer) - } else { - err = s.PutLeecher(hash, peer) - } - require.Nil(t, err) - } - - leechers1, leechers61, err := s.GetLeechers(hash) - require.Nil(t, err) - require.NotEmpty(t, leechers1) - require.NotEmpty(t, leechers61) - num := s.NumLeechers(hash) - require.Equal(t, len(leechers1)+len(leechers61), num) - - seeders1, seeders61, err := s.GetSeeders(hash) - require.Nil(t, err) - require.NotEmpty(t, seeders1) - require.NotEmpty(t, seeders61) - num = s.NumSeeders(hash) - require.Equal(t, len(seeders1)+len(seeders61), num) - - leechers := append(leechers1, leechers61...) - seeders := append(seeders1, seeders61...) - - for _, p := range peers { - // Construct chihaya.Peer from test data. 
- peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(p.peerID), - IP: net.ParseIP(p.ip), - Port: p.port, - } - - if p.seeder { - require.True(t, peerInSlice(peer, seeders)) - } else { - require.True(t, peerInSlice(peer, leechers)) - } - - if p.seeder { - err = s.DeleteSeeder(hash, peer) - } else { - err = s.DeleteLeecher(hash, peer) - } - require.Nil(t, err) - } - - require.Zero(t, s.NumLeechers(hash)) - require.Zero(t, s.NumSeeders(hash)) - - // Re-add all the peers to the peerStore. - for _, p := range peers { - // Construct chihaya.Peer from test data. - peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(p.peerID), - IP: net.ParseIP(p.ip), - Port: p.port, - } - if p.seeder { - s.PutSeeder(hash, peer) - } else { - s.PutLeecher(hash, peer) - } - } - - // Check that there are 6 seeders, and 4 leechers. - require.Equal(t, 6, s.NumSeeders(hash)) - require.Equal(t, 4, s.NumLeechers(hash)) - peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(peers[0].peerID), - IP: net.ParseIP(peers[0].ip), - Port: peers[0].port, - } - err = s.GraduateLeecher(hash, peer) - require.Nil(t, err) - // Check that there are 7 seeders, and 3 leechers after graduating a - // leecher to a seeder. 
- require.Equal(t, 7, s.NumSeeders(hash)) - require.Equal(t, 3, s.NumLeechers(hash)) - - peers1, peers61, err := s.AnnouncePeers(hash, true, 5, peer, chihaya.Peer{}) - require.Nil(t, err) - require.NotNil(t, peers1) - require.NotNil(t, peers61) - - err = s.CollectGarbage(time.Now()) - require.Nil(t, err) - require.Equal(t, s.NumLeechers(hash), 0) - require.Equal(t, s.NumSeeders(hash), 0) - - errChan := s.Stop() - err = <-errChan - require.Nil(t, err, "PeerStore shutdown must not fail") +func TestPeerStore(t *testing.T) { + peerStoreTester.TestPeerStore(t, peerStoreTestConfig) } diff --git a/server/store/store_tests.go b/server/store/store_tests.go index ea0663d..63053a6 100644 --- a/server/store/store_tests.go +++ b/server/store/store_tests.go @@ -9,6 +9,9 @@ import ( "net" + "time" + + "github.com/chihaya/chihaya" "github.com/stretchr/testify/require" ) @@ -351,3 +354,162 @@ func (s *ipStoreTester) TestHasAllHasAnyNetworks(t *testing.T, cfg *DriverConfig err = <-errChan require.Nil(t, err, "IPStore shutdown must not fail") } + +// PeerStoreTester is a collection of tests for a PeerStore driver. +// Every benchmark expects a new, clean storage. Every benchmark should be +// called with a DriverConfig that ensures this. +type PeerStoreTester interface { + TestPeerStore(*testing.T, *DriverConfig) +} + +var _ PeerStoreTester = &peerStoreTester{} + +type peerStoreTester struct { + driver PeerStoreDriver +} + +// PreparePeerStoreTester prepares a reusable suite for PeerStore driver +// tests. 
+func PreparePeerStoreTester(driver PeerStoreDriver) PeerStoreTester { + return &peerStoreTester{ + driver: driver, + } +} + +func peerInSlice(peer chihaya.Peer, peers []chihaya.Peer) bool { + for _, v := range peers { + if v.Equal(peer) { + return true + } + } + return false +} + +func (pt *peerStoreTester) TestPeerStore(t *testing.T, cfg *DriverConfig) { + var ( + hash = chihaya.InfoHash([20]byte{}) + + peers = []struct { + seeder bool + peerID string + ip string + port uint16 + }{ + {false, "-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720}, + {false, "-AZ3042-6ozMq5q6Q3NX", "38.241.13.19", 4833}, + {false, "-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878}, + {false, "-AR6360-6oZyyMWoOOBe", "fd0a:29a8:8445::38", 3167}, + {true, "-AG2083-s1hiF8vGAAg0", "231.231.49.173", 1453}, + {true, "-AG3003-lEl2Mm4NEO4n", "254.99.84.77", 7032}, + {true, "-MR1100-00HS~T7*65rm", "211.229.100.129", 2614}, + {true, "-LK0140-ATIV~nbEQAMr", "fdad:c435:bf79::12", 4114}, + {true, "-KT2210-347143496631", "fdda:1b35:7d6e::9", 6179}, + {true, "-TR0960-6ep6svaa61r4", "fd7f:78f0:4c77::55", 4727}, + } + ) + s, err := pt.driver.New(cfg) + require.Nil(t, err) + require.NotNil(t, s) + + for _, p := range peers { + // Construct chihaya.Peer from test data. + peer := chihaya.Peer{ + ID: chihaya.PeerIDFromString(p.peerID), + IP: net.ParseIP(p.ip), + Port: p.port, + } + + if p.seeder { + err = s.PutSeeder(hash, peer) + } else { + err = s.PutLeecher(hash, peer) + } + require.Nil(t, err) + } + + leechers1, leechers61, err := s.GetLeechers(hash) + require.Nil(t, err) + require.NotEmpty(t, leechers1) + require.NotEmpty(t, leechers61) + num := s.NumLeechers(hash) + require.Equal(t, len(leechers1)+len(leechers61), num) + + seeders1, seeders61, err := s.GetSeeders(hash) + require.Nil(t, err) + require.NotEmpty(t, seeders1) + require.NotEmpty(t, seeders61) + num = s.NumSeeders(hash) + require.Equal(t, len(seeders1)+len(seeders61), num) + + leechers := append(leechers1, leechers61...) 
+ seeders := append(seeders1, seeders61...) + + for _, p := range peers { + // Construct chihaya.Peer from test data. + peer := chihaya.Peer{ + ID: chihaya.PeerIDFromString(p.peerID), + IP: net.ParseIP(p.ip), + Port: p.port, + } + + if p.seeder { + require.True(t, peerInSlice(peer, seeders)) + } else { + require.True(t, peerInSlice(peer, leechers)) + } + + if p.seeder { + err = s.DeleteSeeder(hash, peer) + } else { + err = s.DeleteLeecher(hash, peer) + } + require.Nil(t, err) + } + + require.Zero(t, s.NumLeechers(hash)) + require.Zero(t, s.NumSeeders(hash)) + + // Re-add all the peers to the peerStore. + for _, p := range peers { + // Construct chihaya.Peer from test data. + peer := chihaya.Peer{ + ID: chihaya.PeerIDFromString(p.peerID), + IP: net.ParseIP(p.ip), + Port: p.port, + } + if p.seeder { + s.PutSeeder(hash, peer) + } else { + s.PutLeecher(hash, peer) + } + } + + // Check that there are 6 seeders, and 4 leechers. + require.Equal(t, 6, s.NumSeeders(hash)) + require.Equal(t, 4, s.NumLeechers(hash)) + peer := chihaya.Peer{ + ID: chihaya.PeerIDFromString(peers[0].peerID), + IP: net.ParseIP(peers[0].ip), + Port: peers[0].port, + } + err = s.GraduateLeecher(hash, peer) + require.Nil(t, err) + // Check that there are 7 seeders, and 3 leechers after graduating a + // leecher to a seeder. 
+ require.Equal(t, 7, s.NumSeeders(hash)) + require.Equal(t, 3, s.NumLeechers(hash)) + + peers1, peers61, err := s.AnnouncePeers(hash, true, 5, peer, chihaya.Peer{}) + require.Nil(t, err) + require.NotNil(t, peers1) + require.NotNil(t, peers61) + + err = s.CollectGarbage(time.Now()) + require.Nil(t, err) + require.Equal(t, s.NumLeechers(hash), 0) + require.Equal(t, s.NumSeeders(hash), 0) + + errChan := s.Stop() + err = <-errChan + require.Nil(t, err, "PeerStore shutdown must not fail") +} From 31581bc1c3971a43a3efa231c1cb15e4701a47ec Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sun, 19 Jun 2016 12:55:45 -0400 Subject: [PATCH 7/8] store: add PeerStore benchmarks --- server/store/memory/peer_store_test.go | 117 +++++- server/store/store_bench.go | 481 +++++++++++++++++++++++++ 2 files changed, 596 insertions(+), 2 deletions(-) diff --git a/server/store/memory/peer_store_test.go b/server/store/memory/peer_store_test.go index 0da3ff6..9a00b17 100644 --- a/server/store/memory/peer_store_test.go +++ b/server/store/memory/peer_store_test.go @@ -11,8 +11,9 @@ import ( ) var ( - peerStoreTester = store.PreparePeerStoreTester(&peerStoreDriver{}) - peerStoreTestConfig = &store.DriverConfig{} + peerStoreTester = store.PreparePeerStoreTester(&peerStoreDriver{}) + peerStoreBenchmarker = store.PreparePeerStoreBenchmarker(&peerStoreDriver{}) + peerStoreTestConfig = &store.DriverConfig{} ) func init() { @@ -27,3 +28,115 @@ func init() { func TestPeerStore(t *testing.T) { peerStoreTester.TestPeerStore(t, peerStoreTestConfig) } + +func BenchmarkPeerStore_PutSeeder(b *testing.B) { + peerStoreBenchmarker.PutSeeder(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutSeeder1KInfohash(b *testing.B) { + peerStoreBenchmarker.PutSeeder1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutSeeder1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutSeeder1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutSeeder1KInfohash1KSeeders(b *testing.B) { + 
peerStoreBenchmarker.PutSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder1KInfohash(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist(b *testing.B) { + peerStoreBenchmarker.DeleteSeederNonExist(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash(b *testing.B) { + peerStoreBenchmarker.DeleteSeederNonExist1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist1KSeeders(b *testing.B) { + peerStoreBenchmarker.DeleteSeederNonExist1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.DeleteSeederNonExist1KInfohash1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist(b *testing.B) { + 
peerStoreBenchmarker.GraduateLeecherNonExist(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash(b *testing.B) { + peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist1KSeeders(b *testing.B) { + peerStoreBenchmarker.GraduateLeecherNonExist1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeers(b *testing.B) { + peerStoreBenchmarker.AnnouncePeers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeers1KInfohash(b *testing.B) { + peerStoreBenchmarker.AnnouncePeers1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeersSeeder(b *testing.B) { + peerStoreBenchmarker.AnnouncePeersSeeder(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeersSeeder1KInfohash(b *testing.B) { + peerStoreBenchmarker.AnnouncePeersSeeder1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GetSeeders(b *testing.B) { + peerStoreBenchmarker.GetSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GetSeeders1KInfohash(b *testing.B) { + peerStoreBenchmarker.GetSeeders1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_NumSeeders(b *testing.B) { + peerStoreBenchmarker.NumSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_NumSeeders1KInfohash(b *testing.B) { + peerStoreBenchmarker.NumSeeders1KInfohash(b, peerStoreTestConfig) +} diff --git a/server/store/store_bench.go b/server/store/store_bench.go index c1b56f3..9e313a3 100644 --- a/server/store/store_bench.go +++ b/server/store/store_bench.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "github.com/chihaya/chihaya" "github.com/stretchr/testify/require" ) @@ -777,3 +778,483 @@ func (ib ipStoreBench) RemoveNonExist1KV6Network(b *testing.B, cfg 
*DriverConfig return nil }) } + +// PeerStoreBenchmarker is a collection of benchmarks for PeerStore drivers. +// Every benchmark expects a new, clean storage. Every benchmark should be +// called with a DriverConfig that ensures this. +type PeerStoreBenchmarker interface { + PutSeeder(*testing.B, *DriverConfig) + PutSeeder1KInfohash(*testing.B, *DriverConfig) + PutSeeder1KSeeders(*testing.B, *DriverConfig) + PutSeeder1KInfohash1KSeeders(*testing.B, *DriverConfig) + + PutDeleteSeeder(*testing.B, *DriverConfig) + PutDeleteSeeder1KInfohash(*testing.B, *DriverConfig) + PutDeleteSeeder1KSeeders(*testing.B, *DriverConfig) + PutDeleteSeeder1KInfohash1KSeeders(*testing.B, *DriverConfig) + + DeleteSeederNonExist(*testing.B, *DriverConfig) + DeleteSeederNonExist1KInfohash(*testing.B, *DriverConfig) + DeleteSeederNonExist1KSeeders(*testing.B, *DriverConfig) + DeleteSeederNonExist1KInfohash1KSeeders(*testing.B, *DriverConfig) + + PutGraduateDeleteLeecher(*testing.B, *DriverConfig) + PutGraduateDeleteLeecher1KInfohash(*testing.B, *DriverConfig) + PutGraduateDeleteLeecher1KLeechers(*testing.B, *DriverConfig) + PutGraduateDeleteLeecher1KInfohash1KLeechers(*testing.B, *DriverConfig) + + GraduateLeecherNonExist(*testing.B, *DriverConfig) + GraduateLeecherNonExist1KInfohash(*testing.B, *DriverConfig) + GraduateLeecherNonExist1KLeechers(*testing.B, *DriverConfig) + GraduateLeecherNonExist1KInfohash1KLeechers(*testing.B, *DriverConfig) + + AnnouncePeers(*testing.B, *DriverConfig) + AnnouncePeers1KInfohash(*testing.B, *DriverConfig) + AnnouncePeersSeeder(*testing.B, *DriverConfig) + AnnouncePeersSeeder1KInfohash(*testing.B, *DriverConfig) + + GetSeeders(*testing.B, *DriverConfig) + GetSeeders1KInfohash(*testing.B, *DriverConfig) + + NumSeeders(*testing.B, *DriverConfig) + NumSeeders1KInfohash(*testing.B, *DriverConfig) +} + +type peerStoreBench struct { + infohashes [num1KElements]chihaya.InfoHash + peers [num1KElements]chihaya.Peer + driver PeerStoreDriver +} + +func 
generateInfohashes() (a [num1KElements]chihaya.InfoHash) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = chihaya.InfoHash([20]byte{b[0], b[1]}) + } + + return +} + +func generatePeers() (a [num1KElements]chihaya.Peer) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = chihaya.Peer{ + ID: chihaya.PeerID([20]byte{b[0], b[1]}), + IP: net.ParseIP(fmt.Sprintf("64.%d.%d.64", b[0], b[1])), + Port: uint16(i), + } + } + + return +} + +// PreparePeerStoreBenchmarker prepares a reusable suite for PeerStore driver +// benchmarks. +func PreparePeerStoreBenchmarker(driver PeerStoreDriver) PeerStoreBenchmarker { + return peerStoreBench{ + driver: driver, + } +} + +type peerStoreSetupFunc func(PeerStore) error + +func peerStoreSetupNOP(PeerStore) error { return nil } + +type peerStoreBenchFunc func(PeerStore, int) error + +func (pb peerStoreBench) runBenchmark(b *testing.B, cfg *DriverConfig, setup peerStoreSetupFunc, execute peerStoreBenchFunc) { + ps, err := pb.driver.New(cfg) + require.Nil(b, err, "Constructor error must be nil") + require.NotNil(b, ps, "Peer store must not be nil") + + err = setup(ps) + require.Nil(b, err, "Benchmark setup must not fail") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + execute(ps, i) + } + b.StopTimer() + + errChan := ps.Stop() + err = <-errChan + require.Nil(b, err, "PeerStore shutdown must not fail") +} + +func (pb peerStoreBench) PutSeeder(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[0], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) PutSeeder1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) PutSeeder1KSeeders(b *testing.B, cfg *DriverConfig) { + 
pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) + return nil + }) +} + +func (pb peerStoreBench) PutSeeder1KInfohash1KSeeders(b *testing.B, cfg *DriverConfig) { + j := 0 + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + j += 3 + return nil + }) +} + +func (pb peerStoreBench) PutDeleteSeeder(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[0], pb.peers[0]) + ps.DeleteSeeder(pb.infohashes[0], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) PutDeleteSeeder1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) + ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) PutDeleteSeeder1KSeeders(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) + ps.DeleteSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) + return nil + }) +} + +func (pb peerStoreBench) PutDeleteSeeder1KInfohash1KSeeders(b *testing.B, cfg *DriverConfig) { + j := 0 + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + j += 3 + return nil + }) +} + +func (pb peerStoreBench) DeleteSeederNonExist(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.DeleteSeeder(pb.infohashes[0], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) 
DeleteSeederNonExist1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) DeleteSeederNonExist1KSeeders(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.DeleteSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) + return nil + }) +} + +func (pb peerStoreBench) DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B, cfg *DriverConfig) { + j := 0 + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + j += 3 + return nil + }) +} + +func (pb peerStoreBench) GraduateLeecherNonExist(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.GraduateLeecher(pb.infohashes[0], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) GraduateLeecherNonExist1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) GraduateLeecherNonExist1KLeechers(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.GraduateLeecher(pb.infohashes[0], pb.peers[i%num1KElements]) + return nil + }) +} + +func (pb peerStoreBench) GraduateLeecherNonExist1KInfohash1KLeechers(b *testing.B, cfg *DriverConfig) { + j := 0 + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + j += 3 + return nil + }) +} + +func (pb peerStoreBench) PutGraduateDeleteLeecher(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, 
+ func(ps PeerStore, i int) error { + ps.PutLeecher(pb.infohashes[0], pb.peers[0]) + ps.GraduateLeecher(pb.infohashes[0], pb.peers[0]) + ps.DeleteSeeder(pb.infohashes[0], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) PutGraduateDeleteLeecher1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutLeecher(pb.infohashes[i%num1KElements], pb.peers[0]) + ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[0]) + ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) + return nil + }) +} + +func (pb peerStoreBench) PutGraduateDeleteLeecher1KLeechers(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutLeecher(pb.infohashes[0], pb.peers[i%num1KElements]) + ps.GraduateLeecher(pb.infohashes[0], pb.peers[i%num1KElements]) + ps.DeleteSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) + return nil + }) +} + +func (pb peerStoreBench) PutGraduateDeleteLeecher1KInfohash1KLeechers(b *testing.B, cfg *DriverConfig) { + j := 0 + pb.runBenchmark(b, cfg, peerStoreSetupNOP, + func(ps PeerStore, i int) error { + ps.PutLeecher(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) + j += 3 + return nil + }) +} + +func (pb peerStoreBench) AnnouncePeers(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.AnnouncePeers(pb.infohashes[0], false, 50, pb.peers[0], chihaya.Peer{}) + 
return nil + }) +} + +func (pb peerStoreBench) AnnouncePeers1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.AnnouncePeers(pb.infohashes[i%num1KElements], false, 50, pb.peers[0], chihaya.Peer{}) + return nil + }) +} + +func (pb peerStoreBench) AnnouncePeersSeeder(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.AnnouncePeers(pb.infohashes[0], true, 50, pb.peers[0], chihaya.Peer{}) + return nil + }) +} + +func (pb peerStoreBench) AnnouncePeersSeeder1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.AnnouncePeers(pb.infohashes[i%num1KElements], true, 50, pb.peers[0], chihaya.Peer{}) + return nil + }) +} + +func (pb peerStoreBench) GetSeeders(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err 
error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.GetSeeders(pb.infohashes[0]) + return nil + }) +} + +func (pb peerStoreBench) GetSeeders1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.GetSeeders(pb.infohashes[i%num1KElements]) + return nil + }) +} + +func (pb peerStoreBench) NumSeeders(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.NumSeeders(pb.infohashes[0]) + return nil + }) +} + +func (pb peerStoreBench) NumSeeders1KInfohash(b *testing.B, cfg *DriverConfig) { + pb.runBenchmark(b, cfg, + func(ps PeerStore) error { + for i := 0; i < num1KElements; i++ { + for j := 0; j < num1KElements; j++ { + var err error + if j < num1KElements/2 { + err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) + } else { + err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) + } + if err != nil { + return err + } + } + } + return nil + }, + func(ps PeerStore, i int) error { + ps.NumSeeders(pb.infohashes[i%num1KElements]) + return nil + }) +} From 9abf72e46aa5fdb536261e2f8ec3ac22cd909649 Mon Sep 17 00:00:00 2001 From: Leo 
Balduf Date: Sat, 18 Jun 2016 23:05:58 -0400 Subject: [PATCH 8/8] store: add README --- server/store/README.md | 43 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 server/store/README.md diff --git a/server/store/README.md b/server/store/README.md new file mode 100644 index 0000000..05ff3ff --- /dev/null +++ b/server/store/README.md @@ -0,0 +1,43 @@ +## The store Package + +The `store` package offers a storage interface and middlewares sufficient to run a public tracker based on it. + +### Architecture + +The store consists of three parts: +- A set of interfaces, tests based on these interfaces and the store logic, unifying these interfaces into the store +- Drivers, implementing the store interfaces and +- Middleware that depends on the store + +The store interfaces are `IPStore`, `PeerStore` and `StringStore`. +During runtime, each of them will be implemented by a driver. +Even though all different drivers for one interface provide the same functionality, their behaviour can be very different. +For example: The memory implementation keeps all state in-memory - this is very fast, but not persistent, it loses its state on every restart. +A database-backed driver on the other hand could provide persistence, at the cost of performance. + +The pluggable design of Chihaya allows for the different interfaces to use different drivers. +For example: A typical use case of the `StringStore` is to provide blacklists or whitelists for infohashes/client IDs/.... +You'd typically want these lists to be persistent, so you'd choose a driver that provides persistence. +The `PeerStore` on the other hand rarely needs to be persistent, as all peer state will be restored after one announce interval. +You'd therefore typically choose a very performant but non-persistent driver for the `PeerStore`. + +### Testing + +The main store package also contains a set of tests and benchmarks for drivers. 
+Both use the store interfaces and can work with any driver that implements these interfaces. +The tests verify that the driver behaves as specified by the interface and its documentation. +The benchmarks can be used to compare performance of a wide range of operations on the interfaces. + +This makes it very easy to implement a new driver: +All functions that are part of the store interfaces can be tested easily with the tests that come with the store package. +Generally the memory implementation can be used as a guideline for implementing new drivers. + +Both benchmarks and tests require a clean state to work correctly. +All of the test and benchmark functions therefore take a `*DriverConfig` as a parameter; this should be used to configure the driver in a way that it provides a clean state for every test or benchmark. +For example: Imagine a file-based driver that achieves persistence by storing its state in a file. +It must then be possible to provide the location of this file in the `DriverConfig`, so that every different benchmark gets to work with a new file. + +Most benchmarks come in two flavors: The "normal" version and the "1K" version. +A normal benchmark uses the same value over and over again to benchmark one operation. +A 1K benchmark uses a different value from a set of 1000 values for every iteration; this can show caching effects if the driver uses them. +The 1K benchmarks require a little more computation to select the values and thus typically yield slightly lower results even for a "perfect" cache, i.e. the memory implementation.