tracker/storage/memory/peer_store.go

// Package memory implements the storage interface for a Chihaya
// BitTorrent tracker keeping peer data in memory.
package memory

import (
	"encoding/binary"
	"net"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"gopkg.in/yaml.v2"

	"github.com/chihaya/chihaya/bittorrent"
	"github.com/chihaya/chihaya/pkg/log"
	"github.com/chihaya/chihaya/storage"
)

// Name is the name by which this peer store is registered with Chihaya.
const Name = "memory"

// Default config constants.
const (
	defaultShardCount                  = 1024
	defaultPrometheusReportingInterval = time.Second * 1
	defaultGarbageCollectionInterval   = time.Minute * 3
	defaultPeerLifetime                = time.Minute * 30
)

func init() {
	// Register the storage driver.
	storage.RegisterDriver(Name, driver{})
}

type driver struct{}

func (d driver) NewPeerStore(icfg interface{}) (storage.PeerStore, error) {
	// Marshal the config back into bytes.
	bytes, err := yaml.Marshal(icfg)
	if err != nil {
		return nil, err
	}

	// Unmarshal the bytes into the proper config type.
	var cfg Config
	err = yaml.Unmarshal(bytes, &cfg)
	if err != nil {
		return nil, err
	}

	return New(cfg)
}
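
// A driver round-trip sketch (hypothetical untyped config, of the kind the
// tracker's YAML loader would hand to NewPeerStore):
//
//	icfg := map[string]interface{}{"shard_count": 64}
//	ps, err := driver{}.NewPeerStore(icfg)
//	if err != nil {
//		// handle the error
//	}
//	_ = ps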

// Config holds the configuration of a memory PeerStore.
type Config struct {
	GarbageCollectionInterval   time.Duration `yaml:"gc_interval"`
	PrometheusReportingInterval time.Duration `yaml:"prometheus_reporting_interval"`
	PeerLifetime                time.Duration `yaml:"peer_lifetime"`
	ShardCount                  int           `yaml:"shard_count"`
}
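
// A minimal configuration sketch (illustrative values only; the YAML keys
// mirror the struct tags above):
//
//	cfg := Config{
//		GarbageCollectionInterval:   3 * time.Minute,
//		PrometheusReportingInterval: time.Second,
//		PeerLifetime:                30 * time.Minute,
//		ShardCount:                  1024,
//	}
//	cfg = cfg.Validate() // invalid or zero values fall back to defaults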

// LogFields renders the current config as a set of Logrus fields.
func (cfg Config) LogFields() log.Fields {
	return log.Fields{
		"name":               Name,
		"gcInterval":         cfg.GarbageCollectionInterval,
		"promReportInterval": cfg.PrometheusReportingInterval,
		"peerLifetime":       cfg.PeerLifetime,
		"shardCount":         cfg.ShardCount,
	}
}

// Validate sanity checks values set in a config and returns a new config with
// default values replacing anything that is invalid.
//
// This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config {
	validcfg := cfg

	if cfg.ShardCount <= 0 {
		validcfg.ShardCount = defaultShardCount
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".ShardCount",
			"provided": cfg.ShardCount,
			"default":  validcfg.ShardCount,
		})
	}

	if cfg.GarbageCollectionInterval <= 0 {
		validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".GarbageCollectionInterval",
			"provided": cfg.GarbageCollectionInterval,
			"default":  validcfg.GarbageCollectionInterval,
		})
	}

	if cfg.PrometheusReportingInterval <= 0 {
		validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".PrometheusReportingInterval",
			"provided": cfg.PrometheusReportingInterval,
			"default":  validcfg.PrometheusReportingInterval,
		})
	}

	if cfg.PeerLifetime <= 0 {
		validcfg.PeerLifetime = defaultPeerLifetime
		log.Warn("falling back to default configuration", log.Fields{
			"name":     Name + ".PeerLifetime",
			"provided": cfg.PeerLifetime,
			"default":  validcfg.PeerLifetime,
		})
	}

	return validcfg
}

// New creates a new PeerStore backed by memory.
func New(provided Config) (storage.PeerStore, error) {
	cfg := provided.Validate()
	ps := &peerStore{
		cfg:    cfg,
		shards: make([]*peerShard, cfg.ShardCount*2),
		closed: make(chan struct{}),
	}

	for i := 0; i < cfg.ShardCount*2; i++ {
		ps.shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
	}

	// Start a goroutine for garbage collection.
	ps.wg.Add(1)
	go func() {
		defer ps.wg.Done()
		for {
			select {
			case <-ps.closed:
				return
			case <-time.After(cfg.GarbageCollectionInterval):
				before := time.Now().Add(-cfg.PeerLifetime)
				log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
				ps.collectGarbage(before)
			}
		}
	}()

	// Start a goroutine for updating our cached system clock.
	ps.wg.Add(1)
	go func() {
		defer ps.wg.Done()
		t := time.NewTicker(1 * time.Second)
		for {
			select {
			case <-ps.closed:
				t.Stop()
				return
			case now := <-t.C:
				ps.setClock(now.UnixNano())
			}
		}
	}()

	// Start a goroutine for reporting statistics to Prometheus.
	ps.wg.Add(1)
	go func() {
		defer ps.wg.Done()
		t := time.NewTicker(cfg.PrometheusReportingInterval)
		for {
			select {
			case <-ps.closed:
				t.Stop()
				return
			case <-t.C:
				before := time.Now()
				ps.populateProm()
				log.Debug("storage: populateProm() finished", log.Fields{"timeTaken": time.Since(before)})
			}
		}
	}()

	return ps, nil
}
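
// A usage sketch (the zero-value Config is acceptable because Validate
// replaces every invalid field with its default):
//
//	ps, err := New(Config{})
//	if err != nil {
//		// handle the error
//	}
//	defer func() { <-ps.Stop() }()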

// serializedPeer is a peer flattened into a string: a 20-byte peer ID,
// followed by a 2-byte big-endian port, followed by the raw 4- or 16-byte
// IP address.
type serializedPeer string

func newPeerKey(p bittorrent.Peer) serializedPeer {
	b := make([]byte, 20+2+len(p.IP.IP))
	copy(b[:20], p.ID[:])
	binary.BigEndian.PutUint16(b[20:22], p.Port)
	copy(b[22:], p.IP.IP)

	return serializedPeer(b)
}

func decodePeerKey(pk serializedPeer) bittorrent.Peer {
	peer := bittorrent.Peer{
		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
	}

	if ip := peer.IP.To4(); ip != nil {
		peer.IP.IP = ip
		peer.IP.AddressFamily = bittorrent.IPv4
	} else if len(peer.IP.IP) == net.IPv6len { // implies peer.IP.To4() == nil
		peer.IP.AddressFamily = bittorrent.IPv6
	} else {
		panic("IP is neither v4 nor v6")
	}

	return peer
}
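
// A round-trip sketch (hypothetical peer p with a populated ID, Port, and IP):
//
//	pk := newPeerKey(p)    // ID | big-endian port | raw IP
//	q := decodePeerKey(pk) // q.ID == p.ID, q.Port == p.Port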

type peerShard struct {
	swarms      map[bittorrent.InfoHash]swarm
	numSeeders  uint64
	numLeechers uint64
	sync.RWMutex
}

type swarm struct {
	// map serialized peer to mtime
	seeders  map[serializedPeer]int64
	leechers map[serializedPeer]int64
}

type peerStore struct {
	cfg    Config
	shards []*peerShard

	// clock stores the current time in nanoseconds, updated every second.
	// Must be accessed atomically!
	clock int64

	closed chan struct{}
	wg     sync.WaitGroup
}

var _ storage.PeerStore = &peerStore{}

// populateProm aggregates metrics over all shards and then posts them to
// Prometheus.
func (ps *peerStore) populateProm() {
	var numInfohashes, numSeeders, numLeechers uint64

	for _, s := range ps.shards {
		s.RLock()
		numInfohashes += uint64(len(s.swarms))
		numSeeders += s.numSeeders
		numLeechers += s.numLeechers
		s.RUnlock()
	}

	storage.PromInfohashesCount.Set(float64(numInfohashes))
	storage.PromSeedersCount.Set(float64(numSeeders))
	storage.PromLeechersCount.Set(float64(numLeechers))
}

// recordGCDuration records the duration of a GC sweep.
func recordGCDuration(duration time.Duration) {
	storage.PromGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}

func (ps *peerStore) getClock() int64 {
	return atomic.LoadInt64(&ps.clock)
}

func (ps *peerStore) setClock(to int64) {
	atomic.StoreInt64(&ps.clock, to)
}

func (ps *peerStore) shardIndex(infoHash bittorrent.InfoHash, af bittorrent.AddressFamily) uint32 {
	// There are twice as many shards as the user requested: the first half
	// is dedicated to IPv4 swarms and the second half is dedicated to IPv6
	// swarms.
	idx := binary.BigEndian.Uint32(infoHash[:4]) % (uint32(len(ps.shards)) / 2)
	if af == bittorrent.IPv6 {
		idx += uint32(len(ps.shards) / 2)
	}
	return idx
}
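
// For example, with the default ShardCount of 1024 there are 2048 shards in
// total: an infohash's IPv4 swarm lands on some idx in [0, 1024), and its
// IPv6 swarm lands on idx + 1024.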

func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)

	shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
	shard.Lock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.swarms[ih] = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
	}

	// If this peer isn't already a seeder, update the stats for the swarm.
	if _, ok := shard.swarms[ih].seeders[pk]; !ok {
		shard.numSeeders++
	}

	// Update the peer in the swarm.
	shard.swarms[ih].seeders[pk] = ps.getClock()

	shard.Unlock()
	return nil
}

func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)

	shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
	shard.Lock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.Unlock()
		return storage.ErrResourceDoesNotExist
	}

	if _, ok := shard.swarms[ih].seeders[pk]; !ok {
		shard.Unlock()
		return storage.ErrResourceDoesNotExist
	}

	shard.numSeeders--
	delete(shard.swarms[ih].seeders, pk)

	// Remove the swarm entirely once both peer maps are empty.
	if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
		delete(shard.swarms, ih)
	}

	shard.Unlock()
	return nil
}

func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)

	shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
	shard.Lock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.swarms[ih] = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
	}

	// If this peer isn't already a leecher, update the stats for the swarm.
	if _, ok := shard.swarms[ih].leechers[pk]; !ok {
		shard.numLeechers++
	}

	// Update the peer in the swarm.
	shard.swarms[ih].leechers[pk] = ps.getClock()

	shard.Unlock()
	return nil
}

func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)

	shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
	shard.Lock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.Unlock()
		return storage.ErrResourceDoesNotExist
	}

	if _, ok := shard.swarms[ih].leechers[pk]; !ok {
		shard.Unlock()
		return storage.ErrResourceDoesNotExist
	}

	shard.numLeechers--
	delete(shard.swarms[ih].leechers, pk)

	// Remove the swarm entirely once both peer maps are empty.
	if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
		delete(shard.swarms, ih)
	}

	shard.Unlock()
	return nil
}

func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)

	shard := ps.shards[ps.shardIndex(ih, p.IP.AddressFamily)]
	shard.Lock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.swarms[ih] = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
	}

	// If this peer is a leecher, update the stats for the swarm and remove them.
	if _, ok := shard.swarms[ih].leechers[pk]; ok {
		shard.numLeechers--
		delete(shard.swarms[ih].leechers, pk)
	}

	// If this peer isn't already a seeder, update the stats for the swarm.
	if _, ok := shard.swarms[ih].seeders[pk]; !ok {
		shard.numSeeders++
	}

	// Update the peer in the swarm.
	shard.swarms[ih].seeders[pk] = ps.getClock()

	shard.Unlock()
	return nil
}

func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	shard := ps.shards[ps.shardIndex(ih, announcer.IP.AddressFamily)]
	shard.RLock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.RUnlock()
		return nil, storage.ErrResourceDoesNotExist
	}

	if seeder {
		// Append as many leechers as possible.
		leechers := shard.swarms[ih].leechers
		for pk := range leechers {
			if numWant == 0 {
				break
			}

			peers = append(peers, decodePeerKey(pk))
			numWant--
		}
	} else {
		// Append as many seeders as possible.
		seeders := shard.swarms[ih].seeders
		for pk := range seeders {
			if numWant == 0 {
				break
			}

			peers = append(peers, decodePeerKey(pk))
			numWant--
		}

		// Append leechers until we reach numWant, skipping the announcer itself.
		if numWant > 0 {
			leechers := shard.swarms[ih].leechers
			announcerPK := newPeerKey(announcer)
			for pk := range leechers {
				if pk == announcerPK {
					continue
				}

				if numWant == 0 {
					break
				}

				peers = append(peers, decodePeerKey(pk))
				numWant--
			}
		}
	}

	shard.RUnlock()
	return
}
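
// An announce sketch (hypothetical infohash ih and leeching announcer p):
//
//	peers, err := ps.AnnouncePeers(ih, false, 50, p) // seeders first, then other leechers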

func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) (resp bittorrent.Scrape) {
	select {
	case <-ps.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	resp.InfoHash = ih
	shard := ps.shards[ps.shardIndex(ih, addressFamily)]
	shard.RLock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.RUnlock()
		return
	}

	resp.Incomplete = uint32(len(shard.swarms[ih].leechers))
	resp.Complete = uint32(len(shard.swarms[ih].seeders))
	shard.RUnlock()

	return
}
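
// A scrape sketch (hypothetical infohash ih):
//
//	s := ps.ScrapeSwarm(ih, bittorrent.IPv4) // s.Complete seeders, s.Incomplete leechers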

// collectGarbage deletes all Peers from the PeerStore which are older than the
// cutoff time.
//
// This function must be able to execute while other methods on this interface
// are being executed in parallel.
func (ps *peerStore) collectGarbage(cutoff time.Time) error {
	select {
	case <-ps.closed:
		return nil
	default:
	}

	cutoffUnix := cutoff.UnixNano()
	start := time.Now()

	for _, shard := range ps.shards {
		shard.RLock()
		var infohashes []bittorrent.InfoHash
		for ih := range shard.swarms {
			infohashes = append(infohashes, ih)
		}
		shard.RUnlock()
		runtime.Gosched()

		for _, ih := range infohashes {
			shard.Lock()

			if _, stillExists := shard.swarms[ih]; !stillExists {
				shard.Unlock()
				runtime.Gosched()
				continue
			}

			for pk, mtime := range shard.swarms[ih].leechers {
				if mtime <= cutoffUnix {
					shard.numLeechers--
					delete(shard.swarms[ih].leechers, pk)
				}
			}

			for pk, mtime := range shard.swarms[ih].seeders {
				if mtime <= cutoffUnix {
					shard.numSeeders--
					delete(shard.swarms[ih].seeders, pk)
				}
			}

			if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
				delete(shard.swarms, ih)
			}

			shard.Unlock()
			runtime.Gosched()
		}

		runtime.Gosched()
	}

	recordGCDuration(time.Since(start))

	return nil
}

func (ps *peerStore) Stop() <-chan error {
	c := make(chan error)
	go func() {
		close(ps.closed)
		ps.wg.Wait()

		// Explicitly deallocate our storage.
		shards := make([]*peerShard, len(ps.shards))
		for i := 0; i < len(ps.shards); i++ {
			shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
		}
		ps.shards = shards

		close(c)
	}()

	return c
}

func (ps *peerStore) LogFields() log.Fields {
	return ps.cfg.LogFields()
}