storage/redis: refactor redis storage

- Based on @duyanghao's PR
  - Make staticcheck pass
  - Address review comments
onestraw 2018-12-12 15:26:11 +08:00
parent e78892d5ac
commit d65ab677e7
6 changed files with 471 additions and 312 deletions


@ -20,12 +20,14 @@ Differentiating features include:
- IPv4 and IPv6 support - IPv4 and IPv6 support
- [YAML] configuration - [YAML] configuration
- Metrics via [Prometheus] - Metrics via [Prometheus]
- High Availability via [Redis]
[releases]: https://github.com/chihaya/chihaya/releases [releases]: https://github.com/chihaya/chihaya/releases
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker [BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
[Go]: https://golang.org [Go]: https://golang.org
[YAML]: http://yaml.org [YAML]: http://yaml.org
[Prometheus]: http://prometheus.io [Prometheus]: http://prometheus.io
[Redis]: https://redis.io
## Why Chihaya? ## Why Chihaya?
@ -155,10 +157,6 @@ After all PreHooks have executed, any missing response fields that are required
PostHooks are asynchronous tasks that occur after a response has been delivered to the client. PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
Request data is written to the storage asynchronously in one of these PostHooks. Request data is written to the storage asynchronously in one of these PostHooks.
### HA(High Availability)
Chihaya can achieve high availability by using [redis](https://redis.io/) storage backend.
## Related projects ## Related projects
- [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs - [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs

72
docs/storage/redis.md Normal file

@ -0,0 +1,72 @@
# Redis Storage
This storage backend separates chihaya from its peer data by storing all peer data in Redis, which enables HA.
## Use Case
When one chihaya instance goes down, Redis can continue to serve peer data through the other chihaya instances.
## Configuration
```yaml
chihaya:
  storage:
    name: redis
    config:
      # The frequency at which stale peers are removed.
      gc_interval: 14m
      # The frequency at which metrics are pushed into a local Prometheus endpoint.
      prometheus_reporting_interval: 1s
      # The amount of time until a peer is considered stale.
      # To avoid churn, keep this slightly larger than `announce_interval`.
      peer_lifetime: 16m
      # The address of the redis storage.
      redis_broker: "redis://pwd@127.0.0.1:6379/0"
      # The timeout for reading a command reply from redis.
      redis_read_timeout: 15s
      # The timeout for writing a command to redis.
      redis_write_timeout: 15s
      # The timeout for connecting to the redis server.
      redis_connect_timeout: 15s
```
## Implementation
Seeders and leechers for a particular InfoHash are stored in a Redis hash: the infohash is the hash key, the peer key is the field, and the last-modified time is the value.
All the InfoHashes (swarms) are also stored in a Redis hash, with the IP family as the key, the infohash as the field, and the last-modified time as the value.
Here is an example:
```
- IPv4
  - IPv4_S_<infohash 1>: <modification time>
  - IPv4_L_<infohash 1>: <modification time>
  - IPv4_S_<infohash 2>: <modification time>
- IPv4_S_<infohash 1>
  - <peer 1 key>: <modification time>
  - <peer 2 key>: <modification time>
- IPv4_L_<infohash 1>
  - <peer 3 key>: <modification time>
- IPv4_S_<infohash 2>
  - <peer 3 key>: <modification time>
```
In this case, Prometheus will record two swarms, three seeders, and one leecher.
So three keys are used to record the counts of swarms, seeders, and leechers for each group (IPv4, IPv6):
```
- IPv4_infohash_count: 2
- IPv4_S_count: 3
- IPv4_L_count: 1
```
Note: `IPv4_infohash_count` has a different meaning than in the `memory` storage; it represents the number of infohashes reported by seeders.
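
To make the layout concrete, below is a minimal, hypothetical Go sketch (not part of this commit) that inspects these keys with redigo, the client library this backend uses. The Redis address and the infohash value are placeholder assumptions.

```go
package main

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

func main() {
	// Assumes a local Redis instance populated by chihaya as described above.
	conn, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Per-group counters kept alongside the hashes (maintained via INCR/DECR).
	numInfohashes, _ := redis.Int64(conn.Do("GET", "IPv4_infohash_count"))
	numSeeders, _ := redis.Int64(conn.Do("GET", "IPv4_S_count"))
	numLeechers, _ := redis.Int64(conn.Do("GET", "IPv4_L_count"))
	fmt.Println("swarms:", numInfohashes, "seeders:", numSeeders, "leechers:", numLeechers)

	// Peers of a single swarm; the hex string stands in for a real infohash.
	peers, _ := redis.StringMap(conn.Do("HGETALL", "IPv4_S_aabbccddeeff00112233445566778899aabbccdd"))
	for peerKey, mtime := range peers {
		fmt.Println(peerKey, "last modified at", mtime)
	}
}
```

Because the counter keys are plain strings kept up to date by the store, a single GET per group is enough for metrics aggregation.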


@ -1,61 +0,0 @@
package common
import (
"time"
"github.com/garyburd/redigo/redis"
)
// RedisConnector ...
type RedisConnector struct{}
// NewPool returns a new pool of Redis connections
func (rc *RedisConnector) NewPool(socketPath, host, password string, db int) *redis.Pool {
return &redis.Pool{
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := rc.open(socketPath, host, password, db)
if err != nil {
return nil, err
}
if db != 0 {
_, err = c.Do("SELECT", db)
if err != nil {
return nil, err
}
}
return c, err
},
// PINGs connections that have been idle more than 10 seconds
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if time.Since(t) < time.Duration(10*time.Second) {
return nil
}
_, err := c.Do("PING")
return err
},
}
}
// Open a new Redis connection
func (rc *RedisConnector) open(socketPath, host, password string, db int) (redis.Conn, error) {
var opts = []redis.DialOption{
redis.DialDatabase(db),
redis.DialReadTimeout(15 * time.Second),
redis.DialWriteTimeout(15 * time.Second),
redis.DialConnectTimeout(15 * time.Second),
}
if password != "" {
opts = append(opts, redis.DialPassword(password))
}
if socketPath != "" {
return redis.Dial("unix", socketPath, opts...)
}
return redis.Dial("tcp", host, opts...)
}


@ -1,29 +1,43 @@
// Package redis implements the storage interface for a Chihaya // Package redis implements the storage interface for a Chihaya
// BitTorrent tracker keeping peer data in redis. // BitTorrent tracker keeping peer data in redis with hash.
// There are two categories of hash:
//
// - IPv{4,6}_{L,S}_infohash
// To save peers that hold the infohash, used for fast searching,
// deleting, and timeout handling
//
// - IPv{4,6}
// To save all the infohashes, used for garbage collection,
// metrics aggregation and leecher graduation
//
// Three keys are used to record the count of swarms, seeders
// and leechers for each group (IPv4, IPv6).
//
// - IPv{4,6}_infohash_count
// To record the number of infohashes.
//
// - IPv{4,6}_S_count
// To record the number of seeders.
//
// - IPv{4,6}_L_count
// To record the number of leechers.
package redis package redis
import ( import (
"encoding/binary" "encoding/binary"
"net" "net"
neturl "net/url" "strconv"
"sync" "sync"
"time" "time"
"gopkg.in/yaml.v2" "github.com/gomodule/redigo/redis"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log" "github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/pkg/timecache" "github.com/chihaya/chihaya/pkg/timecache"
"github.com/chihaya/chihaya/storage" "github.com/chihaya/chihaya/storage"
"errors"
"fmt"
"strconv"
"strings"
"github.com/chihaya/chihaya/storage/redis/common"
"github.com/garyburd/redigo/redis"
"gopkg.in/redsync.v1"
) )
// Name is the name by which this peer store is registered with Chihaya. // Name is the name by which this peer store is registered with Chihaya.
@ -35,6 +49,9 @@ const (
defaultGarbageCollectionInterval = time.Minute * 3 defaultGarbageCollectionInterval = time.Minute * 3
defaultPeerLifetime = time.Minute * 30 defaultPeerLifetime = time.Minute * 30
defaultRedisBroker = "redis://myRedis@127.0.0.1:6379/0" defaultRedisBroker = "redis://myRedis@127.0.0.1:6379/0"
defaultRedisReadTimeout = time.Second * 15
defaultRedisWriteTimeout = time.Second * 15
defaultRedisConnectTimeout = time.Second * 15
) )
func init() { func init() {
@ -67,16 +84,22 @@ type Config struct {
PrometheusReportingInterval time.Duration `yaml:"prometheus_reporting_interval"` PrometheusReportingInterval time.Duration `yaml:"prometheus_reporting_interval"`
PeerLifetime time.Duration `yaml:"peer_lifetime"` PeerLifetime time.Duration `yaml:"peer_lifetime"`
RedisBroker string `yaml:"redis_broker"` RedisBroker string `yaml:"redis_broker"`
RedisReadTimeout time.Duration `yaml:"redis_read_timeout"`
RedisWriteTimeout time.Duration `yaml:"redis_write_timeout"`
RedisConnectTimeout time.Duration `yaml:"redis_connect_timeout"`
} }
// LogFields renders the current config as a set of Logrus fields. // LogFields renders the current config as a set of Logrus fields.
func (cfg Config) LogFields() log.Fields { func (cfg Config) LogFields() log.Fields {
return log.Fields{ return log.Fields{
"name": Name, "name": Name,
"gcInterval": cfg.GarbageCollectionInterval, "gcInterval": cfg.GarbageCollectionInterval,
"promReportInterval": cfg.PrometheusReportingInterval, "promReportInterval": cfg.PrometheusReportingInterval,
"peerLifetime": cfg.PeerLifetime, "peerLifetime": cfg.PeerLifetime,
"redisBroker": cfg.RedisBroker, "redisBroker": cfg.RedisBroker,
"redisReadTimeout": cfg.RedisReadTimeout,
"redisWriteTimeout": cfg.RedisWriteTimeout,
"redisConnectTimeout": cfg.RedisConnectTimeout,
} }
} }
@ -96,6 +119,33 @@ func (cfg Config) Validate() Config {
}) })
} }
if cfg.RedisReadTimeout <= 0 {
validcfg.RedisReadTimeout = defaultRedisReadTimeout
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".RedisReadTimeout",
"provided": cfg.RedisReadTimeout,
"default": validcfg.RedisReadTimeout,
})
}
if cfg.RedisWriteTimeout <= 0 {
validcfg.RedisWriteTimeout = defaultRedisWriteTimeout
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".RedisWriteTimeout",
"provided": cfg.RedisWriteTimeout,
"default": validcfg.RedisWriteTimeout,
})
}
if cfg.RedisConnectTimeout <= 0 {
validcfg.RedisConnectTimeout = defaultRedisConnectTimeout
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".RedisConnectTimeout",
"provided": cfg.RedisConnectTimeout,
"default": validcfg.RedisConnectTimeout,
})
}
if cfg.GarbageCollectionInterval <= 0 { if cfg.GarbageCollectionInterval <= 0 {
validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
log.Warn("falling back to default configuration", log.Fields{ log.Warn("falling back to default configuration", log.Fields{
@ -126,74 +176,18 @@ func (cfg Config) Validate() Config {
return validcfg return validcfg
} }
// ParseRedisURL ...
func ParseRedisURL(url string) (host, password string, db int, err error) {
// redis://pwd@host/db
var u *neturl.URL
u, err = neturl.Parse(url)
if err != nil {
return
}
if u.Scheme != "redis" {
err = errors.New("No redis scheme found")
return
}
if u.User != nil {
password = u.User.String()
}
host = u.Host
parts := strings.Split(u.Path, "/")
if len(parts) == 1 {
db = 0 //default redis db
} else {
db, err = strconv.Atoi(parts[1])
if err != nil {
db, err = 0, nil //ignore err here
}
}
return
}
// NewRedisBackend creates RedisBackend instance
func NewRedisBackend(host, password, socketPath string, db int) *RedisBackend {
return &RedisBackend{
host: host,
db: db,
password: password,
socketPath: socketPath,
}
}
// open returns or creates instance of Redis connection
func (rb *RedisBackend) open() redis.Conn {
if rb.pool == nil {
rb.pool = rb.NewPool(rb.socketPath, rb.host, rb.password, rb.db)
}
if rb.redsync == nil {
var pools = []redsync.Pool{rb.pool}
rb.redsync = redsync.New(pools)
}
return rb.pool.Get()
}
// New creates a new PeerStore backed by redis. // New creates a new PeerStore backed by redis.
func New(provided Config) (storage.PeerStore, error) { func New(provided Config) (storage.PeerStore, error) {
cfg := provided.Validate() cfg := provided.Validate()
// creates RedisBackend instance u, err := parseRedisURL(cfg.RedisBroker)
h, p, db, err := ParseRedisURL(cfg.RedisBroker)
if err != nil { if err != nil {
return nil, err return nil, err
} }
ps := &peerStore{ ps := &peerStore{
cfg: cfg, cfg: cfg,
rb: NewRedisBackend(h, p, "", db), rb: newRedisBackend(&provided, u, ""),
closed: make(chan struct{}), closed: make(chan struct{}),
} }
@ -263,83 +257,76 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
return peer return peer
} }
// RedisBackend represents a Memcache result backend
type RedisBackend struct {
host string
password string
db int
pool *redis.Pool
// If set, path to a socket file overrides hostname
socketPath string
redsync *redsync.Redsync
common.RedisConnector
}
type peerStore struct { type peerStore struct {
cfg Config cfg Config
rb *RedisBackend rb *redisBackend
closed chan struct{} closed chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
} }
// populateProm aggregates metrics over all shards and then posts them to func (ps *peerStore) groups() []string {
return []string{bittorrent.IPv4.String(), bittorrent.IPv6.String()}
}
func (ps *peerStore) leecherInfohashKey(af, ih string) string {
return af + "_L_" + ih
}
func (ps *peerStore) seederInfohashKey(af, ih string) string {
return af + "_S_" + ih
}
func (ps *peerStore) infohashCountKey(af string) string {
return af + "_infohash_count"
}
func (ps *peerStore) seederCountKey(af string) string {
return af + "_S_count"
}
func (ps *peerStore) leecherCountKey(af string) string {
return af + "_L_count"
}
// populateProm aggregates metrics over all groups and then posts them to
// prometheus. // prometheus.
func (ps *peerStore) populateProm() { func (ps *peerStore) populateProm() {
var numInfohashes, numSeeders, numLeechers uint64 var numInfohashes, numSeeders, numLeechers int64
shards := [2]string{bittorrent.IPv4.String(), bittorrent.IPv6.String()}
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
for _, shard := range shards { for _, group := range ps.groups() {
infohashes_list, err := conn.Do("HKEYS", shard) // key if n, err := conn.Do("GET", ps.infohashCountKey(group)); err != nil {
if err != nil { log.Error("storage: GET counter failure", log.Fields{
return "key": ps.infohashCountKey(group),
"error": err,
})
} else {
numInfohashes += n.(int64)
} }
infohashes := infohashes_list.([]interface{}) if n, err := conn.Do("GET", ps.seederCountKey(group)); err != nil {
log.Error("storage: GET counter failure", log.Fields{
InfohashLPrefix := shard + "_L_" "key": ps.seederCountKey(group),
InfohashSPrefix := shard + "_S_" "error": err,
InfohashPrefixLen := len(InfohashLPrefix) })
InfohashesMap := make(map[string]bool) } else {
numSeeders += n.(int64)
for _, ih := range infohashes { }
ih_str := string(ih.([]byte)) if n, err := conn.Do("GET", ps.leecherCountKey(group)); err != nil {
ih_str_infohash := ih_str[InfohashPrefixLen:] log.Error("storage: GET counter failure", log.Fields{
if strings.HasPrefix(ih_str, InfohashLPrefix) { "key": ps.leecherCountKey(group),
numLeechers++ "error": err,
InfohashesMap[ih_str_infohash] = true })
} else if strings.HasPrefix(ih_str, InfohashSPrefix) { } else {
numSeeders++ numLeechers += n.(int64)
InfohashesMap[ih_str_infohash] = true
} else {
log.Error("storage: invalid Redis state", log.Fields{
"Hkey": shard,
"Hfield": ih_str,
})
}
} }
numInfohashes += uint64(len(InfohashesMap))
} }
storage.PromInfohashesCount.Set(float64(numInfohashes)) storage.PromInfohashesCount.Set(float64(numInfohashes))
storage.PromSeedersCount.Set(float64(numSeeders)) storage.PromSeedersCount.Set(float64(numSeeders))
storage.PromLeechersCount.Set(float64(numLeechers)) storage.PromLeechersCount.Set(float64(numLeechers))
log.Debug("storage: populateProm() aggregates metrics over all shards", log.Fields{
"numInfohashes": float64(numInfohashes),
"numSeeders": float64(numSeeders),
"numLeechers": float64(numLeechers),
})
}
// recordGCDuration records the duration of a GC sweep.
func recordGCDuration(duration time.Duration) {
log.Debug("storage: recordGCDuration", log.Fields{"timeTaken(ms)": float64(duration.Nanoseconds()) / float64(time.Millisecond)})
storage.PromGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
} }
func (ps *peerStore) getClock() int64 { func (ps *peerStore) getClock() int64 {
@ -347,10 +334,10 @@ func (ps *peerStore) getClock() int64 {
} }
func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error { func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
IPver := p.IP.AddressFamily.String() addressFamily := p.IP.AddressFamily.String()
log.Debug("storage: PutSeeder", log.Fields{ log.Debug("storage: PutSeeder", log.Fields{
"InfoHash": ih.String(), "InfoHash": ih.String(),
"Peer": fmt.Sprintf("[ID: %s, IP: %s(AddressFamily: %s), Port %d]", p.ID.String(), p.IP.String(), IPver, p.Port), "Peer": p,
}) })
select { select {
@ -361,29 +348,43 @@ func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error
pk := newPeerKey(p) pk := newPeerKey(p)
encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, ih.String())
ct := ps.getClock()
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
// Update the peer in the swarm. conn.Send("MULTI")
encodedSeederInfoHash := IPver + "_S_" + ih.String() conn.Send("HSET", encodedSeederInfoHash, pk, ct)
ct := ps.getClock() conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
_, err := conn.Do("HSET", encodedSeederInfoHash, pk, ct) reply, err := redis.Values(conn.Do("EXEC"))
if err != nil { if err != nil {
return err return err
} }
_, err = conn.Do("HSET", IPver, encodedSeederInfoHash, ct)
if err != nil { // pk is a new field.
return err if reply[0].(int64) == 1 {
_, err = conn.Do("INCR", ps.seederCountKey(addressFamily))
if err != nil {
return err
}
}
// encodedSeederInfoHash is a new field.
if reply[1].(int64) == 1 {
_, err = conn.Do("INCR", ps.infohashCountKey(addressFamily))
if err != nil {
return err
}
} }
return nil return nil
} }
func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error { func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
IPver := p.IP.AddressFamily.String() addressFamily := p.IP.AddressFamily.String()
log.Debug("storage: DeleteSeeder", log.Fields{ log.Debug("storage: DeleteSeeder", log.Fields{
"InfoHash": ih.String(), "InfoHash": ih.String(),
"Peer": fmt.Sprintf("[ID: %s, IP: %s(AddressFamily: %s), Port %d]", p.ID.String(), p.IP.String(), IPver, p.Port), "Peer": p,
}) })
select { select {
@ -397,24 +398,27 @@ func (ps *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) err
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
encodedSeederInfoHash := IPver + "_S_" + ih.String() encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, ih.String())
DelNum, err := conn.Do("HDEL", encodedSeederInfoHash, pk) delNum, err := conn.Do("HDEL", encodedSeederInfoHash, pk)
if err != nil { if err != nil {
return err return err
} }
if DelNum.(int64) == 0 { if delNum.(int64) == 0 {
return storage.ErrResourceDoesNotExist return storage.ErrResourceDoesNotExist
} }
if _, err := conn.Do("DECR", ps.seederCountKey(addressFamily)); err != nil {
return err
}
return nil return nil
} }
func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error { func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
IPver := p.IP.AddressFamily.String() addressFamily := p.IP.AddressFamily.String()
log.Debug("storage: PutLeecher", log.Fields{ log.Debug("storage: PutLeecher", log.Fields{
"InfoHash": ih.String(), "InfoHash": ih.String(),
"Peer": fmt.Sprintf("[ID: %s, IP: %s(AddressFamily: %s), Port %d]", p.ID.String(), p.IP.String(), IPver, p.Port), "Peer": p,
}) })
select { select {
@ -423,31 +427,36 @@ func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
default: default:
} }
// Update the peer in the swarm.
encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, ih.String())
pk := newPeerKey(p) pk := newPeerKey(p)
ct := ps.getClock()
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
// Update the peer in the swarm. conn.Send("MULTI")
encodedLeecherInfoHash := IPver + "_L_" + ih.String() conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
ct := ps.getClock() conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
_, err := conn.Do("HSET", encodedLeecherInfoHash, pk, ct) reply, err := redis.Values(conn.Do("EXEC"))
if err != nil { if err != nil {
return err return err
} }
_, err = conn.Do("HSET", IPver, encodedLeecherInfoHash, ct) // pk is a new field.
if err != nil { if reply[0].(int64) == 1 {
return err _, err = conn.Do("INCR", ps.leecherCountKey(addressFamily))
if err != nil {
return err
}
} }
return nil return nil
} }
func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error { func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
IPver := p.IP.AddressFamily.String() addressFamily := p.IP.AddressFamily.String()
log.Debug("storage: DeleteLeecher", log.Fields{ log.Debug("storage: DeleteLeecher", log.Fields{
"InfoHash": ih.String(), "InfoHash": ih.String(),
"Peer": fmt.Sprintf("[ID: %s, IP: %s(AddressFamily: %s), Port %d]", p.ID.String(), p.IP.String(), IPver, p.Port), "Peer": p,
}) })
select { select {
@ -456,29 +465,31 @@ func (ps *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) er
default: default:
} }
pk := newPeerKey(p)
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
encodedLeecherInfoHash := IPver + "_L_" + ih.String() pk := newPeerKey(p)
encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, ih.String())
DelNum, err := conn.Do("HDEL", encodedLeecherInfoHash, pk) delNum, err := conn.Do("HDEL", encodedLeecherInfoHash, pk)
if err != nil { if err != nil {
return err return err
} }
if DelNum.(int64) == 0 { if delNum.(int64) == 0 {
return storage.ErrResourceDoesNotExist return storage.ErrResourceDoesNotExist
} }
if _, err := conn.Do("DECR", ps.leecherCountKey(addressFamily)); err != nil {
return err
}
return nil return nil
} }
func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error { func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
IPver := p.IP.AddressFamily.String() addressFamily := p.IP.AddressFamily.String()
log.Debug("storage: GraduateLeecher", log.Fields{ log.Debug("storage: GraduateLeecher", log.Fields{
"InfoHash": ih.String(), "InfoHash": ih.String(),
"Peer": fmt.Sprintf("[ID: %s, IP: %s(AddressFamily: %s), Port %d]", p.ID.String(), p.IP.String(), IPver, p.Port), "Peer": p,
}) })
select { select {
@ -487,41 +498,52 @@ func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer)
default: default:
} }
encodedInfoHash := ih.String()
encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)
pk := newPeerKey(p) pk := newPeerKey(p)
ct := ps.getClock()
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
encodedInfoHash := ih.String() conn.Send("MULTI")
encodedLeecherInfoHash := IPver + "_L_" + encodedInfoHash conn.Send("HDEL", encodedLeecherInfoHash, pk)
encodedSeederInfoHash := IPver + "_S_" + encodedInfoHash conn.Send("HSET", encodedSeederInfoHash, pk, ct)
conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
_, err := conn.Do("HDEL", encodedLeecherInfoHash, pk) reply, err := redis.Values(conn.Do("EXEC"))
if err != nil { if err != nil {
return err return err
} }
if reply[0].(int64) == 1 {
// Update the peer in the swarm. _, err = conn.Do("DECR", ps.leecherCountKey(addressFamily))
ct := ps.getClock() if err != nil {
_, err = conn.Do("HSET", encodedSeederInfoHash, pk, ct) return err
if err != nil { }
return err
} }
_, err = conn.Do("HSET", IPver, encodedSeederInfoHash, ct) if reply[1].(int64) == 1 {
if err != nil { _, err = conn.Do("INCR", ps.seederCountKey(addressFamily))
return err if err != nil {
return err
}
}
if reply[2].(int64) == 1 {
_, err = conn.Do("INCR", ps.infohashCountKey(addressFamily))
if err != nil {
return err
}
} }
return nil return nil
} }
func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) { func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
IPver := announcer.IP.AddressFamily.String() addressFamily := announcer.IP.AddressFamily.String()
log.Debug("storage: AnnouncePeers", log.Fields{ log.Debug("storage: AnnouncePeers", log.Fields{
"InfoHash": ih.String(), "InfoHash": ih.String(),
"seeder": seeder, "seeder": seeder,
"numWant": numWant, "numWant": numWant,
"Peer": fmt.Sprintf("[ID: %s, IP: %s(AddressFamily: %s), Port %d]", announcer.ID.String(), announcer.IP.String(), IPver, announcer.Port), "Peer": announcer,
}) })
select { select {
@ -531,8 +553,8 @@ func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant
} }
encodedInfoHash := ih.String() encodedInfoHash := ih.String()
encodedLeecherInfoHash := IPver + "_L_" + encodedInfoHash // key encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
encodedSeederInfoHash := IPver + "_S_" + encodedInfoHash // key encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
@ -592,18 +614,10 @@ func (ps *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant
} }
} }
APResult := ""
for _, pr := range peers {
APResult = fmt.Sprintf("%s Peer:[ID: %s, IP: %s(AddressFamily: %s), Port %d]", APResult, pr.ID.String(), pr.IP.String(), IPver, pr.Port)
}
log.Debug("storage: AnnouncePeers result", log.Fields{
"peers": APResult,
})
return return
} }
func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) (resp bittorrent.Scrape) { func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, af bittorrent.AddressFamily) (resp bittorrent.Scrape) {
select { select {
case <-ps.closed: case <-ps.closed:
panic("attempted to interact with stopped redis store") panic("attempted to interact with stopped redis store")
@ -611,10 +625,10 @@ func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorren
} }
resp.InfoHash = ih resp.InfoHash = ih
IPver := addressFamily.String() addressFamily := af.String()
encodedInfoHash := ih.String() encodedInfoHash := ih.String()
encodedLeecherInfoHash := IPver + "_L_" + encodedInfoHash // key encodedLeecherInfoHash := ps.leecherInfohashKey(addressFamily, encodedInfoHash)
encodedSeederInfoHash := IPver + "_S_" + encodedInfoHash // key encodedSeederInfoHash := ps.seederInfohashKey(addressFamily, encodedInfoHash)
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
@ -627,7 +641,6 @@ func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorren
}) })
return return
} }
lLen := leechersLen.(int64)
seedersLen, err := conn.Do("HLEN", encodedSeederInfoHash) seedersLen, err := conn.Do("HLEN", encodedSeederInfoHash)
if err != nil { if err != nil {
@ -637,14 +650,9 @@ func (ps *peerStore) ScrapeSwarm(ih bittorrent.InfoHash, addressFamily bittorren
}) })
return return
} }
sLen := seedersLen.(int64)
if lLen == 0 && sLen == 0 { resp.Incomplete = uint32(leechersLen.(int64))
return resp.Complete = uint32(seedersLen.(int64))
}
resp.Incomplete = uint32(lLen)
resp.Complete = uint32(sLen)
return return
} }
@ -661,16 +669,15 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
default: default:
} }
shards := [2]string{bittorrent.IPv4.String(), bittorrent.IPv6.String()}
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
cutoffUnix := cutoff.UnixNano() cutoffUnix := cutoff.UnixNano()
start := time.Now() start := time.Now()
for _, shard := range shards { for _, group := range ps.groups() {
infohashesList, err := conn.Do("HKEYS", shard) // key // list all infohashes in the group
infohashesList, err := conn.Do("HKEYS", group)
if err != nil { if err != nil {
return err return err
} }
@ -678,51 +685,47 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
for _, ih := range infohashes { for _, ih := range infohashes {
ihStr := string(ih.([]byte)) ihStr := string(ih.([]byte))
isSeeder := len(ihStr) > 5 && ihStr[5:6] == "S"
ihList, err := conn.Do("HGETALL", ihStr) // field // list all (peer, timeout) pairs for the ih
ihList, err := conn.Do("HGETALL", ihStr)
if err != nil { if err != nil {
return err return err
} }
conIhList := ihList.([]interface{}) conIhList := ihList.([]interface{})
if len(conIhList) == 0 {
_, err := conn.Do("DEL", ihStr)
if err != nil {
return err
}
log.Debug("storage: Deleting Redis", log.Fields{"Hkey": ihStr})
_, err = conn.Do("HDEL", shard, ihStr)
if err != nil {
return err
}
log.Debug("storage: Deleting Redis", log.Fields{
"Hkey": shard,
"Hfield": ihStr,
})
continue
}
var pk serializedPeer var pk serializedPeer
var removedPeerCount int64
for index, ihField := range conIhList { for index, ihField := range conIhList {
if index%2 != 0 { // value if index%2 == 1 { // value
mtime, err := strconv.ParseInt(string(ihField.([]byte)), 10, 64) mtime, err := strconv.ParseInt(string(ihField.([]byte)), 10, 64)
if err != nil { if err != nil {
return err return err
} }
if mtime <= cutoffUnix { if mtime <= cutoffUnix {
_, err := conn.Do("HDEL", ihStr, pk) ret, err := redis.Int64(conn.Do("HDEL", ihStr, pk))
if err != nil { if err != nil {
return err return err
} }
p := decodePeerKey(pk)
log.Debug("storage: Deleting peer", log.Fields{ removedPeerCount += ret
"Peer": fmt.Sprintf("[ID: %s, IP: %s(AddressFamily: %s), Port %d]", p.ID.String(), p.IP.String(), p.IP.AddressFamily.String(), p.Port),
log.Debug("storage: deleting peer", log.Fields{
"Peer": decodePeerKey(pk).String(),
}) })
} }
} else { // key } else { // key
pk = serializedPeer(ihField.([]byte)) pk = serializedPeer(ihField.([]byte))
} }
} }
// DECR seeder/leecher counter
decrCounter := ps.leecherCountKey(group)
if isSeeder {
decrCounter = ps.seederCountKey(group)
}
if _, err := conn.Do("DECRBY", decrCounter, removedPeerCount); err != nil {
return err
}
ihLen, err := conn.Do("HLEN", ihStr) ihLen, err := conn.Do("HLEN", ihStr)
if err != nil { if err != nil {
@ -733,38 +736,36 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
if err != nil { if err != nil {
return err return err
} }
log.Debug("storage: Deleting Redis", log.Fields{"Hkey": ihStr}) log.Debug("storage: deleting infohash", log.Fields{
_, err = conn.Do("HDEL", shard, ihStr) "Group": group,
"Hkey": ihStr,
})
_, err = conn.Do("HDEL", group, ihStr)
if err != nil { if err != nil {
return err return err
} }
log.Debug("storage: Deleting Redis", log.Fields{
"Hkey": shard,
"Hfield": ihStr,
})
} }
} }
} }
recordGCDuration(time.Since(start)) duration := float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond)
log.Debug("storage: recordGCDuration", log.Fields{"timeTaken(ms)": duration})
storage.PromGCDurationMilliseconds.Observe(duration)
return nil return nil
} }
func (ps *peerStore) Stop() <-chan error { func (ps *peerStore) Stop() stop.Result {
c := make(chan error) c := make(stop.Channel)
go func() { go func() {
close(ps.closed) close(ps.closed)
ps.wg.Wait() ps.wg.Wait()
// chihaya does not clear data in redis when exiting.
// TODO(duyanghao): something to be done? // chihaya keys have prefix `IPv{4,6}_`.
close(c) close(c)
}() }()
return c return c.Result()
} }
func (ps *peerStore) LogFields() log.Fields { func (ps *peerStore) LogFields() log.Fields {


@ -1,15 +1,29 @@
package redis package redis
import ( import (
"fmt"
"testing" "testing"
"time" "time"
"github.com/alicebob/miniredis"
s "github.com/chihaya/chihaya/storage" s "github.com/chihaya/chihaya/storage"
) )
func createNew() s.PeerStore { func createNew() s.PeerStore {
ps, err := New(Config{GarbageCollectionInterval: 10 * time.Minute, PrometheusReportingInterval: 10 * time.Minute, PeerLifetime: 30 * time.Minute, RedisBroker: "redis://myRedis@127.0.0.1:6379/0"}) rs, err := miniredis.Run()
if err != nil {
panic(err)
}
redisURL := fmt.Sprintf("redis://@%s/0", rs.Addr())
ps, err := New(Config{
GarbageCollectionInterval: 10 * time.Minute,
PrometheusReportingInterval: 10 * time.Minute,
PeerLifetime: 30 * time.Minute,
RedisBroker: redisURL,
RedisReadTimeout: 10 * time.Second,
RedisWriteTimeout: 10 * time.Second,
RedisConnectTimeout: 10 * time.Second})
if err != nil { if err != nil {
panic(err) panic(err)
} }

135
storage/redis/redis.go Normal file

@ -0,0 +1,135 @@
package redis
import (
"errors"
"net/url"
"strconv"
"strings"
"time"
"github.com/gomodule/redigo/redis"
redsync "gopkg.in/redsync.v1"
)
// redisBackend represents a redis handler.
type redisBackend struct {
pool *redis.Pool
redsync *redsync.Redsync
}
// newRedisBackend creates a redisBackend instance.
func newRedisBackend(cfg *Config, u *redisURL, socketPath string) *redisBackend {
rc := &redisConnector{
URL: u,
SocketPath: socketPath,
ReadTimeout: cfg.RedisReadTimeout,
WriteTimeout: cfg.RedisWriteTimeout,
ConnectTimeout: cfg.RedisConnectTimeout,
}
pool := rc.NewPool()
redsync := redsync.New([]redsync.Pool{pool})
return &redisBackend{
pool: pool,
redsync: redsync,
}
}
// open returns or creates an instance of a Redis connection.
func (rb *redisBackend) open() redis.Conn {
return rb.pool.Get()
}
type redisConnector struct {
URL *redisURL
SocketPath string
ReadTimeout time.Duration
WriteTimeout time.Duration
ConnectTimeout time.Duration
}
// NewPool returns a new pool of Redis connections
func (rc *redisConnector) NewPool() *redis.Pool {
return &redis.Pool{
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := rc.open()
if err != nil {
return nil, err
}
if rc.URL.DB != 0 {
_, err = c.Do("SELECT", rc.URL.DB)
if err != nil {
return nil, err
}
}
return c, err
},
// PINGs connections that have been idle more than 10 seconds
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if time.Since(t) < time.Duration(10*time.Second) {
return nil
}
_, err := c.Do("PING")
return err
},
}
}
// open opens a new Redis connection.
func (rc *redisConnector) open() (redis.Conn, error) {
var opts = []redis.DialOption{
redis.DialDatabase(rc.URL.DB),
redis.DialReadTimeout(rc.ReadTimeout),
redis.DialWriteTimeout(rc.WriteTimeout),
redis.DialConnectTimeout(rc.ConnectTimeout),
}
if rc.URL.Password != "" {
opts = append(opts, redis.DialPassword(rc.URL.Password))
}
if rc.SocketPath != "" {
return redis.Dial("unix", rc.SocketPath, opts...)
}
return redis.Dial("tcp", rc.URL.Host, opts...)
}
// A redisURL represents a parsed Redis URL.
// The general form represented is:
//
// redis://[password@]host[:port][/db]
type redisURL struct {
Host string
Password string
DB int
}
// parseRedisURL parses rawurl into a redisURL.
func parseRedisURL(target string) (*redisURL, error) {
var u *url.URL
u, err := url.Parse(target)
if err != nil {
return nil, err
}
if u.Scheme != "redis" {
return nil, errors.New("no redis scheme found")
}
db := 0 //default redis db
parts := strings.Split(u.Path, "/")
if len(parts) != 1 {
db, err = strconv.Atoi(parts[1])
if err != nil {
return nil, err
}
}
return &redisURL{
Host: u.Host,
Password: u.User.String(),
DB: db,
}, nil
}
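
For reference, here is a hypothetical testable example (it would live in a `_test.go` file of this package and needs `fmt`) showing how the `redis_broker` value from the configuration is decomposed by `parseRedisURL`; the password and address are placeholders taken from the example broker string above.

```go
package redis

import "fmt"

// Example_parseRedisURL is a sketch, not part of this commit.
func Example_parseRedisURL() {
	u, err := parseRedisURL("redis://pwd@127.0.0.1:6379/0")
	if err != nil {
		panic(err)
	}
	// The host keeps its port, the user-info part becomes the password,
	// and the trailing path segment selects the database.
	fmt.Println(u.Host, u.Password, u.DB)
	// Output: 127.0.0.1:6379 pwd 0
}
```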