Merge pull request #129 from chihaya/jwt
add support for jwt validation of infohashes
This commit is contained in:
commit
e37f453b34
13 changed files with 269 additions and 29 deletions
|
@ -212,3 +212,26 @@ Whether the information about memory should be verbose.
|
||||||
default: "5s"
|
default: "5s"
|
||||||
|
|
||||||
Interval at which to collect statistics about memory.
|
Interval at which to collect statistics about memory.
|
||||||
|
|
||||||
|
|
||||||
|
#### `jwkSetURI`
|
||||||
|
|
||||||
|
type: string
|
||||||
|
default: ""
|
||||||
|
|
||||||
|
If this string is not empty, then the tracker will attempt to use JWTs to validate infohashes before announces. The format for the JSON at this endpoint can be found at [the RFC for JWKs](https://tools.ietf.org/html/draft-ietf-jose-json-web-key-41#page-10) with the addition of an "issuer" key. Simply stated, this feature requires two fields at this JSON endpoint: "keys" and "issuer". "keys" is a list of JWKs that can be used to validate JWTs and "issuer" should match the "iss" claim in the JWT. The lifetime of a JWK is based upon standard HTTP caching headers and falls back to 5 minutes if no cache headers are provided.
|
||||||
|
|
||||||
|
|
||||||
|
#### `jwkSetUpdateInterval`
|
||||||
|
|
||||||
|
type: duration
|
||||||
|
default: "5m"
|
||||||
|
|
||||||
|
The interval at which keys are updated from the `jwkSetURI` endpoint. Because the fallback lifetime for keys without cache headers is 5 minutes, this value should never be below 5 minutes unless you know your `jwkSetURI` endpoint sets caching headers.
|
||||||
|
|
||||||
|
#### `jwtAudience`
|
||||||
|
|
||||||
|
type: string
|
||||||
|
default: ""
|
||||||
|
|
||||||
|
The audience claim that is used to validate JWTs.
|
||||||
|
|
|
@ -24,7 +24,10 @@ ADD udp /go/src/github.com/chihaya/chihaya/udp
|
||||||
# Install
|
# Install
|
||||||
RUN go install github.com/chihaya/chihaya/cmd/chihaya
|
RUN go install github.com/chihaya/chihaya/cmd/chihaya
|
||||||
|
|
||||||
# docker run -p 6881:6881 -v $PATH_TO_DIR_WITH_CONF_FILE:/config quay.io/jzelinskie/chihaya
|
# Configuration/environment
|
||||||
VOLUME ["/config"]
|
VOLUME ["/config"]
|
||||||
EXPOSE 6881
|
EXPOSE 6880-6882
|
||||||
CMD ["chihaya", "-config=/config/config.json", "-logtostderr=true"]
|
|
||||||
|
# docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro -e quay.io/jzelinskie/chihaya:latest -v=5
|
||||||
|
ENTRYPOINT ["chihaya", "-config=/config/config.json", "-logtostderr=true"]
|
||||||
|
CMD ["-v=5"]
|
||||||
|
|
30
Godeps/Godeps.json
generated
30
Godeps/Godeps.json
generated
|
@ -1,18 +1,36 @@
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/chihaya/chihaya",
|
"ImportPath": "github.com/chihaya/chihaya",
|
||||||
"GoVersion": "go1.4.2",
|
"GoVersion": "go1.5.1",
|
||||||
"Deps": [
|
"Deps": [
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/chihaya/bencode",
|
"ImportPath": "github.com/chihaya/bencode",
|
||||||
"Rev": "3c485a8d166ff6a79baba90c2c2da01c8348e930"
|
"Rev": "3c485a8d166ff6a79baba90c2c2da01c8348e930"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/coreos/go-oidc/http",
|
||||||
|
"Rev": "ec2746d2ccb220e81c41b0b0cb2d4a1cc23f7950"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/coreos/go-oidc/jose",
|
||||||
|
"Rev": "ec2746d2ccb220e81c41b0b0cb2d4a1cc23f7950"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/coreos/go-systemd/journal",
|
||||||
|
"Comment": "v4-36-gdd4f6b8",
|
||||||
|
"Rev": "dd4f6b87c2a80813d1a01790344322da19ff195e"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/coreos/pkg/capnslog",
|
||||||
|
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/golang/glog",
|
"ImportPath": "github.com/golang/glog",
|
||||||
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
|
"Rev": "fca8c8854093a154ff1eb580aae10276ad6b1b5f"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/julienschmidt/httprouter",
|
"ImportPath": "github.com/julienschmidt/httprouter",
|
||||||
"Rev": "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
|
"Comment": "v1.1-14-g21439ef",
|
||||||
|
"Rev": "21439ef4d70ba4f3e2a5ed9249e7b03af4019b40"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/pushrax/bufferpool",
|
"ImportPath": "github.com/pushrax/bufferpool",
|
||||||
|
@ -28,12 +46,12 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/tylerb/graceful",
|
"ImportPath": "github.com/tylerb/graceful",
|
||||||
"Comment": "v1-7-g0c01122",
|
"Comment": "v1.2.3",
|
||||||
"Rev": "0c011221e91b35f488b8818b00ca279929e9ed7d"
|
"Rev": "48afeb21e2fcbcff0f30bd5ad6b97747b0fae38e"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "golang.org/x/net/netutil",
|
"ImportPath": "golang.org/x/net/netutil",
|
||||||
"Rev": "d175081df37eff8cda13f478bc11a0a65b39958b"
|
"Rev": "520af5de654dc4dd4f0f65aa40e66dbbd9043df1"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
12
README.md
12
README.md
|
@ -10,15 +10,18 @@ programming language. It is still heavily under development and the current
|
||||||
`master` branch should probably not be used in production
|
`master` branch should probably not be used in production
|
||||||
(unless you know what you're doing).
|
(unless you know what you're doing).
|
||||||
|
|
||||||
Features include:
|
Current features include:
|
||||||
|
|
||||||
- Public tracker feature-set with full compatibility with what exists of the BitTorrent spec
|
- Public tracker feature-set with full compatibility with what exists of the BitTorrent spec
|
||||||
- Private tracker feature-set with compatibility for a [Gazelle]-like deployment (WIP)
|
|
||||||
- Low resource consumption, and fast, asynchronous request processing
|
- Low resource consumption, and fast, asynchronous request processing
|
||||||
- Full IPv6 support, including handling for dual-stacked peers
|
- Full IPv6 support, including handling for dual-stacked peers
|
||||||
- Extensive metrics for visibility into the tracker and swarm's performance
|
- Extensive metrics for visibility into the tracker and swarm's performance
|
||||||
- Ability to prioritize peers in local subnets to reduce backbone contention
|
- Ability to prioritize peers in local subnets to reduce backbone contention
|
||||||
- Pluggable backend driver that can coordinate with an external database
|
- JWT validation to approve the usage of a given infohash
|
||||||
|
|
||||||
|
Planned features include:
|
||||||
|
|
||||||
|
- Private tracker feature-set with compatibility for a [Gazelle]-like deployment (WIP)
|
||||||
|
|
||||||
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
|
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
|
||||||
[gazelle]: https://github.com/whatcd/gazelle
|
[gazelle]: https://github.com/whatcd/gazelle
|
||||||
|
@ -48,7 +51,8 @@ An explanation of the available keys can be found in [CONFIGURATION.md].
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ docker pull quay.io/jzelinskie/chihaya:latest
|
$ docker pull quay.io/jzelinskie/chihaya:latest
|
||||||
$ docker run -p 6881:6881 -v $DIR_WITH_CONFIG:/config:ro quay.io/jzelinskie/chihaya:latest
|
$ export CHIHAYA_LOG_LEVEL=5 # most verbose, and the default
|
||||||
|
$ docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro -e quay.io/jzelinskie/chihaya:latest -v=$CHIHAYA_LOG_LEVEL
|
||||||
```
|
```
|
||||||
|
|
||||||
## Developing Chihaya
|
## Developing Chihaya
|
||||||
|
|
|
@ -71,6 +71,10 @@ type TrackerConfig struct {
|
||||||
NumWantFallback int `json:"defaultNumWant"`
|
NumWantFallback int `json:"defaultNumWant"`
|
||||||
TorrentMapShards int `json:"torrentMapShards"`
|
TorrentMapShards int `json:"torrentMapShards"`
|
||||||
|
|
||||||
|
JWKSetURI string `json:"jwkSetURI"`
|
||||||
|
JWKSetUpdateInterval Duration `json:"jwkSetUpdateInterval"`
|
||||||
|
JWTAudience string `json:"jwtAudience"`
|
||||||
|
|
||||||
NetConfig
|
NetConfig
|
||||||
WhitelistConfig
|
WhitelistConfig
|
||||||
}
|
}
|
||||||
|
@ -119,6 +123,9 @@ var DefaultConfig = Config{
|
||||||
ReapRatio: 1.25,
|
ReapRatio: 1.25,
|
||||||
NumWantFallback: 50,
|
NumWantFallback: 50,
|
||||||
TorrentMapShards: 1,
|
TorrentMapShards: 1,
|
||||||
|
JWKSetURI: "",
|
||||||
|
JWKSetUpdateInterval: Duration{5 * time.Minute},
|
||||||
|
JWTAudience: "",
|
||||||
|
|
||||||
NetConfig: NetConfig{
|
NetConfig: NetConfig{
|
||||||
AllowIPSpoofing: true,
|
AllowIPSpoofing: true,
|
||||||
|
|
|
@ -7,6 +7,9 @@
|
||||||
"reapRatio": 1.25,
|
"reapRatio": 1.25,
|
||||||
"defaultNumWant": 50,
|
"defaultNumWant": 50,
|
||||||
"torrentMapShards": 1,
|
"torrentMapShards": 1,
|
||||||
|
"jwkSetURI": "",
|
||||||
|
"jwkSetUpdateInterval": "5m",
|
||||||
|
"jwtAudience": "",
|
||||||
"allowIPSpoofing": true,
|
"allowIPSpoofing": true,
|
||||||
"dualStackedPeers": true,
|
"dualStackedPeers": true,
|
||||||
"realIPHeader": "",
|
"realIPHeader": "",
|
||||||
|
|
|
@ -38,6 +38,11 @@ func (s *Server) newAnnounce(r *http.Request, p httprouter.Params) (*models.Anno
|
||||||
return nil, models.ErrMalformedRequest
|
return nil, models.ErrMalformedRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
|
jwt, exists := q.Params["jwt"]
|
||||||
|
if s.config.JWKSetURI != "" && !exists {
|
||||||
|
return nil, models.ErrMalformedRequest
|
||||||
|
}
|
||||||
|
|
||||||
port, err := q.Uint64("port")
|
port, err := q.Uint64("port")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, models.ErrMalformedRequest
|
return nil, models.ErrMalformedRequest
|
||||||
|
@ -78,6 +83,7 @@ func (s *Server) newAnnounce(r *http.Request, p httprouter.Params) (*models.Anno
|
||||||
NumWant: numWant,
|
NumWant: numWant,
|
||||||
PeerID: peerID,
|
PeerID: peerID,
|
||||||
Uploaded: uploaded,
|
Uploaded: uploaded,
|
||||||
|
JWT: jwt,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -122,7 +122,7 @@ func New(cfg config.StatsConfig) *Stats {
|
||||||
|
|
||||||
if cfg.IncludeMem {
|
if cfg.IncludeMem {
|
||||||
s.MemStatsWrapper = NewMemStatsWrapper(cfg.VerboseMem)
|
s.MemStatsWrapper = NewMemStatsWrapper(cfg.VerboseMem)
|
||||||
s.recordMemStats = time.NewTicker(cfg.MemUpdateInterval.Duration).C
|
s.recordMemStats = time.After(cfg.MemUpdateInterval.Duration)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.flattened = flatjson.Flatten(s)
|
s.flattened = flatjson.Flatten(s)
|
||||||
|
|
|
@ -18,8 +18,14 @@ func (tkr *Tracker) HandleAnnounce(ann *models.Announce, w Writer) (err error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
torrent, err := tkr.FindTorrent(ann.Infohash)
|
if tkr.Config.JWKSetURI != "" {
|
||||||
|
err := tkr.validateJWT(ann.JWT, ann.Infohash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
torrent, err := tkr.FindTorrent(ann.Infohash)
|
||||||
if err == models.ErrTorrentDNE && tkr.Config.CreateOnAnnounce {
|
if err == models.ErrTorrentDNE && tkr.Config.CreateOnAnnounce {
|
||||||
torrent = &models.Torrent{
|
torrent = &models.Torrent{
|
||||||
Infohash: ann.Infohash,
|
Infohash: ann.Infohash,
|
||||||
|
|
147
tracker/jwt.go
Normal file
147
tracker/jwt.go
Normal file
|
@ -0,0 +1,147 @@
|
||||||
|
// Copyright 2015 The Chihaya Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by the BSD 2-Clause license,
|
||||||
|
// which can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package tracker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
oidchttp "github.com/coreos/go-oidc/http"
|
||||||
|
"github.com/coreos/go-oidc/jose"
|
||||||
|
"github.com/golang/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
const jwkTTLFallback = 5 * time.Minute
|
||||||
|
|
||||||
|
func (tkr *Tracker) updateJWKSetForever() {
|
||||||
|
defer tkr.shutdownWG.Done()
|
||||||
|
|
||||||
|
client := &http.Client{Timeout: 5 * time.Second}
|
||||||
|
|
||||||
|
// Get initial JWK Set.
|
||||||
|
err := tkr.updateJWKSet(client)
|
||||||
|
if err != nil {
|
||||||
|
glog.Warningf("Failed to get initial JWK Set: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-tkr.shuttingDown:
|
||||||
|
return
|
||||||
|
|
||||||
|
case <-time.After(tkr.Config.JWKSetUpdateInterval.Duration):
|
||||||
|
err = tkr.updateJWKSet(client)
|
||||||
|
if err != nil {
|
||||||
|
glog.Warningf("Failed to update JWK Set: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type jwkSet struct {
|
||||||
|
Keys []jose.JWK `json:"keys"`
|
||||||
|
Issuer string `json:"issuer"`
|
||||||
|
validUntil time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tkr *Tracker) updateJWKSet(client *http.Client) error {
|
||||||
|
glog.Info("Attemping to update JWK Set")
|
||||||
|
resp, err := client.Get(tkr.Config.JWKSetURI)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var jwks jwkSet
|
||||||
|
err = json.NewDecoder(resp.Body).Decode(&jwks)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(jwks.Keys) == 0 {
|
||||||
|
return errors.New("Failed to find any keys from JWK Set URI")
|
||||||
|
}
|
||||||
|
|
||||||
|
if jwks.Issuer == "" {
|
||||||
|
return errors.New("Failed to find any issuer from JWK Set URI")
|
||||||
|
}
|
||||||
|
|
||||||
|
ttl, _, _ := oidchttp.Cacheable(resp.Header)
|
||||||
|
if ttl == 0 {
|
||||||
|
ttl = jwkTTLFallback
|
||||||
|
}
|
||||||
|
jwks.validUntil = time.Now().Add(ttl)
|
||||||
|
|
||||||
|
tkr.jwkSet = jwks
|
||||||
|
glog.Info("Successfully updated JWK Set")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateJWTSignature(jwt *jose.JWT, jwkSet *jwkSet) (bool, error) {
|
||||||
|
for _, jwk := range jwkSet.Keys {
|
||||||
|
v, err := jose.NewVerifier(jwk)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := v.Verify(jwt.Signature, []byte(jwt.Data())); err == nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tkr *Tracker) validateJWT(jwtStr, infohash string) error {
|
||||||
|
jwkSet := tkr.jwkSet
|
||||||
|
if time.Now().After(jwkSet.validUntil) {
|
||||||
|
return fmt.Errorf("Failed verify JWT due to stale JWK Set")
|
||||||
|
}
|
||||||
|
|
||||||
|
jwt, err := jose.ParseJWT(jwtStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
validated, err := validateJWTSignature(&jwt, &jwkSet)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
} else if !validated {
|
||||||
|
return errors.New("Failed to verify JWT with all available verifiers")
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, err := jwt.Claims()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if claimedIssuer, ok, err := claims.StringClaim("iss"); claimedIssuer != jwkSet.Issuer || err != nil || !ok {
|
||||||
|
return errors.New("Failed to validate JWT issuer claim")
|
||||||
|
}
|
||||||
|
|
||||||
|
if claimedAudience, ok, err := claims.StringClaim("aud"); claimedAudience != tkr.Config.JWTAudience || err != nil || !ok {
|
||||||
|
return errors.New("Failed to validate JWT audience claim")
|
||||||
|
}
|
||||||
|
|
||||||
|
claimedInfohash, ok, err := claims.StringClaim("infohash")
|
||||||
|
if err != nil || !ok {
|
||||||
|
return errors.New("Failed to validate JWT infohash claim")
|
||||||
|
}
|
||||||
|
|
||||||
|
unescapedInfohash, err := url.QueryUnescape(claimedInfohash)
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("Failed to unescape JWT infohash claim")
|
||||||
|
}
|
||||||
|
|
||||||
|
if unescapedInfohash != infohash {
|
||||||
|
return errors.New("Failed to match infohash claim with requested infohash")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -136,6 +136,7 @@ type Announce struct {
|
||||||
NumWant int `json:"numwant"`
|
NumWant int `json:"numwant"`
|
||||||
PeerID string `json:"peer_id"`
|
PeerID string `json:"peer_id"`
|
||||||
Uploaded uint64 `json:"uploaded"`
|
Uploaded uint64 `json:"uploaded"`
|
||||||
|
JWT string `json:"jwt"`
|
||||||
|
|
||||||
Torrent *Torrent `json:"-"`
|
Torrent *Torrent `json:"-"`
|
||||||
Peer *Peer `json:"-"`
|
Peer *Peer `json:"-"`
|
||||||
|
|
|
@ -7,6 +7,7 @@
|
||||||
package tracker
|
package tracker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
@ -19,6 +20,12 @@ import (
|
||||||
// independently of the underlying data transports used.
|
// independently of the underlying data transports used.
|
||||||
type Tracker struct {
|
type Tracker struct {
|
||||||
Config *config.Config
|
Config *config.Config
|
||||||
|
|
||||||
|
jwkSet jwkSet
|
||||||
|
|
||||||
|
shuttingDown chan struct{}
|
||||||
|
shutdownWG sync.WaitGroup
|
||||||
|
|
||||||
*Storage
|
*Storage
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -28,14 +35,23 @@ func New(cfg *config.Config) (*Tracker, error) {
|
||||||
tkr := &Tracker{
|
tkr := &Tracker{
|
||||||
Config: cfg,
|
Config: cfg,
|
||||||
Storage: NewStorage(cfg),
|
Storage: NewStorage(cfg),
|
||||||
|
shuttingDown: make(chan struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
glog.Info("Starting garbage collection goroutine")
|
||||||
|
tkr.shutdownWG.Add(1)
|
||||||
go tkr.purgeInactivePeers(
|
go tkr.purgeInactivePeers(
|
||||||
cfg.PurgeInactiveTorrents,
|
cfg.PurgeInactiveTorrents,
|
||||||
time.Duration(float64(cfg.MinAnnounce.Duration)*cfg.ReapRatio),
|
time.Duration(float64(cfg.MinAnnounce.Duration)*cfg.ReapRatio),
|
||||||
cfg.ReapInterval.Duration,
|
cfg.ReapInterval.Duration,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if tkr.Config.JWKSetURI != "" {
|
||||||
|
glog.Info("Starting JWK Set update goroutine")
|
||||||
|
tkr.shutdownWG.Add(1)
|
||||||
|
go tkr.updateJWKSetForever()
|
||||||
|
}
|
||||||
|
|
||||||
if cfg.ClientWhitelistEnabled {
|
if cfg.ClientWhitelistEnabled {
|
||||||
tkr.LoadApprovedClients(cfg.ClientWhitelist)
|
tkr.LoadApprovedClients(cfg.ClientWhitelist)
|
||||||
}
|
}
|
||||||
|
@ -45,8 +61,8 @@ func New(cfg *config.Config) (*Tracker, error) {
|
||||||
|
|
||||||
// Close gracefully shutdowns a Tracker by closing any database connections.
|
// Close gracefully shutdowns a Tracker by closing any database connections.
|
||||||
func (tkr *Tracker) Close() error {
|
func (tkr *Tracker) Close() error {
|
||||||
|
close(tkr.shuttingDown)
|
||||||
// TODO(jzelinskie): shutdown purgeInactivePeers goroutine.
|
tkr.shutdownWG.Wait()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -73,7 +89,14 @@ type Writer interface {
|
||||||
// purgeInactivePeers periodically walks the torrent database and removes
|
// purgeInactivePeers periodically walks the torrent database and removes
|
||||||
// peers that haven't announced recently.
|
// peers that haven't announced recently.
|
||||||
func (tkr *Tracker) purgeInactivePeers(purgeEmptyTorrents bool, threshold, interval time.Duration) {
|
func (tkr *Tracker) purgeInactivePeers(purgeEmptyTorrents bool, threshold, interval time.Duration) {
|
||||||
for _ = range time.NewTicker(interval).C {
|
defer tkr.shutdownWG.Done()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-tkr.shuttingDown:
|
||||||
|
return
|
||||||
|
|
||||||
|
case <-time.After(interval):
|
||||||
before := time.Now().Add(-threshold)
|
before := time.Now().Add(-threshold)
|
||||||
glog.V(0).Infof("Purging peers with no announces since %s", before)
|
glog.V(0).Infof("Purging peers with no announces since %s", before)
|
||||||
|
|
||||||
|
@ -83,3 +106,4 @@ func (tkr *Tracker) purgeInactivePeers(purgeEmptyTorrents bool, threshold, inter
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -107,14 +107,12 @@ func (s *Server) Serve() {
|
||||||
s.wg.Add(1)
|
s.wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer s.wg.Done()
|
defer s.wg.Done()
|
||||||
// Generate a new IV every hour.
|
|
||||||
t := time.NewTicker(time.Hour)
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-t.C:
|
|
||||||
s.connIDGen.NewIV()
|
|
||||||
case <-s.closing:
|
case <-s.closing:
|
||||||
return
|
return
|
||||||
|
case <-time.After(time.Hour):
|
||||||
|
s.connIDGen.NewIV()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
Loading…
Reference in a new issue