overhaul everything

commit 3bfb3074b4, parent 18f6c32d97
27 changed files with 1193 additions and 1143 deletions
bencode/bencode.go (new file, 104 lines)
@@ -0,0 +1,104 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package bencode implements bencoding of objects as defined in BEP 3 using
// type assertion rather than the use of reflection.
package bencode

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "strconv"
    "time"
)

// An Encoder writes Bencoded objects to an output stream.
type Encoder struct {
    w io.Writer
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
    return &Encoder{w: w}
}

// Encode writes the bencoding of v to the stream.
func (enc *Encoder) Encode(v interface{}) error {
    return marshal(enc.w, v)
}

// Marshal returns the bencoding of v.
func Marshal(v interface{}) ([]byte, error) {
    buf := &bytes.Buffer{}
    err := marshal(buf, v)
    return buf.Bytes(), err
}

// Marshaler is the interface implemented by objects that can marshal
// themselves.
type Marshaler interface {
    MarshalBencode() ([]byte, error)
}

// Marshal writes types bencoded to an io.Writer
func marshal(w io.Writer, data interface{}) error {
    switch v := data.(type) {
    case Marshaler:
        bencoded, err := v.MarshalBencode()
        if err != nil {
            return err
        }
        _, err = w.Write(bencoded)
        if err != nil {
            return err
        }

    case string:
        fmt.Fprintf(w, "%d:%s", len(v), v)

    case int:
        fmt.Fprintf(w, "i%de", v)

    case uint:
        fmt.Fprintf(w, "i%se", strconv.FormatUint(uint64(v), 10))

    case int64:
        fmt.Fprintf(w, "i%se", strconv.FormatInt(v, 10))

    case uint64:
        fmt.Fprintf(w, "i%se", strconv.FormatUint(v, 10))

    case time.Duration: // Assume seconds
        fmt.Fprintf(w, "i%se", strconv.FormatInt(int64(v/time.Second), 10))

    case map[string]interface{}:
        fmt.Fprintf(w, "d")
        for key, val := range v {
            fmt.Fprintf(w, "%s:%s", strconv.Itoa(len(key)), key)
            err := marshal(w, val)
            if err != nil {
                return err
            }
        }

    case []string:
        fmt.Fprintf(w, "l")
        for _, val := range v {
            err := marshal(w, val)
            if err != nil {
                return err
            }
        }
        fmt.Fprintf(w, "e")

    default:
        // Although not currently necessary,
        // should handle []interface{} manually; Go can't do it implicitly
        return errors.New("bencode: attempted to marshal unsupported type")
    }

    return nil
}
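The encoder is driven entirely by the type switch above, so only the listed types can be marshalled. A minimal usage sketch, assuming the bencode import path introduced by this commit; the expected outputs follow directly from the cases above:

package main

import (
    "fmt"
    "log"

    "github.com/chihaya/chihaya/bencode"
)

func main() {
    // Strings become "<len>:<string>".
    out, err := bencode.Marshal("spam")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out)) // 4:spam

    // Integers become "i<n>e".
    out, _ = bencode.Marshal(42)
    fmt.Println(string(out)) // i42e

    // String slices become lists: "l...e".
    out, _ = bencode.Marshal([]string{"a", "bc"})
    fmt.Println(string(out)) // l1:a2:bce
}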
bencode/bencode_test.go (new file, 7 lines)
@@ -0,0 +1,7 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package bencode

// TODO Write bencode tests
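The test file is still a stub. A table-driven test along these lines would exercise the type switch in marshal; this is only a sketch, with expected encodings derived from the cases above:

package bencode

import "testing"

func TestMarshalScalars(t *testing.T) {
    cases := []struct {
        in       interface{}
        expected string
    }{
        {"spam", "4:spam"},
        {42, "i42e"},
        {uint64(7), "i7e"},
        {[]string{"a", "bc"}, "l1:a2:bce"},
    }

    for _, c := range cases {
        got, err := Marshal(c.in)
        if err != nil {
            t.Fatalf("Marshal(%v) returned error: %v", c.in, err)
        }
        if string(got) != c.expected {
            t.Errorf("Marshal(%v) = %q, expected %q", c.in, string(got), c.expected)
        }
    }
}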
@@ -70,15 +70,15 @@ func Open(path string) (*Config, error) {
    }
    defer f.Close()

    conf, err := newConfig(f)
    conf, err := decode(f)
    if err != nil {
        return nil, err
    }
    return conf, nil
}

// New decodes JSON from a Reader into a Config.
func newConfig(raw io.Reader) (*Config, error) {
// decode transforms Reader populated with JSON into a *Config.
func decode(raw io.Reader) (*Config, error) {
    conf := &Config{}
    err := json.NewDecoder(raw).Decode(conf)
    if err != nil {
@@ -11,7 +11,7 @@ import (
    "fmt"

    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/storage"
    "github.com/chihaya/chihaya/models"
)

var drivers = make(map[string]Driver)

@@ -35,7 +35,7 @@ func Register(name string, driver Driver) {
    drivers[name] = driver
}

// Open creates a connection specified by a storage configuration.
// Open creates a connection specified by a models configuration.
func Open(conf *config.DataStore) (Conn, error) {
    driver, ok := drivers[conf.Driver]
    if !ok {

@@ -50,51 +50,23 @@ func Open(conf *config.DataStore) (Conn, error) {

// Conn represents a connection to the data store.
type Conn interface {
    // Start is called once when the server starts.
    // It starts any necessary goroutines a given driver requires, and sets
    // up the driver's initial state
    Start() error

    // Close terminates connections to the database(s) and gracefully shuts
    // down the driver
    Close() error

    // RecordAnnounce is called once per announce, and is passed the delta in
    // statistics for the client peer since its last announce.
    RecordAnnounce(delta *AnnounceDelta) error
    RecordAnnounce(delta *models.AnnounceDelta) error

    // LoadTorrents fetches and returns the specified torrents.
    LoadTorrents(ids []uint64) ([]*storage.Torrent, error)
    LoadTorrents(ids []uint64) ([]*models.Torrent, error)

    // LoadAllTorrents fetches and returns all torrents.
    LoadAllTorrents() ([]*storage.Torrent, error)
    LoadAllTorrents() ([]*models.Torrent, error)

    // LoadUsers fetches and returns the specified users.
    LoadUsers(ids []uint64) ([]*storage.User, error)
    LoadUsers(ids []uint64) ([]*models.User, error)

    // LoadAllUsers fetches and returns all users.
    LoadAllUsers(ids []uint64) ([]*storage.User, error)
}

// AnnounceDelta contains a difference in statistics for a peer.
// It is used for communicating changes to be recorded by the driver.
type AnnounceDelta struct {
    Peer    *storage.Peer
    Torrent *storage.Torrent
    User    *storage.User

    // Created is true if this announce created a new peer or changed an existing peer's address
    Created bool

    // Uploaded contains the raw upload delta for this announce, in bytes
    Uploaded uint64

    // Downloaded contains the raw download delta for this announce, in bytes
    Downloaded uint64

    // Timestamp is the unix timestamp this announce occurred at
    Timestamp int64

    // Snatched is true if this announce completed the download
    Snatched bool
    LoadAllUsers(ids []uint64) ([]*models.User, error)
}
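With AnnounceDelta moved out of this package, a backend driver now only satisfies the slimmed-down Conn interface above. A rough sketch of what an out-of-tree driver could look like after this change; the package name "noop" is hypothetical, and the Driver.New signature is mirrored from the mock driver below rather than stated in this hunk:

// Package noop sketches a do-nothing backend driver against the new interface.
package noop

import (
    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/drivers/backend"
    "github.com/chihaya/chihaya/models"
)

type driver struct{}

// New builds a connection; a real driver would dial its database here.
func (d *driver) New(conf *config.DataStore) backend.Conn {
    return &Conn{}
}

// Conn discards every delta it is handed.
type Conn struct{}

func (c *Conn) Start() error { return nil }
func (c *Conn) Close() error { return nil }

func (c *Conn) RecordAnnounce(delta *models.AnnounceDelta) error { return nil }

func (c *Conn) LoadTorrents(ids []uint64) ([]*models.Torrent, error) { return nil, nil }
func (c *Conn) LoadAllTorrents() ([]*models.Torrent, error)          { return nil, nil }
func (c *Conn) LoadUsers(ids []uint64) ([]*models.User, error)       { return nil, nil }
func (c *Conn) LoadAllUsers(ids []uint64) ([]*models.User, error)    { return nil, nil }

func init() {
    // Registering by name lets main.go pull the driver in with a blank import.
    backend.Register("noop", &driver{})
}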
@@ -2,8 +2,8 @@
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package mock implements the storage interface for a BitTorrent tracker's
// backend storage. It can be used in production, but isn't recommended.
// Package mock implements the models interface for a BitTorrent tracker's
// backend models. It can be used in production, but isn't recommended.
// Stored values will not persist if the tracker is restarted.
package mock

@@ -11,8 +11,8 @@ import (
    "sync"

    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/storage"
    "github.com/chihaya/chihaya/storage/backend"
    "github.com/chihaya/chihaya/drivers/backend"
    "github.com/chihaya/chihaya/models"
)

type driver struct{}

@@ -20,7 +20,7 @@ type driver struct{}
// Mock is a concrete implementation of the backend.Conn interface (plus some
// debugging methods) that stores deltas in memory.
type Mock struct {
    deltaHistory  []*backend.AnnounceDelta
    deltaHistory  []*models.AnnounceDelta
    deltaHistoryM sync.RWMutex
}

@@ -28,18 +28,13 @@ func (d *driver) New(conf *config.DataStore) backend.Conn {
    return &Mock{}
}

// Start returns nil.
func (m *Mock) Start() error {
    return nil
}

// Close returns nil.
func (m *Mock) Close() error {
    return nil
}

// RecordAnnounce adds a delta to the history.
func (m *Mock) RecordAnnounce(delta *backend.AnnounceDelta) error {
func (m *Mock) RecordAnnounce(delta *models.AnnounceDelta) error {
    m.deltaHistoryM.Lock()
    defer m.deltaHistoryM.Unlock()

@@ -49,11 +44,11 @@ func (m *Mock) RecordAnnounce(delta *backend.AnnounceDelta) error {
}

// DeltaHistory safely copies and returns the history of recorded deltas.
func (m *Mock) DeltaHistory() []backend.AnnounceDelta {
func (m *Mock) DeltaHistory() []models.AnnounceDelta {
    m.deltaHistoryM.Lock()
    defer m.deltaHistoryM.Unlock()

    cp := make([]backend.AnnounceDelta, len(m.deltaHistory))
    cp := make([]models.AnnounceDelta, len(m.deltaHistory))
    for index, delta := range m.deltaHistory {
        cp[index] = *delta
    }

@@ -62,22 +57,22 @@ func (m *Mock) DeltaHistory() []backend.AnnounceDelta {
}

// LoadTorrents returns (nil, nil).
func (m *Mock) LoadTorrents(ids []uint64) ([]*storage.Torrent, error) {
func (m *Mock) LoadTorrents(ids []uint64) ([]*models.Torrent, error) {
    return nil, nil
}

// LoadAllTorrents returns (nil, nil).
func (m *Mock) LoadAllTorrents() ([]*storage.Torrent, error) {
func (m *Mock) LoadAllTorrents() ([]*models.Torrent, error) {
    return nil, nil
}

// LoadUsers returns (nil, nil).
func (m *Mock) LoadUsers(ids []uint64) ([]*storage.User, error) {
func (m *Mock) LoadUsers(ids []uint64) ([]*models.User, error) {
    return nil, nil
}

// LoadAllUsers returns (nil, nil).
func (m *Mock) LoadAllUsers(ids []uint64) ([]*storage.User, error) {
func (m *Mock) LoadAllUsers(ids []uint64) ([]*models.User, error) {
    return nil, nil
}
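Since the mock backend keeps every delta in memory, it doubles as a test helper. A hedged sketch of how a test in this package might use it after the rename; the field values are illustrative and it assumes RecordAnnounce appends to deltaHistory as its doc comment says:

package mock

import (
    "testing"

    "github.com/chihaya/chihaya/models"
)

func TestRecordAnnounceKeepsHistory(t *testing.T) {
    m := &Mock{}

    // Record one delta and expect to see it back in the copied history.
    delta := &models.AnnounceDelta{Uploaded: 1024, Downloaded: 512}
    if err := m.RecordAnnounce(delta); err != nil {
        t.Fatal(err)
    }

    history := m.DeltaHistory()
    if len(history) != 1 || history[0].Uploaded != 1024 {
        t.Errorf("unexpected delta history: %+v", history)
    }
}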
drivers/tracker/mock/conn.go (new file, 238 lines)
@@ -0,0 +1,238 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package mock

import (
    "github.com/chihaya/chihaya/drivers/tracker"
    "github.com/chihaya/chihaya/models"
)

// Conn implements a connection to a memory-based tracker data store.
type Conn struct {
    *Pool
}

func (c *Conn) FindUser(passkey string) (*models.User, error) {
    c.usersM.RLock()
    defer c.usersM.RUnlock()

    user, exists := c.users[passkey]
    if !exists {
        return nil, tracker.ErrUserDNE
    }
    return &*user, nil
}

func (c *Conn) FindTorrent(infohash string) (*models.Torrent, error) {
    c.torrentsM.RLock()
    defer c.torrentsM.RUnlock()

    torrent, exists := c.torrents[infohash]
    if !exists {
        return nil, tracker.ErrTorrentDNE
    }
    return &*torrent, nil
}

func (c *Conn) ClientWhitelisted(peerID string) error {
    c.whitelistM.RLock()
    defer c.whitelistM.RUnlock()

    _, ok := c.whitelist[peerID]
    if !ok {
        return tracker.ErrClientUnapproved
    }
    return nil
}

func (c *Conn) IncrementSnatches(t *models.Torrent) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }
    torrent.Snatches++
    t.Snatches++

    return nil
}

func (c *Conn) MarkActive(t *models.Torrent) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    torrent.Active = true
    t.Active = true

    return nil
}

func (c *Conn) MarkInactive(t *models.Torrent) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    torrent.Active = false
    t.Active = false

    return nil
}

func (c *Conn) AddLeecher(t *models.Torrent, p *models.Peer) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    torrent.Leechers[p.Key()] = *p
    t.Leechers[p.Key()] = *p

    return nil
}

func (c *Conn) AddSeeder(t *models.Torrent, p *models.Peer) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    torrent.Seeders[p.Key()] = *p
    t.Seeders[p.Key()] = *p

    return nil
}

func (c *Conn) RemoveLeecher(t *models.Torrent, p *models.Peer) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    delete(torrent.Leechers, p.Key())
    delete(t.Leechers, p.Key())

    return nil
}

func (c *Conn) RemoveSeeder(t *models.Torrent, p *models.Peer) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    delete(torrent.Seeders, p.Key())
    delete(t.Seeders, p.Key())

    return nil
}

func (c *Conn) SetLeecher(t *models.Torrent, p *models.Peer) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    torrent.Leechers[p.Key()] = *p
    t.Leechers[p.Key()] = *p

    return nil
}

func (c *Conn) SetSeeder(t *models.Torrent, p *models.Peer) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent, ok := c.torrents[t.Infohash]
    if !ok {
        return tracker.ErrTorrentDNE
    }

    torrent.Seeders[p.Key()] = *p
    t.Seeders[p.Key()] = *p

    return nil
}

func (c *Conn) AddTorrent(t *models.Torrent) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    torrent := *t
    c.torrents[t.Infohash] = &torrent

    return nil
}

func (c *Conn) RemoveTorrent(t *models.Torrent) error {
    c.torrentsM.Lock()
    defer c.torrentsM.Unlock()

    delete(c.torrents, t.Infohash)

    return nil
}

func (c *Conn) AddUser(u *models.User) error {
    c.usersM.Lock()
    defer c.usersM.Unlock()

    user := *u
    c.users[u.Passkey] = &user

    return nil
}

func (c *Conn) RemoveUser(u *models.User) error {
    c.usersM.Lock()
    defer c.usersM.Unlock()

    delete(c.users, u.Passkey)

    return nil
}

func (c *Conn) WhitelistClient(peerID string) error {
    c.whitelistM.Lock()
    defer c.whitelistM.Unlock()

    c.whitelist[peerID] = true

    return nil
}

func (c *Conn) UnWhitelistClient(peerID string) error {
    c.whitelistM.Lock()
    defer c.whitelistM.Unlock()

    delete(c.whitelist, peerID)

    return nil
}
@@ -2,23 +2,23 @@
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package mock implements the storage interface for a BitTorrent tracker
// Package mock implements the models interface for a BitTorrent tracker
// within memory. It can be used in production, but isn't recommended.
// Stored values will not persist if the tracker is restarted.
package mock

import (
    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/storage"
    "github.com/chihaya/chihaya/storage/tracker"
    "github.com/chihaya/chihaya/drivers/tracker"
    "github.com/chihaya/chihaya/models"
)

type driver struct{}

func (d *driver) New(conf *config.DataStore) tracker.Pool {
    return &Pool{
        users:     make(map[string]*storage.User),
        torrents:  make(map[string]*storage.Torrent),
        users:     make(map[string]*models.User),
        torrents:  make(map[string]*models.Torrent),
        whitelist: make(map[string]bool),
    }
}
@@ -7,15 +7,15 @@ package mock
import (
    "sync"

    "github.com/chihaya/chihaya/storage"
    "github.com/chihaya/chihaya/storage/tracker"
    "github.com/chihaya/chihaya/drivers/tracker"
    "github.com/chihaya/chihaya/models"
)

type Pool struct {
    users  map[string]*storage.User
    users  map[string]*models.User
    usersM sync.RWMutex

    torrents  map[string]*storage.Torrent
    torrents  map[string]*models.Torrent
    torrentsM sync.RWMutex

    whitelist map[string]bool
drivers/tracker/tracker.go (new file, 104 lines)
@@ -0,0 +1,104 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package tracker provides a generic interface for manipulating a
// BitTorrent tracker's fast-moving data.
package tracker

import (
    "errors"
    "fmt"

    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/models"
)

var (
    // ErrUserDNE is returned when a user does not exist.
    ErrUserDNE = errors.New("user does not exist")
    // ErrTorrentDNE is returned when a torrent does not exist.
    ErrTorrentDNE = errors.New("torrent does not exist")
    // ErrClientUnapproved is returned when a clientID is not in the whitelist.
    ErrClientUnapproved = errors.New("client is not approved")
    // ErrInvalidPasskey is returned when a passkey is not properly formatted.
    ErrInvalidPasskey = errors.New("passkey is invalid")

    drivers = make(map[string]Driver)
)

// Driver represents an interface to pool of connections to models used for
// the tracker.
type Driver interface {
    New(*config.DataStore) Pool
}

// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver Driver) {
    if driver == nil {
        panic("tracker: Register driver is nil")
    }
    if _, dup := drivers[name]; dup {
        panic("tracker: Register called twice for driver " + name)
    }
    drivers[name] = driver
}

// Open creates a pool of data store connections specified by a models configuration.
func Open(conf *config.DataStore) (Pool, error) {
    driver, ok := drivers[conf.Driver]
    if !ok {
        return nil, fmt.Errorf(
            "unknown driver %q (forgotten import?)",
            conf.Driver,
        )
    }
    pool := driver.New(conf)
    return pool, nil
}

// Pool represents a thread-safe pool of connections to the data store
// that can be used to safely within concurrent goroutines.
type Pool interface {
    Close() error
    Get() (Conn, error)
}

// Conn represents a connection to the data store that can be used
// to make reads/writes.
type Conn interface {
    // Reads
    FindUser(passkey string) (*models.User, error)
    FindTorrent(infohash string) (*models.Torrent, error)
    ClientWhitelisted(clientID string) error

    // Writes
    IncrementSnatches(t *models.Torrent) error
    MarkActive(t *models.Torrent) error
    AddLeecher(t *models.Torrent, p *models.Peer) error
    AddSeeder(t *models.Torrent, p *models.Peer) error
    RemoveLeecher(t *models.Torrent, p *models.Peer) error
    RemoveSeeder(t *models.Torrent, p *models.Peer) error
    SetLeecher(t *models.Torrent, p *models.Peer) error
    SetSeeder(t *models.Torrent, p *models.Peer) error

    // Priming / Testing
    AddTorrent(t *models.Torrent) error
    RemoveTorrent(t *models.Torrent) error
    AddUser(u *models.User) error
    RemoveUser(u *models.User) error
    WhitelistClient(clientID string) error
    UnWhitelistClient(clientID string) error
}

// LeecherFinished moves a peer from the leeching pool to the seeder pool.
func LeecherFinished(c Conn, t *models.Torrent, p *models.Peer) error {
    err := c.RemoveLeecher(t, p)
    if err != nil {
        return err
    }
    err = c.AddSeeder(t, p)
    return err
}
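The registry mirrors database/sql: a driver registers itself from an init function and the application picks it by name out of the config. A sketch of how a caller might wire this up; the "mock" driver name and the DataStore literal are illustrative:

package main

import (
    "log"

    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/drivers/tracker"

    // Blank import so the driver's init() calls tracker.Register.
    _ "github.com/chihaya/chihaya/drivers/tracker/mock"
)

func main() {
    pool, err := tracker.Open(&config.DataStore{Driver: "mock"})
    if err != nil {
        log.Fatal(err)
    }
    defer pool.Close()

    conn, err := pool.Get()
    if err != nil {
        log.Fatal(err)
    }

    // Reads fail with well-known errors when the object is absent.
    if _, err := conn.FindTorrent("deadbeef"); err == tracker.ErrTorrentDNE {
        log.Print("torrent not tracked yet")
    }
}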
main.go (4 lines changed)
@@ -15,8 +15,8 @@ import (
    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/server"

    _ "github.com/chihaya/chihaya/storage/backend/mock"
    _ "github.com/chihaya/chihaya/storage/tracker/mock"
    _ "github.com/chihaya/chihaya/drivers/backend/mock"
    _ "github.com/chihaya/chihaya/drivers/tracker/mock"
)

var (
models/models.go (new file, 265 lines)
@@ -0,0 +1,265 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package models implements the models for an abstraction over the
// multiple data stores used by a BitTorrent tracker.
package models

import (
    "errors"
    "net/http"
    "path"
    "strconv"
    "time"

    "github.com/chihaya/chihaya/config"
    "github.com/chihaya/chihaya/models/query"
)

var (
    // ErrMalformedRequest is returned when a request does not have the required
    // parameters.
    ErrMalformedRequest = errors.New("malformed request")
)

// Peer is the internal representation of a participant in a swarm.
type Peer struct {
    ID        string `json:"id"`
    UserID    uint64 `json:"user_id"`
    TorrentID uint64 `json:"torrent_id"`

    IP   string `json:"ip"`
    Port uint64 `json:"port"`

    Uploaded     uint64 `json:"uploaded"`
    Downloaded   uint64 `json:"downloaded"`
    Left         uint64 `json:"left"`
    LastAnnounce int64  `json:"last_announce"`
}

// Key is a helper that returns the proper format for keys used for maps
// of peers (i.e. torrent.Seeders & torrent.Leechers).
func (p Peer) Key() string {
    return p.ID + ":" + strconv.FormatUint(p.UserID, 36)
}

// Torrent is the internal representation of a swarm for a given torrent file.
type Torrent struct {
    ID       uint64 `json:"id"`
    Infohash string `json:"infohash"`
    Active   bool   `json:"active"`

    Seeders  map[string]Peer `json:"seeders"`
    Leechers map[string]Peer `json:"leechers"`

    Snatches       uint64  `json:"snatches"`
    UpMultiplier   float64 `json:"up_multiplier"`
    DownMultiplier float64 `json:"down_multiplier"`
    LastAction     int64   `json:"last_action"`
}

// InSeederPool returns true if a peer is within a torrent's pool of seeders.
func (t *Torrent) InSeederPool(p *Peer) bool {
    _, exists := t.Seeders[p.Key()]
    return exists
}

// InLeecherPool returns true if a peer is within a torrent's pool of leechers.
func (t *Torrent) InLeecherPool(p *Peer) bool {
    _, exists := t.Leechers[p.Key()]
    return exists
}

// NewPeer creates a new peer using the information provided by an announce.
func NewPeer(t *Torrent, u *User, a *Announce) *Peer {
    return &Peer{
        ID:           a.PeerID,
        UserID:       u.ID,
        TorrentID:    t.ID,
        IP:           a.IP,
        Port:         a.Port,
        Uploaded:     a.Uploaded,
        Downloaded:   a.Downloaded,
        Left:         a.Left,
        LastAnnounce: time.Now().Unix(),
    }
}

// User is the internal representation of a registered user for private trackers.
type User struct {
    ID      uint64 `json:"id"`
    Passkey string `json:"passkey"`

    UpMultiplier   float64 `json:"up_multiplier"`
    DownMultiplier float64 `json:"down_multiplier"`
    Snatches       uint64  `json:"snatches"`
}

// Announce represents all of the data from an announce request.
type Announce struct {
    Config  *config.Config `json:"config"`
    Request *http.Request  `json:"request"`

    Compact    bool   `json:"compact"`
    Downloaded uint64 `json:"downloaded"`
    Event      string `json:"event"`
    IP         string `json:"ip"`
    Infohash   string `json:"infohash"`
    Left       uint64 `json:"left"`
    NumWant    int    `json:"numwant"`
    Passkey    string `json:"passkey"`
    PeerID     string `json:"peer_id"`
    Port       uint64 `json:"port"`
    Uploaded   uint64 `json:"uploaded"`
}

// NewAnnounce parses an HTTP request and generates an Announce.
func NewAnnounce(r *http.Request, conf *config.Config) (*Announce, error) {
    q, err := query.New(r.URL.RawQuery)
    if err != nil {
        return nil, err
    }

    compact := q.Params["compact"] != "0"
    downloaded, downloadedErr := q.Uint64("downloaded")
    event, _ := q.Params["event"]
    infohash, _ := q.Params["info_hash"]
    ip, _ := q.RequestedIP(r)
    left, leftErr := q.Uint64("left")
    numWant := q.RequestedPeerCount(conf.DefaultNumWant)
    dir, _ := path.Split(r.URL.Path)
    peerID, _ := q.Params["peer_id"]
    port, portErr := q.Uint64("port")
    uploaded, uploadedErr := q.Uint64("uploaded")

    if downloadedErr != nil ||
        infohash == "" ||
        leftErr != nil ||
        peerID == "" ||
        portErr != nil ||
        uploadedErr != nil ||
        ip == "" ||
        len(dir) != 34 {
        return nil, ErrMalformedRequest
    }

    return &Announce{
        Config:     conf,
        Request:    r,
        Compact:    compact,
        Downloaded: downloaded,
        Event:      event,
        IP:         ip,
        Infohash:   infohash,
        Left:       left,
        NumWant:    numWant,
        Passkey:    dir[1:33],
        PeerID:     peerID,
        Port:       port,
        Uploaded:   uploaded,
    }, nil
}

// ClientID returns the part of a PeerID that identifies the client software.
func (a Announce) ClientID() (clientID string) {
    length := len(a.PeerID)
    if length >= 6 {
        if a.PeerID[0] == '-' {
            if length >= 7 {
                clientID = a.PeerID[1:7]
            }
        } else {
            clientID = a.PeerID[0:6]
        }
    }

    return
}

// AnnounceDelta contains a difference in statistics for a peer.
// It is used for communicating changes to be recorded by the driver.
type AnnounceDelta struct {
    Peer    *Peer
    Torrent *Torrent
    User    *User

    // Created is true if this announce created a new peer or changed an existing
    // peer's address
    Created bool
    // Snatched is true if this announce completed the download
    Snatched bool

    // Uploaded contains the raw upload delta for this announce, in bytes
    Uploaded uint64
    // Downloaded contains the raw download delta for this announce, in bytes
    Downloaded uint64
}

// NewAnnounceDelta calculates an AnnounceDelta from an announce and its
// associated peer, user, and torrent.
func NewAnnounceDelta(p *Peer, u *User, a *Announce, t *Torrent, created, snatched bool) *AnnounceDelta {
    rawDeltaUp := p.Uploaded - a.Uploaded
    rawDeltaDown := p.Downloaded - a.Downloaded

    // Restarting a torrent may cause a delta to be negative.
    if rawDeltaUp < 0 {
        rawDeltaUp = 0
    }
    if rawDeltaDown < 0 {
        rawDeltaDown = 0
    }

    return &AnnounceDelta{
        Peer:    p,
        Torrent: t,
        User:    u,

        Created:  created,
        Snatched: snatched,

        Uploaded:   uint64(float64(rawDeltaUp) * u.UpMultiplier * t.UpMultiplier),
        Downloaded: uint64(float64(rawDeltaDown) * u.DownMultiplier * t.DownMultiplier),
    }
}

// Scrape represents all of the data from a scrape request.
type Scrape struct {
    Config  *config.Config `json:"config"`
    Request *http.Request  `json:"request"`

    Passkey    string
    Infohashes []string
}

// NewScrape parses an HTTP request and generates a Scrape.
func NewScrape(r *http.Request, c *config.Config) (*Scrape, error) {
    q, err := query.New(r.URL.RawQuery)
    if err != nil {
        return nil, err
    }

    var passkey string
    if c.Private {
        dir, _ := path.Split(r.URL.Path)
        if len(dir) != 34 {
            return nil, ErrMalformedRequest
        }
        passkey = dir[1:34]
    }

    if q.Infohashes == nil {
        if _, exists := q.Params["infohash"]; !exists {
            // There aren't any infohashes.
            return nil, ErrMalformedRequest
        }
        q.Infohashes = []string{q.Params["infohash"]}
    }

    return &Scrape{
        Config:  c,
        Request: r,

        Passkey:    passkey,
        Infohashes: q.Infohashes,
    }, nil
}
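A short sketch of how the two small helpers behave, which is worth keeping in mind when reading the announce handler later in this commit; the peer IDs are taken from the test table below and the printed values follow from the code above:

package main

import (
    "fmt"

    "github.com/chihaya/chihaya/models"
)

func main() {
    // ClientID strips the Azureus-style leading dash and keeps six bytes.
    a := models.Announce{PeerID: "-TR0960-6ep6svaa61r4"}
    fmt.Println(a.ClientID()) // TR0960

    // Shadow-style IDs (no dash) are truncated to their first six bytes.
    b := models.Announce{PeerID: "M4-4-0--9aa757Efd5Bl"}
    fmt.Println(b.ClientID()) // M4-4-0

    // Peer.Key namespaces a peer ID by user, base-36 encoding the user ID.
    p := models.Peer{ID: "-TR0960-6ep6svaa61r4", UserID: 71}
    fmt.Println(p.Key()) // -TR0960-6ep6svaa61r4:1z
}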
models/models_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package models

import (
    "testing"
)

type PeerClientPair struct {
    announce Announce
    clientID string
}

var TestClients = []PeerClientPair{
    {Announce{PeerID: "-AZ3034-6wfG2wk6wWLc"}, "AZ3034"},
    {Announce{PeerID: "-AZ3042-6ozMq5q6Q3NX"}, "AZ3042"},
    {Announce{PeerID: "-BS5820-oy4La2MWGEFj"}, "BS5820"},
    {Announce{PeerID: "-AR6360-6oZyyMWoOOBe"}, "AR6360"},
    {Announce{PeerID: "-AG2083-s1hiF8vGAAg0"}, "AG2083"},
    {Announce{PeerID: "-AG3003-lEl2Mm4NEO4n"}, "AG3003"},
    {Announce{PeerID: "-MR1100-00HS~T7*65rm"}, "MR1100"},
    {Announce{PeerID: "-LK0140-ATIV~nbEQAMr"}, "LK0140"},
    {Announce{PeerID: "-KT2210-347143496631"}, "KT2210"},
    {Announce{PeerID: "-TR0960-6ep6svaa61r4"}, "TR0960"},
    {Announce{PeerID: "-XX1150-dv220cotgj4d"}, "XX1150"},
    {Announce{PeerID: "-AZ2504-192gwethivju"}, "AZ2504"},
    {Announce{PeerID: "-KT4310-3L4UvarKuqIu"}, "KT4310"},
    {Announce{PeerID: "-AZ2060-0xJQ02d4309O"}, "AZ2060"},
    {Announce{PeerID: "-BD0300-2nkdf08Jd890"}, "BD0300"},
    {Announce{PeerID: "-A~0010-a9mn9DFkj39J"}, "A~0010"},
    {Announce{PeerID: "-UT2300-MNu93JKnm930"}, "UT2300"},
    {Announce{PeerID: "-UT2300-KT4310KT4301"}, "UT2300"},

    {Announce{PeerID: "T03A0----f089kjsdf6e"}, "T03A0-"},
    {Announce{PeerID: "S58B-----nKl34GoNb75"}, "S58B--"},
    {Announce{PeerID: "M4-4-0--9aa757Efd5Bl"}, "M4-4-0"},

    {Announce{PeerID: "AZ2500BTeYUzyabAfo6U"}, "AZ2500"}, // BitTyrant
    {Announce{PeerID: "exbc0JdSklm834kj9Udf"}, "exbc0J"}, // Old BitComet
    {Announce{PeerID: "FUTB0L84j542mVc84jkd"}, "FUTB0L"}, // Alt BitComet
    {Announce{PeerID: "XBT054d-8602Jn83NnF9"}, "XBT054"}, // XBT
    {Announce{PeerID: "OP1011affbecbfabeefb"}, "OP1011"}, // Opera
    {Announce{PeerID: "-ML2.7.2-kgjjfkd9762"}, "ML2.7."}, // MLDonkey
    {Announce{PeerID: "-BOWA0C-SDLFJWEIORNM"}, "BOWA0C"}, // Bits on Wheels
    {Announce{PeerID: "Q1-0-0--dsn34DFn9083"}, "Q1-0-0"}, // Queen Bee
    {Announce{PeerID: "Q1-10-0-Yoiumn39BDfO"}, "Q1-10-"}, // Queen Bee Alt
    {Announce{PeerID: "346------SDFknl33408"}, "346---"}, // TorreTopia
    {Announce{PeerID: "QVOD0054ABFFEDCCDEDB"}, "QVOD00"}, // Qvod

    {Announce{PeerID: ""}, ""},
    {Announce{PeerID: "-"}, ""},
    {Announce{PeerID: "12345"}, ""},
    {Announce{PeerID: "-12345"}, ""},
    {Announce{PeerID: "123456"}, "123456"},
    {Announce{PeerID: "-123456"}, "123456"},
}

func TestClientID(t *testing.T) {
    for _, pair := range TestClients {
        if parsedID := pair.announce.ClientID(); parsedID != pair.clientID {
            t.Error("Incorrectly parsed peer ID", pair.announce.PeerID, "as", parsedID)
        }
    }
}
models/query/query.go (new file, 152 lines)
@@ -0,0 +1,152 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package query implements a fast, simple URL Query parser.
package query

import (
    "errors"
    "net/http"
    "net/url"
    "strconv"
)

// Query represents a parsed URL.Query.
type Query struct {
    Infohashes []string
    Params     map[string]string
}

// New parses a raw url query.
func New(query string) (*Query, error) {
    var (
        keyStart, keyEnd int
        valStart, valEnd int
        firstInfohash    string

        onKey       = true
        hasInfohash = false

        q = &Query{
            Infohashes: nil,
            Params:     make(map[string]string),
        }
    )

    for i, length := 0, len(query); i < length; i++ {
        separator := query[i] == '&' || query[i] == ';' || query[i] == '?'
        if separator || i == length-1 {
            if onKey {
                keyStart = i + 1
                continue
            }

            if i == length-1 && !separator {
                if query[i] == '=' {
                    continue
                }
                valEnd = i
            }

            keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])
            if err != nil {
                return nil, err
            }

            valStr, err := url.QueryUnescape(query[valStart : valEnd+1])
            if err != nil {
                return nil, err
            }

            q.Params[keyStr] = valStr

            if keyStr == "info_hash" {
                if hasInfohash {
                    // Multiple infohashes
                    if q.Infohashes == nil {
                        q.Infohashes = []string{firstInfohash}
                    }
                    q.Infohashes = append(q.Infohashes, valStr)
                } else {
                    firstInfohash = valStr
                    hasInfohash = true
                }
            }

            onKey = true
            keyStart = i + 1

        } else if query[i] == '=' {
            onKey = false
            valStart = i + 1
        } else if onKey {
            keyEnd = i
        } else {
            valEnd = i
        }
    }

    return q, nil
}

// Uint64 is a helper to obtain a uints of any base from a Query. After being
// called, you can safely cast the uint64 to your desired base.
func (q *Query) Uint64(key string) (uint64, error) {
    str, exists := q.Params[key]
    if !exists {
        return 0, errors.New("value does not exist for key: " + key)
    }

    val, err := strconv.ParseUint(str, 10, 64)
    if err != nil {
        return 0, err
    }

    return val, nil
}

// RequestedPeerCount returns the request peer count or the provided fallback.
func (q Query) RequestedPeerCount(fallback int) int {
    if numWantStr, exists := q.Params["numWant"]; exists {
        numWant, err := strconv.Atoi(numWantStr)
        if err != nil {
            return fallback
        }
        return numWant
    }

    return fallback
}

// RequestedIP returns the requested IP address from a Query.
func (q Query) RequestedIP(r *http.Request) (string, error) {
    if ip, ok := q.Params["ip"]; ok {
        return ip, nil
    }

    if ip, ok := q.Params["ipv4"]; ok {
        return ip, nil
    }

    if xRealIPs, ok := q.Params["X-Real-Ip"]; ok {
        return string(xRealIPs[0]), nil
    }

    if r.RemoteAddr == "" {
        return "127.0.0.1", nil
    }

    portIndex := len(r.RemoteAddr) - 1
    for ; portIndex >= 0; portIndex-- {
        if r.RemoteAddr[portIndex] == ':' {
            break
        }
    }

    if portIndex != -1 {
        return r.RemoteAddr[0:portIndex], nil
    }

    return "", errors.New("failed to parse IP address")
}
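A small sketch of the parser in use, reflecting the behavior of New and Uint64 above; the query string is illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/chihaya/chihaya/models/query"
)

func main() {
    raw := "info_hash=aaaaaaaaaaaaaaaaaaaa&left=0&uploaded=1024&compact=1"

    q, err := query.New(raw)
    if err != nil {
        log.Fatal(err)
    }

    // Plain parameters land in Params as strings.
    fmt.Println(q.Params["compact"]) // 1

    // Numeric parameters go through the Uint64 helper.
    left, err := q.Uint64("left")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(left) // 0

    // A single info_hash stays in Params; Infohashes is only populated
    // when the key repeats.
    fmt.Println(q.Infohashes == nil) // true
}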
@@ -1,4 +1,7 @@
package server
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package query

import (
    "net/url"

@@ -46,7 +49,7 @@ func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {

func TestValidQueries(t *testing.T) {
    for parseIndex, parseVal := range ValidAnnounceArguments {
        parsedQueryObj, err := parseQuery(baseAddr + "announce/?" + parseVal.Encode())
        parsedQueryObj, err := New(baseAddr + "announce/?" + parseVal.Encode())
        if err != nil {
            t.Error(err)
        }

@@ -59,7 +62,7 @@ func TestValidQueries(t *testing.T) {

func TestInvalidQueries(t *testing.T) {
    for parseIndex, parseStr := range InvalidQueries {
        parsedQueryObj, err := parseQuery(parseStr)
        parsedQueryObj, err := New(parseStr)
        if err == nil {
            t.Error("Should have produced error", parseIndex)
        }

@@ -73,7 +76,7 @@ func TestInvalidQueries(t *testing.T) {
func BenchmarkParseQuery(b *testing.B) {
    for bCount := 0; bCount < b.N; bCount++ {
        for parseIndex, parseStr := range ValidAnnounceArguments {
            parsedQueryObj, err := parseQuery(baseAddr + "announce/?" + parseStr.Encode())
            parsedQueryObj, err := New(baseAddr + "announce/?" + parseStr.Encode())
            if err != nil {
                b.Error(err, parseIndex)
                b.Log(parsedQueryObj)
@@ -1,116 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package server

import (
    "errors"
    "net/http"
    "path"
    "strconv"

    "github.com/chihaya/chihaya/config"
)

// announce represents all of the data from an announce request.
type announce struct {
    Compact    bool
    Downloaded uint64
    Event      string
    IP         string
    Infohash   string
    Left       uint64
    NumWant    int
    Passkey    string
    PeerID     string
    Port       uint64
    Uploaded   uint64
}

// newAnnounce parses an HTTP request and generates an Announce.
func newAnnounce(r *http.Request, conf *config.Config) (*announce, error) {
    pq, err := parseQuery(r.URL.RawQuery)
    if err != nil {
        return nil, err
    }

    compact := pq.Params["compact"] == "1"
    downloaded, downloadedErr := pq.getUint64("downloaded")
    event, _ := pq.Params["event"]
    infohash, _ := pq.Params["info_hash"]
    ip, _ := requestedIP(r, pq)
    left, leftErr := pq.getUint64("left")
    numWant := requestedPeerCount(conf.DefaultNumWant, pq)
    passkey, _ := path.Split(r.URL.Path)
    peerID, _ := pq.Params["peer_id"]
    port, portErr := pq.getUint64("port")
    uploaded, uploadedErr := pq.getUint64("uploaded")

    if downloadedErr != nil ||
        infohash == "" ||
        leftErr != nil ||
        peerID == "" ||
        portErr != nil ||
        uploadedErr != nil ||
        ip == "" {
        return nil, errors.New("malformed request")
    }

    return &announce{
        Compact:    compact,
        Downloaded: downloaded,
        Event:      event,
        IP:         ip,
        Infohash:   infohash,
        Left:       left,
        NumWant:    numWant,
        Passkey:    passkey,
        PeerID:     peerID,
        Port:       port,
        Uploaded:   uploaded,
    }, nil
}

func requestedPeerCount(fallback int, pq *parsedQuery) int {
    if numWantStr, exists := pq.Params["numWant"]; exists {
        numWant, err := strconv.Atoi(numWantStr)
        if err != nil {
            return fallback
        }
        return numWant
    }

    return fallback
}

func requestedIP(r *http.Request, pq *parsedQuery) (string, error) {
    if ip, ok := pq.Params["ip"]; ok {
        return ip, nil
    }

    if ip, ok := pq.Params["ipv4"]; ok {
        return ip, nil
    }

    if xRealIPs, ok := pq.Params["X-Real-Ip"]; ok {
        return string(xRealIPs[0]), nil
    }

    if r.RemoteAddr == "" {
        return "127.0.0.1", nil
    }

    portIndex := len(r.RemoteAddr) - 1
    for ; portIndex >= 0; portIndex-- {
        if r.RemoteAddr[portIndex] == ':' {
            break
        }
    }

    if portIndex != -1 {
        return r.RemoteAddr[0:portIndex], nil
    }

    return "", errors.New("failed to parse IP address")
}
@@ -1,57 +0,0 @@
package server

import (
    "fmt"
    "io"
    "strconv"
    "time"
)

func writeBencoded(w io.Writer, data interface{}) {
    switch v := data.(type) {
    case string:
        str := fmt.Sprintf("%s:%s", strconv.Itoa(len(v)), v)
        io.WriteString(w, str)

    case int:
        str := fmt.Sprintf("i%se", strconv.Itoa(v))
        io.WriteString(w, str)

    case uint:
        str := fmt.Sprintf("i%se", strconv.FormatUint(uint64(v), 10))
        io.WriteString(w, str)

    case int64:
        str := fmt.Sprintf("i%se", strconv.FormatInt(v, 10))
        io.WriteString(w, str)

    case uint64:
        str := fmt.Sprintf("i%se", strconv.FormatUint(v, 10))
        io.WriteString(w, str)

    case time.Duration: // Assume seconds
        str := fmt.Sprintf("i%se", strconv.FormatInt(int64(v/time.Second), 10))
        io.WriteString(w, str)

    case map[string]interface{}:
        io.WriteString(w, "d")
        for key, val := range v {
            str := fmt.Sprintf("%s:%s", strconv.Itoa(len(key)), key)
            io.WriteString(w, str)
            writeBencoded(w, val)
        }
        io.WriteString(w, "e")

    case []string:
        io.WriteString(w, "l")
        for _, val := range v {
            writeBencoded(w, val)
        }
        io.WriteString(w, "e")

    default:
        // Although not currently necessary,
        // should handle []interface{} manually; Go can't do it implicitly
        panic("tried to bencode an unsupported type!")
    }
}
@@ -1,18 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package server

import (
    "github.com/chihaya/chihaya/storage/backend"
    "github.com/chihaya/chihaya/storage/tracker"
)

// Primer represents a function that can prime storage with data.
type Primer func(tracker.Pool, backend.Conn) error

// Prime executes a priming function on the server.
func (s *Server) Prime(p Primer) error {
    return p(s.trackerPool, s.backendConn)
}
server/query.go (104 lines removed)
@@ -1,104 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package server

import (
    "errors"
    "net/url"
    "strconv"
)

// parsedQuery represents a parsed URL.Query.
type parsedQuery struct {
    Infohashes []string
    Params     map[string]string
}

// getUint64 is a helper to obtain a uint64 from a parsedQuery.
func (pq *parsedQuery) getUint64(key string) (uint64, error) {
    str, exists := pq.Params[key]
    if !exists {
        return 0, errors.New("value does not exist for key: " + key)
    }

    val, err := strconv.ParseUint(str, 10, 64)
    if err != nil {
        return 0, err
    }

    return val, nil
}

// parseQuery parses a raw url query.
func parseQuery(query string) (*parsedQuery, error) {
    var (
        keyStart, keyEnd int
        valStart, valEnd int
        firstInfohash    string

        onKey       = true
        hasInfohash = false

        pq = &parsedQuery{
            Infohashes: nil,
            Params:     make(map[string]string),
        }
    )

    for i, length := 0, len(query); i < length; i++ {
        separator := query[i] == '&' || query[i] == ';' || query[i] == '?'
        if separator || i == length-1 {
            if onKey {
                keyStart = i + 1
                continue
            }

            if i == length-1 && !separator {
                if query[i] == '=' {
                    continue
                }
                valEnd = i
            }

            keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])
            if err != nil {
                return nil, err
            }

            valStr, err := url.QueryUnescape(query[valStart : valEnd+1])
            if err != nil {
                return nil, err
            }

            pq.Params[keyStr] = valStr

            if keyStr == "info_hash" {
                if hasInfohash {
                    // Multiple infohashes
                    if pq.Infohashes == nil {
                        pq.Infohashes = []string{firstInfohash}
                    }
                    pq.Infohashes = append(pq.Infohashes, valStr)
                } else {
                    firstInfohash = valStr
                    hasInfohash = true
                }
            }

            onKey = true
            keyStart = i + 1

        } else if query[i] == '=' {
            onKey = false
            valStart = i + 1
        } else if onKey {
            keyEnd = i
        } else {
            valEnd = i
        }
    }

    return pq, nil
}
@@ -5,242 +5,236 @@
package server

import (
    "errors"
    "log"
    "io"
    "net"
    "net/http"
    "strconv"
    "time"

    "github.com/chihaya/chihaya/storage"
    "github.com/chihaya/chihaya/storage/backend"
    log "github.com/golang/glog"

    "github.com/chihaya/chihaya/bencode"
    "github.com/chihaya/chihaya/drivers/tracker"
    "github.com/chihaya/chihaya/models"
)

func (s Server) serveAnnounce(w http.ResponseWriter, r *http.Request) {
    // Parse the required data from a request
    announce, err := newAnnounce(r, s.conf)
    announce, err := models.NewAnnounce(r, s.conf)
    if err != nil {
        fail(err, w, r)
        return
    }

    // Get a connection to the tracker db
    conn, err := s.trackerPool.Get()
    if err != nil {
        log.Panicf("server: %s", err)
        fail(err, w, r)
        return
    }

    // Validate the user's passkey
    user, err := validateUser(conn, announce.Passkey)
    err = conn.ClientWhitelisted(announce.ClientID())
    if err != nil {
        fail(err, w, r)
        return
    }

    // Check if the user's client is whitelisted
    whitelisted, err := conn.ClientWhitelisted(parsePeerID(announce.PeerID))
    var user *models.User
    if s.conf.Private {
        user, err = conn.FindUser(announce.Passkey)
        if err != nil {
            log.Panicf("server: %s", err)
            fail(err, w, r)
            return
        }
    if !whitelisted {
        fail(errors.New("client is not approved"), w, r)
    }

    torrent, err := conn.FindTorrent(announce.Infohash)
    if err != nil {
        fail(err, w, r)
        return
    }

    // Find the specified torrent
    torrent, exists, err := conn.FindTorrent(announce.Infohash)
    peer := models.NewPeer(torrent, user, announce)

    created, err := updateTorrent(peer, torrent, conn, announce)
    if err != nil {
        log.Panicf("server: %s", err)
    }
    if !exists {
        fail(errors.New("torrent does not exist"), w, r)
        fail(err, w, r)
        return
    }

    // If the torrent was pruned and the user is seeding, unprune it
    if !torrent.Active && announce.Left == 0 {
        err := conn.MarkActive(torrent)
        snatched, err := handleEvent(announce, user, torrent, peer, conn)
        if err != nil {
            log.Panicf("server: %s", err)
        }
        fail(err, w, r)
        return
    }

    now := time.Now().Unix()
    // Create a new peer object from the request
    peer := &storage.Peer{
        ID: announce.PeerID,
        UserID: user.ID,
        TorrentID: torrent.ID,
        IP: announce.IP,
        Port: announce.Port,
        Uploaded: announce.Uploaded,
        Downloaded: announce.Downloaded,
        Left: announce.Left,
        LastAnnounce: now,
    }
    delta := &backend.AnnounceDelta{
        Peer: peer,
        Torrent: torrent,
        User: user,
        Timestamp: now,
    }
    writeAnnounceResponse(w, announce, user, torrent)

    // Look for the user in in the pool of seeders and leechers
    _, seeder := torrent.Seeders[storage.PeerMapKey(peer)]
    _, leecher := torrent.Leechers[storage.PeerMapKey(peer)]
    delta := models.NewAnnounceDelta(peer, user, announce, torrent, created, snatched)
    s.backendConn.RecordAnnounce(delta)

    log.V(3).Infof("chihaya: handled announce from %s", announce.IP)
}

func updateTorrent(p *models.Peer, t *models.Torrent, conn tracker.Conn, a *models.Announce) (created bool, err error) {
    if !t.Active && a.Left == 0 {
        err = conn.MarkActive(t)
        if err != nil {
            return
        }
    }

    switch {
    // Guarantee that no user is in both pools
    case seeder && leecher:
        if announce.Left == 0 {
            err := conn.RemoveLeecher(torrent, peer)
    case t.InSeederPool(p):
        err = conn.SetSeeder(t, p)
        if err != nil {
            log.Panicf("server: %s", err)
        }
        leecher = false
        } else {
            err := conn.RemoveSeeder(torrent, peer)
            if err != nil {
                log.Panicf("server: %s", err)
            }
            seeder = false
            return
        }

    case seeder:
        // Update the peer with the stats from the request
        err := conn.SetSeeder(torrent, peer)
    case t.InLeecherPool(p):
        err = conn.SetLeecher(t, p)
        if err != nil {
            log.Panicf("server: %s", err)
        }

    case leecher:
        // Update the peer with the stats from the request
        err := conn.SetLeecher(torrent, peer)
        if err != nil {
            log.Panicf("server: %s", err)
            return
        }

    default:
        if announce.Left == 0 {
            // Save the peer as a new seeder
            err := conn.AddSeeder(torrent, peer)
        if a.Left == 0 {
            err = conn.AddSeeder(t, p)
            if err != nil {
                log.Panicf("server: %s", err)
                return
            }
        } else {
            err = conn.AddLeecher(torrent, peer)
            err = conn.AddLeecher(t, p)
            if err != nil {
                log.Panicf("server: %s", err)
                return
            }
        }
        delta.Created = true
        created = true
    }

    // Handle any events in the request
    return
}

func handleEvent(a *models.Announce, u *models.User, t *models.Torrent, p *models.Peer, conn tracker.Conn) (snatched bool, err error) {
    switch {
    case announce.Event == "stopped" || announce.Event == "paused":
        if seeder {
            err := conn.RemoveSeeder(torrent, peer)
    case a.Event == "stopped" || a.Event == "paused":
        if t.InSeederPool(p) {
            err = conn.RemoveSeeder(t, p)
            if err != nil {
                log.Panicf("server: %s", err)
                return
            }
        }
        if leecher {
            err := conn.RemoveLeecher(torrent, peer)
        if t.InLeecherPool(p) {
            err = conn.RemoveLeecher(t, p)
            if err != nil {
                log.Panicf("server: %s", err)
                return
            }
        }

    case announce.Event == "completed":
        err := conn.RecordSnatch(user, torrent)
    case a.Event == "completed":
        err = conn.IncrementSnatches(t)
        if err != nil {
            log.Panicf("server: %s", err)
            return
        }
        delta.Snatched = true
        if leecher {
            err := conn.LeecherFinished(torrent, peer)
        snatched = true

        if t.InLeecherPool(p) {
            err = tracker.LeecherFinished(conn, t, p)
            if err != nil {
                log.Panicf("server: %s", err)
                return
            }
        }

    case leecher && announce.Left == 0:
    case t.InLeecherPool(p) && a.Left == 0:
        // A leecher completed but the event was never received
        err := conn.LeecherFinished(torrent, peer)
        err = tracker.LeecherFinished(conn, t, p)
        if err != nil {
            log.Panicf("server: %s", err)
            return
        }
    }

    if announce.IP != peer.IP || announce.Port != peer.Port {
        peer.Port = announce.Port
        peer.IP = announce.IP
    }
    return
}

    // Generate the response
    seedCount := len(torrent.Seeders)
    leechCount := len(torrent.Leechers)
func writeAnnounceResponse(w io.Writer, a *models.Announce, u *models.User, t *models.Torrent) {
    bencoder := bencode.NewEncoder(w)
    seedCount := len(t.Seeders)
    leechCount := len(t.Leechers)

    writeBencoded(w, "d")
    writeBencoded(w, "complete")
    writeBencoded(w, seedCount)
    writeBencoded(w, "incomplete")
    writeBencoded(w, leechCount)
    writeBencoded(w, "interval")
    writeBencoded(w, s.conf.Announce.Duration)
    writeBencoded(w, "min interval")
    writeBencoded(w, s.conf.MinAnnounce.Duration)
    bencoder.Encode("d")
    bencoder.Encode("complete")
    bencoder.Encode(seedCount)
    bencoder.Encode("incomplete")
    bencoder.Encode(leechCount)
    bencoder.Encode("interval")
    bencoder.Encode(a.Config.Announce.Duration)
    bencoder.Encode("min interval")
    bencoder.Encode(a.Config.MinAnnounce.Duration)

    if announce.NumWant > 0 && announce.Event != "stopped" && announce.Event != "paused" {
        writeBencoded(w, "peers")
        var peerCount, count int
    if a.NumWant > 0 && a.Event != "stopped" && a.Event != "paused" {
        bencoder.Encode("peers")

        if announce.Compact {
            if announce.Left > 0 {
                peerCount = minInt(announce.NumWant, leechCount)
        var peerCount int
        if a.Compact {
            if a.Left == 0 {
                peerCount = minInt(a.NumWant, leechCount)
            } else {
                peerCount = minInt(announce.NumWant, leechCount+seedCount-1)
                peerCount = minInt(a.NumWant, leechCount+seedCount-1)
            }
            writeBencoded(w, strconv.Itoa(peerCount*6))
            writeBencoded(w, ":")
            // 6 is the number of bytes 1 compact peer takes up.
            bencoder.Encode(strconv.Itoa(peerCount * 6))
            bencoder.Encode(":")
        } else {
            writeBencoded(w, "l")
            bencoder.Encode("l")
        }

        if announce.Left > 0 {
        var count int
        if a.Left == 0 {
            // If they're seeding, give them only leechers
            count += writeLeechers(w, user, torrent, announce.NumWant, announce.Compact)
            count = writePeers(w, u, t.Leechers, a.NumWant, a.Compact)
        } else {
            // If they're leeching, prioritize giving them seeders
            count += writeSeeders(w, user, torrent, announce.NumWant, announce.Compact)
            count += writeLeechers(w, user, torrent, announce.NumWant-count, announce.Compact)
            count += writePeers(w, u, t.Seeders, a.NumWant, a.Compact)
            count += writePeers(w, u, t.Leechers, a.NumWant-count, a.Compact)
        }
        if a.Compact && peerCount != count {
            log.Errorf("calculated peer count (%d) != real count (%d)", peerCount, count)
        }

        if announce.Compact && peerCount != count {
            log.Panicf("calculated peer count (%d) != real count (%d)", peerCount, count)
        }

        if !announce.Compact {
            writeBencoded(w, "e")
        if !a.Compact {
            bencoder.Encode("e")
        }
    }
    writeBencoded(w, "e")
    bencoder.Encode("e")
}

    rawDeltaUp := peer.Uploaded - announce.Uploaded
    rawDeltaDown := peer.Downloaded - announce.Downloaded

    // Restarting a torrent may cause a delta to be negative.
    if rawDeltaUp < 0 {
        rawDeltaUp = 0
    }
    if rawDeltaDown < 0 {
        rawDeltaDown = 0
func writePeers(w io.Writer, user *models.User, peers map[string]models.Peer, numWant int, compact bool) (count int) {
    bencoder := bencode.NewEncoder(w)
    for _, peer := range peers {
        if count >= numWant {
            break
        }

        delta.Uploaded = uint64(float64(rawDeltaUp) * user.UpMultiplier * torrent.UpMultiplier)
        delta.Downloaded = uint64(float64(rawDeltaDown) * user.DownMultiplier * torrent.DownMultiplier)
        if peer.UserID == user.ID {
            continue
        }

        s.backendConn.RecordAnnounce(delta)
        if compact {
            if ip := net.ParseIP(peer.IP); ip != nil {
                w.Write(ip)
                w.Write([]byte{byte(peer.Port >> 8), byte(peer.Port & 0xff)})
            }
        } else {
            bencoder.Encode("d")
            bencoder.Encode("ip")
            bencoder.Encode(peer.IP)
            bencoder.Encode("peer id")
            bencoder.Encode(peer.ID)
            bencoder.Encode("port")
            bencoder.Encode(peer.Port)
            bencoder.Encode("e")
        }
        count++
    }

    return
}

func minInt(a, b int) int {

@@ -250,61 +244,3 @@ func minInt(a, b int) int {

    return b
}

func writeSeeders(w http.ResponseWriter, user *storage.User, t *storage.Torrent, numWant int, compact bool) int {
    count := 0
    for _, peer := range t.Seeders {
        if count >= numWant {
            break
        }

        if peer.UserID == user.ID {
            continue
        }

        if compact {
            // TODO writeBencoded(w, compactAddr)
        } else {
            writeBencoded(w, "d")
            writeBencoded(w, "ip")
            writeBencoded(w, peer.IP)
            writeBencoded(w, "peer id")
            writeBencoded(w, peer.ID)
            writeBencoded(w, "port")
            writeBencoded(w, peer.Port)
            writeBencoded(w, "e")
        }
        count++
    }

    return count
}

func writeLeechers(w http.ResponseWriter, user *storage.User, t *storage.Torrent, numWant int, compact bool) int {
    count := 0
    for _, peer := range t.Leechers {
        if count >= numWant {
            break
        }

        if peer.UserID == user.ID {
            continue
        }

        if compact {
            // TODO writeBencoded(w, compactAddr)
        } else {
            writeBencoded(w, "d")
            writeBencoded(w, "ip")
            writeBencoded(w, peer.IP)
            writeBencoded(w, "peer id")
writeBencoded(w, peer.ID)
|
||||
writeBencoded(w, "port")
|
||||
writeBencoded(w, peer.Port)
|
||||
writeBencoded(w, "e")
|
||||
}
|
||||
count++
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
|
|
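For context on the compact branch of writePeers above: compact responses pack each peer into 6 bytes, the IPv4 address followed by the port in network byte order, which is where the peerCount * 6 length prefix comes from. A minimal standalone sketch of that encoding, not part of this change; the helper name compactPeer and the sample values are illustrative, and it assumes IPv4 peers:

package main

import (
	"fmt"
	"net"
)

// compactPeer packs an IPv4 address and a port into the 6-byte compact
// representation: 4 address bytes followed by a big-endian port.
func compactPeer(ipStr string, port uint16) ([]byte, error) {
	ip := net.ParseIP(ipStr).To4() // nil for IPv6 or unparsable input
	if ip == nil {
		return nil, fmt.Errorf("not an IPv4 address: %s", ipStr)
	}
	return append([]byte(ip), byte(port>>8), byte(port&0xff)), nil
}

func main() {
	b, err := compactPeer("10.0.0.1", 6881)
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // 0a 00 00 01 1a e1
}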
@ -9,12 +9,12 @@ import (
"net/http/httptest"
"testing"

"github.com/chihaya/chihaya/storage"
"github.com/chihaya/chihaya/storage/backend"
"github.com/chihaya/chihaya/storage/tracker"
"github.com/chihaya/chihaya/drivers/backend"
"github.com/chihaya/chihaya/drivers/tracker"
"github.com/chihaya/chihaya/models"

_ "github.com/chihaya/chihaya/storage/backend/mock"
_ "github.com/chihaya/chihaya/storage/tracker/mock"
_ "github.com/chihaya/chihaya/drivers/backend/mock"
_ "github.com/chihaya/chihaya/drivers/tracker/mock"
)

func TestAnnounce(t *testing.T) {

@ -29,7 +29,7 @@ func TestAnnounce(t *testing.T) {
return
}

err = conn.AddUser(&storage.User{
err = conn.AddUser(&models.User{
ID: 1,
Passkey: "yby47f04riwpndba456rqxtmifenq5h6",
})

@ -42,11 +42,11 @@ func TestAnnounce(t *testing.T) {
return
}

err = conn.AddTorrent(&storage.Torrent{
err = conn.AddTorrent(&models.Torrent{
ID: 1,
Infohash: string([]byte{0x89, 0xd4, 0xbc, 0x52, 0x11, 0x16, 0xca, 0x1d, 0x42, 0xa2, 0xf3, 0x0d, 0x1f, 0x27, 0x4d, 0x94, 0xe4, 0x68, 0x1d, 0xaf}),
Seeders: make(map[string]storage.Peer),
Leechers: make(map[string]storage.Peer),
Seeders: make(map[string]models.Peer),
Leechers: make(map[string]models.Peer),
})

return

@ -64,8 +64,8 @@ func TestAnnounce(t *testing.T) {
w := httptest.NewRecorder()
s.serveAnnounce(w, r)

if w.Body.String() != "1:d8:completei0e10:incompletei1e8:intervali1800e12:min intervali900e1:e" {
t.Errorf("improper response from server")
if w.Body.String() != "1:d8:completei0e10:incompletei0e8:intervali1800e12:min intervali900e1:e" {
t.Errorf("improper response from server:\n%s", w.Body.String())
}

}
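A note on the expected body in this test: the bencode Encoder writes each value independently, and a Go string such as "d" or "e" is emitted as a length-prefixed bencode string, which is why the expected response begins with "1:d" rather than a bare dictionary marker. A small sketch that rebuilds the same byte string; the buffer and the literal interval values are assumptions for illustration, not part of this change:

package main

import (
	"bytes"
	"fmt"

	"github.com/chihaya/chihaya/bencode"
)

func main() {
	var buf bytes.Buffer
	enc := bencode.NewEncoder(&buf)

	// Mirrors the field order written by writeAnnounceResponse for an empty swarm.
	for _, v := range []interface{}{"d", "complete", 0, "incomplete", 0, "interval", 1800, "min interval", 900, "e"} {
		if err := enc.Encode(v); err != nil {
			panic(err)
		}
	}

	fmt.Println(buf.String())
	// 1:d8:completei0e10:incompletei0e8:intervali1800e12:min intervali900e1:e
}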
@ -5,72 +5,67 @@
package server

import (
"errors"
"io"
"log"
"net/http"
"path"

"github.com/chihaya/chihaya/storage"
log "github.com/golang/glog"

"github.com/chihaya/chihaya/bencode"
"github.com/chihaya/chihaya/models"
)

func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request) {
// Parse the query
pq, err := parseQuery(r.URL.RawQuery)
if err != nil {
fail(errors.New("error parsing query"), w, r)
return
}

// Get a connection to the tracker db
conn, err := s.trackerPool.Get()
if err != nil {
log.Fatal(err)
}

// Find and validate the user
passkey, _ := path.Split(r.URL.Path)
_, err = validateUser(conn, passkey)
scrape, err := models.NewScrape(r, s.conf)
if err != nil {
fail(err, w, r)
return
}

io.WriteString(w, "d")
writeBencoded(w, "files")
if pq.Infohashes != nil {
for _, infohash := range pq.Infohashes {
torrent, exists, err := conn.FindTorrent(infohash)
conn, err := s.trackerPool.Get()
if err != nil {
log.Panicf("server: %s", err)
fail(err, w, r)
}
if exists {
writeBencoded(w, infohash)
writeScrapeInfo(w, torrent)
}
}
} else if infohash, exists := pq.Params["info_hash"]; exists {
torrent, exists, err := conn.FindTorrent(infohash)

if s.conf.Private {
_, err = conn.FindUser(scrape.Passkey)
if err != nil {
log.Panicf("server: %s", err)
}
if exists {
writeBencoded(w, infohash)
writeScrapeInfo(w, torrent)
fail(err, w, r)
return
}
}
io.WriteString(w, "e")

var torrents []*models.Torrent
for _, infohash := range scrape.Infohashes {
torrent, err := conn.FindTorrent(infohash)
if err != nil {
fail(err, w, r)
return
}
torrents = append(torrents, torrent)
}

bencoder := bencode.NewEncoder(w)
bencoder.Encode("d")
bencoder.Encode("files")
for _, torrent := range torrents {
writeTorrentStatus(w, torrent)
}
bencoder.Encode("e")

log.V(3).Infof("chihaya: handled scrape from %s", r.RemoteAddr)

w.(http.Flusher).Flush()
}

func writeScrapeInfo(w io.Writer, torrent *storage.Torrent) {
io.WriteString(w, "d")
writeBencoded(w, "complete")
writeBencoded(w, len(torrent.Seeders))
writeBencoded(w, "downloaded")
writeBencoded(w, torrent.Snatches)
writeBencoded(w, "incomplete")
writeBencoded(w, len(torrent.Leechers))
io.WriteString(w, "e")
func writeTorrentStatus(w io.Writer, t *models.Torrent) {
bencoder := bencode.NewEncoder(w)
bencoder.Encode(t.Infohash)
bencoder.Encode("d")
bencoder.Encode("complete")
bencoder.Encode(len(t.Seeders))
bencoder.Encode("downloaded")
bencoder.Encode(t.Snatches)
bencoder.Encode("incomplete")
bencoder.Encode(len(t.Leechers))
bencoder.Encode("e")
}
@ -10,8 +10,8 @@ import (
"testing"

"github.com/chihaya/chihaya/config"
_ "github.com/chihaya/chihaya/storage/backend/mock"
_ "github.com/chihaya/chihaya/storage/tracker/mock"
_ "github.com/chihaya/chihaya/drivers/backend/mock"
_ "github.com/chihaya/chihaya/drivers/tracker/mock"
)

func newTestServer() (*Server, error) {
@ -8,7 +8,6 @@ package server
import (
"errors"
"io"
"log"
"net"
"net/http"
"path"

@ -17,11 +16,11 @@ import (
"time"

"github.com/etix/stoppableListener"
log "github.com/golang/glog"

"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/storage"
"github.com/chihaya/chihaya/storage/backend"
"github.com/chihaya/chihaya/storage/tracker"
"github.com/chihaya/chihaya/drivers/backend"
"github.com/chihaya/chihaya/drivers/tracker"
)

// Server represents BitTorrent tracker server.

@ -53,11 +52,6 @@ func New(conf *config.Config) (*Server, error) {
return nil, err
}

err = backendConn.Start()
if err != nil {
return nil, err
}

s := &Server{
conf: conf,
trackerPool: trackerPool,

@ -130,42 +124,11 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {

func fail(err error, w http.ResponseWriter, r *http.Request) {
errmsg := err.Error()
log.Println("handled failure: " + errmsg)
msg := "d14:failure reason" + strconv.Itoa(len(errmsg)) + ":" + errmsg + "e"
length, _ := io.WriteString(w, msg)
w.Header().Add("Content-Length", string(length))

log.V(2).Infof("chihaya: handled failure: %s from %s ", errmsg, r.RemoteAddr)

w.(http.Flusher).Flush()
}

func validateUser(conn tracker.Conn, dir string) (*storage.User, error) {
if len(dir) != 34 {
return nil, errors.New("passkey is invalid")
}
passkey := dir[1:33]

user, exists, err := conn.FindUser(passkey)
if err != nil {
log.Panicf("server: %s", err)
}
if !exists {
return nil, errors.New("user not found")
}

return user, nil
}

// parsePeerID returns the clientID for a given peerID.
func parsePeerID(peerID string) (clientID string) {
length := len(peerID)
if length >= 6 {
if peerID[0] == '-' {
if length >= 7 {
clientID = peerID[1:7]
}
} else {
clientID = peerID[0:6]
}
}

return
}
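To make the parsing rule above concrete: Azureus-style IDs ("-XX1234-...") yield the six characters after the leading dash, Shadow-style IDs without the dash yield their first six characters, and anything shorter produces an empty clientID. The snippet below repeats that logic as a standalone program so the sample outputs can be checked directly; it is illustrative only and not part of this change:

package main

import "fmt"

// parsePeerID mirrors the server helper above: it extracts the six-character
// client identifier embedded in a BitTorrent peer ID.
func parsePeerID(peerID string) (clientID string) {
	length := len(peerID)
	if length >= 6 {
		if peerID[0] == '-' {
			if length >= 7 {
				clientID = peerID[1:7]
			}
		} else {
			clientID = peerID[0:6]
		}
	}
	return
}

func main() {
	fmt.Printf("%q\n", parsePeerID("-AZ3034-6wfG2wk6wWLc")) // "AZ3034"
	fmt.Printf("%q\n", parsePeerID("XBT054d-8602Jn83NnF9")) // "XBT054"
	fmt.Printf("%q\n", parsePeerID("-12345"))               // ""
}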
@ -5,62 +5,14 @@
package server

import (
"testing"
"github.com/chihaya/chihaya/drivers/backend"
"github.com/chihaya/chihaya/drivers/tracker"
)

type PeerClientPair struct {
peerID string
clientID string
}

var TestClients = []PeerClientPair{
{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
{"-BS5820-oy4La2MWGEFj", "BS5820"},
{"-AR6360-6oZyyMWoOOBe", "AR6360"},
{"-AG2083-s1hiF8vGAAg0", "AG2083"},
{"-AG3003-lEl2Mm4NEO4n", "AG3003"},
{"-MR1100-00HS~T7*65rm", "MR1100"},
{"-LK0140-ATIV~nbEQAMr", "LK0140"},
{"-KT2210-347143496631", "KT2210"},
{"-TR0960-6ep6svaa61r4", "TR0960"},
{"-XX1150-dv220cotgj4d", "XX1150"},
{"-AZ2504-192gwethivju", "AZ2504"},
{"-KT4310-3L4UvarKuqIu", "KT4310"},
{"-AZ2060-0xJQ02d4309O", "AZ2060"},
{"-BD0300-2nkdf08Jd890", "BD0300"},
{"-A~0010-a9mn9DFkj39J", "A~0010"},
{"-UT2300-MNu93JKnm930", "UT2300"},
{"-UT2300-KT4310KT4301", "UT2300"},

{"T03A0----f089kjsdf6e", "T03A0-"},
{"S58B-----nKl34GoNb75", "S58B--"},
{"M4-4-0--9aa757Efd5Bl", "M4-4-0"},

{"AZ2500BTeYUzyabAfo6U", "AZ2500"}, // BitTyrant
{"exbc0JdSklm834kj9Udf", "exbc0J"}, // Old BitComet
{"FUTB0L84j542mVc84jkd", "FUTB0L"}, // Alt BitComet
{"XBT054d-8602Jn83NnF9", "XBT054"}, // XBT
{"OP1011affbecbfabeefb", "OP1011"}, // Opera
{"-ML2.7.2-kgjjfkd9762", "ML2.7."}, // MLDonkey
{"-BOWA0C-SDLFJWEIORNM", "BOWA0C"}, // Bits on Wheels
{"Q1-0-0--dsn34DFn9083", "Q1-0-0"}, // Queen Bee
{"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt
{"346------SDFknl33408", "346---"}, // TorreTopia
{"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod

{"", ""},
{"-", ""},
{"12345", ""},
{"-12345", ""},
{"123456", "123456"},
{"-123456", "123456"},
}

func TestParseClientID(t *testing.T) {
for _, pair := range TestClients {
if parsedID := parsePeerID(pair.peerID); parsedID != pair.clientID {
t.Error("Incorrectly parsed peer ID", pair.peerID, "as", parsedID)
}
}
// Primer represents a function that can prime drivers with data.
type Primer func(tracker.Pool, backend.Conn) error

// Prime executes a priming function on the server.
func (s *Server) Prime(p Primer) error {
return p(s.trackerPool, s.backendConn)
}
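The Primer/Prime pair added above exists so tests and deployments can seed the tracker and backend drivers before the server starts answering requests. A hedged sketch of one possible Primer, modeled on the TestAnnounce setup earlier in this change; the package name, infohash value, and error handling are illustrative assumptions rather than part of the commit:

package servertest

import (
	"github.com/chihaya/chihaya/drivers/backend"
	"github.com/chihaya/chihaya/drivers/tracker"
	"github.com/chihaya/chihaya/models"
)

// primeTestData grabs a tracker connection and loads one user and one torrent
// so that announces have something to hit.
func primeTestData(tp tracker.Pool, bc backend.Conn) error {
	conn, err := tp.Get()
	if err != nil {
		return err
	}

	err = conn.AddUser(&models.User{
		ID:      1,
		Passkey: "yby47f04riwpndba456rqxtmifenq5h6",
	})
	if err != nil {
		return err
	}

	return conn.AddTorrent(&models.Torrent{
		ID:       1,
		Infohash: "example-infohash-placeholder", // illustrative value only
		Seeders:  make(map[string]models.Peer),
		Leechers: make(map[string]models.Peer),
	})
}

// Usage, assuming srv was returned by New: err := srv.Prime(primeTestData)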
@ -1,57 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package storage implements the models for an abstraction over the
// multiple data stores used by a BitTorrent tracker.
package storage

import (
"strconv"
)

// Peer is the internal representation of a participant in a swarm.
type Peer struct {
ID string `json:"id"`
UserID uint64 `json:"user_id"`
TorrentID uint64 `json:"torrent_id"`

IP string `json:"ip"`
Port uint64 `json:"port"`

Uploaded uint64 `json:"uploaded"`
Downloaded uint64 `json:"downloaded"`
Left uint64 `json:"left"`
LastAnnounce int64 `json:"last_announce"`
}

// PeerMapKey is a helper that returns the proper format for keys used for maps
// of peers (i.e. torrent.Seeders & torrent.Leechers).
func PeerMapKey(peer *Peer) string {
return peer.ID + ":" + strconv.FormatUint(peer.UserID, 36)
}

// Torrent is the internal representation of a swarm for a given torrent file.
type Torrent struct {
ID uint64 `json:"id"`
Infohash string `json:"infohash"`
Active bool `json:"active"`

Seeders map[string]Peer `json:"seeders"`
Leechers map[string]Peer `json:"leechers"`

Snatches uint64 `json:"snatches"`
UpMultiplier float64 `json:"up_multiplier"`
DownMultiplier float64 `json:"down_multiplier"`
LastAction int64 `json:"last_action"`
}

// User is the internal representation of registered user for private trackers.
type User struct {
ID uint64 `json:"id"`
Passkey string `json:"passkey"`

UpMultiplier float64 `json:"up_multiplier"`
DownMultiplier float64 `json:"down_multiplier"`
Snatches uint64 `json:"snatches"`
}
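Worth spelling out while the old storage model is visible here: PeerMapKey namespaces peers by both peer ID and the owning user, so the same client announced by two different users occupies two distinct slots in a torrent's Seeders or Leechers map. A tiny illustration, not part of this change; the values are made up:

package main

import (
	"fmt"
	"strconv"
)

// peerMapKey reproduces the key format used by storage.PeerMapKey:
// "<peer ID>:<user ID in base 36>".
func peerMapKey(peerID string, userID uint64) string {
	return peerID + ":" + strconv.FormatUint(userID, 36)
}

func main() {
	fmt.Println(peerMapKey("-TR0960-6ep6svaa61r4", 1))  // -TR0960-6ep6svaa61r4:1
	fmt.Println(peerMapKey("-TR0960-6ep6svaa61r4", 71)) // -TR0960-6ep6svaa61r4:1z
}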
@ -1,257 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package mock

import (
"github.com/chihaya/chihaya/storage"
"github.com/chihaya/chihaya/storage/tracker"
)

type Conn struct {
*Pool
}

func (c *Conn) FindUser(passkey string) (*storage.User, bool, error) {
c.usersM.RLock()
defer c.usersM.RUnlock()

user, ok := c.users[passkey]
if !ok {
return nil, false, nil
}
u := *user
return &u, true, nil
}

func (c *Conn) FindTorrent(infohash string) (*storage.Torrent, bool, error) {
c.torrentsM.RLock()
defer c.torrentsM.RUnlock()

torrent, ok := c.torrents[infohash]
if !ok {
return nil, false, nil
}
t := *torrent
return &t, true, nil
}

func (c *Conn) ClientWhitelisted(peerID string) (bool, error) {
c.whitelistM.RLock()
defer c.whitelistM.RUnlock()

_, ok := c.whitelist[peerID]
if !ok {
return false, nil
}
return true, nil
}

func (c *Conn) RecordSnatch(u *storage.User, t *storage.Torrent) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}
torrent.Snatches++
t.Snatches++

return nil
}

func (c *Conn) MarkActive(t *storage.Torrent) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

torrent.Active = true
t.Active = true

return nil
}

func (c *Conn) MarkInactive(t *storage.Torrent) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

torrent.Active = false
t.Active = false

return nil
}

func (c *Conn) AddLeecher(t *storage.Torrent, p *storage.Peer) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

torrent.Leechers[storage.PeerMapKey(p)] = *p
t.Leechers[storage.PeerMapKey(p)] = *p

return nil
}

func (c *Conn) AddSeeder(t *storage.Torrent, p *storage.Peer) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

torrent.Leechers[storage.PeerMapKey(p)] = *p
t.Leechers[storage.PeerMapKey(p)] = *p

return nil
}

func (c *Conn) RemoveLeecher(t *storage.Torrent, p *storage.Peer) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

delete(torrent.Leechers, storage.PeerMapKey(p))
delete(t.Leechers, storage.PeerMapKey(p))

return nil
}

func (c *Conn) RemoveSeeder(t *storage.Torrent, p *storage.Peer) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

delete(torrent.Seeders, storage.PeerMapKey(p))
delete(t.Seeders, storage.PeerMapKey(p))

return nil
}

func (c *Conn) SetLeecher(t *storage.Torrent, p *storage.Peer) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

torrent.Leechers[storage.PeerMapKey(p)] = *p
t.Leechers[storage.PeerMapKey(p)] = *p

return nil
}

func (c *Conn) SetSeeder(t *storage.Torrent, p *storage.Peer) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

torrent.Seeders[storage.PeerMapKey(p)] = *p
t.Seeders[storage.PeerMapKey(p)] = *p

return nil
}

func (c *Conn) LeecherFinished(t *storage.Torrent, p *storage.Peer) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent, ok := c.torrents[t.Infohash]
if !ok {
return tracker.ErrMissingResource
}

torrent.Seeders[storage.PeerMapKey(p)] = *p
delete(torrent.Leechers, storage.PeerMapKey(p))

t.Seeders[storage.PeerMapKey(p)] = *p
delete(t.Leechers, storage.PeerMapKey(p))

return nil
}

func (c *Conn) AddTorrent(t *storage.Torrent) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

torrent := *t
c.torrents[t.Infohash] = &torrent

return nil
}

func (c *Conn) RemoveTorrent(t *storage.Torrent) error {
c.torrentsM.Lock()
defer c.torrentsM.Unlock()

delete(c.torrents, t.Infohash)

return nil
}

func (c *Conn) AddUser(u *storage.User) error {
c.usersM.Lock()
defer c.usersM.Unlock()

user := *u
c.users[u.Passkey] = &user

return nil
}

func (c *Conn) RemoveUser(u *storage.User) error {
c.usersM.Lock()
defer c.usersM.Unlock()

delete(c.users, u.Passkey)

return nil
}

func (c *Conn) WhitelistClient(peerID string) error {
c.whitelistM.Lock()
defer c.whitelistM.Unlock()

c.whitelist[peerID] = true

return nil
}

func (c *Conn) UnWhitelistClient(peerID string) error {
c.whitelistM.Lock()
defer c.whitelistM.Unlock()

delete(c.whitelist, peerID)

return nil
}
@ -1,89 +0,0 @@
// Copyright 2013 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package tracker provides a generic interface for manipulating a
// BitTorrent tracker's fast-moving data.
package tracker

import (
"errors"
"fmt"

"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/storage"
)

var (
// ErrMissingResource is an error returned when a resource does not exist.
ErrMissingResource = errors.New("tracker: resource missing")

drivers = make(map[string]Driver)
)

// Driver represents an interface to pool of connections to storage used for
// the tracker.
type Driver interface {
New(*config.DataStore) Pool
}

// Register makes a database driver available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, driver Driver) {
if driver == nil {
panic("tracker: Register driver is nil")
}
if _, dup := drivers[name]; dup {
panic("tracker: Register called twice for driver " + name)
}
drivers[name] = driver
}

// Open creates a pool of data store connections specified by a storage configuration.
func Open(conf *config.DataStore) (Pool, error) {
driver, ok := drivers[conf.Driver]
if !ok {
return nil, fmt.Errorf(
"tracker: unknown driver %q (forgotten import?)",
conf.Driver,
)
}
pool := driver.New(conf)
return pool, nil
}

// Pool represents a thread-safe pool of connections to the data store
// that can be used to safely within concurrent goroutines.
type Pool interface {
Close() error
Get() (Conn, error)
}

// Conn represents a connection to the data store that can be used
// to make reads/writes.
type Conn interface {
// Reads
FindUser(passkey string) (*storage.User, bool, error)
FindTorrent(infohash string) (*storage.Torrent, bool, error)
ClientWhitelisted(peerID string) (bool, error)

// Writes
RecordSnatch(u *storage.User, t *storage.Torrent) error
MarkActive(t *storage.Torrent) error
AddLeecher(t *storage.Torrent, p *storage.Peer) error
AddSeeder(t *storage.Torrent, p *storage.Peer) error
RemoveLeecher(t *storage.Torrent, p *storage.Peer) error
RemoveSeeder(t *storage.Torrent, p *storage.Peer) error
SetLeecher(t *storage.Torrent, p *storage.Peer) error
SetSeeder(t *storage.Torrent, p *storage.Peer) error
LeecherFinished(t *storage.Torrent, p *storage.Peer) error

// Priming / Testing
AddTorrent(t *storage.Torrent) error
RemoveTorrent(t *storage.Torrent) error
AddUser(u *storage.User) error
RemoveUser(u *storage.User) error
WhitelistClient(peerID string) error
UnWhitelistClient(peerID string) error
}
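The Driver/Register/Open trio removed here follows the database/sql registration pattern: a driver package registers itself from init, callers blank-import it, and Open looks the driver up by the name in the DataStore config. A hedged sketch of the shape of such a driver against the interface shown above; the "mymock" name, the pool type, and the stubbed methods are invented for illustration, and the same pattern presumably applies at the new drivers/tracker path:

package mymock

import (
	"errors"

	"github.com/chihaya/chihaya/config"
	"github.com/chihaya/chihaya/storage/tracker"
)

// driver satisfies tracker.Driver: it turns a DataStore config into a Pool.
type driver struct{}

func (d *driver) New(conf *config.DataStore) tracker.Pool {
	return &pool{conf: conf}
}

// pool satisfies tracker.Pool just enough to illustrate the plumbing.
type pool struct {
	conf *config.DataStore
}

func (p *pool) Close() error { return nil }

func (p *pool) Get() (tracker.Conn, error) {
	return nil, errors.New("mymock: Conn is not implemented in this sketch")
}

func init() {
	// Register by name; a caller then selects this driver through the
	// config's Driver field after blank-importing the package, e.g.
	//   pool, err := tracker.Open(&config.DataStore{Driver: "mymock"})
	tracker.Register("mymock", &driver{})
}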