delete old code

parent 250725179e
commit 1bff8d1571

76 changed files with 0 additions and 7791 deletions
AUTHORS (5 deletions)
@@ -1,5 +0,0 @@
# This is the official list of Chihaya authors for copyright purposes, in alphabetical order.

Jimmy Zelinskie <jimmyzelinskie@gmail.com>
Justin Li <jli@j-li.net>
Dockerfile (27 deletions)
@@ -1,27 +0,0 @@
# vim: ft=dockerfile
FROM golang
MAINTAINER Jimmy Zelinskie <jimmyzelinskie@gmail.com>

# Install glide
WORKDIR /tmp
ADD https://github.com/Masterminds/glide/releases/download/0.10.2/glide-0.10.2-linux-amd64.tar.gz /tmp
RUN tar xvf /tmp/glide-0.10.2-linux-amd64.tar.gz
RUN mv /tmp/linux-amd64/glide /usr/bin/glide

# Add files
WORKDIR /go/src/github.com/chihaya/chihaya/
RUN mkdir -p /go/src/github.com/chihaya/chihaya/

# Add source
ADD . .

# Install chihaya
RUN glide install
RUN go install github.com/chihaya/chihaya/cmd/chihaya

# Configuration/environment
VOLUME ["/config"]
EXPOSE 6880-6882

# docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro quay.io/jzelinskie/chihaya:latest
ENTRYPOINT ["chihaya", "-config=/config/config.json"]
chihaya.go (161 deletions)
|
@ -1,161 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package chihaya
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/chihaya/chihaya/pkg/event"
|
||||
)
|
||||
|
||||
// PeerID represents a peer ID.
|
||||
type PeerID [20]byte
|
||||
|
||||
// PeerIDFromBytes creates a PeerID from a byte slice.
|
||||
//
|
||||
// It panics if b is not 20 bytes long.
|
||||
func PeerIDFromBytes(b []byte) PeerID {
|
||||
if len(b) != 20 {
|
||||
panic("peer ID must be 20 bytes")
|
||||
}
|
||||
|
||||
var buf [20]byte
|
||||
copy(buf[:], b)
|
||||
return PeerID(buf)
|
||||
}
|
||||
|
||||
// PeerIDFromString creates a PeerID from a string.
|
||||
//
|
||||
// It panics if s is not 20 bytes long.
|
||||
func PeerIDFromString(s string) PeerID {
|
||||
if len(s) != 20 {
|
||||
panic("peer ID must be 20 bytes")
|
||||
}
|
||||
|
||||
var buf [20]byte
|
||||
copy(buf[:], s)
|
||||
return PeerID(buf)
|
||||
}
|
||||
|
||||
// InfoHash represents an infohash.
|
||||
type InfoHash [20]byte
|
||||
|
||||
// InfoHashFromBytes creates an InfoHash from a byte slice.
|
||||
//
|
||||
// It panics if b is not 20 bytes long.
|
||||
func InfoHashFromBytes(b []byte) InfoHash {
|
||||
if len(b) != 20 {
|
||||
panic("infohash must be 20 bytes")
|
||||
}
|
||||
|
||||
var buf [20]byte
|
||||
copy(buf[:], b)
|
||||
return InfoHash(buf)
|
||||
}
|
||||
|
||||
// InfoHashFromString creates an InfoHash from a string.
|
||||
//
|
||||
// It panics if s is not 20 bytes long.
|
||||
func InfoHashFromString(s string) InfoHash {
|
||||
if len(s) != 20 {
|
||||
panic("infohash must be 20 bytes")
|
||||
}
|
||||
|
||||
var buf [20]byte
|
||||
copy(buf[:], s)
|
||||
return InfoHash(buf)
|
||||
}
|
||||
|
||||
// AnnounceRequest represents the parsed parameters from an announce request.
|
||||
type AnnounceRequest struct {
|
||||
Event event.Event
|
||||
InfoHash InfoHash
|
||||
PeerID PeerID
|
||||
|
||||
IPv4, IPv6 net.IP
|
||||
Port uint16
|
||||
|
||||
Compact bool
|
||||
NumWant int32
|
||||
|
||||
Left, Downloaded, Uploaded uint64
|
||||
|
||||
Params Params
|
||||
}
|
||||
|
||||
// Peer4 returns a Peer using the IPv4 endpoint of the Announce.
|
||||
// Note that, if the Announce does not contain an IPv4 address, the IP field of
|
||||
// the returned Peer can be nil.
|
||||
func (r *AnnounceRequest) Peer4() Peer {
|
||||
return Peer{
|
||||
IP: r.IPv4,
|
||||
Port: r.Port,
|
||||
ID: r.PeerID,
|
||||
}
|
||||
}
|
||||
|
||||
// Peer6 returns a Peer using the IPv6 endpoint of the Announce.
|
||||
// Note that, if the Announce does not contain an IPv6 address, the IP field of
|
||||
// the returned Peer can be nil.
|
||||
func (r *AnnounceRequest) Peer6() Peer {
|
||||
return Peer{
|
||||
IP: r.IPv6,
|
||||
Port: r.Port,
|
||||
ID: r.PeerID,
|
||||
}
|
||||
}
|
||||
|
||||
// AnnounceResponse represents the parameters used to create an announce
|
||||
// response.
|
||||
type AnnounceResponse struct {
|
||||
Compact bool
|
||||
Complete int32
|
||||
Incomplete int32
|
||||
Interval time.Duration
|
||||
MinInterval time.Duration
|
||||
IPv4Peers []Peer
|
||||
IPv6Peers []Peer
|
||||
}
|
||||
|
||||
// ScrapeRequest represents the parsed parameters from a scrape request.
|
||||
type ScrapeRequest struct {
|
||||
InfoHashes []InfoHash
|
||||
Params Params
|
||||
}
|
||||
|
||||
// ScrapeResponse represents the parameters used to create a scrape response.
|
||||
type ScrapeResponse struct {
|
||||
Files map[InfoHash]Scrape
|
||||
}
|
||||
|
||||
// Scrape represents the state of a swarm that is returned in a scrape response.
|
||||
type Scrape struct {
|
||||
Complete int32
|
||||
Incomplete int32
|
||||
}
|
||||
|
||||
// Peer represents the connection details of a peer that is returned in an
|
||||
// announce response.
|
||||
type Peer struct {
|
||||
ID PeerID
|
||||
IP net.IP
|
||||
Port uint16
|
||||
}
|
||||
|
||||
// Equal reports whether p and x are the same.
|
||||
func (p Peer) Equal(x Peer) bool {
|
||||
return p.EqualEndpoint(x) && p.ID == x.ID
|
||||
}
|
||||
|
||||
// EqualEndpoint reports whether p and x have the same endpoint.
|
||||
func (p Peer) EqualEndpoint(x Peer) bool {
|
||||
return p.Port == x.Port && p.IP.Equal(x.IP)
|
||||
}
|
||||
|
||||
// Params is used to fetch request parameters.
|
||||
type Params interface {
|
||||
String(key string) (string, error)
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package chihaya
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
peers = []struct {
|
||||
peerID string
|
||||
ip string
|
||||
port uint16
|
||||
}{
|
||||
{"-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720},
|
||||
{"-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878},
|
||||
{"-TR0960-6ep6svaa61r4", "fd45:7856:3dae::48", 2878},
|
||||
{"-BS5820-oy4La2MWGEFj", "fd0a:29a8:8445::38", 2878},
|
||||
{"-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 8999},
|
||||
}
|
||||
)
|
||||
|
||||
func TestPeerEquality(t *testing.T) {
|
||||
// Build peers from test data.
|
||||
var builtPeers []Peer
|
||||
for _, peer := range peers {
|
||||
builtPeers = append(builtPeers, Peer{
|
||||
ID: PeerIDFromString(peer.peerID),
|
||||
IP: net.ParseIP(peer.ip),
|
||||
Port: peer.port,
|
||||
})
|
||||
}
|
||||
|
||||
assert.True(t, builtPeers[0].Equal(builtPeers[0]))
|
||||
assert.False(t, builtPeers[0].Equal(builtPeers[1]))
|
||||
assert.True(t, builtPeers[1].Equal(builtPeers[1]))
|
||||
assert.False(t, builtPeers[1].Equal(builtPeers[2]))
|
||||
assert.False(t, builtPeers[1].Equal(builtPeers[3]))
|
||||
assert.False(t, builtPeers[1].Equal(builtPeers[4]))
|
||||
}
|
|
@ -1,77 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/pprof"
|
||||
"syscall"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/server"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
|
||||
// Servers
|
||||
_ "github.com/chihaya/chihaya/server/http"
|
||||
_ "github.com/chihaya/chihaya/server/prometheus"
|
||||
_ "github.com/chihaya/chihaya/server/store"
|
||||
_ "github.com/chihaya/chihaya/server/store/memory"
|
||||
|
||||
// Middleware
|
||||
_ "github.com/chihaya/chihaya/middleware/deniability"
|
||||
_ "github.com/chihaya/chihaya/middleware/varinterval"
|
||||
_ "github.com/chihaya/chihaya/server/store/middleware/client"
|
||||
_ "github.com/chihaya/chihaya/server/store/middleware/infohash"
|
||||
_ "github.com/chihaya/chihaya/server/store/middleware/ip"
|
||||
_ "github.com/chihaya/chihaya/server/store/middleware/response"
|
||||
_ "github.com/chihaya/chihaya/server/store/middleware/swarm"
|
||||
)
|
||||
|
||||
var (
|
||||
configPath string
|
||||
cpuprofile string
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&configPath, "config", "", "path to the configuration file")
|
||||
flag.StringVar(&cpuprofile, "cpuprofile", "", "path to cpu profile output")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if cpuprofile != "" {
|
||||
log.Println("profiling...")
|
||||
f, err := os.Create(cpuprofile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
pprof.StartCPUProfile(f)
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
cfg, err := chihaya.OpenConfigFile(configPath)
|
||||
if err != nil {
|
||||
log.Fatal("failed to load config: " + err.Error())
|
||||
}
|
||||
|
||||
tkr, err := tracker.NewTracker(&cfg.Tracker)
|
||||
if err != nil {
|
||||
log.Fatal("failed to create tracker: " + err.Error())
|
||||
}
|
||||
|
||||
pool, err := server.StartPool(cfg.Servers, tkr)
|
||||
if err != nil {
|
||||
log.Fatal("failed to create server pool: " + err.Error())
|
||||
}
|
||||
|
||||
shutdown := make(chan os.Signal, 1)
|
||||
signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-shutdown
|
||||
pool.Stop()
|
||||
}
|
config.go (98 deletions)
|
@ -1,98 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package chihaya
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// DefaultConfig is a sane configuration used as a fallback or for testing.
|
||||
var DefaultConfig = Config{
|
||||
Tracker: TrackerConfig{
|
||||
AnnounceInterval: 30 * time.Minute,
|
||||
MinAnnounceInterval: 20 * time.Minute,
|
||||
AnnounceMiddleware: []MiddlewareConfig{},
|
||||
ScrapeMiddleware: []MiddlewareConfig{},
|
||||
},
|
||||
Servers: []ServerConfig{},
|
||||
}
|
||||
|
||||
// Config represents the global configuration of a chihaya binary.
|
||||
type Config struct {
|
||||
Tracker TrackerConfig `yaml:"tracker"`
|
||||
Servers []ServerConfig `yaml:"servers"`
|
||||
}
|
||||
|
||||
// TrackerConfig represents the configuration of protocol-agnostic BitTorrent
|
||||
// Tracker used by Servers started by chihaya.
|
||||
type TrackerConfig struct {
|
||||
AnnounceInterval time.Duration `yaml:"announce"`
|
||||
MinAnnounceInterval time.Duration `yaml:"min_announce"`
|
||||
AnnounceMiddleware []MiddlewareConfig `yaml:"announce_middleware"`
|
||||
ScrapeMiddleware []MiddlewareConfig `yaml:"scrape_middleware"`
|
||||
}
|
||||
|
||||
// MiddlewareConfig represents the configuration of a middleware used by
|
||||
// the tracker.
|
||||
type MiddlewareConfig struct {
|
||||
Name string `yaml:"name"`
|
||||
Config interface{} `yaml:"config"`
|
||||
}
|
||||
|
||||
// ServerConfig represents the configuration of the Servers started by chihaya.
|
||||
type ServerConfig struct {
|
||||
Name string `yaml:"name"`
|
||||
Config interface{} `yaml:"config"`
|
||||
}
|
||||
|
||||
// ConfigFile represents a YAML configuration file that namespaces all chihaya
|
||||
// configuration under the "chihaya" namespace.
|
||||
type ConfigFile struct {
|
||||
Chihaya Config `yaml:"chihaya"`
|
||||
}
|
||||
|
||||
// DecodeConfigFile unmarshals an io.Reader into a new Config.
|
||||
func DecodeConfigFile(r io.Reader) (*Config, error) {
|
||||
contents, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfgFile := &ConfigFile{}
|
||||
err = yaml.Unmarshal(contents, cfgFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfgFile.Chihaya, nil
|
||||
}
|
||||
|
||||
// OpenConfigFile returns a new Config given the path to a YAML configuration
|
||||
// file.
|
||||
// It supports relative and absolute paths and environment variables.
|
||||
// Given "", it returns DefaultConfig.
|
||||
func OpenConfigFile(path string) (*Config, error) {
|
||||
if path == "" {
|
||||
return &DefaultConfig, nil
|
||||
}
|
||||
|
||||
f, err := os.Open(os.ExpandEnv(path))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
cfg, err := DecodeConfigFile(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
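A brief usage sketch for the configuration loader deleted above. The environment variable and file name below are placeholders, not part of the repository, but `OpenConfigFile`, `Config`, and the `Tracker`/`Servers` fields are exactly the API shown in config.go; an empty path falls back to `DefaultConfig`.

    package main

    import (
        "fmt"
        "log"

        "github.com/chihaya/chihaya"
    )

    func main() {
        // "$CHIHAYA_CONFIG_DIR" is expanded by OpenConfigFile via os.ExpandEnv.
        cfg, err := chihaya.OpenConfigFile("$CHIHAYA_CONFIG_DIR/config.yaml")
        if err != nil {
            log.Fatal("failed to load config: " + err.Error())
        }
        fmt.Println(cfg.Tracker.AnnounceInterval, len(cfg.Servers))
    }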
@@ -1,61 +0,0 @@
# Copyright 2016 The Chihaya Authors. All rights reserved.
# Use of this source code is governed by the BSD 2-Clause license,
# which can be found in the LICENSE file.

chihaya:
  tracker:
    announce: 10m
    min_announce: 5m
    announce_middleware:
    # - name: ip_blacklist
    # - name: ip_whitelist
    # - name: client_blacklist
    # - name: client_whitelist
    # - name: infohash_blacklist
    # - name: infohash_whitelist
    # - name: varinterval
    # - name: deniability
    - name: store_swarm_interaction
    - name: store_response
    scrape_middleware:
    # - name: infohash_blacklist
    #   config:
    #     mode: block
    - name: store_response

  servers:
  - name: store
    config:
      addr: localhost:6880
      request_timeout: 10s
      read_timeout: 10s
      write_timeout: 10s
      client_store:
        name: memory
      ip_store:
        name: memory
      string_store:
        name: memory
      peer_store:
        name: memory
        config:
          gcAfter: 30m
          shards: 1

  - name: prometheus
    config:
      addr: localhost:6881
      shutdown_timeout: 10s
      read_timeout: 10s
      write_timeout: 10s

  - name: http
    config:
      addr: localhost:6882
      request_timeout: 10s
      read_timeout: 10s
      write_timeout: 10s

  # - name: udp
  #   config:
  #     addr: localhost:6883
glide.lock (generated, 44 deletions)
|
@ -1,44 +0,0 @@
|
|||
hash: e7d2be6c361fe6fe6242b56e502829e8a72733f9ff0aa57443c9397c3488174f
|
||||
updated: 2016-05-21T17:58:26.448148976-04:00
|
||||
imports:
|
||||
- name: github.com/beorn7/perks
|
||||
version: 3ac7bf7a47d159a033b107610db8a1b6575507a4
|
||||
subpackages:
|
||||
- quantile
|
||||
- name: github.com/golang/protobuf
|
||||
version: cd85f19845cc96cc6e5269c894d8cd3c67e9ed83
|
||||
subpackages:
|
||||
- proto
|
||||
- name: github.com/julienschmidt/httprouter
|
||||
version: 77366a47451a56bb3ba682481eed85b64fea14e8
|
||||
- name: github.com/matttproud/golang_protobuf_extensions
|
||||
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||
subpackages:
|
||||
- pbutil
|
||||
- name: github.com/mrd0ll4r/netmatch
|
||||
version: af335c21c765757f2649dbf1d3d43f77eb6c4eb8
|
||||
- name: github.com/prometheus/client_golang
|
||||
version: d38f1ef46f0d78136db3e585f7ebe1bcc3476f73
|
||||
subpackages:
|
||||
- prometheus
|
||||
- name: github.com/prometheus/client_model
|
||||
version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
subpackages:
|
||||
- go
|
||||
- name: github.com/prometheus/common
|
||||
version: a715f9d07a512e8339f70a275ace0e67c0f9a65f
|
||||
subpackages:
|
||||
- expfmt
|
||||
- internal/bitbucket.org/ww/goautoneg
|
||||
- model
|
||||
- name: github.com/prometheus/procfs
|
||||
version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
|
||||
- name: github.com/tylerb/graceful
|
||||
version: 9a3d4236b03bb5d26f7951134d248f9d5510d599
|
||||
- name: golang.org/x/net
|
||||
version: 0c607074acd38c5f23d1344dfe74c977464d1257
|
||||
subpackages:
|
||||
- netutil
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: a83829b6f1293c91addabc89d0571c246397bbf4
|
||||
devImports: []
|
@@ -1,9 +0,0 @@
package: github.com/chihaya/chihaya
import:
- package: github.com/julienschmidt/httprouter
- package: github.com/mrd0ll4r/netmatch
- package: github.com/prometheus/client_golang
  subpackages:
  - prometheus
- package: github.com/tylerb/graceful
- package: gopkg.in/yaml.v2
@@ -1,39 +0,0 @@
## Deniability Middleware

This package provides the announce middleware `deniability`, which inserts ghost peers into announce responses to achieve plausible deniability.

### Functionality

This middleware will choose random announces and modify the list of peers returned.
A random number of randomly generated peers will be inserted at random positions into the list of peers.
As soon as the list of peers reaches `numWant`, peers will be replaced rather than inserted.

Note that if a response is picked for augmentation, both IPv4 and IPv6 peers will be modified, provided they are not empty.

Also note that the IP address of a generated peer consists of bytes in the range [1,254].

### Configuration

This middleware provides the following parameters for configuration:

- `modify_response_probability` (float, >0, <=1) indicates the probability by which a response will be augmented with random peers.
- `max_random_peers` (int, >0) sets an upper boundary (inclusive) for the number of peers added.
- `prefix` (string, at most 20 characters) sets the prefix for generated peer IDs.
  The peer ID will be padded to 20 bytes using a random string of alphanumeric characters.
- `min_port` (int, >0, <=65535) sets a lower boundary for the port of generated peers.
- `max_port` (int, >0, <=65536, >`min_port`) sets an upper boundary for the port of generated peers.

An example config might look like this:

    chihaya:
      tracker:
        announce_middleware:
        - name: deniability
          config:
            modify_response_probability: 0.2
            max_random_peers: 5
            prefix: -AZ2060-
            min_port: 40000
            max_port: 60000

For more information about peer IDs and their prefixes, see [this wiki entry](https://wiki.theory.org/BitTorrentSpecification#peer_id).
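A minimal, self-contained sketch of the insert-vs-replace behaviour the README describes. The `peer` struct, `randomPeer` helper, and `augment` function are illustrative stand-ins and not part of the deleted package; compare `insertPeer` and `replacePeer` in the middleware source further down.

    package main

    import (
        "fmt"
        "math/rand"
    )

    // peer and randomPeer stand in for chihaya.Peer and the package's
    // random peer generator.
    type peer struct{ id string }

    func randomPeer(r *rand.Rand) peer {
        return peer{id: fmt.Sprintf("-XX0000-%012d", r.Intn(1000000))}
    }

    // augment inserts a ghost peer at a random position while the list is
    // still shorter than numWant, and overwrites a random existing entry
    // once the list has reached numWant, so the response never grows past
    // what the client requested.
    func augment(r *rand.Rand, peers []peer, numWant int) []peer {
        if len(peers) > 0 && len(peers) >= numWant {
            peers[r.Intn(len(peers))] = randomPeer(r) // replace
            return peers
        }
        pos := 0
        if len(peers) > 0 {
            pos = r.Intn(len(peers))
        }
        peers = append(peers, peer{}) // make room, then shift the tail right
        copy(peers[pos+1:], peers[pos:])
        peers[pos] = randomPeer(r)
        return peers
    }

    func main() {
        r := rand.New(rand.NewSource(1))
        swarm := []peer{{id: "-TR0960-aaaaaaaaaaaa"}}
        swarm = augment(r, swarm, 50) // below numWant: inserted, len == 2
        swarm = augment(r, swarm, 2)  // at numWant: replaced, len stays 2
        fmt.Println(len(swarm), swarm)
    }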
|
@ -1,46 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package deniability
|
||||
|
||||
import (
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
)
|
||||
|
||||
// Config represents the configuration for the deniability middleware.
|
||||
type Config struct {
|
||||
// ModifyResponseProbability is the probability by which a response will
|
||||
// be augmented with random peers.
|
||||
ModifyResponseProbability float32 `yaml:"modify_response_probability"`
|
||||
|
||||
// MaxRandomPeers is the amount of peers that will be added at most.
|
||||
MaxRandomPeers int `yaml:"max_random_peers"`
|
||||
|
||||
// Prefix is the prefix to be used for peer IDs.
|
||||
Prefix string `yaml:"prefix"`
|
||||
|
||||
// MinPort is the minimum port (inclusive) for the generated peer.
|
||||
MinPort int `yaml:"min_port"`
|
||||
|
||||
// MaxPort is the maximum port (exclusive) for the generated peer.
|
||||
MaxPort int `yaml:"max_port"`
|
||||
}
|
||||
|
||||
// newConfig parses the given MiddlewareConfig as a deniability.Config.
|
||||
func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) {
|
||||
bytes, err := yaml.Marshal(mwcfg.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
err = yaml.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
|
@ -1,63 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package deniability
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
)
|
||||
|
||||
type configTestData struct {
|
||||
modifyProbability string
|
||||
maxNewPeers string
|
||||
prefix string
|
||||
minPort string
|
||||
maxPort string
|
||||
err bool
|
||||
expected Config
|
||||
}
|
||||
|
||||
var (
|
||||
configTemplate = `
|
||||
name: foo
|
||||
config:
|
||||
modify_response_probability: %s
|
||||
max_random_peers: %s
|
||||
prefix: %s
|
||||
min_port: %s
|
||||
max_port: %s`
|
||||
|
||||
configData = []configTestData{
|
||||
{"1.0", "5", "abc", "2000", "3000", false, Config{1.0, 5, "abc", 2000, 3000}},
|
||||
{"a", "a", "12", "a", "a", true, Config{}},
|
||||
}
|
||||
)
|
||||
|
||||
func TestNewConfig(t *testing.T) {
|
||||
var mwconfig chihaya.MiddlewareConfig
|
||||
|
||||
cfg, err := newConfig(mwconfig)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, cfg)
|
||||
|
||||
for _, test := range configData {
|
||||
config := fmt.Sprintf(configTemplate, test.modifyProbability, test.maxNewPeers, test.prefix, test.minPort, test.maxPort)
|
||||
err = yaml.Unmarshal([]byte(config), &mwconfig)
|
||||
assert.Nil(t, err)
|
||||
|
||||
cfg, err = newConfig(mwconfig)
|
||||
if test.err {
|
||||
assert.NotNil(t, err)
|
||||
continue
|
||||
}
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.expected, *cfg)
|
||||
}
|
||||
}
|
|
@ -1,121 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package deniability
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/pkg/random"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
)
|
||||
|
||||
func init() {
|
||||
tracker.RegisterAnnounceMiddlewareConstructor("deniability", constructor)
|
||||
}
|
||||
|
||||
type deniabilityMiddleware struct {
|
||||
cfg *Config
|
||||
r *rand.Rand
|
||||
}
|
||||
|
||||
// constructor provides a middleware constructor that returns a middleware to
|
||||
// insert peers into the peer lists returned as a response to an announce.
|
||||
//
|
||||
// It returns an error if the config provided is either syntactically or
|
||||
// semantically incorrect.
|
||||
func constructor(c chihaya.MiddlewareConfig) (tracker.AnnounceMiddleware, error) {
|
||||
cfg, err := newConfig(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1 {
|
||||
return nil, errors.New("modify_response_probability must be in [0,1)")
|
||||
}
|
||||
|
||||
if cfg.MaxRandomPeers <= 0 {
|
||||
return nil, errors.New("max_random_peers must be > 0")
|
||||
}
|
||||
|
||||
if cfg.MinPort <= 0 {
|
||||
return nil, errors.New("min_port must not be <= 0")
|
||||
}
|
||||
|
||||
if cfg.MaxPort > 65536 {
|
||||
return nil, errors.New("max_port must not be > 65536")
|
||||
}
|
||||
|
||||
if cfg.MinPort >= cfg.MaxPort {
|
||||
return nil, errors.New("max_port must not be <= min_port")
|
||||
}
|
||||
|
||||
if len(cfg.Prefix) > 20 {
|
||||
return nil, errors.New("prefix must not be longer than 20 bytes")
|
||||
}
|
||||
|
||||
mw := deniabilityMiddleware{
|
||||
cfg: cfg,
|
||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
|
||||
return mw.modifyResponse, nil
|
||||
}
|
||||
|
||||
func (mw *deniabilityMiddleware) modifyResponse(next tracker.AnnounceHandler) tracker.AnnounceHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
|
||||
err := next(cfg, req, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mw.cfg.ModifyResponseProbability == 1 || mw.r.Float32() < mw.cfg.ModifyResponseProbability {
|
||||
numNewPeers := mw.r.Intn(mw.cfg.MaxRandomPeers) + 1
|
||||
for i := 0; i < numNewPeers; i++ {
|
||||
if len(resp.IPv6Peers) > 0 {
|
||||
if len(resp.IPv6Peers) >= int(req.NumWant) {
|
||||
mw.replacePeer(resp.IPv6Peers, true)
|
||||
} else {
|
||||
resp.IPv6Peers = mw.insertPeer(resp.IPv6Peers, true)
|
||||
}
|
||||
}
|
||||
|
||||
if len(resp.IPv4Peers) > 0 {
|
||||
if len(resp.IPv4Peers) >= int(req.NumWant) {
|
||||
mw.replacePeer(resp.IPv4Peers, false)
|
||||
} else {
|
||||
resp.IPv4Peers = mw.insertPeer(resp.IPv4Peers, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// replacePeer replaces a peer from a random position within the given slice
|
||||
// of peers with a randomly generated one.
|
||||
//
|
||||
// replacePeer panics if len(peers) == 0.
|
||||
func (mw *deniabilityMiddleware) replacePeer(peers []chihaya.Peer, v6 bool) {
|
||||
peers[mw.r.Intn(len(peers))] = random.Peer(mw.r, mw.cfg.Prefix, v6, mw.cfg.MinPort, mw.cfg.MaxPort)
|
||||
}
|
||||
|
||||
// insertPeer inserts a randomly generated peer at a random position into the
|
||||
// given slice and returns the new slice.
|
||||
func (mw *deniabilityMiddleware) insertPeer(peers []chihaya.Peer, v6 bool) []chihaya.Peer {
|
||||
pos := 0
|
||||
if len(peers) > 0 {
|
||||
pos = mw.r.Intn(len(peers))
|
||||
}
|
||||
peers = append(peers, chihaya.Peer{})
|
||||
copy(peers[pos+1:], peers[pos:])
|
||||
peers[pos] = random.Peer(mw.r, mw.cfg.Prefix, v6, mw.cfg.MinPort, mw.cfg.MaxPort)
|
||||
|
||||
return peers
|
||||
}
|
|
@ -1,110 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package deniability
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
)
|
||||
|
||||
type constructorTestData struct {
|
||||
cfg Config
|
||||
error bool
|
||||
}
|
||||
|
||||
var constructorData = []constructorTestData{
|
||||
{Config{1.0, 10, "abc", 1024, 1025}, false},
|
||||
{Config{1.1, 10, "abc", 1024, 1025}, true},
|
||||
{Config{0, 10, "abc", 1024, 1025}, true},
|
||||
{Config{1.0, 0, "abc", 1024, 1025}, true},
|
||||
{Config{1.0, 10, "01234567890123456789_", 1024, 1025}, true},
|
||||
{Config{1.0, 10, "abc", 0, 1025}, true},
|
||||
{Config{1.0, 10, "abc", 1024, 0}, true},
|
||||
{Config{1.0, 10, "abc", 1024, 65537}, true},
|
||||
}
|
||||
|
||||
func TestReplacePeer(t *testing.T) {
|
||||
cfg := Config{
|
||||
Prefix: "abc",
|
||||
MinPort: 1024,
|
||||
MaxPort: 1025,
|
||||
}
|
||||
mw := deniabilityMiddleware{
|
||||
r: rand.New(rand.NewSource(0)),
|
||||
cfg: &cfg,
|
||||
}
|
||||
peer := chihaya.Peer{
|
||||
ID: chihaya.PeerID([20]byte{}),
|
||||
Port: 2000,
|
||||
IP: net.ParseIP("10.150.255.23"),
|
||||
}
|
||||
peers := []chihaya.Peer{peer}
|
||||
|
||||
mw.replacePeer(peers, false)
|
||||
assert.Equal(t, 1, len(peers))
|
||||
assert.Equal(t, "abc", string(peers[0].ID[:3]))
|
||||
assert.Equal(t, uint16(1024), peers[0].Port)
|
||||
assert.NotNil(t, peers[0].IP.To4())
|
||||
|
||||
mw.replacePeer(peers, true)
|
||||
assert.Equal(t, 1, len(peers))
|
||||
assert.Equal(t, "abc", string(peers[0].ID[:3]))
|
||||
assert.Equal(t, uint16(1024), peers[0].Port)
|
||||
assert.Nil(t, peers[0].IP.To4())
|
||||
|
||||
peers = []chihaya.Peer{peer, peer}
|
||||
|
||||
mw.replacePeer(peers, true)
|
||||
assert.True(t, (peers[0].Port == peer.Port) != (peers[1].Port == peer.Port), "not exactly one peer was replaced")
|
||||
}
|
||||
|
||||
func TestInsertPeer(t *testing.T) {
|
||||
cfg := Config{
|
||||
Prefix: "abc",
|
||||
MinPort: 1024,
|
||||
MaxPort: 1025,
|
||||
}
|
||||
mw := deniabilityMiddleware{
|
||||
r: rand.New(rand.NewSource(0)),
|
||||
cfg: &cfg,
|
||||
}
|
||||
peer := chihaya.Peer{
|
||||
ID: chihaya.PeerID([20]byte{}),
|
||||
Port: 2000,
|
||||
IP: net.ParseIP("10.150.255.23"),
|
||||
}
|
||||
var peers []chihaya.Peer
|
||||
|
||||
peers = mw.insertPeer(peers, false)
|
||||
assert.Equal(t, 1, len(peers))
|
||||
assert.Equal(t, uint16(1024), peers[0].Port)
|
||||
assert.Equal(t, "abc", string(peers[0].ID[:3]))
|
||||
assert.NotNil(t, peers[0].IP.To4())
|
||||
|
||||
peers = []chihaya.Peer{peer, peer}
|
||||
|
||||
peers = mw.insertPeer(peers, true)
|
||||
assert.Equal(t, 3, len(peers))
|
||||
}
|
||||
|
||||
func TestConstructor(t *testing.T) {
|
||||
for _, tt := range constructorData {
|
||||
_, err := constructor(chihaya.MiddlewareConfig{
|
||||
Config: tt.cfg,
|
||||
})
|
||||
|
||||
if tt.error {
|
||||
assert.NotNil(t, err, fmt.Sprintf("error expected for %+v", tt.cfg))
|
||||
} else {
|
||||
assert.Nil(t, err, fmt.Sprintf("no error expected for %+v", tt.cfg))
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,34 +0,0 @@
## Announce Interval Variation Middleware

This package provides the announce middleware `varinterval`, which randomizes the announce interval.

### Functionality

This middleware will choose random announces and modify their `interval` and `min_interval` fields.
A random number of seconds will be added to the `interval` field and, if desired, also to the `min_interval` field.

Note that if a response is picked for modification and `min_interval` should be changed as well, both `interval` and `min_interval` will be increased by the same amount.

### Use Case

Use this middleware to avoid recurring load spikes on the tracker.
By randomizing the announce interval, load spikes will flatten out after a few cycles.

### Configuration

This middleware provides the following parameters for configuration:

- `modify_response_probability` (float, >0, <=1) indicates the probability by which a response will be chosen to have its announce intervals modified.
- `max_increase_delta` (int, >0) sets an upper boundary (inclusive) for the number of seconds added.
- `modify_min_interval` (boolean) specifies whether the `min_interval` field should be modified as well.

An example config might look like this:

    chihaya:
      tracker:
        announce_middleware:
        - name: varinterval
          config:
            modify_response_probability: 0.2
            max_increase_delta: 60
            modify_min_interval: true
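A compact sketch of the interval jitter described above. The `jitter` function is an illustrative stand-in, not the package API; the real logic lives in the `modifyResponse` handler further down.

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // jitter adds between 1 and maxIncreaseDelta seconds to interval and,
    // when modifyMin is set, to minInterval by the same amount.
    func jitter(r *rand.Rand, interval, minInterval time.Duration, maxIncreaseDelta int, modifyMin bool) (time.Duration, time.Duration) {
        add := time.Duration(r.Intn(maxIncreaseDelta)+1) * time.Second
        interval += add
        if modifyMin {
            minInterval += add
        }
        return interval, minInterval
    }

    func main() {
        r := rand.New(rand.NewSource(time.Now().UnixNano()))
        i, m := jitter(r, 30*time.Minute, 20*time.Minute, 60, true)
        fmt.Println(i, m) // e.g. 30m42s 20m42s
    }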
|
@ -1,43 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package varinterval
|
||||
|
||||
import (
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
)
|
||||
|
||||
// Config represents the configuration for the varinterval middleware.
|
||||
type Config struct {
|
||||
// ModifyResponseProbability is the probability by which a response will
|
||||
// be modified.
|
||||
ModifyResponseProbability float32 `yaml:"modify_response_probability"`
|
||||
|
||||
// MaxIncreaseDelta is the amount of seconds that will be added at most.
|
||||
MaxIncreaseDelta int `yaml:"max_increase_delta"`
|
||||
|
||||
// ModifyMinInterval specifies whether min_interval should be increased
|
||||
// as well.
|
||||
ModifyMinInterval bool `yaml:"modify_min_interval"`
|
||||
}
|
||||
|
||||
// newConfig parses the given MiddlewareConfig as a varinterval.Config.
|
||||
//
|
||||
// The contents of the config are not checked.
|
||||
func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) {
|
||||
bytes, err := yaml.Marshal(mwcfg.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
err = yaml.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
|
@ -1,59 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package varinterval
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
)
|
||||
|
||||
type configTestData struct {
|
||||
modifyProbability string
|
||||
maxIncreaseDelta string
|
||||
modifyMinInterval string
|
||||
err bool
|
||||
expected Config
|
||||
}
|
||||
|
||||
var (
|
||||
configTemplate = `
|
||||
name: foo
|
||||
config:
|
||||
modify_response_probability: %s
|
||||
max_increase_delta: %s
|
||||
modify_min_interval: %s`
|
||||
|
||||
configData = []configTestData{
|
||||
{"1.0", "60", "false", false, Config{1.0, 60, false}},
|
||||
{"a", "60", "false", true, Config{}},
|
||||
}
|
||||
)
|
||||
|
||||
func TestNewConfig(t *testing.T) {
|
||||
var mwconfig chihaya.MiddlewareConfig
|
||||
|
||||
cfg, err := newConfig(mwconfig)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, cfg)
|
||||
|
||||
for _, test := range configData {
|
||||
config := fmt.Sprintf(configTemplate, test.modifyProbability, test.maxIncreaseDelta, test.modifyMinInterval)
|
||||
err = yaml.Unmarshal([]byte(config), &mwconfig)
|
||||
assert.Nil(t, err)
|
||||
|
||||
cfg, err = newConfig(mwconfig)
|
||||
if test.err {
|
||||
assert.NotNil(t, err)
|
||||
continue
|
||||
}
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.expected, *cfg)
|
||||
}
|
||||
}
|
|
@ -1,70 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package varinterval
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
)
|
||||
|
||||
func init() {
|
||||
tracker.RegisterAnnounceMiddlewareConstructor("varinterval", constructor)
|
||||
}
|
||||
|
||||
type varintervalMiddleware struct {
|
||||
cfg *Config
|
||||
r *rand.Rand
|
||||
}
|
||||
|
||||
// constructor provides a middleware constructor that returns a middleware to
|
||||
// insert a variation into announce intervals.
|
||||
//
|
||||
// It returns an error if the config provided is either syntactically or
|
||||
// semantically incorrect.
|
||||
func constructor(c chihaya.MiddlewareConfig) (tracker.AnnounceMiddleware, error) {
|
||||
cfg, err := newConfig(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1 {
|
||||
return nil, errors.New("modify_response_probability must be in [0,1)")
|
||||
}
|
||||
|
||||
if cfg.MaxIncreaseDelta <= 0 {
|
||||
return nil, errors.New("max_increase_delta must be > 0")
|
||||
}
|
||||
|
||||
mw := varintervalMiddleware{
|
||||
cfg: cfg,
|
||||
r: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
|
||||
return mw.modifyResponse, nil
|
||||
}
|
||||
|
||||
func (mw *varintervalMiddleware) modifyResponse(next tracker.AnnounceHandler) tracker.AnnounceHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
|
||||
err := next(cfg, req, resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mw.cfg.ModifyResponseProbability == 1 || mw.r.Float32() < mw.cfg.ModifyResponseProbability {
|
||||
addSeconds := time.Duration(mw.r.Intn(mw.cfg.MaxIncreaseDelta)+1) * time.Second
|
||||
resp.Interval += addSeconds
|
||||
|
||||
if mw.cfg.ModifyMinInterval {
|
||||
resp.MinInterval += addSeconds
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package varinterval
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
)
|
||||
|
||||
type constructorTestData struct {
|
||||
cfg Config
|
||||
error bool
|
||||
}
|
||||
|
||||
var constructorData = []constructorTestData{
|
||||
{Config{1.0, 10, false}, false},
|
||||
{Config{1.1, 10, false}, true},
|
||||
{Config{0, 10, true}, true},
|
||||
{Config{1.0, 0, false}, true},
|
||||
}
|
||||
|
||||
func TestConstructor(t *testing.T) {
|
||||
for _, tt := range constructorData {
|
||||
_, err := constructor(chihaya.MiddlewareConfig{
|
||||
Config: tt.cfg,
|
||||
})
|
||||
|
||||
if tt.error {
|
||||
assert.NotNil(t, err, fmt.Sprintf("error expected for %+v", tt.cfg))
|
||||
} else {
|
||||
assert.Nil(t, err, fmt.Sprintf("no error expected for %+v", tt.cfg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestModifyResponse(t *testing.T) {
|
||||
var (
|
||||
achain tracker.AnnounceChain
|
||||
req chihaya.AnnounceRequest
|
||||
resp chihaya.AnnounceResponse
|
||||
)
|
||||
|
||||
mw, err := constructor(chihaya.MiddlewareConfig{
|
||||
Config: Config{
|
||||
ModifyResponseProbability: 1.0,
|
||||
MaxIncreaseDelta: 10,
|
||||
ModifyMinInterval: true,
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
|
||||
achain.Append(mw)
|
||||
handler := achain.Handler()
|
||||
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, resp.Interval > 0, "interval should have been increased")
|
||||
assert.True(t, resp.MinInterval > 0, "min_interval should have been increased")
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
// Package bencode implements bencoding of data as defined in BEP 3 using
|
||||
// type assertion over reflection for performance.
|
||||
package bencode
|
||||
|
||||
// Dict represents a bencode dictionary.
|
||||
type Dict map[string]interface{}
|
||||
|
||||
// NewDict allocates the memory for a Dict.
|
||||
func NewDict() Dict {
|
||||
return make(Dict)
|
||||
}
|
||||
|
||||
// List represents a bencode list.
|
||||
type List []interface{}
|
||||
|
||||
// NewList allocates the memory for a List.
|
||||
func NewList() List {
|
||||
return make(List, 0)
|
||||
}
|
|
@ -1,135 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package bencode
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A Decoder reads bencoded objects from an input stream.
|
||||
type Decoder struct {
|
||||
r *bufio.Reader
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{r: bufio.NewReader(r)}
|
||||
}
|
||||
|
||||
// Decode unmarshals the next bencoded value in the stream.
|
||||
func (dec *Decoder) Decode() (interface{}, error) {
|
||||
return unmarshal(dec.r)
|
||||
}
|
||||
|
||||
// Unmarshal deserializes and returns the bencoded value in buf.
|
||||
func Unmarshal(buf []byte) (interface{}, error) {
|
||||
r := bufio.NewReader(bytes.NewBuffer(buf))
|
||||
return unmarshal(r)
|
||||
}
|
||||
|
||||
// unmarshal reads bencoded values from a bufio.Reader
|
||||
func unmarshal(r *bufio.Reader) (interface{}, error) {
|
||||
tok, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch tok {
|
||||
case 'i':
|
||||
return readTerminatedInt(r, 'e')
|
||||
|
||||
case 'l':
|
||||
list := NewList()
|
||||
for {
|
||||
ok, err := readTerminator(r, 'e')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if ok {
|
||||
break
|
||||
}
|
||||
|
||||
v, err := unmarshal(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list = append(list, v)
|
||||
}
|
||||
return list, nil
|
||||
|
||||
case 'd':
|
||||
dict := NewDict()
|
||||
for {
|
||||
ok, err := readTerminator(r, 'e')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if ok {
|
||||
break
|
||||
}
|
||||
|
||||
v, err := unmarshal(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("bencode: non-string map key")
|
||||
}
|
||||
|
||||
dict[key], err = unmarshal(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return dict, nil
|
||||
|
||||
default:
|
||||
err = r.UnreadByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
length, err := readTerminatedInt(r, ':')
|
||||
if err != nil {
|
||||
return nil, errors.New("bencode: unknown input sequence")
|
||||
}
|
||||
|
||||
buf := make([]byte, length)
|
||||
n, err := r.Read(buf)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if int64(n) != length {
|
||||
return nil, errors.New("bencode: short read")
|
||||
}
|
||||
|
||||
return string(buf), nil
|
||||
}
|
||||
}
|
||||
|
||||
func readTerminator(r io.ByteScanner, term byte) (bool, error) {
|
||||
tok, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else if tok == term {
|
||||
return true, nil
|
||||
}
|
||||
return false, r.UnreadByte()
|
||||
}
|
||||
|
||||
func readTerminatedInt(r *bufio.Reader, term byte) (int64, error) {
|
||||
buf, err := r.ReadSlice(term)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
} else if len(buf) <= 1 {
|
||||
return 0, errors.New("bencode: empty integer field")
|
||||
}
|
||||
|
||||
return strconv.ParseInt(string(buf[:len(buf)-1]), 10, 64)
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package bencode
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var unmarshalTests = []struct {
|
||||
input string
|
||||
expected interface{}
|
||||
}{
|
||||
{"i42e", int64(42)},
|
||||
{"i-42e", int64(-42)},
|
||||
|
||||
{"7:example", "example"},
|
||||
|
||||
{"l3:one3:twoe", List{"one", "two"}},
|
||||
{"le", List{}},
|
||||
|
||||
{"d3:one2:aa3:two2:bbe", Dict{"one": "aa", "two": "bb"}},
|
||||
{"de", Dict{}},
|
||||
}
|
||||
|
||||
func TestUnmarshal(t *testing.T) {
|
||||
for _, tt := range unmarshalTests {
|
||||
got, err := Unmarshal([]byte(tt.input))
|
||||
assert.Nil(t, err, "unmarshal should not fail")
|
||||
assert.Equal(t, got, tt.expected, "unmarshalled values should match the expected results")
|
||||
}
|
||||
}
|
||||
|
||||
type bufferLoop struct {
|
||||
val string
|
||||
}
|
||||
|
||||
func (r *bufferLoop) Read(b []byte) (int, error) {
|
||||
n := copy(b, r.val)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalScalar(b *testing.B) {
|
||||
d1 := NewDecoder(&bufferLoop{"7:example"})
|
||||
d2 := NewDecoder(&bufferLoop{"i42e"})
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
d1.Decode()
|
||||
d2.Decode()
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalLarge(t *testing.T) {
|
||||
data := Dict{
|
||||
"k1": List{"a", "b", "c"},
|
||||
"k2": int64(42),
|
||||
"k3": "val",
|
||||
"k4": int64(-42),
|
||||
}
|
||||
|
||||
buf, _ := Marshal(data)
|
||||
dec := NewDecoder(&bufferLoop{string(buf)})
|
||||
|
||||
got, err := dec.Decode()
|
||||
assert.Nil(t, err, "decode should not fail")
|
||||
assert.Equal(t, got, data, "encoding and decoding should equal the original value")
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalLarge(b *testing.B) {
|
||||
data := map[string]interface{}{
|
||||
"k1": []string{"a", "b", "c"},
|
||||
"k2": 42,
|
||||
"k3": "val",
|
||||
"k4": uint(42),
|
||||
}
|
||||
|
||||
buf, _ := Marshal(data)
|
||||
dec := NewDecoder(&bufferLoop{string(buf)})
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
dec.Decode()
|
||||
}
|
||||
}
|
|
@ -1,163 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package bencode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An Encoder writes bencoded objects to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w}
|
||||
}
|
||||
|
||||
// Encode writes the bencoding of v to the stream.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
return marshal(enc.w, v)
|
||||
}
|
||||
|
||||
// Marshal returns the bencoding of v.
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
buf := &bytes.Buffer{}
|
||||
err := marshal(buf, v)
|
||||
return buf.Bytes(), err
|
||||
}
|
||||
|
||||
// Marshaler is the interface implemented by objects that can marshal
|
||||
// themselves.
|
||||
type Marshaler interface {
|
||||
MarshalBencode() ([]byte, error)
|
||||
}
|
||||
|
||||
// marshal writes types bencoded to an io.Writer
|
||||
func marshal(w io.Writer, data interface{}) error {
|
||||
switch v := data.(type) {
|
||||
case Marshaler:
|
||||
bencoded, err := v.MarshalBencode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(bencoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case string:
|
||||
marshalString(w, v)
|
||||
|
||||
case int:
|
||||
marshalInt(w, int64(v))
|
||||
|
||||
case uint:
|
||||
marshalUint(w, uint64(v))
|
||||
|
||||
case int16:
|
||||
marshalInt(w, int64(v))
|
||||
|
||||
case uint16:
|
||||
marshalUint(w, uint64(v))
|
||||
|
||||
case int32:
|
||||
marshalInt(w, int64(v))
|
||||
|
||||
case uint32:
|
||||
marshalUint(w, uint64(v))
|
||||
|
||||
case int64:
|
||||
marshalInt(w, v)
|
||||
|
||||
case uint64:
|
||||
marshalUint(w, v)
|
||||
|
||||
case []byte:
|
||||
marshalBytes(w, v)
|
||||
|
||||
case time.Duration: // Assume seconds
|
||||
marshalInt(w, int64(v/time.Second))
|
||||
|
||||
case Dict:
|
||||
marshal(w, map[string]interface{}(v))
|
||||
|
||||
case []Dict:
|
||||
w.Write([]byte{'l'})
|
||||
for _, val := range v {
|
||||
err := marshal(w, val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.Write([]byte{'e'})
|
||||
|
||||
case map[string]interface{}:
|
||||
w.Write([]byte{'d'})
|
||||
for key, val := range v {
|
||||
marshalString(w, key)
|
||||
err := marshal(w, val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.Write([]byte{'e'})
|
||||
|
||||
case []string:
|
||||
w.Write([]byte{'l'})
|
||||
for _, val := range v {
|
||||
err := marshal(w, val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.Write([]byte{'e'})
|
||||
|
||||
case List:
|
||||
marshal(w, []interface{}(v))
|
||||
|
||||
case []interface{}:
|
||||
w.Write([]byte{'l'})
|
||||
for _, val := range v {
|
||||
err := marshal(w, val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.Write([]byte{'e'})
|
||||
|
||||
default:
|
||||
return fmt.Errorf("attempted to marshal unsupported type: %T", v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func marshalInt(w io.Writer, v int64) {
|
||||
w.Write([]byte{'i'})
|
||||
w.Write([]byte(strconv.FormatInt(v, 10)))
|
||||
w.Write([]byte{'e'})
|
||||
}
|
||||
|
||||
func marshalUint(w io.Writer, v uint64) {
|
||||
w.Write([]byte{'i'})
|
||||
w.Write([]byte(strconv.FormatUint(v, 10)))
|
||||
w.Write([]byte{'e'})
|
||||
}
|
||||
|
||||
func marshalBytes(w io.Writer, v []byte) {
|
||||
w.Write([]byte(strconv.Itoa(len(v))))
|
||||
w.Write([]byte{':'})
|
||||
w.Write(v)
|
||||
}
|
||||
|
||||
func marshalString(w io.Writer, v string) {
|
||||
marshalBytes(w, []byte(v))
|
||||
}
|
|
@ -1,71 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package bencode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var marshalTests = []struct {
|
||||
input interface{}
|
||||
expected []string
|
||||
}{
|
||||
{int(42), []string{"i42e"}},
|
||||
{int(-42), []string{"i-42e"}},
|
||||
{uint(43), []string{"i43e"}},
|
||||
{int64(44), []string{"i44e"}},
|
||||
{uint64(45), []string{"i45e"}},
|
||||
{int16(44), []string{"i44e"}},
|
||||
{uint16(45), []string{"i45e"}},
|
||||
|
||||
{"example", []string{"7:example"}},
|
||||
{[]byte("example"), []string{"7:example"}},
|
||||
{30 * time.Minute, []string{"i1800e"}},
|
||||
|
||||
{[]string{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}},
|
||||
{[]interface{}{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}},
|
||||
{[]string{}, []string{"le"}},
|
||||
|
||||
{map[string]interface{}{"one": "aa", "two": "bb"}, []string{"d3:one2:aa3:two2:bbe", "d3:two2:bb3:one2:aae"}},
|
||||
{map[string]interface{}{}, []string{"de"}},
|
||||
}
|
||||
|
||||
func TestMarshal(t *testing.T) {
|
||||
for _, test := range marshalTests {
|
||||
got, err := Marshal(test.input)
|
||||
assert.Nil(t, err, "marshal should not fail")
|
||||
assert.Contains(t, test.expected, string(got), "the marshaled result should be one of the expected permutations")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalScalar(b *testing.B) {
|
||||
buf := &bytes.Buffer{}
|
||||
encoder := NewEncoder(buf)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
encoder.Encode("test")
|
||||
encoder.Encode(123)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMarshalLarge(b *testing.B) {
|
||||
data := map[string]interface{}{
|
||||
"k1": []string{"a", "b", "c"},
|
||||
"k2": 42,
|
||||
"k3": "val",
|
||||
"k4": uint(42),
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
encoder := NewEncoder(buf)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
encoder.Encode(data)
|
||||
}
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
// Package clientid implements the parsing of BitTorrent ClientIDs from
|
||||
// BitTorrent PeerIDs.
|
||||
package clientid
|
||||
|
||||
// New returns the part of a PeerID that identifies a peer's client software.
|
||||
func New(peerID string) (clientID string) {
|
||||
length := len(peerID)
|
||||
if length >= 6 {
|
||||
if peerID[0] == '-' {
|
||||
if length >= 7 {
|
||||
clientID = peerID[1:7]
|
||||
}
|
||||
} else {
|
||||
clientID = peerID[:6]
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -1,62 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package clientid
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestClientID(t *testing.T) {
|
||||
var clientTable = []struct {
|
||||
peerID string
|
||||
clientID string
|
||||
}{
|
||||
{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
|
||||
{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
|
||||
{"-BS5820-oy4La2MWGEFj", "BS5820"},
|
||||
{"-AR6360-6oZyyMWoOOBe", "AR6360"},
|
||||
{"-AG2083-s1hiF8vGAAg0", "AG2083"},
|
||||
{"-AG3003-lEl2Mm4NEO4n", "AG3003"},
|
||||
{"-MR1100-00HS~T7*65rm", "MR1100"},
|
||||
{"-LK0140-ATIV~nbEQAMr", "LK0140"},
|
||||
{"-KT2210-347143496631", "KT2210"},
|
||||
{"-TR0960-6ep6svaa61r4", "TR0960"},
|
||||
{"-XX1150-dv220cotgj4d", "XX1150"},
|
||||
{"-AZ2504-192gwethivju", "AZ2504"},
|
||||
{"-KT4310-3L4UvarKuqIu", "KT4310"},
|
||||
{"-AZ2060-0xJQ02d4309O", "AZ2060"},
|
||||
{"-BD0300-2nkdf08Jd890", "BD0300"},
|
||||
{"-A~0010-a9mn9DFkj39J", "A~0010"},
|
||||
{"-UT2300-MNu93JKnm930", "UT2300"},
|
||||
{"-UT2300-KT4310KT4301", "UT2300"},
|
||||
|
||||
{"T03A0----f089kjsdf6e", "T03A0-"},
|
||||
{"S58B-----nKl34GoNb75", "S58B--"},
|
||||
{"M4-4-0--9aa757Efd5Bl", "M4-4-0"},
|
||||
|
||||
{"AZ2500BTeYUzyabAfo6U", "AZ2500"}, // BitTyrant
|
||||
{"exbc0JdSklm834kj9Udf", "exbc0J"}, // Old BitComet
|
||||
{"FUTB0L84j542mVc84jkd", "FUTB0L"}, // Alt BitComet
|
||||
{"XBT054d-8602Jn83NnF9", "XBT054"}, // XBT
|
||||
{"OP1011affbecbfabeefb", "OP1011"}, // Opera
|
||||
{"-ML2.7.2-kgjjfkd9762", "ML2.7."}, // MLDonkey
|
||||
{"-BOWA0C-SDLFJWEIORNM", "BOWA0C"}, // Bits on Wheels
|
||||
{"Q1-0-0--dsn34DFn9083", "Q1-0-0"}, // Queen Bee
|
||||
{"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt
|
||||
{"346------SDFknl33408", "346---"}, // TorreTopia
|
||||
{"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod
|
||||
|
||||
{"", ""},
|
||||
{"-", ""},
|
||||
{"12345", ""},
|
||||
{"-12345", ""},
|
||||
{"123456", "123456"},
|
||||
{"-123456", "123456"},
|
||||
}
|
||||
|
||||
for _, tt := range clientTable {
|
||||
if parsedID := New(tt.peerID); parsedID != tt.clientID {
|
||||
t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,70 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
// Package event implements type-level constraints for dealing with the events
|
||||
// communicated via BitTorrent announce.
|
||||
package event
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrUnknownEvent is returned when New fails to return an event.
|
||||
var ErrUnknownEvent = errors.New("unknown event")
|
||||
|
||||
// Event represents an event done by a BitTorrent client.
|
||||
type Event uint8
|
||||
|
||||
const (
|
||||
// None is the event when a BitTorrent client announces due to time lapsed
|
||||
// since the previous announce.
|
||||
None Event = iota
|
||||
|
||||
// Started is the event sent by a BitTorrent client when it joins a swarm.
|
||||
Started
|
||||
|
||||
// Stopped is the event sent by a BitTorrent client when it leaves a swarm.
|
||||
Stopped
|
||||
|
||||
// Completed is the event sent by a BitTorrent client when it finishes
|
||||
// downloading all of the required chunks.
|
||||
Completed
|
||||
)
|
||||
|
||||
var (
|
||||
eventToString = make(map[Event]string)
|
||||
stringToEvent = make(map[string]Event)
|
||||
)
|
||||
|
||||
func init() {
|
||||
eventToString[None] = "none"
|
||||
eventToString[Started] = "started"
|
||||
eventToString[Stopped] = "stopped"
|
||||
eventToString[Completed] = "completed"
|
||||
|
||||
stringToEvent[""] = None
|
||||
|
||||
for k, v := range eventToString {
|
||||
stringToEvent[v] = k
|
||||
}
|
||||
}
|
||||
|
||||
// New returns the proper Event given a string.
|
||||
func New(eventStr string) (Event, error) {
|
||||
if e, ok := stringToEvent[strings.ToLower(eventStr)]; ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
return None, ErrUnknownEvent
|
||||
}
|
||||
|
||||
// String implements Stringer for an event.
|
||||
func (e Event) String() string {
|
||||
if name, ok := eventToString[e]; ok {
|
||||
return name
|
||||
}
|
||||
|
||||
panic("event: event has no associated name")
|
||||
}
|
|
@ -1,33 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package event

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNew(t *testing.T) {
	var table = []struct {
		data        string
		expected    Event
		expectedErr error
	}{
		{"", None, nil},
		{"NONE", None, nil},
		{"none", None, nil},
		{"started", Started, nil},
		{"stopped", Stopped, nil},
		{"completed", Completed, nil},
		{"notAnEvent", None, ErrUnknownEvent},
	}

	for _, tt := range table {
		got, err := New(tt.data)
		assert.Equal(t, err, tt.expectedErr, "errors should equal the expected value")
		assert.Equal(t, got, tt.expected, "events should equal the expected value")
	}
}
@ -1,74 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package random

import (
	"math/rand"
	"net"

	"github.com/chihaya/chihaya"
)

// Peer generates a random chihaya.Peer.
//
// prefix is the prefix to use for the peer ID. If len(prefix) > 20, it will be
// truncated to 20 characters. If len(prefix) < 20, it will be padded with an
// alphanumeric random string to have 20 characters.
//
// v6 indicates whether an IPv6 address should be generated.
// Regardless of the length of the generated IP address, its bytes will have
// values in [1,254].
//
// minPort and maxPort describe the range for the randomly generated port, where
// minPort <= port < maxPort.
// minPort and maxPort will be checked and altered so that
// 1 <= minPort <= maxPort <= 65536.
// If minPort == maxPort, port will be set to minPort.
func Peer(r *rand.Rand, prefix string, v6 bool, minPort, maxPort int) chihaya.Peer {
	var (
		port uint16
		ip   net.IP
	)

	if minPort <= 0 {
		minPort = 1
	}
	if maxPort > 65536 {
		maxPort = 65536
	}
	if maxPort < minPort {
		maxPort = minPort
	}
	if len(prefix) > 20 {
		prefix = prefix[:20]
	}

	if minPort == maxPort {
		port = uint16(minPort)
	} else {
		port = uint16(r.Int63()%int64(maxPort-minPort)) + uint16(minPort)
	}

	if v6 {
		b := make([]byte, 16)
		ip = net.IP(b)
	} else {
		b := make([]byte, 4)
		ip = net.IP(b)
	}

	for i := range ip {
		b := r.Intn(254) + 1
		ip[i] = byte(b)
	}

	prefix = prefix + AlphaNumericString(r, 20-len(prefix))

	return chihaya.Peer{
		ID:   chihaya.PeerIDFromString(prefix),
		Port: port,
		IP:   ip,
	}
}
@ -1,43 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package random

import (
	"math/rand"
	"net"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPeer(t *testing.T) {
	r := rand.New(rand.NewSource(0))

	for i := 0; i < 100; i++ {
		minPort := 2000
		maxPort := 2010
		p := Peer(r, "", false, minPort, maxPort)
		assert.Equal(t, 20, len(p.ID))
		assert.True(t, p.Port >= uint16(minPort) && p.Port < uint16(maxPort))
		assert.NotNil(t, p.IP.To4())
	}

	for i := 0; i < 100; i++ {
		minPort := 2000
		maxPort := 2010
		p := Peer(r, "", true, minPort, maxPort)
		assert.Equal(t, 20, len(p.ID))
		assert.True(t, p.Port >= uint16(minPort) && p.Port < uint16(maxPort))
		assert.True(t, len(p.IP) == net.IPv6len)
	}

	p := Peer(r, "abcdefghijklmnopqrst", false, 2000, 2000)
	assert.Equal(t, "abcdefghijklmnopqrst", string(p.ID[:]))
	assert.Equal(t, uint16(2000), p.Port)

	p = Peer(r, "abcdefghijklmnopqrstUVWXYZ", true, -10, -5)
	assert.Equal(t, "abcdefghijklmnopqrst", string(p.ID[:]))
	assert.True(t, p.Port >= uint16(1) && p.Port <= uint16(65535))
}
@ -1,26 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package random

import "math/rand"

// AlphaNumeric is an alphabet with all lower- and uppercase letters and
// numbers.
const AlphaNumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// AlphaNumericString is a shorthand for String(r, l, AlphaNumeric).
func AlphaNumericString(r rand.Source, l int) string {
	return String(r, l, AlphaNumeric)
}

// String generates a random string of length l, containing only runes from
// the alphabet using the random source r.
func String(r rand.Source, l int, alphabet string) string {
	b := make([]byte, l)
	for i := range b {
		b[i] = alphabet[r.Int63()%int64(len(alphabet))]
	}
	return string(b)
}
@ -1,30 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package random

import (
	"math/rand"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAlphaNumericString(t *testing.T) {
	r := rand.NewSource(0)

	s := AlphaNumericString(r, 0)
	assert.Equal(t, 0, len(s))

	s = AlphaNumericString(r, 10)
	assert.Equal(t, 10, len(s))

	for i := 0; i < 100; i++ {
		s := AlphaNumericString(r, 10)
		for _, c := range s {
			assert.True(t, strings.Contains(AlphaNumeric, string(c)))
		}
	}
}
@ -1,101 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package stopper

import (
	"sync"
)

// AlreadyStopped is a closed error channel to be used by StopperFuncs when
// an element was already stopped.
var AlreadyStopped <-chan error

// AlreadyStoppedFunc is a StopperFunc that returns AlreadyStopped.
var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped }

func init() {
	closeMe := make(chan error)
	close(closeMe)
	AlreadyStopped = closeMe
}

// Stopper is an interface that allows a clean shutdown.
type Stopper interface {
	// Stop returns a channel that indicates whether the stop was
	// successful.
	// The channel can either return one error or be closed. Closing the
	// channel signals a clean shutdown.
	// The Stop function should return immediately and perform the actual
	// shutdown in a separate goroutine.
	Stop() <-chan error
}

// StopGroup is a group that can be stopped.
type StopGroup struct {
	stoppables     []StopperFunc
	stoppablesLock sync.Mutex
}

// StopperFunc is a function that can be used to provide a clean shutdown.
type StopperFunc func() <-chan error

// NewStopGroup creates a new StopGroup.
func NewStopGroup() *StopGroup {
	return &StopGroup{
		stoppables: make([]StopperFunc, 0),
	}
}

// Add adds a Stopper to the StopGroup.
// On the next call to Stop(), the Stopper will be stopped.
func (cg *StopGroup) Add(toAdd Stopper) {
	cg.stoppablesLock.Lock()
	defer cg.stoppablesLock.Unlock()

	cg.stoppables = append(cg.stoppables, toAdd.Stop)
}

// AddFunc adds a StopperFunc to the StopGroup.
// On the next call to Stop(), the StopperFunc will be called.
func (cg *StopGroup) AddFunc(toAddFunc StopperFunc) {
	cg.stoppablesLock.Lock()
	defer cg.stoppablesLock.Unlock()

	cg.stoppables = append(cg.stoppables, toAddFunc)
}

// Stop stops all members of the StopGroup.
// Stopping will be done in a concurrent fashion.
// The slice of errors returned contains all errors returned by stopping the
// members.
func (cg *StopGroup) Stop() []error {
	cg.stoppablesLock.Lock()
	defer cg.stoppablesLock.Unlock()

	var errors []error
	whenDone := make(chan struct{})

	waitChannels := make([]<-chan error, 0, len(cg.stoppables))
	for _, toStop := range cg.stoppables {
		waitFor := toStop()
		if waitFor == nil {
			panic("received a nil chan from Stop")
		}
		waitChannels = append(waitChannels, waitFor)
	}

	go func() {
		for _, waitForMe := range waitChannels {
			err := <-waitForMe
			if err != nil {
				errors = append(errors, err)
			}
		}
		close(whenDone)
	}()

	<-whenDone
	return errors
}
@ -1,38 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package http

import (
	"time"

	"gopkg.in/yaml.v2"

	"github.com/chihaya/chihaya"
)

type httpConfig struct {
	Addr             string        `yaml:"addr"`
	RequestTimeout   time.Duration `yaml:"request_timeout"`
	ReadTimeout      time.Duration `yaml:"read_timeout"`
	WriteTimeout     time.Duration `yaml:"write_timeout"`
	AllowIPSpoofing  bool          `yaml:"allow_ip_spoofing"`
	DualStackedPeers bool          `yaml:"dual_stacked_peers"`
	RealIPHeader     string        `yaml:"real_ip_header"`
}

func newHTTPConfig(srvcfg *chihaya.ServerConfig) (*httpConfig, error) {
	bytes, err := yaml.Marshal(srvcfg.Config)
	if err != nil {
		return nil, err
	}

	var cfg httpConfig
	err = yaml.Unmarshal(bytes, &cfg)
	if err != nil {
		return nil, err
	}

	return &cfg, nil
}
@ -1,133 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package query implements a simple, fast URL parser designed to be used to
// parse parameters sent from BitTorrent clients. The last value of a key wins,
// except for the key "info_hash".
package query

import (
	"errors"
	"net/url"
	"strconv"
	"strings"

	"github.com/chihaya/chihaya"
)

// ErrKeyNotFound is returned when a provided key has no value associated with
// it.
var ErrKeyNotFound = errors.New("query: value for the provided key does not exist")

// ErrInvalidInfohash is returned when parsing a query encounters an infohash
// with invalid length.
var ErrInvalidInfohash = errors.New("query: invalid infohash")

// Query represents a parsed URL.Query.
type Query struct {
	query      string
	params     map[string]string
	infoHashes []chihaya.InfoHash
}

// New parses a raw URL query.
func New(query string) (*Query, error) {
	var (
		keyStart, keyEnd int
		valStart, valEnd int

		onKey = true

		q = &Query{
			query:      query,
			infoHashes: nil,
			params:     make(map[string]string),
		}
	)

	for i, length := 0, len(query); i < length; i++ {
		separator := query[i] == '&' || query[i] == ';' || query[i] == '?'
		last := i == length-1

		if separator || last {
			if onKey && !last {
				keyStart = i + 1
				continue
			}

			if last && !separator && !onKey {
				valEnd = i
			}

			keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])
			if err != nil {
				return nil, err
			}

			var valStr string

			if valEnd > 0 {
				valStr, err = url.QueryUnescape(query[valStart : valEnd+1])
				if err != nil {
					return nil, err
				}
			}

			if keyStr == "info_hash" {
				if len(valStr) != 20 {
					return nil, ErrInvalidInfohash
				}
				q.infoHashes = append(q.infoHashes, chihaya.InfoHashFromString(valStr))
			} else {
				q.params[strings.ToLower(keyStr)] = valStr
			}

			valEnd = 0
			onKey = true
			keyStart = i + 1

		} else if query[i] == '=' {
			onKey = false
			valStart = i + 1
			valEnd = 0
		} else if onKey {
			keyEnd = i
		} else {
			valEnd = i
		}
	}

	return q, nil
}

// String returns a string parsed from a query. Every key can be returned as a
// string because they are encoded in the URL as strings.
func (q *Query) String(key string) (string, error) {
	val, exists := q.params[key]
	if !exists {
		return "", ErrKeyNotFound
	}
	return val, nil
}

// Uint64 returns a uint parsed from a query. After being called, it is safe to
// cast the uint64 to your desired length.
func (q *Query) Uint64(key string) (uint64, error) {
	str, exists := q.params[key]
	if !exists {
		return 0, ErrKeyNotFound
	}

	val, err := strconv.ParseUint(str, 10, 64)
	if err != nil {
		return 0, err
	}

	return val, nil
}

// InfoHashes returns a list of requested infohashes.
func (q *Query) InfoHashes() []chihaya.InfoHash {
	return q.infoHashes
}
@ -1,100 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package query

import (
	"net/url"
	"testing"
)

var (
	baseAddr     = "https://www.subdomain.tracker.com:80/"
	testInfoHash = "01234567890123456789"
	testPeerID   = "-TEST01-6wfG2wk6wWLc"

	ValidAnnounceArguments = []url.Values{
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
		url.Values{"peer_id": {""}, "compact": {""}},
	}

	InvalidQueries = []string{
		baseAddr + "announce/?" + "info_hash=%0%a",
	}
)

func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
	if len(boxed) != len(unboxed) {
		return false
	}

	for mapKey, mapVal := range boxed {
		// Always expect box to hold only one element
		if len(mapVal) != 1 || mapVal[0] != unboxed[mapKey] {
			return false
		}
	}

	return true
}

func TestValidQueries(t *testing.T) {
	for parseIndex, parseVal := range ValidAnnounceArguments {
		parsedQueryObj, err := New(baseAddr + "announce/?" + parseVal.Encode())
		if err != nil {
			t.Error(err)
		}

		if !mapArrayEqual(parseVal, parsedQueryObj.params) {
			t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Received=%v\n", parseIndex, parseVal, parsedQueryObj.params)
		}
	}
}

func TestInvalidQueries(t *testing.T) {
	for parseIndex, parseStr := range InvalidQueries {
		parsedQueryObj, err := New(parseStr)
		if err == nil {
			t.Error("Should have produced error", parseIndex)
		}

		if parsedQueryObj != nil {
			t.Error("Should be nil after error", parsedQueryObj, parseIndex)
		}
	}
}

func BenchmarkParseQuery(b *testing.B) {
	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range ValidAnnounceArguments {
			parsedQueryObj, err := New(baseAddr + "announce/?" + parseStr.Encode())
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}

func BenchmarkURLParseQuery(b *testing.B) {
	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range ValidAnnounceArguments {
			parsedQueryObj, err := url.ParseQuery(baseAddr + "announce/?" + parseStr.Encode())
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}
@ -1,183 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package http

import (
	"net"
	"net/http"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/pkg/event"
	"github.com/chihaya/chihaya/server/http/query"
	"github.com/chihaya/chihaya/tracker"
)

func announceRequest(r *http.Request, cfg *httpConfig) (*chihaya.AnnounceRequest, error) {
	q, err := query.New(r.URL.RawQuery)
	if err != nil {
		return nil, err
	}

	request := &chihaya.AnnounceRequest{Params: q}

	eventStr, err := q.String("event")
	if err == query.ErrKeyNotFound {
		eventStr = ""
	} else if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: event")
	}
	request.Event, err = event.New(eventStr)
	if err != nil {
		return nil, tracker.ClientError("failed to provide valid client event")
	}

	compactStr, _ := q.String("compact")
	request.Compact = compactStr != "" && compactStr != "0"

	infoHashes := q.InfoHashes()
	if len(infoHashes) < 1 {
		return nil, tracker.ClientError("no info_hash parameter supplied")
	}
	if len(infoHashes) > 1 {
		return nil, tracker.ClientError("multiple info_hash parameters supplied")
	}
	request.InfoHash = infoHashes[0]

	peerID, err := q.String("peer_id")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: peer_id")
	}
	if len(peerID) != 20 {
		return nil, tracker.ClientError("failed to provide valid peer_id")
	}
	request.PeerID = chihaya.PeerIDFromString(peerID)

	request.Left, err = q.Uint64("left")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: left")
	}

	request.Downloaded, err = q.Uint64("downloaded")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: downloaded")
	}

	request.Uploaded, err = q.Uint64("uploaded")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: uploaded")
	}

	numwant, _ := q.Uint64("numwant")
	request.NumWant = int32(numwant)

	port, err := q.Uint64("port")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: port")
	}
	request.Port = uint16(port)

	v4, v6, err := requestedIP(q, r, cfg)
	if err != nil {
		return nil, tracker.ClientError("failed to parse remote IP")
	}
	request.IPv4 = v4
	request.IPv6 = v6

	return request, nil
}

func scrapeRequest(r *http.Request, cfg *httpConfig) (*chihaya.ScrapeRequest, error) {
	q, err := query.New(r.URL.RawQuery)
	if err != nil {
		return nil, err
	}

	infoHashes := q.InfoHashes()
	if len(infoHashes) < 1 {
		return nil, tracker.ClientError("no info_hash parameter supplied")
	}

	request := &chihaya.ScrapeRequest{
		InfoHashes: infoHashes,
		Params:     q,
	}

	return request, nil
}

// requestedIP returns the IP address for a request. If there are multiple in
// the request, one IPv4 and one IPv6 will be returned.
func requestedIP(p chihaya.Params, r *http.Request, cfg *httpConfig) (v4, v6 net.IP, err error) {
	var done bool

	if cfg.AllowIPSpoofing {
		if str, e := p.String("ip"); e == nil {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}

		if str, e := p.String("ipv4"); e == nil {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}

		if str, e := p.String("ipv6"); e == nil {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}
	}

	if cfg.RealIPHeader != "" {
		if xRealIPs, ok := r.Header[cfg.RealIPHeader]; ok {
			if v4, v6, done = getIPs(string(xRealIPs[0]), v4, v6, cfg); done {
				return
			}
		}
	} else {
		if r.RemoteAddr == "" && v4 == nil {
			if v4, v6, done = getIPs("127.0.0.1", v4, v6, cfg); done {
				return
			}
		}

		if v4, v6, done = getIPs(r.RemoteAddr, v4, v6, cfg); done {
			return
		}
	}

	if v4 == nil && v6 == nil {
		err = tracker.ClientError("failed to parse IP address")
	}

	return
}

func getIPs(ipstr string, ipv4, ipv6 net.IP, cfg *httpConfig) (net.IP, net.IP, bool) {
	host, _, err := net.SplitHostPort(ipstr)
	if err != nil {
		host = ipstr
	}

	if ip := net.ParseIP(host); ip != nil {
		ipTo4 := ip.To4()
		if ipv4 == nil && ipTo4 != nil {
			ipv4 = ipTo4
		} else if ipv6 == nil && ipTo4 == nil {
			ipv6 = ip
		}
	}

	var done bool
	if cfg.DualStackedPeers {
		done = ipv4 != nil && ipv6 != nil
	} else {
		done = ipv4 != nil || ipv6 != nil
	}

	return ipv4, ipv6, done
}
@ -1,133 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package http

import (
	"errors"
	"log"
	"net"
	"net/http"

	"github.com/julienschmidt/httprouter"
	"github.com/tylerb/graceful"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/server"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	server.Register("http", constructor)
}

func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {
	cfg, err := newHTTPConfig(srvcfg)
	if err != nil {
		return nil, errors.New("http: invalid config: " + err.Error())
	}

	return &httpServer{
		cfg: cfg,
		tkr: tkr,
	}, nil
}

type httpServer struct {
	cfg   *httpConfig
	tkr   *tracker.Tracker
	grace *graceful.Server
}

// Start runs the server and blocks until it has exited.
//
// It panics if the server exits unexpectedly.
func (s *httpServer) Start() {
	s.grace = &graceful.Server{
		Server: &http.Server{
			Addr:         s.cfg.Addr,
			Handler:      s.routes(),
			ReadTimeout:  s.cfg.ReadTimeout,
			WriteTimeout: s.cfg.WriteTimeout,
		},
		Timeout:          s.cfg.RequestTimeout,
		NoSignalHandling: true,
		ConnState: func(conn net.Conn, state http.ConnState) {
			switch state {
			case http.StateNew:
				//stats.RecordEvent(stats.AcceptedConnection)

			case http.StateClosed:
				//stats.RecordEvent(stats.ClosedConnection)

			case http.StateHijacked:
				panic("http: connection impossibly hijacked")

			// Ignore the following cases.
			case http.StateActive, http.StateIdle:

			default:
				panic("http: connection transitioned to unknown state")
			}
		},
	}
	s.grace.SetKeepAlivesEnabled(false)

	if err := s.grace.ListenAndServe(); err != nil {
		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
			log.Printf("Failed to gracefully run HTTP server: %s", err.Error())
			panic(err)
		}
	}

	log.Println("HTTP server shut down cleanly")
}

// Stop stops the server and blocks until the server has exited.
func (s *httpServer) Stop() {
	s.grace.Stop(s.grace.Timeout)
	<-s.grace.StopChan()
}

func (s *httpServer) routes() *httprouter.Router {
	r := httprouter.New()
	r.GET("/announce", s.serveAnnounce)
	r.GET("/scrape", s.serveScrape)
	return r
}

func (s *httpServer) serveAnnounce(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
	req, err := announceRequest(r, s.cfg)
	if err != nil {
		writeError(w, err)
		return
	}

	resp, err := s.tkr.HandleAnnounce(req)
	if err != nil {
		writeError(w, err)
		return
	}

	err = writeAnnounceResponse(w, resp)
	if err != nil {
		log.Println("error serializing response", err)
	}
}

func (s *httpServer) serveScrape(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
	req, err := scrapeRequest(r, s.cfg)
	if err != nil {
		writeError(w, err)
		return
	}

	resp, err := s.tkr.HandleScrape(req)
	if err != nil {
		writeError(w, err)
		return
	}

	writeScrapeResponse(w, resp)
}
@ -1,98 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package http

import (
	"net/http"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/pkg/bencode"
	"github.com/chihaya/chihaya/tracker"
)

func writeError(w http.ResponseWriter, err error) error {
	message := "internal server error"
	if _, clientErr := err.(tracker.ClientError); clientErr {
		message = err.Error()
	}

	w.WriteHeader(http.StatusOK)
	return bencode.NewEncoder(w).Encode(bencode.Dict{
		"failure reason": message,
	})
}

func writeAnnounceResponse(w http.ResponseWriter, resp *chihaya.AnnounceResponse) error {
	bdict := bencode.Dict{
		"complete":     resp.Complete,
		"incomplete":   resp.Incomplete,
		"interval":     resp.Interval,
		"min interval": resp.MinInterval,
	}

	// Add the peers to the dictionary in the compact format.
	if resp.Compact {
		var IPv4CompactDict, IPv6CompactDict []byte

		// Add the IPv4 peers to the dictionary.
		for _, peer := range resp.IPv4Peers {
			IPv4CompactDict = append(IPv4CompactDict, compact(peer)...)
		}
		if len(IPv4CompactDict) > 0 {
			bdict["peers"] = IPv4CompactDict
		}

		// Add the IPv6 peers to the dictionary.
		for _, peer := range resp.IPv6Peers {
			IPv6CompactDict = append(IPv6CompactDict, compact(peer)...)
		}
		if len(IPv6CompactDict) > 0 {
			bdict["peers6"] = IPv6CompactDict
		}

		return bencode.NewEncoder(w).Encode(bdict)
	}

	// Add the peers to the dictionary.
	var peers []bencode.Dict
	for _, peer := range resp.IPv4Peers {
		peers = append(peers, dict(peer))
	}
	for _, peer := range resp.IPv6Peers {
		peers = append(peers, dict(peer))
	}
	bdict["peers"] = peers

	return bencode.NewEncoder(w).Encode(bdict)
}

func writeScrapeResponse(w http.ResponseWriter, resp *chihaya.ScrapeResponse) error {
	filesDict := bencode.NewDict()
	for infohash, scrape := range resp.Files {
		filesDict[string(infohash[:])] = bencode.Dict{
			"complete":   scrape.Complete,
			"incomplete": scrape.Incomplete,
		}
	}

	return bencode.NewEncoder(w).Encode(bencode.Dict{
		"files": filesDict,
	})
}

func compact(peer chihaya.Peer) (buf []byte) {
	buf = []byte(peer.IP)
	buf = append(buf, byte(peer.Port>>8))
	buf = append(buf, byte(peer.Port&0xff))
	return
}

func dict(peer chihaya.Peer) bencode.Dict {
	return bencode.Dict{
		"peer id": string(peer.ID[:]),
		"ip":      peer.IP.String(),
		"port":    peer.Port,
	}
}
@ -1,36 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package http

import (
	"net/http/httptest"
	"testing"

	"github.com/chihaya/chihaya/tracker"
	"github.com/stretchr/testify/assert"
)

func TestWriteError(t *testing.T) {
	var table = []struct {
		reason, expected string
	}{
		{"hello world", "d14:failure reason11:hello worlde"},
		{"what's up", "d14:failure reason9:what's upe"},
	}

	for _, tt := range table {
		r := httptest.NewRecorder()
		err := writeError(r, tracker.ClientError(tt.reason))
		assert.Nil(t, err)
		assert.Equal(t, r.Body.String(), tt.expected)
	}
}

func TestWriteStatus(t *testing.T) {
	r := httptest.NewRecorder()
	err := writeError(r, tracker.ClientError("something is missing"))
	assert.Nil(t, err)
	assert.Equal(t, r.Body.String(), "d14:failure reason20:something is missinge")
}
@ -1,49 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package server

import (
	"sync"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/tracker"
)

// Pool represents a running pool of servers.
type Pool struct {
	servers []Server
	wg      sync.WaitGroup
}

// StartPool creates a new pool of servers specified by the provided
// configuration and runs them.
func StartPool(cfgs []chihaya.ServerConfig, tkr *tracker.Tracker) (*Pool, error) {
	var toReturn Pool

	for _, cfg := range cfgs {
		srv, err := New(&cfg, tkr)
		if err != nil {
			return nil, err
		}

		toReturn.wg.Add(1)
		go func(srv Server) {
			defer toReturn.wg.Done()
			srv.Start()
		}(srv)

		toReturn.servers = append(toReturn.servers, srv)
	}

	return &toReturn, nil
}

// Stop safely shuts down a pool of servers.
func (p *Pool) Stop() {
	for _, srv := range p.servers {
		srv.Stop()
	}
	p.wg.Wait()
}
@ -1,103 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package prometheus implements a chihaya Server for serving metrics to
// Prometheus.
package prometheus

import (
	"errors"
	"log"
	"net"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/tylerb/graceful"
	"gopkg.in/yaml.v2"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/server"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	server.Register("prometheus", constructor)
}

func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {
	cfg, err := NewServerConfig(srvcfg)
	if err != nil {
		return nil, errors.New("prometheus: invalid config: " + err.Error())
	}

	return &Server{
		cfg: cfg,
	}, nil
}

// ServerConfig represents the configuration options for a
// PrometheusServer.
type ServerConfig struct {
	Addr            string        `yaml:"addr"`
	ShutdownTimeout time.Duration `yaml:"shutdown_timeout"`
	ReadTimeout     time.Duration `yaml:"read_timeout"`
	WriteTimeout    time.Duration `yaml:"write_timeout"`
}

// NewServerConfig marshals a chihaya.ServerConfig and unmarshals it
// into a more specific prometheus ServerConfig.
func NewServerConfig(srvcfg *chihaya.ServerConfig) (*ServerConfig, error) {
	bytes, err := yaml.Marshal(srvcfg.Config)
	if err != nil {
		return nil, err
	}

	var cfg ServerConfig
	err = yaml.Unmarshal(bytes, &cfg)
	if err != nil {
		return nil, err
	}

	return &cfg, nil
}

// Server implements a chihaya Server for serving metrics to Prometheus.
type Server struct {
	cfg   *ServerConfig
	grace *graceful.Server
}

var _ server.Server = &Server{}

// Start starts the prometheus server and blocks until it exits.
//
// It panics if the server exits unexpectedly.
func (s *Server) Start() {
	s.grace = &graceful.Server{
		Server: &http.Server{
			Addr:         s.cfg.Addr,
			Handler:      prometheus.Handler(),
			ReadTimeout:  s.cfg.ReadTimeout,
			WriteTimeout: s.cfg.WriteTimeout,
		},
		Timeout:          s.cfg.ShutdownTimeout,
		NoSignalHandling: true,
	}

	if err := s.grace.ListenAndServe(); err != nil {
		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
			log.Printf("Failed to gracefully run Prometheus server: %s", err.Error())
			panic(err)
		}
	}

	log.Println("Prometheus server shut down cleanly")
}

// Stop stops the prometheus server and blocks until it exits.
func (s *Server) Stop() {
	s.grace.Stop(s.cfg.ShutdownTimeout)
	<-s.grace.StopChan()
}
@ -1,56 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package server implements an abstraction over servers meant to be run
// alongside a tracker.
//
// Servers may be implementations of different transport protocols or have their
// own custom behavior.
package server

import (
	"fmt"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/tracker"
)

var constructors = make(map[string]Constructor)

// Constructor is a function that creates a new Server.
type Constructor func(*chihaya.ServerConfig, *tracker.Tracker) (Server, error)

// Register makes a Constructor available by the provided name.
//
// If this function is called twice with the same name or if the Constructor is
// nil, it panics.
func Register(name string, con Constructor) {
	if con == nil {
		panic("server: could not register nil Constructor")
	}
	if _, dup := constructors[name]; dup {
		panic("server: could not register duplicate Constructor: " + name)
	}
	constructors[name] = con
}

// New creates a Server specified by a configuration.
func New(cfg *chihaya.ServerConfig, tkr *tracker.Tracker) (Server, error) {
	con, ok := constructors[cfg.Name]
	if !ok {
		return nil, fmt.Errorf("server: unknown Constructor %q (forgotten import?)", cfg.Name)
	}
	return con(cfg, tkr)
}

// Server represents one instance of a server accessing the tracker.
type Server interface {
	// Start starts a server and blocks until the server exits.
	//
	// It should panic if the server exits unexpectedly.
	Start()

	// Stop stops a server and blocks until the server exits.
	Stop()
}
@ -1,43 +0,0 @@
## The store Package

The `store` package offers a storage interface and middlewares sufficient to run a public tracker based on it.

### Architecture

The store consists of three parts:
- A set of interfaces, tests based on these interfaces, and the store logic, which unifies these interfaces into the store
- Drivers, implementing the store interfaces, and
- Middleware that depends on the store

The store interfaces are `IPStore`, `PeerStore` and `StringStore`.
During runtime, each of them will be implemented by a driver.
Even though all drivers for one interface provide the same functionality, their behaviour can differ substantially.
For example: the memory implementation keeps all state in memory. This is very fast, but not persistent; it loses its state on every restart.
A database-backed driver, on the other hand, could provide persistence at the cost of performance.

The pluggable design of Chihaya allows the different interfaces to use different drivers.
For example: a typical use case of the `StringStore` is to provide blacklists or whitelists for infohashes/client IDs/....
You'd typically want these lists to be persistent, so you'd choose a driver that provides persistence.
The `PeerStore`, on the other hand, rarely needs to be persistent, as all peer state will be restored after one announce interval.
You'd therefore typically choose a very performant but non-persistent driver for the `PeerStore`, as sketched below.
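Drivers follow the same registration pattern as `database/sql`: a driver registers itself under a name in its `init` function, and the store later opens it by that name from its configuration. A minimal sketch of the consumer side, assuming the memory driver lives under `server/store/memory` and that the driver name is carried in the `Name` field of `DriverConfig` (the `OpenIPStore`, `AddNetwork` and `HasIP` calls are the interfaces described above):

```go
package main

import (
	"fmt"
	"net"

	"github.com/chihaya/chihaya/server/store"
	// Drivers register themselves in an init function, so importing a
	// driver package for its side effects is enough to make it available.
	_ "github.com/chihaya/chihaya/server/store/memory"
)

func main() {
	// "memory" is the name the in-memory driver registers itself under.
	ips, err := store.OpenIPStore(&store.DriverConfig{Name: "memory"})
	if err != nil {
		panic(err)
	}

	// Blacklist a whole network, then check a single address against it.
	if err := ips.AddNetwork("192.168.22.0/24"); err != nil {
		panic(err)
	}
	banned, err := ips.HasIP(net.ParseIP("192.168.22.123"))
	if err != nil {
		panic(err)
	}
	fmt.Println(banned) // expected to print true
}
```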

### Testing

The main store package also contains a set of tests and benchmarks for drivers.
Both use the store interfaces and can work with any driver that implements these interfaces.
The tests verify that the driver behaves as specified by the interface and its documentation.
The benchmarks can be used to compare the performance of a wide range of operations on the interfaces.

This makes it very easy to implement a new driver:
all functions that are part of the store interfaces can be tested with the tests that come with the store package.
Generally, the memory implementation can be used as a guideline for implementing new drivers.

Both benchmarks and tests require a clean state to work correctly.
All of the test and benchmark functions therefore take a `*DriverConfig` as a parameter; it should be used to configure the driver so that it provides a clean state for every test or benchmark.
For example: imagine a file-based driver that achieves persistence by storing its state in a file.
It must then be possible to provide the location of this file in the `DriverConfig`, so that every benchmark gets to work with a new file.
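Wiring a new driver into these shared tests and benchmarks then comes down to a few declarations in the driver's test file. A sketch following the memory driver's pattern; the `mydriver` package, its `driver` type, and the empty `DriverConfig` are placeholders for whatever your driver needs to start from a clean state (the `Prepare…` helpers, `TestIPStore` and `AddV4` come from the store package):

```go
package mydriver

import (
	"testing"

	"github.com/chihaya/chihaya/server/store"
)

var (
	// driver is this package's store.IPStoreDriver implementation,
	// defined elsewhere in the package.
	tester      = store.PrepareIPStoreTester(&driver{})
	benchmarker = store.PrepareIPStoreBenchmarker(&driver{})
	// Configure the driver so every test and benchmark sees a clean state,
	// e.g. point a file-backed driver at a fresh temporary file here.
	testConfig = &store.DriverConfig{}
)

func TestIPStore(t *testing.T) {
	tester.TestIPStore(t, testConfig)
}

func BenchmarkIPStore_AddV4(b *testing.B) {
	benchmarker.AddV4(b, testConfig)
}
```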

Most benchmarks come in two flavors: the "normal" version and the "1K" version.
A normal benchmark uses the same value over and over again to benchmark one operation.
A 1K benchmark uses a different value from a set of 1000 values for every iteration; this can expose caching effects, if the driver has any.
The 1K benchmarks require a little more computation to select the values and thus typically yield slightly lower results, even for a "perfect" cache such as the memory implementation.
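To make the difference concrete, the two flavors only differ in how the input for each iteration is chosen. A simplified sketch, not the store's real benchmark helpers (which also handle setup and error checking):

```go
package mydriver

import (
	"net"
	"testing"

	"github.com/chihaya/chihaya/server/store"
)

// benchmarkSame hits the store with the same value every iteration; a driver
// that caches the last lookup will look unrealistically fast here.
func benchmarkSame(b *testing.B, s store.IPStore, ip net.IP) {
	for i := 0; i < b.N; i++ {
		s.HasIP(ip)
	}
}

// benchmark1K cycles through a set of 1000 values, so a trivial
// single-entry cache no longer helps.
func benchmark1K(b *testing.B, s store.IPStore, ips []net.IP) {
	for i := 0; i < b.N; i++ {
		s.HasIP(ips[i%len(ips)])
	}
}
```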
@ -1,93 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package store

import (
	"fmt"
	"net"

	"github.com/chihaya/chihaya/pkg/stopper"
)

var ipStoreDrivers = make(map[string]IPStoreDriver)

// IPStore represents an interface for manipulating IPs and IP ranges.
type IPStore interface {
	// AddIP adds a single IP address to the IPStore.
	AddIP(ip net.IP) error

	// AddNetwork adds a range of IP addresses, denoted by a network in CIDR
	// notation, to the IPStore.
	AddNetwork(network string) error

	// HasIP returns whether the given IP address is contained in the IPStore
	// or belongs to any of the stored networks.
	HasIP(ip net.IP) (bool, error)

	// HasAnyIP returns whether any of the given IP addresses are contained
	// in the IPStore or belong to any of the stored networks.
	HasAnyIP(ips []net.IP) (bool, error)

	// HasAllIPs returns whether all of the given IP addresses are
	// contained in the IPStore or belong to any of the stored networks.
	HasAllIPs(ips []net.IP) (bool, error)

	// RemoveIP removes a single IP address from the IPStore.
	//
	// This will not remove the given address from any networks it belongs to
	// that are stored in the IPStore.
	//
	// Returns ErrResourceDoesNotExist if the given IP address is not
	// contained in the store.
	RemoveIP(ip net.IP) error

	// RemoveNetwork removes a range of IP addresses that was previously
	// added through AddNetwork.
	//
	// The given network need not match, as a string, the previously added
	// network; it only has to denote the same network, e.g. if the network
	// 192.168.22.255/24 was added, removing the network 192.168.22.123/24
	// will succeed.
	//
	// Returns ErrResourceDoesNotExist if the given network is not
	// contained in the store.
	RemoveNetwork(network string) error

	// Stopper provides the Stop method that stops the IPStore.
	// Stop should shut down the IPStore in a separate goroutine and send
	// an error to the channel if the shutdown failed. If the shutdown
	// was successful, the channel is to be closed.
	stopper.Stopper
}

// IPStoreDriver represents an interface for creating a handle to the
// storage of IPs.
type IPStoreDriver interface {
	New(*DriverConfig) (IPStore, error)
}

// RegisterIPStoreDriver makes a driver available by the provided name.
//
// If this function is called twice with the same name or if the driver is nil,
// it panics.
func RegisterIPStoreDriver(name string, driver IPStoreDriver) {
	if driver == nil {
		panic("store: could not register nil IPStoreDriver")
	}
	if _, dup := ipStoreDrivers[name]; dup {
		panic("store: could not register duplicate IPStoreDriver: " + name)
	}
	ipStoreDrivers[name] = driver
}

// OpenIPStore returns an IPStore specified by a configuration.
func OpenIPStore(cfg *DriverConfig) (IPStore, error) {
	driver, ok := ipStoreDrivers[cfg.Name]
	if !ok {
		return nil, fmt.Errorf("store: unknown IPStoreDriver %q (forgotten import?)", cfg.Name)
	}

	return driver.New(cfg)
}
@ -1,225 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/mrd0ll4r/netmatch"
|
||||
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
)
|
||||
|
||||
func init() {
|
||||
store.RegisterIPStoreDriver("memory", &ipStoreDriver{})
|
||||
}
|
||||
|
||||
type ipStoreDriver struct{}
|
||||
|
||||
func (d *ipStoreDriver) New(_ *store.DriverConfig) (store.IPStore, error) {
|
||||
return &ipStore{
|
||||
ips: make(map[[16]byte]struct{}),
|
||||
networks: netmatch.New(),
|
||||
closed: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ipStore implements store.IPStore using an in-memory map of byte arrays and
|
||||
// a trie-like structure.
|
||||
type ipStore struct {
|
||||
ips map[[16]byte]struct{}
|
||||
networks *netmatch.Trie
|
||||
closed chan struct{}
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
var (
|
||||
_ store.IPStore = &ipStore{}
|
||||
v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
|
||||
)
|
||||
|
||||
// key converts an IP address to a [16]byte.
|
||||
// The byte array can then be used as a key for a map, unlike net.IP, which is a
|
||||
// []byte.
|
||||
// If an IPv4 address is specified, it will be prefixed with
|
||||
// the net.v4InV6Prefix and thus becomes a valid IPv6 address.
|
||||
func key(ip net.IP) [16]byte {
|
||||
var array [16]byte
|
||||
|
||||
if len(ip) == net.IPv4len {
|
||||
copy(array[:], v4InV6Prefix)
|
||||
copy(array[12:], ip)
|
||||
} else {
|
||||
copy(array[:], ip)
|
||||
}
|
||||
return array
|
||||
}
|
||||
|
||||
func (s *ipStore) AddNetwork(network string) error {
|
||||
key, length, err := netmatch.ParseNetwork(network)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
return s.networks.Add(key, length)
|
||||
}
|
||||
|
||||
func (s *ipStore) AddIP(ip net.IP) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
s.ips[key(ip)] = struct{}{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ipStore) HasIP(ip net.IP) (bool, error) {
|
||||
key := key(ip)
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
_, ok := s.ips[key]
|
||||
if ok {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
match, err := s.networks.Match(key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return match, nil
|
||||
}
|
||||
|
||||
func (s *ipStore) HasAnyIP(ips []net.IP) (bool, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
key := key(ip)
|
||||
if _, ok := s.ips[key]; ok {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
match, err := s.networks.Match(key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if match {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (s *ipStore) HasAllIPs(ips []net.IP) (bool, error) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
for _, ip := range ips {
|
||||
key := key(ip)
|
||||
if _, ok := s.ips[key]; !ok {
|
||||
match, err := s.networks.Match(key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !match {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *ipStore) RemoveIP(ip net.IP) error {
|
||||
key := key(ip)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
if _, ok := s.ips[key]; !ok {
|
||||
return store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
delete(s.ips, key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ipStore) RemoveNetwork(network string) error {
|
||||
key, length, err := netmatch.ParseNetwork(network)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
err = s.networks.Remove(key, length)
|
||||
if err != nil && err == netmatch.ErrNotContained {
|
||||
return store.ErrResourceDoesNotExist
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *ipStore) Stop() <-chan error {
|
||||
toReturn := make(chan error)
|
||||
go func() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.ips = make(map[[16]byte]struct{})
|
||||
s.networks = netmatch.New()
|
||||
close(s.closed)
|
||||
close(toReturn)
|
||||
}()
|
||||
return toReturn
|
||||
}
|
|
@ -1,200 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
v6 = net.ParseIP("0c22:384e:0:0c22:384e::68")
|
||||
v4 = net.ParseIP("12.13.14.15")
|
||||
v4s = net.ParseIP("12.13.14.15").To4()
|
||||
|
||||
ipStoreTester = store.PrepareIPStoreTester(&ipStoreDriver{})
|
||||
ipStoreBenchmarker = store.PrepareIPStoreBenchmarker(&ipStoreDriver{})
|
||||
ipStoreTestConfig = &store.DriverConfig{}
|
||||
)
|
||||
|
||||
func TestKey(t *testing.T) {
|
||||
var table = []struct {
|
||||
input net.IP
|
||||
expected [16]byte
|
||||
}{
|
||||
{v6, [16]byte{12, 34, 56, 78, 0, 0, 12, 34, 56, 78, 0, 0, 0, 0, 0, 104}},
|
||||
{v4, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 12, 13, 14, 15}}, // IPv4 in IPv6 prefix
|
||||
{v4s, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 12, 13, 14, 15}}, // is equal to the one above, should produce equal output
|
||||
}
|
||||
|
||||
for _, tt := range table {
|
||||
got := key(tt.input)
|
||||
require.Equal(t, got, tt.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIPStore(t *testing.T) {
|
||||
ipStoreTester.TestIPStore(t, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func TestHasAllHasAny(t *testing.T) {
|
||||
ipStoreTester.TestHasAllHasAny(t, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func TestNetworks(t *testing.T) {
|
||||
ipStoreTester.TestNetworks(t, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func TestHasAllHasAnyNetworks(t *testing.T) {
|
||||
ipStoreTester.TestHasAllHasAnyNetworks(t, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddV4(b *testing.B) {
|
||||
ipStoreBenchmarker.AddV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddV6(b *testing.B) {
|
||||
ipStoreBenchmarker.AddV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupV4(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupV6(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemoveV4(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemoveV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemoveV6(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemoveV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupNonExistV4(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupNonExistV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupNonExistV6(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupNonExistV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExistV4(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExistV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExistV6(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExistV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.AddV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.AddV6Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupV6Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemoveV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemoveV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemoveV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemoveV6Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExistV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExistV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExistV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExistV6Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Add1KV4(b *testing.B) {
|
||||
ipStoreBenchmarker.Add1KV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Add1KV6(b *testing.B) {
|
||||
ipStoreBenchmarker.Add1KV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Lookup1KV4(b *testing.B) {
|
||||
ipStoreBenchmarker.Lookup1KV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Lookup1KV6(b *testing.B) {
|
||||
ipStoreBenchmarker.Lookup1KV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemove1KV4(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemove1KV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemove1KV6(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemove1KV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupNonExist1KV4(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupNonExist1KV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_LookupNonExist1KV6(b *testing.B) {
|
||||
ipStoreBenchmarker.LookupNonExist1KV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExist1KV4(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExist1KV4(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExist1KV6(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExist1KV6(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Add1KV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.Add1KV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Add1KV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.Add1KV6Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Lookup1KV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.Lookup1KV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_Lookup1KV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.Lookup1KV6Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemove1KV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemove1KV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_AddRemove1KV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.AddRemove1KV6Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExist1KV4Network(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExist1KV4Network(b, ipStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkIPStore_RemoveNonExist1KV6Network(b *testing.B) {
|
||||
ipStoreBenchmarker.RemoveNonExist1KV6Network(b, ipStoreTestConfig)
|
||||
}
|
|
@@ -1,478 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
)
|
||||
|
||||
func init() {
|
||||
store.RegisterPeerStoreDriver("memory", &peerStoreDriver{})
|
||||
}
|
||||
|
||||
type peerStoreDriver struct{}
|
||||
|
||||
func (d *peerStoreDriver) New(storecfg *store.DriverConfig) (store.PeerStore, error) {
|
||||
cfg, err := newPeerStoreConfig(storecfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
shards := make([]*peerShard, cfg.Shards)
|
||||
for i := 0; i < cfg.Shards; i++ {
|
||||
shards[i] = &peerShard{}
|
||||
shards[i].swarms = make(map[chihaya.InfoHash]swarm)
|
||||
}
|
||||
return &peerStore{
|
||||
shards: shards,
|
||||
closed: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type peerStoreConfig struct {
|
||||
Shards int `yaml:"shards"`
|
||||
}
|
||||
|
||||
func newPeerStoreConfig(storecfg *store.DriverConfig) (*peerStoreConfig, error) {
|
||||
bytes, err := yaml.Marshal(storecfg.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg peerStoreConfig
|
||||
err = yaml.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.Shards < 1 {
|
||||
cfg.Shards = 1
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
type serializedPeer string
|
||||
|
||||
type peerShard struct {
|
||||
swarms map[chihaya.InfoHash]swarm
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
type swarm struct {
|
||||
// map serialized peer to mtime
|
||||
seeders map[serializedPeer]int64
|
||||
leechers map[serializedPeer]int64
|
||||
}
|
||||
|
||||
type peerStore struct {
|
||||
shards []*peerShard
|
||||
closed chan struct{}
|
||||
}
|
||||
|
||||
var _ store.PeerStore = &peerStore{}
|
||||
|
||||
func (s *peerStore) shardIndex(infoHash chihaya.InfoHash) uint32 {
|
||||
return binary.BigEndian.Uint32(infoHash[:4]) % uint32(len(s.shards))
|
||||
}
|
||||
|
||||
func peerKey(p chihaya.Peer) serializedPeer {
|
||||
b := make([]byte, 20+2+len(p.IP))
|
||||
copy(b[:20], p.ID[:])
|
||||
binary.BigEndian.PutUint16(b[20:22], p.Port)
|
||||
copy(b[22:], p.IP)
|
||||
|
||||
return serializedPeer(b)
|
||||
}
|
||||
|
||||
func decodePeerKey(pk serializedPeer) chihaya.Peer {
|
||||
return chihaya.Peer{
|
||||
ID: chihaya.PeerIDFromString(string(pk[:20])),
|
||||
Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
|
||||
IP: net.IP(pk[22:]),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *peerStore) PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.swarms[infoHash] = swarm{
|
||||
seeders: make(map[serializedPeer]int64),
|
||||
leechers: make(map[serializedPeer]int64),
|
||||
}
|
||||
}
|
||||
|
||||
shard.swarms[infoHash].seeders[peerKey(p)] = time.Now().UnixNano()
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
pk := peerKey(p)
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.Unlock()
|
||||
return store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
if _, ok := shard.swarms[infoHash].seeders[pk]; !ok {
|
||||
shard.Unlock()
|
||||
return store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
delete(shard.swarms[infoHash].seeders, pk)
|
||||
|
||||
if len(shard.swarms[infoHash].seeders)|len(shard.swarms[infoHash].leechers) == 0 {
|
||||
delete(shard.swarms, infoHash)
|
||||
}
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.swarms[infoHash] = swarm{
|
||||
seeders: make(map[serializedPeer]int64),
|
||||
leechers: make(map[serializedPeer]int64),
|
||||
}
|
||||
}
|
||||
|
||||
shard.swarms[infoHash].leechers[peerKey(p)] = time.Now().UnixNano()
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
pk := peerKey(p)
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.Unlock()
|
||||
return store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
if _, ok := shard.swarms[infoHash].leechers[pk]; !ok {
|
||||
shard.Unlock()
|
||||
return store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
delete(shard.swarms[infoHash].leechers, pk)
|
||||
|
||||
if len(shard.swarms[infoHash].seeders)|len(shard.swarms[infoHash].leechers) == 0 {
|
||||
delete(shard.swarms, infoHash)
|
||||
}
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
key := peerKey(p)
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.Lock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.swarms[infoHash] = swarm{
|
||||
seeders: make(map[serializedPeer]int64),
|
||||
leechers: make(map[serializedPeer]int64),
|
||||
}
|
||||
}
|
||||
|
||||
delete(shard.swarms[infoHash].leechers, key)
|
||||
|
||||
shard.swarms[infoHash].seeders[key] = time.Now().UnixNano()
|
||||
|
||||
shard.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) CollectGarbage(cutoff time.Time) error {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
log.Printf("memory: collecting garbage. Cutoff time: %s", cutoff.String())
|
||||
cutoffUnix := cutoff.UnixNano()
|
||||
for _, shard := range s.shards {
|
||||
shard.RLock()
|
||||
var infohashes []chihaya.InfoHash
|
||||
for key := range shard.swarms {
|
||||
infohashes = append(infohashes, key)
|
||||
}
|
||||
shard.RUnlock()
|
||||
runtime.Gosched()
|
||||
|
||||
for _, infohash := range infohashes {
|
||||
shard.Lock()
|
||||
|
||||
for peerKey, mtime := range shard.swarms[infohash].leechers {
|
||||
if mtime <= cutoffUnix {
|
||||
delete(shard.swarms[infohash].leechers, peerKey)
|
||||
}
|
||||
}
|
||||
|
||||
for peerKey, mtime := range shard.swarms[infohash].seeders {
|
||||
if mtime <= cutoffUnix {
|
||||
delete(shard.swarms[infohash].seeders, peerKey)
|
||||
}
|
||||
}
|
||||
|
||||
if len(shard.swarms[infohash].seeders)|len(shard.swarms[infohash].leechers) == 0 {
|
||||
delete(shard.swarms, infohash)
|
||||
}
|
||||
|
||||
shard.Unlock()
|
||||
runtime.Gosched()
|
||||
}
|
||||
|
||||
runtime.Gosched()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *peerStore) AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers, peers6 []chihaya.Peer, err error) {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.RLock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.RUnlock()
|
||||
return nil, nil, store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
if seeder {
|
||||
// Append as many leechers as possible.
|
||||
leechers := shard.swarms[infoHash].leechers
|
||||
for p := range leechers {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if decodedPeer.IP.To4() == nil {
|
||||
peers6 = append(peers6, decodedPeer)
|
||||
} else {
|
||||
peers = append(peers, decodedPeer)
|
||||
}
|
||||
numWant--
|
||||
}
|
||||
} else {
|
||||
// Append as many seeders as possible.
|
||||
seeders := shard.swarms[infoHash].seeders
|
||||
for p := range seeders {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if decodedPeer.IP.To4() == nil {
|
||||
peers6 = append(peers6, decodedPeer)
|
||||
} else {
|
||||
peers = append(peers, decodedPeer)
|
||||
}
|
||||
numWant--
|
||||
}
|
||||
|
||||
// Append leechers until we reach numWant.
|
||||
leechers := shard.swarms[infoHash].leechers
|
||||
if numWant > 0 {
|
||||
for p := range leechers {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
if numWant == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if decodedPeer.IP.To4() == nil {
|
||||
if decodedPeer.Equal(peer6) {
|
||||
continue
|
||||
}
|
||||
peers6 = append(peers6, decodedPeer)
|
||||
} else {
|
||||
if decodedPeer.Equal(peer4) {
|
||||
continue
|
||||
}
|
||||
peers = append(peers, decodedPeer)
|
||||
}
|
||||
numWant--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
shard.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *peerStore) GetSeeders(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.RLock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.RUnlock()
|
||||
return nil, nil, store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
seeders := shard.swarms[infoHash].seeders
|
||||
for p := range seeders {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
if decodedPeer.IP.To4() == nil {
|
||||
peers6 = append(peers6, decodedPeer)
|
||||
} else {
|
||||
peers = append(peers, decodedPeer)
|
||||
}
|
||||
}
|
||||
|
||||
shard.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *peerStore) GetLeechers(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.RLock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.RUnlock()
|
||||
return nil, nil, store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
leechers := shard.swarms[infoHash].leechers
|
||||
for p := range leechers {
|
||||
decodedPeer := decodePeerKey(p)
|
||||
if decodedPeer.IP.To4() == nil {
|
||||
peers6 = append(peers6, decodedPeer)
|
||||
} else {
|
||||
peers = append(peers, decodedPeer)
|
||||
}
|
||||
}
|
||||
|
||||
shard.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *peerStore) NumSeeders(infoHash chihaya.InfoHash) int {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.RLock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.RUnlock()
|
||||
return 0
|
||||
}
|
||||
|
||||
numSeeders := len(shard.swarms[infoHash].seeders)
|
||||
|
||||
shard.RUnlock()
|
||||
return numSeeders
|
||||
}
|
||||
|
||||
func (s *peerStore) NumLeechers(infoHash chihaya.InfoHash) int {
|
||||
select {
|
||||
case <-s.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
shard := s.shards[s.shardIndex(infoHash)]
|
||||
shard.RLock()
|
||||
|
||||
if _, ok := shard.swarms[infoHash]; !ok {
|
||||
shard.RUnlock()
|
||||
return 0
|
||||
}
|
||||
|
||||
numLeechers := len(shard.swarms[infoHash].leechers)
|
||||
|
||||
shard.RUnlock()
|
||||
return numLeechers
|
||||
}
|
||||
|
||||
func (s *peerStore) Stop() <-chan error {
|
||||
toReturn := make(chan error)
|
||||
go func() {
|
||||
shards := make([]*peerShard, len(s.shards))
|
||||
for i := 0; i < len(s.shards); i++ {
|
||||
shards[i] = &peerShard{}
|
||||
shards[i].swarms = make(map[chihaya.InfoHash]swarm)
|
||||
}
|
||||
s.shards = shards
|
||||
close(s.closed)
|
||||
close(toReturn)
|
||||
}()
|
||||
return toReturn
|
||||
}
|
|
@@ -1,142 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
)
|
||||
|
||||
var (
|
||||
peerStoreTester = store.PreparePeerStoreTester(&peerStoreDriver{})
|
||||
peerStoreBenchmarker = store.PreparePeerStoreBenchmarker(&peerStoreDriver{})
|
||||
peerStoreTestConfig = &store.DriverConfig{}
|
||||
)
|
||||
|
||||
func init() {
|
||||
unmarshalledConfig := struct {
|
||||
Shards int
|
||||
}{
|
||||
1,
|
||||
}
|
||||
peerStoreTestConfig.Config = unmarshalledConfig
|
||||
}
|
||||
|
||||
func TestPeerStore(t *testing.T) {
|
||||
peerStoreTester.TestPeerStore(t, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutSeeder(b *testing.B) {
|
||||
peerStoreBenchmarker.PutSeeder(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutSeeder1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.PutSeeder1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutSeeder1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.PutSeeder1KSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutSeeder1KInfohash1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.PutSeeder1KInfohash1KSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutDeleteSeeder(b *testing.B) {
|
||||
peerStoreBenchmarker.PutDeleteSeeder(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutDeleteSeeder1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.PutDeleteSeeder1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutDeleteSeeder1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.PutDeleteSeeder1KSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutDeleteSeeder1KInfohash1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.PutDeleteSeeder1KInfohash1KSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_DeleteSeederNonExist(b *testing.B) {
|
||||
peerStoreBenchmarker.DeleteSeederNonExist(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.DeleteSeederNonExist1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_DeleteSeederNonExist1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.DeleteSeederNonExist1KSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.DeleteSeederNonExist1KInfohash1KSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutGraduateDeleteLeecher(b *testing.B) {
|
||||
peerStoreBenchmarker.PutGraduateDeleteLeecher(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutGraduateDeleteLeecher1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.PutGraduateDeleteLeecher1KLeechers(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash1KLeechers(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_GraduateLeecherNonExist(b *testing.B) {
|
||||
peerStoreBenchmarker.GraduateLeecherNonExist(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_GraduateLeecherNonExist1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.GraduateLeecherNonExist1KLeechers(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash1KSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash1KLeechers(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_AnnouncePeers(b *testing.B) {
|
||||
peerStoreBenchmarker.AnnouncePeers(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_AnnouncePeers1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.AnnouncePeers1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_AnnouncePeersSeeder(b *testing.B) {
|
||||
peerStoreBenchmarker.AnnouncePeersSeeder(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_AnnouncePeersSeeder1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.AnnouncePeersSeeder1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_GetSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.GetSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_GetSeeders1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.GetSeeders1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_NumSeeders(b *testing.B) {
|
||||
peerStoreBenchmarker.NumSeeders(b, peerStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkPeerStore_NumSeeders1KInfohash(b *testing.B) {
|
||||
peerStoreBenchmarker.NumSeeders1KInfohash(b, peerStoreTestConfig)
|
||||
}
|
|
@@ -1,93 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
)
|
||||
|
||||
func init() {
|
||||
store.RegisterStringStoreDriver("memory", &stringStoreDriver{})
|
||||
}
|
||||
|
||||
type stringStoreDriver struct{}
|
||||
|
||||
func (d *stringStoreDriver) New(_ *store.DriverConfig) (store.StringStore, error) {
|
||||
return &stringStore{
|
||||
strings: make(map[string]struct{}),
|
||||
closed: make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type stringStore struct {
|
||||
strings map[string]struct{}
|
||||
closed chan struct{}
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
var _ store.StringStore = &stringStore{}
|
||||
|
||||
func (ss *stringStore) PutString(s string) error {
|
||||
ss.Lock()
|
||||
defer ss.Unlock()
|
||||
|
||||
select {
|
||||
case <-ss.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
ss.strings[s] = struct{}{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *stringStore) HasString(s string) (bool, error) {
|
||||
ss.RLock()
|
||||
defer ss.RUnlock()
|
||||
|
||||
select {
|
||||
case <-ss.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
_, ok := ss.strings[s]
|
||||
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ss *stringStore) RemoveString(s string) error {
|
||||
ss.Lock()
|
||||
defer ss.Unlock()
|
||||
|
||||
select {
|
||||
case <-ss.closed:
|
||||
panic("attempted to interact with stopped store")
|
||||
default:
|
||||
}
|
||||
|
||||
if _, ok := ss.strings[s]; !ok {
|
||||
return store.ErrResourceDoesNotExist
|
||||
}
|
||||
|
||||
delete(ss.strings, s)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *stringStore) Stop() <-chan error {
|
||||
toReturn := make(chan error)
|
||||
go func() {
|
||||
ss.Lock()
|
||||
defer ss.Unlock()
|
||||
ss.strings = make(map[string]struct{})
|
||||
close(ss.closed)
|
||||
close(toReturn)
|
||||
}()
|
||||
return toReturn
|
||||
}
|
|
@@ -1,101 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package memory
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
)
|
||||
|
||||
var (
|
||||
stringStoreTester = store.PrepareStringStoreTester(&stringStoreDriver{})
|
||||
stringStoreBenchmarker = store.PrepareStringStoreBenchmarker(&stringStoreDriver{})
|
||||
stringStoreTestConfig = &store.DriverConfig{}
|
||||
)
|
||||
|
||||
func TestStringStore(t *testing.T) {
|
||||
stringStoreTester.TestStringStore(t, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_AddShort(b *testing.B) {
|
||||
stringStoreBenchmarker.AddShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_AddLong(b *testing.B) {
|
||||
stringStoreBenchmarker.AddLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_LookupShort(b *testing.B) {
|
||||
stringStoreBenchmarker.LookupShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_LookupLong(b *testing.B) {
|
||||
stringStoreBenchmarker.LookupLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_AddRemoveShort(b *testing.B) {
|
||||
stringStoreBenchmarker.AddRemoveShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_AddRemoveLong(b *testing.B) {
|
||||
stringStoreBenchmarker.AddRemoveLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_LookupNonExistShort(b *testing.B) {
|
||||
stringStoreBenchmarker.LookupNonExistShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_LookupNonExistLong(b *testing.B) {
|
||||
stringStoreBenchmarker.LookupNonExistLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_RemoveNonExistShort(b *testing.B) {
|
||||
stringStoreBenchmarker.RemoveNonExistShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_RemoveNonExistLong(b *testing.B) {
|
||||
stringStoreBenchmarker.RemoveNonExistLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_Add1KShort(b *testing.B) {
|
||||
stringStoreBenchmarker.Add1KShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_Add1KLong(b *testing.B) {
|
||||
stringStoreBenchmarker.Add1KLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_Lookup1KShort(b *testing.B) {
|
||||
stringStoreBenchmarker.Lookup1KShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_Lookup1KLong(b *testing.B) {
|
||||
stringStoreBenchmarker.Lookup1KLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_AddRemove1KShort(b *testing.B) {
|
||||
stringStoreBenchmarker.AddRemove1KShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_AddRemove1KLong(b *testing.B) {
|
||||
stringStoreBenchmarker.AddRemove1KLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_LookupNonExist1KShort(b *testing.B) {
|
||||
stringStoreBenchmarker.LookupNonExist1KShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_LookupNonExist1KLong(b *testing.B) {
|
||||
stringStoreBenchmarker.LookupNonExist1KLong(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_RemoveNonExist1KShort(b *testing.B) {
|
||||
stringStoreBenchmarker.RemoveNonExist1KShort(b, stringStoreTestConfig)
|
||||
}
|
||||
|
||||
func BenchmarkStringStore_RemoveNonExist1KLong(b *testing.B) {
|
||||
stringStoreBenchmarker.RemoveNonExist1KLong(b, stringStoreTestConfig)
|
||||
}
@@ -1,25 +0,0 @@
## Client Blacklisting/Whitelisting Middlewares

This package provides the announce middlewares `client_whitelist` and `client_blacklist` for blacklisting or whitelisting clients for announces.

### `client_blacklist`

The `client_blacklist` middleware uses all clientIDs stored in the `StringStore` to blacklist, i.e. block announces.

The clientID part of the peerID of an announce is matched against the `StringStore`; if it is contained within the `StringStore`, the announce is aborted.

### `client_whitelist`

The `client_whitelist` middleware uses all clientIDs stored in the `StringStore` to whitelist, i.e. allow announces.

The clientID part of the peerID of an announce is matched against the `StringStore`; if it is _not_ contained within the `StringStore`, the announce is aborted.

### Important things to notice

Both middlewares operate on announce requests only.

Both middlewares use the same `StringStore`.
It is therefore not advised to have both the `client_blacklist` and the `client_whitelist` middleware running.
(If you add a clientID to the `StringStore`, it will be used for blacklisting and whitelisting.
If your store contains no clientIDs, no announces will be blocked by the blacklist, but all announces will be blocked by the whitelist.
If your store contains all clientIDs, no announces will be blocked by the whitelist, but all announces will be blocked by the blacklist.)
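
For illustration, a minimal sketch of how a client might be whitelisted (or blacklisted) ahead of time. The helper name and example peer ID are hypothetical, and the sketch assumes the store returned by `store.MustGetStore()` exposes the `StringStore` methods directly, as this package's own `HasString` calls do; `clientid.New` and the `c-` prefix (`PrefixClient`) are taken from this package:

    package main

    import (
        "log"

        "github.com/chihaya/chihaya/pkg/clientid"
        "github.com/chihaya/chihaya/server/store"
    )

    // allowClient derives the clientID from a full peer ID, exactly as the
    // middleware does, and stores it under the "c-" prefix (PrefixClient).
    func allowClient(peerID string) {
        if err := store.MustGetStore().PutString("c-" + clientid.New(peerID)); err != nil {
            log.Fatal(err)
        }
    }

    func main() {
        allowClient("-TR2840-k8hj0wgej6ch") // illustrative 20-byte peer ID
    }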
@@ -1,34 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package client

import (
	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/pkg/clientid"
	"github.com/chihaya/chihaya/server/store"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	tracker.RegisterAnnounceMiddleware("client_blacklist", blacklistAnnounceClient)
}

// ErrBlacklistedClient is returned by an announce middleware if the announcing
// Client is blacklisted.
var ErrBlacklistedClient = tracker.ClientError("client blacklisted")

// blacklistAnnounceClient provides a middleware that only allows Clients to
// announce that are not stored in the StringStore.
func blacklistAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		blacklisted, err := store.MustGetStore().HasString(PrefixClient + clientid.New(string(req.PeerID[:])))
		if err != nil {
			return err
		} else if blacklisted {
			return ErrBlacklistedClient
		}
		return next(cfg, req, resp)
	}
}
@@ -1,37 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package client

import (
	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/pkg/clientid"
	"github.com/chihaya/chihaya/server/store"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	tracker.RegisterAnnounceMiddleware("client_whitelist", whitelistAnnounceClient)
}

// PrefixClient is the prefix to be used for client peer IDs.
const PrefixClient = "c-"

// ErrNotWhitelistedClient is returned by an announce middleware if the
// announcing Client is not whitelisted.
var ErrNotWhitelistedClient = tracker.ClientError("client not whitelisted")

// whitelistAnnounceClient provides a middleware that only allows Clients to
// announce that are stored in the StringStore.
func whitelistAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		whitelisted, err := store.MustGetStore().HasString(PrefixClient + clientid.New(string(req.PeerID[:])))
		if err != nil {
			return err
		} else if !whitelisted {
			return ErrNotWhitelistedClient
		}
		return next(cfg, req, resp)
	}
}
@@ -1,69 +0,0 @@
## Infohash Blacklisting/Whitelisting Middlewares

This package provides the announce middlewares `infohash_blacklist` and `infohash_whitelist` for blacklisting or whitelisting infohashes.
It also provides the configurable scrape middlewares `infohash_blacklist` and `infohash_whitelist` for blacklisting or whitelisting infohashes.

### `infohash_blacklist`

#### For Announces

The `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to blacklist, i.e. block announces.

#### For Scrapes

The configurable `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to blacklist scrape requests.

The scrape middleware has two modes of operation: _Block_ and _Filter_.

- _Block_ will drop a scrape request if it contains a blacklisted infohash.
- _Filter_ will remove all blacklisted infohashes from a scrape request, potentially leaving behind an empty scrape request.
  **IMPORTANT**: This mode **does not work with UDP servers**.

See the configuration section for information about how to configure the scrape middleware.

### `infohash_whitelist`

#### For Announces

The `infohash_whitelist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to whitelist, i.e. allow announces.

#### For Scrapes

The configurable `infohash_whitelist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to whitelist scrape requests.

The scrape middleware has two modes of operation: _Block_ and _Filter_.

- _Block_ will drop a scrape request if it contains a non-whitelisted infohash.
- _Filter_ will remove all non-whitelisted infohashes from a scrape request, potentially leaving behind an empty scrape request.
  **IMPORTANT**: This mode **does not work with UDP servers**.

See the configuration section for information about how to configure the scrape middleware.

### Important things to notice

Both the blacklist and the whitelist middlewares use the same `StringStore`.
It is therefore not advised to have both the `infohash_blacklist` and the `infohash_whitelist` announce or scrape middleware running.
(If you add an infohash to the `StringStore`, it will be used for blacklisting and whitelisting.
If your store contains no infohashes, no announces/scrapes will be blocked by the blacklist, but all will be blocked by the whitelist.
If your store contains all infohashes, no announces/scrapes will be blocked by the whitelist, but all will be blocked by the blacklist.)

Also note that the announce and scrape middlewares both use the same `StringStore`.
It is therefore not possible to use different infohashes for black-/whitelisting on announces and scrape requests.

### Configuration

The scrape middleware is configurable.

The configuration uses a single required parameter `mode` to determine the mode of operation for the middleware.
An example configuration might look like this:

    chihaya:
      tracker:
        scrape_middleware:
          - name: infohash_blacklist
            config:
              mode: block

`mode` accepts two values: `block` and `filter`.

**IMPORTANT**: The `filter` mode **does not work with UDP servers**.
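
As a rough illustration, infohashes are expected to be stored with the `ih-` prefix (`PrefixInfohash`). The helper below is hypothetical; it reaches the `StringStore` the same way this package does (`store.MustGetStore().StringStore`), and the same entry is consulted by both the announce and the scrape variants of the middleware:

    package main

    import (
        "log"

        "github.com/chihaya/chihaya"
        "github.com/chihaya/chihaya/server/store"
    )

    // allowInfohash stores an infohash under the "ih-" prefix so the
    // infohash_whitelist middleware lets announces and scrapes for it
    // through (the same entry would make infohash_blacklist reject them).
    func allowInfohash(ih chihaya.InfoHash) {
        if err := store.MustGetStore().StringStore.PutString("ih-" + string(ih[:])); err != nil {
            log.Fatal(err)
        }
    }

    func main() {
        allowInfohash(chihaya.InfoHash([20]byte{1, 2, 3}))
    }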
@@ -1,106 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package infohash
|
||||
|
||||
import (
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
)
|
||||
|
||||
func init() {
|
||||
tracker.RegisterAnnounceMiddleware("infohash_blacklist", blacklistAnnounceInfohash)
|
||||
tracker.RegisterScrapeMiddlewareConstructor("infohash_blacklist", blacklistScrapeInfohash)
|
||||
mustGetStore = func() store.StringStore {
|
||||
return store.MustGetStore().StringStore
|
||||
}
|
||||
}
|
||||
|
||||
// ErrBlockedInfohash is returned by a middleware if any of the infohashes
|
||||
// contained in an announce or scrape are disallowed.
|
||||
var ErrBlockedInfohash = tracker.ClientError("disallowed infohash")
|
||||
|
||||
var mustGetStore func() store.StringStore
|
||||
|
||||
// blacklistAnnounceInfohash provides a middleware that only allows announces
|
||||
// for infohashes that are not stored in a StringStore.
|
||||
func blacklistAnnounceInfohash(next tracker.AnnounceHandler) tracker.AnnounceHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
|
||||
blacklisted, err := mustGetStore().HasString(PrefixInfohash + string(req.InfoHash[:]))
|
||||
if err != nil {
|
||||
return err
|
||||
} else if blacklisted {
|
||||
return ErrBlockedInfohash
|
||||
}
|
||||
|
||||
return next(cfg, req, resp)
|
||||
}
|
||||
}
|
||||
|
||||
// blacklistScrapeInfohash provides a middleware constructor for a middleware
|
||||
// that blocks or filters scrape requests based on the infohashes scraped.
|
||||
//
|
||||
// The middleware works in two modes: block and filter.
|
||||
// The block mode blocks a scrape completely if any of the infohashes is
|
||||
// disallowed.
|
||||
// The filter mode filters any disallowed infohashes from the scrape,
|
||||
// potentially leaving an empty scrape.
|
||||
//
|
||||
// ErrUnknownMode is returned if the Mode specified in the config is unknown.
|
||||
func blacklistScrapeInfohash(c chihaya.MiddlewareConfig) (tracker.ScrapeMiddleware, error) {
|
||||
cfg, err := newConfig(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch cfg.Mode {
|
||||
case ModeFilter:
|
||||
return blacklistFilterScrape, nil
|
||||
case ModeBlock:
|
||||
return blacklistBlockScrape, nil
|
||||
default:
|
||||
panic("unknown mode")
|
||||
}
|
||||
}
|
||||
|
||||
func blacklistFilterScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) {
|
||||
blacklisted := false
|
||||
storage := mustGetStore()
|
||||
infohashes := req.InfoHashes
|
||||
|
||||
for i, ih := range infohashes {
|
||||
blacklisted, err = storage.HasString(PrefixInfohash + string(ih[:]))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if blacklisted {
|
||||
req.InfoHashes[i] = req.InfoHashes[len(req.InfoHashes)-1]
|
||||
req.InfoHashes = req.InfoHashes[:len(req.InfoHashes)-1]
|
||||
}
|
||||
}
|
||||
|
||||
return next(cfg, req, resp)
|
||||
}
|
||||
}
|
||||
|
||||
func blacklistBlockScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) {
|
||||
blacklisted := false
|
||||
storage := mustGetStore()
|
||||
|
||||
for _, ih := range req.InfoHashes {
|
||||
blacklisted, err = storage.HasString(PrefixInfohash + string(ih[:]))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if blacklisted {
|
||||
return ErrBlockedInfohash
|
||||
}
|
||||
}
|
||||
|
||||
return next(cfg, req, resp)
|
||||
}
|
||||
}
|
|
@@ -1,140 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package infohash
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/pkg/stopper"
|
||||
"github.com/chihaya/chihaya/server/store"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
)
|
||||
|
||||
type storeMock struct {
|
||||
strings map[string]struct{}
|
||||
}
|
||||
|
||||
func (ss *storeMock) PutString(s string) error {
|
||||
ss.strings[s] = struct{}{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *storeMock) HasString(s string) (bool, error) {
|
||||
_, ok := ss.strings[s]
|
||||
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ss *storeMock) RemoveString(s string) error {
|
||||
delete(ss.strings, s)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *storeMock) Stop() <-chan error {
|
||||
return stopper.AlreadyStopped
|
||||
}
|
||||
|
||||
var mock store.StringStore = &storeMock{
|
||||
strings: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
var (
|
||||
ih1 = chihaya.InfoHash([20]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
|
||||
ih2 = chihaya.InfoHash([20]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
|
||||
)
|
||||
|
||||
func TestASetUp(t *testing.T) {
|
||||
mustGetStore = func() store.StringStore {
|
||||
return mock
|
||||
}
|
||||
|
||||
mustGetStore().PutString(PrefixInfohash + string(ih1[:]))
|
||||
}
|
||||
|
||||
func TestBlacklistAnnounceMiddleware(t *testing.T) {
|
||||
var (
|
||||
achain tracker.AnnounceChain
|
||||
req chihaya.AnnounceRequest
|
||||
resp chihaya.AnnounceResponse
|
||||
)
|
||||
|
||||
achain.Append(blacklistAnnounceInfohash)
|
||||
handler := achain.Handler()
|
||||
|
||||
err := handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
|
||||
req.InfoHash = chihaya.InfoHash(ih1)
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Equal(t, ErrBlockedInfohash, err)
|
||||
|
||||
req.InfoHash = chihaya.InfoHash(ih2)
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestBlacklistScrapeMiddlewareBlock(t *testing.T) {
|
||||
var (
|
||||
schain tracker.ScrapeChain
|
||||
req chihaya.ScrapeRequest
|
||||
resp chihaya.ScrapeResponse
|
||||
)
|
||||
|
||||
mw, err := blacklistScrapeInfohash(chihaya.MiddlewareConfig{
|
||||
Name: "blacklist_infohash",
|
||||
Config: Config{
|
||||
Mode: ModeBlock,
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
schain.Append(mw)
|
||||
handler := schain.Handler()
|
||||
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Equal(t, ErrBlockedInfohash, err)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih2)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestBlacklistScrapeMiddlewareFilter(t *testing.T) {
|
||||
var (
|
||||
schain tracker.ScrapeChain
|
||||
req chihaya.ScrapeRequest
|
||||
resp chihaya.ScrapeResponse
|
||||
)
|
||||
|
||||
mw, err := blacklistScrapeInfohash(chihaya.MiddlewareConfig{
|
||||
Name: "blacklist_infohash",
|
||||
Config: Config{
|
||||
Mode: ModeFilter,
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
schain.Append(mw)
|
||||
handler := schain.Handler()
|
||||
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih2)}, req.InfoHashes)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih2)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
}
|
|
@@ -1,56 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package infohash
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
)
|
||||
|
||||
// ErrUnknownMode is returned by a MiddlewareConstructor if the Mode specified
|
||||
// in the configuration is unknown.
|
||||
var ErrUnknownMode = errors.New("unknown mode")
|
||||
|
||||
// Mode represents the mode of operation for an infohash scrape middleware.
|
||||
type Mode string
|
||||
|
||||
const (
|
||||
// ModeFilter makes the middleware filter disallowed infohashes from a
|
||||
// scrape request.
|
||||
ModeFilter = Mode("filter")
|
||||
|
||||
// ModeBlock makes the middleware block a scrape request if it contains
|
||||
// at least one disallowed infohash.
|
||||
ModeBlock = Mode("block")
|
||||
)
|
||||
|
||||
// Config represents the configuration for an infohash scrape middleware.
|
||||
type Config struct {
|
||||
Mode Mode `yaml:"mode"`
|
||||
}
|
||||
|
||||
// newConfig parses the given MiddlewareConfig as an infohash.Config.
|
||||
// ErrUnknownMode is returned if the mode is unknown.
|
||||
func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) {
|
||||
bytes, err := yaml.Marshal(mwcfg.Config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
err = yaml.Unmarshal(bytes, &cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.Mode != ModeBlock && cfg.Mode != ModeFilter {
|
||||
return nil, ErrUnknownMode
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
|
@@ -1,56 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package infohash
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
configTemplate = `name: foo
|
||||
config:
|
||||
%s: %s`
|
||||
|
||||
data = []testData{
|
||||
{"mode", "block", false, ModeBlock},
|
||||
{"mode", "filter", false, ModeFilter},
|
||||
{"some", "stuff", true, ModeBlock},
|
||||
}
|
||||
)
|
||||
|
||||
type testData struct {
|
||||
key string
|
||||
value string
|
||||
err bool
|
||||
expected Mode
|
||||
}
|
||||
|
||||
func TestNewConfig(t *testing.T) {
|
||||
var mwconfig chihaya.MiddlewareConfig
|
||||
|
||||
cfg, err := newConfig(mwconfig)
|
||||
assert.NotNil(t, err)
|
||||
assert.Nil(t, cfg)
|
||||
|
||||
for _, test := range data {
|
||||
config := fmt.Sprintf(configTemplate, test.key, test.value)
|
||||
err = yaml.Unmarshal([]byte(config), &mwconfig)
|
||||
assert.Nil(t, err)
|
||||
|
||||
cfg, err = newConfig(mwconfig)
|
||||
if test.err {
|
||||
assert.NotNil(t, err)
|
||||
continue
|
||||
}
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, test.expected, cfg.Mode)
|
||||
}
|
||||
}
|
|
@@ -1,99 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package infohash
|
||||
|
||||
import (
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
)
|
||||
|
||||
func init() {
|
||||
tracker.RegisterAnnounceMiddleware("infohash_whitelist", whitelistAnnounceInfohash)
|
||||
tracker.RegisterScrapeMiddlewareConstructor("infohash_whitelist", whitelistScrapeInfohash)
|
||||
}
|
||||
|
||||
// PrefixInfohash is the prefix to be used for infohashes.
|
||||
const PrefixInfohash = "ih-"
|
||||
|
||||
// whitelistAnnounceInfohash provides a middleware that only allows announces
|
||||
// for infohashes that are not stored in a StringStore
|
||||
func whitelistAnnounceInfohash(next tracker.AnnounceHandler) tracker.AnnounceHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
|
||||
whitelisted, err := mustGetStore().HasString(PrefixInfohash + string(req.InfoHash[:]))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if !whitelisted {
|
||||
return ErrBlockedInfohash
|
||||
}
|
||||
return next(cfg, req, resp)
|
||||
}
|
||||
}
|
||||
|
||||
// whitelistScrapeInfohash provides a middleware constructor for a middleware
|
||||
// that blocks or filters scrape requests based on the infohashes scraped.
|
||||
//
|
||||
// The middleware works in two modes: block and filter.
|
||||
// The block mode blocks a scrape completely if any of the infohashes is
|
||||
// disallowed.
|
||||
// The filter mode filters any disallowed infohashes from the scrape,
|
||||
// potentially leaving an empty scrape.
|
||||
//
|
||||
// ErrUnknownMode is returned if the Mode specified in the config is unknown.
|
||||
func whitelistScrapeInfohash(c chihaya.MiddlewareConfig) (tracker.ScrapeMiddleware, error) {
|
||||
cfg, err := newConfig(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch cfg.Mode {
|
||||
case ModeFilter:
|
||||
return whitelistFilterScrape, nil
|
||||
case ModeBlock:
|
||||
return whitelistBlockScrape, nil
|
||||
default:
|
||||
panic("unknown mode")
|
||||
}
|
||||
}
|
||||
|
||||
func whitelistFilterScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) {
|
||||
whitelisted := false
|
||||
storage := mustGetStore()
|
||||
infohashes := req.InfoHashes
|
||||
|
||||
for i, ih := range infohashes {
|
||||
whitelisted, err = storage.HasString(PrefixInfohash + string(ih[:]))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if !whitelisted {
|
||||
req.InfoHashes[i] = req.InfoHashes[len(req.InfoHashes)-1]
|
||||
req.InfoHashes = req.InfoHashes[:len(req.InfoHashes)-1]
|
||||
}
|
||||
}
|
||||
|
||||
return next(cfg, req, resp)
|
||||
}
|
||||
}
|
||||
|
||||
func whitelistBlockScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
|
||||
return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) {
|
||||
whitelisted := false
|
||||
storage := mustGetStore()
|
||||
|
||||
for _, ih := range req.InfoHashes {
|
||||
whitelisted, err = storage.HasString(PrefixInfohash + string(ih[:]))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if !whitelisted {
|
||||
return ErrBlockedInfohash
|
||||
}
|
||||
}
|
||||
|
||||
return next(cfg, req, resp)
|
||||
}
|
||||
}
|
|
@@ -1,96 +0,0 @@
|
|||
// Copyright 2016 The Chihaya Authors. All rights reserved.
|
||||
// Use of this source code is governed by the BSD 2-Clause license,
|
||||
// which can be found in the LICENSE file.
|
||||
|
||||
package infohash
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/chihaya/chihaya"
|
||||
"github.com/chihaya/chihaya/tracker"
|
||||
)
|
||||
|
||||
func TestWhitelistAnnounceMiddleware(t *testing.T) {
|
||||
var (
|
||||
achain tracker.AnnounceChain
|
||||
req chihaya.AnnounceRequest
|
||||
resp chihaya.AnnounceResponse
|
||||
)
|
||||
|
||||
achain.Append(whitelistAnnounceInfohash)
|
||||
handler := achain.Handler()
|
||||
|
||||
err := handler(nil, &req, &resp)
|
||||
assert.Equal(t, ErrBlockedInfohash, err)
|
||||
|
||||
req.InfoHash = chihaya.InfoHash(ih2)
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Equal(t, ErrBlockedInfohash, err)
|
||||
|
||||
req.InfoHash = chihaya.InfoHash(ih1)
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestWhitelistScrapeMiddlewareBlock(t *testing.T) {
|
||||
var (
|
||||
schain tracker.ScrapeChain
|
||||
req chihaya.ScrapeRequest
|
||||
resp chihaya.ScrapeResponse
|
||||
)
|
||||
|
||||
mw, err := whitelistScrapeInfohash(chihaya.MiddlewareConfig{
|
||||
Name: "whitelist_infohash",
|
||||
Config: Config{
|
||||
Mode: ModeBlock,
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
schain.Append(mw)
|
||||
handler := schain.Handler()
|
||||
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Equal(t, ErrBlockedInfohash, err)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestWhitelistScrapeMiddlewareFilter(t *testing.T) {
|
||||
var (
|
||||
schain tracker.ScrapeChain
|
||||
req chihaya.ScrapeRequest
|
||||
resp chihaya.ScrapeResponse
|
||||
)
|
||||
|
||||
mw, err := whitelistScrapeInfohash(chihaya.MiddlewareConfig{
|
||||
Name: "whitelist_infohash",
|
||||
Config: Config{
|
||||
Mode: ModeFilter,
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
schain.Append(mw)
|
||||
handler := schain.Handler()
|
||||
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih1)}, req.InfoHashes)
|
||||
|
||||
req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1)}
|
||||
err = handler(nil, &req, &resp)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih1)}, req.InfoHashes)
|
||||
}
@@ -1,32 +0,0 @@
## IP Blacklisting/Whitelisting Middlewares

This package provides the announce middlewares `ip_blacklist` and `ip_whitelist` for blacklisting or whitelisting IP addresses and networks for announces.

### `ip_blacklist`

The `ip_blacklist` middleware uses all IP addresses and networks stored in the `IPStore` to blacklist, i.e. block announces.

Both the IPv4 and the IPv6 addresses contained in the announce are matched against the `IPStore`.
If one or both of the two are contained in the `IPStore`, the announce will be rejected _completely_.

### `ip_whitelist`

The `ip_whitelist` middleware uses all IP addresses and networks stored in the `IPStore` to whitelist, i.e. allow announces.

If present, both the IPv4 and the IPv6 addresses contained in the announce are matched against the `IPStore`.
Only if all IP addresses that are present in the announce are also present in the `IPStore` will the announce be allowed; otherwise it will be rejected _completely_.

### Important things to notice

Both middlewares operate on announce requests only.
The middlewares check the IPv4 and IPv6 addresses a client announces to the tracker against an `IPStore`.
Normally the IP address embedded in the announce is the public IP address of the machine the client is running on.
Note, however, that a client can override this behaviour by specifying an IP address in the announce itself.
_These middlewares do not (dis)allow announces coming from certain IP addresses, but announces containing certain IP addresses_.
Always keep that in mind.

Both middlewares use the same `IPStore`.
It is therefore not advised to have both the `ip_blacklist` and the `ip_whitelist` middleware running.
(If you add an IP address or network to the `IPStore`, it will be used for blacklisting and whitelisting.
If your store contains no addresses, no announces will be blocked by the blacklist, but all announces will be blocked by the whitelist.
If your store contains all addresses, no announces will be blocked by the whitelist, but all announces will be blocked by the blacklist.)
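
For illustration, a sketch of seeding the `IPStore` before enabling one of the middlewares. `HasIP`/`HasAnyIP`/`HasAllIPs` are what the middlewares use; the `AddIP` and `AddNetwork` calls below are assumed names (the store's benchmarks suggest per-address and per-network insertion), so check the store package for the exact API:

    package main

    import (
        "log"
        "net"

        "github.com/chihaya/chihaya/server/store"
    )

    func main() {
        storage := store.MustGetStore()

        // Assumed API: add a single address and a whole network to the IPStore.
        if err := storage.AddIP(net.ParseIP("203.0.113.7")); err != nil {
            log.Fatal(err)
        }
        if err := storage.AddNetwork("2001:db8::/32"); err != nil {
            log.Fatal(err)
        }

        // With ip_blacklist enabled, an announce containing either of these
        // addresses is rejected; with ip_whitelist, only announces whose
        // addresses are all covered by such entries are allowed.
    }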
@@ -1,47 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package ip

import (
	"net"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/server/store"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	tracker.RegisterAnnounceMiddleware("ip_blacklist", blacklistAnnounceIP)
}

// ErrBlockedIP is returned by an announce middleware if any of the announcing
// IPs is disallowed.
var ErrBlockedIP = tracker.ClientError("disallowed IP address")

// blacklistAnnounceIP provides a middleware that only allows IPs to announce
// that are not stored in an IPStore.
func blacklistAnnounceIP(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		blacklisted := false
		storage := store.MustGetStore()

		// We have to check explicitly if they are present, because someone
		// could have added a <nil> net.IP to the store.
		if req.IPv6 != nil && req.IPv4 != nil {
			blacklisted, err = storage.HasAnyIP([]net.IP{req.IPv4, req.IPv6})
		} else if req.IPv4 != nil {
			blacklisted, err = storage.HasIP(req.IPv4)
		} else {
			blacklisted, err = storage.HasIP(req.IPv6)
		}

		if err != nil {
			return err
		} else if blacklisted {
			return ErrBlockedIP
		}
		return next(cfg, req, resp)
	}
}
@@ -1,43 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package ip

import (
	"net"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/server/store"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	tracker.RegisterAnnounceMiddleware("ip_whitelist", whitelistAnnounceIP)
}

// whitelistAnnounceIP provides a middleware that only allows IPs to announce
// that are stored in an IPStore.
func whitelistAnnounceIP(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		whitelisted := false
		storage := store.MustGetStore()

		// We have to check explicitly if they are present, because someone
		// could have added a <nil> net.IP to the store.
		if req.IPv4 != nil && req.IPv6 != nil {
			whitelisted, err = storage.HasAllIPs([]net.IP{req.IPv4, req.IPv6})
		} else if req.IPv4 != nil {
			whitelisted, err = storage.HasIP(req.IPv4)
		} else {
			whitelisted, err = storage.HasIP(req.IPv6)
		}

		if err != nil {
			return err
		} else if !whitelisted {
			return ErrBlockedIP
		}
		return next(cfg, req, resp)
	}
}
@@ -1,11 +0,0 @@
## Response Middleware

This package provides the middleware that builds the final response for a middleware chain using the `store` package.

### `store_response`

The `store_response` middleware uses the peer data stored in the `peerStore` to create a response for the request.

### Important things to notice

This middleware is very basic and may not do everything that you require.
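
In condensed form, the announce half of the middleware fills the response roughly as sketched below. This is a simplified restatement of the handler in this package, not a drop-in replacement; the real handler also sets `MinInterval` and `Compact` and wraps the error:

    package example

    import (
        "github.com/chihaya/chihaya"
        "github.com/chihaya/chihaya/server/store"
    )

    // fillAnnounce is a condensed sketch of what the store_response announce
    // middleware does with a request before handing off to the next handler.
    func fillAnnounce(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
        storage := store.MustGetStore()
        resp.Interval = cfg.AnnounceInterval
        resp.Complete = int32(storage.NumSeeders(req.InfoHash))
        resp.Incomplete = int32(storage.NumLeechers(req.InfoHash))
        var err error
        resp.IPv4Peers, resp.IPv6Peers, err = storage.AnnouncePeers(req.InfoHash, req.Left == 0, int(req.NumWant), req.Peer4(), req.Peer6())
        return err
    }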
@@ -1,59 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package response

import (
	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/server/store"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	tracker.RegisterAnnounceMiddleware("store_response", responseAnnounceClient)
	tracker.RegisterScrapeMiddleware("store_response", responseScrapeClient)
}

// FailedToRetrievePeers represents an error that is returned when
// attempting to fetch peers from the store fails.
type FailedToRetrievePeers string

// Error satisfies the error interface for FailedToRetrievePeers.
func (f FailedToRetrievePeers) Error() string { return string(f) }

// responseAnnounceClient provides a middleware to make a response to an
// announce based on the current request.
func responseAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		storage := store.MustGetStore()

		resp.Interval = cfg.AnnounceInterval
		resp.MinInterval = cfg.MinAnnounceInterval
		resp.Compact = req.Compact
		resp.Complete = int32(storage.NumSeeders(req.InfoHash))
		resp.Incomplete = int32(storage.NumLeechers(req.InfoHash))
		resp.IPv4Peers, resp.IPv6Peers, err = storage.AnnouncePeers(req.InfoHash, req.Left == 0, int(req.NumWant), req.Peer4(), req.Peer6())
		if err != nil {
			return FailedToRetrievePeers(err.Error())
		}

		return next(cfg, req, resp)
	}
}

// responseScrapeClient provides a middleware to make a response to a
// scrape based on the current request.
func responseScrapeClient(next tracker.ScrapeHandler) tracker.ScrapeHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) {
		storage := store.MustGetStore()
		for _, infoHash := range req.InfoHashes {
			resp.Files[infoHash] = chihaya.Scrape{
				Complete:   int32(storage.NumSeeders(infoHash)),
				Incomplete: int32(storage.NumLeechers(infoHash)),
			}
		}

		return next(cfg, req, resp)
	}
}
|
|
@ -1,12 +0,0 @@
## Swarm Interaction Middleware

This package provides the announce middleware that modifies peer data stored in the `store` package.

### `store_swarm_interaction`

The `store_swarm_interaction` middleware updates the data stored in the `peerStore` based on the announce.

### Important things to notice

It is recommended to have this middleware run before the `store_response` middleware.
The `store_response` middleware assumes the store has already been updated by the announce.
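The ordering requirement above maps directly onto the order in which middleware are appended to an `AnnounceChain` (defined later in this commit): the middleware appended first runs first. A hedged sketch, with the two arguments standing in for whatever `AnnounceMiddleware` values the `store_swarm_interaction` and `store_response` packages register by name:

```go
package main

import "github.com/chihaya/chihaya/tracker"

// buildAnnounceChain sketches the ordering requirement: store_swarm_interaction
// must be appended before store_response so the peerStore is updated before
// the response is built from it.
func buildAnnounceChain(swarmInteraction, storeResponse tracker.AnnounceMiddleware) tracker.AnnounceHandler {
	var chain tracker.AnnounceChain
	chain.Append(swarmInteraction) // updates the peerStore first
	chain.Append(storeResponse)    // then builds the response from the updated store
	return chain.Handler()
}
```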
@ -1,75 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package response

import (
	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/pkg/event"
	"github.com/chihaya/chihaya/server/store"
	"github.com/chihaya/chihaya/tracker"
)

func init() {
	tracker.RegisterAnnounceMiddleware("store_swarm_interaction", announceSwarmInteraction)
}

// FailedSwarmInteraction represents an error that indicates that the
// interaction of a peer with a swarm failed.
type FailedSwarmInteraction string

// Error satisfies the error interface for FailedSwarmInteraction.
func (f FailedSwarmInteraction) Error() string { return string(f) }

// announceSwarmInteraction provides a middleware that manages swarm
// interactions for a peer based on the announce.
func announceSwarmInteraction(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		if req.IPv4 != nil {
			err = updatePeerStore(req, req.Peer4())
			if err != nil {
				return FailedSwarmInteraction(err.Error())
			}
		}

		if req.IPv6 != nil {
			err = updatePeerStore(req, req.Peer6())
			if err != nil {
				return FailedSwarmInteraction(err.Error())
			}
		}

		return next(cfg, req, resp)
	}
}

func updatePeerStore(req *chihaya.AnnounceRequest, peer chihaya.Peer) (err error) {
	storage := store.MustGetStore()

	switch {
	case req.Event == event.Stopped:
		err = storage.DeleteSeeder(req.InfoHash, peer)
		if err != nil && err != store.ErrResourceDoesNotExist {
			return err
		}

		err = storage.DeleteLeecher(req.InfoHash, peer)
		if err != nil && err != store.ErrResourceDoesNotExist {
			return err
		}

	case req.Event == event.Completed || req.Left == 0:
		err = storage.GraduateLeecher(req.InfoHash, peer)
		if err != nil {
			return err
		}
	default:
		err = storage.PutLeecher(req.InfoHash, peer)
		if err != nil {
			return err
		}
	}

	return nil
}
@ -1,103 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package store

import (
	"fmt"
	"time"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/pkg/stopper"
)

var peerStoreDrivers = make(map[string]PeerStoreDriver)

// PeerStore represents an interface for manipulating peers.
type PeerStore interface {
	// PutSeeder adds a seeder for the infoHash to the PeerStore.
	PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error
	// DeleteSeeder removes a seeder for the infoHash from the PeerStore.
	//
	// Returns ErrResourceDoesNotExist if the infoHash or peer does not
	// exist.
	DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error

	// PutLeecher adds a leecher for the infoHash to the PeerStore.
	PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error
	// DeleteLeecher removes a leecher for the infoHash from the PeerStore.
	//
	// Returns ErrResourceDoesNotExist if the infoHash or peer does not
	// exist.
	DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error

	// GraduateLeecher promotes a peer from a leecher to a seeder for the
	// infoHash within the PeerStore.
	//
	// If the given Peer is not a leecher, it will still be added to the
	// list of seeders and no error will be returned.
	GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error

	// AnnouncePeers returns a list of both IPv4 and IPv6 peers for an
	// announce.
	//
	// If seeder is true, only leechers are returned; the number of
	// leechers returned is the smaller of numWant and the number of
	// available leechers.
	// If seeder is false, seeders are returned first, up to numWant or the
	// number of available seeders, whichever is smaller. If there are
	// fewer seeders than numWant, leechers are returned until numWant
	// peers have been collected or no more peers are available.
	AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers, peers6 []chihaya.Peer, err error)
	// CollectGarbage deletes peers from the peerStore which are older than the
	// cutoff time.
	CollectGarbage(cutoff time.Time) error

	// GetSeeders gets all the seeders for a particular infoHash.
	GetSeeders(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error)
	// GetLeechers gets all the leechers for a particular infoHash.
	GetLeechers(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error)

	// NumSeeders gets the amount of seeders for a particular infoHash.
	NumSeeders(infoHash chihaya.InfoHash) int
	// NumLeechers gets the amount of leechers for a particular infoHash.
	NumLeechers(infoHash chihaya.InfoHash) int

	// Stopper provides the Stop method that stops the PeerStore.
	// Stop should shut down the PeerStore in a separate goroutine and send
	// an error to the channel if the shutdown failed. If the shutdown
	// was successful, the channel is to be closed.
	stopper.Stopper
}

// PeerStoreDriver represents an interface for creating a handle to the storage
// of peers.
type PeerStoreDriver interface {
	New(*DriverConfig) (PeerStore, error)
}

// RegisterPeerStoreDriver makes a driver available by the provided name.
//
// If this function is called twice with the same name or if the driver is nil,
// it panics.
func RegisterPeerStoreDriver(name string, driver PeerStoreDriver) {
	if driver == nil {
		panic("storage: could not register nil PeerStoreDriver")
	}

	if _, dup := peerStoreDrivers[name]; dup {
		panic("storage: could not register duplicate PeerStoreDriver: " + name)
	}

	peerStoreDrivers[name] = driver
}

// OpenPeerStore returns a PeerStore specified by a configuration.
func OpenPeerStore(cfg *DriverConfig) (PeerStore, error) {
	driver, ok := peerStoreDrivers[cfg.Name]
	if !ok {
		return nil, fmt.Errorf("storage: unknown PeerStoreDriver %q (forgotten import?)", cfg.Name)
	}

	return driver.New(cfg)
}
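A driver plugs into this registry from its own package `init`, the same way the middlewares above register by name. The sketch below is only the registration skeleton: the `memory` package and driver name are assumptions, and `New` returns an error instead of a real `store.PeerStore` implementation.

```go
package memory

import (
	"errors"

	"github.com/chihaya/chihaya/server/store"
)

// memoryDriver is a hypothetical in-memory PeerStore driver shown only to
// illustrate the registration pattern above.
type memoryDriver struct{}

func (d memoryDriver) New(cfg *store.DriverConfig) (store.PeerStore, error) {
	// A real driver would construct and return its store.PeerStore
	// implementation here; this sketch only demonstrates registration.
	return nil, errors.New("memory: driver not implemented in this sketch")
}

func init() {
	// The name used here must match the "name" field of the peer_store
	// section in the store server's configuration.
	store.RegisterPeerStoreDriver("memory", memoryDriver{})
}
```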
@ -1,142 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package store

import (
	"errors"
	"log"
	"time"

	"gopkg.in/yaml.v2"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/pkg/stopper"
	"github.com/chihaya/chihaya/server"
	"github.com/chihaya/chihaya/tracker"
)

var theStore *Store

func init() {
	server.Register("store", constructor)
}

// ErrResourceDoesNotExist is the error returned by all delete methods in the
// store if the requested resource does not exist.
var ErrResourceDoesNotExist = errors.New("resource does not exist")

func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {
	if theStore == nil {
		cfg, err := newConfig(srvcfg)
		if err != nil {
			return nil, errors.New("store: invalid store config: " + err.Error())
		}

		theStore = &Store{
			cfg:      cfg,
			tkr:      tkr,
			shutdown: make(chan struct{}),
			sg:       stopper.NewStopGroup(),
		}

		ps, err := OpenPeerStore(&cfg.PeerStore)
		if err != nil {
			return nil, err
		}
		theStore.sg.Add(ps)

		ips, err := OpenIPStore(&cfg.IPStore)
		if err != nil {
			return nil, err
		}
		theStore.sg.Add(ips)

		ss, err := OpenStringStore(&cfg.StringStore)
		if err != nil {
			return nil, err
		}
		theStore.sg.Add(ss)

		theStore.PeerStore = ps
		theStore.IPStore = ips
		theStore.StringStore = ss
	}
	return theStore, nil
}

// Config represents the configuration for the store.
type Config struct {
	Addr           string        `yaml:"addr"`
	RequestTimeout time.Duration `yaml:"request_timeout"`
	ReadTimeout    time.Duration `yaml:"read_timeout"`
	WriteTimeout   time.Duration `yaml:"write_timeout"`
	GCAfter        time.Duration `yaml:"gc_after"`
	PeerStore      DriverConfig  `yaml:"peer_store"`
	IPStore        DriverConfig  `yaml:"ip_store"`
	StringStore    DriverConfig  `yaml:"string_store"`
}

// DriverConfig represents the configuration for a store driver.
type DriverConfig struct {
	Name   string      `yaml:"name"`
	Config interface{} `yaml:"config"`
}

func newConfig(srvcfg *chihaya.ServerConfig) (*Config, error) {
	bytes, err := yaml.Marshal(srvcfg.Config)
	if err != nil {
		return nil, err
	}

	var cfg Config
	err = yaml.Unmarshal(bytes, &cfg)
	if err != nil {
		return nil, err
	}

	return &cfg, nil
}

// MustGetStore is used by middleware to access the store.
//
// This function calls log.Fatal if a server hasn't already been created by
// the server package.
func MustGetStore() *Store {
	if theStore == nil {
		log.Fatal("store middleware used without store server")
	}
	return theStore
}

// Store provides storage for a tracker.
type Store struct {
	cfg      *Config
	tkr      *tracker.Tracker
	shutdown chan struct{}
	sg       *stopper.StopGroup

	PeerStore
	IPStore
	StringStore
}

// Start starts the store drivers and blocks until all of them exit.
func (s *Store) Start() {
	<-s.shutdown
}

// Stop stops the store drivers and waits for them to exit.
func (s *Store) Stop() {
	errors := s.sg.Stop()
	if len(errors) == 0 {
		log.Println("Store server shut down cleanly")
	} else {
		log.Println("Store server: failed to shutdown drivers")
		for _, err := range errors {
			log.Println(err.Error())
		}
	}
	close(s.shutdown)
}
File diff suppressed because it is too large
@ -1,526 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package store

import (
	"testing"

	"net"

	"time"

	"github.com/chihaya/chihaya"
	"github.com/stretchr/testify/require"
)

// StringStoreTester is a collection of tests for a StringStore driver.
// Every test expects a new, clean storage. Every test should be
// called with a DriverConfig that ensures this.
type StringStoreTester interface {
	TestStringStore(*testing.T, *DriverConfig)
}

var _ StringStoreTester = &stringStoreTester{}

type stringStoreTester struct {
	s1, s2 string
	driver StringStoreDriver
}

// PrepareStringStoreTester prepares a reusable suite for StringStore driver
// tests.
func PrepareStringStoreTester(driver StringStoreDriver) StringStoreTester {
	return &stringStoreTester{
		s1:     "abc",
		s2:     "def",
		driver: driver,
	}
}

func (s *stringStoreTester) TestStringStore(t *testing.T, cfg *DriverConfig) {
	ss, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, ss)

	has, err := ss.HasString(s.s1)
	require.Nil(t, err)
	require.False(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.False(t, has)

	err = ss.RemoveString(s.s1)
	require.NotNil(t, err)

	err = ss.PutString(s.s1)
	require.Nil(t, err)

	has, err = ss.HasString(s.s1)
	require.Nil(t, err)
	require.True(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.False(t, has)

	err = ss.PutString(s.s1)
	require.Nil(t, err)

	err = ss.PutString(s.s2)
	require.Nil(t, err)

	has, err = ss.HasString(s.s1)
	require.Nil(t, err)
	require.True(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.True(t, has)

	err = ss.RemoveString(s.s1)
	require.Nil(t, err)

	err = ss.RemoveString(s.s2)
	require.Nil(t, err)

	has, err = ss.HasString(s.s1)
	require.Nil(t, err)
	require.False(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.False(t, has)

	errChan := ss.Stop()
	err = <-errChan
	require.Nil(t, err, "StringStore shutdown must not fail")
}

// IPStoreTester is a collection of tests for an IPStore driver.
// Every test expects a new, clean storage. Every test should be
// called with a DriverConfig that ensures this.
type IPStoreTester interface {
	TestIPStore(*testing.T, *DriverConfig)
	TestHasAllHasAny(*testing.T, *DriverConfig)
	TestNetworks(*testing.T, *DriverConfig)
	TestHasAllHasAnyNetworks(*testing.T, *DriverConfig)
}

var _ IPStoreTester = &ipStoreTester{}

type ipStoreTester struct {
	v6, v4, v4s    net.IP
	net1, net2     string
	inNet1, inNet2 net.IP
	excluded       net.IP
	driver         IPStoreDriver
}

// PrepareIPStoreTester prepares a reusable suite for IPStore driver
// tests.
func PrepareIPStoreTester(driver IPStoreDriver) IPStoreTester {
	return &ipStoreTester{
		v6:       net.ParseIP("0c22:384e:0:0c22:384e::68"),
		v4:       net.ParseIP("12.13.14.15"),
		v4s:      net.ParseIP("12.13.14.15").To4(),
		net1:     "192.168.22.255/24",
		net2:     "192.168.23.255/24",
		inNet1:   net.ParseIP("192.168.22.22"),
		inNet2:   net.ParseIP("192.168.23.23"),
		excluded: net.ParseIP("10.154.243.22"),
		driver:   driver,
	}
}

func (s *ipStoreTester) TestIPStore(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, is)

	// check default state
	found, err := is.HasIP(s.v4)
	require.Nil(t, err)
	require.False(t, found)

	// check IPv4
	err = is.AddIP(s.v4)
	require.Nil(t, err)

	found, err = is.HasIP(s.v4)
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasIP(s.v4s)
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasIP(s.v6)
	require.Nil(t, err)
	require.False(t, found)

	// check removes
	err = is.RemoveIP(s.v6)
	require.NotNil(t, err)

	err = is.RemoveIP(s.v4s)
	require.Nil(t, err)

	found, err = is.HasIP(s.v4)
	require.Nil(t, err)
	require.False(t, found)

	// check IPv6
	err = is.AddIP(s.v6)
	require.Nil(t, err)

	found, err = is.HasIP(s.v6)
	require.Nil(t, err)
	require.True(t, found)

	err = is.RemoveIP(s.v6)
	require.Nil(t, err)

	found, err = is.HasIP(s.v6)
	require.Nil(t, err)
	require.False(t, found)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}

func (s *ipStoreTester) TestHasAllHasAny(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, is)

	found, err := is.HasAnyIP(nil)
	require.Nil(t, err)
	require.False(t, found)

	found, err = is.HasAllIPs(nil)
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v6})
	require.Nil(t, err)
	require.False(t, found)

	err = is.AddIP(s.v4)
	require.Nil(t, err)

	found, err = is.HasAnyIP([]net.IP{s.v6, s.v4})
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v6, s.v4})
	require.Nil(t, err)
	require.False(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v4})
	require.Nil(t, err)
	require.True(t, found)

	err = is.AddIP(s.v6)
	require.Nil(t, err)

	found, err = is.HasAnyIP([]net.IP{s.v6, s.v6})
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v6, s.v6})
	require.Nil(t, err)
	require.True(t, found)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}

func (s *ipStoreTester) TestNetworks(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, is)

	match, err := is.HasIP(s.inNet1)
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasIP(s.inNet2)
	require.Nil(t, err)
	require.False(t, match)

	err = is.AddNetwork("")
	require.NotNil(t, err)

	err = is.RemoveNetwork("")
	require.NotNil(t, err)

	err = is.AddNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasIP(s.inNet1)
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasIP(s.inNet2)
	require.Nil(t, err)
	require.False(t, match)

	err = is.RemoveNetwork(s.net2)
	require.NotNil(t, err)

	err = is.RemoveNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasIP(s.inNet1)
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasIP(s.inNet2)
	require.Nil(t, err)
	require.False(t, match)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}

func (s *ipStoreTester) TestHasAllHasAnyNetworks(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, is)

	match, err := is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.False(t, match)

	err = is.AddNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	err = is.AddNetwork(s.net2)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.False(t, match)

	err = is.RemoveNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	err = is.RemoveNetwork(s.net2)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}

// PeerStoreTester is a collection of tests for a PeerStore driver.
// Every test expects a new, clean storage. Every test should be
// called with a DriverConfig that ensures this.
type PeerStoreTester interface {
	// CompareEndpoints sets the function used to compare peers to a
	// comparison that only compares endpoints and omits PeerIDs.
	CompareEndpoints()

	TestPeerStore(*testing.T, *DriverConfig)
}

var _ PeerStoreTester = &peerStoreTester{}

type peerStoreTester struct {
	driver       PeerStoreDriver
	equalityFunc func(a, b chihaya.Peer) bool
}

// PreparePeerStoreTester prepares a reusable suite for PeerStore driver
// tests.
// The tester will use PeerIDs and endpoints to compare peers.
func PreparePeerStoreTester(driver PeerStoreDriver) PeerStoreTester {
	return &peerStoreTester{
		driver:       driver,
		equalityFunc: func(a, b chihaya.Peer) bool { return a.Equal(b) },
	}
}

func (pt *peerStoreTester) CompareEndpoints() {
	pt.equalityFunc = func(a, b chihaya.Peer) bool { return a.EqualEndpoint(b) }
}

func (pt *peerStoreTester) peerInSlice(peer chihaya.Peer, peers []chihaya.Peer) bool {
	for _, v := range peers {
		if pt.equalityFunc(peer, v) {
			return true
		}
	}
	return false
}

func (pt *peerStoreTester) TestPeerStore(t *testing.T, cfg *DriverConfig) {
	var (
		hash = chihaya.InfoHash([20]byte{})

		peers = []struct {
			seeder bool
			peerID string
			ip     string
			port   uint16
		}{
			{false, "-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720},
			{false, "-AZ3042-6ozMq5q6Q3NX", "38.241.13.19", 4833},
			{false, "-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878},
			{false, "-AR6360-6oZyyMWoOOBe", "fd0a:29a8:8445::38", 3167},
			{true, "-AG2083-s1hiF8vGAAg0", "231.231.49.173", 1453},
			{true, "-AG3003-lEl2Mm4NEO4n", "254.99.84.77", 7032},
			{true, "-MR1100-00HS~T7*65rm", "211.229.100.129", 2614},
			{true, "-LK0140-ATIV~nbEQAMr", "fdad:c435:bf79::12", 4114},
			{true, "-KT2210-347143496631", "fdda:1b35:7d6e::9", 6179},
			{true, "-TR0960-6ep6svaa61r4", "fd7f:78f0:4c77::55", 4727},
		}
	)
	s, err := pt.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, s)

	for _, p := range peers {
		// Construct chihaya.Peer from test data.
		peer := chihaya.Peer{
			ID:   chihaya.PeerIDFromString(p.peerID),
			IP:   net.ParseIP(p.ip),
			Port: p.port,
		}

		if p.seeder {
			err = s.PutSeeder(hash, peer)
		} else {
			err = s.PutLeecher(hash, peer)
		}
		require.Nil(t, err)
	}

	leechers1, leechers61, err := s.GetLeechers(hash)
	require.Nil(t, err)
	require.NotEmpty(t, leechers1)
	require.NotEmpty(t, leechers61)
	num := s.NumLeechers(hash)
	require.Equal(t, len(leechers1)+len(leechers61), num)

	seeders1, seeders61, err := s.GetSeeders(hash)
	require.Nil(t, err)
	require.NotEmpty(t, seeders1)
	require.NotEmpty(t, seeders61)
	num = s.NumSeeders(hash)
	require.Equal(t, len(seeders1)+len(seeders61), num)

	leechers := append(leechers1, leechers61...)
	seeders := append(seeders1, seeders61...)

	for _, p := range peers {
		// Construct chihaya.Peer from test data.
		peer := chihaya.Peer{
			ID:   chihaya.PeerIDFromString(p.peerID),
			IP:   net.ParseIP(p.ip),
			Port: p.port,
		}

		if p.seeder {
			require.True(t, pt.peerInSlice(peer, seeders))
		} else {
			require.True(t, pt.peerInSlice(peer, leechers))
		}

		if p.seeder {
			err = s.DeleteSeeder(hash, peer)
		} else {
			err = s.DeleteLeecher(hash, peer)
		}
		require.Nil(t, err)
	}

	require.Zero(t, s.NumLeechers(hash))
	require.Zero(t, s.NumSeeders(hash))

	// Re-add all the peers to the peerStore.
	for _, p := range peers {
		// Construct chihaya.Peer from test data.
		peer := chihaya.Peer{
			ID:   chihaya.PeerIDFromString(p.peerID),
			IP:   net.ParseIP(p.ip),
			Port: p.port,
		}
		if p.seeder {
			s.PutSeeder(hash, peer)
		} else {
			s.PutLeecher(hash, peer)
		}
	}

	// Check that there are 6 seeders, and 4 leechers.
	require.Equal(t, 6, s.NumSeeders(hash))
	require.Equal(t, 4, s.NumLeechers(hash))
	peer := chihaya.Peer{
		ID:   chihaya.PeerIDFromString(peers[0].peerID),
		IP:   net.ParseIP(peers[0].ip),
		Port: peers[0].port,
	}
	err = s.GraduateLeecher(hash, peer)
	require.Nil(t, err)
	// Check that there are 7 seeders, and 3 leechers after graduating a
	// leecher to a seeder.
	require.Equal(t, 7, s.NumSeeders(hash))
	require.Equal(t, 3, s.NumLeechers(hash))

	_, _, err = s.AnnouncePeers(hash, true, 5, peer, chihaya.Peer{})
	// Only test if it works, do not test the slices returned. They change
	// depending on the driver.
	require.Nil(t, err)

	err = s.CollectGarbage(time.Now())
	require.Nil(t, err)
	require.Equal(t, 0, s.NumLeechers(hash))
	require.Equal(t, 0, s.NumSeeders(hash))

	errChan := s.Stop()
	err = <-errChan
	require.Nil(t, err, "PeerStore shutdown must not fail")
}
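A driver package is expected to reuse these testers from its own `_test.go` file. A hedged sketch, assuming a hypothetical `stringStoreDriver` type in a `memory` driver package (such as the one sketched after the StringStore interface below); only `PrepareStringStoreTester`, `TestStringStore`, and `DriverConfig` come from the code above.

```go
package memory

import (
	"testing"

	"github.com/chihaya/chihaya/server/store"
)

// TestStringStore wires the hypothetical "memory" StringStore driver into the
// reusable tester defined above. Each call gets a fresh DriverConfig so the
// tester starts from a clean store, as its comment requires.
func TestStringStore(t *testing.T) {
	store.PrepareStringStoreTester(stringStoreDriver{}).
		TestStringStore(t, &store.DriverConfig{Name: "memory"})
}
```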
@ -1,64 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package store

import (
	"fmt"

	"github.com/chihaya/chihaya/pkg/stopper"
)

var stringStoreDrivers = make(map[string]StringStoreDriver)

// StringStore represents an interface for manipulating strings.
type StringStore interface {
	// PutString adds the given string to the StringStore.
	PutString(s string) error

	// HasString returns whether or not the StringStore contains the given
	// string.
	HasString(s string) (bool, error)

	// RemoveString removes the string from the string store.
	// Returns ErrResourceDoesNotExist if the given string is not contained
	// in the store.
	RemoveString(s string) error

	// Stopper provides the Stop method that stops the StringStore.
	// Stop should shut down the StringStore in a separate goroutine and send
	// an error to the channel if the shutdown failed. If the shutdown
	// was successful, the channel is to be closed.
	stopper.Stopper
}

// StringStoreDriver represents an interface for creating a handle to the
// storage of strings.
type StringStoreDriver interface {
	New(*DriverConfig) (StringStore, error)
}

// RegisterStringStoreDriver makes a driver available by the provided name.
//
// If this function is called twice with the same name or if the driver is nil,
// it panics.
func RegisterStringStoreDriver(name string, driver StringStoreDriver) {
	if driver == nil {
		panic("store: could not register nil StringStoreDriver")
	}
	if _, dup := stringStoreDrivers[name]; dup {
		panic("store: could not register duplicate StringStoreDriver: " + name)
	}
	stringStoreDrivers[name] = driver
}

// OpenStringStore returns a StringStore specified by a configuration.
func OpenStringStore(cfg *DriverConfig) (StringStore, error) {
	driver, ok := stringStoreDrivers[cfg.Name]
	if !ok {
		return nil, fmt.Errorf("store: unknown StringStoreDriver %q (forgotten import?)", cfg.Name)
	}

	return driver.New(cfg)
}
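The `StringStore` surface is small enough that an in-memory implementation fits in a few dozen lines. The sketch below is an assumption-laden illustration, not the project's driver: the map-plus-RWMutex design and the `Stop() <-chan error` reading of `stopper.Stopper` are mine; only the method set, `DriverConfig`, `RegisterStringStoreDriver`, and `store.ErrResourceDoesNotExist` come from this commit.

```go
package memory

import (
	"sync"

	"github.com/chihaya/chihaya/server/store"
)

// stringStore is a minimal in-memory store.StringStore sketch.
type stringStore struct {
	mu      sync.RWMutex
	strings map[string]struct{}
}

func newStringStore() *stringStore {
	return &stringStore{strings: make(map[string]struct{})}
}

func (s *stringStore) PutString(str string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.strings[str] = struct{}{}
	return nil
}

func (s *stringStore) HasString(str string) (bool, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, ok := s.strings[str]
	return ok, nil
}

func (s *stringStore) RemoveString(str string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.strings[str]; !ok {
		return store.ErrResourceDoesNotExist
	}
	delete(s.strings, str)
	return nil
}

// Stop has nothing to flush, so the channel is closed immediately to signal a
// clean shutdown. The exact Stopper signature is an assumption.
func (s *stringStore) Stop() <-chan error {
	c := make(chan error)
	close(c)
	return c
}

// stringStoreDriver registers the sketch under a hypothetical "memory" name.
type stringStoreDriver struct{}

func (d stringStoreDriver) New(_ *store.DriverConfig) (store.StringStore, error) {
	return newStringStore(), nil
}

func init() {
	store.RegisterStringStoreDriver("memory", stringStoreDriver{})
}
```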
@ -1,140 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package tracker

import "github.com/chihaya/chihaya"

// AnnounceHandler is a function that operates on an AnnounceResponse before it
// has been delivered to a client.
type AnnounceHandler func(*chihaya.TrackerConfig, *chihaya.AnnounceRequest, *chihaya.AnnounceResponse) error

// AnnounceMiddleware is a higher-order function used to implement the chaining
// of AnnounceHandlers.
type AnnounceMiddleware func(AnnounceHandler) AnnounceHandler

// AnnounceMiddlewareConstructor is a function that creates a new
// AnnounceMiddleware from a MiddlewareConfig.
type AnnounceMiddlewareConstructor func(chihaya.MiddlewareConfig) (AnnounceMiddleware, error)

// AnnounceChain is a chain of AnnounceMiddlewares.
type AnnounceChain struct{ mw []AnnounceMiddleware }

// Append appends AnnounceMiddlewares to the AnnounceChain.
func (c *AnnounceChain) Append(mw ...AnnounceMiddleware) {
	c.mw = append(c.mw, mw...)
}

// Handler builds an AnnounceChain into an AnnounceHandler.
func (c *AnnounceChain) Handler() AnnounceHandler {
	final := func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		return nil
	}

	for i := len(c.mw) - 1; i >= 0; i-- {
		final = c.mw[i](final)
	}
	return final
}

var announceMiddlewareConstructors = make(map[string]AnnounceMiddlewareConstructor)

// RegisterAnnounceMiddlewareConstructor makes a configurable middleware
// globally available under the provided name.
//
// If this function is called twice with the same name or if the constructor is
// nil, it panics.
func RegisterAnnounceMiddlewareConstructor(name string, mw AnnounceMiddlewareConstructor) {
	if mw == nil {
		panic("tracker: could not register nil AnnounceMiddlewareConstructor")
	}

	if _, dup := announceMiddlewareConstructors[name]; dup {
		panic("tracker: could not register duplicate AnnounceMiddleware: " + name)
	}

	announceMiddlewareConstructors[name] = mw
}

// RegisterAnnounceMiddleware makes a middleware globally available under the
// provided name.
//
// This function is intended to register middleware that has no configuration.
// If this function is called twice with the same name or if the middleware is
// nil, it panics.
func RegisterAnnounceMiddleware(name string, mw AnnounceMiddleware) {
	if mw == nil {
		panic("tracker: could not register nil AnnounceMiddleware")
	}

	RegisterAnnounceMiddlewareConstructor(name, func(_ chihaya.MiddlewareConfig) (AnnounceMiddleware, error) {
		return mw, nil
	})
}

// ScrapeHandler is a function that operates on a ScrapeResponse before it has
// been delivered to a client.
type ScrapeHandler func(*chihaya.TrackerConfig, *chihaya.ScrapeRequest, *chihaya.ScrapeResponse) error

// ScrapeMiddleware is a higher-order function used to implement the chaining of
// ScrapeHandlers.
type ScrapeMiddleware func(ScrapeHandler) ScrapeHandler

// ScrapeMiddlewareConstructor is a function that creates a new
// ScrapeMiddleware from a MiddlewareConfig.
type ScrapeMiddlewareConstructor func(chihaya.MiddlewareConfig) (ScrapeMiddleware, error)

// ScrapeChain is a chain of ScrapeMiddlewares.
type ScrapeChain struct{ mw []ScrapeMiddleware }

// Append appends ScrapeMiddlewares to the ScrapeChain.
func (c *ScrapeChain) Append(mw ...ScrapeMiddleware) {
	c.mw = append(c.mw, mw...)
}

// Handler builds the ScrapeChain into a ScrapeHandler.
func (c *ScrapeChain) Handler() ScrapeHandler {
	final := func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) error {
		return nil
	}
	for i := len(c.mw) - 1; i >= 0; i-- {
		final = c.mw[i](final)
	}
	return final
}

var scrapeMiddlewareConstructors = make(map[string]ScrapeMiddlewareConstructor)

// RegisterScrapeMiddlewareConstructor makes a configurable middleware globally
// available under the provided name.
//
// If this function is called twice with the same name or if the constructor is
// nil, it panics.
func RegisterScrapeMiddlewareConstructor(name string, mw ScrapeMiddlewareConstructor) {
	if mw == nil {
		panic("tracker: could not register nil ScrapeMiddlewareConstructor")
	}

	if _, dup := scrapeMiddlewareConstructors[name]; dup {
		panic("tracker: could not register duplicate ScrapeMiddleware: " + name)
	}

	scrapeMiddlewareConstructors[name] = mw
}

// RegisterScrapeMiddleware makes a middleware globally available under the
// provided name.
//
// This function is intended to register middleware that has no configuration.
// If this function is called twice with the same name or if the middleware is
// nil, it panics.
func RegisterScrapeMiddleware(name string, mw ScrapeMiddleware) {
	if mw == nil {
		panic("tracker: could not register nil ScrapeMiddleware")
	}

	RegisterScrapeMiddlewareConstructor(name, func(_ chihaya.MiddlewareConfig) (ScrapeMiddleware, error) {
		return mw, nil
	})
}
@ -1,52 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

package tracker

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/chihaya/chihaya"
)

func testAnnounceMW1(next AnnounceHandler) AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		resp.IPv4Peers = append(resp.IPv4Peers, chihaya.Peer{
			Port: 1,
		})
		return next(cfg, req, resp)
	}
}

func testAnnounceMW2(next AnnounceHandler) AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		resp.IPv4Peers = append(resp.IPv4Peers, chihaya.Peer{
			Port: 2,
		})
		return next(cfg, req, resp)
	}
}

func testAnnounceMW3(next AnnounceHandler) AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		resp.IPv4Peers = append(resp.IPv4Peers, chihaya.Peer{
			Port: 3,
		})
		return next(cfg, req, resp)
	}
}

func TestAnnounceChain(t *testing.T) {
	var achain AnnounceChain
	achain.Append(testAnnounceMW1)
	achain.Append(testAnnounceMW2)
	achain.Append(testAnnounceMW3)
	handler := achain.Handler()
	resp := &chihaya.AnnounceResponse{}
	err := handler(nil, &chihaya.AnnounceRequest{}, resp)
	assert.Nil(t, err, "the handler should not return an error")
	assert.Equal(t, resp.IPv4Peers, []chihaya.Peer{{Port: 1}, {Port: 2}, {Port: 3}}, "the list of peers added from the middleware should be in the same order.")
}
@ -1,83 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.

// Package tracker implements a protocol-independent, middleware-composed
// BitTorrent tracker.
package tracker

import (
	"errors"
	"fmt"

	"github.com/chihaya/chihaya"
)

// ClientError represents an error that should be exposed to the client over
// the BitTorrent protocol implementation.
type ClientError string

// Error implements the error interface for ClientError.
func (c ClientError) Error() string { return string(c) }

// Tracker represents a protocol-independent, middleware-composed BitTorrent
// tracker.
type Tracker struct {
	cfg            *chihaya.TrackerConfig
	handleAnnounce AnnounceHandler
	handleScrape   ScrapeHandler
}

// NewTracker constructs a newly allocated Tracker composed of the middleware
// in the provided configuration.
func NewTracker(cfg *chihaya.TrackerConfig) (*Tracker, error) {
	var achain AnnounceChain
	for _, mwConfig := range cfg.AnnounceMiddleware {
		mw, ok := announceMiddlewareConstructors[mwConfig.Name]
		if !ok {
			return nil, errors.New("failed to find announce middleware: " + mwConfig.Name)
		}
		middleware, err := mw(mwConfig)
		if err != nil {
			return nil, fmt.Errorf("failed to load announce middleware %q: %s", mwConfig.Name, err.Error())
		}
		achain.Append(middleware)
	}

	var schain ScrapeChain
	for _, mwConfig := range cfg.ScrapeMiddleware {
		mw, ok := scrapeMiddlewareConstructors[mwConfig.Name]
		if !ok {
			return nil, errors.New("failed to find scrape middleware: " + mwConfig.Name)
		}
		middleware, err := mw(mwConfig)
		if err != nil {
			return nil, fmt.Errorf("failed to load scrape middleware %q: %s", mwConfig.Name, err.Error())
		}
		schain.Append(middleware)
	}

	return &Tracker{
		cfg:            cfg,
		handleAnnounce: achain.Handler(),
		handleScrape:   schain.Handler(),
	}, nil
}

// HandleAnnounce runs an AnnounceRequest through the Tracker's middleware and
// returns the result.
func (t *Tracker) HandleAnnounce(req *chihaya.AnnounceRequest) (*chihaya.AnnounceResponse, error) {
	resp := &chihaya.AnnounceResponse{}
	err := t.handleAnnounce(t.cfg, req, resp)
	return resp, err
}

// HandleScrape runs a ScrapeRequest through the Tracker's middleware and
// returns the result.
func (t *Tracker) HandleScrape(req *chihaya.ScrapeRequest) (*chihaya.ScrapeResponse, error) {
	resp := &chihaya.ScrapeResponse{
		Files: make(map[chihaya.InfoHash]chihaya.Scrape),
	}
	err := t.handleScrape(t.cfg, req, resp)
	return resp, err
}
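Putting the pieces together, a caller builds a `Tracker` from a `TrackerConfig` whose middleware names were registered beforehand, then feeds requests through `HandleAnnounce`. A minimal, hedged usage sketch: the `noop` middleware exists only so the config has a registered name to point at, and none of the store-backed middleware (which additionally need the running store server) are wired in.

```go
package main

import (
	"log"

	"github.com/chihaya/chihaya"
	"github.com/chihaya/chihaya/tracker"
)

// noopMW is a do-nothing middleware registered only so this sketch has a
// name to reference from the config; real deployments would list names such
// as "store_swarm_interaction" and "store_response" instead.
func noopMW(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		return next(cfg, req, resp)
	}
}

func main() {
	tracker.RegisterAnnounceMiddleware("noop", noopMW)

	// MiddlewareConfig.Name is matched against the names registered above;
	// other TrackerConfig fields are omitted in this sketch.
	cfg := &chihaya.TrackerConfig{
		AnnounceMiddleware: []chihaya.MiddlewareConfig{{Name: "noop"}},
	}

	tkr, err := tracker.NewTracker(cfg)
	if err != nil {
		log.Fatal(err)
	}

	resp, err := tkr.HandleAnnounce(&chihaya.AnnounceRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("announce handled, %d IPv4 peers in response", len(resp.IPv4Peers))
}
```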