From 1bff8d1571ce8eb84573a3b1543e0075f88e2ff0 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 21:47:52 -0400 Subject: [PATCH 01/74] delete old code --- AUTHORS | 5 - Dockerfile | 27 - chihaya.go | 161 --- chihaya_test.go | 45 - cmd/chihaya/main.go | 77 - config.go | 98 -- config_example.yaml | 61 - glide.lock | 44 - glide.yaml | 9 - middleware/deniability/README.md | 39 - middleware/deniability/config.go | 46 - middleware/deniability/config_test.go | 63 - middleware/deniability/deniability.go | 121 -- middleware/deniability/deniability_test.go | 110 -- middleware/varinterval/README.md | 34 - middleware/varinterval/config.go | 43 - middleware/varinterval/config_test.go | 59 - middleware/varinterval/varinterval.go | 70 - middleware/varinterval/varinterval_test.go | 66 - pkg/bencode/bencode.go | 23 - pkg/bencode/decoder.go | 135 -- pkg/bencode/decoder_test.go | 86 -- pkg/bencode/encoder.go | 163 --- pkg/bencode/encoder_test.go | 71 - pkg/clientid/client_id.go | 23 - pkg/clientid/client_id_test.go | 62 - pkg/event/event.go | 70 - pkg/event/event_test.go | 33 - pkg/random/peer.go | 74 - pkg/random/peer_test.go | 43 - pkg/random/string.go | 26 - pkg/random/string_test.go | 30 - pkg/stopper/stopper.go | 101 -- server/http/config.go | 38 - server/http/query/query.go | 133 -- server/http/query/query_test.go | 100 -- server/http/request.go | 183 --- server/http/server.go | 133 -- server/http/writer.go | 98 -- server/http/writer_test.go | 36 - server/pool.go | 49 - server/prometheus/prometheus.go | 103 -- server/server.go | 56 - server/store/README.md | 43 - server/store/ip_store.go | 93 -- server/store/memory/ip_store.go | 225 --- server/store/memory/ip_store_test.go | 200 --- server/store/memory/peer_store.go | 478 ------- server/store/memory/peer_store_test.go | 142 -- server/store/memory/string_store.go | 93 -- server/store/memory/string_store_test.go | 101 -- server/store/middleware/client/README.md | 25 - server/store/middleware/client/blacklist.go 
| 34 - server/store/middleware/client/whitelist.go | 37 - server/store/middleware/infohash/README.md | 69 - server/store/middleware/infohash/blacklist.go | 106 -- .../middleware/infohash/blacklist_test.go | 140 -- server/store/middleware/infohash/config.go | 56 - .../store/middleware/infohash/config_test.go | 56 - server/store/middleware/infohash/whitelist.go | 99 -- .../middleware/infohash/whitelist_test.go | 96 -- server/store/middleware/ip/README.md | 32 - server/store/middleware/ip/blacklist.go | 47 - server/store/middleware/ip/whitelist.go | 43 - server/store/middleware/response/README.md | 11 - server/store/middleware/response/response.go | 59 - server/store/middleware/swarm/README.md | 12 - server/store/middleware/swarm/swarm.go | 75 - server/store/peer_store.go | 103 -- server/store/store.go | 142 -- server/store/store_bench.go | 1262 ----------------- server/store/store_tests.go | 526 ------- server/store/string_store.go | 64 - tracker/middleware.go | 140 -- tracker/middleware_test.go | 52 - tracker/tracker.go | 83 -- 76 files changed, 7791 deletions(-) delete mode 100644 AUTHORS delete mode 100644 Dockerfile delete mode 100644 chihaya.go delete mode 100644 chihaya_test.go delete mode 100644 cmd/chihaya/main.go delete mode 100644 config.go delete mode 100644 config_example.yaml delete mode 100644 glide.lock delete mode 100644 glide.yaml delete mode 100644 middleware/deniability/README.md delete mode 100644 middleware/deniability/config.go delete mode 100644 middleware/deniability/config_test.go delete mode 100644 middleware/deniability/deniability.go delete mode 100644 middleware/deniability/deniability_test.go delete mode 100644 middleware/varinterval/README.md delete mode 100644 middleware/varinterval/config.go delete mode 100644 middleware/varinterval/config_test.go delete mode 100644 middleware/varinterval/varinterval.go delete mode 100644 middleware/varinterval/varinterval_test.go delete mode 100644 pkg/bencode/bencode.go delete mode 100644 
pkg/bencode/decoder.go delete mode 100644 pkg/bencode/decoder_test.go delete mode 100644 pkg/bencode/encoder.go delete mode 100644 pkg/bencode/encoder_test.go delete mode 100644 pkg/clientid/client_id.go delete mode 100644 pkg/clientid/client_id_test.go delete mode 100644 pkg/event/event.go delete mode 100644 pkg/event/event_test.go delete mode 100644 pkg/random/peer.go delete mode 100644 pkg/random/peer_test.go delete mode 100644 pkg/random/string.go delete mode 100644 pkg/random/string_test.go delete mode 100644 pkg/stopper/stopper.go delete mode 100644 server/http/config.go delete mode 100644 server/http/query/query.go delete mode 100644 server/http/query/query_test.go delete mode 100644 server/http/request.go delete mode 100644 server/http/server.go delete mode 100644 server/http/writer.go delete mode 100644 server/http/writer_test.go delete mode 100644 server/pool.go delete mode 100644 server/prometheus/prometheus.go delete mode 100644 server/server.go delete mode 100644 server/store/README.md delete mode 100644 server/store/ip_store.go delete mode 100644 server/store/memory/ip_store.go delete mode 100644 server/store/memory/ip_store_test.go delete mode 100644 server/store/memory/peer_store.go delete mode 100644 server/store/memory/peer_store_test.go delete mode 100644 server/store/memory/string_store.go delete mode 100644 server/store/memory/string_store_test.go delete mode 100644 server/store/middleware/client/README.md delete mode 100644 server/store/middleware/client/blacklist.go delete mode 100644 server/store/middleware/client/whitelist.go delete mode 100644 server/store/middleware/infohash/README.md delete mode 100644 server/store/middleware/infohash/blacklist.go delete mode 100644 server/store/middleware/infohash/blacklist_test.go delete mode 100644 server/store/middleware/infohash/config.go delete mode 100644 server/store/middleware/infohash/config_test.go delete mode 100644 server/store/middleware/infohash/whitelist.go delete mode 100644 
server/store/middleware/infohash/whitelist_test.go delete mode 100644 server/store/middleware/ip/README.md delete mode 100644 server/store/middleware/ip/blacklist.go delete mode 100644 server/store/middleware/ip/whitelist.go delete mode 100644 server/store/middleware/response/README.md delete mode 100644 server/store/middleware/response/response.go delete mode 100644 server/store/middleware/swarm/README.md delete mode 100644 server/store/middleware/swarm/swarm.go delete mode 100644 server/store/peer_store.go delete mode 100644 server/store/store.go delete mode 100644 server/store/store_bench.go delete mode 100644 server/store/store_tests.go delete mode 100644 server/store/string_store.go delete mode 100644 tracker/middleware.go delete mode 100644 tracker/middleware_test.go delete mode 100644 tracker/tracker.go diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index b62effb..0000000 --- a/AUTHORS +++ /dev/null @@ -1,5 +0,0 @@ -# This is the official list of Chihaya authors for copyright purposes, in alphabetical order. - -Jimmy Zelinskie -Justin Li - diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 9511c1b..0000000 --- a/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -# vim: ft=dockerfile -FROM golang -MAINTAINER Jimmy Zelinskie - -# Install glide -WORKDIR /tmp -ADD https://github.com/Masterminds/glide/releases/download/0.10.2/glide-0.10.2-linux-amd64.tar.gz /tmp -RUN tar xvf /tmp/glide-0.10.2-linux-amd64.tar.gz -RUN mv /tmp/linux-amd64/glide /usr/bin/glide - -# Add files -WORKDIR /go/src/github.com/chihaya/chihaya/ -RUN mkdir -p /go/src/github.com/chihaya/chihaya/ - -# Add source -ADD . . 
- -# Install chihaya -RUN glide install -RUN go install github.com/chihaya/chihaya/cmd/chihaya - -# Configuration/environment -VOLUME ["/config"] -EXPOSE 6880-6882 - -# docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro -e quay.io/jzelinskie/chihaya:latest -ENTRYPOINT ["chihaya", "-config=/config/config.json"] diff --git a/chihaya.go b/chihaya.go deleted file mode 100644 index 399e555..0000000 --- a/chihaya.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file.package middleware - -package chihaya - -import ( - "net" - "time" - - "github.com/chihaya/chihaya/pkg/event" -) - -// PeerID represents a peer ID. -type PeerID [20]byte - -// PeerIDFromBytes creates a PeerID from a byte slice. -// -// It panics if b is not 20 bytes long. -func PeerIDFromBytes(b []byte) PeerID { - if len(b) != 20 { - panic("peer ID must be 20 bytes") - } - - var buf [20]byte - copy(buf[:], b) - return PeerID(buf) -} - -// PeerIDFromString creates a PeerID from a string. -// -// It panics if s is not 20 bytes long. -func PeerIDFromString(s string) PeerID { - if len(s) != 20 { - panic("peer ID must be 20 bytes") - } - - var buf [20]byte - copy(buf[:], s) - return PeerID(buf) -} - -// InfoHash represents an infohash. -type InfoHash [20]byte - -// InfoHashFromBytes creates an InfoHash from a byte slice. -// -// It panics if b is not 20 bytes long. -func InfoHashFromBytes(b []byte) InfoHash { - if len(b) != 20 { - panic("infohash must be 20 bytes") - } - - var buf [20]byte - copy(buf[:], b) - return InfoHash(buf) -} - -// InfoHashFromString creates an InfoHash from a string. -// -// It panics if s is not 20 bytes long. 
-func InfoHashFromString(s string) InfoHash { - if len(s) != 20 { - panic("infohash must be 20 bytes") - } - - var buf [20]byte - copy(buf[:], s) - return InfoHash(buf) -} - -// AnnounceRequest represents the parsed parameters from an announce request. -type AnnounceRequest struct { - Event event.Event - InfoHash InfoHash - PeerID PeerID - - IPv4, IPv6 net.IP - Port uint16 - - Compact bool - NumWant int32 - - Left, Downloaded, Uploaded uint64 - - Params Params -} - -// Peer4 returns a Peer using the IPv4 endpoint of the Announce. -// Note that, if the Announce does not contain an IPv4 address, the IP field of -// the returned Peer can be nil. -func (r *AnnounceRequest) Peer4() Peer { - return Peer{ - IP: r.IPv4, - Port: r.Port, - ID: r.PeerID, - } -} - -// Peer6 returns a Peer using the IPv6 endpoint of the Announce. -// Note that, if the Announce does not contain an IPv6 address, the IP field of -// the returned Peer can be nil. -func (r *AnnounceRequest) Peer6() Peer { - return Peer{ - IP: r.IPv6, - Port: r.Port, - ID: r.PeerID, - } -} - -// AnnounceResponse represents the parameters used to create an announce -// response. -type AnnounceResponse struct { - Compact bool - Complete int32 - Incomplete int32 - Interval time.Duration - MinInterval time.Duration - IPv4Peers []Peer - IPv6Peers []Peer -} - -// ScrapeRequest represents the parsed parameters from a scrape request. -type ScrapeRequest struct { - InfoHashes []InfoHash - Params Params -} - -// ScrapeResponse represents the parameters used to create a scrape response. -type ScrapeResponse struct { - Files map[InfoHash]Scrape -} - -// Scrape represents the state of a swarm that is returned in a scrape response. -type Scrape struct { - Complete int32 - Incomplete int32 -} - -// Peer represents the connection details of a peer that is returned in an -// announce response. -type Peer struct { - ID PeerID - IP net.IP - Port uint16 -} - -// Equal reports whether p and x are the same. 
-func (p Peer) Equal(x Peer) bool { - return p.EqualEndpoint(x) && p.ID == x.ID -} - -// EqualEndpoint reports whether p and x have the same endpoint. -func (p Peer) EqualEndpoint(x Peer) bool { - return p.Port == x.Port && p.IP.Equal(x.IP) -} - -// Params is used to fetch request parameters. -type Params interface { - String(key string) (string, error) -} diff --git a/chihaya_test.go b/chihaya_test.go deleted file mode 100644 index 6fd513e..0000000 --- a/chihaya_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package chihaya - -import ( - "net" - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - peers = []struct { - peerID string - ip string - port uint16 - }{ - {"-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720}, - {"-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878}, - {"-TR0960-6ep6svaa61r4", "fd45:7856:3dae::48", 2878}, - {"-BS5820-oy4La2MWGEFj", "fd0a:29a8:8445::38", 2878}, - {"-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 8999}, - } -) - -func TestPeerEquality(t *testing.T) { - // Build peers from test data. - var builtPeers []Peer - for _, peer := range peers { - builtPeers = append(builtPeers, Peer{ - ID: PeerIDFromString(peer.peerID), - IP: net.ParseIP(peer.ip), - Port: peer.port, - }) - } - - assert.True(t, builtPeers[0].Equal(builtPeers[0])) - assert.False(t, builtPeers[0].Equal(builtPeers[1])) - assert.True(t, builtPeers[1].Equal(builtPeers[1])) - assert.False(t, builtPeers[1].Equal(builtPeers[2])) - assert.False(t, builtPeers[1].Equal(builtPeers[3])) - assert.False(t, builtPeers[1].Equal(builtPeers[4])) -} diff --git a/cmd/chihaya/main.go b/cmd/chihaya/main.go deleted file mode 100644 index 54d9c57..0000000 --- a/cmd/chihaya/main.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package main - -import ( - "flag" - "log" - "os" - "os/signal" - "runtime/pprof" - "syscall" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server" - "github.com/chihaya/chihaya/tracker" - - // Servers - _ "github.com/chihaya/chihaya/server/http" - _ "github.com/chihaya/chihaya/server/prometheus" - _ "github.com/chihaya/chihaya/server/store" - _ "github.com/chihaya/chihaya/server/store/memory" - - // Middleware - _ "github.com/chihaya/chihaya/middleware/deniability" - _ "github.com/chihaya/chihaya/middleware/varinterval" - _ "github.com/chihaya/chihaya/server/store/middleware/client" - _ "github.com/chihaya/chihaya/server/store/middleware/infohash" - _ "github.com/chihaya/chihaya/server/store/middleware/ip" - _ "github.com/chihaya/chihaya/server/store/middleware/response" - _ "github.com/chihaya/chihaya/server/store/middleware/swarm" -) - -var ( - configPath string - cpuprofile string -) - -func init() { - flag.StringVar(&configPath, "config", "", "path to the configuration file") - flag.StringVar(&cpuprofile, "cpuprofile", "", "path to cpu profile output") -} - -func main() { - flag.Parse() - - if cpuprofile != "" { - log.Println("profiling...") - f, err := os.Create(cpuprofile) - if err != nil { - log.Fatal(err) - } - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } - - cfg, err := chihaya.OpenConfigFile(configPath) - if err != nil { - log.Fatal("failed to load config: " + err.Error()) - } - - tkr, err := tracker.NewTracker(&cfg.Tracker) - if err != nil { - log.Fatal("failed to create tracker: " + err.Error()) - } - - pool, err := server.StartPool(cfg.Servers, tkr) - if err != nil { - log.Fatal("failed to create server pool: " + err.Error()) - } - - shutdown := make(chan os.Signal) - signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM) - <-shutdown - pool.Stop() -} diff --git a/config.go b/config.go deleted file mode 
100644 index eb26b24..0000000 --- a/config.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package chihaya - -import ( - "io" - "io/ioutil" - "os" - "time" - - "gopkg.in/yaml.v2" -) - -// DefaultConfig is a sane configuration used as a fallback or for testing. -var DefaultConfig = Config{ - Tracker: TrackerConfig{ - AnnounceInterval: 30 * time.Minute, - MinAnnounceInterval: 20 * time.Minute, - AnnounceMiddleware: []MiddlewareConfig{}, - ScrapeMiddleware: []MiddlewareConfig{}, - }, - Servers: []ServerConfig{}, -} - -// Config represents the global configuration of a chihaya binary. -type Config struct { - Tracker TrackerConfig `yaml:"tracker"` - Servers []ServerConfig `yaml:"servers"` -} - -// TrackerConfig represents the configuration of protocol-agnostic BitTorrent -// Tracker used by Servers started by chihaya. -type TrackerConfig struct { - AnnounceInterval time.Duration `yaml:"announce"` - MinAnnounceInterval time.Duration `yaml:"min_announce"` - AnnounceMiddleware []MiddlewareConfig `yaml:"announce_middleware"` - ScrapeMiddleware []MiddlewareConfig `yaml:"scrape_middleware"` -} - -// MiddlewareConfig represents the configuration of a middleware used by -// the tracker. -type MiddlewareConfig struct { - Name string `yaml:"name"` - Config interface{} `yaml:"config"` -} - -// ServerConfig represents the configuration of the Servers started by chihaya. -type ServerConfig struct { - Name string `yaml:"name"` - Config interface{} `yaml:"config"` -} - -// ConfigFile represents a YAML configuration file that namespaces all chihaya -// configuration under the "chihaya" namespace. -type ConfigFile struct { - Chihaya Config `yaml:"chihaya"` -} - -// DecodeConfigFile unmarshals an io.Reader into a new Config. 
-func DecodeConfigFile(r io.Reader) (*Config, error) { - contents, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - cfgFile := &ConfigFile{} - err = yaml.Unmarshal(contents, cfgFile) - if err != nil { - return nil, err - } - - return &cfgFile.Chihaya, nil -} - -// OpenConfigFile returns a new Config given the path to a YAML configuration -// file. -// It supports relative and absolute paths and environment variables. -// Given "", it returns DefaultConfig. -func OpenConfigFile(path string) (*Config, error) { - if path == "" { - return &DefaultConfig, nil - } - - f, err := os.Open(os.ExpandEnv(path)) - if err != nil { - return nil, err - } - defer f.Close() - - cfg, err := DecodeConfigFile(f) - if err != nil { - return nil, err - } - - return cfg, nil -} diff --git a/config_example.yaml b/config_example.yaml deleted file mode 100644 index 8e8f4a0..0000000 --- a/config_example.yaml +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2016 The Chihaya Authors. All rights reserved. -# Use of this source code is governed by the BSD 2-Clause license, -# which can be found in the LICENSE file. 
- -chihaya: - tracker: - announce: 10m - min_announce: 5m - announce_middleware: -# - name: ip_blacklist -# - name: ip_whitelist -# - name: client_blacklist -# - name: client_whitelist -# - name: infohash_blacklist -# - name: infohash_whitelist -# - name: varinterval -# - name: deniability - - name: store_swarm_interaction - - name: store_response - scrape_middleware: -# - name: infohash_blacklist -# config: -# mode: block - - name: store_response - - servers: - - name: store - config: - addr: localhost:6880 - request_timeout: 10s - read_timeout: 10s - write_timeout: 10s - client_store: - name: memory - ip_store: - name: memory - string_store: - name: memory - peer_store: - name: memory - config: - gcAfter: 30m - shards: 1 - - - name: prometheus - config: - addr: localhost:6881 - shutdown_timeout: 10s - read_timeout: 10s - write_timeout: 10s - - - name: http - config: - addr: localhost:6882 - request_timeout: 10s - read_timeout: 10s - write_timeout: 10s - -# - name: udp -# config: -# addr: localhost:6883 diff --git a/glide.lock b/glide.lock deleted file mode 100644 index 89a301c..0000000 --- a/glide.lock +++ /dev/null @@ -1,44 +0,0 @@ -hash: e7d2be6c361fe6fe6242b56e502829e8a72733f9ff0aa57443c9397c3488174f -updated: 2016-05-21T17:58:26.448148976-04:00 -imports: -- name: github.com/beorn7/perks - version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 - subpackages: - - quantile -- name: github.com/golang/protobuf - version: cd85f19845cc96cc6e5269c894d8cd3c67e9ed83 - subpackages: - - proto -- name: github.com/julienschmidt/httprouter - version: 77366a47451a56bb3ba682481eed85b64fea14e8 -- name: github.com/matttproud/golang_protobuf_extensions - version: c12348ce28de40eed0136aa2b644d0ee0650e56c - subpackages: - - pbutil -- name: github.com/mrd0ll4r/netmatch - version: af335c21c765757f2649dbf1d3d43f77eb6c4eb8 -- name: github.com/prometheus/client_golang - version: d38f1ef46f0d78136db3e585f7ebe1bcc3476f73 - subpackages: - - prometheus -- name: 
github.com/prometheus/client_model - version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 - subpackages: - - go -- name: github.com/prometheus/common - version: a715f9d07a512e8339f70a275ace0e67c0f9a65f - subpackages: - - expfmt - - internal/bitbucket.org/ww/goautoneg - - model -- name: github.com/prometheus/procfs - version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 -- name: github.com/tylerb/graceful - version: 9a3d4236b03bb5d26f7951134d248f9d5510d599 -- name: golang.org/x/net - version: 0c607074acd38c5f23d1344dfe74c977464d1257 - subpackages: - - netutil -- name: gopkg.in/yaml.v2 - version: a83829b6f1293c91addabc89d0571c246397bbf4 -devImports: [] diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index d3a516d..0000000 --- a/glide.yaml +++ /dev/null @@ -1,9 +0,0 @@ -package: github.com/chihaya/chihaya -import: -- package: github.com/julienschmidt/httprouter -- package: github.com/mrd0ll4r/netmatch -- package: github.com/prometheus/client_golang - subpackages: - - prometheus -- package: github.com/tylerb/graceful -- package: gopkg.in/yaml.v2 diff --git a/middleware/deniability/README.md b/middleware/deniability/README.md deleted file mode 100644 index fd50614..0000000 --- a/middleware/deniability/README.md +++ /dev/null @@ -1,39 +0,0 @@ -## Deniability Middleware - -This package provides the announce middleware `deniability` which inserts ghost peers into announce responses to achieve plausible deniability. - -### Functionality - -This middleware will choose random announces and modify the list of peers returned. -A random number of randomly generated peers will be inserted at random positions into the list of peers. -As soon as the list of peers exceeds `numWant`, peers will be replaced rather than inserted. - -Note that if a response is picked for augmentation, both IPv4 and IPv6 peers will be modified, in case they are not empty. - -Also note that the IP address for the generated peeer consists of bytes in the range [1,254]. 
- -### Configuration - -This middleware provides the following parameters for configuration: - -- `modify_response_probability` (float, >0, <= 1) indicates the probability by which a response will be augmented with random peers. -- `max_random_peers` (int, >0) sets an upper boundary (inclusive) for the amount of peers added. -- `prefix` (string, 20 characters at most) sets the prefix for generated peer IDs. - The peer ID will be padded to 20 bytes using a random string of alphanumeric characters. -- `min_port` (int, >0, <=65535) sets a lower boundary for the port for generated peers. -- `max_port` (int, >0, <=65536, > `min_port`) sets an upper boundary for the port for generated peers. - -An example config might look like this: - - chihaya: - tracker: - announce_middleware: - - name: deniability - config: - modify_response_probability: 0.2 - max_random_peers: 5 - prefix: -AZ2060- - min_port: 40000 - max_port: 60000 - -For more information about peer IDs and their prefixes, see [this wiki entry](https://wiki.theory.org/BitTorrentSpecification#peer_id). \ No newline at end of file diff --git a/middleware/deniability/config.go b/middleware/deniability/config.go deleted file mode 100644 index 0423493..0000000 --- a/middleware/deniability/config.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package deniability - -import ( - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" -) - -// Config represents the configuration for the deniability middleware. -type Config struct { - // ModifyResponseProbability is the probability by which a response will - // be augmented with random peers. - ModifyResponseProbability float32 `yaml:"modify_response_probability"` - - // MaxRandomPeers is the amount of peers that will be added at most. 
- MaxRandomPeers int `yaml:"max_random_peers"` - - // Prefix is the prefix to be used for peer IDs. - Prefix string `yaml:"prefix"` - - // MinPort is the minimum port (inclusive) for the generated peer. - MinPort int `yaml:"min_port"` - - // MaxPort is the maximum port (exclusive) for the generated peer. - MaxPort int `yaml:"max_port"` -} - -// newConfig parses the given MiddlewareConfig as a deniability.Config. -func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) { - bytes, err := yaml.Marshal(mwcfg.Config) - if err != nil { - return nil, err - } - - var cfg Config - err = yaml.Unmarshal(bytes, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} diff --git a/middleware/deniability/config_test.go b/middleware/deniability/config_test.go deleted file mode 100644 index b271ba4..0000000 --- a/middleware/deniability/config_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package deniability - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" -) - -type configTestData struct { - modifyProbability string - maxNewPeers string - prefix string - minPort string - maxPort string - err bool - expected Config -} - -var ( - configTemplate = ` -name: foo -config: - modify_response_probability: %s - max_random_peers: %s - prefix: %s - min_port: %s - max_port: %s` - - configData = []configTestData{ - {"1.0", "5", "abc", "2000", "3000", false, Config{1.0, 5, "abc", 2000, 3000}}, - {"a", "a", "12", "a", "a", true, Config{}}, - } -) - -func TestNewConfig(t *testing.T) { - var mwconfig chihaya.MiddlewareConfig - - cfg, err := newConfig(mwconfig) - assert.Nil(t, err) - assert.NotNil(t, cfg) - - for _, test := range configData { - config := fmt.Sprintf(configTemplate, test.modifyProbability, test.maxNewPeers, test.prefix, test.minPort, test.maxPort) - err = yaml.Unmarshal([]byte(config), &mwconfig) - assert.Nil(t, err) - - cfg, err = newConfig(mwconfig) - if test.err { - assert.NotNil(t, err) - continue - } - assert.Nil(t, err) - assert.Equal(t, test.expected, *cfg) - } -} diff --git a/middleware/deniability/deniability.go b/middleware/deniability/deniability.go deleted file mode 100644 index 0e27347..0000000 --- a/middleware/deniability/deniability.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package deniability - -import ( - "errors" - "math/rand" - "time" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/random" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddlewareConstructor("deniability", constructor) -} - -type deniabilityMiddleware struct { - cfg *Config - r *rand.Rand -} - -// constructor provides a middleware constructor that returns a middleware to -// insert peers into the peer lists returned as a response to an announce. -// -// It returns an error if the config provided is either syntactically or -// semantically incorrect. -func constructor(c chihaya.MiddlewareConfig) (tracker.AnnounceMiddleware, error) { - cfg, err := newConfig(c) - if err != nil { - return nil, err - } - - if cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1 { - return nil, errors.New("modify_response_probability must be in [0,1)") - } - - if cfg.MaxRandomPeers <= 0 { - return nil, errors.New("max_random_peers must be > 0") - } - - if cfg.MinPort <= 0 { - return nil, errors.New("min_port must not be <= 0") - } - - if cfg.MaxPort > 65536 { - return nil, errors.New("max_port must not be > 65536") - } - - if cfg.MinPort >= cfg.MaxPort { - return nil, errors.New("max_port must not be <= min_port") - } - - if len(cfg.Prefix) > 20 { - return nil, errors.New("prefix must not be longer than 20 bytes") - } - - mw := deniabilityMiddleware{ - cfg: cfg, - r: rand.New(rand.NewSource(time.Now().UnixNano())), - } - - return mw.modifyResponse, nil -} - -func (mw *deniabilityMiddleware) modifyResponse(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - err := next(cfg, req, resp) - if err != nil { - return err - } - - if mw.cfg.ModifyResponseProbability == 1 || mw.r.Float32() < mw.cfg.ModifyResponseProbability { - numNewPeers := mw.r.Intn(mw.cfg.MaxRandomPeers) + 1 - for i := 0; i < 
numNewPeers; i++ { - if len(resp.IPv6Peers) > 0 { - if len(resp.IPv6Peers) >= int(req.NumWant) { - mw.replacePeer(resp.IPv6Peers, true) - } else { - resp.IPv6Peers = mw.insertPeer(resp.IPv6Peers, true) - } - } - - if len(resp.IPv4Peers) > 0 { - if len(resp.IPv4Peers) >= int(req.NumWant) { - mw.replacePeer(resp.IPv4Peers, false) - } else { - resp.IPv4Peers = mw.insertPeer(resp.IPv4Peers, false) - } - } - } - } - - return nil - } -} - -// replacePeer replaces a peer from a random position within the given slice -// of peers with a randomly generated one. -// -// replacePeer panics if len(peers) == 0. -func (mw *deniabilityMiddleware) replacePeer(peers []chihaya.Peer, v6 bool) { - peers[mw.r.Intn(len(peers))] = random.Peer(mw.r, mw.cfg.Prefix, v6, mw.cfg.MinPort, mw.cfg.MaxPort) -} - -// insertPeer inserts a randomly generated peer at a random position into the -// given slice and returns the new slice. -func (mw *deniabilityMiddleware) insertPeer(peers []chihaya.Peer, v6 bool) []chihaya.Peer { - pos := 0 - if len(peers) > 0 { - pos = mw.r.Intn(len(peers)) - } - peers = append(peers, chihaya.Peer{}) - copy(peers[pos+1:], peers[pos:]) - peers[pos] = random.Peer(mw.r, mw.cfg.Prefix, v6, mw.cfg.MinPort, mw.cfg.MaxPort) - - return peers -} diff --git a/middleware/deniability/deniability_test.go b/middleware/deniability/deniability_test.go deleted file mode 100644 index 4492898..0000000 --- a/middleware/deniability/deniability_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package deniability - -import ( - "fmt" - "math/rand" - "net" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/chihaya/chihaya" -) - -type constructorTestData struct { - cfg Config - error bool -} - -var constructorData = []constructorTestData{ - {Config{1.0, 10, "abc", 1024, 1025}, false}, - {Config{1.1, 10, "abc", 1024, 1025}, true}, - {Config{0, 10, "abc", 1024, 1025}, true}, - {Config{1.0, 0, "abc", 1024, 1025}, true}, - {Config{1.0, 10, "01234567890123456789_", 1024, 1025}, true}, - {Config{1.0, 10, "abc", 0, 1025}, true}, - {Config{1.0, 10, "abc", 1024, 0}, true}, - {Config{1.0, 10, "abc", 1024, 65537}, true}, -} - -func TestReplacePeer(t *testing.T) { - cfg := Config{ - Prefix: "abc", - MinPort: 1024, - MaxPort: 1025, - } - mw := deniabilityMiddleware{ - r: rand.New(rand.NewSource(0)), - cfg: &cfg, - } - peer := chihaya.Peer{ - ID: chihaya.PeerID([20]byte{}), - Port: 2000, - IP: net.ParseIP("10.150.255.23"), - } - peers := []chihaya.Peer{peer} - - mw.replacePeer(peers, false) - assert.Equal(t, 1, len(peers)) - assert.Equal(t, "abc", string(peers[0].ID[:3])) - assert.Equal(t, uint16(1024), peers[0].Port) - assert.NotNil(t, peers[0].IP.To4()) - - mw.replacePeer(peers, true) - assert.Equal(t, 1, len(peers)) - assert.Equal(t, "abc", string(peers[0].ID[:3])) - assert.Equal(t, uint16(1024), peers[0].Port) - assert.Nil(t, peers[0].IP.To4()) - - peers = []chihaya.Peer{peer, peer} - - mw.replacePeer(peers, true) - assert.True(t, (peers[0].Port == peer.Port) != (peers[1].Port == peer.Port), "not exactly one peer was replaced") -} - -func TestInsertPeer(t *testing.T) { - cfg := Config{ - Prefix: "abc", - MinPort: 1024, - MaxPort: 1025, - } - mw := deniabilityMiddleware{ - r: rand.New(rand.NewSource(0)), - cfg: &cfg, - } - peer := chihaya.Peer{ - ID: chihaya.PeerID([20]byte{}), - Port: 2000, - IP: net.ParseIP("10.150.255.23"), - } - var peers []chihaya.Peer - - peers = mw.insertPeer(peers, false) - assert.Equal(t, 1, len(peers)) - assert.Equal(t, 
uint16(1024), peers[0].Port) - assert.Equal(t, "abc", string(peers[0].ID[:3])) - assert.NotNil(t, peers[0].IP.To4()) - - peers = []chihaya.Peer{peer, peer} - - peers = mw.insertPeer(peers, true) - assert.Equal(t, 3, len(peers)) -} - -func TestConstructor(t *testing.T) { - for _, tt := range constructorData { - _, err := constructor(chihaya.MiddlewareConfig{ - Config: tt.cfg, - }) - - if tt.error { - assert.NotNil(t, err, fmt.Sprintf("error expected for %+v", tt.cfg)) - } else { - assert.Nil(t, err, fmt.Sprintf("no error expected for %+v", tt.cfg)) - } - } -} diff --git a/middleware/varinterval/README.md b/middleware/varinterval/README.md deleted file mode 100644 index 50cd812..0000000 --- a/middleware/varinterval/README.md +++ /dev/null @@ -1,34 +0,0 @@ -## Announce Interval Variation Middleware - -This package provides the announce middleware `varinterval` which randomizes the announce interval. - -### Functionality - -This middleware will choose random announces and modify the `interval` and `min_interval` fields. -A random number of seconds will be added to the `interval` field and, if desired, also to the `min_interval` field. - -Note that if a response is picked for modification and `min_interval` should be changed as well, both `interval` and `min_interval` will be modified by the same amount. - -### Use Case - -Use this middleware to avoid recurring load spikes on the tracker. -By randomizing the announce interval, load spikes will flatten out after a few cycles. - -### Configuration - -This middleware provides the following parameters for configuration: - -- `modify_response_probability` (float, >0, <= 1) indicates the probability by which a response will be chosen to have its announce intervals modified. -- `max_increase_delta` (int, >0) sets an upper boundary (inclusive) for the amount of seconds added. -- `modify_min_interval` (boolean) whether to modify the `min_interval` field as well. 
- -An example config might look like this: - - chihaya: - tracker: - announce_middleware: - - name: varinterval - config: - modify_response_probability: 0.2 - max_increase_delta: 60 - modify_min_interval: true diff --git a/middleware/varinterval/config.go b/middleware/varinterval/config.go deleted file mode 100644 index 19eeaf8..0000000 --- a/middleware/varinterval/config.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package varinterval - -import ( - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" -) - -// Config represents the configuration for the varinterval middleware. -type Config struct { - // ModifyResponseProbability is the probability by which a response will - // be modified. - ModifyResponseProbability float32 `yaml:"modify_response_probability"` - - // MaxIncreaseDelta is the amount of seconds that will be added at most. - MaxIncreaseDelta int `yaml:"max_increase_delta"` - - // ModifyMinInterval specifies whether min_interval should be increased - // as well. - ModifyMinInterval bool `yaml:"modify_min_interval"` -} - -// newConfig parses the given MiddlewareConfig as a varinterval.Config. -// -// The contents of the config are not checked. -func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) { - bytes, err := yaml.Marshal(mwcfg.Config) - if err != nil { - return nil, err - } - - var cfg Config - err = yaml.Unmarshal(bytes, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} diff --git a/middleware/varinterval/config_test.go b/middleware/varinterval/config_test.go deleted file mode 100644 index f43c73d..0000000 --- a/middleware/varinterval/config_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package varinterval - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" -) - -type configTestData struct { - modifyProbability string - maxIncreaseDelta string - modifyMinInterval string - err bool - expected Config -} - -var ( - configTemplate = ` -name: foo -config: - modify_response_probability: %s - max_increase_delta: %s - modify_min_interval: %s` - - configData = []configTestData{ - {"1.0", "60", "false", false, Config{1.0, 60, false}}, - {"a", "60", "false", true, Config{}}, - } -) - -func TestNewConfig(t *testing.T) { - var mwconfig chihaya.MiddlewareConfig - - cfg, err := newConfig(mwconfig) - assert.Nil(t, err) - assert.NotNil(t, cfg) - - for _, test := range configData { - config := fmt.Sprintf(configTemplate, test.modifyProbability, test.maxIncreaseDelta, test.modifyMinInterval) - err = yaml.Unmarshal([]byte(config), &mwconfig) - assert.Nil(t, err) - - cfg, err = newConfig(mwconfig) - if test.err { - assert.NotNil(t, err) - continue - } - assert.Nil(t, err) - assert.Equal(t, test.expected, *cfg) - } -} diff --git a/middleware/varinterval/varinterval.go b/middleware/varinterval/varinterval.go deleted file mode 100644 index 2f8893e..0000000 --- a/middleware/varinterval/varinterval.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package varinterval - -import ( - "errors" - "math/rand" - "time" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddlewareConstructor("varinterval", constructor) -} - -type varintervalMiddleware struct { - cfg *Config - r *rand.Rand -} - -// constructor provides a middleware constructor that returns a middleware to -// insert a variation into announce intervals. 
-// -// It returns an error if the config provided is either syntactically or -// semantically incorrect. -func constructor(c chihaya.MiddlewareConfig) (tracker.AnnounceMiddleware, error) { - cfg, err := newConfig(c) - if err != nil { - return nil, err - } - - if cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1 { - return nil, errors.New("modify_response_probability must be in [0,1)") - } - - if cfg.MaxIncreaseDelta <= 0 { - return nil, errors.New("max_increase_delta must be > 0") - } - - mw := varintervalMiddleware{ - cfg: cfg, - r: rand.New(rand.NewSource(time.Now().UnixNano())), - } - - return mw.modifyResponse, nil -} - -func (mw *varintervalMiddleware) modifyResponse(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - err := next(cfg, req, resp) - if err != nil { - return err - } - - if mw.cfg.ModifyResponseProbability == 1 || mw.r.Float32() < mw.cfg.ModifyResponseProbability { - addSeconds := time.Duration(mw.r.Intn(mw.cfg.MaxIncreaseDelta)+1) * time.Second - resp.Interval += addSeconds - - if mw.cfg.ModifyMinInterval { - resp.MinInterval += addSeconds - } - } - - return nil - } -} diff --git a/middleware/varinterval/varinterval_test.go b/middleware/varinterval/varinterval_test.go deleted file mode 100644 index 5da0fe6..0000000 --- a/middleware/varinterval/varinterval_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package varinterval - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/tracker" -) - -type constructorTestData struct { - cfg Config - error bool -} - -var constructorData = []constructorTestData{ - {Config{1.0, 10, false}, false}, - {Config{1.1, 10, false}, true}, - {Config{0, 10, true}, true}, - {Config{1.0, 0, false}, true}, -} - -func TestConstructor(t *testing.T) { - for _, tt := range constructorData { - _, err := constructor(chihaya.MiddlewareConfig{ - Config: tt.cfg, - }) - - if tt.error { - assert.NotNil(t, err, fmt.Sprintf("error expected for %+v", tt.cfg)) - } else { - assert.Nil(t, err, fmt.Sprintf("no error expected for %+v", tt.cfg)) - } - } -} - -func TestModifyResponse(t *testing.T) { - var ( - achain tracker.AnnounceChain - req chihaya.AnnounceRequest - resp chihaya.AnnounceResponse - ) - - mw, err := constructor(chihaya.MiddlewareConfig{ - Config: Config{ - ModifyResponseProbability: 1.0, - MaxIncreaseDelta: 10, - ModifyMinInterval: true, - }, - }) - assert.Nil(t, err) - - achain.Append(mw) - handler := achain.Handler() - - err = handler(nil, &req, &resp) - assert.Nil(t, err) - assert.True(t, resp.Interval > 0, "interval should have been increased") - assert.True(t, resp.MinInterval > 0, "min_interval should have been increased") -} diff --git a/pkg/bencode/bencode.go b/pkg/bencode/bencode.go deleted file mode 100644 index 819d0c8..0000000 --- a/pkg/bencode/bencode.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -// Package bencode implements bencoding of data as defined in BEP 3 using -// type assertion over reflection for performance. -package bencode - -// Dict represents a bencode dictionary. -type Dict map[string]interface{} - -// NewDict allocates the memory for a Dict. 
-func NewDict() Dict { - return make(Dict) -} - -// List represents a bencode list. -type List []interface{} - -// NewList allocates the memory for a List. -func NewList() List { - return make(List, 0) -} diff --git a/pkg/bencode/decoder.go b/pkg/bencode/decoder.go deleted file mode 100644 index 345bb95..0000000 --- a/pkg/bencode/decoder.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package bencode - -import ( - "bufio" - "bytes" - "errors" - "io" - "strconv" -) - -// A Decoder reads bencoded objects from an input stream. -type Decoder struct { - r *bufio.Reader -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r: bufio.NewReader(r)} -} - -// Decode unmarshals the next bencoded value in the stream. -func (dec *Decoder) Decode() (interface{}, error) { - return unmarshal(dec.r) -} - -// Unmarshal deserializes and returns the bencoded value in buf. 
-func Unmarshal(buf []byte) (interface{}, error) { - r := bufio.NewReader(bytes.NewBuffer(buf)) - return unmarshal(r) -} - -// unmarshal reads bencoded values from a bufio.Reader -func unmarshal(r *bufio.Reader) (interface{}, error) { - tok, err := r.ReadByte() - if err != nil { - return nil, err - } - - switch tok { - case 'i': - return readTerminatedInt(r, 'e') - - case 'l': - list := NewList() - for { - ok, err := readTerminator(r, 'e') - if err != nil { - return nil, err - } else if ok { - break - } - - v, err := unmarshal(r) - if err != nil { - return nil, err - } - list = append(list, v) - } - return list, nil - - case 'd': - dict := NewDict() - for { - ok, err := readTerminator(r, 'e') - if err != nil { - return nil, err - } else if ok { - break - } - - v, err := unmarshal(r) - if err != nil { - return nil, err - } - - key, ok := v.(string) - if !ok { - return nil, errors.New("bencode: non-string map key") - } - - dict[key], err = unmarshal(r) - if err != nil { - return nil, err - } - } - return dict, nil - - default: - err = r.UnreadByte() - if err != nil { - return nil, err - } - - length, err := readTerminatedInt(r, ':') - if err != nil { - return nil, errors.New("bencode: unknown input sequence") - } - - buf := make([]byte, length) - n, err := r.Read(buf) - - if err != nil { - return nil, err - } else if int64(n) != length { - return nil, errors.New("bencode: short read") - } - - return string(buf), nil - } -} - -func readTerminator(r io.ByteScanner, term byte) (bool, error) { - tok, err := r.ReadByte() - if err != nil { - return false, err - } else if tok == term { - return true, nil - } - return false, r.UnreadByte() -} - -func readTerminatedInt(r *bufio.Reader, term byte) (int64, error) { - buf, err := r.ReadSlice(term) - if err != nil { - return 0, err - } else if len(buf) <= 1 { - return 0, errors.New("bencode: empty integer field") - } - - return strconv.ParseInt(string(buf[:len(buf)-1]), 10, 64) -} diff --git a/pkg/bencode/decoder_test.go 
b/pkg/bencode/decoder_test.go deleted file mode 100644 index a5c9406..0000000 --- a/pkg/bencode/decoder_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package bencode - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -var unmarshalTests = []struct { - input string - expected interface{} -}{ - {"i42e", int64(42)}, - {"i-42e", int64(-42)}, - - {"7:example", "example"}, - - {"l3:one3:twoe", List{"one", "two"}}, - {"le", List{}}, - - {"d3:one2:aa3:two2:bbe", Dict{"one": "aa", "two": "bb"}}, - {"de", Dict{}}, -} - -func TestUnmarshal(t *testing.T) { - for _, tt := range unmarshalTests { - got, err := Unmarshal([]byte(tt.input)) - assert.Nil(t, err, "unmarshal should not fail") - assert.Equal(t, got, tt.expected, "unmarshalled values should match the expected results") - } -} - -type bufferLoop struct { - val string -} - -func (r *bufferLoop) Read(b []byte) (int, error) { - n := copy(b, r.val) - return n, nil -} - -func BenchmarkUnmarshalScalar(b *testing.B) { - d1 := NewDecoder(&bufferLoop{"7:example"}) - d2 := NewDecoder(&bufferLoop{"i42e"}) - - for i := 0; i < b.N; i++ { - d1.Decode() - d2.Decode() - } -} - -func TestUnmarshalLarge(t *testing.T) { - data := Dict{ - "k1": List{"a", "b", "c"}, - "k2": int64(42), - "k3": "val", - "k4": int64(-42), - } - - buf, _ := Marshal(data) - dec := NewDecoder(&bufferLoop{string(buf)}) - - got, err := dec.Decode() - assert.Nil(t, err, "decode should not fail") - assert.Equal(t, got, data, "encoding and decoding should equal the original value") -} - -func BenchmarkUnmarshalLarge(b *testing.B) { - data := map[string]interface{}{ - "k1": []string{"a", "b", "c"}, - "k2": 42, - "k3": "val", - "k4": uint(42), - } - - buf, _ := Marshal(data) - dec := NewDecoder(&bufferLoop{string(buf)}) - - for i := 0; i < b.N; i++ { - dec.Decode() - } -} diff --git 
a/pkg/bencode/encoder.go b/pkg/bencode/encoder.go deleted file mode 100644 index 85ee29e..0000000 --- a/pkg/bencode/encoder.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package bencode - -import ( - "bytes" - "fmt" - "io" - "strconv" - "time" -) - -// An Encoder writes bencoded objects to an output stream. -type Encoder struct { - w io.Writer -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{w: w} -} - -// Encode writes the bencoding of v to the stream. -func (enc *Encoder) Encode(v interface{}) error { - return marshal(enc.w, v) -} - -// Marshal returns the bencoding of v. -func Marshal(v interface{}) ([]byte, error) { - buf := &bytes.Buffer{} - err := marshal(buf, v) - return buf.Bytes(), err -} - -// Marshaler is the interface implemented by objects that can marshal -// themselves. 
-type Marshaler interface { - MarshalBencode() ([]byte, error) -} - -// marshal writes types bencoded to an io.Writer -func marshal(w io.Writer, data interface{}) error { - switch v := data.(type) { - case Marshaler: - bencoded, err := v.MarshalBencode() - if err != nil { - return err - } - _, err = w.Write(bencoded) - if err != nil { - return err - } - - case string: - marshalString(w, v) - - case int: - marshalInt(w, int64(v)) - - case uint: - marshalUint(w, uint64(v)) - - case int16: - marshalInt(w, int64(v)) - - case uint16: - marshalUint(w, uint64(v)) - - case int32: - marshalInt(w, int64(v)) - - case uint32: - marshalUint(w, uint64(v)) - - case int64: - marshalInt(w, v) - - case uint64: - marshalUint(w, v) - - case []byte: - marshalBytes(w, v) - - case time.Duration: // Assume seconds - marshalInt(w, int64(v/time.Second)) - - case Dict: - marshal(w, map[string]interface{}(v)) - - case []Dict: - w.Write([]byte{'l'}) - for _, val := range v { - err := marshal(w, val) - if err != nil { - return err - } - } - w.Write([]byte{'e'}) - - case map[string]interface{}: - w.Write([]byte{'d'}) - for key, val := range v { - marshalString(w, key) - err := marshal(w, val) - if err != nil { - return err - } - } - w.Write([]byte{'e'}) - - case []string: - w.Write([]byte{'l'}) - for _, val := range v { - err := marshal(w, val) - if err != nil { - return err - } - } - w.Write([]byte{'e'}) - - case List: - marshal(w, []interface{}(v)) - - case []interface{}: - w.Write([]byte{'l'}) - for _, val := range v { - err := marshal(w, val) - if err != nil { - return err - } - } - w.Write([]byte{'e'}) - - default: - return fmt.Errorf("attempted to marshal unsupported type:\n%t", v) - } - - return nil -} - -func marshalInt(w io.Writer, v int64) { - w.Write([]byte{'i'}) - w.Write([]byte(strconv.FormatInt(v, 10))) - w.Write([]byte{'e'}) -} - -func marshalUint(w io.Writer, v uint64) { - w.Write([]byte{'i'}) - w.Write([]byte(strconv.FormatUint(v, 10))) - w.Write([]byte{'e'}) -} - -func 
marshalBytes(w io.Writer, v []byte) { - w.Write([]byte(strconv.Itoa(len(v)))) - w.Write([]byte{':'}) - w.Write(v) -} - -func marshalString(w io.Writer, v string) { - marshalBytes(w, []byte(v)) -} diff --git a/pkg/bencode/encoder_test.go b/pkg/bencode/encoder_test.go deleted file mode 100644 index 61186d0..0000000 --- a/pkg/bencode/encoder_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package bencode - -import ( - "bytes" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var marshalTests = []struct { - input interface{} - expected []string -}{ - {int(42), []string{"i42e"}}, - {int(-42), []string{"i-42e"}}, - {uint(43), []string{"i43e"}}, - {int64(44), []string{"i44e"}}, - {uint64(45), []string{"i45e"}}, - {int16(44), []string{"i44e"}}, - {uint16(45), []string{"i45e"}}, - - {"example", []string{"7:example"}}, - {[]byte("example"), []string{"7:example"}}, - {30 * time.Minute, []string{"i1800e"}}, - - {[]string{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}}, - {[]interface{}{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}}, - {[]string{}, []string{"le"}}, - - {map[string]interface{}{"one": "aa", "two": "bb"}, []string{"d3:one2:aa3:two2:bbe", "d3:two2:bb3:one2:aae"}}, - {map[string]interface{}{}, []string{"de"}}, -} - -func TestMarshal(t *testing.T) { - for _, test := range marshalTests { - got, err := Marshal(test.input) - assert.Nil(t, err, "marshal should not fail") - assert.Contains(t, test.expected, string(got), "the marshaled result should be one of the expected permutations") - } -} - -func BenchmarkMarshalScalar(b *testing.B) { - buf := &bytes.Buffer{} - encoder := NewEncoder(buf) - - for i := 0; i < b.N; i++ { - encoder.Encode("test") - encoder.Encode(123) - } -} - -func BenchmarkMarshalLarge(b *testing.B) { - data := map[string]interface{}{ - "k1": 
[]string{"a", "b", "c"}, - "k2": 42, - "k3": "val", - "k4": uint(42), - } - - buf := &bytes.Buffer{} - encoder := NewEncoder(buf) - - for i := 0; i < b.N; i++ { - encoder.Encode(data) - } -} diff --git a/pkg/clientid/client_id.go b/pkg/clientid/client_id.go deleted file mode 100644 index 3f97009..0000000 --- a/pkg/clientid/client_id.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -// Package clientid implements the parsing of BitTorrent ClientIDs from -// BitTorrent PeerIDs. -package clientid - -// New returns the part of a PeerID that identifies a peer's client software. -func New(peerID string) (clientID string) { - length := len(peerID) - if length >= 6 { - if peerID[0] == '-' { - if length >= 7 { - clientID = peerID[1:7] - } - } else { - clientID = peerID[:6] - } - } - - return -} diff --git a/pkg/clientid/client_id_test.go b/pkg/clientid/client_id_test.go deleted file mode 100644 index 949ac66..0000000 --- a/pkg/clientid/client_id_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package clientid - -import "testing" - -func TestClientID(t *testing.T) { - var clientTable = []struct { - peerID string - clientID string - }{ - {"-AZ3034-6wfG2wk6wWLc", "AZ3034"}, - {"-AZ3042-6ozMq5q6Q3NX", "AZ3042"}, - {"-BS5820-oy4La2MWGEFj", "BS5820"}, - {"-AR6360-6oZyyMWoOOBe", "AR6360"}, - {"-AG2083-s1hiF8vGAAg0", "AG2083"}, - {"-AG3003-lEl2Mm4NEO4n", "AG3003"}, - {"-MR1100-00HS~T7*65rm", "MR1100"}, - {"-LK0140-ATIV~nbEQAMr", "LK0140"}, - {"-KT2210-347143496631", "KT2210"}, - {"-TR0960-6ep6svaa61r4", "TR0960"}, - {"-XX1150-dv220cotgj4d", "XX1150"}, - {"-AZ2504-192gwethivju", "AZ2504"}, - {"-KT4310-3L4UvarKuqIu", "KT4310"}, - {"-AZ2060-0xJQ02d4309O", "AZ2060"}, - {"-BD0300-2nkdf08Jd890", "BD0300"}, - {"-A~0010-a9mn9DFkj39J", "A~0010"}, - {"-UT2300-MNu93JKnm930", "UT2300"}, - {"-UT2300-KT4310KT4301", "UT2300"}, - - {"T03A0----f089kjsdf6e", "T03A0-"}, - {"S58B-----nKl34GoNb75", "S58B--"}, - {"M4-4-0--9aa757Efd5Bl", "M4-4-0"}, - - {"AZ2500BTeYUzyabAfo6U", "AZ2500"}, // BitTyrant - {"exbc0JdSklm834kj9Udf", "exbc0J"}, // Old BitComet - {"FUTB0L84j542mVc84jkd", "FUTB0L"}, // Alt BitComet - {"XBT054d-8602Jn83NnF9", "XBT054"}, // XBT - {"OP1011affbecbfabeefb", "OP1011"}, // Opera - {"-ML2.7.2-kgjjfkd9762", "ML2.7."}, // MLDonkey - {"-BOWA0C-SDLFJWEIORNM", "BOWA0C"}, // Bits on Wheels - {"Q1-0-0--dsn34DFn9083", "Q1-0-0"}, // Queen Bee - {"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt - {"346------SDFknl33408", "346---"}, // TorreTopia - {"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod - - {"", ""}, - {"-", ""}, - {"12345", ""}, - {"-12345", ""}, - {"123456", "123456"}, - {"-123456", "123456"}, - } - - for _, tt := range clientTable { - if parsedID := New(tt.peerID); parsedID != tt.clientID { - t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID) - } - } -} diff --git a/pkg/event/event.go b/pkg/event/event.go deleted file mode 100644 index c896cda..0000000 --- a/pkg/event/event.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2016 The Chihaya Authors. 
All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -// Package event implements type-level constraints for dealing with the events -// communicated via BitTorrent announce. -package event - -import ( - "errors" - "strings" -) - -// ErrUnknownEvent is returned when New fails to return an event. -var ErrUnknownEvent = errors.New("unknown event") - -// Event represents an event done by a BitTorrent client. -type Event uint8 - -const ( - // None is the event when a BitTorrent client announces due to time lapsed - // since the previous announce. - None Event = iota - - // Started is the event sent by a BitTorrent client when it joins a swarm. - Started - - // Stopped is the event sent by a BitTorrent client when it leaves a swarm. - Stopped - - // Completed is the event sent by a BitTorrent client when it finishes - // downloading all of the required chunks. - Completed -) - -var ( - eventToString = make(map[Event]string) - stringToEvent = make(map[string]Event) -) - -func init() { - eventToString[None] = "none" - eventToString[Started] = "started" - eventToString[Stopped] = "stopped" - eventToString[Completed] = "completed" - - stringToEvent[""] = None - - for k, v := range eventToString { - stringToEvent[v] = k - } -} - -// New returns the proper Event given a string. -func New(eventStr string) (Event, error) { - if e, ok := stringToEvent[strings.ToLower(eventStr)]; ok { - return e, nil - } - - return None, ErrUnknownEvent -} - -// String implements Stringer for an event. -func (e Event) String() string { - if name, ok := eventToString[e]; ok { - return name - } - - panic("event: event has no associated name") -} diff --git a/pkg/event/event_test.go b/pkg/event/event_test.go deleted file mode 100644 index d586f25..0000000 --- a/pkg/event/event_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package event - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNew(t *testing.T) { - var table = []struct { - data string - expected Event - expectedErr error - }{ - {"", None, nil}, - {"NONE", None, nil}, - {"none", None, nil}, - {"started", Started, nil}, - {"stopped", Stopped, nil}, - {"completed", Completed, nil}, - {"notAnEvent", None, ErrUnknownEvent}, - } - - for _, tt := range table { - got, err := New(tt.data) - assert.Equal(t, err, tt.expectedErr, "errors should equal the expected value") - assert.Equal(t, got, tt.expected, "events should equal the expected value") - } -} diff --git a/pkg/random/peer.go b/pkg/random/peer.go deleted file mode 100644 index c58a11f..0000000 --- a/pkg/random/peer.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package random - -import ( - "math/rand" - "net" - - "github.com/chihaya/chihaya" -) - -// Peer generates a random chihaya.Peer. -// -// prefix is the prefix to use for the peer ID. If len(prefix) > 20, it will be -// truncated to 20 characters. If len(prefix) < 20, it will be padded with an -// alphanumeric random string to have 20 characters. -// -// v6 indicates whether an IPv6 address should be generated. -// Regardless of the length of the generated IP address, its bytes will have -// values in [1,254]. -// -// minPort and maxPort describe the range for the randomly generated port, where -// minPort <= port < maxPort. -// minPort and maxPort will be checked and altered so that -// 1 <= minPort <= maxPort <= 65536. -// If minPort == maxPort, port will be set to minPort. 
-func Peer(r *rand.Rand, prefix string, v6 bool, minPort, maxPort int) chihaya.Peer { - var ( - port uint16 - ip net.IP - ) - - if minPort <= 0 { - minPort = 1 - } - if maxPort > 65536 { - maxPort = 65536 - } - if maxPort < minPort { - maxPort = minPort - } - if len(prefix) > 20 { - prefix = prefix[:20] - } - - if minPort == maxPort { - port = uint16(minPort) - } else { - port = uint16(r.Int63()%int64(maxPort-minPort)) + uint16(minPort) - } - - if v6 { - b := make([]byte, 16) - ip = net.IP(b) - } else { - b := make([]byte, 4) - ip = net.IP(b) - } - - for i := range ip { - b := r.Intn(254) + 1 - ip[i] = byte(b) - } - - prefix = prefix + AlphaNumericString(r, 20-len(prefix)) - - return chihaya.Peer{ - ID: chihaya.PeerIDFromString(prefix), - Port: port, - IP: ip, - } -} diff --git a/pkg/random/peer_test.go b/pkg/random/peer_test.go deleted file mode 100644 index 636f42d..0000000 --- a/pkg/random/peer_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package random - -import ( - "math/rand" - "net" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPeer(t *testing.T) { - r := rand.New(rand.NewSource(0)) - - for i := 0; i < 100; i++ { - minPort := 2000 - maxPort := 2010 - p := Peer(r, "", false, minPort, maxPort) - assert.Equal(t, 20, len(p.ID)) - assert.True(t, p.Port >= uint16(minPort) && p.Port < uint16(maxPort)) - assert.NotNil(t, p.IP.To4()) - } - - for i := 0; i < 100; i++ { - minPort := 2000 - maxPort := 2010 - p := Peer(r, "", true, minPort, maxPort) - assert.Equal(t, 20, len(p.ID)) - assert.True(t, p.Port >= uint16(minPort) && p.Port < uint16(maxPort)) - assert.True(t, len(p.IP) == net.IPv6len) - } - - p := Peer(r, "abcdefghijklmnopqrst", false, 2000, 2000) - assert.Equal(t, "abcdefghijklmnopqrst", string(p.ID[:])) - assert.Equal(t, uint16(2000), p.Port) - - p = Peer(r, "abcdefghijklmnopqrstUVWXYZ", true, -10, -5) - assert.Equal(t, "abcdefghijklmnopqrst", string(p.ID[:])) - assert.True(t, p.Port >= uint16(1) && p.Port <= uint16(65535)) -} diff --git a/pkg/random/string.go b/pkg/random/string.go deleted file mode 100644 index bdfa9f2..0000000 --- a/pkg/random/string.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package random - -import "math/rand" - -// AlphaNumeric is an alphabet with all lower- and uppercase letters and -// numbers. -const AlphaNumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - -// AlphaNumericString is a shorthand for String(r, l, AlphaNumeric). -func AlphaNumericString(r rand.Source, l int) string { - return String(r, l, AlphaNumeric) -} - -// String generates a random string of length l, containing only runes from -// the alphabet using the random source r. 
-func String(r rand.Source, l int, alphabet string) string { - b := make([]byte, l) - for i := range b { - b[i] = alphabet[r.Int63()%int64(len(alphabet))] - } - return string(b) -} diff --git a/pkg/random/string_test.go b/pkg/random/string_test.go deleted file mode 100644 index e2bd0b2..0000000 --- a/pkg/random/string_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package random - -import ( - "math/rand" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAlphaNumericString(t *testing.T) { - r := rand.NewSource(0) - - s := AlphaNumericString(r, 0) - assert.Equal(t, 0, len(s)) - - s = AlphaNumericString(r, 10) - assert.Equal(t, 10, len(s)) - - for i := 0; i < 100; i++ { - s := AlphaNumericString(r, 10) - for _, c := range s { - assert.True(t, strings.Contains(AlphaNumeric, string(c))) - } - } -} diff --git a/pkg/stopper/stopper.go b/pkg/stopper/stopper.go deleted file mode 100644 index ddf1a21..0000000 --- a/pkg/stopper/stopper.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package stopper - -import ( - "sync" -) - -// AlreadyStopped is a closed error channel to be used by StopperFuncs when -// an element was already stopped. -var AlreadyStopped <-chan error - -// AlreadyStoppedFunc is a StopperFunc that returns AlreadyStopped. -var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped } - -func init() { - closeMe := make(chan error) - close(closeMe) - AlreadyStopped = closeMe -} - -// Stopper is an interface that allows a clean shutdown. -type Stopper interface { - // Stop returns a channel that indicates whether the stop was - // successful. 
- // The channel can either return one error or be closed. Closing the - // channel signals a clean shutdown. - // The Stop function should return immediately and perform the actual - // shutdown in a seperate goroutine. - Stop() <-chan error -} - -// StopGroup is a group that can be stopped. -type StopGroup struct { - stoppables []StopperFunc - stoppablesLock sync.Mutex -} - -// StopperFunc is a function that can be used to provide a clean shutdown. -type StopperFunc func() <-chan error - -// NewStopGroup creates a new StopGroup. -func NewStopGroup() *StopGroup { - return &StopGroup{ - stoppables: make([]StopperFunc, 0), - } -} - -// Add adds a Stopper to the StopGroup. -// On the next call to Stop(), the Stopper will be stopped. -func (cg *StopGroup) Add(toAdd Stopper) { - cg.stoppablesLock.Lock() - defer cg.stoppablesLock.Unlock() - - cg.stoppables = append(cg.stoppables, toAdd.Stop) -} - -// AddFunc adds a StopperFunc to the StopGroup. -// On the next call to Stop(), the StopperFunc will be called. -func (cg *StopGroup) AddFunc(toAddFunc StopperFunc) { - cg.stoppablesLock.Lock() - defer cg.stoppablesLock.Unlock() - - cg.stoppables = append(cg.stoppables, toAddFunc) -} - -// Stop stops all members of the StopGroup. -// Stopping will be done in a concurrent fashion. -// The slice of errors returned contains all errors returned by stopping the -// members. 
-func (cg *StopGroup) Stop() []error { - cg.stoppablesLock.Lock() - defer cg.stoppablesLock.Unlock() - - var errors []error - whenDone := make(chan struct{}) - - waitChannels := make([]<-chan error, 0, len(cg.stoppables)) - for _, toStop := range cg.stoppables { - waitFor := toStop() - if waitFor == nil { - panic("received a nil chan from Stop") - } - waitChannels = append(waitChannels, waitFor) - } - - go func() { - for _, waitForMe := range waitChannels { - err := <-waitForMe - if err != nil { - errors = append(errors, err) - } - } - close(whenDone) - }() - - <-whenDone - return errors -} diff --git a/server/http/config.go b/server/http/config.go deleted file mode 100644 index 9f14193..0000000 --- a/server/http/config.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package http - -import ( - "time" - - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" -) - -type httpConfig struct { - Addr string `yaml:"addr"` - RequestTimeout time.Duration `yaml:"request_timeout"` - ReadTimeout time.Duration `yaml:"read_timeout"` - WriteTimeout time.Duration `yaml:"write_timeout"` - AllowIPSpoofing bool `yaml:"allow_ip_spoofing"` - DualStackedPeers bool `yaml:"dual_stacked_peers"` - RealIPHeader string `yaml:"real_ip_header"` -} - -func newHTTPConfig(srvcfg *chihaya.ServerConfig) (*httpConfig, error) { - bytes, err := yaml.Marshal(srvcfg.Config) - if err != nil { - return nil, err - } - - var cfg httpConfig - err = yaml.Unmarshal(bytes, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} diff --git a/server/http/query/query.go b/server/http/query/query.go deleted file mode 100644 index e8fe6e8..0000000 --- a/server/http/query/query.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -// Package query implements a simple, fast URL parser designed to be used to -// parse parameters sent from BitTorrent clients. The last value of a key wins, -// except for they key "info_hash". -package query - -import ( - "errors" - "net/url" - "strconv" - "strings" - - "github.com/chihaya/chihaya" -) - -// ErrKeyNotFound is returned when a provided key has no value associated with -// it. -var ErrKeyNotFound = errors.New("query: value for the provided key does not exist") - -// ErrInvalidInfohash is returned when parsing a query encounters an infohash -// with invalid length. -var ErrInvalidInfohash = errors.New("query: invalid infohash") - -// Query represents a parsed URL.Query. -type Query struct { - query string - params map[string]string - infoHashes []chihaya.InfoHash -} - -// New parses a raw URL query. -func New(query string) (*Query, error) { - var ( - keyStart, keyEnd int - valStart, valEnd int - - onKey = true - - q = &Query{ - query: query, - infoHashes: nil, - params: make(map[string]string), - } - ) - - for i, length := 0, len(query); i < length; i++ { - separator := query[i] == '&' || query[i] == ';' || query[i] == '?' 
- last := i == length-1 - - if separator || last { - if onKey && !last { - keyStart = i + 1 - continue - } - - if last && !separator && !onKey { - valEnd = i - } - - keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1]) - if err != nil { - return nil, err - } - - var valStr string - - if valEnd > 0 { - valStr, err = url.QueryUnescape(query[valStart : valEnd+1]) - if err != nil { - return nil, err - } - } - - if keyStr == "info_hash" { - if len(valStr) != 20 { - return nil, ErrInvalidInfohash - } - q.infoHashes = append(q.infoHashes, chihaya.InfoHashFromString(valStr)) - } else { - q.params[strings.ToLower(keyStr)] = valStr - } - - valEnd = 0 - onKey = true - keyStart = i + 1 - - } else if query[i] == '=' { - onKey = false - valStart = i + 1 - valEnd = 0 - } else if onKey { - keyEnd = i - } else { - valEnd = i - } - } - - return q, nil -} - -// String returns a string parsed from a query. Every key can be returned as a -// string because they are encoded in the URL as strings. -func (q *Query) String(key string) (string, error) { - val, exists := q.params[key] - if !exists { - return "", ErrKeyNotFound - } - return val, nil -} - -// Uint64 returns a uint parsed from a query. After being called, it is safe to -// cast the uint64 to your desired length. -func (q *Query) Uint64(key string) (uint64, error) { - str, exists := q.params[key] - if !exists { - return 0, ErrKeyNotFound - } - - val, err := strconv.ParseUint(str, 10, 64) - if err != nil { - return 0, err - } - - return val, nil -} - -// InfoHashes returns a list of requested infohashes. -func (q *Query) InfoHashes() []chihaya.InfoHash { - return q.infoHashes -} diff --git a/server/http/query/query_test.go b/server/http/query/query_test.go deleted file mode 100644 index f421f1d..0000000 --- a/server/http/query/query_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package query - -import ( - "net/url" - "testing" -) - -var ( - baseAddr = "https://www.subdomain.tracker.com:80/" - testInfoHash = "01234567890123456789" - testPeerID = "-TEST01-6wfG2wk6wWLc" - - ValidAnnounceArguments = []url.Values{ - url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}}, - url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}}, - url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}}, - url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}}, - url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}}, - url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}}, - url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}}, - url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}}, - url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}}, - url.Values{"peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}}, - url.Values{"peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}}, - url.Values{"peer_id": {""}, "compact": {""}}, - } - - InvalidQueries = []string{ - baseAddr + "announce/?" 
+ "info_hash=%0%a", - } -) - -func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool { - if len(boxed) != len(unboxed) { - return false - } - - for mapKey, mapVal := range boxed { - // Always expect box to hold only one element - if len(mapVal) != 1 || mapVal[0] != unboxed[mapKey] { - return false - } - } - - return true -} - -func TestValidQueries(t *testing.T) { - for parseIndex, parseVal := range ValidAnnounceArguments { - parsedQueryObj, err := New(baseAddr + "announce/?" + parseVal.Encode()) - if err != nil { - t.Error(err) - } - - if !mapArrayEqual(parseVal, parsedQueryObj.params) { - t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Recieved=%v\n", parseIndex, parseVal, parsedQueryObj.params) - } - } -} - -func TestInvalidQueries(t *testing.T) { - for parseIndex, parseStr := range InvalidQueries { - parsedQueryObj, err := New(parseStr) - if err == nil { - t.Error("Should have produced error", parseIndex) - } - - if parsedQueryObj != nil { - t.Error("Should be nil after error", parsedQueryObj, parseIndex) - } - } -} - -func BenchmarkParseQuery(b *testing.B) { - for bCount := 0; bCount < b.N; bCount++ { - for parseIndex, parseStr := range ValidAnnounceArguments { - parsedQueryObj, err := New(baseAddr + "announce/?" + parseStr.Encode()) - if err != nil { - b.Error(err, parseIndex) - b.Log(parsedQueryObj) - } - } - } -} - -func BenchmarkURLParseQuery(b *testing.B) { - for bCount := 0; bCount < b.N; bCount++ { - for parseIndex, parseStr := range ValidAnnounceArguments { - parsedQueryObj, err := url.ParseQuery(baseAddr + "announce/?" + parseStr.Encode()) - if err != nil { - b.Error(err, parseIndex) - b.Log(parsedQueryObj) - } - } - } -} diff --git a/server/http/request.go b/server/http/request.go deleted file mode 100644 index 5d17aad..0000000 --- a/server/http/request.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package http - -import ( - "net" - "net/http" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/event" - "github.com/chihaya/chihaya/server/http/query" - "github.com/chihaya/chihaya/tracker" -) - -func announceRequest(r *http.Request, cfg *httpConfig) (*chihaya.AnnounceRequest, error) { - q, err := query.New(r.URL.RawQuery) - if err != nil { - return nil, err - } - - request := &chihaya.AnnounceRequest{Params: q} - - eventStr, err := q.String("event") - if err == query.ErrKeyNotFound { - eventStr = "" - } else if err != nil { - return nil, tracker.ClientError("failed to parse parameter: event") - } - request.Event, err = event.New(eventStr) - if err != nil { - return nil, tracker.ClientError("failed to provide valid client event") - } - - compactStr, _ := q.String("compact") - request.Compact = compactStr != "" && compactStr != "0" - - infoHashes := q.InfoHashes() - if len(infoHashes) < 1 { - return nil, tracker.ClientError("no info_hash parameter supplied") - } - if len(infoHashes) > 1 { - return nil, tracker.ClientError("multiple info_hash parameters supplied") - } - request.InfoHash = infoHashes[0] - - peerID, err := q.String("peer_id") - if err != nil { - return nil, tracker.ClientError("failed to parse parameter: peer_id") - } - if len(peerID) != 20 { - return nil, tracker.ClientError("failed to provide valid peer_id") - } - request.PeerID = chihaya.PeerIDFromString(peerID) - - request.Left, err = q.Uint64("left") - if err != nil { - return nil, tracker.ClientError("failed to parse parameter: left") - } - - request.Downloaded, err = q.Uint64("downloaded") - if err != nil { - return nil, tracker.ClientError("failed to parse parameter: downloaded") - } - - request.Uploaded, err = q.Uint64("uploaded") - if err != nil { - return nil, tracker.ClientError("failed to parse parameter: uploaded") - } - - numwant, _ := q.Uint64("numwant") 
- request.NumWant = int32(numwant) - - port, err := q.Uint64("port") - if err != nil { - return nil, tracker.ClientError("failed to parse parameter: port") - } - request.Port = uint16(port) - - v4, v6, err := requestedIP(q, r, cfg) - if err != nil { - return nil, tracker.ClientError("failed to parse remote IP") - } - request.IPv4 = v4 - request.IPv6 = v6 - - return request, nil -} - -func scrapeRequest(r *http.Request, cfg *httpConfig) (*chihaya.ScrapeRequest, error) { - q, err := query.New(r.URL.RawQuery) - if err != nil { - return nil, err - } - - infoHashes := q.InfoHashes() - if len(infoHashes) < 1 { - return nil, tracker.ClientError("no info_hash parameter supplied") - } - - request := &chihaya.ScrapeRequest{ - InfoHashes: infoHashes, - Params: q, - } - - return request, nil -} - -// requestedIP returns the IP address for a request. If there are multiple in -// the request, one IPv4 and one IPv6 will be returned. -func requestedIP(p chihaya.Params, r *http.Request, cfg *httpConfig) (v4, v6 net.IP, err error) { - var done bool - - if cfg.AllowIPSpoofing { - if str, e := p.String("ip"); e == nil { - if v4, v6, done = getIPs(str, v4, v6, cfg); done { - return - } - } - - if str, e := p.String("ipv4"); e == nil { - if v4, v6, done = getIPs(str, v4, v6, cfg); done { - return - } - } - - if str, e := p.String("ipv6"); e == nil { - if v4, v6, done = getIPs(str, v4, v6, cfg); done { - return - } - } - } - - if cfg.RealIPHeader != "" { - if xRealIPs, ok := r.Header[cfg.RealIPHeader]; ok { - if v4, v6, done = getIPs(string(xRealIPs[0]), v4, v6, cfg); done { - return - } - } - } else { - if r.RemoteAddr == "" && v4 == nil { - if v4, v6, done = getIPs("127.0.0.1", v4, v6, cfg); done { - return - } - } - - if v4, v6, done = getIPs(r.RemoteAddr, v4, v6, cfg); done { - return - } - } - - if v4 == nil && v6 == nil { - err = tracker.ClientError("failed to parse IP address") - } - - return -} - -func getIPs(ipstr string, ipv4, ipv6 net.IP, cfg *httpConfig) (net.IP, net.IP, 
bool) { - host, _, err := net.SplitHostPort(ipstr) - if err != nil { - host = ipstr - } - - if ip := net.ParseIP(host); ip != nil { - ipTo4 := ip.To4() - if ipv4 == nil && ipTo4 != nil { - ipv4 = ipTo4 - } else if ipv6 == nil && ipTo4 == nil { - ipv6 = ip - } - } - - var done bool - if cfg.DualStackedPeers { - done = ipv4 != nil && ipv6 != nil - } else { - done = ipv4 != nil || ipv6 != nil - } - - return ipv4, ipv6, done -} diff --git a/server/http/server.go b/server/http/server.go deleted file mode 100644 index bcd9ac1..0000000 --- a/server/http/server.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package http - -import ( - "errors" - "log" - "net" - "net/http" - - "github.com/julienschmidt/httprouter" - "github.com/tylerb/graceful" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - server.Register("http", constructor) -} - -func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) { - cfg, err := newHTTPConfig(srvcfg) - if err != nil { - return nil, errors.New("http: invalid config: " + err.Error()) - } - - return &httpServer{ - cfg: cfg, - tkr: tkr, - }, nil -} - -type httpServer struct { - cfg *httpConfig - tkr *tracker.Tracker - grace *graceful.Server -} - -// Start runs the server and blocks until it has exited. -// -// It panics if the server exits unexpectedly. 
-func (s *httpServer) Start() { - s.grace = &graceful.Server{ - Server: &http.Server{ - Addr: s.cfg.Addr, - Handler: s.routes(), - ReadTimeout: s.cfg.ReadTimeout, - WriteTimeout: s.cfg.WriteTimeout, - }, - Timeout: s.cfg.RequestTimeout, - NoSignalHandling: true, - ConnState: func(conn net.Conn, state http.ConnState) { - switch state { - case http.StateNew: - //stats.RecordEvent(stats.AcceptedConnection) - - case http.StateClosed: - //stats.RecordEvent(stats.ClosedConnection) - - case http.StateHijacked: - panic("http: connection impossibly hijacked") - - // Ignore the following cases. - case http.StateActive, http.StateIdle: - - default: - panic("http: connection transitioned to unknown state") - } - }, - } - s.grace.SetKeepAlivesEnabled(false) - - if err := s.grace.ListenAndServe(); err != nil { - if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") { - log.Printf("Failed to gracefully run HTTP server: %s", err.Error()) - panic(err) - } - } - - log.Println("HTTP server shut down cleanly") -} - -// Stop stops the server and blocks until the server has exited. 
-func (s *httpServer) Stop() { - s.grace.Stop(s.grace.Timeout) - <-s.grace.StopChan() -} - -func (s *httpServer) routes() *httprouter.Router { - r := httprouter.New() - r.GET("/announce", s.serveAnnounce) - r.GET("/scrape", s.serveScrape) - return r -} - -func (s *httpServer) serveAnnounce(w http.ResponseWriter, r *http.Request, p httprouter.Params) { - req, err := announceRequest(r, s.cfg) - if err != nil { - writeError(w, err) - return - } - - resp, err := s.tkr.HandleAnnounce(req) - if err != nil { - writeError(w, err) - return - } - - err = writeAnnounceResponse(w, resp) - if err != nil { - log.Println("error serializing response", err) - } -} - -func (s *httpServer) serveScrape(w http.ResponseWriter, r *http.Request, p httprouter.Params) { - req, err := scrapeRequest(r, s.cfg) - if err != nil { - writeError(w, err) - return - } - - resp, err := s.tkr.HandleScrape(req) - if err != nil { - writeError(w, err) - return - } - - writeScrapeResponse(w, resp) -} diff --git a/server/http/writer.go b/server/http/writer.go deleted file mode 100644 index 5de6c3d..0000000 --- a/server/http/writer.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package http - -import ( - "net/http" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/bencode" - "github.com/chihaya/chihaya/tracker" -) - -func writeError(w http.ResponseWriter, err error) error { - message := "internal server error" - if _, clientErr := err.(tracker.ClientError); clientErr { - message = err.Error() - } - - w.WriteHeader(http.StatusOK) - return bencode.NewEncoder(w).Encode(bencode.Dict{ - "failure reason": message, - }) -} - -func writeAnnounceResponse(w http.ResponseWriter, resp *chihaya.AnnounceResponse) error { - bdict := bencode.Dict{ - "complete": resp.Complete, - "incomplete": resp.Incomplete, - "interval": resp.Interval, - "min interval": resp.MinInterval, - } - - // Add the peers to the dictionary in the compact format. - if resp.Compact { - var IPv4CompactDict, IPv6CompactDict []byte - - // Add the IPv4 peers to the dictionary. - for _, peer := range resp.IPv4Peers { - IPv4CompactDict = append(IPv4CompactDict, compact(peer)...) - } - if len(IPv4CompactDict) > 0 { - bdict["peers"] = IPv4CompactDict - } - - // Add the IPv6 peers to the dictionary. - for _, peer := range resp.IPv6Peers { - IPv6CompactDict = append(IPv6CompactDict, compact(peer)...) - } - if len(IPv6CompactDict) > 0 { - bdict["peers6"] = IPv6CompactDict - } - - return bencode.NewEncoder(w).Encode(bdict) - } - - // Add the peers to the dictionary. 
- var peers []bencode.Dict - for _, peer := range resp.IPv4Peers { - peers = append(peers, dict(peer)) - } - for _, peer := range resp.IPv6Peers { - peers = append(peers, dict(peer)) - } - bdict["peers"] = peers - - return bencode.NewEncoder(w).Encode(bdict) -} - -func writeScrapeResponse(w http.ResponseWriter, resp *chihaya.ScrapeResponse) error { - filesDict := bencode.NewDict() - for infohash, scrape := range resp.Files { - filesDict[string(infohash[:])] = bencode.Dict{ - "complete": scrape.Complete, - "incomplete": scrape.Incomplete, - } - } - - return bencode.NewEncoder(w).Encode(bencode.Dict{ - "files": filesDict, - }) -} - -func compact(peer chihaya.Peer) (buf []byte) { - buf = []byte(peer.IP) - buf = append(buf, byte(peer.Port>>8)) - buf = append(buf, byte(peer.Port&0xff)) - return -} - -func dict(peer chihaya.Peer) bencode.Dict { - return bencode.Dict{ - "peer id": string(peer.ID[:]), - "ip": peer.IP.String(), - "port": peer.Port, - } -} diff --git a/server/http/writer_test.go b/server/http/writer_test.go deleted file mode 100644 index 00c345c..0000000 --- a/server/http/writer_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package http - -import ( - "net/http/httptest" - "testing" - - "github.com/chihaya/chihaya/tracker" - "github.com/stretchr/testify/assert" -) - -func TestWriteError(t *testing.T) { - var table = []struct { - reason, expected string - }{ - {"hello world", "d14:failure reason11:hello worlde"}, - {"what's up", "d14:failure reason9:what's upe"}, - } - - for _, tt := range table { - r := httptest.NewRecorder() - err := writeError(r, tracker.ClientError(tt.reason)) - assert.Nil(t, err) - assert.Equal(t, r.Body.String(), tt.expected) - } -} - -func TestWriteStatus(t *testing.T) { - r := httptest.NewRecorder() - err := writeError(r, tracker.ClientError("something is missing")) - assert.Nil(t, err) - assert.Equal(t, r.Body.String(), "d14:failure reason20:something is missinge") -} diff --git a/server/pool.go b/server/pool.go deleted file mode 100644 index 9aa368b..0000000 --- a/server/pool.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package server - -import ( - "sync" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/tracker" -) - -// Pool represents a running pool of servers. -type Pool struct { - servers []Server - wg sync.WaitGroup -} - -// StartPool creates a new pool of servers specified by the provided -// configuration and runs them. -func StartPool(cfgs []chihaya.ServerConfig, tkr *tracker.Tracker) (*Pool, error) { - var toReturn Pool - - for _, cfg := range cfgs { - srv, err := New(&cfg, tkr) - if err != nil { - return nil, err - } - - toReturn.wg.Add(1) - go func(srv Server) { - defer toReturn.wg.Done() - srv.Start() - }(srv) - - toReturn.servers = append(toReturn.servers, srv) - } - - return &toReturn, nil -} - -// Stop safely shuts down a pool of servers. 
-func (p *Pool) Stop() { - for _, srv := range p.servers { - srv.Stop() - } - p.wg.Wait() -} diff --git a/server/prometheus/prometheus.go b/server/prometheus/prometheus.go deleted file mode 100644 index 1dbf3f1..0000000 --- a/server/prometheus/prometheus.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -// Package prometheus implements a chihaya Server for serving metrics to -// Prometheus. -package prometheus - -import ( - "errors" - "log" - "net" - "net/http" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/tylerb/graceful" - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - server.Register("prometheus", constructor) -} - -func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) { - cfg, err := NewServerConfig(srvcfg) - if err != nil { - return nil, errors.New("prometheus: invalid config: " + err.Error()) - } - - return &Server{ - cfg: cfg, - }, nil -} - -// ServerConfig represents the configuration options for a -// PrometheusServer. -type ServerConfig struct { - Addr string `yaml:"addr"` - ShutdownTimeout time.Duration `yaml:"shutdown_timeout"` - ReadTimeout time.Duration `yaml:"read_timeout"` - WriteTimeout time.Duration `yaml:"write_timeout"` -} - -// NewServerConfig marshals a chihaya.ServerConfig and unmarshals it -// into a more specific prometheus ServerConfig. -func NewServerConfig(srvcfg *chihaya.ServerConfig) (*ServerConfig, error) { - bytes, err := yaml.Marshal(srvcfg.Config) - if err != nil { - return nil, err - } - - var cfg ServerConfig - err = yaml.Unmarshal(bytes, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} - -// Server implements a chihaya Server for serving metrics to Prometheus. 
-type Server struct { - cfg *ServerConfig - grace *graceful.Server -} - -var _ server.Server = &Server{} - -// Start starts the prometheus server and blocks until it exits. -// -// It panics if the server exits unexpectedly. -func (s *Server) Start() { - s.grace = &graceful.Server{ - Server: &http.Server{ - Addr: s.cfg.Addr, - Handler: prometheus.Handler(), - ReadTimeout: s.cfg.ReadTimeout, - WriteTimeout: s.cfg.WriteTimeout, - }, - Timeout: s.cfg.ShutdownTimeout, - NoSignalHandling: true, - } - - if err := s.grace.ListenAndServe(); err != nil { - if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") { - log.Printf("Failed to gracefully run Prometheus server: %s", err.Error()) - panic(err) - } - } - - log.Println("Prometheus server shut down cleanly") -} - -// Stop stops the prometheus server and blocks until it exits. -func (s *Server) Stop() { - s.grace.Stop(s.cfg.ShutdownTimeout) - <-s.grace.StopChan() -} diff --git a/server/server.go b/server/server.go deleted file mode 100644 index 1adcb16..0000000 --- a/server/server.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -// Package server implements an abstraction over servers meant to be run . -// alongside a tracker. -// -// Servers may be implementations of different transport protocols or have their -// own custom behavior. -package server - -import ( - "fmt" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/tracker" -) - -var constructors = make(map[string]Constructor) - -// Constructor is a function that creates a new Server. -type Constructor func(*chihaya.ServerConfig, *tracker.Tracker) (Server, error) - -// Register makes a Constructor available by the provided name. -// -// If this function is called twice with the same name or if the Constructor is -// nil, it panics. 
-func Register(name string, con Constructor) { - if con == nil { - panic("server: could not register nil Constructor") - } - if _, dup := constructors[name]; dup { - panic("server: could not register duplicate Constructor: " + name) - } - constructors[name] = con -} - -// New creates a Server specified by a configuration. -func New(cfg *chihaya.ServerConfig, tkr *tracker.Tracker) (Server, error) { - con, ok := constructors[cfg.Name] - if !ok { - return nil, fmt.Errorf("server: unknown Constructor %q (forgotten import?)", cfg.Name) - } - return con(cfg, tkr) -} - -// Server represents one instance of a server accessing the tracker. -type Server interface { - // Start starts a server and blocks until the server exits. - // - // It should panic if the server exits unexpectedly. - Start() - - // Stop stops a server and blocks until the server exits. - Stop() -} diff --git a/server/store/README.md b/server/store/README.md deleted file mode 100644 index 05ff3ff..0000000 --- a/server/store/README.md +++ /dev/null @@ -1,43 +0,0 @@ -## The store Package - -The `store` package offers a storage interface and middlewares sufficient to run a public tracker based on it. - -### Architecture - -The store consists of three parts: -- A set of interfaces, tests based on these interfaces and the store logic, unifying these interfaces into the store -- Drivers, implementing the store interfaces and -- Middleware that depends on the store - -The store interfaces are `IPStore`, `PeerStore` and `StringStore`. -During runtime, each of them will be implemented by a driver. -Even though all different drivers for one interface provide the same functionality, their behaviour can be very different. -For example: The memory implementation keeps all state in-memory - this is very fast, but not persistent, it loses its state on every restart. -A database-backed driver on the other hand could provide persistence, at the cost of performance. 
- -The pluggable design of Chihaya allows for the different interfaces to use different drivers. -For example: A typical use case of the `StringStore` is to provide blacklists or whitelists for infohashes/client IDs/.... -You'd typically want these lists to be persistent, so you'd choose a driver that provides persistence. -The `PeerStore` on the other hand rarely needs to be persistent, as all peer state will be restored after one announce interval. -You'd therefore typically choose a very performant but non-persistent driver for the `PeerStore`. - -### Testing - -The main store package also contains a set of tests and benchmarks for drivers. -Both use the store interfaces and can work with any driver that implements these interfaces. -The tests verify that the driver behaves as specified by the interface and its documentation. -The benchmarks can be used to compare performance of a wide range of operations on the interfaces. - -This makes it very easy to implement a new driver: -All functions that are part of the store interfaces can be tested easily with the tests that come with the store package. -Generally the memory implementation can be used as a guideline for implementing new drivers. - -Both benchmarks and tests require a clean state to work correctly. -All of the test and benchmark functions therefore take a `*DriverConfig` as a parameter, this should be used to configure the driver in a way that it provides a clean state for every test or benchmark. -For example: Imagine a file-based driver that achieves persistence by storing its state in a file. -It must then be possible to provide the location of this file in the `'DriverConfig`, so that every different benchmark gets to work with a new file. - -Most benchmarks come in two flavors: The "normal" version and the "1K" version. -A normal benchmark uses the same value over and over again to benchmark one operation. 
-A 1K benchmark uses a different value from a set of 1000 values for every iteration, this can show caching effects, if the driver uses them. -The 1K benchmarks require a little more computation to select the values and thus typically yield slightly lower results even for a "perfect" cache, i.e. the memory implementation. diff --git a/server/store/ip_store.go b/server/store/ip_store.go deleted file mode 100644 index 7d7fc93..0000000 --- a/server/store/ip_store.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package store - -import ( - "fmt" - "net" - - "github.com/chihaya/chihaya/pkg/stopper" -) - -var ipStoreDrivers = make(map[string]IPStoreDriver) - -// IPStore represents an interface for manipulating IPs and IP ranges. -type IPStore interface { - // AddIP adds a single IP address to the IPStore. - AddIP(ip net.IP) error - - // AddNetwork adds a range of IP addresses, denoted by a network in CIDR - // notation, to the IPStore. - AddNetwork(network string) error - - // HasIP returns whether the given IP address is contained in the IPStore - // or belongs to any of the stored networks. - HasIP(ip net.IP) (bool, error) - - // HasAnyIP returns whether any of the given IP addresses are contained - // in the IPStore or belongs to any of the stored networks. - HasAnyIP(ips []net.IP) (bool, error) - - // HassAllIPs returns whether all of the given IP addresses are - // contained in the IPStore or belongs to any of the stored networks. - HasAllIPs(ips []net.IP) (bool, error) - - // RemoveIP removes a single IP address from the IPStore. - // - // This wil not remove the given address from any networks it belongs to - // that are stored in the IPStore. - // - // Returns ErrResourceDoesNotExist if the given IP address is not - // contained in the store. 
- RemoveIP(ip net.IP) error - - // RemoveNetwork removes a range of IP addresses that was previously - // added through AddNetwork. - // - // The given network must not, as a string, match the previously added - // network, but rather denote the same network, e.g. if the network - // 192.168.22.255/24 was added, removing the network 192.168.22.123/24 - // will succeed. - // - // Returns ErrResourceDoesNotExist if the given network is not - // contained in the store. - RemoveNetwork(network string) error - - // Stopper provides the Stop method that stops the IPStore. - // Stop should shut down the IPStore in a separate goroutine and send - // an error to the channel if the shutdown failed. If the shutdown - // was successful, the channel is to be closed. - stopper.Stopper -} - -// IPStoreDriver represents an interface for creating a handle to the -// storage of IPs. -type IPStoreDriver interface { - New(*DriverConfig) (IPStore, error) -} - -// RegisterIPStoreDriver makes a driver available by the provided name. -// -// If this function is called twice with the same name or if the driver is nil, -// it panics. -func RegisterIPStoreDriver(name string, driver IPStoreDriver) { - if driver == nil { - panic("store: could not register nil IPStoreDriver") - } - if _, dup := ipStoreDrivers[name]; dup { - panic("store: could not register duplicate IPStoreDriver: " + name) - } - ipStoreDrivers[name] = driver -} - -// OpenIPStore returns an IPStore specified by a configuration. -func OpenIPStore(cfg *DriverConfig) (IPStore, error) { - driver, ok := ipStoreDrivers[cfg.Name] - if !ok { - return nil, fmt.Errorf("store: unknown IPStoreDriver %q (forgotten import?)", cfg) - } - - return driver.New(cfg) -} diff --git a/server/store/memory/ip_store.go b/server/store/memory/ip_store.go deleted file mode 100644 index 8b01e66..0000000 --- a/server/store/memory/ip_store.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package memory - -import ( - "net" - "sync" - - "github.com/mrd0ll4r/netmatch" - - "github.com/chihaya/chihaya/server/store" -) - -func init() { - store.RegisterIPStoreDriver("memory", &ipStoreDriver{}) -} - -type ipStoreDriver struct{} - -func (d *ipStoreDriver) New(_ *store.DriverConfig) (store.IPStore, error) { - return &ipStore{ - ips: make(map[[16]byte]struct{}), - networks: netmatch.New(), - closed: make(chan struct{}), - }, nil -} - -// ipStore implements store.IPStore using an in-memory map of byte arrays and -// a trie-like structure. -type ipStore struct { - ips map[[16]byte]struct{} - networks *netmatch.Trie - closed chan struct{} - sync.RWMutex -} - -var ( - _ store.IPStore = &ipStore{} - v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff} -) - -// key converts an IP address to a [16]byte. -// The byte array can then be used as a key for a map, unlike net.IP, which is a -// []byte. -// If an IPv4 address is specified, it will be prefixed with -// the net.v4InV6Prefix and thus becomes a valid IPv6 address. 
-func key(ip net.IP) [16]byte { - var array [16]byte - - if len(ip) == net.IPv4len { - copy(array[:], v4InV6Prefix) - copy(array[12:], ip) - } else { - copy(array[:], ip) - } - return array -} - -func (s *ipStore) AddNetwork(network string) error { - key, length, err := netmatch.ParseNetwork(network) - if err != nil { - return err - } - - s.Lock() - defer s.Unlock() - - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - return s.networks.Add(key, length) -} - -func (s *ipStore) AddIP(ip net.IP) error { - s.Lock() - defer s.Unlock() - - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - s.ips[key(ip)] = struct{}{} - - return nil -} - -func (s *ipStore) HasIP(ip net.IP) (bool, error) { - key := key(ip) - s.RLock() - defer s.RUnlock() - - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - _, ok := s.ips[key] - if ok { - return true, nil - } - - match, err := s.networks.Match(key) - if err != nil { - return false, err - } - - return match, nil -} - -func (s *ipStore) HasAnyIP(ips []net.IP) (bool, error) { - s.RLock() - defer s.RUnlock() - - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - for _, ip := range ips { - key := key(ip) - if _, ok := s.ips[key]; ok { - return true, nil - } - - match, err := s.networks.Match(key) - if err != nil { - return false, err - } - if match { - return true, nil - } - } - - return false, nil -} - -func (s *ipStore) HasAllIPs(ips []net.IP) (bool, error) { - s.RLock() - defer s.RUnlock() - - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - for _, ip := range ips { - key := key(ip) - if _, ok := s.ips[key]; !ok { - match, err := s.networks.Match(key) - if err != nil { - return false, err - } - if !match { - return false, nil - } - } - } - - return true, nil -} - -func (s *ipStore) RemoveIP(ip net.IP) 
error { - key := key(ip) - s.Lock() - defer s.Unlock() - - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - if _, ok := s.ips[key]; !ok { - return store.ErrResourceDoesNotExist - } - - delete(s.ips, key) - - return nil -} - -func (s *ipStore) RemoveNetwork(network string) error { - key, length, err := netmatch.ParseNetwork(network) - if err != nil { - return err - } - - s.Lock() - defer s.Unlock() - - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - err = s.networks.Remove(key, length) - if err != nil && err == netmatch.ErrNotContained { - return store.ErrResourceDoesNotExist - } - return err -} - -func (s *ipStore) Stop() <-chan error { - toReturn := make(chan error) - go func() { - s.Lock() - defer s.Unlock() - s.ips = make(map[[16]byte]struct{}) - s.networks = netmatch.New() - close(s.closed) - close(toReturn) - }() - return toReturn -} diff --git a/server/store/memory/ip_store_test.go b/server/store/memory/ip_store_test.go deleted file mode 100644 index 0497ab9..0000000 --- a/server/store/memory/ip_store_test.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package memory - -import ( - "net" - "testing" - - "github.com/chihaya/chihaya/server/store" - - "github.com/stretchr/testify/require" -) - -var ( - v6 = net.ParseIP("0c22:384e:0:0c22:384e::68") - v4 = net.ParseIP("12.13.14.15") - v4s = net.ParseIP("12.13.14.15").To4() - - ipStoreTester = store.PrepareIPStoreTester(&ipStoreDriver{}) - ipStoreBenchmarker = store.PrepareIPStoreBenchmarker(&ipStoreDriver{}) - ipStoreTestConfig = &store.DriverConfig{} -) - -func TestKey(t *testing.T) { - var table = []struct { - input net.IP - expected [16]byte - }{ - {v6, [16]byte{12, 34, 56, 78, 0, 0, 12, 34, 56, 78, 0, 0, 0, 0, 0, 104}}, - {v4, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 12, 13, 14, 15}}, // IPv4 in IPv6 prefix - {v4s, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 12, 13, 14, 15}}, // is equal to the one above, should produce equal output - } - - for _, tt := range table { - got := key(tt.input) - require.Equal(t, got, tt.expected) - } -} - -func TestIPStore(t *testing.T) { - ipStoreTester.TestIPStore(t, ipStoreTestConfig) -} - -func TestHasAllHasAny(t *testing.T) { - ipStoreTester.TestHasAllHasAny(t, ipStoreTestConfig) -} - -func TestNetworks(t *testing.T) { - ipStoreTester.TestNetworks(t, ipStoreTestConfig) -} - -func TestHasAllHasAnyNetworks(t *testing.T) { - ipStoreTester.TestHasAllHasAnyNetworks(t, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddV4(b *testing.B) { - ipStoreBenchmarker.AddV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddV6(b *testing.B) { - ipStoreBenchmarker.AddV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_LookupV4(b *testing.B) { - ipStoreBenchmarker.LookupV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_LookupV6(b *testing.B) { - ipStoreBenchmarker.LookupV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddRemoveV4(b *testing.B) { - ipStoreBenchmarker.AddRemoveV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddRemoveV6(b *testing.B) { - ipStoreBenchmarker.AddRemoveV6(b, ipStoreTestConfig) -} - -func 
BenchmarkIPStore_LookupNonExistV4(b *testing.B) { - ipStoreBenchmarker.LookupNonExistV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_LookupNonExistV6(b *testing.B) { - ipStoreBenchmarker.LookupNonExistV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExistV4(b *testing.B) { - ipStoreBenchmarker.RemoveNonExistV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExistV6(b *testing.B) { - ipStoreBenchmarker.RemoveNonExistV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddV4Network(b *testing.B) { - ipStoreBenchmarker.AddV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddV6Network(b *testing.B) { - ipStoreBenchmarker.AddV6Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_LookupV4Network(b *testing.B) { - ipStoreBenchmarker.LookupV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_LookupV6Network(b *testing.B) { - ipStoreBenchmarker.LookupV6Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddRemoveV4Network(b *testing.B) { - ipStoreBenchmarker.AddRemoveV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddRemoveV6Network(b *testing.B) { - ipStoreBenchmarker.AddRemoveV6Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExistV4Network(b *testing.B) { - ipStoreBenchmarker.RemoveNonExistV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExistV6Network(b *testing.B) { - ipStoreBenchmarker.RemoveNonExistV6Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Add1KV4(b *testing.B) { - ipStoreBenchmarker.Add1KV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Add1KV6(b *testing.B) { - ipStoreBenchmarker.Add1KV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Lookup1KV4(b *testing.B) { - ipStoreBenchmarker.Lookup1KV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Lookup1KV6(b *testing.B) { - ipStoreBenchmarker.Lookup1KV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddRemove1KV4(b *testing.B) { - ipStoreBenchmarker.AddRemove1KV4(b, ipStoreTestConfig) -} 
- -func BenchmarkIPStore_AddRemove1KV6(b *testing.B) { - ipStoreBenchmarker.AddRemove1KV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_LookupNonExist1KV4(b *testing.B) { - ipStoreBenchmarker.LookupNonExist1KV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_LookupNonExist1KV6(b *testing.B) { - ipStoreBenchmarker.LookupNonExist1KV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExist1KV4(b *testing.B) { - ipStoreBenchmarker.RemoveNonExist1KV4(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExist1KV6(b *testing.B) { - ipStoreBenchmarker.RemoveNonExist1KV6(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Add1KV4Network(b *testing.B) { - ipStoreBenchmarker.Add1KV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Add1KV6Network(b *testing.B) { - ipStoreBenchmarker.Add1KV6Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Lookup1KV4Network(b *testing.B) { - ipStoreBenchmarker.Lookup1KV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_Lookup1KV6Network(b *testing.B) { - ipStoreBenchmarker.Lookup1KV6Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddRemove1KV4Network(b *testing.B) { - ipStoreBenchmarker.AddRemove1KV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_AddRemove1KV6Network(b *testing.B) { - ipStoreBenchmarker.AddRemove1KV6Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExist1KV4Network(b *testing.B) { - ipStoreBenchmarker.RemoveNonExist1KV4Network(b, ipStoreTestConfig) -} - -func BenchmarkIPStore_RemoveNonExist1KV6Network(b *testing.B) { - ipStoreBenchmarker.RemoveNonExist1KV6Network(b, ipStoreTestConfig) -} diff --git a/server/store/memory/peer_store.go b/server/store/memory/peer_store.go deleted file mode 100644 index 8654ab1..0000000 --- a/server/store/memory/peer_store.go +++ /dev/null @@ -1,478 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package memory - -import ( - "encoding/binary" - "log" - "net" - "runtime" - "sync" - "time" - - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server/store" -) - -func init() { - store.RegisterPeerStoreDriver("memory", &peerStoreDriver{}) -} - -type peerStoreDriver struct{} - -func (d *peerStoreDriver) New(storecfg *store.DriverConfig) (store.PeerStore, error) { - cfg, err := newPeerStoreConfig(storecfg) - if err != nil { - return nil, err - } - - shards := make([]*peerShard, cfg.Shards) - for i := 0; i < cfg.Shards; i++ { - shards[i] = &peerShard{} - shards[i].swarms = make(map[chihaya.InfoHash]swarm) - } - return &peerStore{ - shards: shards, - closed: make(chan struct{}), - }, nil -} - -type peerStoreConfig struct { - Shards int `yaml:"shards"` -} - -func newPeerStoreConfig(storecfg *store.DriverConfig) (*peerStoreConfig, error) { - bytes, err := yaml.Marshal(storecfg.Config) - if err != nil { - return nil, err - } - - var cfg peerStoreConfig - err = yaml.Unmarshal(bytes, &cfg) - if err != nil { - return nil, err - } - - if cfg.Shards < 1 { - cfg.Shards = 1 - } - return &cfg, nil -} - -type serializedPeer string - -type peerShard struct { - swarms map[chihaya.InfoHash]swarm - sync.RWMutex -} - -type swarm struct { - // map serialized peer to mtime - seeders map[serializedPeer]int64 - leechers map[serializedPeer]int64 -} - -type peerStore struct { - shards []*peerShard - closed chan struct{} -} - -var _ store.PeerStore = &peerStore{} - -func (s *peerStore) shardIndex(infoHash chihaya.InfoHash) uint32 { - return binary.BigEndian.Uint32(infoHash[:4]) % uint32(len(s.shards)) -} - -func peerKey(p chihaya.Peer) serializedPeer { - b := make([]byte, 20+2+len(p.IP)) - copy(b[:20], p.ID[:]) - binary.BigEndian.PutUint16(b[20:22], p.Port) - copy(b[22:], p.IP) - - return serializedPeer(b) -} - -func decodePeerKey(pk 
serializedPeer) chihaya.Peer { - return chihaya.Peer{ - ID: chihaya.PeerIDFromString(string(pk[:20])), - Port: binary.BigEndian.Uint16([]byte(pk[20:22])), - IP: net.IP(pk[22:]), - } -} - -func (s *peerStore) PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - shard.Lock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.swarms[infoHash] = swarm{ - seeders: make(map[serializedPeer]int64), - leechers: make(map[serializedPeer]int64), - } - } - - shard.swarms[infoHash].seeders[peerKey(p)] = time.Now().UnixNano() - - shard.Unlock() - return nil -} - -func (s *peerStore) DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - pk := peerKey(p) - shard.Lock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.Unlock() - return store.ErrResourceDoesNotExist - } - - if _, ok := shard.swarms[infoHash].seeders[pk]; !ok { - shard.Unlock() - return store.ErrResourceDoesNotExist - } - - delete(shard.swarms[infoHash].seeders, pk) - - if len(shard.swarms[infoHash].seeders)|len(shard.swarms[infoHash].leechers) == 0 { - delete(shard.swarms, infoHash) - } - - shard.Unlock() - return nil -} - -func (s *peerStore) PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - shard.Lock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.swarms[infoHash] = swarm{ - seeders: make(map[serializedPeer]int64), - leechers: make(map[serializedPeer]int64), - } - } - - shard.swarms[infoHash].leechers[peerKey(p)] = time.Now().UnixNano() - - shard.Unlock() - return nil -} - -func (s *peerStore) DeleteLeecher(infoHash chihaya.InfoHash, p 
chihaya.Peer) error { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - pk := peerKey(p) - shard.Lock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.Unlock() - return store.ErrResourceDoesNotExist - } - - if _, ok := shard.swarms[infoHash].leechers[pk]; !ok { - shard.Unlock() - return store.ErrResourceDoesNotExist - } - - delete(shard.swarms[infoHash].leechers, pk) - - if len(shard.swarms[infoHash].seeders)|len(shard.swarms[infoHash].leechers) == 0 { - delete(shard.swarms, infoHash) - } - - shard.Unlock() - return nil -} - -func (s *peerStore) GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - key := peerKey(p) - shard := s.shards[s.shardIndex(infoHash)] - shard.Lock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.swarms[infoHash] = swarm{ - seeders: make(map[serializedPeer]int64), - leechers: make(map[serializedPeer]int64), - } - } - - delete(shard.swarms[infoHash].leechers, key) - - shard.swarms[infoHash].seeders[key] = time.Now().UnixNano() - - shard.Unlock() - return nil -} - -func (s *peerStore) CollectGarbage(cutoff time.Time) error { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - log.Printf("memory: collecting garbage. 
Cutoff time: %s", cutoff.String()) - cutoffUnix := cutoff.UnixNano() - for _, shard := range s.shards { - shard.RLock() - var infohashes []chihaya.InfoHash - for key := range shard.swarms { - infohashes = append(infohashes, key) - } - shard.RUnlock() - runtime.Gosched() - - for _, infohash := range infohashes { - shard.Lock() - - for peerKey, mtime := range shard.swarms[infohash].leechers { - if mtime <= cutoffUnix { - delete(shard.swarms[infohash].leechers, peerKey) - } - } - - for peerKey, mtime := range shard.swarms[infohash].seeders { - if mtime <= cutoffUnix { - delete(shard.swarms[infohash].seeders, peerKey) - } - } - - if len(shard.swarms[infohash].seeders)|len(shard.swarms[infohash].leechers) == 0 { - delete(shard.swarms, infohash) - } - - shard.Unlock() - runtime.Gosched() - } - - runtime.Gosched() - } - - return nil -} - -func (s *peerStore) AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers, peers6 []chihaya.Peer, err error) { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - shard.RLock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.RUnlock() - return nil, nil, store.ErrResourceDoesNotExist - } - - if seeder { - // Append leechers as possible. - leechers := shard.swarms[infoHash].leechers - for p := range leechers { - decodedPeer := decodePeerKey(p) - if numWant == 0 { - break - } - - if decodedPeer.IP.To4() == nil { - peers6 = append(peers6, decodedPeer) - } else { - peers = append(peers, decodedPeer) - } - numWant-- - } - } else { - // Append as many seeders as possible. - seeders := shard.swarms[infoHash].seeders - for p := range seeders { - decodedPeer := decodePeerKey(p) - if numWant == 0 { - break - } - - if decodedPeer.IP.To4() == nil { - peers6 = append(peers6, decodedPeer) - } else { - peers = append(peers, decodedPeer) - } - numWant-- - } - - // Append leechers until we reach numWant. 
- leechers := shard.swarms[infoHash].leechers - if numWant > 0 { - for p := range leechers { - decodedPeer := decodePeerKey(p) - if numWant == 0 { - break - } - - if decodedPeer.IP.To4() == nil { - if decodedPeer.Equal(peer6) { - continue - } - peers6 = append(peers6, decodedPeer) - } else { - if decodedPeer.Equal(peer4) { - continue - } - peers = append(peers, decodedPeer) - } - numWant-- - } - } - } - - shard.RUnlock() - return -} - -func (s *peerStore) GetSeeders(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - shard.RLock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.RUnlock() - return nil, nil, store.ErrResourceDoesNotExist - } - - seeders := shard.swarms[infoHash].seeders - for p := range seeders { - decodedPeer := decodePeerKey(p) - if decodedPeer.IP.To4() == nil { - peers6 = append(peers6, decodedPeer) - } else { - peers = append(peers, decodedPeer) - } - } - - shard.RUnlock() - return -} - -func (s *peerStore) GetLeechers(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - shard.RLock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.RUnlock() - return nil, nil, store.ErrResourceDoesNotExist - } - - leechers := shard.swarms[infoHash].leechers - for p := range leechers { - decodedPeer := decodePeerKey(p) - if decodedPeer.IP.To4() == nil { - peers6 = append(peers6, decodedPeer) - } else { - peers = append(peers, decodedPeer) - } - } - - shard.RUnlock() - return -} - -func (s *peerStore) NumSeeders(infoHash chihaya.InfoHash) int { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - shard.RLock() - - if _, ok := 
shard.swarms[infoHash]; !ok { - shard.RUnlock() - return 0 - } - - numSeeders := len(shard.swarms[infoHash].seeders) - - shard.RUnlock() - return numSeeders -} - -func (s *peerStore) NumLeechers(infoHash chihaya.InfoHash) int { - select { - case <-s.closed: - panic("attempted to interact with stopped store") - default: - } - - shard := s.shards[s.shardIndex(infoHash)] - shard.RLock() - - if _, ok := shard.swarms[infoHash]; !ok { - shard.RUnlock() - return 0 - } - - numLeechers := len(shard.swarms[infoHash].leechers) - - shard.RUnlock() - return numLeechers -} - -func (s *peerStore) Stop() <-chan error { - toReturn := make(chan error) - go func() { - shards := make([]*peerShard, len(s.shards)) - for i := 0; i < len(s.shards); i++ { - shards[i] = &peerShard{} - shards[i].swarms = make(map[chihaya.InfoHash]swarm) - } - s.shards = shards - close(s.closed) - close(toReturn) - }() - return toReturn -} diff --git a/server/store/memory/peer_store_test.go b/server/store/memory/peer_store_test.go deleted file mode 100644 index 9a00b17..0000000 --- a/server/store/memory/peer_store_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package memory - -import ( - "testing" - - "github.com/chihaya/chihaya/server/store" -) - -var ( - peerStoreTester = store.PreparePeerStoreTester(&peerStoreDriver{}) - peerStoreBenchmarker = store.PreparePeerStoreBenchmarker(&peerStoreDriver{}) - peerStoreTestConfig = &store.DriverConfig{} -) - -func init() { - unmarshalledConfig := struct { - Shards int - }{ - 1, - } - peerStoreTestConfig.Config = unmarshalledConfig -} - -func TestPeerStore(t *testing.T) { - peerStoreTester.TestPeerStore(t, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder(b *testing.B) { - peerStoreBenchmarker.PutSeeder(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder1KInfohash(b *testing.B) { - peerStoreBenchmarker.PutSeeder1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutSeeder1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder1KInfohash(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_DeleteSeederNonExist(b *testing.B) { - peerStoreBenchmarker.DeleteSeederNonExist(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash(b *testing.B) { - peerStoreBenchmarker.DeleteSeederNonExist1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_DeleteSeederNonExist1KSeeders(b *testing.B) { - 
peerStoreBenchmarker.DeleteSeederNonExist1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.DeleteSeederNonExist1KInfohash1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist1KSeeders(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeers(b *testing.B) { - peerStoreBenchmarker.AnnouncePeers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeers1KInfohash(b *testing.B) { - peerStoreBenchmarker.AnnouncePeers1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeersSeeder(b *testing.B) { - peerStoreBenchmarker.AnnouncePeersSeeder(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeersSeeder1KInfohash(b 
*testing.B) { - peerStoreBenchmarker.AnnouncePeersSeeder1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GetSeeders(b *testing.B) { - peerStoreBenchmarker.GetSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GetSeeders1KInfohash(b *testing.B) { - peerStoreBenchmarker.GetSeeders1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_NumSeeders(b *testing.B) { - peerStoreBenchmarker.NumSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_NumSeeders1KInfohash(b *testing.B) { - peerStoreBenchmarker.NumSeeders1KInfohash(b, peerStoreTestConfig) -} diff --git a/server/store/memory/string_store.go b/server/store/memory/string_store.go deleted file mode 100644 index 531e3bb..0000000 --- a/server/store/memory/string_store.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package memory - -import ( - "sync" - - "github.com/chihaya/chihaya/server/store" -) - -func init() { - store.RegisterStringStoreDriver("memory", &stringStoreDriver{}) -} - -type stringStoreDriver struct{} - -func (d *stringStoreDriver) New(_ *store.DriverConfig) (store.StringStore, error) { - return &stringStore{ - strings: make(map[string]struct{}), - closed: make(chan struct{}), - }, nil -} - -type stringStore struct { - strings map[string]struct{} - closed chan struct{} - sync.RWMutex -} - -var _ store.StringStore = &stringStore{} - -func (ss *stringStore) PutString(s string) error { - ss.Lock() - defer ss.Unlock() - - select { - case <-ss.closed: - panic("attempted to interact with stopped store") - default: - } - - ss.strings[s] = struct{}{} - - return nil -} - -func (ss *stringStore) HasString(s string) (bool, error) { - ss.RLock() - defer ss.RUnlock() - - select { - case <-ss.closed: - panic("attempted to interact with stopped store") - default: - } - - _, ok := ss.strings[s] - - return ok, nil -} 
- -func (ss *stringStore) RemoveString(s string) error { - ss.Lock() - defer ss.Unlock() - - select { - case <-ss.closed: - panic("attempted to interact with stopped store") - default: - } - - if _, ok := ss.strings[s]; !ok { - return store.ErrResourceDoesNotExist - } - - delete(ss.strings, s) - - return nil -} - -func (ss *stringStore) Stop() <-chan error { - toReturn := make(chan error) - go func() { - ss.Lock() - defer ss.Unlock() - ss.strings = make(map[string]struct{}) - close(ss.closed) - close(toReturn) - }() - return toReturn -} diff --git a/server/store/memory/string_store_test.go b/server/store/memory/string_store_test.go deleted file mode 100644 index 3e020c2..0000000 --- a/server/store/memory/string_store_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package memory - -import ( - "testing" - - "github.com/chihaya/chihaya/server/store" -) - -var ( - stringStoreTester = store.PrepareStringStoreTester(&stringStoreDriver{}) - stringStoreBenchmarker = store.PrepareStringStoreBenchmarker(&stringStoreDriver{}) - stringStoreTestConfig = &store.DriverConfig{} -) - -func TestStringStore(t *testing.T) { - stringStoreTester.TestStringStore(t, stringStoreTestConfig) -} - -func BenchmarkStringStore_AddShort(b *testing.B) { - stringStoreBenchmarker.AddShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_AddLong(b *testing.B) { - stringStoreBenchmarker.AddLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_LookupShort(b *testing.B) { - stringStoreBenchmarker.LookupShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_LookupLong(b *testing.B) { - stringStoreBenchmarker.LookupLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_AddRemoveShort(b *testing.B) { - stringStoreBenchmarker.AddRemoveShort(b, stringStoreTestConfig) -} - -func 
BenchmarkStringStore_AddRemoveLong(b *testing.B) { - stringStoreBenchmarker.AddRemoveLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_LookupNonExistShort(b *testing.B) { - stringStoreBenchmarker.LookupNonExistShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_LookupNonExistLong(b *testing.B) { - stringStoreBenchmarker.LookupNonExistLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_RemoveNonExistShort(b *testing.B) { - stringStoreBenchmarker.RemoveNonExistShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_RemoveNonExistLong(b *testing.B) { - stringStoreBenchmarker.RemoveNonExistLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_Add1KShort(b *testing.B) { - stringStoreBenchmarker.Add1KShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_Add1KLong(b *testing.B) { - stringStoreBenchmarker.Add1KLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_Lookup1KShort(b *testing.B) { - stringStoreBenchmarker.Lookup1KShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_Lookup1KLong(b *testing.B) { - stringStoreBenchmarker.Lookup1KLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_AddRemove1KShort(b *testing.B) { - stringStoreBenchmarker.AddRemove1KShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_AddRemove1KLong(b *testing.B) { - stringStoreBenchmarker.AddRemove1KLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_LookupNonExist1KShort(b *testing.B) { - stringStoreBenchmarker.LookupNonExist1KShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_LookupNonExist1KLong(b *testing.B) { - stringStoreBenchmarker.LookupNonExist1KLong(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_RemoveNonExist1KShort(b *testing.B) { - stringStoreBenchmarker.RemoveNonExist1KShort(b, stringStoreTestConfig) -} - -func BenchmarkStringStore_RemoveNonExist1KLong(b *testing.B) { - stringStoreBenchmarker.RemoveNonExist1KLong(b, stringStoreTestConfig) -} diff 
--git a/server/store/middleware/client/README.md b/server/store/middleware/client/README.md deleted file mode 100644 index c33ac26..0000000 --- a/server/store/middleware/client/README.md +++ /dev/null @@ -1,25 +0,0 @@ -## Client Blacklisting/Whitelisting Middlewares - -This package provides the announce middlewares `client_whitelist` and `client_blacklist` for blacklisting or whitelisting clients for announces. - -### `client_blacklist` - -The `client_blacklist` middleware uses all clientIDs stored in the `StringStore` to blacklist, i.e. block announces. - -The clientID part of the peerID of an announce is matched against the `StringStore`, if it's contained within the `StringStore`, the announce is aborted. - -### `client_whitelist` - -The `client_whitelist` middleware uses all clientIDs stored in the `StringStore` to whitelist, i.e. allow announces. - -The clientID part of the peerID of an announce is matched against the `StringStore`, if it's _not_ contained within the `StringStore`, the announce is aborted. - -### Important things to notice - -Both middlewares operate on announce requests only. - -Both middlewares use the same `StringStore`. -It is therefore not advised to have both the `client_blacklist` and the `client_whitelist` middleware running. -(If you add clientID to the `StringStore`, it will be used for blacklisting and whitelisting. -If your store contains no clientIDs, no announces will be blocked by the blacklist, but all announces will be blocked by the whitelist. -If your store contains all clientIDs, no announces will be blocked by the whitelist, but all announces will be blocked by the blacklist.) \ No newline at end of file diff --git a/server/store/middleware/client/blacklist.go b/server/store/middleware/client/blacklist.go deleted file mode 100644 index e994d2f..0000000 --- a/server/store/middleware/client/blacklist.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package client - -import ( - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/clientid" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("client_blacklist", blacklistAnnounceClient) -} - -// ErrBlacklistedClient is returned by an announce middleware if the announcing -// Client is blacklisted. -var ErrBlacklistedClient = tracker.ClientError("client blacklisted") - -// blacklistAnnounceClient provides a middleware that only allows Clients to -// announce that are not stored in the StringStore. -func blacklistAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - blacklisted, err := store.MustGetStore().HasString(PrefixClient + clientid.New(string(req.PeerID[:]))) - if err != nil { - return err - } else if blacklisted { - return ErrBlacklistedClient - } - return next(cfg, req, resp) - } -} diff --git a/server/store/middleware/client/whitelist.go b/server/store/middleware/client/whitelist.go deleted file mode 100644 index 275b56e..0000000 --- a/server/store/middleware/client/whitelist.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package client - -import ( - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/clientid" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("client_whitelist", whitelistAnnounceClient) -} - -// PrefixClient is the prefix to be used for client peer IDs. 
-const PrefixClient = "c-" - -// ErrNotWhitelistedClient is returned by an announce middleware if the -// announcing Client is not whitelisted. -var ErrNotWhitelistedClient = tracker.ClientError("client not whitelisted") - -// whitelistAnnounceClient provides a middleware that only allows Clients to -// announce that are stored in the StringStore. -func whitelistAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - whitelisted, err := store.MustGetStore().HasString(PrefixClient + clientid.New(string(req.PeerID[:]))) - if err != nil { - return err - } else if !whitelisted { - return ErrNotWhitelistedClient - } - return next(cfg, req, resp) - } -} diff --git a/server/store/middleware/infohash/README.md b/server/store/middleware/infohash/README.md deleted file mode 100644 index 213bcec..0000000 --- a/server/store/middleware/infohash/README.md +++ /dev/null @@ -1,69 +0,0 @@ -## Infohash Blacklisting/Whitelisting Middlewares - -This package provides the middleware `infohash_blacklist` and `infohash_whitelist` for blacklisting or whitelisting infohashes. -It also provides the configurable scrape middleware `infohash_blacklist` and `infohash_whitelist` for blacklisting or whitelisting infohashes. - -### `infohash_blacklist` - -#### For Announces - -The `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to blacklist, i.e. block announces. - -#### For Scrapes - -The configurable `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to blacklist scrape requests. - -The scrape middleware has two modes of operation: _Block_ and _Filter_. - -- _Block_ will drop a scrape request if it contains a blacklisted infohash. 
-- _Filter_ will filter all blacklisted infohashes from a scrape request, potentially leaving behind an empty scrape request. - **IMPORTANT**: This mode **does not work with UDP servers**. - -See the configuration section for information about how to configure the scrape middleware. - -### `infohash_whitelist` - -#### For Announces - -The `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to whitelist, i.e. allow announces. - -#### For Scrapes - -The configurable `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to whitelist scrape requests. - -The scrape middleware has two modes of operation: _Block_ and _Filter_. - -- _Block_ will drop a scrape request if it contains a non-whitelisted infohash. -- _Filter_ will filter all non-whitelisted infohashes from a scrape request, potentially leaving behind an empty scrape request. - **IMPORTANT**: This mode **does not work with UDP servers**. - -See the configuration section for information about how to configure the scrape middleware. - -### Important things to notice - -Both blacklist and whitelist middleware use the same `StringStore`. -It is therefore not advised to have both the `infohash_blacklist` and the `infohash_whitelist` announce or scrape middleware running. -(If you add an infohash to the `StringStore`, it will be used for blacklisting and whitelisting. -If your store contains no infohashes, no announces/scrapes will be blocked by the blacklist, but all will be blocked by the whitelist. -If your store contains all addresses, no announces/scrapes will be blocked by the whitelist, but all will be blocked by the blacklist.) - -Also note that the announce and scrape middleware both use the same `StringStore`. -It is therefore not possible to use different infohashes for black-/whitelisting on announces and scrape requests. - -### Configuration - -The scrape middleware is configurable. 
- -The configuration uses a single required parameter `mode` to determine the mode of operation for the middleware. -An example configuration might look like this: - - chihaya: - tracker: - scrape_middleware: - - name: infohash_blacklist - config: - mode: block - -`mode` accepts two values: `block` and `filter`. - -**IMPORTANT**: The `filter` mode **does not work with UDP servers**. \ No newline at end of file diff --git a/server/store/middleware/infohash/blacklist.go b/server/store/middleware/infohash/blacklist.go deleted file mode 100644 index ff883b1..0000000 --- a/server/store/middleware/infohash/blacklist.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package infohash - -import ( - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("infohash_blacklist", blacklistAnnounceInfohash) - tracker.RegisterScrapeMiddlewareConstructor("infohash_blacklist", blacklistScrapeInfohash) - mustGetStore = func() store.StringStore { - return store.MustGetStore().StringStore - } -} - -// ErrBlockedInfohash is returned by a middleware if any of the infohashes -// contained in an announce or scrape are disallowed. -var ErrBlockedInfohash = tracker.ClientError("disallowed infohash") - -var mustGetStore func() store.StringStore - -// blacklistAnnounceInfohash provides a middleware that only allows announces -// for infohashes that are not stored in a StringStore. 
-func blacklistAnnounceInfohash(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) { - blacklisted, err := mustGetStore().HasString(PrefixInfohash + string(req.InfoHash[:])) - if err != nil { - return err - } else if blacklisted { - return ErrBlockedInfohash - } - - return next(cfg, req, resp) - } -} - -// blacklistScrapeInfohash provides a middleware constructor for a middleware -// that blocks or filters scrape requests based on the infohashes scraped. -// -// The middleware works in two modes: block and filter. -// The block mode blocks a scrape completely if any of the infohashes is -// disallowed. -// The filter mode filters any disallowed infohashes from the scrape, -// potentially leaving an empty scrape. -// -// ErrUnknownMode is returned if the Mode specified in the config is unknown. -func blacklistScrapeInfohash(c chihaya.MiddlewareConfig) (tracker.ScrapeMiddleware, error) { - cfg, err := newConfig(c) - if err != nil { - return nil, err - } - - switch cfg.Mode { - case ModeFilter: - return blacklistFilterScrape, nil - case ModeBlock: - return blacklistBlockScrape, nil - default: - panic("unknown mode") - } -} - -func blacklistFilterScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) { - blacklisted := false - storage := mustGetStore() - infohashes := req.InfoHashes - - for i, ih := range infohashes { - blacklisted, err = storage.HasString(PrefixInfohash + string(ih[:])) - - if err != nil { - return err - } else if blacklisted { - req.InfoHashes[i] = req.InfoHashes[len(req.InfoHashes)-1] - req.InfoHashes = req.InfoHashes[:len(req.InfoHashes)-1] - } - } - - return next(cfg, req, resp) - } -} - -func blacklistBlockScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler { - return func(cfg *chihaya.TrackerConfig, req 
*chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) { - blacklisted := false - storage := mustGetStore() - - for _, ih := range req.InfoHashes { - blacklisted, err = storage.HasString(PrefixInfohash + string(ih[:])) - - if err != nil { - return err - } else if blacklisted { - return ErrBlockedInfohash - } - } - - return next(cfg, req, resp) - } -} diff --git a/server/store/middleware/infohash/blacklist_test.go b/server/store/middleware/infohash/blacklist_test.go deleted file mode 100644 index 3d06b51..0000000 --- a/server/store/middleware/infohash/blacklist_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package infohash - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/stopper" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -type storeMock struct { - strings map[string]struct{} -} - -func (ss *storeMock) PutString(s string) error { - ss.strings[s] = struct{}{} - - return nil -} - -func (ss *storeMock) HasString(s string) (bool, error) { - _, ok := ss.strings[s] - - return ok, nil -} - -func (ss *storeMock) RemoveString(s string) error { - delete(ss.strings, s) - - return nil -} - -func (ss *storeMock) Stop() <-chan error { - return stopper.AlreadyStopped -} - -var mock store.StringStore = &storeMock{ - strings: make(map[string]struct{}), -} - -var ( - ih1 = chihaya.InfoHash([20]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) - ih2 = chihaya.InfoHash([20]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) -) - -func TestASetUp(t *testing.T) { - mustGetStore = func() store.StringStore { - return mock - } - - mustGetStore().PutString(PrefixInfohash + string(ih1[:])) -} - -func TestBlacklistAnnounceMiddleware(t *testing.T) { - var ( - 
achain tracker.AnnounceChain - req chihaya.AnnounceRequest - resp chihaya.AnnounceResponse - ) - - achain.Append(blacklistAnnounceInfohash) - handler := achain.Handler() - - err := handler(nil, &req, &resp) - assert.Nil(t, err) - - req.InfoHash = chihaya.InfoHash(ih1) - err = handler(nil, &req, &resp) - assert.Equal(t, ErrBlockedInfohash, err) - - req.InfoHash = chihaya.InfoHash(ih2) - err = handler(nil, &req, &resp) - assert.Nil(t, err) -} - -func TestBlacklistScrapeMiddlewareBlock(t *testing.T) { - var ( - schain tracker.ScrapeChain - req chihaya.ScrapeRequest - resp chihaya.ScrapeResponse - ) - - mw, err := blacklistScrapeInfohash(chihaya.MiddlewareConfig{ - Name: "blacklist_infohash", - Config: Config{ - Mode: ModeBlock, - }, - }) - assert.Nil(t, err) - schain.Append(mw) - handler := schain.Handler() - - err = handler(nil, &req, &resp) - assert.Nil(t, err) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)} - err = handler(nil, &req, &resp) - assert.Equal(t, ErrBlockedInfohash, err) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih2)} - err = handler(nil, &req, &resp) - assert.Nil(t, err) -} - -func TestBlacklistScrapeMiddlewareFilter(t *testing.T) { - var ( - schain tracker.ScrapeChain - req chihaya.ScrapeRequest - resp chihaya.ScrapeResponse - ) - - mw, err := blacklistScrapeInfohash(chihaya.MiddlewareConfig{ - Name: "blacklist_infohash", - Config: Config{ - Mode: ModeFilter, - }, - }) - assert.Nil(t, err) - schain.Append(mw) - handler := schain.Handler() - - err = handler(nil, &req, &resp) - assert.Nil(t, err) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)} - err = handler(nil, &req, &resp) - assert.Nil(t, err) - assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih2)}, req.InfoHashes) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih2)} - err = handler(nil, &req, &resp) - assert.Nil(t, err) -} diff --git a/server/store/middleware/infohash/config.go 
b/server/store/middleware/infohash/config.go deleted file mode 100644 index 7c399a0..0000000 --- a/server/store/middleware/infohash/config.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package infohash - -import ( - "errors" - - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" -) - -// ErrUnknownMode is returned by a MiddlewareConstructor if the Mode specified -// in the configuration is unknown. -var ErrUnknownMode = errors.New("unknown mode") - -// Mode represents the mode of operation for an infohash scrape middleware. -type Mode string - -const ( - // ModeFilter makes the middleware filter disallowed infohashes from a - // scrape request. - ModeFilter = Mode("filter") - - // ModeBlock makes the middleware block a scrape request if it contains - // at least one disallowed infohash. - ModeBlock = Mode("block") -) - -// Config represents the configuration for an infohash scrape middleware. -type Config struct { - Mode Mode `yaml:"mode"` -} - -// newConfig parses the given MiddlewareConfig as an infohash.Config. -// ErrUnknownMode is returned if the mode is unknown. -func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) { - bytes, err := yaml.Marshal(mwcfg.Config) - if err != nil { - return nil, err - } - - var cfg Config - err = yaml.Unmarshal(bytes, &cfg) - if err != nil { - return nil, err - } - - if cfg.Mode != ModeBlock && cfg.Mode != ModeFilter { - return nil, ErrUnknownMode - } - - return &cfg, nil -} diff --git a/server/store/middleware/infohash/config_test.go b/server/store/middleware/infohash/config_test.go deleted file mode 100644 index f7cc57a..0000000 --- a/server/store/middleware/infohash/config_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package infohash - -import ( - "fmt" - "testing" - - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" - "github.com/stretchr/testify/assert" -) - -var ( - configTemplate = `name: foo -config: - %s: %s` - - data = []testData{ - {"mode", "block", false, ModeBlock}, - {"mode", "filter", false, ModeFilter}, - {"some", "stuff", true, ModeBlock}, - } -) - -type testData struct { - key string - value string - err bool - expected Mode -} - -func TestNewConfig(t *testing.T) { - var mwconfig chihaya.MiddlewareConfig - - cfg, err := newConfig(mwconfig) - assert.NotNil(t, err) - assert.Nil(t, cfg) - - for _, test := range data { - config := fmt.Sprintf(configTemplate, test.key, test.value) - err = yaml.Unmarshal([]byte(config), &mwconfig) - assert.Nil(t, err) - - cfg, err = newConfig(mwconfig) - if test.err { - assert.NotNil(t, err) - continue - } - assert.Nil(t, err) - assert.Equal(t, test.expected, cfg.Mode) - } -} diff --git a/server/store/middleware/infohash/whitelist.go b/server/store/middleware/infohash/whitelist.go deleted file mode 100644 index 99caeda..0000000 --- a/server/store/middleware/infohash/whitelist.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package infohash - -import ( - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("infohash_whitelist", whitelistAnnounceInfohash) - tracker.RegisterScrapeMiddlewareConstructor("infohash_whitelist", whitelistScrapeInfohash) -} - -// PrefixInfohash is the prefix to be used for infohashes. 
-const PrefixInfohash = "ih-" - -// whitelistAnnounceInfohash provides a middleware that only allows announces -// for infohashes that are not stored in a StringStore -func whitelistAnnounceInfohash(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) { - whitelisted, err := mustGetStore().HasString(PrefixInfohash + string(req.InfoHash[:])) - - if err != nil { - return err - } else if !whitelisted { - return ErrBlockedInfohash - } - return next(cfg, req, resp) - } -} - -// whitelistScrapeInfohash provides a middleware constructor for a middleware -// that blocks or filters scrape requests based on the infohashes scraped. -// -// The middleware works in two modes: block and filter. -// The block mode blocks a scrape completely if any of the infohashes is -// disallowed. -// The filter mode filters any disallowed infohashes from the scrape, -// potentially leaving an empty scrape. -// -// ErrUnknownMode is returned if the Mode specified in the config is unknown. 
-func whitelistScrapeInfohash(c chihaya.MiddlewareConfig) (tracker.ScrapeMiddleware, error) { - cfg, err := newConfig(c) - if err != nil { - return nil, err - } - - switch cfg.Mode { - case ModeFilter: - return whitelistFilterScrape, nil - case ModeBlock: - return whitelistBlockScrape, nil - default: - panic("unknown mode") - } -} - -func whitelistFilterScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) { - whitelisted := false - storage := mustGetStore() - infohashes := req.InfoHashes - - for i, ih := range infohashes { - whitelisted, err = storage.HasString(PrefixInfohash + string(ih[:])) - - if err != nil { - return err - } else if !whitelisted { - req.InfoHashes[i] = req.InfoHashes[len(req.InfoHashes)-1] - req.InfoHashes = req.InfoHashes[:len(req.InfoHashes)-1] - } - } - - return next(cfg, req, resp) - } -} - -func whitelistBlockScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) { - whitelisted := false - storage := mustGetStore() - - for _, ih := range req.InfoHashes { - whitelisted, err = storage.HasString(PrefixInfohash + string(ih[:])) - - if err != nil { - return err - } else if !whitelisted { - return ErrBlockedInfohash - } - } - - return next(cfg, req, resp) - } -} diff --git a/server/store/middleware/infohash/whitelist_test.go b/server/store/middleware/infohash/whitelist_test.go deleted file mode 100644 index 3a68386..0000000 --- a/server/store/middleware/infohash/whitelist_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package infohash - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/tracker" -) - -func TestWhitelistAnnounceMiddleware(t *testing.T) { - var ( - achain tracker.AnnounceChain - req chihaya.AnnounceRequest - resp chihaya.AnnounceResponse - ) - - achain.Append(whitelistAnnounceInfohash) - handler := achain.Handler() - - err := handler(nil, &req, &resp) - assert.Equal(t, ErrBlockedInfohash, err) - - req.InfoHash = chihaya.InfoHash(ih2) - err = handler(nil, &req, &resp) - assert.Equal(t, ErrBlockedInfohash, err) - - req.InfoHash = chihaya.InfoHash(ih1) - err = handler(nil, &req, &resp) - assert.Nil(t, err) -} - -func TestWhitelistScrapeMiddlewareBlock(t *testing.T) { - var ( - schain tracker.ScrapeChain - req chihaya.ScrapeRequest - resp chihaya.ScrapeResponse - ) - - mw, err := whitelistScrapeInfohash(chihaya.MiddlewareConfig{ - Name: "whitelist_infohash", - Config: Config{ - Mode: ModeBlock, - }, - }) - assert.Nil(t, err) - schain.Append(mw) - handler := schain.Handler() - - err = handler(nil, &req, &resp) - assert.Nil(t, err) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)} - err = handler(nil, &req, &resp) - assert.Equal(t, ErrBlockedInfohash, err) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1)} - err = handler(nil, &req, &resp) - assert.Nil(t, err) -} - -func TestWhitelistScrapeMiddlewareFilter(t *testing.T) { - var ( - schain tracker.ScrapeChain - req chihaya.ScrapeRequest - resp chihaya.ScrapeResponse - ) - - mw, err := whitelistScrapeInfohash(chihaya.MiddlewareConfig{ - Name: "whitelist_infohash", - Config: Config{ - Mode: ModeFilter, - }, - }) - assert.Nil(t, err) - schain.Append(mw) - handler := schain.Handler() - - err = handler(nil, &req, &resp) - assert.Nil(t, err) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)} - err = handler(nil, &req, &resp) - assert.Nil(t, err) - assert.Equal(t, 
[]chihaya.InfoHash{chihaya.InfoHash(ih1)}, req.InfoHashes) - - req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1)} - err = handler(nil, &req, &resp) - assert.Nil(t, err) - assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih1)}, req.InfoHashes) -} diff --git a/server/store/middleware/ip/README.md b/server/store/middleware/ip/README.md deleted file mode 100644 index 16b8cc9..0000000 --- a/server/store/middleware/ip/README.md +++ /dev/null @@ -1,32 +0,0 @@ -## IP Blacklisting/Whitelisting Middlewares - -This package provides the announce middlewares `ip_blacklist` and `ip_whitelist` for blacklisting or whitelisting IP addresses and networks for announces. - -### `ip_blacklist` - -The `ip_blacklist` middleware uses all IP addresses and networks stored in the `IPStore` to blacklist, i.e. block announces. - -Both the IPv4 and the IPv6 addresses contained in the announce are matched against the `IPStore`. -If one or both of the two are contained in the `IPStore`, the announce will be rejected _completely_. - -### `ip_whitelist` - -The `ip_whitelist` middleware uses all IP addresses and networks stored in the `IPStore` to whitelist, i.e. allow announces. - -If present, both the IPv4 and the IPv6 addresses contained in the announce are matched against the `IPStore`. -Only if all IP address that are present in the announce are also present in the `IPStore` will the announce be allowed, otherwise it will be rejected _completely_. - -### Important things to notice - -Both middlewares operate on announce requests only. -The middlewares will check the IPv4 and IPv6 IPs a client announces to the tracker against an `IPStore`. -Normally the IP address embedded in the announce is the public IP address of the machine the client is running on. -Note however, that a client can override this behaviour by specifying an IP address in the announce itself. 
-_This middleware does not (dis)allow announces coming from certain IP addresses, but announces containing certain IP addresses_. -Always keep that in mind. - -Both middlewares use the same `IPStore`. -It is therefore not advised to have both the `ip_blacklist` and the `ip_whitelist` middleware running. -(If you add an IP address or network to the `IPStore`, it will be used for blacklisting and whitelisting. -If your store contains no addresses, no announces will be blocked by the blacklist, but all announces will be blocked by the whitelist. -If your store contains all addresses, no announces will be blocked by the whitelist, but all announces will be blocked by the blacklist.) \ No newline at end of file diff --git a/server/store/middleware/ip/blacklist.go b/server/store/middleware/ip/blacklist.go deleted file mode 100644 index deee714..0000000 --- a/server/store/middleware/ip/blacklist.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package ip - -import ( - "net" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("ip_blacklist", blacklistAnnounceIP) -} - -// ErrBlockedIP is returned by an announce middleware if any of the announcing -// IPs is disallowed. -var ErrBlockedIP = tracker.ClientError("disallowed IP address") - -// blacklistAnnounceIP provides a middleware that only allows IPs to announce -// that are not stored in an IPStore. 
-func blacklistAnnounceIP(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) { - blacklisted := false - storage := store.MustGetStore() - - // We have to check explicitly if they are present, because someone - // could have added a net.IP to the store. - if req.IPv6 != nil && req.IPv4 != nil { - blacklisted, err = storage.HasAnyIP([]net.IP{req.IPv4, req.IPv6}) - } else if req.IPv4 != nil { - blacklisted, err = storage.HasIP(req.IPv4) - } else { - blacklisted, err = storage.HasIP(req.IPv6) - } - - if err != nil { - return err - } else if blacklisted { - return ErrBlockedIP - } - return next(cfg, req, resp) - } -} diff --git a/server/store/middleware/ip/whitelist.go b/server/store/middleware/ip/whitelist.go deleted file mode 100644 index 71cd7d9..0000000 --- a/server/store/middleware/ip/whitelist.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package ip - -import ( - "net" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("ip_whitelist", whitelistAnnounceIP) -} - -// whitelistAnnounceIP provides a middleware that only allows IPs to announce -// that are stored in an IPStore. -func whitelistAnnounceIP(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) { - whitelisted := false - storage := store.MustGetStore() - - // We have to check explicitly if they are present, because someone - // could have added a net.IP to the store. 
- if req.IPv4 != nil && req.IPv6 != nil { - whitelisted, err = storage.HasAllIPs([]net.IP{req.IPv4, req.IPv6}) - } else if req.IPv4 != nil { - whitelisted, err = storage.HasIP(req.IPv4) - } else { - whitelisted, err = storage.HasIP(req.IPv6) - } - - if err != nil { - return err - } else if !whitelisted { - return ErrBlockedIP - } - return next(cfg, req, resp) - } -} diff --git a/server/store/middleware/response/README.md b/server/store/middleware/response/README.md deleted file mode 100644 index f1becc2..0000000 --- a/server/store/middleware/response/README.md +++ /dev/null @@ -1,11 +0,0 @@ -## Response Middleware - -This package provides the final response for a chain of middleware using the “store” package. - -### `store_response` - -The `store_response` middleware uses the peer data stored in the peerStore to create a response for the request. - -### Important things to notice - -This middleware is very basic, and may not do everything that you require. \ No newline at end of file diff --git a/server/store/middleware/response/response.go b/server/store/middleware/response/response.go deleted file mode 100644 index aaab25c..0000000 --- a/server/store/middleware/response/response.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package response - -import ( - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("store_response", responseAnnounceClient) - tracker.RegisterScrapeMiddleware("store_response", responseScrapeClient) -} - -// FailedToRetrievePeers represents an error that has been return when -// attempting to fetch peers from the store. -type FailedToRetrievePeers string - -// Error interface for FailedToRetrievePeers. 
-func (f FailedToRetrievePeers) Error() string { return string(f) } - -// responseAnnounceClient provides a middleware to make a response to an -// announce based on the current request. -func responseAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) { - storage := store.MustGetStore() - - resp.Interval = cfg.AnnounceInterval - resp.MinInterval = cfg.MinAnnounceInterval - resp.Compact = req.Compact - resp.Complete = int32(storage.NumSeeders(req.InfoHash)) - resp.Incomplete = int32(storage.NumLeechers(req.InfoHash)) - resp.IPv4Peers, resp.IPv6Peers, err = storage.AnnouncePeers(req.InfoHash, req.Left == 0, int(req.NumWant), req.Peer4(), req.Peer6()) - if err != nil { - return FailedToRetrievePeers(err.Error()) - } - - return next(cfg, req, resp) - } -} - -// responseScrapeClient provides a middleware to make a response to an -// scrape based on the current request. -func responseScrapeClient(next tracker.ScrapeHandler) tracker.ScrapeHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) { - storage := store.MustGetStore() - for _, infoHash := range req.InfoHashes { - resp.Files[infoHash] = chihaya.Scrape{ - Complete: int32(storage.NumSeeders(infoHash)), - Incomplete: int32(storage.NumLeechers(infoHash)), - } - } - - return next(cfg, req, resp) - } -} diff --git a/server/store/middleware/swarm/README.md b/server/store/middleware/swarm/README.md deleted file mode 100644 index 60444e7..0000000 --- a/server/store/middleware/swarm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -## Swarm Interaction Middleware - -This package provides the announce middleware that modifies peer data stored in the `store` package. - -### `store_swarm_interaction` - -The `store_swarm_interaction` middleware updates the data stored in the `peerStore` based on the announce. 
- -### Important things to notice - -It is recommended to have this middleware run before the `store_response` middleware. -The `store_response` middleware assumes the store to be already updated by the announce. \ No newline at end of file diff --git a/server/store/middleware/swarm/swarm.go b/server/store/middleware/swarm/swarm.go deleted file mode 100644 index 87ff15b..0000000 --- a/server/store/middleware/swarm/swarm.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package response - -import ( - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/event" - "github.com/chihaya/chihaya/server/store" - "github.com/chihaya/chihaya/tracker" -) - -func init() { - tracker.RegisterAnnounceMiddleware("store_swarm_interaction", announceSwarmInteraction) -} - -// FailedSwarmInteraction represents an error that indicates that the -// interaction of a peer with a swarm failed. -type FailedSwarmInteraction string - -// Error satisfies the error interface for FailedSwarmInteraction. -func (f FailedSwarmInteraction) Error() string { return string(f) } - -// announceSwarmInteraction provides a middleware that manages swarm -// interactions for a peer based on the announce. 
-func announceSwarmInteraction(next tracker.AnnounceHandler) tracker.AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) { - if req.IPv4 != nil { - err = updatePeerStore(req, req.Peer4()) - if err != nil { - return FailedSwarmInteraction(err.Error()) - } - } - - if req.IPv6 != nil { - err = updatePeerStore(req, req.Peer6()) - if err != nil { - return FailedSwarmInteraction(err.Error()) - } - } - - return next(cfg, req, resp) - } -} - -func updatePeerStore(req *chihaya.AnnounceRequest, peer chihaya.Peer) (err error) { - storage := store.MustGetStore() - - switch { - case req.Event == event.Stopped: - err = storage.DeleteSeeder(req.InfoHash, peer) - if err != nil && err != store.ErrResourceDoesNotExist { - return err - } - - err = storage.DeleteLeecher(req.InfoHash, peer) - if err != nil && err != store.ErrResourceDoesNotExist { - return err - } - - case req.Event == event.Completed || req.Left == 0: - err = storage.GraduateLeecher(req.InfoHash, peer) - if err != nil { - return err - } - default: - err = storage.PutLeecher(req.InfoHash, peer) - if err != nil { - return err - } - } - - return nil -} diff --git a/server/store/peer_store.go b/server/store/peer_store.go deleted file mode 100644 index fff01c3..0000000 --- a/server/store/peer_store.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package store - -import ( - "fmt" - "time" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/stopper" -) - -var peerStoreDrivers = make(map[string]PeerStoreDriver) - -// PeerStore represents an interface for manipulating peers. -type PeerStore interface { - // PutSeeder adds a seeder for the infoHash to the PeerStore. 
- PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error - // DeleteSeeder removes a seeder for the infoHash from the PeerStore. - // - // Returns ErrResourceDoesNotExist if the infoHash or peer does not - // exist. - DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error - - // PutLeecher adds a leecher for the infoHash to the PeerStore. - PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error - // DeleteLeecher removes a leecher for the infoHash from the PeerStore. - // - // Returns ErrResourceDoesNotExist if the infoHash or peer does not - // exist. - DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error - - // GraduateLeecher promotes a peer from a leecher to a seeder for the - // infoHash within the PeerStore. - // - // If the given Peer is not a leecher, it will still be added to the - // list of seeders and no error will be returned. - GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error - - // AnnouncePeers returns a list of both IPv4, and IPv6 peers for an - // announce. - // - // If seeder is true then the peers returned will only be leechers, the - // ammount of leechers returned will be the smaller value of numWant or - // the available leechers. - // If it is false then seeders will be returned up until numWant or the - // available seeders, whichever is smaller. If the available seeders is - // less than numWant then peers are returned until numWant or they run out. - AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers, peers6 []chihaya.Peer, err error) - // CollectGarbage deletes peers from the peerStore which are older than the - // cutoff time. - CollectGarbage(cutoff time.Time) error - - // GetSeeders gets all the seeders for a particular infoHash. - GetSeeders(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) - // GetLeechers gets all the leechers for a particular infoHash. 
- GetLeechers(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) - - // NumSeeders gets the amount of seeders for a particular infoHash. - NumSeeders(infoHash chihaya.InfoHash) int - // NumLeechers gets the amount of leechers for a particular infoHash. - NumLeechers(infoHash chihaya.InfoHash) int - - // Stopper provides the Stop method that stops the PeerStore. - // Stop should shut down the PeerStore in a separate goroutine and send - // an error to the channel if the shutdown failed. If the shutdown - // was successful, the channel is to be closed. - stopper.Stopper -} - -// PeerStoreDriver represents an interface for creating a handle to the storage -// of peers. -type PeerStoreDriver interface { - New(*DriverConfig) (PeerStore, error) -} - -// RegisterPeerStoreDriver makes a driver available by the provided name. -// -// If this function is called twice with the same name or if the driver is nil, -// it panics. -func RegisterPeerStoreDriver(name string, driver PeerStoreDriver) { - if driver == nil { - panic("storage: could not register nil PeerStoreDriver") - } - - if _, dup := peerStoreDrivers[name]; dup { - panic("storage: could not register duplicate PeerStoreDriver: " + name) - } - - peerStoreDrivers[name] = driver -} - -// OpenPeerStore returns a PeerStore specified by a configuration. -func OpenPeerStore(cfg *DriverConfig) (PeerStore, error) { - driver, ok := peerStoreDrivers[cfg.Name] - if !ok { - return nil, fmt.Errorf("storage: unknown PeerStoreDriver %q (forgotten import?)", cfg) - } - - return driver.New(cfg) -} diff --git a/server/store/store.go b/server/store/store.go deleted file mode 100644 index e47ec94..0000000 --- a/server/store/store.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package store - -import ( - "errors" - "log" - "time" - - "gopkg.in/yaml.v2" - - "github.com/chihaya/chihaya" - "github.com/chihaya/chihaya/pkg/stopper" - "github.com/chihaya/chihaya/server" - "github.com/chihaya/chihaya/tracker" -) - -var theStore *Store - -func init() { - server.Register("store", constructor) -} - -// ErrResourceDoesNotExist is the error returned by all delete methods in the -// store if the requested resource does not exist. -var ErrResourceDoesNotExist = errors.New("resource does not exist") - -func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) { - if theStore == nil { - cfg, err := newConfig(srvcfg) - if err != nil { - return nil, errors.New("store: invalid store config: " + err.Error()) - } - - theStore = &Store{ - cfg: cfg, - tkr: tkr, - shutdown: make(chan struct{}), - sg: stopper.NewStopGroup(), - } - - ps, err := OpenPeerStore(&cfg.PeerStore) - if err != nil { - return nil, err - } - theStore.sg.Add(ps) - - ips, err := OpenIPStore(&cfg.IPStore) - if err != nil { - return nil, err - } - theStore.sg.Add(ips) - - ss, err := OpenStringStore(&cfg.StringStore) - if err != nil { - return nil, err - } - theStore.sg.Add(ss) - - theStore.PeerStore = ps - theStore.IPStore = ips - theStore.StringStore = ss - } - return theStore, nil -} - -// Config represents the configuration for the store. -type Config struct { - Addr string `yaml:"addr"` - RequestTimeout time.Duration `yaml:"request_timeout"` - ReadTimeout time.Duration `yaml:"read_timeout"` - WriteTimeout time.Duration `yaml:"write_timeout"` - GCAfter time.Duration `yaml:"gc_after"` - PeerStore DriverConfig `yaml:"peer_store"` - IPStore DriverConfig `yaml:"ip_store"` - StringStore DriverConfig `yaml:"string_store"` -} - -// DriverConfig represents the configuration for a store driver. 
-type DriverConfig struct { - Name string `yaml:"name"` - Config interface{} `yaml:"config"` -} - -func newConfig(srvcfg *chihaya.ServerConfig) (*Config, error) { - bytes, err := yaml.Marshal(srvcfg.Config) - if err != nil { - return nil, err - } - - var cfg Config - err = yaml.Unmarshal(bytes, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} - -// MustGetStore is used by middleware to access the store. -// -// This function calls log.Fatal if a server hasn't been already created by -// the server package. -func MustGetStore() *Store { - if theStore == nil { - log.Fatal("store middleware used without store server") - } - return theStore -} - -// Store provides storage for a tracker. -type Store struct { - cfg *Config - tkr *tracker.Tracker - shutdown chan struct{} - sg *stopper.StopGroup - - PeerStore - IPStore - StringStore -} - -// Start starts the store drivers and blocks until all of them exit. -func (s *Store) Start() { - <-s.shutdown -} - -// Stop stops the store drivers and waits for them to exit. -func (s *Store) Stop() { - errors := s.sg.Stop() - if len(errors) == 0 { - log.Println("Store server shut down cleanly") - } else { - log.Println("Store server: failed to shutdown drivers") - for _, err := range errors { - log.Println(err.Error()) - } - } - close(s.shutdown) -} diff --git a/server/store/store_bench.go b/server/store/store_bench.go deleted file mode 100644 index b1cbfc5..0000000 --- a/server/store/store_bench.go +++ /dev/null @@ -1,1262 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package store - -import ( - "fmt" - "net" - "strings" - "testing" - - "github.com/chihaya/chihaya" - "github.com/stretchr/testify/require" -) - -const num1KElements = 1000 - -// StringStoreBenchmarker is a collection of benchmarks for StringStore drivers. -// Every benchmark expects a new, clean storage. 
Every benchmark should be -// called with a DriverConfig that ensures this. -type StringStoreBenchmarker interface { - AddShort(*testing.B, *DriverConfig) - AddLong(*testing.B, *DriverConfig) - LookupShort(*testing.B, *DriverConfig) - LookupLong(*testing.B, *DriverConfig) - AddRemoveShort(*testing.B, *DriverConfig) - AddRemoveLong(*testing.B, *DriverConfig) - LookupNonExistShort(*testing.B, *DriverConfig) - LookupNonExistLong(*testing.B, *DriverConfig) - RemoveNonExistShort(*testing.B, *DriverConfig) - RemoveNonExistLong(*testing.B, *DriverConfig) - - Add1KShort(*testing.B, *DriverConfig) - Add1KLong(*testing.B, *DriverConfig) - Lookup1KShort(*testing.B, *DriverConfig) - Lookup1KLong(*testing.B, *DriverConfig) - AddRemove1KShort(*testing.B, *DriverConfig) - AddRemove1KLong(*testing.B, *DriverConfig) - LookupNonExist1KShort(*testing.B, *DriverConfig) - LookupNonExist1KLong(*testing.B, *DriverConfig) - RemoveNonExist1KShort(*testing.B, *DriverConfig) - RemoveNonExist1KLong(*testing.B, *DriverConfig) -} - -var _ StringStoreBenchmarker = &stringStoreBench{} - -type stringStoreBench struct { - // sShort holds differentStrings unique strings of length 10. - sShort [num1KElements]string - // sLong holds differentStrings unique strings of length 1000. - sLong [num1KElements]string - - driver StringStoreDriver -} - -func generateLongStrings() (a [num1KElements]string) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = strings.Repeat(fmt.Sprintf("%x", b), 250) - } - - return -} - -func generateShortStrings() (a [num1KElements]string) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = strings.Repeat(fmt.Sprintf("%x", b), 3)[:10] - } - - return -} - -// PrepareStringStoreBenchmarker prepares a reusable suite for StringStore driver -// benchmarks. 
-func PrepareStringStoreBenchmarker(driver StringStoreDriver) StringStoreBenchmarker { - return stringStoreBench{ - sShort: generateShortStrings(), - sLong: generateLongStrings(), - driver: driver, - } -} - -type stringStoreSetupFunc func(StringStore) error - -func stringStoreSetupNOP(StringStore) error { return nil } - -type stringStoreBenchFunc func(StringStore, int) error - -func (sb stringStoreBench) runBenchmark(b *testing.B, cfg *DriverConfig, setup stringStoreSetupFunc, execute stringStoreBenchFunc) { - ss, err := sb.driver.New(cfg) - require.Nil(b, err, "Constructor error must be nil") - require.NotNil(b, ss, "String store must not be nil") - - err = setup(ss) - require.Nil(b, err, "Benchmark setup must not fail") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - execute(ss, i) - } - b.StopTimer() - - errChan := ss.Stop() - err = <-errChan - require.Nil(b, err, "StringStore shutdown must not fail") -} - -func (sb stringStoreBench) AddShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.PutString(sb.sShort[0]) - return nil - }) -} - -func (sb stringStoreBench) AddLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.PutString(sb.sLong[0]) - return nil - }) -} - -func (sb stringStoreBench) Add1KShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.PutString(sb.sShort[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) Add1KLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.PutString(sb.sLong[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) LookupShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, - func(ss StringStore) error { - return ss.PutString(sb.sShort[0]) - }, - func(ss StringStore, i int) error { 
- ss.HasString(sb.sShort[0]) - return nil - }) -} - -func (sb stringStoreBench) LookupLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, - func(ss StringStore) error { - return ss.PutString(sb.sLong[0]) - }, - func(ss StringStore, i int) error { - ss.HasString(sb.sLong[0]) - return nil - }) -} - -func (sb stringStoreBench) Lookup1KShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, - func(ss StringStore) error { - for i := 0; i < num1KElements; i++ { - err := ss.PutString(sb.sShort[i]) - if err != nil { - return err - } - } - return nil - }, - func(ss StringStore, i int) error { - ss.HasString(sb.sShort[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) Lookup1KLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, - func(ss StringStore) error { - for i := 0; i < num1KElements; i++ { - err := ss.PutString(sb.sLong[i]) - if err != nil { - return err - } - } - return nil - }, - func(ss StringStore, i int) error { - ss.HasString(sb.sLong[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) AddRemoveShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.PutString(sb.sShort[0]) - ss.RemoveString(sb.sShort[0]) - return nil - }) -} - -func (sb stringStoreBench) AddRemoveLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.PutString(sb.sLong[0]) - ss.RemoveString(sb.sLong[0]) - return nil - }) -} - -func (sb stringStoreBench) AddRemove1KShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.PutString(sb.sShort[i%num1KElements]) - ss.RemoveString(sb.sShort[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) AddRemove1KLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - 
ss.PutString(sb.sLong[i%num1KElements]) - ss.RemoveString(sb.sLong[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) LookupNonExistShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.HasString(sb.sShort[0]) - return nil - }) -} - -func (sb stringStoreBench) LookupNonExistLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.HasString(sb.sLong[0]) - return nil - }) -} - -func (sb stringStoreBench) LookupNonExist1KShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.HasString(sb.sShort[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) LookupNonExist1KLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.HasString(sb.sLong[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) RemoveNonExistShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.RemoveString(sb.sShort[0]) - return nil - }) -} - -func (sb stringStoreBench) RemoveNonExistLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.RemoveString(sb.sLong[0]) - return nil - }) -} - -func (sb stringStoreBench) RemoveNonExist1KShort(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.RemoveString(sb.sShort[i%num1KElements]) - return nil - }) -} - -func (sb stringStoreBench) RemoveNonExist1KLong(b *testing.B, cfg *DriverConfig) { - sb.runBenchmark(b, cfg, stringStoreSetupNOP, - func(ss StringStore, i int) error { - ss.RemoveString(sb.sLong[i%num1KElements]) - return nil - }) -} - -// IPStoreBenchmarker is a collection of benchmarks for 
IPStore drivers. -// Every benchmark expects a new, clean storage. Every benchmark should be -// called with a DriverConfig that ensures this. -type IPStoreBenchmarker interface { - AddV4(*testing.B, *DriverConfig) - AddV6(*testing.B, *DriverConfig) - LookupV4(*testing.B, *DriverConfig) - LookupV6(*testing.B, *DriverConfig) - AddRemoveV4(*testing.B, *DriverConfig) - AddRemoveV6(*testing.B, *DriverConfig) - LookupNonExistV4(*testing.B, *DriverConfig) - LookupNonExistV6(*testing.B, *DriverConfig) - RemoveNonExistV4(*testing.B, *DriverConfig) - RemoveNonExistV6(*testing.B, *DriverConfig) - - AddV4Network(*testing.B, *DriverConfig) - AddV6Network(*testing.B, *DriverConfig) - LookupV4Network(*testing.B, *DriverConfig) - LookupV6Network(*testing.B, *DriverConfig) - AddRemoveV4Network(*testing.B, *DriverConfig) - AddRemoveV6Network(*testing.B, *DriverConfig) - RemoveNonExistV4Network(*testing.B, *DriverConfig) - RemoveNonExistV6Network(*testing.B, *DriverConfig) - - Add1KV4(*testing.B, *DriverConfig) - Add1KV6(*testing.B, *DriverConfig) - Lookup1KV4(*testing.B, *DriverConfig) - Lookup1KV6(*testing.B, *DriverConfig) - AddRemove1KV4(*testing.B, *DriverConfig) - AddRemove1KV6(*testing.B, *DriverConfig) - LookupNonExist1KV4(*testing.B, *DriverConfig) - LookupNonExist1KV6(*testing.B, *DriverConfig) - RemoveNonExist1KV4(*testing.B, *DriverConfig) - RemoveNonExist1KV6(*testing.B, *DriverConfig) - - Add1KV4Network(*testing.B, *DriverConfig) - Add1KV6Network(*testing.B, *DriverConfig) - Lookup1KV4Network(*testing.B, *DriverConfig) - Lookup1KV6Network(*testing.B, *DriverConfig) - AddRemove1KV4Network(*testing.B, *DriverConfig) - AddRemove1KV6Network(*testing.B, *DriverConfig) - RemoveNonExist1KV4Network(*testing.B, *DriverConfig) - RemoveNonExist1KV6Network(*testing.B, *DriverConfig) -} - -func generateV4Networks() (a [num1KElements]string) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = fmt.Sprintf("64.%d.%d.255/24", b[0], b[1]) - } - 
- return -} - -func generateV6Networks() (a [num1KElements]string) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = fmt.Sprintf("6464:6464:6464:%02x%02x:ffff:ffff:ffff:ffff/64", b[0], b[1]) - } - - return -} - -func generateV4IPs() (a [num1KElements]net.IP) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = net.ParseIP(fmt.Sprintf("64.%d.%d.64", b[0], b[1])).To4() - } - - return -} - -func generateV6IPs() (a [num1KElements]net.IP) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = net.ParseIP(fmt.Sprintf("6464:6464:6464:%02x%02x:6464:6464:6464:6464", b[0], b[1])) - } - - return -} - -type ipStoreBench struct { - v4IPs [num1KElements]net.IP - v6IPs [num1KElements]net.IP - - v4Networks [num1KElements]string - v6Networks [num1KElements]string - - driver IPStoreDriver -} - -// PrepareIPStoreBenchmarker prepares a reusable suite for StringStore driver -// benchmarks. 
-func PrepareIPStoreBenchmarker(driver IPStoreDriver) IPStoreBenchmarker { - return ipStoreBench{ - v4IPs: generateV4IPs(), - v6IPs: generateV6IPs(), - v4Networks: generateV4Networks(), - v6Networks: generateV6Networks(), - driver: driver, - } -} - -type ipStoreSetupFunc func(IPStore) error - -func ipStoreSetupNOP(IPStore) error { return nil } - -type ipStoreBenchFunc func(IPStore, int) error - -func (ib ipStoreBench) runBenchmark(b *testing.B, cfg *DriverConfig, setup ipStoreSetupFunc, execute ipStoreBenchFunc) { - is, err := ib.driver.New(cfg) - require.Nil(b, err, "Constructor error must be nil") - require.NotNil(b, is, "IP store must not be nil") - - err = setup(is) - require.Nil(b, err, "Benchmark setup must not fail") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - execute(is, i) - } - b.StopTimer() - - errChan := is.Stop() - err = <-errChan - require.Nil(b, err, "IPStore shutdown must not fail") -} - -func (ib ipStoreBench) AddV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v4IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) AddV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v6IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) LookupV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - return is.AddIP(ib.v4IPs[0]) - }, - func(is IPStore, i int) error { - is.HasIP(ib.v4IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) LookupV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - return is.AddIP(ib.v6IPs[0]) - }, - func(is IPStore, i int) error { - is.HasIP(ib.v6IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) AddRemoveV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v4IPs[0]) - is.RemoveIP(ib.v4IPs[0]) - return nil - }) -} - 
-func (ib ipStoreBench) AddRemoveV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v6IPs[0]) - is.RemoveIP(ib.v6IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) LookupNonExistV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.HasIP(ib.v4IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) LookupNonExistV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.HasIP(ib.v6IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExistV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveIP(ib.v4IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExistV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveIP(ib.v6IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) AddV4Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v4Networks[0]) - return nil - }) -} - -func (ib ipStoreBench) AddV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v6Networks[0]) - return nil - }) -} - -func (ib ipStoreBench) LookupV4Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - return is.AddNetwork(ib.v4Networks[0]) - }, - func(is IPStore, i int) error { - is.HasIP(ib.v4IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) LookupV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - return is.AddNetwork(ib.v6Networks[0]) - }, - func(is IPStore, i int) error { - is.HasIP(ib.v6IPs[0]) - return nil - }) -} - -func (ib ipStoreBench) AddRemoveV4Network(b 
*testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v4Networks[0]) - is.RemoveNetwork(ib.v4Networks[0]) - return nil - }) -} - -func (ib ipStoreBench) AddRemoveV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v6Networks[0]) - is.RemoveNetwork(ib.v6Networks[0]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExistV4Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveNetwork(ib.v4Networks[0]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExistV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveNetwork(ib.v6Networks[0]) - return nil - }) -} - -func (ib ipStoreBench) Add1KV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v4IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) Add1KV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v6IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) Lookup1KV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - for i := 0; i < num1KElements; i++ { - err := is.AddIP(ib.v4IPs[i%num1KElements]) - if err != nil { - return err - } - } - return nil - }, - func(is IPStore, i int) error { - is.HasIP(ib.v4IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) Lookup1KV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - for i := 0; i < num1KElements; i++ { - err := is.AddIP(ib.v6IPs[i%num1KElements]) - if err != nil { - return err - } - } - return nil - }, - func(is IPStore, i int) error { - is.HasIP(ib.v6IPs[i%num1KElements]) - 
return nil - }) -} - -func (ib ipStoreBench) AddRemove1KV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v4IPs[i%num1KElements]) - is.RemoveIP(ib.v4IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) AddRemove1KV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddIP(ib.v6IPs[i%num1KElements]) - is.RemoveIP(ib.v6IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) LookupNonExist1KV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.HasIP(ib.v4IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) LookupNonExist1KV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.HasIP(ib.v6IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExist1KV4(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveIP(ib.v4IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExist1KV6(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveIP(ib.v6IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) Add1KV4Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v4Networks[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) Add1KV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v6Networks[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) Lookup1KV4Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - for i := 0; i < 
num1KElements; i++ { - err := is.AddNetwork(ib.v4Networks[i%num1KElements]) - if err != nil { - return err - } - } - return nil - }, - func(is IPStore, i int) error { - is.HasIP(ib.v4IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) Lookup1KV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, - func(is IPStore) error { - for i := 0; i < num1KElements; i++ { - err := is.AddNetwork(ib.v6Networks[i%num1KElements]) - if err != nil { - return err - } - } - return nil - }, - func(is IPStore, i int) error { - is.HasIP(ib.v6IPs[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) AddRemove1KV4Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v4Networks[i%num1KElements]) - is.RemoveNetwork(ib.v4Networks[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) AddRemove1KV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.AddNetwork(ib.v6Networks[i%num1KElements]) - is.RemoveNetwork(ib.v6Networks[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExist1KV4Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveNetwork(ib.v4Networks[i%num1KElements]) - return nil - }) -} - -func (ib ipStoreBench) RemoveNonExist1KV6Network(b *testing.B, cfg *DriverConfig) { - ib.runBenchmark(b, cfg, ipStoreSetupNOP, - func(is IPStore, i int) error { - is.RemoveNetwork(ib.v6Networks[i%num1KElements]) - return nil - }) -} - -// PeerStoreBenchmarker is a collection of benchmarks for PeerStore drivers. -// Every benchmark expects a new, clean storage. Every benchmark should be -// called with a DriverConfig that ensures this. 
-type PeerStoreBenchmarker interface { - PutSeeder(*testing.B, *DriverConfig) - PutSeeder1KInfohash(*testing.B, *DriverConfig) - PutSeeder1KSeeders(*testing.B, *DriverConfig) - PutSeeder1KInfohash1KSeeders(*testing.B, *DriverConfig) - - PutDeleteSeeder(*testing.B, *DriverConfig) - PutDeleteSeeder1KInfohash(*testing.B, *DriverConfig) - PutDeleteSeeder1KSeeders(*testing.B, *DriverConfig) - PutDeleteSeeder1KInfohash1KSeeders(*testing.B, *DriverConfig) - - DeleteSeederNonExist(*testing.B, *DriverConfig) - DeleteSeederNonExist1KInfohash(*testing.B, *DriverConfig) - DeleteSeederNonExist1KSeeders(*testing.B, *DriverConfig) - DeleteSeederNonExist1KInfohash1KSeeders(*testing.B, *DriverConfig) - - PutGraduateDeleteLeecher(*testing.B, *DriverConfig) - PutGraduateDeleteLeecher1KInfohash(*testing.B, *DriverConfig) - PutGraduateDeleteLeecher1KLeechers(*testing.B, *DriverConfig) - PutGraduateDeleteLeecher1KInfohash1KLeechers(*testing.B, *DriverConfig) - - GraduateLeecherNonExist(*testing.B, *DriverConfig) - GraduateLeecherNonExist1KInfohash(*testing.B, *DriverConfig) - GraduateLeecherNonExist1KLeechers(*testing.B, *DriverConfig) - GraduateLeecherNonExist1KInfohash1KLeechers(*testing.B, *DriverConfig) - - AnnouncePeers(*testing.B, *DriverConfig) - AnnouncePeers1KInfohash(*testing.B, *DriverConfig) - AnnouncePeersSeeder(*testing.B, *DriverConfig) - AnnouncePeersSeeder1KInfohash(*testing.B, *DriverConfig) - - GetSeeders(*testing.B, *DriverConfig) - GetSeeders1KInfohash(*testing.B, *DriverConfig) - - NumSeeders(*testing.B, *DriverConfig) - NumSeeders1KInfohash(*testing.B, *DriverConfig) -} - -type peerStoreBench struct { - infohashes [num1KElements]chihaya.InfoHash - peers [num1KElements]chihaya.Peer - driver PeerStoreDriver -} - -func generateInfohashes() (a [num1KElements]chihaya.InfoHash) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = chihaya.InfoHash([20]byte{b[0], b[1]}) - } - - return -} - -func generatePeers() (a 
[num1KElements]chihaya.Peer) { - b := make([]byte, 2) - for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = chihaya.Peer{ - ID: chihaya.PeerID([20]byte{b[0], b[1]}), - IP: net.ParseIP(fmt.Sprintf("64.%d.%d.64", b[0], b[1])), - Port: uint16(i), - } - } - - return -} - -// PreparePeerStoreBenchmarker prepares a reusable suite for PeerStore driver -// benchmarks. -func PreparePeerStoreBenchmarker(driver PeerStoreDriver) PeerStoreBenchmarker { - return peerStoreBench{ - driver: driver, - infohashes: generateInfohashes(), - peers: generatePeers(), - } -} - -type peerStoreSetupFunc func(PeerStore) error - -func peerStoreSetupNOP(PeerStore) error { return nil } - -type peerStoreBenchFunc func(PeerStore, int) error - -func (pb peerStoreBench) runBenchmark(b *testing.B, cfg *DriverConfig, setup peerStoreSetupFunc, execute peerStoreBenchFunc) { - ps, err := pb.driver.New(cfg) - require.Nil(b, err, "Constructor error must be nil") - require.NotNil(b, ps, "Peer store must not be nil") - - err = setup(ps) - require.Nil(b, err, "Benchmark setup must not fail") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - execute(ps, i) - } - b.StopTimer() - - errChan := ps.Stop() - err = <-errChan - require.Nil(b, err, "PeerStore shutdown must not fail") -} - -func (pb peerStoreBench) PutSeeder(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[0], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) PutSeeder1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) PutSeeder1KSeeders(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) - return nil - }) -} - -func (pb 
peerStoreBench) PutSeeder1KInfohash1KSeeders(b *testing.B, cfg *DriverConfig) { - j := 0 - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - j += 3 - return nil - }) -} - -func (pb peerStoreBench) PutDeleteSeeder(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[0], pb.peers[0]) - ps.DeleteSeeder(pb.infohashes[0], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) PutDeleteSeeder1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) - ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) PutDeleteSeeder1KSeeders(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) - ps.DeleteSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) - return nil - }) -} - -func (pb peerStoreBench) PutDeleteSeeder1KInfohash1KSeeders(b *testing.B, cfg *DriverConfig) { - j := 0 - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - j += 3 - return nil - }) -} - -func (pb peerStoreBench) DeleteSeederNonExist(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.DeleteSeeder(pb.infohashes[0], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) DeleteSeederNonExist1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - 
ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) DeleteSeederNonExist1KSeeders(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.DeleteSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) - return nil - }) -} - -func (pb peerStoreBench) DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B, cfg *DriverConfig) { - j := 0 - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - j += 3 - return nil - }) -} - -func (pb peerStoreBench) GraduateLeecherNonExist(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.GraduateLeecher(pb.infohashes[0], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) GraduateLeecherNonExist1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) GraduateLeecherNonExist1KLeechers(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.GraduateLeecher(pb.infohashes[0], pb.peers[i%num1KElements]) - return nil - }) -} - -func (pb peerStoreBench) GraduateLeecherNonExist1KInfohash1KLeechers(b *testing.B, cfg *DriverConfig) { - j := 0 - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - j += 3 - return nil - }) -} - -func (pb peerStoreBench) PutGraduateDeleteLeecher(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutLeecher(pb.infohashes[0], pb.peers[0]) - ps.GraduateLeecher(pb.infohashes[0], pb.peers[0]) - 
ps.DeleteSeeder(pb.infohashes[0], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) PutGraduateDeleteLeecher1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutLeecher(pb.infohashes[i%num1KElements], pb.peers[0]) - ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[0]) - ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[0]) - return nil - }) -} - -func (pb peerStoreBench) PutGraduateDeleteLeecher1KLeechers(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutLeecher(pb.infohashes[0], pb.peers[i%num1KElements]) - ps.GraduateLeecher(pb.infohashes[0], pb.peers[i%num1KElements]) - ps.DeleteSeeder(pb.infohashes[0], pb.peers[i%num1KElements]) - return nil - }) -} - -func (pb peerStoreBench) PutGraduateDeleteLeecher1KInfohash1KLeechers(b *testing.B, cfg *DriverConfig) { - j := 0 - pb.runBenchmark(b, cfg, peerStoreSetupNOP, - func(ps PeerStore, i int) error { - ps.PutLeecher(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - ps.GraduateLeecher(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - ps.DeleteSeeder(pb.infohashes[i%num1KElements], pb.peers[j%num1KElements]) - j += 3 - return nil - }) -} - -func (pb peerStoreBench) AnnouncePeers(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.AnnouncePeers(pb.infohashes[0], false, 50, pb.peers[0], chihaya.Peer{}) - return nil - }) -} - -func (pb peerStoreBench) AnnouncePeers1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps 
PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.AnnouncePeers(pb.infohashes[i%num1KElements], false, 50, pb.peers[0], chihaya.Peer{}) - return nil - }) -} - -func (pb peerStoreBench) AnnouncePeersSeeder(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.AnnouncePeers(pb.infohashes[0], true, 50, pb.peers[0], chihaya.Peer{}) - return nil - }) -} - -func (pb peerStoreBench) AnnouncePeersSeeder1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.AnnouncePeers(pb.infohashes[i%num1KElements], true, 50, pb.peers[0], chihaya.Peer{}) - return nil - }) -} - -func (pb peerStoreBench) GetSeeders(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], 
pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.GetSeeders(pb.infohashes[0]) - return nil - }) -} - -func (pb peerStoreBench) GetSeeders1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.GetSeeders(pb.infohashes[i%num1KElements]) - return nil - }) -} - -func (pb peerStoreBench) NumSeeders(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.NumSeeders(pb.infohashes[0]) - return nil - }) -} - -func (pb peerStoreBench) NumSeeders1KInfohash(b *testing.B, cfg *DriverConfig) { - pb.runBenchmark(b, cfg, - func(ps PeerStore) error { - for i := 0; i < num1KElements; i++ { - for j := 0; j < num1KElements; j++ { - var err error - if j < num1KElements/2 { - err = ps.PutLeecher(pb.infohashes[i], pb.peers[j]) - } else { - err = ps.PutSeeder(pb.infohashes[i], pb.peers[j]) - } - if err != nil { - return err - } - } - } - return nil - }, - func(ps PeerStore, i int) error { - ps.NumSeeders(pb.infohashes[i%num1KElements]) - return nil - }) -} diff --git a/server/store/store_tests.go b/server/store/store_tests.go deleted file mode 100644 index 3c513d8..0000000 --- a/server/store/store_tests.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2016 The Chihaya 
Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package store - -import ( - "testing" - - "net" - - "time" - - "github.com/chihaya/chihaya" - "github.com/stretchr/testify/require" -) - -// StringStoreTester is a collection of tests for a StringStore driver. -// Every benchmark expects a new, clean storage. Every benchmark should be -// called with a DriverConfig that ensures this. -type StringStoreTester interface { - TestStringStore(*testing.T, *DriverConfig) -} - -var _ StringStoreTester = &stringStoreTester{} - -type stringStoreTester struct { - s1, s2 string - driver StringStoreDriver -} - -// PrepareStringStoreTester prepares a reusable suite for StringStore driver -// tests. -func PrepareStringStoreTester(driver StringStoreDriver) StringStoreTester { - return &stringStoreTester{ - s1: "abc", - s2: "def", - driver: driver, - } -} - -func (s *stringStoreTester) TestStringStore(t *testing.T, cfg *DriverConfig) { - ss, err := s.driver.New(cfg) - require.Nil(t, err) - require.NotNil(t, ss) - - has, err := ss.HasString(s.s1) - require.Nil(t, err) - require.False(t, has) - - has, err = ss.HasString(s.s2) - require.Nil(t, err) - require.False(t, has) - - err = ss.RemoveString(s.s1) - require.NotNil(t, err) - - err = ss.PutString(s.s1) - require.Nil(t, err) - - has, err = ss.HasString(s.s1) - require.Nil(t, err) - require.True(t, has) - - has, err = ss.HasString(s.s2) - require.Nil(t, err) - require.False(t, has) - - err = ss.PutString(s.s1) - require.Nil(t, err) - - err = ss.PutString(s.s2) - require.Nil(t, err) - - has, err = ss.HasString(s.s1) - require.Nil(t, err) - require.True(t, has) - - has, err = ss.HasString(s.s2) - require.Nil(t, err) - require.True(t, has) - - err = ss.RemoveString(s.s1) - require.Nil(t, err) - - err = ss.RemoveString(s.s2) - require.Nil(t, err) - - has, err = ss.HasString(s.s1) - require.Nil(t, err) - require.False(t, has) - - has, err = 
ss.HasString(s.s2) - require.Nil(t, err) - require.False(t, has) - - errChan := ss.Stop() - err = <-errChan - require.Nil(t, err, "StringStore shutdown must not fail") -} - -// IPStoreTester is a collection of tests for an IPStore driver. -// Every benchmark expects a new, clean storage. Every benchmark should be -// called with a DriverConfig that ensures this. -type IPStoreTester interface { - TestIPStore(*testing.T, *DriverConfig) - TestHasAllHasAny(*testing.T, *DriverConfig) - TestNetworks(*testing.T, *DriverConfig) - TestHasAllHasAnyNetworks(*testing.T, *DriverConfig) -} - -var _ IPStoreTester = &ipStoreTester{} - -type ipStoreTester struct { - v6, v4, v4s net.IP - net1, net2 string - inNet1, inNet2 net.IP - excluded net.IP - driver IPStoreDriver -} - -// PrepareIPStoreTester prepares a reusable suite for IPStore driver -// tests. -func PrepareIPStoreTester(driver IPStoreDriver) IPStoreTester { - return &ipStoreTester{ - v6: net.ParseIP("0c22:384e:0:0c22:384e::68"), - v4: net.ParseIP("12.13.14.15"), - v4s: net.ParseIP("12.13.14.15").To4(), - net1: "192.168.22.255/24", - net2: "192.168.23.255/24", - inNet1: net.ParseIP("192.168.22.22"), - inNet2: net.ParseIP("192.168.23.23"), - excluded: net.ParseIP("10.154.243.22"), - driver: driver, - } -} - -func (s *ipStoreTester) TestIPStore(t *testing.T, cfg *DriverConfig) { - is, err := s.driver.New(cfg) - require.Nil(t, err) - require.NotNil(t, is) - - // check default state - found, err := is.HasIP(s.v4) - require.Nil(t, err) - require.False(t, found) - - // check IPv4 - err = is.AddIP(s.v4) - require.Nil(t, err) - - found, err = is.HasIP(s.v4) - require.Nil(t, err) - require.True(t, found) - - found, err = is.HasIP(s.v4s) - require.Nil(t, err) - require.True(t, found) - - found, err = is.HasIP(s.v6) - require.Nil(t, err) - require.False(t, found) - - // check removes - err = is.RemoveIP(s.v6) - require.NotNil(t, err) - - err = is.RemoveIP(s.v4s) - require.Nil(t, err) - - found, err = is.HasIP(s.v4) - require.Nil(t, 
err) - require.False(t, found) - - // check IPv6 - err = is.AddIP(s.v6) - require.Nil(t, err) - - found, err = is.HasIP(s.v6) - require.Nil(t, err) - require.True(t, found) - - err = is.RemoveIP(s.v6) - require.Nil(t, err) - - found, err = is.HasIP(s.v6) - require.Nil(t, err) - require.False(t, found) - - errChan := is.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") -} - -func (s *ipStoreTester) TestHasAllHasAny(t *testing.T, cfg *DriverConfig) { - is, err := s.driver.New(cfg) - require.Nil(t, err) - require.NotNil(t, is) - - found, err := is.HasAnyIP(nil) - require.Nil(t, err) - require.False(t, found) - - found, err = is.HasAllIPs(nil) - require.Nil(t, err) - require.True(t, found) - - found, err = is.HasAllIPs([]net.IP{s.v6}) - require.Nil(t, err) - require.False(t, found) - - err = is.AddIP(s.v4) - require.Nil(t, err) - - found, err = is.HasAnyIP([]net.IP{s.v6, s.v4}) - require.Nil(t, err) - require.True(t, found) - - found, err = is.HasAllIPs([]net.IP{s.v6, s.v4}) - require.Nil(t, err) - require.False(t, found) - - found, err = is.HasAllIPs([]net.IP{s.v4}) - require.Nil(t, err) - require.True(t, found) - - err = is.AddIP(s.v6) - require.Nil(t, err) - - found, err = is.HasAnyIP([]net.IP{s.v6, s.v6}) - require.Nil(t, err) - require.True(t, found) - - found, err = is.HasAllIPs([]net.IP{s.v6, s.v6}) - require.Nil(t, err) - require.True(t, found) - - errChan := is.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") -} - -func (s *ipStoreTester) TestNetworks(t *testing.T, cfg *DriverConfig) { - is, err := s.driver.New(cfg) - require.Nil(t, err) - require.NotNil(t, is) - - match, err := is.HasIP(s.inNet1) - require.Nil(t, err) - require.False(t, match) - - match, err = is.HasIP(s.inNet2) - require.Nil(t, err) - require.False(t, match) - - err = is.AddNetwork("") - require.NotNil(t, err) - - err = is.RemoveNetwork("") - require.NotNil(t, err) - - err = is.AddNetwork(s.net1) - require.Nil(t, err) - - match, 
err = is.HasIP(s.inNet1) - require.Nil(t, err) - require.True(t, match) - - match, err = is.HasIP(s.inNet2) - require.Nil(t, err) - require.False(t, match) - - err = is.RemoveNetwork(s.net2) - require.NotNil(t, err) - - err = is.RemoveNetwork(s.net1) - require.Nil(t, err) - - match, err = is.HasIP(s.inNet1) - require.Nil(t, err) - require.False(t, match) - - match, err = is.HasIP(s.inNet2) - require.Nil(t, err) - require.False(t, match) - - errChan := is.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") -} - -func (s *ipStoreTester) TestHasAllHasAnyNetworks(t *testing.T, cfg *DriverConfig) { - is, err := s.driver.New(cfg) - require.Nil(t, err) - require.NotNil(t, s) - - match, err := is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded}) - require.Nil(t, err) - require.False(t, match) - - match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded}) - require.Nil(t, err) - require.False(t, match) - - err = is.AddNetwork(s.net1) - require.Nil(t, err) - - match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2}) - require.Nil(t, err) - require.True(t, match) - - match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) - require.Nil(t, err) - require.False(t, match) - - err = is.AddNetwork(s.net2) - require.Nil(t, err) - - match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded}) - require.Nil(t, err) - require.True(t, match) - - match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) - require.Nil(t, err) - require.True(t, match) - - match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded}) - require.Nil(t, err) - require.False(t, match) - - err = is.RemoveNetwork(s.net1) - require.Nil(t, err) - - match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2}) - require.Nil(t, err) - require.True(t, match) - - match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) - require.Nil(t, err) - require.False(t, match) - - err = is.RemoveNetwork(s.net2) - require.Nil(t, err) - - match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2}) 
- require.Nil(t, err) - require.False(t, match) - - match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2}) - require.Nil(t, err) - require.False(t, match) - - errChan := is.Stop() - err = <-errChan - require.Nil(t, err, "IPStore shutdown must not fail") -} - -// PeerStoreTester is a collection of tests for a PeerStore driver. -// Every benchmark expects a new, clean storage. Every benchmark should be -// called with a DriverConfig that ensures this. -type PeerStoreTester interface { - // CompareEndpoints sets the function used to compare peers to a - // comparison that only compares endpoints and omits PeerIDs. - CompareEndpoints() - - TestPeerStore(*testing.T, *DriverConfig) -} - -var _ PeerStoreTester = &peerStoreTester{} - -type peerStoreTester struct { - driver PeerStoreDriver - equalityFunc func(a, b chihaya.Peer) bool -} - -// PreparePeerStoreTester prepares a reusable suite for PeerStore driver -// tests. -// The tester will use PeerIDs and endpoints to compare peers. -func PreparePeerStoreTester(driver PeerStoreDriver) PeerStoreTester { - return &peerStoreTester{ - driver: driver, - equalityFunc: func(a, b chihaya.Peer) bool { return a.Equal(b) }, - } -} - -func (pt *peerStoreTester) CompareEndpoints() { - pt.equalityFunc = func(a, b chihaya.Peer) bool { return a.EqualEndpoint(b) } -} - -func (pt *peerStoreTester) peerInSlice(peer chihaya.Peer, peers []chihaya.Peer) bool { - for _, v := range peers { - if pt.equalityFunc(peer, v) { - return true - } - } - return false -} - -func (pt *peerStoreTester) TestPeerStore(t *testing.T, cfg *DriverConfig) { - var ( - hash = chihaya.InfoHash([20]byte{}) - - peers = []struct { - seeder bool - peerID string - ip string - port uint16 - }{ - {false, "-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720}, - {false, "-AZ3042-6ozMq5q6Q3NX", "38.241.13.19", 4833}, - {false, "-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878}, - {false, "-AR6360-6oZyyMWoOOBe", "fd0a:29a8:8445::38", 3167}, - {true, "-AG2083-s1hiF8vGAAg0", 
"231.231.49.173", 1453}, - {true, "-AG3003-lEl2Mm4NEO4n", "254.99.84.77", 7032}, - {true, "-MR1100-00HS~T7*65rm", "211.229.100.129", 2614}, - {true, "-LK0140-ATIV~nbEQAMr", "fdad:c435:bf79::12", 4114}, - {true, "-KT2210-347143496631", "fdda:1b35:7d6e::9", 6179}, - {true, "-TR0960-6ep6svaa61r4", "fd7f:78f0:4c77::55", 4727}, - } - ) - s, err := pt.driver.New(cfg) - require.Nil(t, err) - require.NotNil(t, s) - - for _, p := range peers { - // Construct chihaya.Peer from test data. - peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(p.peerID), - IP: net.ParseIP(p.ip), - Port: p.port, - } - - if p.seeder { - err = s.PutSeeder(hash, peer) - } else { - err = s.PutLeecher(hash, peer) - } - require.Nil(t, err) - } - - leechers1, leechers61, err := s.GetLeechers(hash) - require.Nil(t, err) - require.NotEmpty(t, leechers1) - require.NotEmpty(t, leechers61) - num := s.NumLeechers(hash) - require.Equal(t, len(leechers1)+len(leechers61), num) - - seeders1, seeders61, err := s.GetSeeders(hash) - require.Nil(t, err) - require.NotEmpty(t, seeders1) - require.NotEmpty(t, seeders61) - num = s.NumSeeders(hash) - require.Equal(t, len(seeders1)+len(seeders61), num) - - leechers := append(leechers1, leechers61...) - seeders := append(seeders1, seeders61...) - - for _, p := range peers { - // Construct chihaya.Peer from test data. - peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(p.peerID), - IP: net.ParseIP(p.ip), - Port: p.port, - } - - if p.seeder { - require.True(t, pt.peerInSlice(peer, seeders)) - } else { - require.True(t, pt.peerInSlice(peer, leechers)) - } - - if p.seeder { - err = s.DeleteSeeder(hash, peer) - } else { - err = s.DeleteLeecher(hash, peer) - } - require.Nil(t, err) - } - - require.Zero(t, s.NumLeechers(hash)) - require.Zero(t, s.NumSeeders(hash)) - - // Re-add all the peers to the peerStore. - for _, p := range peers { - // Construct chihaya.Peer from test data. 
- peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(p.peerID), - IP: net.ParseIP(p.ip), - Port: p.port, - } - if p.seeder { - s.PutSeeder(hash, peer) - } else { - s.PutLeecher(hash, peer) - } - } - - // Check that there are 6 seeders, and 4 leechers. - require.Equal(t, 6, s.NumSeeders(hash)) - require.Equal(t, 4, s.NumLeechers(hash)) - peer := chihaya.Peer{ - ID: chihaya.PeerIDFromString(peers[0].peerID), - IP: net.ParseIP(peers[0].ip), - Port: peers[0].port, - } - err = s.GraduateLeecher(hash, peer) - require.Nil(t, err) - // Check that there are 7 seeders, and 3 leechers after graduating a - // leecher to a seeder. - require.Equal(t, 7, s.NumSeeders(hash)) - require.Equal(t, 3, s.NumLeechers(hash)) - - _, _, err = s.AnnouncePeers(hash, true, 5, peer, chihaya.Peer{}) - // Only test if it works, do not test the slices returned. They change - // depending on the driver. - require.Nil(t, err) - - err = s.CollectGarbage(time.Now()) - require.Nil(t, err) - require.Equal(t, 0, s.NumLeechers(hash)) - require.Equal(t, 0, s.NumSeeders(hash)) - - errChan := s.Stop() - err = <-errChan - require.Nil(t, err, "PeerStore shutdown must not fail") -} diff --git a/server/store/string_store.go b/server/store/string_store.go deleted file mode 100644 index 77ce849..0000000 --- a/server/store/string_store.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -package store - -import ( - "fmt" - - "github.com/chihaya/chihaya/pkg/stopper" -) - -var stringStoreDrivers = make(map[string]StringStoreDriver) - -// StringStore represents an interface for manipulating strings. -type StringStore interface { - // PutString adds the given string to the StringStore. - PutString(s string) error - - // HasString returns whether or not the StringStore contains the given - // string. 
- HasString(s string) (bool, error) - - // RemoveString removes the string from the string store. - // Returns ErrResourceDoesNotExist if the given string is not contained - // in the store. - RemoveString(s string) error - - // Stopper provides the Stop method that stops the StringStore. - // Stop should shut down the StringStore in a separate goroutine and send - // an error to the channel if the shutdown failed. If the shutdown - // was successful, the channel is to be closed. - stopper.Stopper -} - -// StringStoreDriver represents an interface for creating a handle to the -// storage of strings. -type StringStoreDriver interface { - New(*DriverConfig) (StringStore, error) -} - -// RegisterStringStoreDriver makes a driver available by the provided name. -// -// If this function is called twice with the same name or if the driver is nil, -// it panics. -func RegisterStringStoreDriver(name string, driver StringStoreDriver) { - if driver == nil { - panic("store: could not register nil StringStoreDriver") - } - if _, dup := stringStoreDrivers[name]; dup { - panic("store: could not register duplicate StringStoreDriver: " + name) - } - stringStoreDrivers[name] = driver -} - -// OpenStringStore returns a StringStore specified by a configuration. -func OpenStringStore(cfg *DriverConfig) (StringStore, error) { - driver, ok := stringStoreDrivers[cfg.Name] - if !ok { - return nil, fmt.Errorf("store: unknown StringStoreDriver %q (forgotten import?)", cfg) - } - - return driver.New(cfg) -} diff --git a/tracker/middleware.go b/tracker/middleware.go deleted file mode 100644 index ba8e133..0000000 --- a/tracker/middleware.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package tracker - -import "github.com/chihaya/chihaya" - -// AnnounceHandler is a function that operates on an AnnounceResponse before it -// has been delivered to a client. -type AnnounceHandler func(*chihaya.TrackerConfig, *chihaya.AnnounceRequest, *chihaya.AnnounceResponse) error - -// AnnounceMiddleware is a higher-order function used to implement the chaining -// of AnnounceHandlers. -type AnnounceMiddleware func(AnnounceHandler) AnnounceHandler - -// AnnounceMiddlewareConstructor is a function that creates a new -// AnnounceMiddleware from a MiddlewareConfig. -type AnnounceMiddlewareConstructor func(chihaya.MiddlewareConfig) (AnnounceMiddleware, error) - -// AnnounceChain is a chain of AnnounceMiddlewares. -type AnnounceChain struct{ mw []AnnounceMiddleware } - -// Append appends AnnounceMiddlewares to the AnnounceChain. -func (c *AnnounceChain) Append(mw ...AnnounceMiddleware) { - c.mw = append(c.mw, mw...) -} - -// Handler builds an AnnounceChain into an AnnounceHandler. -func (c *AnnounceChain) Handler() AnnounceHandler { - final := func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - return nil - } - - for i := len(c.mw) - 1; i >= 0; i-- { - final = c.mw[i](final) - } - return final -} - -var announceMiddlewareConstructors = make(map[string]AnnounceMiddlewareConstructor) - -// RegisterAnnounceMiddlewareConstructor makes a configurable middleware -// globally available under the provided name. -// -// If this function is called twice with the same name or if the constructor is -// nil, it panics. 
-func RegisterAnnounceMiddlewareConstructor(name string, mw AnnounceMiddlewareConstructor) { - if mw == nil { - panic("tracker: could not register nil AnnounceMiddlewareConstructor") - } - - if _, dup := announceMiddlewareConstructors[name]; dup { - panic("tracker: could not register duplicate AnnounceMiddleware: " + name) - } - - announceMiddlewareConstructors[name] = mw -} - -// RegisterAnnounceMiddleware makes a middleware globally available under the -// provided name. -// -// This function is intended to register middleware that has no configuration. -// If this function is called twice with the same name or if the middleware is -// nil, it panics. -func RegisterAnnounceMiddleware(name string, mw AnnounceMiddleware) { - if mw == nil { - panic("tracker: could not register nil AnnounceMiddleware") - } - - RegisterAnnounceMiddlewareConstructor(name, func(_ chihaya.MiddlewareConfig) (AnnounceMiddleware, error) { - return mw, nil - }) -} - -// ScrapeHandler is a function that operates on a ScrapeResponse before it has -// been delivered to a client. -type ScrapeHandler func(*chihaya.TrackerConfig, *chihaya.ScrapeRequest, *chihaya.ScrapeResponse) error - -// ScrapeMiddleware is higher-order function used to implement the chaining of -// ScrapeHandlers. -type ScrapeMiddleware func(ScrapeHandler) ScrapeHandler - -// ScrapeMiddlewareConstructor is a function that creates a new -// ScrapeMiddleware from a MiddlewareConfig. -type ScrapeMiddlewareConstructor func(chihaya.MiddlewareConfig) (ScrapeMiddleware, error) - -// ScrapeChain is a chain of ScrapeMiddlewares. -type ScrapeChain struct{ mw []ScrapeMiddleware } - -// Append appends ScrapeMiddlewares to the ScrapeChain. -func (c *ScrapeChain) Append(mw ...ScrapeMiddleware) { - c.mw = append(c.mw, mw...) -} - -// Handler builds the ScrapeChain into a ScrapeHandler. 
-func (c *ScrapeChain) Handler() ScrapeHandler { - final := func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) error { - return nil - } - for i := len(c.mw) - 1; i >= 0; i-- { - final = c.mw[i](final) - } - return final -} - -var scrapeMiddlewareConstructors = make(map[string]ScrapeMiddlewareConstructor) - -// RegisterScrapeMiddlewareConstructor makes a configurable middleware globally -// available under the provided name. -// -// If this function is called twice with the same name or if the constructor is -// nil, it panics. -func RegisterScrapeMiddlewareConstructor(name string, mw ScrapeMiddlewareConstructor) { - if mw == nil { - panic("tracker: could not register nil ScrapeMiddlewareConstructor") - } - - if _, dup := scrapeMiddlewareConstructors[name]; dup { - panic("tracker: could not register duplicate ScrapeMiddleware: " + name) - } - - scrapeMiddlewareConstructors[name] = mw -} - -// RegisterScrapeMiddleware makes a middleware globally available under the -// provided name. -// -// This function is intended to register middleware that has no configuration. -// If this function is called twice with the same name or if the middleware is -// nil, it panics. -func RegisterScrapeMiddleware(name string, mw ScrapeMiddleware) { - if mw == nil { - panic("tracker: could not register nil ScrapeMiddleware") - } - - RegisterScrapeMiddlewareConstructor(name, func(_ chihaya.MiddlewareConfig) (ScrapeMiddleware, error) { - return mw, nil - }) -} diff --git a/tracker/middleware_test.go b/tracker/middleware_test.go deleted file mode 100644 index 4d5f19d..0000000 --- a/tracker/middleware_test.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- -package tracker - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/chihaya/chihaya" -) - -func testAnnounceMW1(next AnnounceHandler) AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - resp.IPv4Peers = append(resp.IPv4Peers, chihaya.Peer{ - Port: 1, - }) - return next(cfg, req, resp) - } -} - -func testAnnounceMW2(next AnnounceHandler) AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - resp.IPv4Peers = append(resp.IPv4Peers, chihaya.Peer{ - Port: 2, - }) - return next(cfg, req, resp) - } -} - -func testAnnounceMW3(next AnnounceHandler) AnnounceHandler { - return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error { - resp.IPv4Peers = append(resp.IPv4Peers, chihaya.Peer{ - Port: 3, - }) - return next(cfg, req, resp) - } -} - -func TestAnnounceChain(t *testing.T) { - var achain AnnounceChain - achain.Append(testAnnounceMW1) - achain.Append(testAnnounceMW2) - achain.Append(testAnnounceMW3) - handler := achain.Handler() - resp := &chihaya.AnnounceResponse{} - err := handler(nil, &chihaya.AnnounceRequest{}, resp) - assert.Nil(t, err, "the handler should not return an error") - assert.Equal(t, resp.IPv4Peers, []chihaya.Peer{{Port: 1}, {Port: 2}, {Port: 3}}, "the list of peers added from the middleware should be in the same order.") -} diff --git a/tracker/tracker.go b/tracker/tracker.go deleted file mode 100644 index a0b8391..0000000 --- a/tracker/tracker.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - -// Package tracker implements a protocol-independent, middleware-composed -// BitTorrent tracker. 
-package tracker - -import ( - "errors" - "fmt" - - "github.com/chihaya/chihaya" -) - -// ClientError represents an error that should be exposed to the client over -// the BitTorrent protocol implementation. -type ClientError string - -// Error implements the error interface for ClientError. -func (c ClientError) Error() string { return string(c) } - -// Tracker represents a protocol-independent, middleware-composed BitTorrent -// tracker. -type Tracker struct { - cfg *chihaya.TrackerConfig - handleAnnounce AnnounceHandler - handleScrape ScrapeHandler -} - -// NewTracker constructs a newly allocated Tracker composed of the middleware -// in the provided configuration. -func NewTracker(cfg *chihaya.TrackerConfig) (*Tracker, error) { - var achain AnnounceChain - for _, mwConfig := range cfg.AnnounceMiddleware { - mw, ok := announceMiddlewareConstructors[mwConfig.Name] - if !ok { - return nil, errors.New("failed to find announce middleware: " + mwConfig.Name) - } - middleware, err := mw(mwConfig) - if err != nil { - return nil, fmt.Errorf("failed to load announce middleware %q: %s", mwConfig.Name, err.Error()) - } - achain.Append(middleware) - } - - var schain ScrapeChain - for _, mwConfig := range cfg.ScrapeMiddleware { - mw, ok := scrapeMiddlewareConstructors[mwConfig.Name] - if !ok { - return nil, errors.New("failed to find scrape middleware: " + mwConfig.Name) - } - middleware, err := mw(mwConfig) - if err != nil { - return nil, fmt.Errorf("failed to load scrape middleware %q: %s", mwConfig.Name, err.Error()) - } - schain.Append(middleware) - } - - return &Tracker{ - cfg: cfg, - handleAnnounce: achain.Handler(), - handleScrape: schain.Handler(), - }, nil -} - -// HandleAnnounce runs an AnnounceRequest through the Tracker's middleware and -// returns the result. 
-func (t *Tracker) HandleAnnounce(req *chihaya.AnnounceRequest) (*chihaya.AnnounceResponse, error) { - resp := &chihaya.AnnounceResponse{} - err := t.handleAnnounce(t.cfg, req, resp) - return resp, err -} - -// HandleScrape runs a ScrapeRequest through the Tracker's middleware and -// returns the result. -func (t *Tracker) HandleScrape(req *chihaya.ScrapeRequest) (*chihaya.ScrapeResponse, error) { - resp := &chihaya.ScrapeResponse{ - Files: make(map[chihaya.InfoHash]chihaya.Scrape), - } - err := t.handleScrape(t.cfg, req, resp) - return resp, err -} From a2d3080a6000bc2e729e4161fcb61f51f190903a Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Wed, 3 Aug 2016 03:11:52 -0400 Subject: [PATCH 02/74] initial foundation Definitely doesn't work, certainly has the right ideas. --- DCO | 36 +++++ MAINTAINERS | 1 + bittorrent/bencode/bencode.go | 33 ++++ bittorrent/bencode/decoder.go | 145 +++++++++++++++++ bittorrent/bencode/decoder_test.go | 96 +++++++++++ bittorrent/bencode/encoder.go | 173 ++++++++++++++++++++ bittorrent/bencode/encoder_test.go | 81 ++++++++++ bittorrent/bittorrent.go | 177 ++++++++++++++++++++ bittorrent/client_id.go | 32 ++++ bittorrent/client_id_test.go | 72 +++++++++ bittorrent/event.go | 78 +++++++++ bittorrent/event_test.go | 43 +++++ bittorrent/http/parser.go | 168 +++++++++++++++++++ bittorrent/http/query_params.go | 141 ++++++++++++++++ bittorrent/http/query_params_test.go | 110 +++++++++++++ bittorrent/http/server.go | 136 ++++++++++++++++ bittorrent/http/writer.go | 111 +++++++++++++ bittorrent/http/writer_test.go | 46 ++++++ bittorrent/udp/connection_id.go | 64 ++++++++ bittorrent/udp/connection_id_test.go | 43 +++++ bittorrent/udp/parser.go | 178 ++++++++++++++++++++ bittorrent/udp/server.go | 234 +++++++++++++++++++++++++++ bittorrent/udp/writer.go | 75 +++++++++ cmd/trakr/config.go | 0 cmd/trakr/main.go | 0 hook.go | 77 +++++++++ server.go | 28 ++++ 27 files changed, 2378 insertions(+) create mode 100644 DCO create mode 100644 
MAINTAINERS create mode 100644 bittorrent/bencode/bencode.go create mode 100644 bittorrent/bencode/decoder.go create mode 100644 bittorrent/bencode/decoder_test.go create mode 100644 bittorrent/bencode/encoder.go create mode 100644 bittorrent/bencode/encoder_test.go create mode 100644 bittorrent/bittorrent.go create mode 100644 bittorrent/client_id.go create mode 100644 bittorrent/client_id_test.go create mode 100644 bittorrent/event.go create mode 100644 bittorrent/event_test.go create mode 100644 bittorrent/http/parser.go create mode 100644 bittorrent/http/query_params.go create mode 100644 bittorrent/http/query_params_test.go create mode 100644 bittorrent/http/server.go create mode 100644 bittorrent/http/writer.go create mode 100644 bittorrent/http/writer_test.go create mode 100644 bittorrent/udp/connection_id.go create mode 100644 bittorrent/udp/connection_id_test.go create mode 100644 bittorrent/udp/parser.go create mode 100644 bittorrent/udp/server.go create mode 100644 bittorrent/udp/writer.go create mode 100644 cmd/trakr/config.go create mode 100644 cmd/trakr/main.go create mode 100644 hook.go create mode 100644 server.go diff --git a/DCO b/DCO new file mode 100644 index 0000000..716561d --- /dev/null +++ b/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 0000000..6f09a91 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1 @@ +Jimmy Zelinskie (@jzelinskie) pkg:* diff --git a/bittorrent/bencode/bencode.go b/bittorrent/bencode/bencode.go new file mode 100644 index 0000000..7985adc --- /dev/null +++ b/bittorrent/bencode/bencode.go @@ -0,0 +1,33 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package bencode implements bencoding of data as defined in BEP 3 using +// type assertion over reflection for performance. +package bencode + +// Dict represents a bencode dictionary. +type Dict map[string]interface{} + +// NewDict allocates the memory for a Dict. +func NewDict() Dict { + return make(Dict) +} + +// List represents a bencode list. +type List []interface{} + +// NewList allocates the memory for a List. +func NewList() List { + return make(List, 0) +} diff --git a/bittorrent/bencode/decoder.go b/bittorrent/bencode/decoder.go new file mode 100644 index 0000000..dba087f --- /dev/null +++ b/bittorrent/bencode/decoder.go @@ -0,0 +1,145 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bencode + +import ( + "bufio" + "bytes" + "errors" + "io" + "strconv" +) + +// A Decoder reads bencoded objects from an input stream. +type Decoder struct { + r *bufio.Reader +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: bufio.NewReader(r)} +} + +// Decode unmarshals the next bencoded value in the stream. +func (dec *Decoder) Decode() (interface{}, error) { + return unmarshal(dec.r) +} + +// Unmarshal deserializes and returns the bencoded value in buf. 
+func Unmarshal(buf []byte) (interface{}, error) { + r := bufio.NewReader(bytes.NewBuffer(buf)) + return unmarshal(r) +} + +// unmarshal reads bencoded values from a bufio.Reader +func unmarshal(r *bufio.Reader) (interface{}, error) { + tok, err := r.ReadByte() + if err != nil { + return nil, err + } + + switch tok { + case 'i': + return readTerminatedInt(r, 'e') + + case 'l': + list := NewList() + for { + ok, err := readTerminator(r, 'e') + if err != nil { + return nil, err + } else if ok { + break + } + + v, err := unmarshal(r) + if err != nil { + return nil, err + } + list = append(list, v) + } + return list, nil + + case 'd': + dict := NewDict() + for { + ok, err := readTerminator(r, 'e') + if err != nil { + return nil, err + } else if ok { + break + } + + v, err := unmarshal(r) + if err != nil { + return nil, err + } + + key, ok := v.(string) + if !ok { + return nil, errors.New("bencode: non-string map key") + } + + dict[key], err = unmarshal(r) + if err != nil { + return nil, err + } + } + return dict, nil + + default: + err = r.UnreadByte() + if err != nil { + return nil, err + } + + length, err := readTerminatedInt(r, ':') + if err != nil { + return nil, errors.New("bencode: unknown input sequence") + } + + buf := make([]byte, length) + n, err := r.Read(buf) + + if err != nil { + return nil, err + } else if int64(n) != length { + return nil, errors.New("bencode: short read") + } + + return string(buf), nil + } +} + +func readTerminator(r io.ByteScanner, term byte) (bool, error) { + tok, err := r.ReadByte() + if err != nil { + return false, err + } else if tok == term { + return true, nil + } + return false, r.UnreadByte() +} + +func readTerminatedInt(r *bufio.Reader, term byte) (int64, error) { + buf, err := r.ReadSlice(term) + if err != nil { + return 0, err + } else if len(buf) <= 1 { + return 0, errors.New("bencode: empty integer field") + } + + return strconv.ParseInt(string(buf[:len(buf)-1]), 10, 64) +} diff --git a/bittorrent/bencode/decoder_test.go 
b/bittorrent/bencode/decoder_test.go new file mode 100644 index 0000000..375b69a --- /dev/null +++ b/bittorrent/bencode/decoder_test.go @@ -0,0 +1,96 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bencode + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var unmarshalTests = []struct { + input string + expected interface{} +}{ + {"i42e", int64(42)}, + {"i-42e", int64(-42)}, + + {"7:example", "example"}, + + {"l3:one3:twoe", List{"one", "two"}}, + {"le", List{}}, + + {"d3:one2:aa3:two2:bbe", Dict{"one": "aa", "two": "bb"}}, + {"de", Dict{}}, +} + +func TestUnmarshal(t *testing.T) { + for _, tt := range unmarshalTests { + got, err := Unmarshal([]byte(tt.input)) + assert.Nil(t, err, "unmarshal should not fail") + assert.Equal(t, got, tt.expected, "unmarshalled values should match the expected results") + } +} + +type bufferLoop struct { + val string +} + +func (r *bufferLoop) Read(b []byte) (int, error) { + n := copy(b, r.val) + return n, nil +} + +func BenchmarkUnmarshalScalar(b *testing.B) { + d1 := NewDecoder(&bufferLoop{"7:example"}) + d2 := NewDecoder(&bufferLoop{"i42e"}) + + for i := 0; i < b.N; i++ { + d1.Decode() + d2.Decode() + } +} + +func TestUnmarshalLarge(t *testing.T) { + data := Dict{ + "k1": List{"a", "b", "c"}, + "k2": int64(42), + "k3": "val", + "k4": int64(-42), + } + + buf, _ := Marshal(data) + dec := NewDecoder(&bufferLoop{string(buf)}) + + got, err := dec.Decode() + 
assert.Nil(t, err, "decode should not fail") + assert.Equal(t, got, data, "encoding and decoding should equal the original value") +} + +func BenchmarkUnmarshalLarge(b *testing.B) { + data := map[string]interface{}{ + "k1": []string{"a", "b", "c"}, + "k2": 42, + "k3": "val", + "k4": uint(42), + } + + buf, _ := Marshal(data) + dec := NewDecoder(&bufferLoop{string(buf)}) + + for i := 0; i < b.N; i++ { + dec.Decode() + } +} diff --git a/bittorrent/bencode/encoder.go b/bittorrent/bencode/encoder.go new file mode 100644 index 0000000..bd8701c --- /dev/null +++ b/bittorrent/bencode/encoder.go @@ -0,0 +1,173 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bencode + +import ( + "bytes" + "fmt" + "io" + "strconv" + "time" +) + +// An Encoder writes bencoded objects to an output stream. +type Encoder struct { + w io.Writer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} + +// Encode writes the bencoding of v to the stream. +func (enc *Encoder) Encode(v interface{}) error { + return marshal(enc.w, v) +} + +// Marshal returns the bencoding of v. +func Marshal(v interface{}) ([]byte, error) { + buf := &bytes.Buffer{} + err := marshal(buf, v) + return buf.Bytes(), err +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves. 
+type Marshaler interface { + MarshalBencode() ([]byte, error) +} + +// marshal writes types bencoded to an io.Writer +func marshal(w io.Writer, data interface{}) error { + switch v := data.(type) { + case Marshaler: + bencoded, err := v.MarshalBencode() + if err != nil { + return err + } + _, err = w.Write(bencoded) + if err != nil { + return err + } + + case string: + marshalString(w, v) + + case int: + marshalInt(w, int64(v)) + + case uint: + marshalUint(w, uint64(v)) + + case int16: + marshalInt(w, int64(v)) + + case uint16: + marshalUint(w, uint64(v)) + + case int32: + marshalInt(w, int64(v)) + + case uint32: + marshalUint(w, uint64(v)) + + case int64: + marshalInt(w, v) + + case uint64: + marshalUint(w, v) + + case []byte: + marshalBytes(w, v) + + case time.Duration: // Assume seconds + marshalInt(w, int64(v/time.Second)) + + case Dict: + marshal(w, map[string]interface{}(v)) + + case []Dict: + w.Write([]byte{'l'}) + for _, val := range v { + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + case map[string]interface{}: + w.Write([]byte{'d'}) + for key, val := range v { + marshalString(w, key) + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + case []string: + w.Write([]byte{'l'}) + for _, val := range v { + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + case List: + marshal(w, []interface{}(v)) + + case []interface{}: + w.Write([]byte{'l'}) + for _, val := range v { + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + default: + return fmt.Errorf("attempted to marshal unsupported type:\n%t", v) + } + + return nil +} + +func marshalInt(w io.Writer, v int64) { + w.Write([]byte{'i'}) + w.Write([]byte(strconv.FormatInt(v, 10))) + w.Write([]byte{'e'}) +} + +func marshalUint(w io.Writer, v uint64) { + w.Write([]byte{'i'}) + w.Write([]byte(strconv.FormatUint(v, 10))) + w.Write([]byte{'e'}) +} + +func 
marshalBytes(w io.Writer, v []byte) { + w.Write([]byte(strconv.Itoa(len(v)))) + w.Write([]byte{':'}) + w.Write(v) +} + +func marshalString(w io.Writer, v string) { + marshalBytes(w, []byte(v)) +} diff --git a/bittorrent/bencode/encoder_test.go b/bittorrent/bencode/encoder_test.go new file mode 100644 index 0000000..c432208 --- /dev/null +++ b/bittorrent/bencode/encoder_test.go @@ -0,0 +1,81 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bencode + +import ( + "bytes" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var marshalTests = []struct { + input interface{} + expected []string +}{ + {int(42), []string{"i42e"}}, + {int(-42), []string{"i-42e"}}, + {uint(43), []string{"i43e"}}, + {int64(44), []string{"i44e"}}, + {uint64(45), []string{"i45e"}}, + {int16(44), []string{"i44e"}}, + {uint16(45), []string{"i45e"}}, + + {"example", []string{"7:example"}}, + {[]byte("example"), []string{"7:example"}}, + {30 * time.Minute, []string{"i1800e"}}, + + {[]string{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}}, + {[]interface{}{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}}, + {[]string{}, []string{"le"}}, + + {map[string]interface{}{"one": "aa", "two": "bb"}, []string{"d3:one2:aa3:two2:bbe", "d3:two2:bb3:one2:aae"}}, + {map[string]interface{}{}, []string{"de"}}, +} + +func TestMarshal(t *testing.T) { + for _, test := range marshalTests { + got, err := Marshal(test.input) + 
assert.Nil(t, err, "marshal should not fail") + assert.Contains(t, test.expected, string(got), "the marshaled result should be one of the expected permutations") + } +} + +func BenchmarkMarshalScalar(b *testing.B) { + buf := &bytes.Buffer{} + encoder := NewEncoder(buf) + + for i := 0; i < b.N; i++ { + encoder.Encode("test") + encoder.Encode(123) + } +} + +func BenchmarkMarshalLarge(b *testing.B) { + data := map[string]interface{}{ + "k1": []string{"a", "b", "c"}, + "k2": 42, + "k3": "val", + "k4": uint(42), + } + + buf := &bytes.Buffer{} + encoder := NewEncoder(buf) + + for i := 0; i < b.N; i++ { + encoder.Encode(data) + } +} diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go new file mode 100644 index 0000000..b7f400f --- /dev/null +++ b/bittorrent/bittorrent.go @@ -0,0 +1,177 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bittorrent + +import ( + "net" + "time" +) + +// PeerID represents a peer ID. +type PeerID [20]byte + +// PeerIDFromBytes creates a PeerID from a byte slice. +// +// It panics if b is not 20 bytes long. +func PeerIDFromBytes(b []byte) PeerID { + if len(b) != 20 { + panic("peer ID must be 20 bytes") + } + + var buf [20]byte + copy(buf[:], b) + return PeerID(buf) +} + +// PeerIDFromString creates a PeerID from a string. +// +// It panics if s is not 20 bytes long. 
+func PeerIDFromString(s string) PeerID { + if len(s) != 20 { + panic("peer ID must be 20 bytes") + } + + var buf [20]byte + copy(buf[:], s) + return PeerID(buf) +} + +// InfoHash represents an infohash. +type InfoHash [20]byte + +// InfoHashFromBytes creates an InfoHash from a byte slice. +// +// It panics if b is not 20 bytes long. +func InfoHashFromBytes(b []byte) InfoHash { + if len(b) != 20 { + panic("infohash must be 20 bytes") + } + + var buf [20]byte + copy(buf[:], b) + return InfoHash(buf) +} + +// InfoHashFromString creates an InfoHash from a string. +// +// It panics if s is not 20 bytes long. +func InfoHashFromString(s string) InfoHash { + if len(s) != 20 { + panic("infohash must be 20 bytes") + } + + var buf [20]byte + copy(buf[:], s) + return InfoHash(buf) +} + +// AnnounceRequest represents the parsed parameters from an announce request. +type AnnounceRequest struct { + Event Event + InfoHash InfoHash + Compact bool + NumWant uint32 + Left uint64 + Downloaded uint64 + Uploaded uint64 + + Peer + Params +} + +// AnnounceResponse represents the parameters used to create an announce +// response. +type AnnounceResponse struct { + Compact bool + Complete int32 + Incomplete int32 + Interval time.Duration + MinInterval time.Duration + IPv4Peers []Peer + IPv6Peers []Peer +} + +// AnnounceHandler is a function that generates a response for an Announce. +type AnnounceHandler func(*AnnounceRequest) *AnnounceResponse + +// AnnounceCallback is a function that does something with the results of an +// Announce after it has been completed. +type AnnounceCallback func(*AnnounceRequest, *AnnounceResponse) + +// ScrapeRequest represents the parsed parameters from a scrape request. +type ScrapeRequest struct { + InfoHashes []InfoHash + Params Params +} + +// ScrapeResponse represents the parameters used to create a scrape response. 
+type ScrapeResponse struct { + Files map[InfoHash]Scrape +} + +// Scrape represents the state of a swarm that is returned in a scrape response. +type Scrape struct { + Snatches uint32 + Complete uint32 + Incomplete uint32 +} + +// ScrapeHandler is a function that generates a response for a Scrape. +type ScrapeHandler func(*ScrapeRequest) *ScrapeResponse + +// ScrapeCallback is a function that does something with the results of a +// Scrape after it has been completed. +type ScrapeCallback func(*ScrapeRequest, *ScrapeResponse) + +// Peer represents the connection details of a peer that is returned in an +// announce response. +type Peer struct { + ID PeerID + IP net.IP + Port uint16 +} + +// Equal reports whether p and x are the same. +func (p Peer) Equal(x Peer) bool { return p.EqualEndpoint(x) && p.ID == x.ID } + +// EqualEndpoint reports whether p and x have the same endpoint. +func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP) } + +// Params is used to fetch request optional parameters. +type Params interface { + String(key string) (string, error) +} + +// ClientError represents an error that should be exposed to the client over +// the BitTorrent protocol implementation. +type ClientError string + +// Error implements the error interface for ClientError. +func (c ClientError) Error() string { return string(c) } + +// Server represents an implementation of the BitTorrent tracker protocol. +type Server interface { + ListenAndServe() error + Stop() +} + +// ServerFuncs are the collection of protocol-agnostic functions used to handle +// requests in a Server. 
+type ServerFuncs struct { + HandleAnnounce AnnounceHandler + HandleScrape ScrapeHandler + AfterAnnounce AnnounceCallback + AfterScrape ScrapeCallback +} diff --git a/bittorrent/client_id.go b/bittorrent/client_id.go new file mode 100644 index 0000000..4089639 --- /dev/null +++ b/bittorrent/client_id.go @@ -0,0 +1,32 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bittorrent + +// NewClientID returns the part of a PeerID that identifies a peer's client +// software. +func NewClientID(peerID string) (clientID string) { + length := len(peerID) + if length >= 6 { + if peerID[0] == '-' { + if length >= 7 { + clientID = peerID[1:7] + } + } else { + clientID = peerID[:6] + } + } + + return +} diff --git a/bittorrent/client_id_test.go b/bittorrent/client_id_test.go new file mode 100644 index 0000000..699da3e --- /dev/null +++ b/bittorrent/client_id_test.go @@ -0,0 +1,72 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package bittorrent + +import "testing" + +func TestClientID(t *testing.T) { + var clientTable = []struct { + peerID string + clientID string + }{ + {"-AZ3034-6wfG2wk6wWLc", "AZ3034"}, + {"-AZ3042-6ozMq5q6Q3NX", "AZ3042"}, + {"-BS5820-oy4La2MWGEFj", "BS5820"}, + {"-AR6360-6oZyyMWoOOBe", "AR6360"}, + {"-AG2083-s1hiF8vGAAg0", "AG2083"}, + {"-AG3003-lEl2Mm4NEO4n", "AG3003"}, + {"-MR1100-00HS~T7*65rm", "MR1100"}, + {"-LK0140-ATIV~nbEQAMr", "LK0140"}, + {"-KT2210-347143496631", "KT2210"}, + {"-TR0960-6ep6svaa61r4", "TR0960"}, + {"-XX1150-dv220cotgj4d", "XX1150"}, + {"-AZ2504-192gwethivju", "AZ2504"}, + {"-KT4310-3L4UvarKuqIu", "KT4310"}, + {"-AZ2060-0xJQ02d4309O", "AZ2060"}, + {"-BD0300-2nkdf08Jd890", "BD0300"}, + {"-A~0010-a9mn9DFkj39J", "A~0010"}, + {"-UT2300-MNu93JKnm930", "UT2300"}, + {"-UT2300-KT4310KT4301", "UT2300"}, + + {"T03A0----f089kjsdf6e", "T03A0-"}, + {"S58B-----nKl34GoNb75", "S58B--"}, + {"M4-4-0--9aa757Efd5Bl", "M4-4-0"}, + + {"AZ2500BTeYUzyabAfo6U", "AZ2500"}, // BitTyrant + {"exbc0JdSklm834kj9Udf", "exbc0J"}, // Old BitComet + {"FUTB0L84j542mVc84jkd", "FUTB0L"}, // Alt BitComet + {"XBT054d-8602Jn83NnF9", "XBT054"}, // XBT + {"OP1011affbecbfabeefb", "OP1011"}, // Opera + {"-ML2.7.2-kgjjfkd9762", "ML2.7."}, // MLDonkey + {"-BOWA0C-SDLFJWEIORNM", "BOWA0C"}, // Bits on Wheels + {"Q1-0-0--dsn34DFn9083", "Q1-0-0"}, // Queen Bee + {"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt + {"346------SDFknl33408", "346---"}, // TorreTopia + {"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod + + {"", ""}, + {"-", ""}, + {"12345", ""}, + {"-12345", ""}, + {"123456", "123456"}, + {"-123456", "123456"}, + } + + for _, tt := range clientTable { + if parsedID := NewClientID(tt.peerID); parsedID != tt.clientID { + t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID) + } + } +} diff --git a/bittorrent/event.go b/bittorrent/event.go new file mode 100644 index 
0000000..e5991e6 --- /dev/null +++ b/bittorrent/event.go @@ -0,0 +1,78 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bittorrent + +import ( + "errors" + "strings" +) + +// ErrUnknownEvent is returned when New fails to return an event. +var ErrUnknownEvent = errors.New("unknown event") + +// Event represents an event done by a BitTorrent client. +type Event uint8 + +const ( + // None is the event when a BitTorrent client announces due to time lapsed + // since the previous announce. + None Event = iota + + // Started is the event sent by a BitTorrent client when it joins a swarm. + Started + + // Stopped is the event sent by a BitTorrent client when it leaves a swarm. + Stopped + + // Completed is the event sent by a BitTorrent client when it finishes + // downloading all of the required chunks. + Completed +) + +var ( + eventToString = make(map[Event]string) + stringToEvent = make(map[string]Event) +) + +func init() { + eventToString[None] = "none" + eventToString[Started] = "started" + eventToString[Stopped] = "stopped" + eventToString[Completed] = "completed" + + stringToEvent[""] = None + + for k, v := range eventToString { + stringToEvent[v] = k + } +} + +// NewEvent returns the proper Event given a string. 
+func NewEvent(eventStr string) (Event, error) { + if e, ok := stringToEvent[strings.ToLower(eventStr)]; ok { + return e, nil + } + + return None, ErrUnknownEvent +} + +// String implements Stringer for an event. +func (e Event) String() string { + if name, ok := eventToString[e]; ok { + return name + } + + panic("bittorrent: event has no associated name") +} diff --git a/bittorrent/event_test.go b/bittorrent/event_test.go new file mode 100644 index 0000000..0ce7944 --- /dev/null +++ b/bittorrent/event_test.go @@ -0,0 +1,43 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bittorrent + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNew(t *testing.T) { + var table = []struct { + data string + expected Event + expectedErr error + }{ + {"", None, nil}, + {"NONE", None, nil}, + {"none", None, nil}, + {"started", Started, nil}, + {"stopped", Stopped, nil}, + {"completed", Completed, nil}, + {"notAnEvent", None, ErrUnknownEvent}, + } + + for _, tt := range table { + got, err := NewEvent(tt.data) + assert.Equal(t, err, tt.expectedErr, "errors should equal the expected value") + assert.Equal(t, got, tt.expected, "events should equal the expected value") + } +} diff --git a/bittorrent/http/parser.go b/bittorrent/http/parser.go new file mode 100644 index 0000000..cbf6ba9 --- /dev/null +++ b/bittorrent/http/parser.go @@ -0,0 +1,168 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package http + +import ( + "net" + "net/http" + + "github.com/jzelinskie/trakr/bittorrent" +) + +// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request. +// +// If allowIPSpoofing is true, IPs provided via params will be used. +// If realIPHeader is not empty string, the first value of the HTTP Header with +// that name will be used. 
+func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) { + qp, err := NewQueryParams(r.URL.RawQuery) + if err != nil { + return nil, err + } + + request := &bittorrent.AnnounceRequest{Params: q} + + eventStr, err := qp.String("event") + if err == query.ErrKeyNotFound { + eventStr = "" + } else if err != nil { + return nil, bittorrent.ClientError("failed to parse parameter: event") + } + request.Event, err = bittorrent.NewEvent(eventStr) + if err != nil { + return nil, bittorrent.ClientError("failed to provide valid client event") + } + + compactStr, _ := qp.String("compact") + request.Compact = compactStr != "" && compactStr != "0" + + infoHashes := qp.InfoHashes() + if len(infoHashes) < 1 { + return nil, bittorrent.ClientError("no info_hash parameter supplied") + } + if len(infoHashes) > 1 { + return nil, bittorrent.ClientError("multiple info_hash parameters supplied") + } + request.InfoHash = infoHashes[0] + + peerID, err := qp.String("peer_id") + if err != nil { + return nil, bittorrent.ClientError("failed to parse parameter: peer_id") + } + if len(peerID) != 20 { + return nil, bittorrent.ClientError("failed to provide valid peer_id") + } + request.PeerID = bittorrent.PeerIDFromString(peerID) + + request.Left, err = qp.Uint64("left") + if err != nil { + return nil, bittorrent.ClientError("failed to parse parameter: left") + } + + request.Downloaded, err = qp.Uint64("downloaded") + if err != nil { + return nil, bittorrent.ClientError("failed to parse parameter: downloaded") + } + + request.Uploaded, err = qp.Uint64("uploaded") + if err != nil { + return nil, bittorrent.ClientError("failed to parse parameter: uploaded") + } + + numwant, _ := qp.Uint64("numwant") + request.NumWant = int32(numwant) + + port, err := qp.Uint64("port") + if err != nil { + return nil, bittorrent.ClientError("failed to parse parameter: port") + } + request.Port = uint16(port) + + request.IP, err = requestedIP(q, r, 
realIPHeader, allowIPSpoofing) + if err != nil { + return nil, bittorrent.ClientError("failed to parse peer IP address: " + err.Error()) + } + + return request, nil +} + +// ParseScrape parses an bittorrent.ScrapeRequest from an http.Request. +func ParseScrape(r *http.Request) (*bittorent.ScrapeRequest, error) { + qp, err := NewQueryParams(r.URL.RawQuery) + if err != nil { + return nil, err + } + + infoHashes := qp.InfoHashes() + if len(infoHashes) < 1 { + return nil, bittorrent.ClientError("no info_hash parameter supplied") + } + + request := &bittorrent.ScrapeRequest{ + InfoHashes: infoHashes, + Params: q, + } + + return request, nil +} + +// requestedIP determines the IP address for a BitTorrent client request. +// +// If allowIPSpoofing is true, IPs provided via params will be used. +// If realIPHeader is not empty string, the first value of the HTTP Header with +// that name will be used. +func requestedIP(r *http.Request, p bittorent.Params, realIPHeader string, allowIPSpoofing bool) (net.IP, error) { + if allowIPSpoofing { + if ipstr, err := p.String("ip"); err == nil { + ip, err := net.ParseIP(str) + if err != nil { + return nil, err + } + + return ip, nil + } + + if ipstr, err := p.String("ipv4"); err == nil { + ip, err := net.ParseIP(str) + if err != nil { + return nil, err + } + + return ip, nil + } + + if ipstr, err := p.String("ipv6"); err == nil { + ip, err := net.ParseIP(str) + if err != nil { + return nil, err + } + + return ip, nil + } + } + + if realIPHeader != "" { + if ips, ok := r.Header[realIPHeader]; ok && len(ips) > 0 { + ip, err := net.ParseIP(ips[0]) + if err != nil { + return nil, err + } + + return ip, nil + } + } + + return r.RemoteAddr +} diff --git a/bittorrent/http/query_params.go b/bittorrent/http/query_params.go new file mode 100644 index 0000000..b3fc62c --- /dev/null +++ b/bittorrent/http/query_params.go @@ -0,0 +1,141 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package http + +import ( + "errors" + "net/url" + "strconv" + "strings" + + "github.com/jzelinskie/trakr/bittorrent" +) + +// ErrKeyNotFound is returned when a provided key has no value associated with +// it. +var ErrKeyNotFound = errors.New("http: value for the provided key does not exist") + +// ErrInvalidInfohash is returned when parsing a query encounters an infohash +// with invalid length. +var ErrInvalidInfohash = errors.New("http: invalid infohash") + +// QueryParams parses an HTTP Query and implements the bittorrent.Params +// interface with some additional helpers. +type QueryParams struct { + query string + params map[string]string + infoHashes []bittorrent.InfoHash +} + +// NewQueryParams parses a raw URL query. +func NewQueryParams(query string) (*Query, error) { + var ( + keyStart, keyEnd int + valStart, valEnd int + + onKey = true + + q = &Query{ + query: query, + infoHashes: nil, + params: make(map[string]string), + } + ) + + for i, length := 0, len(query); i < length; i++ { + separator := query[i] == '&' || query[i] == ';' || query[i] == '?' 
+ last := i == length-1 + + if separator || last { + if onKey && !last { + keyStart = i + 1 + continue + } + + if last && !separator && !onKey { + valEnd = i + } + + keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1]) + if err != nil { + return nil, err + } + + var valStr string + + if valEnd > 0 { + valStr, err = url.QueryUnescape(query[valStart : valEnd+1]) + if err != nil { + return nil, err + } + } + + if keyStr == "info_hash" { + if len(valStr) != 20 { + return nil, ErrInvalidInfohash + } + q.infoHashes = append(q.infoHashes, bittorrent.InfoHashFromString(valStr)) + } else { + q.params[strings.ToLower(keyStr)] = valStr + } + + valEnd = 0 + onKey = true + keyStart = i + 1 + + } else if query[i] == '=' { + onKey = false + valStart = i + 1 + valEnd = 0 + } else if onKey { + keyEnd = i + } else { + valEnd = i + } + } + + return q, nil +} + +// String returns a string parsed from a query. Every key can be returned as a +// string because they are encoded in the URL as strings. +func (q *Query) String(key string) (string, error) { + val, exists := q.params[key] + if !exists { + return "", ErrKeyNotFound + } + return val, nil +} + +// Uint64 returns a uint parsed from a query. After being called, it is safe to +// cast the uint64 to your desired length. +func (q *Query) Uint64(key string) (uint64, error) { + str, exists := q.params[key] + if !exists { + return 0, ErrKeyNotFound + } + + val, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return 0, err + } + + return val, nil +} + +// InfoHashes returns a list of requested infohashes. 
+func (q *Query) InfoHashes() []bittorrent.InfoHash { + return q.infoHashes +} diff --git a/bittorrent/http/query_params_test.go b/bittorrent/http/query_params_test.go new file mode 100644 index 0000000..0d96fa5 --- /dev/null +++ b/bittorrent/http/query_params_test.go @@ -0,0 +1,110 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package http + +import ( + "net/url" + "testing" +) + +var ( + baseAddr = "https://www.subdomain.tracker.com:80/" + testInfoHash = "01234567890123456789" + testPeerID = "-TEST01-6wfG2wk6wWLc" + + ValidAnnounceArguments = []url.Values{ + {"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}}, + {"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}}, + {"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}}, + {"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}}, + {"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}}, + {"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}}, + {"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}}, + {"peer_id": 
{testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
		{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		{"peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		{"peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
		{"peer_id": {""}, "compact": {""}},
	}

	InvalidQueries = []string{
		baseAddr + "announce/?" + "info_hash=%0%a",
	}
)

// mapArrayEqual reports whether every value in the boxed map is a
// single-element slice whose element equals the unboxed map's value.
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
	if len(boxed) != len(unboxed) {
		return false
	}

	for mapKey, mapVal := range boxed {
		// Always expect box to hold only one element
		if len(mapVal) != 1 || mapVal[0] != unboxed[mapKey] {
			return false
		}
	}

	return true
}

func TestValidQueries(t *testing.T) {
	for parseIndex, parseVal := range ValidAnnounceArguments {
		parsedQueryObj, err := NewQueryParams(baseAddr + "announce/?" + parseVal.Encode())
		if err != nil {
			t.Error(err)
		}

		if !mapArrayEqual(parseVal, parsedQueryObj.params) {
			// FIX: corrected the "Recieved" typo in the failure message.
			t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Received=%v\n", parseIndex, parseVal, parsedQueryObj.params)
		}
	}
}

func TestInvalidQueries(t *testing.T) {
	for parseIndex, parseStr := range InvalidQueries {
		parsedQueryObj, err := NewQueryParams(parseStr)
		if err == nil {
			t.Error("Should have produced error", parseIndex)
		}

		if parsedQueryObj != nil {
			t.Error("Should be nil after error", parsedQueryObj, parseIndex)
		}
	}
}

func BenchmarkParseQuery(b *testing.B) {
	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range ValidAnnounceArguments {
			parsedQueryObj, err := NewQueryParams(baseAddr + "announce/?" + parseStr.Encode())
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}

func BenchmarkURLParseQuery(b *testing.B) {
	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range ValidAnnounceArguments {
			parsedQueryObj, err := url.ParseQuery(baseAddr + "announce/?" + parseStr.Encode())
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}
diff --git a/bittorrent/http/server.go b/bittorrent/http/server.go
new file mode 100644
index 0000000..fb6ec74
--- /dev/null
+++ b/bittorrent/http/server.go
@@ -0,0 +1,136 @@
// Copyright 2016 Jimmy Zelinskie
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +package http + +type Config struct { + Addr string + ReadTimeout time.Duration + WriteTimeout time.Duration + RequestTimeout time.Duration + AllowIPSpoofing bool + RealIPHeader string +} + +type Server struct { + grace *graceful.Server + + bittorrent.ServerFuncs + Config +} + +func NewServer(funcs bittorrent.ServerFuncs, cfg Config) { + return &Server{ + ServerFuncs: funcs, + Config: cfg, + } +} + +func (s *Server) Stop() { + s.grace.Stop(s.grace.Timeout) + <-s.grace.StopChan() +} + +func (s *Server) handler() { + router := httprouter.New() + router.GET("/announce", s.announceRoute) + router.GET("/scrape", s.scrapeRoute) + return server +} + +func (s *Server) ListenAndServe() error { + s.grace = &graceful.Server{ + Server: &http.Server{ + Addr: s.Addr, + Handler: s.handler(), + ReadTimeout: s.ReadTimeout, + WriteTimeout: s.WriteTimeout, + }, + Timeout: s.RequestTimeout, + NoSignalHandling: true, + ConnState: func(conn net.Conn, state http.ConnState) { + switch state { + case http.StateNew: + //stats.RecordEvent(stats.AcceptedConnection) + + case http.StateClosed: + //stats.RecordEvent(stats.ClosedConnection) + + case http.StateHijacked: + panic("http: connection impossibly hijacked") + + // Ignore the following cases. 
+ case http.StateActive, http.StateIdle: + + default: + panic("http: connection transitioned to unknown state") + } + }, + } + s.grace.SetKeepAlivesEnabled(false) + + if err := s.grace.ListenAndServe(); err != nil { + if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") { + panic("http: failed to gracefully run HTTP server: " + err.Error()) + } + } +} + +func (s *Server) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + req, err := ParseAnnounce(r, s.RealIPHeader, s.AllowIPSpoofing) + if err != nil { + WriteError(w, err) + return + } + + resp, err := s.HandleAnnounce(req) + if err != nil { + WriteError(w, err) + return + } + + err = WriteAnnounceResponse(w, resp) + if err != nil { + WriteError(w, err) + return + } + + if s.AfterAnnounce != nil { + s.AfterAnnounce(req, resp) + } +} + +func (s *Server) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + req, err := ParseScrape(r) + if err != nil { + WriteError(w, err) + return + } + + resp, err := s.HandleScrape(req) + if err != nil { + WriteError(w, err) + return + } + + err = WriteScrapeResponse(w, resp) + if err != nil { + WriteError(w, err) + return + } + + if s.AfterScrape != nil { + s.AfterScrape(req, resp) + } +} diff --git a/bittorrent/http/writer.go b/bittorrent/http/writer.go new file mode 100644 index 0000000..a0da645 --- /dev/null +++ b/bittorrent/http/writer.go @@ -0,0 +1,111 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package http + +import ( + "net/http" + + "github.com/jzelinskie/trakr/bittorrent" +) + +// WriteError communicates an error to a BitTorrent client over HTTP. +func WriteError(w http.ResponseWriter, err error) error { + message := "internal server error" + if _, clientErr := err.(bittorrent.ClientError); clientErr { + message = err.Error() + } + + w.WriteHeader(http.StatusOK) + return bencode.NewEncoder(w).Encode(bencode.Dict{ + "failure reason": message, + }) +} + +// WriteAnnounceResponse communicates the results of an Announce to a +// BitTorrent client over HTTP. +func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceResponse) error { + bdict := bencode.Dict{ + "complete": resp.Complete, + "incomplete": resp.Incomplete, + "interval": resp.Interval, + "min interval": resp.MinInterval, + } + + // Add the peers to the dictionary in the compact format. + if resp.Compact { + var IPv4CompactDict, IPv6CompactDict []byte + + // Add the IPv4 peers to the dictionary. + for _, peer := range resp.IPv4Peers { + IPv4CompactDict = append(IPv4CompactDict, compact(peer)...) + } + if len(IPv4CompactDict) > 0 { + bdict["peers"] = IPv4CompactDict + } + + // Add the IPv6 peers to the dictionary. + for _, peer := range resp.IPv6Peers { + IPv6CompactDict = append(IPv6CompactDict, compact(peer)...) + } + if len(IPv6CompactDict) > 0 { + bdict["peers6"] = IPv6CompactDict + } + + return bencode.NewEncoder(w).Encode(bdict) + } + + // Add the peers to the dictionary. + var peers []bencode.Dict + for _, peer := range resp.IPv4Peers { + peers = append(peers, dict(peer)) + } + for _, peer := range resp.IPv6Peers { + peers = append(peers, dict(peer)) + } + bdict["peers"] = peers + + return bencode.NewEncoder(w).Encode(bdict) +} + +// WriteScrapeResponse communicates the results of a Scrape to a BitTorrent +// client over HTTP. 
+func WriteScrapeResponse(w http.ResponseWriter, resp *bittorrent.ScrapeResponse) error { + filesDict := bencode.NewDict() + for infohash, scrape := range resp.Files { + filesDict[string(infohash[:])] = bencode.Dict{ + "complete": scrape.Complete, + "incomplete": scrape.Incomplete, + } + } + + return bencode.NewEncoder(w).Encode(bencode.Dict{ + "files": filesDict, + }) +} + +func compact(peer bittorrent.Peer) (buf []byte) { + buf = []byte(peer.IP) + buf = append(buf, byte(peer.Port>>8)) + buf = append(buf, byte(peer.Port&0xff)) + return +} + +func dict(peer bittorrent.Peer) bencode.Dict { + return bencode.Dict{ + "peer id": string(peer.ID[:]), + "ip": peer.IP.String(), + "port": peer.Port, + } +} diff --git a/bittorrent/http/writer_test.go b/bittorrent/http/writer_test.go new file mode 100644 index 0000000..4c9b185 --- /dev/null +++ b/bittorrent/http/writer_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package http + +import ( + "net/http/httptest" + "testing" + + "github.com/jzelinskie/trakr/bittorrent" + "github.com/stretchr/testify/assert" +) + +func TestWriteError(t *testing.T) { + var table = []struct { + reason, expected string + }{ + {"hello world", "d14:failure reason11:hello worlde"}, + {"what's up", "d14:failure reason9:what's upe"}, + } + + for _, tt := range table { + r := httptest.NewRecorder() + err := writeError(r, bittorrent.ClientError(tt.reason)) + assert.Nil(t, err) + assert.Equal(t, r.Body.String(), tt.expected) + } +} + +func TestWriteStatus(t *testing.T) { + r := httptest.NewRecorder() + err := writeError(r, bittorrent.ClientError("something is missing")) + assert.Nil(t, err) + assert.Equal(t, r.Body.String(), "d14:failure reason20:something is missinge") +} diff --git a/bittorrent/udp/connection_id.go b/bittorrent/udp/connection_id.go new file mode 100644 index 0000000..944f4d8 --- /dev/null +++ b/bittorrent/udp/connection_id.go @@ -0,0 +1,64 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package udp + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/binary" + "net" + "time" +) + +// ttl is the number of seconds a connection ID should be valid according to +// BEP 15. +const ttl = 2 * time.Minute + +// NewConnectionID creates a new 8 byte connection identifier for UDP packets +// as described by BEP 15. 
+// +// The first 4 bytes of the connection identifier is a unix timestamp and the +// last 4 bytes are a truncated HMAC token created from the aforementioned +// unix timestamp and the source IP address of the UDP packet. +// +// Truncated HMAC is known to be safe for 2^(-n) where n is the size in bits +// of the truncated HMAC token. In this use case we have 32 bits, thus a +// forgery probability of approximately 1 in 4 billion. +func NewConnectionID(ip net.IP, now time.Time, key string) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint32(buf, uint32(now.UTC().Unix())) + + mac := hmac.New(sha256.New, []byte(key)) + mac.Write(buf[:4]) + mac.Write(ip) + macBytes := mac.Sum(nil)[:4] + copy(buf[4:], macBytes) + + return buf +} + +// ValidConnectionID determines whether a connection identifier is legitimate. +func ValidConnectionID(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration, key string) bool { + ts := time.Unix(int64(binary.BigEndian.Uint32(connectionID[:4])), 0) + if now.After(ts.Add(ttl)) || ts.After(now.Add(maxClockSkew)) { + return false + } + + mac := hmac.New(sha256.New, []byte(key)) + mac.Write(connectionID[:4]) + mac.Write(ip) + expectedMAC := mac.Sum(nil)[:4] + return hmac.Equal(expectedMAC, connectionID[4:]) +} diff --git a/bittorrent/udp/connection_id_test.go b/bittorrent/udp/connection_id_test.go new file mode 100644 index 0000000..776b61f --- /dev/null +++ b/bittorrent/udp/connection_id_test.go @@ -0,0 +1,43 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package udp + +import ( + "net" + "testing" + "time" +) + +var golden = []struct { + createdAt int64 + now int64 + ip string + key string + valid bool +}{ + {0, 1, "127.0.0.1", "", true}, + {0, 420420, "127.0.0.1", "", false}, + {0, 0, "[::]", "", true}, +} + +func TestVerification(t *testing.T) { + for _, tt := range golden { + cid := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key) + got := ValidConnectionID(cid, net.ParseIP(tt.ip), time.Unix(tt.now, 0), time.Minute, tt.key) + if got != tt.valid { + t.Errorf("expected validity: %t got validity: %t", tt.valid, got) + } + } +} diff --git a/bittorrent/udp/parser.go b/bittorrent/udp/parser.go new file mode 100644 index 0000000..85e4469 --- /dev/null +++ b/bittorrent/udp/parser.go @@ -0,0 +1,178 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package udp + +import ( + "encoding/binary" + "net" + + "github.com/jzelinskie/trakr/bittorrent" +) + +const ( + connectActionID uint32 = iota + announceActionID + scrapeActionID + errorActionID + announceDualStackActionID +) + +// Option-Types as described in BEP 41 and BEP 45. +const ( + optionEndOfOptions byte = 0x0 + optionNOP = 0x1 + optionURLData = 0x2 +) + +var ( + // initialConnectionID is the magic initial connection ID specified by BEP 15. 
+ initialConnectionID = []byte{0, 0, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80} + + // emptyIPs are the value of an IP field that has been left blank. + emptyIPv4 = []byte{0, 0, 0, 0} + emptyIPv6 = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + + // eventIDs map values described in BEP 15 to Events. + eventIDs = []bittorrent.Event{ + bittorrent.None, + bittorrent.Completed, + bittorrent.Started, + bittorrent.Stopped, + } + + errMalformedPacket = bittorrent.ClientError("malformed packet") + errMalformedIP = bittorrent.ClientError("malformed IP address") + errMalformedEvent = bittorrent.ClientError("malformed event ID") + errUnknownAction = bittorrent.ClientError("unknown action ID") + errBadConnectionID = bittorrent.ClientError("bad connection ID") +) + +// ParseAnnounce parses an AnnounceRequest from a UDP request. +// +// If allowIPSpoofing is true, IPs provided via params will be used. +func ParseAnnounce(r Request, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) { + if len(r.packet) < 98 { + return nil, errMalformedPacket + } + + infohash := r.packet[16:36] + peerID := r.packet[36:56] + downloaded := binary.BigEndian.Uint64(r.packet[56:64]) + left := binary.BigEndian.Uint64(r.packet[64:72]) + uploaded := binary.BigEndian.Uint64(r.packet[72:80]) + + eventID := int(r.packet[83]) + if eventID >= len(eventIDs) { + return nil, errMalformedEvent + } + + ip := r.IP + ipbytes := r.packet[84:88] + if allowIPSpoofing { + ip = net.IP(ipbytes) + } + if !allowIPSpoofing && r.ip == nil { + // We have no IP address to fallback on. 
+ return nil, errMalformedIP + } + + numWant := binary.BigEndian.Uint32(r.packet[92:96]) + port := binary.BigEndian.Uint16(r.packet[96:98]) + + params, err := handleOptionalParameters(r.packet) + if err != nil { + return nil, err + } + + return &bittorrent.AnnounceRequest{ + Event: eventIDs[eventID], + InfoHash: bittorrent.InfoHashFromBytes(infohash), + NumWant: uint32(numWant), + Left: left, + Downloaded: downloaded, + Uploaded: uploaded, + Peer: bittorrent.Peer{ + ID: bittorrent.PeerIDFromBytes(peerID), + IP: ip, + Port: port, + }, + Params: params, + }, nil +} + +// handleOptionalParameters parses the optional parameters as described in BEP +// 41 and updates an announce with the values parsed. +func handleOptionalParameters(packet []byte) (params bittorrent.Params, err error) { + if len(packet) <= 98 { + return + } + + optionStartIndex := 98 + for optionStartIndex < len(packet)-1 { + option := packet[optionStartIndex] + switch option { + case optionEndOfOptions: + return + + case optionNOP: + optionStartIndex++ + + case optionURLData: + if optionStartIndex+1 > len(packet)-1 { + return params, errMalformedPacket + } + + length := int(packet[optionStartIndex+1]) + if optionStartIndex+1+length > len(packet)-1 { + return params, errMalformedPacket + } + + // TODO(jzelinskie): Actually parse the URL Data as described in BEP 41 + // into something that fulfills the bittorrent.Params interface. + + optionStartIndex += 1 + length + default: + return + } + } + + return +} + +// ParseScrape parses a ScrapeRequest from a UDP request. +func parseScrape(r Request) (*bittorrent.ScrapeRequest, error) { + // If a scrape isn't at least 36 bytes long, it's malformed. + if len(r.packet) < 36 { + return nil, errMalformedPacket + } + + // Skip past the initial headers and check that the bytes left equal the + // length of a valid list of infohashes. 
+ r.packet = r.packet[16:] + if len(r.packet)%20 != 0 { + return nil, errMalformedPacket + } + + // Allocate a list of infohashes and append it to the list until we're out. + var infohashes []bittorrent.InfoHash + for len(r.packet) >= 20 { + infohashes = append(infohashes, bittorrent.InfoHashFromBytes(r.packet[:20])) + r.packet = r.packet[20:] + } + + return &bittorrent.ScrapeRequest{ + InfoHashes: infohashes, + }, nil +} diff --git a/bittorrent/udp/server.go b/bittorrent/udp/server.go new file mode 100644 index 0000000..5ea4a36 --- /dev/null +++ b/bittorrent/udp/server.go @@ -0,0 +1,234 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package udp + +import ( + "bytes" + "encoding/binary" + "net" + "time" + + "github.com/jzelinskie/trakr/bittorrent" +) + +var promResponseDurationMilliseconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "trakr_udp_response_duration_milliseconds", + Help: "The duration of time it takes to receive and write a response to an API request", + Buckets: prometheus.ExponentialBuckets(9.375, 2, 10), + }, + []string{"action", "error"}, +) + +type Config struct { + Addr string + PrivateKey string + AllowIPSpoofing bool +} + +type Server struct { + sock *net.UDPConn + closing chan struct{} + wg sync.WaitGroup + + bittorrent.ServerFuncs + Config +} + +func NewServer(funcs bittorrent.ServerFuncs, cfg Config) { + return &Server{ + closing: make(chan struct{}), + ServerFuncs: funcs, + Config: cfg, + } +} + +func (s *udpServer) Stop() { + close(s.closing) + s.sock.SetReadDeadline(time.Now()) + s.wg.Wait() +} + +func (s *Server) ListenAndServe() error { + udpAddr, err := net.ResolveUDPAddr("udp", s.Addr) + if err != nil { + return err + } + + s.sock, err = net.ListenUDP("udp", udpAddr) + if err != nil { + return err + } + defer s.sock.Close() + + pool := bytepool.New(256, 2048) + + for { + // Check to see if we need to shutdown. + select { + case <-s.closing: + s.wg.Wait() + return nil + default: + } + + // Read a UDP packet into a reusable buffer. + buffer := pool.Get() + s.sock.SetReadDeadline(time.Now().Add(time.Second)) + n, addr, err := s.sock.ReadFromUDP(buffer) + if err != nil { + pool.Put(buffer) + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + // A temporary failure is not fatal; just pretend it never happened. + continue + } + return err + } + + // We got nothin' + if n == 0 { + pool.Put(buffer) + continue + } + + log.Println("Got UDP packet") + start := time.Now() + s.wg.Add(1) + go func(start time.Time) { + defer s.wg.Done() + defer pool.Put(buffer) + + // Handle the response. 
+ response, action, err := s.handlePacket(buffer[:n], addr) + log.Printf("Handled UDP packet: %s, %s, %s\n", response, action, err) + + // Record to Prometheus the time in milliseconds to receive, handle, and + // respond to the request. + duration := time.Since(start) + if err != nil { + promResponseDurationMilliseconds.WithLabelValues(action, err.Error()).Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) + } else { + promResponseDurationMilliseconds.WithLabelValues(action, "").Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) + } + }(start) + } +} + +type Request struct { + Packet []byte + IP net.IP +} + +type ResponseWriter struct { + socket net.UDPConn + addr net.UDPAddr +} + +func (w *ResponseWriter) Write(b []byte) (int, error) { + w.socket.WriteToUDP(b, w.addr) + return len(b), nil +} + +func (s *Server) handlePacket(r *Request, w *ResponseWriter) (response []byte, actionName string, err error) { + if len(r.packet) < 16 { + // Malformed, no client packets are less than 16 bytes. + // We explicitly return nothing in case this is a DoS attempt. + err = errMalformedPacket + return + } + + // Parse the headers of the UDP packet. + connID := r.packet[0:8] + actionID := binary.BigEndian.Uint32(r.packet[8:12]) + txID := r.packet[12:16] + + // If this isn't requesting a new connection ID and the connection ID is + // invalid, then fail. + if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), s.PrivateKey) { + err = errBadConnectionID + WriteError(w, txID, err) + return + } + + // Handle the requested action. 
+ switch actionID { + case connectActionID: + actionName = "connect" + + if !bytes.Equal(connID, initialConnectionID) { + err = errMalformedPacket + return + } + + WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), s.PrivateKey)) + return + + case announceActionID: + actionName = "announce" + + var req *bittorrent.AnnounceRequest + req, err = ParseAnnounce(r, s.AllowIPSpoofing) + if err != nil { + WriteError(w, txID, err) + return + } + + var resp *bittorrent.AnnounceResponse + resp, err = s.HandleAnnounce(req) + if err != nil { + WriteError(w, txID, err) + return + } + + WriteAnnounce(w, txID, resp) + + if s.AfterAnnounce != nil { + s.AfterAnnounce(req, resp) + } + + return + + case scrapeActionID: + actionName = "scrape" + + var req *bittorrent.ScrapeRequest + req, err = ParseScrape(r) + if err != nil { + WriteError(w, txID, err) + return + } + + var resp *bittorrent.ScrapeResponse + ctx := context.TODO() + resp, err = s.HandleScrape(ctx, req) + if err != nil { + WriteError(w, txID, err) + return + } + + WriteScrape(w, txID, resp) + + if s.AfterScrape != nil { + s.AfterScrape(req, resp) + } + + return + + default: + err = errUnknownAction + WriteError(w, txID, err) + return + } +} diff --git a/bittorrent/udp/writer.go b/bittorrent/udp/writer.go new file mode 100644 index 0000000..068741a --- /dev/null +++ b/bittorrent/udp/writer.go @@ -0,0 +1,75 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package udp + +import ( + "bytes" + "encoding/binary" + "fmt" + "time" + + "github.com/jzelinskie/trakr/bittorrent" +) + +// WriteError writes the failure reason as a null-terminated string. +func WriteError(writer io.Writer, txID []byte, err error) { + // If the client wasn't at fault, acknowledge it. + if _, ok := err.(bittorrent.ClientError); !ok { + err = fmt.Errorf("internal error occurred: %s", err.Error()) + } + + var buf bytes.Buffer + writeHeader(buf, txID, errorActionID) + buf.WriteString(err.Error()) + buf.WriteRune('\000') + writer.Write(buf.Bytes()) +} + +// WriteAnnounce encodes an announce response according to BEP 15. +func WriteAnnounce(respBuf *bytes.Buffer, txID []byte, resp *bittorrent.AnnounceResponse) { + writeHeader(respBuf, txID, announceActionID) + binary.Write(respBuf, binary.BigEndian, uint32(resp.Interval/time.Second)) + binary.Write(respBuf, binary.BigEndian, uint32(resp.Incomplete)) + binary.Write(respBuf, binary.BigEndian, uint32(resp.Complete)) + + for _, peer := range resp.IPv4Peers { + respBuf.Write(peer.IP) + binary.Write(respBuf, binary.BigEndian, peer.Port) + } +} + +// WriteScrape encodes a scrape response according to BEP 15. +func WriteScrape(respBuf *bytes.Buffer, txID []byte, resp *bittorrent.ScrapeResponse) { + writeHeader(respBuf, txID, scrapeActionID) + + for _, scrape := range resp.Files { + binary.Write(respBuf, binary.BigEndian, scrape.Complete) + binary.Write(respBuf, binary.BigEndian, scrape.Snatches) + binary.Write(respBuf, binary.BigEndian, scrape.Incomplete) + } +} + +// WriteConnectionID encodes a new connection response according to BEP 15. +func WriteConnectionID(respBuf *bytes.Buffer, txID, connID []byte) { + writeHeader(respBuf, txID, connectActionID) + respBuf.Write(connID) +} + +// writeHeader writes the action and transaction ID to the provided response +// buffer. 
+func writeHeader(respBuf *bytes.Buffer, txID []byte, action uint32) { + binary.Write(respBuf, binary.BigEndian, action) + respBuf.Write(txID) +} diff --git a/cmd/trakr/config.go b/cmd/trakr/config.go new file mode 100644 index 0000000..e69de29 diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go new file mode 100644 index 0000000..e69de29 diff --git a/hook.go b/hook.go new file mode 100644 index 0000000..8707d0e --- /dev/null +++ b/hook.go @@ -0,0 +1,77 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trakr + +import "github.com/jzelinskie/trakr/bittorrent" + +// HookConstructor is a function used to create a new instance of a Hook. +type HookConstructor func(interface{}) (Hook, error) + +// Hook abstracts the concept of anything that needs to interact with a +// BitTorrent client's request and response to a BitTorrent tracker. +type Hook interface { + HandleAnnounce(context.Context, bittorrent.AnnounceRequest, bittorrent.AnnounceResponse) error + HandleScrape(context.Context, bittorrent.ScrapeRequest, bittorrent.ScrapeResponse) error +} + +var preHooks = make(map[string]HookConstructor) + +// RegisterPreHook makes a HookConstructor available by the provided name. +// +// If this function is called twice with the same name or if the +// HookConstructor is nil, it panics. 
+func RegisterPreHook(name string, con HookConstructor) { + if con == nil { + panic("trakr: could not register nil HookConstructor") + } + if _, dup := constructors[name]; dup { + panic("trakr: could not register duplicate HookConstructor: " + name) + } + preHooks[name] = con +} + +// NewPreHook creates an instance of the given PreHook by name. +func NewPreHook(name string, config interface{}) (Hook, error) { + con := preHooks[name] + if !ok { + return nil, fmt.Errorf("trakr: unknown PreHook %q (forgotten import?)", name) + } + return con(config) +} + +var postHooks = make(map[string]HookConstructor) + +// RegisterPostHook makes a HookConstructor available by the provided name. +// +// If this function is called twice with the same name or if the +// HookConstructor is nil, it panics. +func RegisterPostHook(name string, con HookConstructor) { + if con == nil { + panic("trakr: could not register nil HookConstructor") + } + if _, dup := constructors[name]; dup { + panic("trakr: could not register duplicate HookConstructor: " + name) + } + preHooks[name] = con +} + +// NewPostHook creates an instance of the given PostHook by name. +func NewPostHook(name string, config interface{}) (Hook, error) { + con := preHooks[name] + if !ok { + return nil, fmt.Errorf("trakr: unknown PostHook %q (forgotten import?)", name) + } + return con(config) +} diff --git a/server.go b/server.go new file mode 100644 index 0000000..8361a6b --- /dev/null +++ b/server.go @@ -0,0 +1,28 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package trakr + +type Server struct { + HTTPConfig http.Config + UDPConfig udp.Config + Interval time.Duration + PreHooks []string + PostHooks []string + + udpserver +} + +func (s *Server) ListenAndServe() error { +} From dc25c8cab25c6d6e4f8f2789c8735950a8ddcc6f Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Wed, 3 Aug 2016 08:25:45 +0000 Subject: [PATCH 03/74] add example_config.yaml --- example_config.yaml | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 example_config.yaml diff --git a/example_config.yaml b/example_config.yaml new file mode 100644 index 0000000..199275c --- /dev/null +++ b/example_config.yaml @@ -0,0 +1,34 @@ +trakr: + announce_interval: 15m + allow_ip_spoofing: true + default_num_want: 50 + + http: + addr: 0.0.0.0:6881 + real_ip_header: x-real-ip + read_timeout: 5s + write_timeout: 5s + request_timeout: 5s + + udp: + addr: 0.0.0.0:6881 + + storage: + name: memory + config: + shards: 1 + + prehooks: + - name: jwt + config: + jwk_set_uri: "" + jwk_set_update_interval: 5m + jwt_audience: "" + - name: approved_client + config: + type: whitelist + clients: + - OP1011 + + posthooks: + - name: gossip \ No newline at end of file From 0d054414ab0ad2269e8b8d4e734df7d5ed76f341 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 00:18:58 -0400 Subject: [PATCH 04/74] s/Server/Tracker --- bittorrent/bittorrent.go | 11 ++-- bittorrent/http/{server.go => tracker.go} | 58 ++++++++++---------- bittorrent/udp/{server.go => tracker.go} | 64 +++++++++++------------ 3 files changed, 67 insertions(+), 66 deletions(-) rename bittorrent/http/{server.go => tracker.go} (64%) rename bittorrent/udp/{server.go => tracker.go} (78%) diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go index b7f400f..0d87ebf 100644 --- a/bittorrent/bittorrent.go +++ b/bittorrent/bittorrent.go @@ -161,15 
+161,16 @@ type ClientError string // Error implements the error interface for ClientError. func (c ClientError) Error() string { return string(c) } -// Server represents an implementation of the BitTorrent tracker protocol. -type Server interface { +// Tracker represents an implementation of the BitTorrent tracker protocol. +type Tracker interface { ListenAndServe() error Stop() } -// ServerFuncs are the collection of protocol-agnostic functions used to handle -// requests in a Server. -type ServerFuncs struct { +// TrackerFuncs is the collection of callback functions provided to a Tracker +// to (1) generate a response from a parsed request, and (2) observe anything +// after the response has been delivered to the client. +type TrackerFuncs struct { HandleAnnounce AnnounceHandler HandleScrape ScrapeHandler AfterAnnounce AnnounceCallback diff --git a/bittorrent/http/server.go b/bittorrent/http/tracker.go similarity index 64% rename from bittorrent/http/server.go rename to bittorrent/http/tracker.go index fb6ec74..76d9b17 100644 --- a/bittorrent/http/server.go +++ b/bittorrent/http/tracker.go @@ -23,41 +23,41 @@ type Config struct { RealIPHeader string } -type Server struct { +type Tracker struct { grace *graceful.Server - bittorrent.ServerFuncs + bittorrent.TrackerFuncs Config } -func NewServer(funcs bittorrent.ServerFuncs, cfg Config) { +func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { return &Server{ - ServerFuncs: funcs, - Config: cfg, + TrackerFuncs: funcs, + Config: cfg, } } -func (s *Server) Stop() { - s.grace.Stop(s.grace.Timeout) - <-s.grace.StopChan() +func (t *Tracker) Stop() { + t.grace.Stop(t.grace.Timeout) + <-t.grace.StopChan() } -func (s *Server) handler() { +func (t *Tracker) handler() { router := httprouter.New() - router.GET("/announce", s.announceRoute) - router.GET("/scrape", s.scrapeRoute) + router.GET("/announce", t.announceRoute) + router.GET("/scrape", t.scrapeRoute) return server } -func (s *Server) ListenAndServe() error { - 
s.grace = &graceful.Server{ +func (t *Tracker) ListenAndServe() error { + t.grace = &graceful.Server{ Server: &http.Server{ - Addr: s.Addr, - Handler: s.handler(), - ReadTimeout: s.ReadTimeout, - WriteTimeout: s.WriteTimeout, + Addr: t.Addr, + Handler: t.handler(), + ReadTimeout: t.ReadTimeout, + WriteTimeout: t.WriteTimeout, }, - Timeout: s.RequestTimeout, + Timeout: t.RequestTimeout, NoSignalHandling: true, ConnState: func(conn net.Conn, state http.ConnState) { switch state { @@ -78,23 +78,23 @@ func (s *Server) ListenAndServe() error { } }, } - s.grace.SetKeepAlivesEnabled(false) + t.grace.SetKeepAlivesEnabled(false) - if err := s.grace.ListenAndServe(); err != nil { + if err := t.grace.ListenAndServe(); err != nil { if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") { panic("http: failed to gracefully run HTTP server: " + err.Error()) } } } -func (s *Server) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { - req, err := ParseAnnounce(r, s.RealIPHeader, s.AllowIPSpoofing) +func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing) if err != nil { WriteError(w, err) return } - resp, err := s.HandleAnnounce(req) + resp, err := t.HandleAnnounce(req) if err != nil { WriteError(w, err) return @@ -106,19 +106,19 @@ func (s *Server) announceRoute(w http.ResponseWriter, r *http.Request, _ httprou return } - if s.AfterAnnounce != nil { - s.AfterAnnounce(req, resp) + if t.AfterAnnounce != nil { + t.AfterAnnounce(req, resp) } } -func (s *Server) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { +func (t *Tracker) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { req, err := ParseScrape(r) if err != nil { WriteError(w, err) return } - resp, err := s.HandleScrape(req) + resp, err := t.HandleScrape(req) if err != nil { WriteError(w, err) return @@ -130,7 +130,7 @@ func (s 
*Server) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httproute return } - if s.AfterScrape != nil { - s.AfterScrape(req, resp) + if t.AfterScrape != nil { + t.AfterScrape(req, resp) } } diff --git a/bittorrent/udp/server.go b/bittorrent/udp/tracker.go similarity index 78% rename from bittorrent/udp/server.go rename to bittorrent/udp/tracker.go index 5ea4a36..b21dd05 100644 --- a/bittorrent/udp/server.go +++ b/bittorrent/udp/tracker.go @@ -38,56 +38,56 @@ type Config struct { AllowIPSpoofing bool } -type Server struct { +type Tracker struct { sock *net.UDPConn closing chan struct{} wg sync.WaitGroup - bittorrent.ServerFuncs + bittorrent.TrackerFuncs Config } -func NewServer(funcs bittorrent.ServerFuncs, cfg Config) { - return &Server{ - closing: make(chan struct{}), - ServerFuncs: funcs, - Config: cfg, +func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { + return &Tracker{ + closing: make(chan struct{}), + TrackerFuncs: funcs, + Config: cfg, } } -func (s *udpServer) Stop() { - close(s.closing) - s.sock.SetReadDeadline(time.Now()) - s.wg.Wait() +func (t *Tracker) Stop() { + close(t.closing) + t.sock.SetReadDeadline(time.Now()) + t.wg.Wait() } -func (s *Server) ListenAndServe() error { - udpAddr, err := net.ResolveUDPAddr("udp", s.Addr) +func (t *Tracker) ListenAndServe() error { + udpAddr, err := net.ResolveUDPAddr("udp", t.Addr) if err != nil { return err } - s.sock, err = net.ListenUDP("udp", udpAddr) + t.sock, err = net.ListenUDP("udp", udpAddr) if err != nil { return err } - defer s.sock.Close() + defer t.sock.Close() pool := bytepool.New(256, 2048) for { // Check to see if we need to shutdown. select { - case <-s.closing: - s.wg.Wait() + case <-t.closing: + t.wg.Wait() return nil default: } // Read a UDP packet into a reusable buffer. 
buffer := pool.Get() - s.sock.SetReadDeadline(time.Now().Add(time.Second)) - n, addr, err := s.sock.ReadFromUDP(buffer) + t.sock.SetReadDeadline(time.Now().Add(time.Second)) + n, addr, err := t.sock.ReadFromUDP(buffer) if err != nil { pool.Put(buffer) if netErr, ok := err.(net.Error); ok && netErr.Temporary() { @@ -105,13 +105,13 @@ func (s *Server) ListenAndServe() error { log.Println("Got UDP packet") start := time.Now() - s.wg.Add(1) + t.wg.Add(1) go func(start time.Time) { - defer s.wg.Done() + defer t.wg.Done() defer pool.Put(buffer) // Handle the response. - response, action, err := s.handlePacket(buffer[:n], addr) + response, action, err := t.handlePacket(buffer[:n], addr) log.Printf("Handled UDP packet: %s, %s, %s\n", response, action, err) // Record to Prometheus the time in milliseconds to receive, handle, and @@ -141,7 +141,7 @@ func (w *ResponseWriter) Write(b []byte) (int, error) { return len(b), nil } -func (s *Server) handlePacket(r *Request, w *ResponseWriter) (response []byte, actionName string, err error) { +func (t *Tracker) handlePacket(r *Request, w *ResponseWriter) (response []byte, actionName string, err error) { if len(r.packet) < 16 { // Malformed, no client packets are less than 16 bytes. // We explicitly return nothing in case this is a DoS attempt. @@ -156,7 +156,7 @@ func (s *Server) handlePacket(r *Request, w *ResponseWriter) (response []byte, a // If this isn't requesting a new connection ID and the connection ID is // invalid, then fail. 
- if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), s.PrivateKey) { + if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), t.PrivateKey) { err = errBadConnectionID WriteError(w, txID, err) return @@ -172,21 +172,21 @@ func (s *Server) handlePacket(r *Request, w *ResponseWriter) (response []byte, a return } - WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), s.PrivateKey)) + WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), t.PrivateKey)) return case announceActionID: actionName = "announce" var req *bittorrent.AnnounceRequest - req, err = ParseAnnounce(r, s.AllowIPSpoofing) + req, err = ParseAnnounce(r, t.AllowIPSpoofing) if err != nil { WriteError(w, txID, err) return } var resp *bittorrent.AnnounceResponse - resp, err = s.HandleAnnounce(req) + resp, err = t.HandleAnnounce(req) if err != nil { WriteError(w, txID, err) return @@ -194,8 +194,8 @@ func (s *Server) handlePacket(r *Request, w *ResponseWriter) (response []byte, a WriteAnnounce(w, txID, resp) - if s.AfterAnnounce != nil { - s.AfterAnnounce(req, resp) + if t.AfterAnnounce != nil { + t.AfterAnnounce(req, resp) } return @@ -212,7 +212,7 @@ func (s *Server) handlePacket(r *Request, w *ResponseWriter) (response []byte, a var resp *bittorrent.ScrapeResponse ctx := context.TODO() - resp, err = s.HandleScrape(ctx, req) + resp, err = t.HandleScrape(ctx, req) if err != nil { WriteError(w, txID, err) return @@ -220,8 +220,8 @@ func (s *Server) handlePacket(r *Request, w *ResponseWriter) (response []byte, a WriteScrape(w, txID, resp) - if s.AfterScrape != nil { - s.AfterScrape(req, resp) + if t.AfterScrape != nil { + t.AfterScrape(req, resp) } return From 0ebadd31d0e42bf77a2b3c32a47af56e46d93f9d Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 00:33:55 -0400 Subject: [PATCH 05/74] add a type for ClientIDs --- bittorrent/client_id.go | 10 +++++++--- bittorrent/client_id_test.go | 7 ++----- 2 files changed, 9 
insertions(+), 8 deletions(-) diff --git a/bittorrent/client_id.go b/bittorrent/client_id.go index 4089639..6aab52c 100644 --- a/bittorrent/client_id.go +++ b/bittorrent/client_id.go @@ -14,9 +14,13 @@ package bittorrent -// NewClientID returns the part of a PeerID that identifies a peer's client +// ClientID represents the part of a PeerID that identifies a Peer's client // software. -func NewClientID(peerID string) (clientID string) { +type ClientID string + +// NewClientID parses a ClientID from a PeerID. +func NewClientID(peerID string) ClientID { + var clientID string length := len(peerID) if length >= 6 { if peerID[0] == '-' { @@ -28,5 +32,5 @@ func NewClientID(peerID string) (clientID string) { } } - return + return ClientID(clientID) } diff --git a/bittorrent/client_id_test.go b/bittorrent/client_id_test.go index 699da3e..956d0fc 100644 --- a/bittorrent/client_id_test.go +++ b/bittorrent/client_id_test.go @@ -17,10 +17,7 @@ package bittorrent import "testing" func TestClientID(t *testing.T) { - var clientTable = []struct { - peerID string - clientID string - }{ + var clientTable = []struct{ peerID, clientID string }{ {"-AZ3034-6wfG2wk6wWLc", "AZ3034"}, {"-AZ3042-6ozMq5q6Q3NX", "AZ3042"}, {"-BS5820-oy4La2MWGEFj", "BS5820"}, @@ -65,7 +62,7 @@ func TestClientID(t *testing.T) { } for _, tt := range clientTable { - if parsedID := NewClientID(tt.peerID); parsedID != tt.clientID { + if parsedID := NewClientID(tt.peerID); parsedID != ClientID(tt.clientID) { t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID) } } From 2b5140bd55b5460eedaa974dc082f560f3d5f85d Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 14:08:26 -0400 Subject: [PATCH 06/74] add doc comments --- bittorrent/bittorrent.go | 3 +++ bittorrent/http/tracker.go | 11 +++++++++++ bittorrent/udp/tracker.go | 14 +++++++++++++- 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go index 0d87ebf..035dcbe 100644 --- 
a/bittorrent/bittorrent.go +++ b/bittorrent/bittorrent.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package bittorrent implements all of the abstractions used to decouple the +// protocol of a BitTorrent tracker from the logic of handling Announces and +// Scrapes. package bittorrent import ( diff --git a/bittorrent/http/tracker.go b/bittorrent/http/tracker.go index 76d9b17..00d0f0b 100644 --- a/bittorrent/http/tracker.go +++ b/bittorrent/http/tracker.go @@ -12,8 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package http implements a BitTorrent tracker via the HTTP protocol as +// described in BEP 3 and BEP 23. package http +// Config represents all of the configurable options for an HTTP BitTorrent +// Tracker. type Config struct { Addr string ReadTimeout time.Duration @@ -23,6 +27,7 @@ type Config struct { RealIPHeader string } +// Tracker holds the state of an HTTP BitTorrent Tracker. type Tracker struct { grace *graceful.Server @@ -30,6 +35,7 @@ type Tracker struct { Config } +// NewTracker allocates a new instance of a Tracker. func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { return &Server{ TrackerFuncs: funcs, @@ -37,6 +43,7 @@ func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { } } +// Stop provides a thread-safe way to shutdown a currently running Tracker. func (t *Tracker) Stop() { t.grace.Stop(t.grace.Timeout) <-t.grace.StopChan() @@ -49,6 +56,8 @@ func (t *Tracker) handler() { return server } +// ListenAndServe listens on the TCP network address t.Addr and blocks serving +// BitTorrent requests until t.Stop() is called or an error is returned. func (t *Tracker) ListenAndServe() error { t.grace = &graceful.Server{ Server: &http.Server{ @@ -87,6 +96,7 @@ func (t *Tracker) ListenAndServe() error { } } +// announceRoute parses and responds to an Announce by using t.TrackerFuncs. 
func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing) if err != nil { @@ -111,6 +121,7 @@ func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httpro } } +// scrapeRoute parses and responds to a Scrape by using t.TrackerFuncs. func (t *Tracker) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { req, err := ParseScrape(r) if err != nil { diff --git a/bittorrent/udp/tracker.go b/bittorrent/udp/tracker.go index b21dd05..53c1525 100644 --- a/bittorrent/udp/tracker.go +++ b/bittorrent/udp/tracker.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package udp implements a BitTorrent tracker via the UDP protocol as +// described in BEP 15. package udp import ( @@ -38,6 +40,7 @@ type Config struct { AllowIPSpoofing bool } +// Tracker holds the state of a UDP BitTorrent Tracker. type Tracker struct { sock *net.UDPConn closing chan struct{} @@ -47,6 +50,7 @@ type Tracker struct { Config } +// NewTracker allocates a new instance of a Tracker. func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { return &Tracker{ closing: make(chan struct{}), @@ -55,12 +59,15 @@ func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { } } +// Stop provides a thread-safe way to shutdown a currently running Tracker. func (t *Tracker) Stop() { close(t.closing) t.sock.SetReadDeadline(time.Now()) t.wg.Wait() } +// ListenAndServe listens on the UDP network address t.Addr and blocks serving +// BitTorrent requests until t.Stop() is called or an error is returned. func (t *Tracker) ListenAndServe() error { udpAddr, err := net.ResolveUDPAddr("udp", t.Addr) if err != nil { @@ -126,22 +133,27 @@ func (t *Tracker) ListenAndServe() error { } } +// Request represents a UDP payload received by a Tracker. 
type Request struct { Packet []byte IP net.IP } +// ResponseWriter implements the ability to respond to a Request via the +// io.Writer interface. type ResponseWriter struct { socket net.UDPConn addr net.UDPAddr } +// Write implements the io.Writer interface for a ResponseWriter. func (w *ResponseWriter) Write(b []byte) (int, error) { w.socket.WriteToUDP(b, w.addr) return len(b), nil } -func (t *Tracker) handlePacket(r *Request, w *ResponseWriter) (response []byte, actionName string, err error) { +// handleRequest parses and responds to a UDP Request. +func (t *Tracker) handleRequest(r *Request, w *ResponseWriter) (response []byte, actionName string, err error) { if len(r.packet) < 16 { // Malformed, no client packets are less than 16 bytes. // We explicitly return nothing in case this is a DoS attempt. From 437c3be9ece3f2be64c900d5f602c73aed6786c4 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 14:48:00 -0400 Subject: [PATCH 07/74] move bencode package under http package --- bittorrent/{ => http}/bencode/bencode.go | 0 bittorrent/{ => http}/bencode/decoder.go | 0 bittorrent/{ => http}/bencode/decoder_test.go | 0 bittorrent/{ => http}/bencode/encoder.go | 0 bittorrent/{ => http}/bencode/encoder_test.go | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename bittorrent/{ => http}/bencode/bencode.go (100%) rename bittorrent/{ => http}/bencode/decoder.go (100%) rename bittorrent/{ => http}/bencode/decoder_test.go (100%) rename bittorrent/{ => http}/bencode/encoder.go (100%) rename bittorrent/{ => http}/bencode/encoder_test.go (100%) diff --git a/bittorrent/bencode/bencode.go b/bittorrent/http/bencode/bencode.go similarity index 100% rename from bittorrent/bencode/bencode.go rename to bittorrent/http/bencode/bencode.go diff --git a/bittorrent/bencode/decoder.go b/bittorrent/http/bencode/decoder.go similarity index 100% rename from bittorrent/bencode/decoder.go rename to bittorrent/http/bencode/decoder.go diff --git 
a/bittorrent/bencode/decoder_test.go b/bittorrent/http/bencode/decoder_test.go similarity index 100% rename from bittorrent/bencode/decoder_test.go rename to bittorrent/http/bencode/decoder_test.go diff --git a/bittorrent/bencode/encoder.go b/bittorrent/http/bencode/encoder.go similarity index 100% rename from bittorrent/bencode/encoder.go rename to bittorrent/http/bencode/encoder.go diff --git a/bittorrent/bencode/encoder_test.go b/bittorrent/http/bencode/encoder_test.go similarity index 100% rename from bittorrent/bencode/encoder_test.go rename to bittorrent/http/bencode/encoder_test.go From 1a0dd899680f08a634c213eb1be5c22530f808d1 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 14:48:32 -0400 Subject: [PATCH 08/74] add metrics to http & udp trackers --- bittorrent/http/tracker.go | 40 ++++++++++++++++++++++++++++++++++++-- bittorrent/udp/tracker.go | 36 +++++++++++++++++++++------------- 2 files changed, 60 insertions(+), 16 deletions(-) diff --git a/bittorrent/http/tracker.go b/bittorrent/http/tracker.go index 00d0f0b..0b6f88f 100644 --- a/bittorrent/http/tracker.go +++ b/bittorrent/http/tracker.go @@ -16,6 +16,23 @@ // described in BEP 3 and BEP 23. package http +var promResponseDurationMilliseconds = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "trakr_http_response_duration_milliseconds", + Help: "The duration of time it takes to receive and write a response to an API request", + Buckets: prometheus.ExponentialBuckets(9.375, 2, 10), + }, + []string{"action", "error"}, +) + +// recordResponseDuration records the duration of time to respond to a UDP +// Request in milliseconds . +func recordResponseDuration(action, err error, duration time.Duration) { + promResponseDurationMilliseconds. + WithLabelValues(action, err.Error()). + Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) +} + // Config represents all of the configurable options for an HTTP BitTorrent // Tracker. 
type Config struct { @@ -98,6 +115,15 @@ func (t *Tracker) ListenAndServe() error { // announceRoute parses and responds to an Announce by using t.TrackerFuncs. func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + start := time.Now() + defer func() { + var errString string + if err != nil { + errString = err.Error() + } + recordResponseDuration("announce", errString, time.Since(start)) + }() + req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing) if err != nil { WriteError(w, err) @@ -117,12 +143,22 @@ func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httpro } if t.AfterAnnounce != nil { - t.AfterAnnounce(req, resp) + go t.AfterAnnounce(req, resp) } + recordResponseDuration("announce") } // scrapeRoute parses and responds to a Scrape by using t.TrackerFuncs. func (t *Tracker) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + start := time.Now() + defer func() { + var errString string + if err != nil { + errString = err.Error() + } + recordResponseDuration("scrape", errString, time.Since(start)) + }() + req, err := ParseScrape(r) if err != nil { WriteError(w, err) @@ -142,6 +178,6 @@ func (t *Tracker) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprout } if t.AfterScrape != nil { - t.AfterScrape(req, resp) + go t.AfterScrape(req, resp) } } diff --git a/bittorrent/udp/tracker.go b/bittorrent/udp/tracker.go index 53c1525..366721b 100644 --- a/bittorrent/udp/tracker.go +++ b/bittorrent/udp/tracker.go @@ -34,6 +34,16 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec( []string{"action", "error"}, ) +// recordResponseDuration records the duration of time to respond to a UDP +// Request in milliseconds . +func recordResponseDuration(action, err error, duration time.Duration) { + promResponseDurationMilliseconds. + WithLabelValues(action, err.Error()). 
+ Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) +} + +// Config represents all of the configurable options for a UDP BitTorrent +// Tracker. type Config struct { Addr string PrivateKey string @@ -110,26 +120,24 @@ func (t *Tracker) ListenAndServe() error { continue } - log.Println("Got UDP packet") - start := time.Now() + log.Println("Got UDP Request") t.wg.Add(1) go func(start time.Time) { defer t.wg.Done() defer pool.Put(buffer) - // Handle the response. - response, action, err := t.handlePacket(buffer[:n], addr) - log.Printf("Handled UDP packet: %s, %s, %s\n", response, action, err) + // Handle the request. + start := time.Now() + response, action, err := t.handleRequest(&Request{buffer[:n], addr.IP}) + log.Printf("Handled UDP Request: %s, %s, %s\n", response, action, err) - // Record to Prometheus the time in milliseconds to receive, handle, and - // respond to the request. - duration := time.Since(start) + // Record to the duration of time used to respond to the request. 
+ var errString string if err != nil { - promResponseDurationMilliseconds.WithLabelValues(action, err.Error()).Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) - } else { - promResponseDurationMilliseconds.WithLabelValues(action, "").Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) + errString = err.Error() } - }(start) + recordResponseDuration(action, errString, time.Since(start)) + }() } } @@ -207,7 +215,7 @@ func (t *Tracker) handleRequest(r *Request, w *ResponseWriter) (response []byte, WriteAnnounce(w, txID, resp) if t.AfterAnnounce != nil { - t.AfterAnnounce(req, resp) + go t.AfterAnnounce(req, resp) } return @@ -233,7 +241,7 @@ func (t *Tracker) handleRequest(r *Request, w *ResponseWriter) (response []byte, WriteScrape(w, txID, resp) if t.AfterScrape != nil { - t.AfterScrape(req, resp) + go t.AfterScrape(req, resp) } return From 9ab04ddd572bd0329e81f84f3136a09014151262 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 15:00:56 -0400 Subject: [PATCH 09/74] mv hook.go hooks.go --- hook.go => hooks.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename hook.go => hooks.go (100%) diff --git a/hook.go b/hooks.go similarity index 100% rename from hook.go rename to hooks.go From e57638382debeebd095c7fc46d59e7a1ab90abce Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 15:53:38 -0400 Subject: [PATCH 10/74] maintainers: add Justin --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 6f09a91..5e7376c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1 +1,2 @@ Jimmy Zelinskie (@jzelinskie) pkg:* +Justin Li (@pushrax) pkg:* From ae36a14949229c3f98194146602844a982b447e3 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 15:54:24 -0400 Subject: [PATCH 11/74] stopper: initial --- stopper/stopper.go | 97 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 stopper/stopper.go diff --git 
a/stopper/stopper.go b/stopper/stopper.go new file mode 100644 index 0000000..75dc4f8 --- /dev/null +++ b/stopper/stopper.go @@ -0,0 +1,97 @@ +package stopper + +import ( + "sync" +) + +// AlreadyStopped is a closed error channel to be used by StopperFuncs when +// an element was already stopped. +var AlreadyStopped <-chan error + +// AlreadyStoppedFunc is a StopperFunc that returns AlreadyStopped. +var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped } + +func init() { + closeMe := make(chan error) + close(closeMe) + AlreadyStopped = closeMe +} + +// Stopper is an interface that allows a clean shutdown. +type Stopper interface { + // Stop returns a channel that indicates whether the stop was + // successful. + // The channel can either return one error or be closed. Closing the + // channel signals a clean shutdown. + // The Stop function should return immediately and perform the actual + // shutdown in a seperate goroutine. + Stop() <-chan error +} + +// StopGroup is a group that can be stopped. +type StopGroup struct { + stoppables []StopperFunc + stoppablesLock sync.Mutex +} + +// Func is a function that can be used to provide a clean shutdown. +type Func func() <-chan error + +// NewStopGroup creates a new StopGroup. +func NewStopGroup() *StopGroup { + return &StopGroup{ + stoppables: make([]StopperFunc, 0), + } +} + +// Add adds a Stopper to the StopGroup. +// On the next call to Stop(), the Stopper will be stopped. +func (cg *StopGroup) Add(toAdd Stopper) { + cg.stoppablesLock.Lock() + defer cg.stoppablesLock.Unlock() + + cg.stoppables = append(cg.stoppables, toAdd.Stop) +} + +// AddFunc adds a StopperFunc to the StopGroup. +// On the next call to Stop(), the StopperFunc will be called. +func (cg *StopGroup) AddFunc(toAddFunc StopperFunc) { + cg.stoppablesLock.Lock() + defer cg.stoppablesLock.Unlock() + + cg.stoppables = append(cg.stoppables, toAddFunc) +} + +// Stop stops all members of the StopGroup. 
+// Stopping will be done in a concurrent fashion. +// The slice of errors returned contains all errors returned by stopping the +// members. +func (cg *StopGroup) Stop() []error { + cg.stoppablesLock.Lock() + defer cg.stoppablesLock.Unlock() + + var errors []error + whenDone := make(chan struct{}) + + waitChannels := make([]<-chan error, 0, len(cg.stoppables)) + for _, toStop := range cg.stoppables { + waitFor := toStop() + if waitFor == nil { + panic("received a nil chan from Stop") + } + waitChannels = append(waitChannels, waitFor) + } + + go func() { + for _, waitForMe := range waitChannels { + err := <-waitForMe + if err != nil { + errors = append(errors, err) + } + } + close(whenDone) + }() + + <-whenDone + return errors +} From 8a2d89419168690722cc534919b6fc0518df196e Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 15:54:30 -0400 Subject: [PATCH 12/74] storage: add storage interface and registration This also fixes bugs in the Hooks registration. --- hooks.go | 10 +++--- storage.go | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 5 deletions(-) create mode 100644 storage.go diff --git a/hooks.go b/hooks.go index 8707d0e..bbe12a8 100644 --- a/hooks.go +++ b/hooks.go @@ -16,9 +16,6 @@ package trakr import "github.com/jzelinskie/trakr/bittorrent" -// HookConstructor is a function used to create a new instance of a Hook. -type HookConstructor func(interface{}) (Hook, error) - // Hook abstracts the concept of anything that needs to interact with a // BitTorrent client's request and response to a BitTorrent tracker. type Hook interface { @@ -26,6 +23,9 @@ type Hook interface { HandleScrape(context.Context, bittorrent.ScrapeRequest, bittorrent.ScrapeResponse) error } +// HookConstructor is a function used to create a new instance of a Hook. 
+type HookConstructor func(interface{}) (Hook, error) + var preHooks = make(map[string]HookConstructor) // RegisterPreHook makes a HookConstructor available by the provided name. @@ -44,7 +44,7 @@ func RegisterPreHook(name string, con HookConstructor) { // NewPreHook creates an instance of the given PreHook by name. func NewPreHook(name string, config interface{}) (Hook, error) { - con := preHooks[name] + con, ok := preHooks[name] if !ok { return nil, fmt.Errorf("trakr: unknown PreHook %q (forgotten import?)", name) } @@ -69,7 +69,7 @@ func RegisterPostHook(name string, con HookConstructor) { // NewPostHook creates an instance of the given PostHook by name. func NewPostHook(name string, config interface{}) (Hook, error) { - con := preHooks[name] + con, ok := preHooks[name] if !ok { return nil, fmt.Errorf("trakr: unknown PostHook %q (forgotten import?)", name) } diff --git a/storage.go b/storage.go new file mode 100644 index 0000000..8d7056c --- /dev/null +++ b/storage.go @@ -0,0 +1,97 @@ +package trakr + +import ( + "fmt" + "time" + + "github.com/jzelinskie/trakr/bittorrent" + "github.com/jzelinskie/trakr/stopper" +) + +// ErrResourceDoesNotExist is the error returned by all delete methods in the +// store if the requested resource does not exist. +var ErrResourceDoesNotExist = bittorrent.ClientError(errors.New("resource does not exist")) + +// PeerStore is an interface that abstracts the interactions of storing and +// manipulating Peers such that it can be implemented for various data stores. +type PeerStore interface { + // PutSeeder adds a Seeder to the Swarm identified by the provided infoHash. + PutSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error + + // DeleteSeeder removes a Seeder from the Swarm identified by the provided + // infoHash. + // + // If the Swarm or Peer does not exist, this function should return + // ErrResourceDoesNotExist. 
+ DeleteSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error + + // PutLeecher adds a Leecher to the Swarm identified by the provided + // infoHash. + PutLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error + + // DeleteLeecher removes a Leecher from the Swarm identified by the provided + // infoHash. + // + // If the Swarm or Peer does not exist, this function should return + // ErrResourceDoesNotExist. + DeleteLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error + + // GraduateLeecher promotes a Leecher to a Seeder in the Swarm identified by + // the provided infoHash. + // + // If the given Peer is not present as a Leecher, add the Peer as a Seeder + // and return no error. + GraduateLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error + + // AnnouncePeers is a best effort attempt to return Peers from the Swarm + // identified by the provided infoHash. The returned Peers are required to be + // either all IPv4 or all IPv6. + // + // The returned Peers should strive be: + // - as close to length equal to numWant as possible without going over + // - all IPv4 or all IPv6 depending on the provided ipv6 boolean + // - if seeder is true, should ideally return more leechers than seeders + // - if seeder is false, should ideally return more seeders than leechers + AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, ipv6 bool) (peers []bittorrent.Peer, err error) + + // CollectGarbage deletes all Peers from the PeerStore which are older than + // the cutoff time. This function must be able to execute while other methods + // on this interface are being executed in parallel. + CollectGarbage(cutoff time.Time) error + + // Stopper is an interface that expects a Stop method to stops the PeerStore. + // For more details see the documentation in the stopper package. + stopper.Stopper +} + +// PeerStoreConstructor is a function used to create a new instance of a +// PeerStore. 
+type PeerStoreConstructor func(interface{}) (PeerStore, error) + +var peerStores = make(map[string]PeerStoreConstructors) + +// RegisterPeerStore makes a PeerStoreConstructor available by the provided +// name. +// +// If this function is called twice with the same name or if the +// PeerStoreConstructor is nil, it panics. +func RegisterPeerStore(name string, con PeerStoreConstructor) { + if con == nil { + panic("trakr: could not register nil PeerStoreConstructor") + } + + if _, dup := peerStore[name]; dup { + panic("trakr: could not register duplicate PeerStoreConstructor: " + name) + } + + peerStores[name] = con +} + +// NewPeerStore creates an instance of the given PeerStore by name. +func NewPeerStore(name, config interface{}) (PeerStore, error) { + con, ok := peerStores[name] + if !ok { + return nil, fmt.Errorf("trakr: unknown PeerStore %q (forgotten import?)", name) + } + return con(config) +} From 9366e601cd4f58458f8fb1932bcb1c4a3250cb1f Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 16:26:44 -0400 Subject: [PATCH 13/74] http: fail when numwant missing --- bittorrent/http/parser.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bittorrent/http/parser.go b/bittorrent/http/parser.go index cbf6ba9..9c8d8d9 100644 --- a/bittorrent/http/parser.go +++ b/bittorrent/http/parser.go @@ -81,7 +81,10 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) ( return nil, bittorrent.ClientError("failed to parse parameter: uploaded") } - numwant, _ := qp.Uint64("numwant") + numwant, err := qp.Uint64("numwant") + if err != nil { + return nil, bittorrent.ClientError("failed to parse parameter: numwant") + } request.NumWant = int32(numwant) port, err := qp.Uint64("port") From b5de90345ecb9f2fc05e7de4fd778e1a1ac22fef Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 4 Aug 2016 16:27:28 -0400 Subject: [PATCH 14/74] s/Server/MultiTracker + add docs --- server.go | 28 ---------------------------- 
tracker.go | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 28 deletions(-) delete mode 100644 server.go create mode 100644 tracker.go diff --git a/server.go b/server.go deleted file mode 100644 index 8361a6b..0000000 --- a/server.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trakr - -type Server struct { - HTTPConfig http.Config - UDPConfig udp.Config - Interval time.Duration - PreHooks []string - PostHooks []string - - udpserver -} - -func (s *Server) ListenAndServe() error { -} diff --git a/tracker.go b/tracker.go new file mode 100644 index 0000000..4cc86cb --- /dev/null +++ b/tracker.go @@ -0,0 +1,38 @@ +// Copyright 2016 Jimmy Zelinskie +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package trakr implements a BitTorrent Tracker that supports multiple +// protocols and configurable Hooks that execute before and after a Response +// has been delievered to a BitTorrent client. +package trakr + +// MultiTracker is a multi-protocol, customizable BitTorrent Tracker. +type MultiTracker struct { + HTTPConfig http.Config + UDPConfig udp.Config + AnnounceInterval time.Duration + GCInterval time.Duration + GCExpiration time.Duration + PreHooks []Hook + PostHooks []Hook + + httpTracker http.Tracker + udpTracker udp.Tracker +} + +// ListenAndServe listens on the protocols and addresses specified in the +// HTTPConfig and UDPConfig then blocks serving BitTorrent requests until +// t.Stop() is called or an error is returned. +func (t *MultiTracker) ListenAndServe() error { +} From 5c99738b7fecab6c0731cc4440fb4c4aab7ce2a6 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Fri, 5 Aug 2016 01:47:04 -0400 Subject: [PATCH 15/74] make it compile! --- bittorrent/bittorrent.go | 10 ++-- bittorrent/http/parser.go | 72 +++++++++--------------- bittorrent/http/query_params.go | 21 +++---- bittorrent/http/tracker.go | 55 +++++++++++-------- bittorrent/http/writer.go | 1 + bittorrent/http/writer_test.go | 3 +- bittorrent/udp/bytepool/bytepool.go | 35 ++++++++++++ bittorrent/udp/parser.go | 38 ++++++------- bittorrent/udp/tracker.go | 68 +++++++++++++---------- bittorrent/udp/writer.go | 43 ++++++++------- cmd/trakr/config.go | 0 cmd/trakr/main.go | 65 ++++++++++++++++++++++ example_config.yaml | 12 ++-- hooks.go | 16 ++++-- stopper/stopper.go | 14 ++--- storage.go | 8 +-- tracker.go | 85 ++++++++++++++++++++++++++--- 17 files changed, 361 insertions(+), 185 deletions(-) create mode 100644 bittorrent/udp/bytepool/bytepool.go delete mode 100644 cmd/trakr/config.go diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go index 035dcbe..34294fe 100644 --- a/bittorrent/bittorrent.go +++ b/bittorrent/bittorrent.go @@ -20,6 +20,8 @@ package bittorrent import ( 
"net" "time" + + "golang.org/x/net/context" ) // PeerID represents a peer ID. @@ -107,7 +109,7 @@ type AnnounceResponse struct { } // AnnounceHandler is a function that generates a response for an Announce. -type AnnounceHandler func(*AnnounceRequest) *AnnounceResponse +type AnnounceHandler func(context.Context, *AnnounceRequest) (*AnnounceResponse, error) // AnnounceCallback is a function that does something with the results of an // Announce after it has been completed. @@ -132,7 +134,7 @@ type Scrape struct { } // ScrapeHandler is a function that generates a response for a Scrape. -type ScrapeHandler func(*ScrapeRequest) *ScrapeResponse +type ScrapeHandler func(context.Context, *ScrapeRequest) (*ScrapeResponse, error) // ScrapeCallback is a function that does something with the results of a // Scrape after it has been completed. @@ -152,9 +154,9 @@ func (p Peer) Equal(x Peer) bool { return p.EqualEndpoint(x) && p.ID == x.ID } // EqualEndpoint reports whether p and x have the same endpoint. func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP) } -// Params is used to fetch request optional parameters. +// Params is used to fetch request optional parameters from an Announce. 
type Params interface { - String(key string) (string, error) + String(key string) (string, bool) } // ClientError represents an error that should be exposed to the client over diff --git a/bittorrent/http/parser.go b/bittorrent/http/parser.go index 9c8d8d9..a43742e 100644 --- a/bittorrent/http/parser.go +++ b/bittorrent/http/parser.go @@ -32,14 +32,9 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) ( return nil, err } - request := &bittorrent.AnnounceRequest{Params: q} + request := &bittorrent.AnnounceRequest{Params: qp} - eventStr, err := qp.String("event") - if err == query.ErrKeyNotFound { - eventStr = "" - } else if err != nil { - return nil, bittorrent.ClientError("failed to parse parameter: event") - } + eventStr, _ := qp.String("event") request.Event, err = bittorrent.NewEvent(eventStr) if err != nil { return nil, bittorrent.ClientError("failed to provide valid client event") @@ -57,14 +52,14 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) ( } request.InfoHash = infoHashes[0] - peerID, err := qp.String("peer_id") - if err != nil { + peerID, ok := qp.String("peer_id") + if !ok { return nil, bittorrent.ClientError("failed to parse parameter: peer_id") } if len(peerID) != 20 { return nil, bittorrent.ClientError("failed to provide valid peer_id") } - request.PeerID = bittorrent.PeerIDFromString(peerID) + request.Peer.ID = bittorrent.PeerIDFromString(peerID) request.Left, err = qp.Uint64("left") if err != nil { @@ -85,24 +80,24 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) ( if err != nil { return nil, bittorrent.ClientError("failed to parse parameter: numwant") } - request.NumWant = int32(numwant) + request.NumWant = uint32(numwant) port, err := qp.Uint64("port") if err != nil { return nil, bittorrent.ClientError("failed to parse parameter: port") } - request.Port = uint16(port) + request.Peer.Port = uint16(port) - request.IP, err = requestedIP(q, r, 
realIPHeader, allowIPSpoofing) - if err != nil { - return nil, bittorrent.ClientError("failed to parse peer IP address: " + err.Error()) + request.Peer.IP = requestedIP(r, qp, realIPHeader, allowIPSpoofing) + if request.Peer.IP == nil { + return nil, bittorrent.ClientError("failed to parse peer IP address") } return request, nil } // ParseScrape parses an bittorrent.ScrapeRequest from an http.Request. -func ParseScrape(r *http.Request) (*bittorent.ScrapeRequest, error) { +func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) { qp, err := NewQueryParams(r.URL.RawQuery) if err != nil { return nil, err @@ -115,7 +110,7 @@ func ParseScrape(r *http.Request) (*bittorent.ScrapeRequest, error) { request := &bittorrent.ScrapeRequest{ InfoHashes: infoHashes, - Params: q, + Params: qp, } return request, nil @@ -126,46 +121,31 @@ func ParseScrape(r *http.Request) (*bittorent.ScrapeRequest, error) { // If allowIPSpoofing is true, IPs provided via params will be used. // If realIPHeader is not empty string, the first value of the HTTP Header with // that name will be used. 
-func requestedIP(r *http.Request, p bittorent.Params, realIPHeader string, allowIPSpoofing bool) (net.IP, error) { +func requestedIP(r *http.Request, p bittorrent.Params, realIPHeader string, allowIPSpoofing bool) net.IP { if allowIPSpoofing { - if ipstr, err := p.String("ip"); err == nil { - ip, err := net.ParseIP(str) - if err != nil { - return nil, err - } - - return ip, nil + if ipstr, ok := p.String("ip"); ok { + ip := net.ParseIP(ipstr) + return ip } - if ipstr, err := p.String("ipv4"); err == nil { - ip, err := net.ParseIP(str) - if err != nil { - return nil, err - } - - return ip, nil + if ipstr, ok := p.String("ipv4"); ok { + ip := net.ParseIP(ipstr) + return ip } - if ipstr, err := p.String("ipv6"); err == nil { - ip, err := net.ParseIP(str) - if err != nil { - return nil, err - } - - return ip, nil + if ipstr, ok := p.String("ipv6"); ok { + ip := net.ParseIP(ipstr) + return ip } } if realIPHeader != "" { if ips, ok := r.Header[realIPHeader]; ok && len(ips) > 0 { - ip, err := net.ParseIP(ips[0]) - if err != nil { - return nil, err - } - - return ip, nil + ip := net.ParseIP(ips[0]) + return ip } } - return r.RemoteAddr + host, _, _ := net.SplitHostPort(r.RemoteAddr) + return net.ParseIP(host) } diff --git a/bittorrent/http/query_params.go b/bittorrent/http/query_params.go index b3fc62c..5607e3e 100644 --- a/bittorrent/http/query_params.go +++ b/bittorrent/http/query_params.go @@ -40,14 +40,14 @@ type QueryParams struct { } // NewQueryParams parses a raw URL query. -func NewQueryParams(query string) (*Query, error) { +func NewQueryParams(query string) (*QueryParams, error) { var ( keyStart, keyEnd int valStart, valEnd int onKey = true - q = &Query{ + q = &QueryParams{ query: query, infoHashes: nil, params: make(map[string]string), @@ -111,18 +111,15 @@ func NewQueryParams(query string) (*Query, error) { // String returns a string parsed from a query. Every key can be returned as a // string because they are encoded in the URL as strings. 
-func (q *Query) String(key string) (string, error) { - val, exists := q.params[key] - if !exists { - return "", ErrKeyNotFound - } - return val, nil +func (qp *QueryParams) String(key string) (string, bool) { + value, ok := qp.params[key] + return value, ok } // Uint64 returns a uint parsed from a query. After being called, it is safe to // cast the uint64 to your desired length. -func (q *Query) Uint64(key string) (uint64, error) { - str, exists := q.params[key] +func (qp *QueryParams) Uint64(key string) (uint64, error) { + str, exists := qp.params[key] if !exists { return 0, ErrKeyNotFound } @@ -136,6 +133,6 @@ func (q *Query) Uint64(key string) (uint64, error) { } // InfoHashes returns a list of requested infohashes. -func (q *Query) InfoHashes() []bittorrent.InfoHash { - return q.infoHashes +func (qp *QueryParams) InfoHashes() []bittorrent.InfoHash { + return qp.infoHashes } diff --git a/bittorrent/http/tracker.go b/bittorrent/http/tracker.go index 0b6f88f..a2edabc 100644 --- a/bittorrent/http/tracker.go +++ b/bittorrent/http/tracker.go @@ -16,6 +16,19 @@ // described in BEP 3 and BEP 23. package http +import ( + "net" + "net/http" + "time" + + "github.com/julienschmidt/httprouter" + "github.com/prometheus/client_golang/prometheus" + "github.com/tylerb/graceful" + "golang.org/x/net/context" + + "github.com/jzelinskie/trakr/bittorrent" +) + var promResponseDurationMilliseconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "trakr_http_response_duration_milliseconds", @@ -27,9 +40,14 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec( // recordResponseDuration records the duration of time to respond to a UDP // Request in milliseconds . -func recordResponseDuration(action, err error, duration time.Duration) { +func recordResponseDuration(action string, err error, duration time.Duration) { + var errString string + if err != nil { + errString = err.Error() + } + promResponseDurationMilliseconds. 
- WithLabelValues(action, err.Error()). + WithLabelValues(action, errString). Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) } @@ -53,8 +71,8 @@ type Tracker struct { } // NewTracker allocates a new instance of a Tracker. -func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { - return &Server{ +func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) *Tracker { + return &Tracker{ TrackerFuncs: funcs, Config: cfg, } @@ -66,11 +84,11 @@ func (t *Tracker) Stop() { <-t.grace.StopChan() } -func (t *Tracker) handler() { +func (t *Tracker) handler() http.Handler { router := httprouter.New() router.GET("/announce", t.announceRoute) router.GET("/scrape", t.scrapeRoute) - return server + return router } // ListenAndServe listens on the TCP network address t.Addr and blocks serving @@ -111,18 +129,15 @@ func (t *Tracker) ListenAndServe() error { panic("http: failed to gracefully run HTTP server: " + err.Error()) } } + + return nil } // announceRoute parses and responds to an Announce by using t.TrackerFuncs. func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + var err error start := time.Now() - defer func() { - var errString string - if err != nil { - errString = err.Error() - } - recordResponseDuration("announce", errString, time.Since(start)) - }() + defer recordResponseDuration("announce", err, time.Since(start)) req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing) if err != nil { @@ -130,7 +145,7 @@ func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httpro return } - resp, err := t.HandleAnnounce(req) + resp, err := t.HandleAnnounce(context.TODO(), req) if err != nil { WriteError(w, err) return @@ -145,19 +160,13 @@ func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httpro if t.AfterAnnounce != nil { go t.AfterAnnounce(req, resp) } - recordResponseDuration("announce") } // scrapeRoute parses and responds to a Scrape by using t.TrackerFuncs. 
func (t *Tracker) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + var err error start := time.Now() - defer func() { - var errString string - if err != nil { - errString = err.Error() - } - recordResponseDuration("scrape", errString, time.Since(start)) - }() + defer recordResponseDuration("scrape", err, time.Since(start)) req, err := ParseScrape(r) if err != nil { @@ -165,7 +174,7 @@ func (t *Tracker) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprout return } - resp, err := t.HandleScrape(req) + resp, err := t.HandleScrape(context.TODO(), req) if err != nil { WriteError(w, err) return diff --git a/bittorrent/http/writer.go b/bittorrent/http/writer.go index a0da645..c1e9266 100644 --- a/bittorrent/http/writer.go +++ b/bittorrent/http/writer.go @@ -18,6 +18,7 @@ import ( "net/http" "github.com/jzelinskie/trakr/bittorrent" + "github.com/jzelinskie/trakr/bittorrent/http/bencode" ) // WriteError communicates an error to a BitTorrent client over HTTP. diff --git a/bittorrent/http/writer_test.go b/bittorrent/http/writer_test.go index 4c9b185..e8a5d31 100644 --- a/bittorrent/http/writer_test.go +++ b/bittorrent/http/writer_test.go @@ -18,8 +18,9 @@ import ( "net/http/httptest" "testing" - "github.com/jzelinskie/trakr/bittorrent" "github.com/stretchr/testify/assert" + + "github.com/jzelinskie/trakr/bittorrent" ) func TestWriteError(t *testing.T) { diff --git a/bittorrent/udp/bytepool/bytepool.go b/bittorrent/udp/bytepool/bytepool.go new file mode 100644 index 0000000..adc1207 --- /dev/null +++ b/bittorrent/udp/bytepool/bytepool.go @@ -0,0 +1,35 @@ +// Copyright 2016 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package bytepool + +import "sync" + +// BytePool is a cached pool of reusable byte slices. +type BytePool struct { + sync.Pool +} + +// New allocates a new BytePool with slices of the provided capacity. 
+func New(length, capacity int) *BytePool { + var bp BytePool + bp.Pool.New = func() interface{} { + return make([]byte, length, capacity) + } + return &bp +} + +// Get returns a byte slice from the pool. +func (bp *BytePool) Get() []byte { + return bp.Pool.Get().([]byte) +} + +// Put returns a byte slice to the pool. +func (bp *BytePool) Put(b []byte) { + // Zero out the bytes. + for i := 0; i < cap(b); i++ { + b[i] = 0x0 + } + bp.Pool.Put(b) +} diff --git a/bittorrent/udp/parser.go b/bittorrent/udp/parser.go index 85e4469..31ca9d8 100644 --- a/bittorrent/udp/parser.go +++ b/bittorrent/udp/parser.go @@ -63,35 +63,35 @@ var ( // // If allowIPSpoofing is true, IPs provided via params will be used. func ParseAnnounce(r Request, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) { - if len(r.packet) < 98 { + if len(r.Packet) < 98 { return nil, errMalformedPacket } - infohash := r.packet[16:36] - peerID := r.packet[36:56] - downloaded := binary.BigEndian.Uint64(r.packet[56:64]) - left := binary.BigEndian.Uint64(r.packet[64:72]) - uploaded := binary.BigEndian.Uint64(r.packet[72:80]) + infohash := r.Packet[16:36] + peerID := r.Packet[36:56] + downloaded := binary.BigEndian.Uint64(r.Packet[56:64]) + left := binary.BigEndian.Uint64(r.Packet[64:72]) + uploaded := binary.BigEndian.Uint64(r.Packet[72:80]) - eventID := int(r.packet[83]) + eventID := int(r.Packet[83]) if eventID >= len(eventIDs) { return nil, errMalformedEvent } ip := r.IP - ipbytes := r.packet[84:88] + ipbytes := r.Packet[84:88] if allowIPSpoofing { ip = net.IP(ipbytes) } - if !allowIPSpoofing && r.ip == nil { + if !allowIPSpoofing && r.IP == nil { // We have no IP address to fallback on. 
return nil, errMalformedIP } - numWant := binary.BigEndian.Uint32(r.packet[92:96]) - port := binary.BigEndian.Uint16(r.packet[96:98]) + numWant := binary.BigEndian.Uint32(r.Packet[92:96]) + port := binary.BigEndian.Uint16(r.Packet[96:98]) - params, err := handleOptionalParameters(r.packet) + params, err := handleOptionalParameters(r.Packet) if err != nil { return nil, err } @@ -152,24 +152,24 @@ func handleOptionalParameters(packet []byte) (params bittorrent.Params, err erro } // ParseScrape parses a ScrapeRequest from a UDP request. -func parseScrape(r Request) (*bittorrent.ScrapeRequest, error) { +func ParseScrape(r Request) (*bittorrent.ScrapeRequest, error) { // If a scrape isn't at least 36 bytes long, it's malformed. - if len(r.packet) < 36 { + if len(r.Packet) < 36 { return nil, errMalformedPacket } // Skip past the initial headers and check that the bytes left equal the // length of a valid list of infohashes. - r.packet = r.packet[16:] - if len(r.packet)%20 != 0 { + r.Packet = r.Packet[16:] + if len(r.Packet)%20 != 0 { return nil, errMalformedPacket } // Allocate a list of infohashes and append it to the list until we're out. 
var infohashes []bittorrent.InfoHash - for len(r.packet) >= 20 { - infohashes = append(infohashes, bittorrent.InfoHashFromBytes(r.packet[:20])) - r.packet = r.packet[20:] + for len(r.Packet) >= 20 { + infohashes = append(infohashes, bittorrent.InfoHashFromBytes(r.Packet[:20])) + r.Packet = r.Packet[20:] } return &bittorrent.ScrapeRequest{ diff --git a/bittorrent/udp/tracker.go b/bittorrent/udp/tracker.go index 366721b..4efa640 100644 --- a/bittorrent/udp/tracker.go +++ b/bittorrent/udp/tracker.go @@ -19,10 +19,16 @@ package udp import ( "bytes" "encoding/binary" + "log" "net" + "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" + "github.com/jzelinskie/trakr/bittorrent" + "github.com/jzelinskie/trakr/bittorrent/udp/bytepool" ) var promResponseDurationMilliseconds = prometheus.NewHistogramVec( @@ -36,9 +42,14 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec( // recordResponseDuration records the duration of time to respond to a UDP // Request in milliseconds . -func recordResponseDuration(action, err error, duration time.Duration) { +func recordResponseDuration(action string, err error, duration time.Duration) { + var errString string + if err != nil { + errString = err.Error() + } + promResponseDurationMilliseconds. - WithLabelValues(action, err.Error()). + WithLabelValues(action, errString). Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond)) } @@ -47,12 +58,13 @@ func recordResponseDuration(action, err error, duration time.Duration) { type Config struct { Addr string PrivateKey string + MaxClockSkew time.Duration AllowIPSpoofing bool } // Tracker holds the state of a UDP BitTorrent Tracker. type Tracker struct { - sock *net.UDPConn + socket *net.UDPConn closing chan struct{} wg sync.WaitGroup @@ -61,7 +73,7 @@ type Tracker struct { } // NewTracker allocates a new instance of a Tracker. 
-func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { +func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) *Tracker { return &Tracker{ closing: make(chan struct{}), TrackerFuncs: funcs, @@ -72,7 +84,7 @@ func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) { // Stop provides a thread-safe way to shutdown a currently running Tracker. func (t *Tracker) Stop() { close(t.closing) - t.sock.SetReadDeadline(time.Now()) + t.socket.SetReadDeadline(time.Now()) t.wg.Wait() } @@ -84,11 +96,11 @@ func (t *Tracker) ListenAndServe() error { return err } - t.sock, err = net.ListenUDP("udp", udpAddr) + t.socket, err = net.ListenUDP("udp", udpAddr) if err != nil { return err } - defer t.sock.Close() + defer t.socket.Close() pool := bytepool.New(256, 2048) @@ -103,8 +115,8 @@ func (t *Tracker) ListenAndServe() error { // Read a UDP packet into a reusable buffer. buffer := pool.Get() - t.sock.SetReadDeadline(time.Now().Add(time.Second)) - n, addr, err := t.sock.ReadFromUDP(buffer) + t.socket.SetReadDeadline(time.Now().Add(time.Second)) + n, addr, err := t.socket.ReadFromUDP(buffer) if err != nil { pool.Put(buffer) if netErr, ok := err.(net.Error); ok && netErr.Temporary() { @@ -122,21 +134,18 @@ func (t *Tracker) ListenAndServe() error { log.Println("Got UDP Request") t.wg.Add(1) - go func(start time.Time) { + go func() { defer t.wg.Done() defer pool.Put(buffer) // Handle the request. start := time.Now() - response, action, err := t.handleRequest(&Request{buffer[:n], addr.IP}) + response, action, err := t.handleRequest( + Request{buffer[:n], addr.IP}, + ResponseWriter{t.socket, addr}, + ) log.Printf("Handled UDP Request: %s, %s, %s\n", response, action, err) - - // Record to the duration of time used to respond to the request. 
- var errString string - if err != nil { - errString = err.Error() - } - recordResponseDuration(action, errString, time.Since(start)) + recordResponseDuration(action, err, time.Since(start)) }() } } @@ -150,19 +159,19 @@ type Request struct { // ResponseWriter implements the ability to respond to a Request via the // io.Writer interface. type ResponseWriter struct { - socket net.UDPConn - addr net.UDPAddr + socket *net.UDPConn + addr *net.UDPAddr } // Write implements the io.Writer interface for a ResponseWriter. -func (w *ResponseWriter) Write(b []byte) (int, error) { +func (w ResponseWriter) Write(b []byte) (int, error) { w.socket.WriteToUDP(b, w.addr) return len(b), nil } // handleRequest parses and responds to a UDP Request. -func (t *Tracker) handleRequest(r *Request, w *ResponseWriter) (response []byte, actionName string, err error) { - if len(r.packet) < 16 { +func (t *Tracker) handleRequest(r Request, w ResponseWriter) (response []byte, actionName string, err error) { + if len(r.Packet) < 16 { // Malformed, no client packets are less than 16 bytes. // We explicitly return nothing in case this is a DoS attempt. err = errMalformedPacket @@ -170,13 +179,13 @@ func (t *Tracker) handleRequest(r *Request, w *ResponseWriter) (response []byte, } // Parse the headers of the UDP packet. - connID := r.packet[0:8] - actionID := binary.BigEndian.Uint32(r.packet[8:12]) - txID := r.packet[12:16] + connID := r.Packet[0:8] + actionID := binary.BigEndian.Uint32(r.Packet[8:12]) + txID := r.Packet[12:16] // If this isn't requesting a new connection ID and the connection ID is // invalid, then fail. 
- if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), t.PrivateKey) { + if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), t.MaxClockSkew, t.PrivateKey) { err = errBadConnectionID WriteError(w, txID, err) return @@ -206,7 +215,7 @@ func (t *Tracker) handleRequest(r *Request, w *ResponseWriter) (response []byte, } var resp *bittorrent.AnnounceResponse - resp, err = t.HandleAnnounce(req) + resp, err = t.HandleAnnounce(context.TODO(), req) if err != nil { WriteError(w, txID, err) return @@ -231,8 +240,7 @@ func (t *Tracker) handleRequest(r *Request, w *ResponseWriter) (response []byte, } var resp *bittorrent.ScrapeResponse - ctx := context.TODO() - resp, err = t.HandleScrape(ctx, req) + resp, err = t.HandleScrape(context.TODO(), req) if err != nil { WriteError(w, txID, err) return diff --git a/bittorrent/udp/writer.go b/bittorrent/udp/writer.go index 068741a..211635f 100644 --- a/bittorrent/udp/writer.go +++ b/bittorrent/udp/writer.go @@ -18,58 +18,59 @@ import ( "bytes" "encoding/binary" "fmt" + "io" "time" "github.com/jzelinskie/trakr/bittorrent" ) // WriteError writes the failure reason as a null-terminated string. -func WriteError(writer io.Writer, txID []byte, err error) { +func WriteError(w io.Writer, txID []byte, err error) { // If the client wasn't at fault, acknowledge it. if _, ok := err.(bittorrent.ClientError); !ok { err = fmt.Errorf("internal error occurred: %s", err.Error()) } var buf bytes.Buffer - writeHeader(buf, txID, errorActionID) + writeHeader(&buf, txID, errorActionID) buf.WriteString(err.Error()) buf.WriteRune('\000') - writer.Write(buf.Bytes()) + w.Write(buf.Bytes()) } // WriteAnnounce encodes an announce response according to BEP 15. 
-func WriteAnnounce(respBuf *bytes.Buffer, txID []byte, resp *bittorrent.AnnounceResponse) { - writeHeader(respBuf, txID, announceActionID) - binary.Write(respBuf, binary.BigEndian, uint32(resp.Interval/time.Second)) - binary.Write(respBuf, binary.BigEndian, uint32(resp.Incomplete)) - binary.Write(respBuf, binary.BigEndian, uint32(resp.Complete)) +func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse) { + writeHeader(w, txID, announceActionID) + binary.Write(w, binary.BigEndian, uint32(resp.Interval/time.Second)) + binary.Write(w, binary.BigEndian, uint32(resp.Incomplete)) + binary.Write(w, binary.BigEndian, uint32(resp.Complete)) for _, peer := range resp.IPv4Peers { - respBuf.Write(peer.IP) - binary.Write(respBuf, binary.BigEndian, peer.Port) + w.Write(peer.IP) + binary.Write(w, binary.BigEndian, peer.Port) } } // WriteScrape encodes a scrape response according to BEP 15. -func WriteScrape(respBuf *bytes.Buffer, txID []byte, resp *bittorrent.ScrapeResponse) { - writeHeader(respBuf, txID, scrapeActionID) +func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) { + writeHeader(w, txID, scrapeActionID) for _, scrape := range resp.Files { - binary.Write(respBuf, binary.BigEndian, scrape.Complete) - binary.Write(respBuf, binary.BigEndian, scrape.Snatches) - binary.Write(respBuf, binary.BigEndian, scrape.Incomplete) + binary.Write(w, binary.BigEndian, scrape.Complete) + binary.Write(w, binary.BigEndian, scrape.Snatches) + binary.Write(w, binary.BigEndian, scrape.Incomplete) } } // WriteConnectionID encodes a new connection response according to BEP 15. -func WriteConnectionID(respBuf *bytes.Buffer, txID, connID []byte) { - writeHeader(respBuf, txID, connectActionID) - respBuf.Write(connID) +func WriteConnectionID(w io.Writer, txID, connID []byte) { + writeHeader(w, txID, connectActionID) + w.Write(connID) } // writeHeader writes the action and transaction ID to the provided response // buffer. 
-func writeHeader(respBuf *bytes.Buffer, txID []byte, action uint32) { - binary.Write(respBuf, binary.BigEndian, action) - respBuf.Write(txID) +func writeHeader(w io.Writer, txID []byte, action uint32) { + binary.Write(w, binary.BigEndian, action) + w.Write(txID) } diff --git a/cmd/trakr/config.go b/cmd/trakr/config.go deleted file mode 100644 index e69de29..0000000 diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index e69de29..65409c5 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -0,0 +1,65 @@ +package main + +import ( + "errors" + "log" + "os" + "os/signal" + "runtime/pprof" + "syscall" + + "github.com/spf13/cobra" + + "github.com/jzelinskie/trakr" +) + +func main() { + var configFilePath string + var cpuProfilePath string + + var rootCmd = &cobra.Command{ + Use: "trakr", + Short: "BitTorrent Tracker", + Long: "A customizible, multi-protocol BitTorrent Tracker", + Run: func(cmd *cobra.Command, args []string) { + if err := func() error { + if cpuProfilePath != "" { + log.Println("enabled CPU profiling to " + cpuProfilePath) + f, err := os.Create(cpuProfilePath) + if err != nil { + return err + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + mt, err := trakr.MultiTrackerFromFile(configFilePath) + if err != nil { + return errors.New("failed to read config: " + err.Error()) + } + + go func() { + shutdown := make(chan os.Signal) + signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM) + <-shutdown + mt.Stop() + }() + + if err := mt.ListenAndServe(); err != nil { + return errors.New("failed to cleanly shutdown: " + err.Error()) + } + + return nil + }(); err != nil { + log.Fatal(err) + } + }, + } + + rootCmd.Flags().StringVar(&configFilePath, "config", "/etc/trakr.yaml", "location of configuration file (defaults to /etc/trakr.yaml)") + rootCmd.Flags().StringVarP(&cpuProfilePath, "cpuprofile", "", "", "location to save a CPU profile") + + if err := rootCmd.Execute(); err != nil { + log.Fatal(err) + } +} diff --git 
a/example_config.yaml b/example_config.yaml index 199275c..66e8ef1 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -2,22 +2,22 @@ trakr: announce_interval: 15m allow_ip_spoofing: true default_num_want: 50 - + http: addr: 0.0.0.0:6881 real_ip_header: x-real-ip read_timeout: 5s write_timeout: 5s request_timeout: 5s - + udp: addr: 0.0.0.0:6881 - + storage: name: memory config: shards: 1 - + prehooks: - name: jwt config: @@ -29,6 +29,6 @@ trakr: type: whitelist clients: - OP1011 - + posthooks: - - name: gossip \ No newline at end of file + - name: gossip diff --git a/hooks.go b/hooks.go index bbe12a8..03c4430 100644 --- a/hooks.go +++ b/hooks.go @@ -14,13 +14,19 @@ package trakr -import "github.com/jzelinskie/trakr/bittorrent" +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/jzelinskie/trakr/bittorrent" +) // Hook abstracts the concept of anything that needs to interact with a // BitTorrent client's request and response to a BitTorrent tracker. type Hook interface { - HandleAnnounce(context.Context, bittorrent.AnnounceRequest, bittorrent.AnnounceResponse) error - HandleScrape(context.Context, bittorrent.ScrapeRequest, bittorrent.ScrapeResponse) error + HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error + HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error } // HookConstructor is a function used to create a new instance of a Hook. 
@@ -36,7 +42,7 @@ func RegisterPreHook(name string, con HookConstructor) { if con == nil { panic("trakr: could not register nil HookConstructor") } - if _, dup := constructors[name]; dup { + if _, dup := preHooks[name]; dup { panic("trakr: could not register duplicate HookConstructor: " + name) } preHooks[name] = con @@ -61,7 +67,7 @@ func RegisterPostHook(name string, con HookConstructor) { if con == nil { panic("trakr: could not register nil HookConstructor") } - if _, dup := constructors[name]; dup { + if _, dup := postHooks[name]; dup { panic("trakr: could not register duplicate HookConstructor: " + name) } preHooks[name] = con diff --git a/stopper/stopper.go b/stopper/stopper.go index 75dc4f8..2b9bd34 100644 --- a/stopper/stopper.go +++ b/stopper/stopper.go @@ -4,11 +4,11 @@ import ( "sync" ) -// AlreadyStopped is a closed error channel to be used by StopperFuncs when +// AlreadyStopped is a closed error channel to be used by Funcs when // an element was already stopped. var AlreadyStopped <-chan error -// AlreadyStoppedFunc is a StopperFunc that returns AlreadyStopped. +// AlreadyStoppedFunc is a Func that returns AlreadyStopped. var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped } func init() { @@ -30,7 +30,7 @@ type Stopper interface { // StopGroup is a group that can be stopped. type StopGroup struct { - stoppables []StopperFunc + stoppables []Func stoppablesLock sync.Mutex } @@ -40,7 +40,7 @@ type Func func() <-chan error // NewStopGroup creates a new StopGroup. func NewStopGroup() *StopGroup { return &StopGroup{ - stoppables: make([]StopperFunc, 0), + stoppables: make([]Func, 0), } } @@ -53,9 +53,9 @@ func (cg *StopGroup) Add(toAdd Stopper) { cg.stoppables = append(cg.stoppables, toAdd.Stop) } -// AddFunc adds a StopperFunc to the StopGroup. -// On the next call to Stop(), the StopperFunc will be called. -func (cg *StopGroup) AddFunc(toAddFunc StopperFunc) { +// AddFunc adds a Func to the StopGroup. 
+// On the next call to Stop(), the Func will be called. +func (cg *StopGroup) AddFunc(toAddFunc Func) { cg.stoppablesLock.Lock() defer cg.stoppablesLock.Unlock() diff --git a/storage.go b/storage.go index 8d7056c..02c719a 100644 --- a/storage.go +++ b/storage.go @@ -10,7 +10,7 @@ import ( // ErrResourceDoesNotExist is the error returned by all delete methods in the // store if the requested resource does not exist. -var ErrResourceDoesNotExist = bittorrent.ClientError(errors.New("resource does not exist")) +var ErrResourceDoesNotExist = bittorrent.ClientError("resource does not exist") // PeerStore is an interface that abstracts the interactions of storing and // manipulating Peers such that it can be implemented for various data stores. @@ -68,7 +68,7 @@ type PeerStore interface { // PeerStore. type PeerStoreConstructor func(interface{}) (PeerStore, error) -var peerStores = make(map[string]PeerStoreConstructors) +var peerStores = make(map[string]PeerStoreConstructor) // RegisterPeerStore makes a PeerStoreConstructor available by the provided // name. @@ -80,7 +80,7 @@ func RegisterPeerStore(name string, con PeerStoreConstructor) { panic("trakr: could not register nil PeerStoreConstructor") } - if _, dup := peerStore[name]; dup { + if _, dup := peerStores[name]; dup { panic("trakr: could not register duplicate PeerStoreConstructor: " + name) } @@ -88,7 +88,7 @@ func RegisterPeerStore(name string, con PeerStoreConstructor) { } // NewPeerStore creates an instance of the given PeerStore by name. -func NewPeerStore(name, config interface{}) (PeerStore, error) { +func NewPeerStore(name string, config interface{}) (PeerStore, error) { con, ok := peerStores[name] if !ok { return nil, fmt.Errorf("trakr: unknown PeerStore %q (forgotten import?)", name) diff --git a/tracker.go b/tracker.go index 4cc86cb..27d2686 100644 --- a/tracker.go +++ b/tracker.go @@ -17,22 +17,93 @@ // has been delievered to a BitTorrent client. 
package trakr +import ( + "errors" + "io" + "io/ioutil" + "os" + "time" + + "github.com/jzelinskie/trakr/bittorrent/http" + "github.com/jzelinskie/trakr/bittorrent/udp" + "gopkg.in/yaml.v2" +) + +// GenericConfig is a block of configuration who's structure is unknown. +type GenericConfig struct { + name string `yaml:"name"` + config interface{} `yaml:"config"` +} + // MultiTracker is a multi-protocol, customizable BitTorrent Tracker. type MultiTracker struct { - HTTPConfig http.Config - UDPConfig udp.Config - AnnounceInterval time.Duration - GCInterval time.Duration - GCExpiration time.Duration - PreHooks []Hook - PostHooks []Hook + AnnounceInterval time.Duration `yaml:"announce_interval"` + GCInterval time.Duration `yaml:"gc_interval"` + GCExpiration time.Duration `yaml:"gc_expiration"` + HTTPConfig http.Config `yaml:"http"` + UDPConfig udp.Config `yaml:"udp"` + PeerStoreConfig []GenericConfig `yaml:"storage"` + PreHooks []GenericConfig `yaml:"prehooks"` + PostHooks []GenericConfig `yaml:"posthooks"` + peerStore PeerStore httpTracker http.Tracker udpTracker udp.Tracker } +// decodeConfigFile unmarshals an io.Reader into a new MultiTracker. +func decodeConfigFile(r io.Reader) (*MultiTracker, error) { + contents, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + cfgFile := struct { + mt MultiTracker `yaml:"trakr"` + }{} + err = yaml.Unmarshal(contents, cfgFile) + if err != nil { + return nil, err + } + + return &cfgFile.mt, nil +} + +// MultiTrackerFromFile returns a new MultiTracker given the path to a YAML +// configuration file. +// +// It supports relative and absolute paths and environment variables. 
+func MultiTrackerFromFile(path string) (*MultiTracker, error) { + if path == "" { + return nil, errors.New("no config path specified") + } + + f, err := os.Open(os.ExpandEnv(path)) + if err != nil { + return nil, err + } + defer f.Close() + + cfg, err := decodeConfigFile(f) + if err != nil { + return nil, err + } + + return cfg, nil +} + +// Stop provides a thread-safe way to shutdown a currently running +// MultiTracker. +func (t *MultiTracker) Stop() { +} + // ListenAndServe listens on the protocols and addresses specified in the // HTTPConfig and UDPConfig then blocks serving BitTorrent requests until // t.Stop() is called or an error is returned. func (t *MultiTracker) ListenAndServe() error { + // Build an TrackerFuncs from the PreHooks and PostHooks. + // Create a PeerStore instance. + // Create a HTTP Tracker instance. + // Create a UDP Tracker instance. + return nil } From 11d135ce49d3546899c9244d49bdec7537c612ef Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Fri, 5 Aug 2016 03:35:17 -0400 Subject: [PATCH 16/74] get prom working --- bittorrent/http/tracker.go | 5 ++++ bittorrent/udp/tracker.go | 5 ++++ cmd/trakr/main.go | 57 ++++++++++++++++++++++++++++++++++++-- example_config.yaml | 3 ++ tracker.go | 54 ++++++------------------------------ 5 files changed, 75 insertions(+), 49 deletions(-) diff --git a/bittorrent/http/tracker.go b/bittorrent/http/tracker.go index a2edabc..f029de2 100644 --- a/bittorrent/http/tracker.go +++ b/bittorrent/http/tracker.go @@ -29,6 +29,11 @@ import ( "github.com/jzelinskie/trakr/bittorrent" ) +func init() { + prometheus.MustRegister(promResponseDurationMilliseconds) + recordResponseDuration("action", nil, time.Second) +} + var promResponseDurationMilliseconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "trakr_http_response_duration_milliseconds", diff --git a/bittorrent/udp/tracker.go b/bittorrent/udp/tracker.go index 4efa640..2ff6ac6 100644 --- a/bittorrent/udp/tracker.go +++ 
b/bittorrent/udp/tracker.go @@ -31,6 +31,11 @@ import ( "github.com/jzelinskie/trakr/bittorrent/udp/bytepool" ) +func init() { + prometheus.MustRegister(promResponseDurationMilliseconds) + recordResponseDuration("action", nil, time.Second) +} + var promResponseDurationMilliseconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "trakr_udp_response_duration_milliseconds", diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index 65409c5..f250c87 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -2,17 +2,57 @@ package main import ( "errors" + "io/ioutil" "log" + "net/http" "os" "os/signal" "runtime/pprof" "syscall" + "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" + "gopkg.in/yaml.v2" "github.com/jzelinskie/trakr" ) +type ConfigFile struct { + Config struct { + PrometheusAddr string `yaml:"prometheus_addr"` + trakr.MultiTracker + } `yaml:"trakr"` +} + +// ParseConfigFile returns a new ConfigFile given the path to a YAML +// configuration file. +// +// It supports relative and absolute paths and environment variables. 
+func ParseConfigFile(path string) (*ConfigFile, error) { + if path == "" { + return nil, errors.New("no config path specified") + } + + f, err := os.Open(os.ExpandEnv(path)) + if err != nil { + return nil, err + } + defer f.Close() + + contents, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + var cfgFile ConfigFile + err = yaml.Unmarshal(contents, &cfgFile) + if err != nil { + return nil, err + } + + return &cfgFile, nil +} + func main() { var configFilePath string var cpuProfilePath string @@ -33,19 +73,30 @@ func main() { defer pprof.StopCPUProfile() } - mt, err := trakr.MultiTrackerFromFile(configFilePath) + configFile, err := ParseConfigFile(configFilePath) if err != nil { return errors.New("failed to read config: " + err.Error()) } + go func() { + promServer := http.Server{ + Addr: configFile.Config.PrometheusAddr, + Handler: prometheus.Handler(), + } + log.Println("started serving prometheus stats on", configFile.Config.PrometheusAddr) + if err := promServer.ListenAndServe(); err != nil { + log.Fatal(err) + } + }() + go func() { shutdown := make(chan os.Signal) signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM) <-shutdown - mt.Stop() + configFile.Config.MultiTracker.Stop() }() - if err := mt.ListenAndServe(); err != nil { + if err := configFile.Config.MultiTracker.ListenAndServe(); err != nil { return errors.New("failed to cleanly shutdown: " + err.Error()) } diff --git a/example_config.yaml b/example_config.yaml index 66e8ef1..b6e7760 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -1,7 +1,10 @@ trakr: announce_interval: 15m + gc_interval: 15m + gc_expiration: 15m allow_ip_spoofing: true default_num_want: 50 + prometheus_addr: localhost:6880 http: addr: 0.0.0.0:6881 diff --git a/tracker.go b/tracker.go index 27d2686..a4dab7c 100644 --- a/tracker.go +++ b/tracker.go @@ -18,15 +18,10 @@ package trakr import ( - "errors" - "io" - "io/ioutil" - "os" "time" "github.com/jzelinskie/trakr/bittorrent/http" 
"github.com/jzelinskie/trakr/bittorrent/udp" - "gopkg.in/yaml.v2" ) // GenericConfig is a block of configuration who's structure is unknown. @@ -49,61 +44,28 @@ type MultiTracker struct { peerStore PeerStore httpTracker http.Tracker udpTracker udp.Tracker -} - -// decodeConfigFile unmarshals an io.Reader into a new MultiTracker. -func decodeConfigFile(r io.Reader) (*MultiTracker, error) { - contents, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - cfgFile := struct { - mt MultiTracker `yaml:"trakr"` - }{} - err = yaml.Unmarshal(contents, cfgFile) - if err != nil { - return nil, err - } - - return &cfgFile.mt, nil -} - -// MultiTrackerFromFile returns a new MultiTracker given the path to a YAML -// configuration file. -// -// It supports relative and absolute paths and environment variables. -func MultiTrackerFromFile(path string) (*MultiTracker, error) { - if path == "" { - return nil, errors.New("no config path specified") - } - - f, err := os.Open(os.ExpandEnv(path)) - if err != nil { - return nil, err - } - defer f.Close() - - cfg, err := decodeConfigFile(f) - if err != nil { - return nil, err - } - - return cfg, nil + closing chan struct{} } // Stop provides a thread-safe way to shutdown a currently running // MultiTracker. func (t *MultiTracker) Stop() { + close(t.closing) } // ListenAndServe listens on the protocols and addresses specified in the // HTTPConfig and UDPConfig then blocks serving BitTorrent requests until // t.Stop() is called or an error is returned. func (t *MultiTracker) ListenAndServe() error { + t.closing = make(chan struct{}) // Build an TrackerFuncs from the PreHooks and PostHooks. // Create a PeerStore instance. // Create a HTTP Tracker instance. // Create a UDP Tracker instance. 
+ select { + case <-t.closing: + return nil + } + return nil } From 8f67c1018e551c735fae750d85bb6be5a841c26d Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sat, 6 Aug 2016 22:41:33 -0400 Subject: [PATCH 17/74] restructure into backend and frontends --- tracker.go => backend/backend.go | 56 ++++++++++----- hooks.go => backend/hooks.go | 2 +- storage.go => backend/storage.go | 2 +- bittorrent/bittorrent.go | 26 ------- cmd/trakr/main.go | 72 ++++++++++++++++--- .../http/bencode/bencode.go | 0 .../http/bencode/decoder.go | 0 .../http/bencode/decoder_test.go | 0 .../http/bencode/encoder.go | 0 .../http/bencode/encoder_test.go | 0 .../tracker.go => frontends/http/frontend.go | 32 ++++----- {bittorrent => frontends}/http/parser.go | 0 .../http/query_params.go | 0 .../http/query_params_test.go | 0 {bittorrent => frontends}/http/writer.go | 2 +- {bittorrent => frontends}/http/writer_test.go | 0 .../udp/bytepool/bytepool.go | 0 .../udp/connection_id.go | 0 .../udp/connection_id_test.go | 0 .../tracker.go => frontends/udp/frontend.go | 23 +++--- {bittorrent => frontends}/udp/parser.go | 0 {bittorrent => frontends}/udp/writer.go | 0 22 files changed, 132 insertions(+), 83 deletions(-) rename tracker.go => backend/backend.go (50%) rename hooks.go => backend/hooks.go (99%) rename storage.go => backend/storage.go (99%) rename {bittorrent => frontends}/http/bencode/bencode.go (100%) rename {bittorrent => frontends}/http/bencode/decoder.go (100%) rename {bittorrent => frontends}/http/bencode/decoder_test.go (100%) rename {bittorrent => frontends}/http/bencode/encoder.go (100%) rename {bittorrent => frontends}/http/bencode/encoder_test.go (100%) rename bittorrent/http/tracker.go => frontends/http/frontend.go (86%) rename {bittorrent => frontends}/http/parser.go (100%) rename {bittorrent => frontends}/http/query_params.go (100%) rename {bittorrent => frontends}/http/query_params_test.go (100%) rename {bittorrent => frontends}/http/writer.go (98%) rename {bittorrent => 
frontends}/http/writer_test.go (100%) rename {bittorrent => frontends}/udp/bytepool/bytepool.go (100%) rename {bittorrent => frontends}/udp/connection_id.go (100%) rename {bittorrent => frontends}/udp/connection_id_test.go (100%) rename bittorrent/udp/tracker.go => frontends/udp/frontend.go (91%) rename {bittorrent => frontends}/udp/parser.go (100%) rename {bittorrent => frontends}/udp/writer.go (100%) diff --git a/tracker.go b/backend/backend.go similarity index 50% rename from tracker.go rename to backend/backend.go index a4dab7c..2e00989 100644 --- a/tracker.go +++ b/backend/backend.go @@ -15,13 +15,13 @@ // Package trakr implements a BitTorrent Tracker that supports multiple // protocols and configurable Hooks that execute before and after a Response // has been delievered to a BitTorrent client. -package trakr +package backend import ( "time" - "github.com/jzelinskie/trakr/bittorrent/http" - "github.com/jzelinskie/trakr/bittorrent/udp" + "github.com/jzelinskie/trakr/bittorrent" + "golang.org/x/net/context" ) // GenericConfig is a block of configuration who's structure is unknown. @@ -30,38 +30,32 @@ type GenericConfig struct { config interface{} `yaml:"config"` } -// MultiTracker is a multi-protocol, customizable BitTorrent Tracker. -type MultiTracker struct { +// Backend is a multi-protocol, customizable BitTorrent Tracker. +type Backend struct { AnnounceInterval time.Duration `yaml:"announce_interval"` GCInterval time.Duration `yaml:"gc_interval"` GCExpiration time.Duration `yaml:"gc_expiration"` - HTTPConfig http.Config `yaml:"http"` - UDPConfig udp.Config `yaml:"udp"` PeerStoreConfig []GenericConfig `yaml:"storage"` PreHooks []GenericConfig `yaml:"prehooks"` PostHooks []GenericConfig `yaml:"posthooks"` - peerStore PeerStore - httpTracker http.Tracker - udpTracker udp.Tracker - closing chan struct{} + peerStore PeerStore + closing chan struct{} } // Stop provides a thread-safe way to shutdown a currently running -// MultiTracker. 
-func (t *MultiTracker) Stop() { +// Backend. +func (t *Backend) Stop() { close(t.closing) } -// ListenAndServe listens on the protocols and addresses specified in the -// HTTPConfig and UDPConfig then blocks serving BitTorrent requests until -// t.Stop() is called or an error is returned. -func (t *MultiTracker) ListenAndServe() error { +// Start starts the Backend. +// It blocks until t.Stop() is called or an error is returned. +func (t *Backend) Start() error { t.closing = make(chan struct{}) // Build an TrackerFuncs from the PreHooks and PostHooks. // Create a PeerStore instance. - // Create a HTTP Tracker instance. - // Create a UDP Tracker instance. + // Make TrackerFuncs available to be used by frontends. select { case <-t.closing: return nil @@ -69,3 +63,27 @@ func (t *MultiTracker) ListenAndServe() error { return nil } + +// TrackerFuncs is the collection of callback functions provided by the Backend +// to (1) generate a response from a parsed request, and (2) observe anything +// after the response has been delivered to the client. +type TrackerFuncs struct { + HandleAnnounce AnnounceHandler + HandleScrape ScrapeHandler + AfterAnnounce AnnounceCallback + AfterScrape ScrapeCallback +} + +// AnnounceHandler is a function that generates a response for an Announce. +type AnnounceHandler func(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) + +// AnnounceCallback is a function that does something with the results of an +// Announce after it has been completed. +type AnnounceCallback func(*bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) + +// ScrapeHandler is a function that generates a response for a Scrape. +type ScrapeHandler func(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) + +// ScrapeCallback is a function that does something with the results of a +// Scrape after it has been completed. 
+type ScrapeCallback func(*bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) diff --git a/hooks.go b/backend/hooks.go similarity index 99% rename from hooks.go rename to backend/hooks.go index 03c4430..484c4b9 100644 --- a/hooks.go +++ b/backend/hooks.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package trakr +package backend import ( "fmt" diff --git a/storage.go b/backend/storage.go similarity index 99% rename from storage.go rename to backend/storage.go index 02c719a..541742f 100644 --- a/storage.go +++ b/backend/storage.go @@ -1,4 +1,4 @@ -package trakr +package backend import ( "fmt" diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go index 34294fe..20c9b66 100644 --- a/bittorrent/bittorrent.go +++ b/bittorrent/bittorrent.go @@ -20,8 +20,6 @@ package bittorrent import ( "net" "time" - - "golang.org/x/net/context" ) // PeerID represents a peer ID. @@ -108,13 +106,6 @@ type AnnounceResponse struct { IPv6Peers []Peer } -// AnnounceHandler is a function that generates a response for an Announce. -type AnnounceHandler func(context.Context, *AnnounceRequest) (*AnnounceResponse, error) - -// AnnounceCallback is a function that does something with the results of an -// Announce after it has been completed. -type AnnounceCallback func(*AnnounceRequest, *AnnounceResponse) - // ScrapeRequest represents the parsed parameters from a scrape request. type ScrapeRequest struct { InfoHashes []InfoHash @@ -133,13 +124,6 @@ type Scrape struct { Incomplete uint32 } -// ScrapeHandler is a function that generates a response for a Scrape. -type ScrapeHandler func(context.Context, *ScrapeRequest) (*ScrapeResponse, error) - -// ScrapeCallback is a function that does something with the results of a -// Scrape after it has been completed. 
-type ScrapeCallback func(*ScrapeRequest, *ScrapeResponse) - // Peer represents the connection details of a peer that is returned in an // announce response. type Peer struct { @@ -171,13 +155,3 @@ type Tracker interface { ListenAndServe() error Stop() } - -// TrackerFuncs is the collection of callback functions provided to a Tracker -// to (1) generate a response from a parsed request, and (2) observe anything -// after the response has been delivered to the client. -type TrackerFuncs struct { - HandleAnnounce AnnounceHandler - HandleScrape ScrapeHandler - AfterAnnounce AnnounceCallback - AfterScrape ScrapeCallback -} diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index f250c87..f44d764 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -14,13 +14,18 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "github.com/jzelinskie/trakr" + "github.com/jzelinskie/trakr/backend" + + httpfrontend "github.com/jzelinskie/trakr/frontends/http" + udpfrontend "github.com/jzelinskie/trakr/frontends/udp" ) type ConfigFile struct { Config struct { PrometheusAddr string `yaml:"prometheus_addr"` - trakr.MultiTracker + backend.Backend + HTTPConfig httpfrontend.Config `yaml:"http"` + UDPConfig udpfrontend.Config `yaml:"udp"` } `yaml:"trakr"` } @@ -89,15 +94,66 @@ func main() { } }() + errChan := make(chan error) + closedChan := make(chan struct{}) + go func() { - shutdown := make(chan os.Signal) - signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM) - <-shutdown - configFile.Config.MultiTracker.Stop() + if err := configFile.Config.Backend.Start(); err != nil { + errChan <- errors.New("failed to cleanly shutdown: " + err.Error()) + } }() - if err := configFile.Config.MultiTracker.ListenAndServe(); err != nil { - return errors.New("failed to cleanly shutdown: " + err.Error()) + var hFrontend *httpfrontend.Frontend + var uFrontend *udpfrontend.Frontend + + if configFile.Config.HTTPConfig.Addr != "" { + // TODO get the real TrackerFuncs + hFrontend = 
httpfrontend.NewFrontend(backend.TrackerFuncs{}, configFile.Config.HTTPConfig) + + go func() { + log.Println("started serving HTTP on", configFile.Config.HTTPConfig.Addr) + if err := hFrontend.ListenAndServe(); err != nil { + errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error()) + } + }() + } + + if configFile.Config.UDPConfig.Addr != "" { + // TODO get the real TrackerFuncs + uFrontend = udpfrontend.NewFrontend(backend.TrackerFuncs{}, configFile.Config.UDPConfig) + + go func() { + log.Println("started serving UDP on", configFile.Config.UDPConfig.Addr) + if err := uFrontend.ListenAndServe(); err != nil { + errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error()) + } + }() + } + + shutdown := make(chan os.Signal) + signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-shutdown + + if uFrontend != nil { + uFrontend.Stop() + } + + if hFrontend != nil { + hFrontend.Stop() + } + + configFile.Config.Backend.Stop() + + close(errChan) + close(closedChan) + }() + + err = <-errChan + if err != nil { + close(shutdown) + <-closedChan + return err } return nil diff --git a/bittorrent/http/bencode/bencode.go b/frontends/http/bencode/bencode.go similarity index 100% rename from bittorrent/http/bencode/bencode.go rename to frontends/http/bencode/bencode.go diff --git a/bittorrent/http/bencode/decoder.go b/frontends/http/bencode/decoder.go similarity index 100% rename from bittorrent/http/bencode/decoder.go rename to frontends/http/bencode/decoder.go diff --git a/bittorrent/http/bencode/decoder_test.go b/frontends/http/bencode/decoder_test.go similarity index 100% rename from bittorrent/http/bencode/decoder_test.go rename to frontends/http/bencode/decoder_test.go diff --git a/bittorrent/http/bencode/encoder.go b/frontends/http/bencode/encoder.go similarity index 100% rename from bittorrent/http/bencode/encoder.go rename to frontends/http/bencode/encoder.go diff --git a/bittorrent/http/bencode/encoder_test.go 
b/frontends/http/bencode/encoder_test.go similarity index 100% rename from bittorrent/http/bencode/encoder_test.go rename to frontends/http/bencode/encoder_test.go diff --git a/bittorrent/http/tracker.go b/frontends/http/frontend.go similarity index 86% rename from bittorrent/http/tracker.go rename to frontends/http/frontend.go index f029de2..716da66 100644 --- a/bittorrent/http/tracker.go +++ b/frontends/http/frontend.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package http implements a BitTorrent tracker via the HTTP protocol as +// Package http implements a BitTorrent frontend via the HTTP protocol as // described in BEP 3 and BEP 23. package http @@ -26,7 +26,7 @@ import ( "github.com/tylerb/graceful" "golang.org/x/net/context" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/jzelinskie/trakr/backend" ) func init() { @@ -43,8 +43,8 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec( []string{"action", "error"}, ) -// recordResponseDuration records the duration of time to respond to a UDP -// Request in milliseconds . +// recordResponseDuration records the duration of time to respond to a Request +// in milliseconds . func recordResponseDuration(action string, err error, duration time.Duration) { var errString string if err != nil { @@ -57,7 +57,7 @@ func recordResponseDuration(action string, err error, duration time.Duration) { } // Config represents all of the configurable options for an HTTP BitTorrent -// Tracker. +// Frontend. type Config struct { Addr string ReadTimeout time.Duration @@ -67,29 +67,29 @@ type Config struct { RealIPHeader string } -// Tracker holds the state of an HTTP BitTorrent Tracker. -type Tracker struct { +// Frontend holds the state of an HTTP BitTorrent Frontend. +type Frontend struct { grace *graceful.Server - bittorrent.TrackerFuncs + backend.TrackerFuncs Config } -// NewTracker allocates a new instance of a Tracker. 
-func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) *Tracker { - return &Tracker{ +// NewFrontend allocates a new instance of a Frontend. +func NewFrontend(funcs backend.TrackerFuncs, cfg Config) *Frontend { + return &Frontend{ TrackerFuncs: funcs, Config: cfg, } } // Stop provides a thread-safe way to shutdown a currently running Tracker. -func (t *Tracker) Stop() { +func (t *Frontend) Stop() { t.grace.Stop(t.grace.Timeout) <-t.grace.StopChan() } -func (t *Tracker) handler() http.Handler { +func (t *Frontend) handler() http.Handler { router := httprouter.New() router.GET("/announce", t.announceRoute) router.GET("/scrape", t.scrapeRoute) @@ -98,7 +98,7 @@ func (t *Tracker) handler() http.Handler { // ListenAndServe listens on the TCP network address t.Addr and blocks serving // BitTorrent requests until t.Stop() is called or an error is returned. -func (t *Tracker) ListenAndServe() error { +func (t *Frontend) ListenAndServe() error { t.grace = &graceful.Server{ Server: &http.Server{ Addr: t.Addr, @@ -139,7 +139,7 @@ func (t *Tracker) ListenAndServe() error { } // announceRoute parses and responds to an Announce by using t.TrackerFuncs. -func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { +func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { var err error start := time.Now() defer recordResponseDuration("announce", err, time.Since(start)) @@ -168,7 +168,7 @@ func (t *Tracker) announceRoute(w http.ResponseWriter, r *http.Request, _ httpro } // scrapeRoute parses and responds to a Scrape by using t.TrackerFuncs. 
-func (t *Tracker) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { +func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { var err error start := time.Now() defer recordResponseDuration("scrape", err, time.Since(start)) diff --git a/bittorrent/http/parser.go b/frontends/http/parser.go similarity index 100% rename from bittorrent/http/parser.go rename to frontends/http/parser.go diff --git a/bittorrent/http/query_params.go b/frontends/http/query_params.go similarity index 100% rename from bittorrent/http/query_params.go rename to frontends/http/query_params.go diff --git a/bittorrent/http/query_params_test.go b/frontends/http/query_params_test.go similarity index 100% rename from bittorrent/http/query_params_test.go rename to frontends/http/query_params_test.go diff --git a/bittorrent/http/writer.go b/frontends/http/writer.go similarity index 98% rename from bittorrent/http/writer.go rename to frontends/http/writer.go index c1e9266..0565fc3 100644 --- a/bittorrent/http/writer.go +++ b/frontends/http/writer.go @@ -18,7 +18,7 @@ import ( "net/http" "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/bittorrent/http/bencode" + "github.com/jzelinskie/trakr/frontends/http/bencode" ) // WriteError communicates an error to a BitTorrent client over HTTP. 
diff --git a/bittorrent/http/writer_test.go b/frontends/http/writer_test.go similarity index 100% rename from bittorrent/http/writer_test.go rename to frontends/http/writer_test.go diff --git a/bittorrent/udp/bytepool/bytepool.go b/frontends/udp/bytepool/bytepool.go similarity index 100% rename from bittorrent/udp/bytepool/bytepool.go rename to frontends/udp/bytepool/bytepool.go diff --git a/bittorrent/udp/connection_id.go b/frontends/udp/connection_id.go similarity index 100% rename from bittorrent/udp/connection_id.go rename to frontends/udp/connection_id.go diff --git a/bittorrent/udp/connection_id_test.go b/frontends/udp/connection_id_test.go similarity index 100% rename from bittorrent/udp/connection_id_test.go rename to frontends/udp/connection_id_test.go diff --git a/bittorrent/udp/tracker.go b/frontends/udp/frontend.go similarity index 91% rename from bittorrent/udp/tracker.go rename to frontends/udp/frontend.go index 2ff6ac6..7da88f7 100644 --- a/bittorrent/udp/tracker.go +++ b/frontends/udp/frontend.go @@ -27,8 +27,9 @@ import ( "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" + "github.com/jzelinskie/trakr/backend" "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/bittorrent/udp/bytepool" + "github.com/jzelinskie/trakr/frontends/udp/bytepool" ) func init() { @@ -67,27 +68,27 @@ type Config struct { AllowIPSpoofing bool } -// Tracker holds the state of a UDP BitTorrent Tracker. -type Tracker struct { +// Frontend holds the state of a UDP BitTorrent Frontend. +type Frontend struct { socket *net.UDPConn closing chan struct{} wg sync.WaitGroup - bittorrent.TrackerFuncs + backend.TrackerFuncs Config } -// NewTracker allocates a new instance of a Tracker. -func NewTracker(funcs bittorrent.TrackerFuncs, cfg Config) *Tracker { - return &Tracker{ +// NewFrontend allocates a new instance of a Frontend. 
+func NewFrontend(funcs backend.TrackerFuncs, cfg Config) *Frontend { + return &Frontend{ closing: make(chan struct{}), TrackerFuncs: funcs, Config: cfg, } } -// Stop provides a thread-safe way to shutdown a currently running Tracker. -func (t *Tracker) Stop() { +// Stop provides a thread-safe way to shutdown a currently running Frontend. +func (t *Frontend) Stop() { close(t.closing) t.socket.SetReadDeadline(time.Now()) t.wg.Wait() @@ -95,7 +96,7 @@ func (t *Tracker) Stop() { // ListenAndServe listens on the UDP network address t.Addr and blocks serving // BitTorrent requests until t.Stop() is called or an error is returned. -func (t *Tracker) ListenAndServe() error { +func (t *Frontend) ListenAndServe() error { udpAddr, err := net.ResolveUDPAddr("udp", t.Addr) if err != nil { return err @@ -175,7 +176,7 @@ func (w ResponseWriter) Write(b []byte) (int, error) { } // handleRequest parses and responds to a UDP Request. -func (t *Tracker) handleRequest(r Request, w ResponseWriter) (response []byte, actionName string, err error) { +func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, actionName string, err error) { if len(r.Packet) < 16 { // Malformed, no client packets are less than 16 bytes. // We explicitly return nothing in case this is a DoS attempt. 
diff --git a/bittorrent/udp/parser.go b/frontends/udp/parser.go similarity index 100% rename from bittorrent/udp/parser.go rename to frontends/udp/parser.go diff --git a/bittorrent/udp/writer.go b/frontends/udp/writer.go similarity index 100% rename from bittorrent/udp/writer.go rename to frontends/udp/writer.go From 736026d9d302301dcf2dfe75351d2c17b70f6ee7 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sat, 6 Aug 2016 22:43:33 -0400 Subject: [PATCH 18/74] fix bytepool out of range panic --- frontends/udp/bytepool/bytepool.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/frontends/udp/bytepool/bytepool.go b/frontends/udp/bytepool/bytepool.go index adc1207..1808687 100644 --- a/frontends/udp/bytepool/bytepool.go +++ b/frontends/udp/bytepool/bytepool.go @@ -27,9 +27,12 @@ func (bp *BytePool) Get() []byte { // Put returns a byte slice to the pool. func (bp *BytePool) Put(b []byte) { + b = b[:cap(b)] // Zero out the bytes. - for i := 0; i < cap(b); i++ { - b[i] = 0x0 + // Apparently this specific expression is optimized by the compiler, see + // github.com/golang/go/issues/5373. 
+ for i := range b { + b[i] = 0 } bp.Pool.Put(b) } From 98a7c42ab3ae81261171783f6b45c3590e860040 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sun, 7 Aug 2016 13:40:24 -0400 Subject: [PATCH 19/74] remove PeerStore registration logic, move TrackerFuncs --- backend/backend.go | 49 ++++++++++---------------------------- backend/storage.go | 33 ------------------------- cmd/trakr/main.go | 16 +++++++++---- frontends/frontends.go | 30 +++++++++++++++++++++++ frontends/http/frontend.go | 6 ++--- frontends/udp/frontend.go | 6 ++--- 6 files changed, 60 insertions(+), 80 deletions(-) create mode 100644 frontends/frontends.go diff --git a/backend/backend.go b/backend/backend.go index 2e00989..0bc4425 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -20,8 +20,7 @@ package backend import ( "time" - "github.com/jzelinskie/trakr/bittorrent" - "golang.org/x/net/context" + "github.com/jzelinskie/trakr/frontends" ) // GenericConfig is a block of configuration who's structure is unknown. @@ -30,17 +29,22 @@ type GenericConfig struct { config interface{} `yaml:"config"` } -// Backend is a multi-protocol, customizable BitTorrent Tracker. -type Backend struct { +type BackendConfig struct { AnnounceInterval time.Duration `yaml:"announce_interval"` - GCInterval time.Duration `yaml:"gc_interval"` - GCExpiration time.Duration `yaml:"gc_expiration"` - PeerStoreConfig []GenericConfig `yaml:"storage"` PreHooks []GenericConfig `yaml:"prehooks"` PostHooks []GenericConfig `yaml:"posthooks"` +} - peerStore PeerStore - closing chan struct{} +func New(config BackendConfig, peerStore PeerStore) (*Backend, error) { + // Build TrackerFuncs from the PreHooks and PostHooks + return &Backend{peerStore: peerStore}, nil +} + +// Backend is a multi-protocol, customizable BitTorrent Tracker. 
+type Backend struct { + TrackerFuncs frontends.TrackerFuncs + peerStore PeerStore + closing chan struct{} } // Stop provides a thread-safe way to shutdown a currently running @@ -53,9 +57,6 @@ func (t *Backend) Stop() { // It blocks until t.Stop() is called or an error is returned. func (t *Backend) Start() error { t.closing = make(chan struct{}) - // Build an TrackerFuncs from the PreHooks and PostHooks. - // Create a PeerStore instance. - // Make TrackerFuncs available to be used by frontends. select { case <-t.closing: return nil @@ -63,27 +64,3 @@ func (t *Backend) Start() error { return nil } - -// TrackerFuncs is the collection of callback functions provided by the Backend -// to (1) generate a response from a parsed request, and (2) observe anything -// after the response has been delivered to the client. -type TrackerFuncs struct { - HandleAnnounce AnnounceHandler - HandleScrape ScrapeHandler - AfterAnnounce AnnounceCallback - AfterScrape ScrapeCallback -} - -// AnnounceHandler is a function that generates a response for an Announce. -type AnnounceHandler func(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) - -// AnnounceCallback is a function that does something with the results of an -// Announce after it has been completed. -type AnnounceCallback func(*bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) - -// ScrapeHandler is a function that generates a response for a Scrape. -type ScrapeHandler func(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) - -// ScrapeCallback is a function that does something with the results of a -// Scrape after it has been completed. 
-type ScrapeCallback func(*bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) diff --git a/backend/storage.go b/backend/storage.go index 541742f..7761613 100644 --- a/backend/storage.go +++ b/backend/storage.go @@ -1,7 +1,6 @@ package backend import ( - "fmt" "time" "github.com/jzelinskie/trakr/bittorrent" @@ -63,35 +62,3 @@ type PeerStore interface { // For more details see the documentation in the stopper package. stopper.Stopper } - -// PeerStoreConstructor is a function used to create a new instance of a -// PeerStore. -type PeerStoreConstructor func(interface{}) (PeerStore, error) - -var peerStores = make(map[string]PeerStoreConstructor) - -// RegisterPeerStore makes a PeerStoreConstructor available by the provided -// name. -// -// If this function is called twice with the same name or if the -// PeerStoreConstructor is nil, it panics. -func RegisterPeerStore(name string, con PeerStoreConstructor) { - if con == nil { - panic("trakr: could not register nil PeerStoreConstructor") - } - - if _, dup := peerStores[name]; dup { - panic("trakr: could not register duplicate PeerStoreConstructor: " + name) - } - - peerStores[name] = con -} - -// NewPeerStore creates an instance of the given PeerStore by name. 
-func NewPeerStore(name string, config interface{}) (PeerStore, error) { - con, ok := peerStores[name] - if !ok { - return nil, fmt.Errorf("trakr: unknown PeerStore %q (forgotten import?)", name) - } - return con(config) -} diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index f44d764..1478023 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -23,7 +23,7 @@ import ( type ConfigFile struct { Config struct { PrometheusAddr string `yaml:"prometheus_addr"` - backend.Backend + backend.BackendConfig HTTPConfig httpfrontend.Config `yaml:"http"` UDPConfig udpfrontend.Config `yaml:"udp"` } `yaml:"trakr"` @@ -94,11 +94,17 @@ func main() { } }() + // TODO create PeerStore + trackerBackend, err := backend.New(configFile.Config.BackendConfig, nil) + if err != nil { + return err + } + errChan := make(chan error) closedChan := make(chan struct{}) go func() { - if err := configFile.Config.Backend.Start(); err != nil { + if err := trackerBackend.Start(); err != nil { errChan <- errors.New("failed to cleanly shutdown: " + err.Error()) } }() @@ -108,7 +114,7 @@ func main() { if configFile.Config.HTTPConfig.Addr != "" { // TODO get the real TrackerFuncs - hFrontend = httpfrontend.NewFrontend(backend.TrackerFuncs{}, configFile.Config.HTTPConfig) + hFrontend = httpfrontend.NewFrontend(trackerBackend.TrackerFuncs, configFile.Config.HTTPConfig) go func() { log.Println("started serving HTTP on", configFile.Config.HTTPConfig.Addr) @@ -120,7 +126,7 @@ func main() { if configFile.Config.UDPConfig.Addr != "" { // TODO get the real TrackerFuncs - uFrontend = udpfrontend.NewFrontend(backend.TrackerFuncs{}, configFile.Config.UDPConfig) + uFrontend = udpfrontend.NewFrontend(trackerBackend.TrackerFuncs, configFile.Config.UDPConfig) go func() { log.Println("started serving UDP on", configFile.Config.UDPConfig.Addr) @@ -143,7 +149,7 @@ func main() { hFrontend.Stop() } - configFile.Config.Backend.Stop() + trackerBackend.Stop() close(errChan) close(closedChan) diff --git 
a/frontends/frontends.go b/frontends/frontends.go new file mode 100644 index 0000000..4004a02 --- /dev/null +++ b/frontends/frontends.go @@ -0,0 +1,30 @@ +package frontends + +import ( + "github.com/jzelinskie/trakr/bittorrent" + "golang.org/x/net/context" +) + +// TrackerFuncs is the collection of callback functions provided by the Backend +// to (1) generate a response from a parsed request, and (2) observe anything +// after the response has been delivered to the client. +type TrackerFuncs struct { + HandleAnnounce AnnounceHandler + HandleScrape ScrapeHandler + AfterAnnounce AnnounceCallback + AfterScrape ScrapeCallback +} + +// AnnounceHandler is a function that generates a response for an Announce. +type AnnounceHandler func(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) + +// AnnounceCallback is a function that does something with the results of an +// Announce after it has been completed. +type AnnounceCallback func(*bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) + +// ScrapeHandler is a function that generates a response for a Scrape. +type ScrapeHandler func(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) + +// ScrapeCallback is a function that does something with the results of a +// Scrape after it has been completed. +type ScrapeCallback func(*bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) diff --git a/frontends/http/frontend.go b/frontends/http/frontend.go index 716da66..3702f94 100644 --- a/frontends/http/frontend.go +++ b/frontends/http/frontend.go @@ -26,7 +26,7 @@ import ( "github.com/tylerb/graceful" "golang.org/x/net/context" - "github.com/jzelinskie/trakr/backend" + "github.com/jzelinskie/trakr/frontends" ) func init() { @@ -71,12 +71,12 @@ type Config struct { type Frontend struct { grace *graceful.Server - backend.TrackerFuncs + frontends.TrackerFuncs Config } // NewFrontend allocates a new instance of a Frontend. 
-func NewFrontend(funcs backend.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(funcs frontends.TrackerFuncs, cfg Config) *Frontend { return &Frontend{ TrackerFuncs: funcs, Config: cfg, diff --git a/frontends/udp/frontend.go b/frontends/udp/frontend.go index 7da88f7..424b3ff 100644 --- a/frontends/udp/frontend.go +++ b/frontends/udp/frontend.go @@ -27,8 +27,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" - "github.com/jzelinskie/trakr/backend" "github.com/jzelinskie/trakr/bittorrent" + "github.com/jzelinskie/trakr/frontends" "github.com/jzelinskie/trakr/frontends/udp/bytepool" ) @@ -74,12 +74,12 @@ type Frontend struct { closing chan struct{} wg sync.WaitGroup - backend.TrackerFuncs + frontends.TrackerFuncs Config } // NewFrontend allocates a new instance of a Frontend. -func NewFrontend(funcs backend.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(funcs frontends.TrackerFuncs, cfg Config) *Frontend { return &Frontend{ closing: make(chan struct{}), TrackerFuncs: funcs, From 88567d5b2e761d987345d0db4eaf3f81af975638 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sun, 7 Aug 2016 17:20:31 -0400 Subject: [PATCH 20/74] remove backend Start/Stop funcs --- backend/backend.go | 19 ------------------- cmd/trakr/main.go | 19 +++++++------------ 2 files changed, 7 insertions(+), 31 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index 0bc4425..bea1d6c 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -44,23 +44,4 @@ func New(config BackendConfig, peerStore PeerStore) (*Backend, error) { type Backend struct { TrackerFuncs frontends.TrackerFuncs peerStore PeerStore - closing chan struct{} -} - -// Stop provides a thread-safe way to shutdown a currently running -// Backend. -func (t *Backend) Stop() { - close(t.closing) -} - -// Start starts the Backend. -// It blocks until t.Stop() is called or an error is returned. 
-func (t *Backend) Start() error { - t.closing = make(chan struct{}) - select { - case <-t.closing: - return nil - } - - return nil } diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index 1478023..5f0590b 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -103,12 +103,6 @@ func main() { errChan := make(chan error) closedChan := make(chan struct{}) - go func() { - if err := trackerBackend.Start(); err != nil { - errChan <- errors.New("failed to cleanly shutdown: " + err.Error()) - } - }() - var hFrontend *httpfrontend.Frontend var uFrontend *udpfrontend.Frontend @@ -149,17 +143,18 @@ func main() { hFrontend.Stop() } - trackerBackend.Stop() + // TODO: stop PeerStore close(errChan) close(closedChan) }() - err = <-errChan - if err != nil { - close(shutdown) - <-closedChan - return err + for err := range errChan { + if err != nil { + close(shutdown) + <-closedChan + return err + } } return nil From ae18d89627a99a7580e323002162c4634ea88eee Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Tue, 9 Aug 2016 15:01:36 -0400 Subject: [PATCH 21/74] rename frontends to frontend --- backend/backend.go | 4 ++-- cmd/trakr/main.go | 4 ++-- frontends/frontends.go => frontend/frontend.go | 2 +- {frontends => frontend}/http/bencode/bencode.go | 0 {frontends => frontend}/http/bencode/decoder.go | 0 {frontends => frontend}/http/bencode/decoder_test.go | 0 {frontends => frontend}/http/bencode/encoder.go | 0 {frontends => frontend}/http/bencode/encoder_test.go | 0 {frontends => frontend}/http/frontend.go | 6 +++--- {frontends => frontend}/http/parser.go | 0 {frontends => frontend}/http/query_params.go | 0 {frontends => frontend}/http/query_params_test.go | 0 {frontends => frontend}/http/writer.go | 2 +- {frontends => frontend}/http/writer_test.go | 0 {frontends => frontend}/udp/bytepool/bytepool.go | 0 {frontends => frontend}/udp/connection_id.go | 0 {frontends => frontend}/udp/connection_id_test.go | 0 {frontends => frontend}/udp/frontend.go | 8 ++++---- {frontends => 
frontend}/udp/parser.go | 0 {frontends => frontend}/udp/writer.go | 0 20 files changed, 13 insertions(+), 13 deletions(-) rename frontends/frontends.go => frontend/frontend.go (98%) rename {frontends => frontend}/http/bencode/bencode.go (100%) rename {frontends => frontend}/http/bencode/decoder.go (100%) rename {frontends => frontend}/http/bencode/decoder_test.go (100%) rename {frontends => frontend}/http/bencode/encoder.go (100%) rename {frontends => frontend}/http/bencode/encoder_test.go (100%) rename {frontends => frontend}/http/frontend.go (97%) rename {frontends => frontend}/http/parser.go (100%) rename {frontends => frontend}/http/query_params.go (100%) rename {frontends => frontend}/http/query_params_test.go (100%) rename {frontends => frontend}/http/writer.go (98%) rename {frontends => frontend}/http/writer_test.go (100%) rename {frontends => frontend}/udp/bytepool/bytepool.go (100%) rename {frontends => frontend}/udp/connection_id.go (100%) rename {frontends => frontend}/udp/connection_id_test.go (100%) rename {frontends => frontend}/udp/frontend.go (97%) rename {frontends => frontend}/udp/parser.go (100%) rename {frontends => frontend}/udp/writer.go (100%) diff --git a/backend/backend.go b/backend/backend.go index bea1d6c..1d97b02 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -20,7 +20,7 @@ package backend import ( "time" - "github.com/jzelinskie/trakr/frontends" + "github.com/jzelinskie/trakr/frontend" ) // GenericConfig is a block of configuration who's structure is unknown. @@ -42,6 +42,6 @@ func New(config BackendConfig, peerStore PeerStore) (*Backend, error) { // Backend is a multi-protocol, customizable BitTorrent Tracker. 
type Backend struct { - TrackerFuncs frontends.TrackerFuncs + TrackerFuncs frontend.TrackerFuncs peerStore PeerStore } diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index 5f0590b..d8cd4fc 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -16,8 +16,8 @@ import ( "github.com/jzelinskie/trakr/backend" - httpfrontend "github.com/jzelinskie/trakr/frontends/http" - udpfrontend "github.com/jzelinskie/trakr/frontends/udp" + httpfrontend "github.com/jzelinskie/trakr/frontend/http" + udpfrontend "github.com/jzelinskie/trakr/frontend/udp" ) type ConfigFile struct { diff --git a/frontends/frontends.go b/frontend/frontend.go similarity index 98% rename from frontends/frontends.go rename to frontend/frontend.go index 4004a02..f445d99 100644 --- a/frontends/frontends.go +++ b/frontend/frontend.go @@ -1,4 +1,4 @@ -package frontends +package frontend import ( "github.com/jzelinskie/trakr/bittorrent" diff --git a/frontends/http/bencode/bencode.go b/frontend/http/bencode/bencode.go similarity index 100% rename from frontends/http/bencode/bencode.go rename to frontend/http/bencode/bencode.go diff --git a/frontends/http/bencode/decoder.go b/frontend/http/bencode/decoder.go similarity index 100% rename from frontends/http/bencode/decoder.go rename to frontend/http/bencode/decoder.go diff --git a/frontends/http/bencode/decoder_test.go b/frontend/http/bencode/decoder_test.go similarity index 100% rename from frontends/http/bencode/decoder_test.go rename to frontend/http/bencode/decoder_test.go diff --git a/frontends/http/bencode/encoder.go b/frontend/http/bencode/encoder.go similarity index 100% rename from frontends/http/bencode/encoder.go rename to frontend/http/bencode/encoder.go diff --git a/frontends/http/bencode/encoder_test.go b/frontend/http/bencode/encoder_test.go similarity index 100% rename from frontends/http/bencode/encoder_test.go rename to frontend/http/bencode/encoder_test.go diff --git a/frontends/http/frontend.go b/frontend/http/frontend.go similarity index 
97% rename from frontends/http/frontend.go rename to frontend/http/frontend.go index 3702f94..4e3e5d7 100644 --- a/frontends/http/frontend.go +++ b/frontend/http/frontend.go @@ -26,7 +26,7 @@ import ( "github.com/tylerb/graceful" "golang.org/x/net/context" - "github.com/jzelinskie/trakr/frontends" + "github.com/jzelinskie/trakr/frontend" ) func init() { @@ -71,12 +71,12 @@ type Config struct { type Frontend struct { grace *graceful.Server - frontends.TrackerFuncs + frontend.TrackerFuncs Config } // NewFrontend allocates a new instance of a Frontend. -func NewFrontend(funcs frontends.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(funcs frontend.TrackerFuncs, cfg Config) *Frontend { return &Frontend{ TrackerFuncs: funcs, Config: cfg, diff --git a/frontends/http/parser.go b/frontend/http/parser.go similarity index 100% rename from frontends/http/parser.go rename to frontend/http/parser.go diff --git a/frontends/http/query_params.go b/frontend/http/query_params.go similarity index 100% rename from frontends/http/query_params.go rename to frontend/http/query_params.go diff --git a/frontends/http/query_params_test.go b/frontend/http/query_params_test.go similarity index 100% rename from frontends/http/query_params_test.go rename to frontend/http/query_params_test.go diff --git a/frontends/http/writer.go b/frontend/http/writer.go similarity index 98% rename from frontends/http/writer.go rename to frontend/http/writer.go index 0565fc3..36ea1cd 100644 --- a/frontends/http/writer.go +++ b/frontend/http/writer.go @@ -18,7 +18,7 @@ import ( "net/http" "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/frontends/http/bencode" + "github.com/jzelinskie/trakr/frontend/http/bencode" ) // WriteError communicates an error to a BitTorrent client over HTTP. 
diff --git a/frontends/http/writer_test.go b/frontend/http/writer_test.go similarity index 100% rename from frontends/http/writer_test.go rename to frontend/http/writer_test.go diff --git a/frontends/udp/bytepool/bytepool.go b/frontend/udp/bytepool/bytepool.go similarity index 100% rename from frontends/udp/bytepool/bytepool.go rename to frontend/udp/bytepool/bytepool.go diff --git a/frontends/udp/connection_id.go b/frontend/udp/connection_id.go similarity index 100% rename from frontends/udp/connection_id.go rename to frontend/udp/connection_id.go diff --git a/frontends/udp/connection_id_test.go b/frontend/udp/connection_id_test.go similarity index 100% rename from frontends/udp/connection_id_test.go rename to frontend/udp/connection_id_test.go diff --git a/frontends/udp/frontend.go b/frontend/udp/frontend.go similarity index 97% rename from frontends/udp/frontend.go rename to frontend/udp/frontend.go index 424b3ff..f916e3c 100644 --- a/frontends/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -28,8 +28,8 @@ import ( "golang.org/x/net/context" "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/frontends" - "github.com/jzelinskie/trakr/frontends/udp/bytepool" + "github.com/jzelinskie/trakr/frontend" + "github.com/jzelinskie/trakr/frontend/udp/bytepool" ) func init() { @@ -74,12 +74,12 @@ type Frontend struct { closing chan struct{} wg sync.WaitGroup - frontends.TrackerFuncs + frontend.TrackerFuncs Config } // NewFrontend allocates a new instance of a Frontend. 
-func NewFrontend(funcs frontends.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(funcs frontend.TrackerFuncs, cfg Config) *Frontend { return &Frontend{ closing: make(chan struct{}), TrackerFuncs: funcs, diff --git a/frontends/udp/parser.go b/frontend/udp/parser.go similarity index 100% rename from frontends/udp/parser.go rename to frontend/udp/parser.go diff --git a/frontends/udp/writer.go b/frontend/udp/writer.go similarity index 100% rename from frontends/udp/writer.go rename to frontend/udp/writer.go From 9a8cdccc6c6885d579f03c231476d9a26835fabe Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Tue, 9 Aug 2016 15:21:59 -0400 Subject: [PATCH 22/74] make frontend.TrackerFuncs an interface --- backend/backend.go | 40 +++++++++++++++++++++++++++++++++++++-- frontend/frontend.go | 35 +++++++++++++++------------------- frontend/http/frontend.go | 20 ++++++++------------ frontend/udp/frontend.go | 23 ++++++++++------------ 4 files changed, 71 insertions(+), 47 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index 1d97b02..3c06c18 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -20,7 +20,11 @@ package backend import ( "time" + "log" + + "github.com/jzelinskie/trakr/bittorrent" "github.com/jzelinskie/trakr/frontend" + "golang.org/x/net/context" ) // GenericConfig is a block of configuration who's structure is unknown. @@ -35,6 +39,8 @@ type BackendConfig struct { PostHooks []GenericConfig `yaml:"posthooks"` } +var _ frontend.TrackerFuncs = &Backend{} + func New(config BackendConfig, peerStore PeerStore) (*Backend, error) { // Build TrackerFuncs from the PreHooks and PostHooks return &Backend{peerStore: peerStore}, nil @@ -42,6 +48,36 @@ func New(config BackendConfig, peerStore PeerStore) (*Backend, error) { // Backend is a multi-protocol, customizable BitTorrent Tracker. 
type Backend struct { - TrackerFuncs frontend.TrackerFuncs - peerStore PeerStore + peerStore PeerStore + handleAnnounce func(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) + afterAnnounce func(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error + handleScrape func(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) + afterScrape func(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error +} + +// HandleAnnounce generates a response for an Announce. +func (b *Backend) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) { + return b.handleAnnounce(ctx, req) +} + +// AfterAnnounce does something with the results of an Announce after it +// has been completed. +func (b *Backend) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { + err := b.afterAnnounce(ctx, req, resp) + if err != nil { + log.Println("trakr: post-announce hooks failed:", err.Error()) + } +} + +// HandleScrape generates a response for a Scrape. +func (b *Backend) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) { + return b.handleScrape(ctx, req) +} + +// AfterScrape does something with the results of a Scrape after it has been completed. 
+func (b *Backend) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { + err := b.afterScrape(ctx, req, resp) + if err != nil { + log.Println("trakr: post-scrape hooks failed:", err.Error()) + } } diff --git a/frontend/frontend.go b/frontend/frontend.go index f445d99..89fa128 100644 --- a/frontend/frontend.go +++ b/frontend/frontend.go @@ -1,30 +1,25 @@ package frontend import ( - "github.com/jzelinskie/trakr/bittorrent" "golang.org/x/net/context" + + "github.com/jzelinskie/trakr/bittorrent" ) // TrackerFuncs is the collection of callback functions provided by the Backend // to (1) generate a response from a parsed request, and (2) observe anything // after the response has been delivered to the client. -type TrackerFuncs struct { - HandleAnnounce AnnounceHandler - HandleScrape ScrapeHandler - AfterAnnounce AnnounceCallback - AfterScrape ScrapeCallback +type TrackerFuncs interface { + // HandleAnnounce generates a response for an Announce. + HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) + + // AfterAnnounce does something with the results of an Announce after it + // has been completed. + AfterAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) + + // HandleScrape generates a response for a Scrape. + HandleScrape(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) + + // AfterScrape does something with the results of a Scrape after it has been completed. + AfterScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) } - -// AnnounceHandler is a function that generates a response for an Announce. -type AnnounceHandler func(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) - -// AnnounceCallback is a function that does something with the results of an -// Announce after it has been completed. 
-type AnnounceCallback func(*bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) - -// ScrapeHandler is a function that generates a response for a Scrape. -type ScrapeHandler func(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) - -// ScrapeCallback is a function that does something with the results of a -// Scrape after it has been completed. -type ScrapeCallback func(*bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go index 4e3e5d7..991e064 100644 --- a/frontend/http/frontend.go +++ b/frontend/http/frontend.go @@ -71,15 +71,15 @@ type Config struct { type Frontend struct { grace *graceful.Server - frontend.TrackerFuncs + backend frontend.TrackerFuncs Config } // NewFrontend allocates a new instance of a Frontend. -func NewFrontend(funcs frontend.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(backend frontend.TrackerFuncs, cfg Config) *Frontend { return &Frontend{ - TrackerFuncs: funcs, - Config: cfg, + backend: backend, + Config: cfg, } } @@ -150,7 +150,7 @@ func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr return } - resp, err := t.HandleAnnounce(context.TODO(), req) + resp, err := t.backend.HandleAnnounce(context.TODO(), req) if err != nil { WriteError(w, err) return @@ -162,9 +162,7 @@ func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr return } - if t.AfterAnnounce != nil { - go t.AfterAnnounce(req, resp) - } + go t.backend.AfterAnnounce(context.TODO(), req, resp) } // scrapeRoute parses and responds to a Scrape by using t.TrackerFuncs. 
@@ -179,7 +177,7 @@ func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou return } - resp, err := t.HandleScrape(context.TODO(), req) + resp, err := t.backend.HandleScrape(context.TODO(), req) if err != nil { WriteError(w, err) return @@ -191,7 +189,5 @@ func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou return } - if t.AfterScrape != nil { - go t.AfterScrape(req, resp) - } + go t.backend.AfterScrape(context.TODO(), req, resp) } diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index f916e3c..70b4b19 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -74,16 +74,16 @@ type Frontend struct { closing chan struct{} wg sync.WaitGroup - frontend.TrackerFuncs + backend frontend.TrackerFuncs Config } // NewFrontend allocates a new instance of a Frontend. -func NewFrontend(funcs frontend.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(backend frontend.TrackerFuncs, cfg Config) *Frontend { return &Frontend{ - closing: make(chan struct{}), - TrackerFuncs: funcs, - Config: cfg, + closing: make(chan struct{}), + backend: backend, + Config: cfg, } } @@ -221,7 +221,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, } var resp *bittorrent.AnnounceResponse - resp, err = t.HandleAnnounce(context.TODO(), req) + resp, err = t.backend.HandleAnnounce(context.TODO(), req) if err != nil { WriteError(w, txID, err) return @@ -229,9 +229,8 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, WriteAnnounce(w, txID, resp) - if t.AfterAnnounce != nil { - go t.AfterAnnounce(req, resp) - } + // TODO(mrd0ll4r): evaluate if it's worth spawning another goroutine. 
+ go t.backend.AfterAnnounce(context.TODO(), req, resp) return @@ -246,7 +245,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, } var resp *bittorrent.ScrapeResponse - resp, err = t.HandleScrape(context.TODO(), req) + resp, err = t.backend.HandleScrape(context.TODO(), req) if err != nil { WriteError(w, txID, err) return @@ -254,9 +253,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, WriteScrape(w, txID, resp) - if t.AfterScrape != nil { - go t.AfterScrape(req, resp) - } + go t.backend.AfterScrape(context.TODO(), req, resp) return From 732b2d536ef38b0db0a3dc62c44901a79101f637 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Tue, 9 Aug 2016 16:01:14 -0400 Subject: [PATCH 23/74] remove register pattern for hooks --- backend/backend.go | 97 ++++++++++++++++++++++++++++++++-------------- backend/hooks.go | 55 +++----------------------- cmd/trakr/main.go | 8 ++-- 3 files changed, 77 insertions(+), 83 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index 3c06c18..ba40324 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -14,70 +14,109 @@ // Package trakr implements a BitTorrent Tracker that supports multiple // protocols and configurable Hooks that execute before and after a Response -// has been delievered to a BitTorrent client. +// has been delivered to a BitTorrent client. package backend import ( + "log" "time" - "log" + "golang.org/x/net/context" "github.com/jzelinskie/trakr/bittorrent" "github.com/jzelinskie/trakr/frontend" - "golang.org/x/net/context" ) -// GenericConfig is a block of configuration who's structure is unknown. 
-type GenericConfig struct { - name string `yaml:"name"` - config interface{} `yaml:"config"` -} - type BackendConfig struct { - AnnounceInterval time.Duration `yaml:"announce_interval"` - PreHooks []GenericConfig `yaml:"prehooks"` - PostHooks []GenericConfig `yaml:"posthooks"` + AnnounceInterval time.Duration `yaml:"announce_interval"` } var _ frontend.TrackerFuncs = &Backend{} -func New(config BackendConfig, peerStore PeerStore) (*Backend, error) { - // Build TrackerFuncs from the PreHooks and PostHooks - return &Backend{peerStore: peerStore}, nil +func New(config BackendConfig, peerStore PeerStore, announcePreHooks, announcePostHooks, scrapePreHooks, scrapePostHooks []Hook) (*Backend, error) { + toReturn := &Backend{ + announceInterval: config.AnnounceInterval, + peerStore: peerStore, + announcePreHooks: announcePreHooks, + announcePostHooks: announcePostHooks, + scrapePreHooks: scrapePreHooks, + scrapePostHooks: scrapePostHooks, + } + + if len(toReturn.announcePreHooks) == 0 { + toReturn.announcePreHooks = []Hook{nopHook{}} + } + + if len(toReturn.announcePostHooks) == 0 { + toReturn.announcePostHooks = []Hook{nopHook{}} + } + + if len(toReturn.scrapePreHooks) == 0 { + toReturn.scrapePreHooks = []Hook{nopHook{}} + } + + if len(toReturn.scrapePostHooks) == 0 { + toReturn.scrapePostHooks = []Hook{nopHook{}} + } + + return toReturn, nil } -// Backend is a multi-protocol, customizable BitTorrent Tracker. +// Backend is a protocol-agnostic backend of a BitTorrent tracker. 
type Backend struct { - peerStore PeerStore - handleAnnounce func(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) - afterAnnounce func(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error - handleScrape func(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) - afterScrape func(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error + announceInterval time.Duration + peerStore PeerStore + announcePreHooks []Hook + announcePostHooks []Hook + scrapePreHooks []Hook + scrapePostHooks []Hook } // HandleAnnounce generates a response for an Announce. func (b *Backend) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) { - return b.handleAnnounce(ctx, req) + resp := &bittorrent.AnnounceResponse{ + Interval: b.announceInterval, + } + for _, h := range b.announcePreHooks { + if err := h.HandleAnnounce(ctx, req, resp); err != nil { + return nil, err + } + } + + return resp, nil } // AfterAnnounce does something with the results of an Announce after it // has been completed. func (b *Backend) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { - err := b.afterAnnounce(ctx, req, resp) - if err != nil { - log.Println("trakr: post-announce hooks failed:", err.Error()) + for _, h := range b.announcePostHooks { + if err := h.HandleAnnounce(ctx, req, resp); err != nil { + log.Println("trakr: post-announce hooks failed:", err.Error()) + return + } } } // HandleScrape generates a response for a Scrape. 
func (b *Backend) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) { - return b.handleScrape(ctx, req) + resp := &bittorrent.ScrapeResponse{ + Files: make(map[bittorrent.InfoHash]bittorrent.Scrape), + } + for _, h := range b.scrapePreHooks { + if err := h.HandleScrape(ctx, req, resp); err != nil { + return nil, err + } + } + + return resp, nil } // AfterScrape does something with the results of a Scrape after it has been completed. func (b *Backend) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { - err := b.afterScrape(ctx, req, resp) - if err != nil { - log.Println("trakr: post-scrape hooks failed:", err.Error()) + for _, h := range b.scrapePostHooks { + if err := h.HandleScrape(ctx, req, resp); err != nil { + log.Println("trakr: post-scrape hooks failed:", err.Error()) + return + } } } diff --git a/backend/hooks.go b/backend/hooks.go index 484c4b9..2c1792a 100644 --- a/backend/hooks.go +++ b/backend/hooks.go @@ -15,8 +15,6 @@ package backend import ( - "fmt" - "golang.org/x/net/context" "github.com/jzelinskie/trakr/bittorrent" @@ -29,55 +27,12 @@ type Hook interface { HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error } -// HookConstructor is a function used to create a new instance of a Hook. -type HookConstructor func(interface{}) (Hook, error) +type nopHook struct{} -var preHooks = make(map[string]HookConstructor) - -// RegisterPreHook makes a HookConstructor available by the provided name. -// -// If this function is called twice with the same name or if the -// HookConstructor is nil, it panics. 
-func RegisterPreHook(name string, con HookConstructor) { - if con == nil { - panic("trakr: could not register nil HookConstructor") - } - if _, dup := preHooks[name]; dup { - panic("trakr: could not register duplicate HookConstructor: " + name) - } - preHooks[name] = con +func (nopHook) HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error { + return nil } -// NewPreHook creates an instance of the given PreHook by name. -func NewPreHook(name string, config interface{}) (Hook, error) { - con, ok := preHooks[name] - if !ok { - return nil, fmt.Errorf("trakr: unknown PreHook %q (forgotten import?)", name) - } - return con(config) -} - -var postHooks = make(map[string]HookConstructor) - -// RegisterPostHook makes a HookConstructor available by the provided name. -// -// If this function is called twice with the same name or if the -// HookConstructor is nil, it panics. -func RegisterPostHook(name string, con HookConstructor) { - if con == nil { - panic("trakr: could not register nil HookConstructor") - } - if _, dup := postHooks[name]; dup { - panic("trakr: could not register duplicate HookConstructor: " + name) - } - preHooks[name] = con -} - -// NewPostHook creates an instance of the given PostHook by name. 
-func NewPostHook(name string, config interface{}) (Hook, error) { - con, ok := preHooks[name] - if !ok { - return nil, fmt.Errorf("trakr: unknown PostHook %q (forgotten import?)", name) - } - return con(config) +func (nopHook) HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error { + return nil } diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index d8cd4fc..9e7f9a2 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -15,7 +15,6 @@ import ( "gopkg.in/yaml.v2" "github.com/jzelinskie/trakr/backend" - httpfrontend "github.com/jzelinskie/trakr/frontend/http" udpfrontend "github.com/jzelinskie/trakr/frontend/udp" ) @@ -95,7 +94,8 @@ func main() { }() // TODO create PeerStore - trackerBackend, err := backend.New(configFile.Config.BackendConfig, nil) + // TODO create Hooks + trackerBackend, err := backend.New(configFile.Config.BackendConfig, nil, nil, nil, nil, nil) if err != nil { return err } @@ -108,7 +108,7 @@ func main() { if configFile.Config.HTTPConfig.Addr != "" { // TODO get the real TrackerFuncs - hFrontend = httpfrontend.NewFrontend(trackerBackend.TrackerFuncs, configFile.Config.HTTPConfig) + hFrontend = httpfrontend.NewFrontend(trackerBackend, configFile.Config.HTTPConfig) go func() { log.Println("started serving HTTP on", configFile.Config.HTTPConfig.Addr) @@ -120,7 +120,7 @@ func main() { if configFile.Config.UDPConfig.Addr != "" { // TODO get the real TrackerFuncs - uFrontend = udpfrontend.NewFrontend(trackerBackend.TrackerFuncs, configFile.Config.UDPConfig) + uFrontend = udpfrontend.NewFrontend(trackerBackend, configFile.Config.UDPConfig) go func() { log.Println("started serving UDP on", configFile.Config.UDPConfig.Addr) From bff3d203a227489a907c9efa976773938077a093 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 19:05:43 -0400 Subject: [PATCH 24/74] add leo to maintainers --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 5e7376c..fe19b72 
100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,2 +1,3 @@ Jimmy Zelinskie (@jzelinskie) pkg:* Justin Li (@pushrax) pkg:* +Leo Balduf (@mrd0ll4r) pkg:* From 11d90b088c6ce451e6a6e87ccaff37b7071cab2a Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 19:28:59 -0400 Subject: [PATCH 25/74] s/trackerfuncs/trackerlogic --- backend/backend.go | 14 +++++++------- cmd/trakr/main.go | 4 ++-- frontend/frontend.go | 6 +++--- frontend/http/frontend.go | 8 ++++---- frontend/udp/frontend.go | 4 ++-- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/backend/backend.go b/backend/backend.go index ba40324..1a17679 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -12,9 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package trakr implements a BitTorrent Tracker that supports multiple -// protocols and configurable Hooks that execute before and after a Response -// has been delivered to a BitTorrent client. +// Package backend implements the TrackerLogic interface by executing +// a series of middleware hooks. package backend import ( @@ -31,7 +30,7 @@ type BackendConfig struct { AnnounceInterval time.Duration `yaml:"announce_interval"` } -var _ frontend.TrackerFuncs = &Backend{} +var _ frontend.TrackerLogic = &Backend{} func New(config BackendConfig, peerStore PeerStore, announcePreHooks, announcePostHooks, scrapePreHooks, scrapePostHooks []Hook) (*Backend, error) { toReturn := &Backend{ @@ -86,8 +85,8 @@ func (b *Backend) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRe return resp, nil } -// AfterAnnounce does something with the results of an Announce after it -// has been completed. +// AfterAnnounce does something with the results of an Announce after it has +// been completed. 
func (b *Backend) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { for _, h := range b.announcePostHooks { if err := h.HandleAnnounce(ctx, req, resp); err != nil { @@ -111,7 +110,8 @@ func (b *Backend) HandleScrape(ctx context.Context, req *bittorrent.ScrapeReques return resp, nil } -// AfterScrape does something with the results of a Scrape after it has been completed. +// AfterScrape does something with the results of a Scrape after it has been +// completed. func (b *Backend) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { for _, h := range b.scrapePostHooks { if err := h.HandleScrape(ctx, req, resp); err != nil { diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index 9e7f9a2..a49de9e 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -107,7 +107,7 @@ func main() { var uFrontend *udpfrontend.Frontend if configFile.Config.HTTPConfig.Addr != "" { - // TODO get the real TrackerFuncs + // TODO get the real TrackerLogic hFrontend = httpfrontend.NewFrontend(trackerBackend, configFile.Config.HTTPConfig) go func() { @@ -119,7 +119,7 @@ func main() { } if configFile.Config.UDPConfig.Addr != "" { - // TODO get the real TrackerFuncs + // TODO get the real TrackerLogic uFrontend = udpfrontend.NewFrontend(trackerBackend, configFile.Config.UDPConfig) go func() { diff --git a/frontend/frontend.go b/frontend/frontend.go index 89fa128..a577d96 100644 --- a/frontend/frontend.go +++ b/frontend/frontend.go @@ -6,10 +6,10 @@ import ( "github.com/jzelinskie/trakr/bittorrent" ) -// TrackerFuncs is the collection of callback functions provided by the Backend -// to (1) generate a response from a parsed request, and (2) observe anything +// TrackerLogic is the interface used by a frontend in order to: (1) generate a +// response from a parsed request, and (2) asynchronously observe anything // after the response has been delivered to the client. 
-type TrackerFuncs interface { +type TrackerLogic interface { // HandleAnnounce generates a response for an Announce. HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go index 991e064..0ba228d 100644 --- a/frontend/http/frontend.go +++ b/frontend/http/frontend.go @@ -71,12 +71,12 @@ type Config struct { type Frontend struct { grace *graceful.Server - backend frontend.TrackerFuncs + backend frontend.TrackerLogic Config } // NewFrontend allocates a new instance of a Frontend. -func NewFrontend(backend frontend.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(backend frontend.TrackerLogic, cfg Config) *Frontend { return &Frontend{ backend: backend, Config: cfg, @@ -138,7 +138,7 @@ func (t *Frontend) ListenAndServe() error { return nil } -// announceRoute parses and responds to an Announce by using t.TrackerFuncs. +// announceRoute parses and responds to an Announce by using t.TrackerLogic. func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { var err error start := time.Now() @@ -165,7 +165,7 @@ func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr go t.backend.AfterAnnounce(context.TODO(), req, resp) } -// scrapeRoute parses and responds to a Scrape by using t.TrackerFuncs. +// scrapeRoute parses and responds to a Scrape by using t.TrackerLogic. func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { var err error start := time.Now() diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index 70b4b19..a74432e 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -74,12 +74,12 @@ type Frontend struct { closing chan struct{} wg sync.WaitGroup - backend frontend.TrackerFuncs + backend frontend.TrackerLogic Config } // NewFrontend allocates a new instance of a Frontend. 
-func NewFrontend(backend frontend.TrackerFuncs, cfg Config) *Frontend { +func NewFrontend(backend frontend.TrackerLogic, cfg Config) *Frontend { return &Frontend{ closing: make(chan struct{}), backend: backend, From c7b17d319547b0226fd8832f8f1700d3dde9351a Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 19:30:18 -0400 Subject: [PATCH 26/74] remove default text (this is generated automatically) --- cmd/trakr/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index a49de9e..8527baf 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -164,7 +164,7 @@ func main() { }, } - rootCmd.Flags().StringVar(&configFilePath, "config", "/etc/trakr.yaml", "location of configuration file (defaults to /etc/trakr.yaml)") + rootCmd.Flags().StringVar(&configFilePath, "config", "/etc/trakr.yaml", "location of configuration file") rootCmd.Flags().StringVarP(&cpuProfilePath, "cpuprofile", "", "", "location to save a CPU profile") if err := rootCmd.Execute(); err != nil { From c9fe95b103f8cd2d886afd37f70cc799bf3ee61f Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 20:08:15 -0400 Subject: [PATCH 27/74] s/backend/middleware --- backend/backend.go | 122 ----------------------------- cmd/trakr/main.go | 31 ++++---- frontend/http/frontend.go | 16 ++-- frontend/udp/frontend.go | 15 ++-- {backend => middleware}/hooks.go | 16 +--- middleware/middleware.go | 109 ++++++++++++++++++++++++++ {backend => middleware}/storage.go | 2 +- 7 files changed, 142 insertions(+), 169 deletions(-) delete mode 100644 backend/backend.go rename {backend => middleware}/hooks.go (53%) create mode 100644 middleware/middleware.go rename {backend => middleware}/storage.go (99%) diff --git a/backend/backend.go b/backend/backend.go deleted file mode 100644 index 1a17679..0000000 --- a/backend/backend.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package backend implements the TrackerLogic interface by executing -// a series of middleware hooks. -package backend - -import ( - "log" - "time" - - "golang.org/x/net/context" - - "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/frontend" -) - -type BackendConfig struct { - AnnounceInterval time.Duration `yaml:"announce_interval"` -} - -var _ frontend.TrackerLogic = &Backend{} - -func New(config BackendConfig, peerStore PeerStore, announcePreHooks, announcePostHooks, scrapePreHooks, scrapePostHooks []Hook) (*Backend, error) { - toReturn := &Backend{ - announceInterval: config.AnnounceInterval, - peerStore: peerStore, - announcePreHooks: announcePreHooks, - announcePostHooks: announcePostHooks, - scrapePreHooks: scrapePreHooks, - scrapePostHooks: scrapePostHooks, - } - - if len(toReturn.announcePreHooks) == 0 { - toReturn.announcePreHooks = []Hook{nopHook{}} - } - - if len(toReturn.announcePostHooks) == 0 { - toReturn.announcePostHooks = []Hook{nopHook{}} - } - - if len(toReturn.scrapePreHooks) == 0 { - toReturn.scrapePreHooks = []Hook{nopHook{}} - } - - if len(toReturn.scrapePostHooks) == 0 { - toReturn.scrapePostHooks = []Hook{nopHook{}} - } - - return toReturn, nil -} - -// Backend is a protocol-agnostic backend of a BitTorrent tracker. 
-type Backend struct { - announceInterval time.Duration - peerStore PeerStore - announcePreHooks []Hook - announcePostHooks []Hook - scrapePreHooks []Hook - scrapePostHooks []Hook -} - -// HandleAnnounce generates a response for an Announce. -func (b *Backend) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) { - resp := &bittorrent.AnnounceResponse{ - Interval: b.announceInterval, - } - for _, h := range b.announcePreHooks { - if err := h.HandleAnnounce(ctx, req, resp); err != nil { - return nil, err - } - } - - return resp, nil -} - -// AfterAnnounce does something with the results of an Announce after it has -// been completed. -func (b *Backend) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { - for _, h := range b.announcePostHooks { - if err := h.HandleAnnounce(ctx, req, resp); err != nil { - log.Println("trakr: post-announce hooks failed:", err.Error()) - return - } - } -} - -// HandleScrape generates a response for a Scrape. -func (b *Backend) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) { - resp := &bittorrent.ScrapeResponse{ - Files: make(map[bittorrent.InfoHash]bittorrent.Scrape), - } - for _, h := range b.scrapePreHooks { - if err := h.HandleScrape(ctx, req, resp); err != nil { - return nil, err - } - } - - return resp, nil -} - -// AfterScrape does something with the results of a Scrape after it has been -// completed. 
-func (b *Backend) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { - for _, h := range b.scrapePostHooks { - if err := h.HandleScrape(ctx, req, resp); err != nil { - log.Println("trakr: post-scrape hooks failed:", err.Error()) - return - } - } -} diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index 8527baf..1c7e2e8 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -14,17 +14,17 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "github.com/jzelinskie/trakr/backend" httpfrontend "github.com/jzelinskie/trakr/frontend/http" udpfrontend "github.com/jzelinskie/trakr/frontend/udp" + "github.com/jzelinskie/trakr/middleware" ) type ConfigFile struct { - Config struct { - PrometheusAddr string `yaml:"prometheus_addr"` - backend.BackendConfig - HTTPConfig httpfrontend.Config `yaml:"http"` - UDPConfig udpfrontend.Config `yaml:"udp"` + MainConfigBlock struct { + PrometheusAddr string `yaml:"prometheus_addr"` + HTTPConfig httpfrontend.Config `yaml:"http"` + UDPConfig udpfrontend.Config `yaml:"udp"` + middleware.Config } `yaml:"trakr"` } @@ -81,13 +81,14 @@ func main() { if err != nil { return errors.New("failed to read config: " + err.Error()) } + cfg := configFile.MainConfigBlock go func() { promServer := http.Server{ - Addr: configFile.Config.PrometheusAddr, + Addr: cfg.PrometheusAddr, Handler: prometheus.Handler(), } - log.Println("started serving prometheus stats on", configFile.Config.PrometheusAddr) + log.Println("started serving prometheus stats on", cfg.PrometheusAddr) if err := promServer.ListenAndServe(); err != nil { log.Fatal(err) } @@ -95,7 +96,7 @@ func main() { // TODO create PeerStore // TODO create Hooks - trackerBackend, err := backend.New(configFile.Config.BackendConfig, nil, nil, nil, nil, nil) + logic := middleware.NewLogic(cfg.Config, nil, nil, nil, nil, nil) if err != nil { return err } @@ -106,24 +107,24 @@ func main() { var hFrontend *httpfrontend.Frontend var uFrontend 
*udpfrontend.Frontend - if configFile.Config.HTTPConfig.Addr != "" { + if cfg.HTTPConfig.Addr != "" { // TODO get the real TrackerLogic - hFrontend = httpfrontend.NewFrontend(trackerBackend, configFile.Config.HTTPConfig) + hFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig) go func() { - log.Println("started serving HTTP on", configFile.Config.HTTPConfig.Addr) + log.Println("started serving HTTP on", cfg.HTTPConfig.Addr) if err := hFrontend.ListenAndServe(); err != nil { errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error()) } }() } - if configFile.Config.UDPConfig.Addr != "" { + if cfg.UDPConfig.Addr != "" { // TODO get the real TrackerLogic - uFrontend = udpfrontend.NewFrontend(trackerBackend, configFile.Config.UDPConfig) + uFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig) go func() { - log.Println("started serving UDP on", configFile.Config.UDPConfig.Addr) + log.Println("started serving UDP on", cfg.UDPConfig.Addr) if err := uFrontend.ListenAndServe(); err != nil { errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error()) } diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go index 0ba228d..41b64ab 100644 --- a/frontend/http/frontend.go +++ b/frontend/http/frontend.go @@ -71,15 +71,15 @@ type Config struct { type Frontend struct { grace *graceful.Server - backend frontend.TrackerLogic + logic frontend.TrackerLogic Config } // NewFrontend allocates a new instance of a Frontend. 
-func NewFrontend(backend frontend.TrackerLogic, cfg Config) *Frontend { +func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend { return &Frontend{ - backend: backend, - Config: cfg, + logic: logic, + Config: cfg, } } @@ -150,7 +150,7 @@ func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr return } - resp, err := t.backend.HandleAnnounce(context.TODO(), req) + resp, err := t.logic.HandleAnnounce(context.TODO(), req) if err != nil { WriteError(w, err) return @@ -162,7 +162,7 @@ func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr return } - go t.backend.AfterAnnounce(context.TODO(), req, resp) + go t.logic.AfterAnnounce(context.TODO(), req, resp) } // scrapeRoute parses and responds to a Scrape by using t.TrackerLogic. @@ -177,7 +177,7 @@ func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou return } - resp, err := t.backend.HandleScrape(context.TODO(), req) + resp, err := t.logic.HandleScrape(context.TODO(), req) if err != nil { WriteError(w, err) return @@ -189,5 +189,5 @@ func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou return } - go t.backend.AfterScrape(context.TODO(), req, resp) + go t.logic.AfterScrape(context.TODO(), req, resp) } diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index a74432e..44c5552 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -74,15 +74,15 @@ type Frontend struct { closing chan struct{} wg sync.WaitGroup - backend frontend.TrackerLogic + logic frontend.TrackerLogic Config } // NewFrontend allocates a new instance of a Frontend. 
-func NewFrontend(backend frontend.TrackerLogic, cfg Config) *Frontend { +func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend { return &Frontend{ closing: make(chan struct{}), - backend: backend, + logic: logic, Config: cfg, } } @@ -221,7 +221,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, } var resp *bittorrent.AnnounceResponse - resp, err = t.backend.HandleAnnounce(context.TODO(), req) + resp, err = t.logic.HandleAnnounce(context.TODO(), req) if err != nil { WriteError(w, txID, err) return @@ -229,8 +229,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, WriteAnnounce(w, txID, resp) - // TODO(mrd0ll4r): evaluate if it's worth spawning another goroutine. - go t.backend.AfterAnnounce(context.TODO(), req, resp) + go t.logic.AfterAnnounce(context.TODO(), req, resp) return @@ -245,7 +244,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, } var resp *bittorrent.ScrapeResponse - resp, err = t.backend.HandleScrape(context.TODO(), req) + resp, err = t.logic.HandleScrape(context.TODO(), req) if err != nil { WriteError(w, txID, err) return @@ -253,7 +252,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, WriteScrape(w, txID, resp) - go t.backend.AfterScrape(context.TODO(), req, resp) + go t.logic.AfterScrape(context.TODO(), req, resp) return diff --git a/backend/hooks.go b/middleware/hooks.go similarity index 53% rename from backend/hooks.go rename to middleware/hooks.go index 2c1792a..73797e4 100644 --- a/backend/hooks.go +++ b/middleware/hooks.go @@ -1,18 +1,4 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend +package middleware import ( "golang.org/x/net/context" diff --git a/middleware/middleware.go b/middleware/middleware.go new file mode 100644 index 0000000..91d1cc5 --- /dev/null +++ b/middleware/middleware.go @@ -0,0 +1,109 @@ +// Package middleware implements the TrackerLogic interface by executing +// a series of middleware hooks. +package middleware + +import ( + "log" + "time" + + "golang.org/x/net/context" + + "github.com/jzelinskie/trakr/bittorrent" + "github.com/jzelinskie/trakr/frontend" +) + +type Config struct { + AnnounceInterval time.Duration `yaml:"announce_interval"` +} + +var _ frontend.TrackerLogic = &Logic{} + +func NewLogic(config Config, peerStore PeerStore, announcePreHooks, announcePostHooks, scrapePreHooks, scrapePostHooks []Hook) *Logic { + l := &Logic{ + announceInterval: config.AnnounceInterval, + peerStore: peerStore, + announcePreHooks: announcePreHooks, + announcePostHooks: announcePostHooks, + scrapePreHooks: scrapePreHooks, + scrapePostHooks: scrapePostHooks, + } + + if len(l.announcePreHooks) == 0 { + l.announcePreHooks = []Hook{nopHook{}} + } + + if len(l.announcePostHooks) == 0 { + l.announcePostHooks = []Hook{nopHook{}} + } + + if len(l.scrapePreHooks) == 0 { + l.scrapePreHooks = []Hook{nopHook{}} + } + + if len(l.scrapePostHooks) == 0 { + l.scrapePostHooks = []Hook{nopHook{}} + } + + return l +} + +// Logic is an implementation of the TrackerLogic that functions by +// executing a series of middleware hooks. 
+type Logic struct { + announceInterval time.Duration + peerStore PeerStore + announcePreHooks []Hook + announcePostHooks []Hook + scrapePreHooks []Hook + scrapePostHooks []Hook +} + +// HandleAnnounce generates a response for an Announce. +func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) { + resp := &bittorrent.AnnounceResponse{ + Interval: l.announceInterval, + } + for _, h := range l.announcePreHooks { + if err := h.HandleAnnounce(ctx, req, resp); err != nil { + return nil, err + } + } + + return resp, nil +} + +// AfterAnnounce does something with the results of an Announce after it has +// been completed. +func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { + for _, h := range l.announcePostHooks { + if err := h.HandleAnnounce(ctx, req, resp); err != nil { + log.Println("trakr: post-announce hooks failed:", err.Error()) + return + } + } +} + +// HandleScrape generates a response for a Scrape. +func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) { + resp := &bittorrent.ScrapeResponse{ + Files: make(map[bittorrent.InfoHash]bittorrent.Scrape), + } + for _, h := range l.scrapePreHooks { + if err := h.HandleScrape(ctx, req, resp); err != nil { + return nil, err + } + } + + return resp, nil +} + +// AfterScrape does something with the results of a Scrape after it has been +// completed. 
+func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { + for _, h := range l.scrapePostHooks { + if err := h.HandleScrape(ctx, req, resp); err != nil { + log.Println("trakr: post-scrape hooks failed:", err.Error()) + return + } + } +} diff --git a/backend/storage.go b/middleware/storage.go similarity index 99% rename from backend/storage.go rename to middleware/storage.go index 7761613..f1c6e92 100644 --- a/backend/storage.go +++ b/middleware/storage.go @@ -1,4 +1,4 @@ -package backend +package middleware import ( "time" From 778773cb81861d46151a10139223077488d48b71 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 20:24:09 -0400 Subject: [PATCH 28/74] rm copyright --- bittorrent/bittorrent.go | 14 -------------- bittorrent/client_id.go | 14 -------------- bittorrent/client_id_test.go | 14 -------------- bittorrent/event.go | 14 -------------- bittorrent/event_test.go | 14 -------------- frontend/http/bencode/bencode.go | 14 -------------- frontend/http/bencode/decoder.go | 14 -------------- frontend/http/bencode/decoder_test.go | 14 -------------- frontend/http/bencode/encoder.go | 14 -------------- frontend/http/bencode/encoder_test.go | 14 -------------- frontend/http/frontend.go | 14 -------------- frontend/http/parser.go | 14 -------------- frontend/http/query_params.go | 14 -------------- frontend/http/query_params_test.go | 14 -------------- frontend/http/writer.go | 14 -------------- frontend/http/writer_test.go | 14 -------------- frontend/udp/bytepool/bytepool.go | 4 ---- frontend/udp/connection_id.go | 14 -------------- frontend/udp/connection_id_test.go | 14 -------------- frontend/udp/frontend.go | 14 -------------- frontend/udp/parser.go | 14 -------------- frontend/udp/writer.go | 14 -------------- 22 files changed, 298 deletions(-) diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go index 20c9b66..eb18451 100644 --- a/bittorrent/bittorrent.go +++ 
b/bittorrent/bittorrent.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - // Package bittorrent implements all of the abstractions used to decouple the // protocol of a BitTorrent tracker from the logic of handling Announces and // Scrapes. diff --git a/bittorrent/client_id.go b/bittorrent/client_id.go index 6aab52c..b50be80 100644 --- a/bittorrent/client_id.go +++ b/bittorrent/client_id.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package bittorrent // ClientID represents the part of a PeerID that identifies a Peer's client diff --git a/bittorrent/client_id_test.go b/bittorrent/client_id_test.go index 956d0fc..126f701 100644 --- a/bittorrent/client_id_test.go +++ b/bittorrent/client_id_test.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package bittorrent import "testing" diff --git a/bittorrent/event.go b/bittorrent/event.go index e5991e6..2c68ce2 100644 --- a/bittorrent/event.go +++ b/bittorrent/event.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package bittorrent import ( diff --git a/bittorrent/event_test.go b/bittorrent/event_test.go index 0ce7944..637f69b 100644 --- a/bittorrent/event_test.go +++ b/bittorrent/event_test.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package bittorrent import ( diff --git a/frontend/http/bencode/bencode.go b/frontend/http/bencode/bencode.go index 7985adc..c08bda1 100644 --- a/frontend/http/bencode/bencode.go +++ b/frontend/http/bencode/bencode.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - // Package bencode implements bencoding of data as defined in BEP 3 using // type assertion over reflection for performance. 
package bencode diff --git a/frontend/http/bencode/decoder.go b/frontend/http/bencode/decoder.go index dba087f..a12f9b6 100644 --- a/frontend/http/bencode/decoder.go +++ b/frontend/http/bencode/decoder.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package bencode import ( diff --git a/frontend/http/bencode/decoder_test.go b/frontend/http/bencode/decoder_test.go index 375b69a..485a47a 100644 --- a/frontend/http/bencode/decoder_test.go +++ b/frontend/http/bencode/decoder_test.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package bencode import ( diff --git a/frontend/http/bencode/encoder.go b/frontend/http/bencode/encoder.go index bd8701c..f6f1095 100644 --- a/frontend/http/bencode/encoder.go +++ b/frontend/http/bencode/encoder.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package bencode import ( diff --git a/frontend/http/bencode/encoder_test.go b/frontend/http/bencode/encoder_test.go index c432208..bbd89b1 100644 --- a/frontend/http/bencode/encoder_test.go +++ b/frontend/http/bencode/encoder_test.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package bencode import ( diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go index 41b64ab..f21ddf5 100644 --- a/frontend/http/frontend.go +++ b/frontend/http/frontend.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - // Package http implements a BitTorrent frontend via the HTTP protocol as // described in BEP 3 and BEP 23. package http diff --git a/frontend/http/parser.go b/frontend/http/parser.go index a43742e..823fe25 100644 --- a/frontend/http/parser.go +++ b/frontend/http/parser.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package http import ( diff --git a/frontend/http/query_params.go b/frontend/http/query_params.go index 5607e3e..525b8e3 100644 --- a/frontend/http/query_params.go +++ b/frontend/http/query_params.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package http import ( diff --git a/frontend/http/query_params_test.go b/frontend/http/query_params_test.go index 0d96fa5..ec9a0d0 100644 --- a/frontend/http/query_params_test.go +++ b/frontend/http/query_params_test.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package http import ( diff --git a/frontend/http/writer.go b/frontend/http/writer.go index 36ea1cd..0d1b100 100644 --- a/frontend/http/writer.go +++ b/frontend/http/writer.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package http import ( diff --git a/frontend/http/writer_test.go b/frontend/http/writer_test.go index e8a5d31..522bd5f 100644 --- a/frontend/http/writer_test.go +++ b/frontend/http/writer_test.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package http import ( diff --git a/frontend/udp/bytepool/bytepool.go b/frontend/udp/bytepool/bytepool.go index 1808687..f4ec893 100644 --- a/frontend/udp/bytepool/bytepool.go +++ b/frontend/udp/bytepool/bytepool.go @@ -1,7 +1,3 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. 
-// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. - package bytepool import "sync" diff --git a/frontend/udp/connection_id.go b/frontend/udp/connection_id.go index 944f4d8..0ba7253 100644 --- a/frontend/udp/connection_id.go +++ b/frontend/udp/connection_id.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package udp import ( diff --git a/frontend/udp/connection_id_test.go b/frontend/udp/connection_id_test.go index 776b61f..0291122 100644 --- a/frontend/udp/connection_id_test.go +++ b/frontend/udp/connection_id_test.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package udp import ( diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index 44c5552..5a4a6b9 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - // Package udp implements a BitTorrent tracker via the UDP protocol as // described in BEP 15. package udp diff --git a/frontend/udp/parser.go b/frontend/udp/parser.go index 31ca9d8..c771a42 100644 --- a/frontend/udp/parser.go +++ b/frontend/udp/parser.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package udp import ( diff --git a/frontend/udp/writer.go b/frontend/udp/writer.go index 211635f..6f04d9e 100644 --- a/frontend/udp/writer.go +++ b/frontend/udp/writer.go @@ -1,17 +1,3 @@ -// Copyright 2016 Jimmy Zelinskie -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package udp import ( From 4df538d027d710422bcac723fe4c16883ad7f785 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 20:26:47 -0400 Subject: [PATCH 29/74] pass peer to storage --- middleware/storage.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/middleware/storage.go b/middleware/storage.go index f1c6e92..d9848a0 100644 --- a/middleware/storage.go +++ b/middleware/storage.go @@ -48,10 +48,10 @@ type PeerStore interface { // // The returned Peers should strive be: // - as close to length equal to numWant as possible without going over - // - all IPv4 or all IPv6 depending on the provided ipv6 boolean + // - all IPv4 or all IPv6 depending on the provided peer // - if seeder is true, should ideally return more leechers than seeders // - if seeder is false, should ideally return more seeders than leechers - AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, ipv6 bool) (peers []bittorrent.Peer, err error) + AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, p bittorrent.Peer) (peers []bittorrent.Peer, err error) // CollectGarbage deletes all Peers from the PeerStore which are older than // the cutoff time. 
This function must be able to execute while other methods From 35f7c5682f604b9d65d884eb84c09191b49f010f Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 20:29:52 -0400 Subject: [PATCH 30/74] update example config --- example_config.yaml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/example_config.yaml b/example_config.yaml index b6e7760..36e769a 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -1,13 +1,10 @@ trakr: announce_interval: 15m - gc_interval: 15m - gc_expiration: 15m - allow_ip_spoofing: true - default_num_want: 50 prometheus_addr: localhost:6880 http: addr: 0.0.0.0:6881 + allow_ip_spoofing: false real_ip_header: x-real-ip read_timeout: 5s write_timeout: 5s @@ -15,11 +12,14 @@ trakr: udp: addr: 0.0.0.0:6881 + allow_ip_spoofing: false storage: name: memory config: shards: 1 + gc_interval: 15m + gc_expiration: 15m prehooks: - name: jwt @@ -35,3 +35,5 @@ trakr: posthooks: - name: gossip + config: + boostrap_node: 127.0.0.1:6881 From c3137508d00b7fb1416196cd6fb7611a63998110 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 9 Aug 2016 21:34:16 -0400 Subject: [PATCH 31/74] make new storage pkg with memory impl --- cmd/trakr/main.go | 4 + middleware/middleware.go | 5 +- storage/memory/peer_store.go | 371 +++++++++++++++++++++++++++++ storage/memory/peer_store_test.go | 142 +++++++++++ {middleware => storage}/storage.go | 2 +- 5 files changed, 521 insertions(+), 3 deletions(-) create mode 100644 storage/memory/peer_store.go create mode 100644 storage/memory/peer_store_test.go rename {middleware => storage}/storage.go (99%) diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index 1c7e2e8..3b34214 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -17,6 +17,7 @@ import ( httpfrontend "github.com/jzelinskie/trakr/frontend/http" udpfrontend "github.com/jzelinskie/trakr/frontend/udp" "github.com/jzelinskie/trakr/middleware" + "github.com/jzelinskie/trakr/storage/memory" ) type ConfigFile 
struct { @@ -101,6 +102,9 @@ func main() { return err } + // Force the compiler to enforce memory against the storage interface. + _, _ = memory.New(memory.Config{1}) + errChan := make(chan error) closedChan := make(chan struct{}) diff --git a/middleware/middleware.go b/middleware/middleware.go index 91d1cc5..b9b31d1 100644 --- a/middleware/middleware.go +++ b/middleware/middleware.go @@ -10,6 +10,7 @@ import ( "github.com/jzelinskie/trakr/bittorrent" "github.com/jzelinskie/trakr/frontend" + "github.com/jzelinskie/trakr/storage" ) type Config struct { @@ -18,7 +19,7 @@ type Config struct { var _ frontend.TrackerLogic = &Logic{} -func NewLogic(config Config, peerStore PeerStore, announcePreHooks, announcePostHooks, scrapePreHooks, scrapePostHooks []Hook) *Logic { +func NewLogic(config Config, peerStore storage.PeerStore, announcePreHooks, announcePostHooks, scrapePreHooks, scrapePostHooks []Hook) *Logic { l := &Logic{ announceInterval: config.AnnounceInterval, peerStore: peerStore, @@ -51,7 +52,7 @@ func NewLogic(config Config, peerStore PeerStore, announcePreHooks, announcePost // executing a series of middleware hooks. 
type Logic struct { announceInterval time.Duration - peerStore PeerStore + peerStore storage.PeerStore announcePreHooks []Hook announcePostHooks []Hook scrapePreHooks []Hook diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go new file mode 100644 index 0000000..80aec6b --- /dev/null +++ b/storage/memory/peer_store.go @@ -0,0 +1,371 @@ +package memory + +import ( + "encoding/binary" + "log" + "net" + "runtime" + "sync" + "time" + + "github.com/jzelinskie/trakr/bittorrent" + "github.com/jzelinskie/trakr/storage" +) + +// TODO(jzelinskie): separate ipv4 and ipv6 swarms + +type Config struct { + ShardCount int `yaml:"shard_count"` +} + +func New(cfg Config) (storage.PeerStore, error) { + shardCount := 1 + if cfg.ShardCount > 0 { + shardCount = cfg.ShardCount + } + + shards := make([]*peerShard, shardCount) + for i := 0; i < shardCount; i++ { + shards[i] = &peerShard{} + shards[i].swarms = make(map[swarmKey]swarm) + } + + return &peerStore{ + shards: shards, + closed: make(chan struct{}), + }, nil +} + +type serializedPeer string + +type swarmKey [21]byte + +func newSwarmKey(ih bittorrent.InfoHash, p bittorrent.Peer) (key swarmKey) { + for i, ihbyte := range ih { + key[i] = ihbyte + } + if len(p.IP) == net.IPv4len { + key[20] = byte(4) + } else { + key[20] = byte(6) + } + + return +} + +type peerShard struct { + swarms map[swarmKey]swarm + sync.RWMutex +} + +type swarm struct { + // map serialized peer to mtime + seeders map[serializedPeer]int64 + leechers map[serializedPeer]int64 +} + +type peerStore struct { + shards []*peerShard + closed chan struct{} +} + +var _ storage.PeerStore = &peerStore{} + +func (s *peerStore) shardIndex(infoHash bittorrent.InfoHash) uint32 { + return binary.BigEndian.Uint32(infoHash[:4]) % uint32(len(s.shards)) +} + +func newPeerKey(p bittorrent.Peer) serializedPeer { + b := make([]byte, 20+2+len(p.IP)) + copy(b[:20], p.ID[:]) + binary.BigEndian.PutUint16(b[20:22], p.Port) + copy(b[22:], p.IP) + + return 
serializedPeer(b) +} + +func decodePeerKey(pk serializedPeer) bittorrent.Peer { + return bittorrent.Peer{ + ID: bittorrent.PeerIDFromString(string(pk[:20])), + Port: binary.BigEndian.Uint16([]byte(pk[20:22])), + IP: net.IP(pk[22:]), + } +} + +func (s *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + sk := newSwarmKey(ih, p) + pk := newPeerKey(p) + + shard := s.shards[s.shardIndex(ih)] + shard.Lock() + + if _, ok := shard.swarms[sk]; !ok { + shard.swarms[sk] = swarm{ + seeders: make(map[serializedPeer]int64), + leechers: make(map[serializedPeer]int64), + } + } + + shard.swarms[sk].seeders[pk] = time.Now().UnixNano() + + shard.Unlock() + return nil +} + +func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + sk := newSwarmKey(ih, p) + pk := newPeerKey(p) + + shard := s.shards[s.shardIndex(ih)] + shard.Lock() + + if _, ok := shard.swarms[sk]; !ok { + shard.Unlock() + return storage.ErrResourceDoesNotExist + } + + if _, ok := shard.swarms[sk].seeders[pk]; !ok { + shard.Unlock() + return storage.ErrResourceDoesNotExist + } + + delete(shard.swarms[sk].seeders, pk) + + if len(shard.swarms[sk].seeders)|len(shard.swarms[sk].leechers) == 0 { + delete(shard.swarms, sk) + } + + shard.Unlock() + return nil +} + +func (s *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + sk := newSwarmKey(ih, p) + pk := newPeerKey(p) + + shard := s.shards[s.shardIndex(ih)] + shard.Lock() + + if _, ok := shard.swarms[sk]; !ok { + shard.swarms[sk] = swarm{ + seeders: make(map[serializedPeer]int64), + leechers: make(map[serializedPeer]int64), + } + } + + shard.swarms[sk].leechers[pk] = time.Now().UnixNano() 
+ + shard.Unlock() + return nil +} + +func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + sk := newSwarmKey(ih, p) + pk := newPeerKey(p) + + shard := s.shards[s.shardIndex(ih)] + shard.Lock() + + if _, ok := shard.swarms[sk]; !ok { + shard.Unlock() + return storage.ErrResourceDoesNotExist + } + + if _, ok := shard.swarms[sk].leechers[pk]; !ok { + shard.Unlock() + return storage.ErrResourceDoesNotExist + } + + delete(shard.swarms[sk].leechers, pk) + + if len(shard.swarms[sk].seeders)|len(shard.swarms[sk].leechers) == 0 { + delete(shard.swarms, sk) + } + + shard.Unlock() + return nil +} + +func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + sk := newSwarmKey(ih, p) + pk := newPeerKey(p) + + shard := s.shards[s.shardIndex(ih)] + shard.Lock() + + if _, ok := shard.swarms[sk]; !ok { + shard.swarms[sk] = swarm{ + seeders: make(map[serializedPeer]int64), + leechers: make(map[serializedPeer]int64), + } + } + + delete(shard.swarms[sk].leechers, pk) + + shard.swarms[sk].seeders[pk] = time.Now().UnixNano() + + shard.Unlock() + return nil +} + +func (s *peerStore) CollectGarbage(cutoff time.Time) error { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + log.Printf("memory: collecting garbage. 
Cutoff time: %s", cutoff.String()) + cutoffUnix := cutoff.UnixNano() + for _, shard := range s.shards { + shard.RLock() + var swarmKeys []swarmKey + for sk := range shard.swarms { + swarmKeys = append(swarmKeys, sk) + } + shard.RUnlock() + runtime.Gosched() + + for _, sk := range swarmKeys { + shard.Lock() + + if _, stillExists := shard.swarms[sk]; !stillExists { + shard.Unlock() + runtime.Gosched() + continue + } + + for pk, mtime := range shard.swarms[sk].leechers { + if mtime <= cutoffUnix { + delete(shard.swarms[sk].leechers, pk) + } + } + + for pk, mtime := range shard.swarms[sk].seeders { + if mtime <= cutoffUnix { + delete(shard.swarms[sk].seeders, pk) + } + } + + if len(shard.swarms[sk].seeders)|len(shard.swarms[sk].leechers) == 0 { + delete(shard.swarms, sk) + } + + shard.Unlock() + runtime.Gosched() + } + + runtime.Gosched() + } + + return nil +} + +func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + sk := newSwarmKey(ih, announcer) + + shard := s.shards[s.shardIndex(ih)] + shard.RLock() + + if _, ok := shard.swarms[sk]; !ok { + shard.RUnlock() + return nil, storage.ErrResourceDoesNotExist + } + + if seeder { + // Append leechers as possible. + leechers := shard.swarms[sk].leechers + for p := range leechers { + decodedPeer := decodePeerKey(p) + if numWant == 0 { + break + } + + peers = append(peers, decodedPeer) + numWant-- + } + } else { + // Append as many seeders as possible. + seeders := shard.swarms[sk].seeders + for p := range seeders { + decodedPeer := decodePeerKey(p) + if numWant == 0 { + break + } + + peers = append(peers, decodedPeer) + numWant-- + } + + // Append leechers until we reach numWant. 
+ leechers := shard.swarms[sk].leechers + if numWant > 0 { + for p := range leechers { + decodedPeer := decodePeerKey(p) + if numWant == 0 { + break + } + + if decodedPeer.Equal(announcer) { + continue + } + peers = append(peers, decodedPeer) + numWant-- + } + } + } + + shard.RUnlock() + return +} + +func (s *peerStore) Stop() <-chan error { + toReturn := make(chan error) + go func() { + shards := make([]*peerShard, len(s.shards)) + for i := 0; i < len(s.shards); i++ { + shards[i] = &peerShard{} + shards[i].swarms = make(map[swarmKey]swarm) + } + s.shards = shards + close(s.closed) + close(toReturn) + }() + return toReturn +} diff --git a/storage/memory/peer_store_test.go b/storage/memory/peer_store_test.go new file mode 100644 index 0000000..9a00b17 --- /dev/null +++ b/storage/memory/peer_store_test.go @@ -0,0 +1,142 @@ +// Copyright 2016 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. 
+ +package memory + +import ( + "testing" + + "github.com/chihaya/chihaya/server/store" +) + +var ( + peerStoreTester = store.PreparePeerStoreTester(&peerStoreDriver{}) + peerStoreBenchmarker = store.PreparePeerStoreBenchmarker(&peerStoreDriver{}) + peerStoreTestConfig = &store.DriverConfig{} +) + +func init() { + unmarshalledConfig := struct { + Shards int + }{ + 1, + } + peerStoreTestConfig.Config = unmarshalledConfig +} + +func TestPeerStore(t *testing.T) { + peerStoreTester.TestPeerStore(t, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutSeeder(b *testing.B) { + peerStoreBenchmarker.PutSeeder(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutSeeder1KInfohash(b *testing.B) { + peerStoreBenchmarker.PutSeeder1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutSeeder1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutSeeder1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutSeeder1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder1KInfohash(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutDeleteSeeder1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutDeleteSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist(b *testing.B) { + peerStoreBenchmarker.DeleteSeederNonExist(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash(b *testing.B) { + peerStoreBenchmarker.DeleteSeederNonExist1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist1KSeeders(b *testing.B) { + 
peerStoreBenchmarker.DeleteSeederNonExist1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.DeleteSeederNonExist1KInfohash1KSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist(b *testing.B) { + peerStoreBenchmarker.GraduateLeecherNonExist(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash(b *testing.B) { + peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist1KSeeders(b *testing.B) { + peerStoreBenchmarker.GraduateLeecherNonExist1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash1KSeeders(b *testing.B) { + peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash1KLeechers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeers(b *testing.B) { + peerStoreBenchmarker.AnnouncePeers(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeers1KInfohash(b *testing.B) { + peerStoreBenchmarker.AnnouncePeers1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeersSeeder(b *testing.B) { + peerStoreBenchmarker.AnnouncePeersSeeder(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_AnnouncePeersSeeder1KInfohash(b 
*testing.B) { + peerStoreBenchmarker.AnnouncePeersSeeder1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GetSeeders(b *testing.B) { + peerStoreBenchmarker.GetSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_GetSeeders1KInfohash(b *testing.B) { + peerStoreBenchmarker.GetSeeders1KInfohash(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_NumSeeders(b *testing.B) { + peerStoreBenchmarker.NumSeeders(b, peerStoreTestConfig) +} + +func BenchmarkPeerStore_NumSeeders1KInfohash(b *testing.B) { + peerStoreBenchmarker.NumSeeders1KInfohash(b, peerStoreTestConfig) +} diff --git a/middleware/storage.go b/storage/storage.go similarity index 99% rename from middleware/storage.go rename to storage/storage.go index d9848a0..fb24a70 100644 --- a/middleware/storage.go +++ b/storage/storage.go @@ -1,4 +1,4 @@ -package middleware +package storage import ( "time" From 651ed50957893b38d18460db4e2a4afec8381301 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 11 Aug 2016 18:15:47 -0400 Subject: [PATCH 32/74] initial benchmarks. warning: /0 bug --- storage/memory/peer_store_test.go | 160 +++-------------- storage/storage_bench.go | 288 ++++++++++++++++++++++++++++++ 2 files changed, 313 insertions(+), 135 deletions(-) create mode 100644 storage/storage_bench.go diff --git a/storage/memory/peer_store_test.go b/storage/memory/peer_store_test.go index 9a00b17..df3a0c1 100644 --- a/storage/memory/peer_store_test.go +++ b/storage/memory/peer_store_test.go @@ -1,142 +1,32 @@ -// Copyright 2016 The Chihaya Authors. All rights reserved. -// Use of this source code is governed by the BSD 2-Clause license, -// which can be found in the LICENSE file. 
- package memory import ( "testing" - "github.com/chihaya/chihaya/server/store" + s "github.com/jzelinskie/trakr/storage" ) -var ( - peerStoreTester = store.PreparePeerStoreTester(&peerStoreDriver{}) - peerStoreBenchmarker = store.PreparePeerStoreBenchmarker(&peerStoreDriver{}) - peerStoreTestConfig = &store.DriverConfig{} -) - -func init() { - unmarshalledConfig := struct { - Shards int - }{ - 1, - } - peerStoreTestConfig.Config = unmarshalledConfig -} - -func TestPeerStore(t *testing.T) { - peerStoreTester.TestPeerStore(t, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder(b *testing.B) { - peerStoreBenchmarker.PutSeeder(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder1KInfohash(b *testing.B) { - peerStoreBenchmarker.PutSeeder1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutSeeder1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutSeeder1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder1KInfohash(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutDeleteSeeder1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutDeleteSeeder1KInfohash1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_DeleteSeederNonExist(b *testing.B) { - peerStoreBenchmarker.DeleteSeederNonExist(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash(b *testing.B) { - peerStoreBenchmarker.DeleteSeederNonExist1KInfohash(b, peerStoreTestConfig) -} - -func 
BenchmarkPeerStore_DeleteSeederNonExist1KSeeders(b *testing.B) { - peerStoreBenchmarker.DeleteSeederNonExist1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.DeleteSeederNonExist1KInfohash1KSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist1KSeeders(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash1KSeeders(b *testing.B) { - peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash1KLeechers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeers(b *testing.B) { - peerStoreBenchmarker.AnnouncePeers(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeers1KInfohash(b *testing.B) { - peerStoreBenchmarker.AnnouncePeers1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeersSeeder(b *testing.B) { - peerStoreBenchmarker.AnnouncePeersSeeder(b, 
peerStoreTestConfig) -} - -func BenchmarkPeerStore_AnnouncePeersSeeder1KInfohash(b *testing.B) { - peerStoreBenchmarker.AnnouncePeersSeeder1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GetSeeders(b *testing.B) { - peerStoreBenchmarker.GetSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_GetSeeders1KInfohash(b *testing.B) { - peerStoreBenchmarker.GetSeeders1KInfohash(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_NumSeeders(b *testing.B) { - peerStoreBenchmarker.NumSeeders(b, peerStoreTestConfig) -} - -func BenchmarkPeerStore_NumSeeders1KInfohash(b *testing.B) { - peerStoreBenchmarker.NumSeeders1KInfohash(b, peerStoreTestConfig) -} +func BenchmarkPut(b *testing.B) { s.Put(b, &peerStore{}) } +func BenchmarkPut1k(b *testing.B) { s.Put1k(b, &peerStore{}) } +func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, &peerStore{}) } +func BenchmarkPut1kInfohash1k(b *testing.B) { s.Put1kInfohash1k(b, &peerStore{}) } +func BenchmarkPutDelete(b *testing.B) { s.PutDelete(b, &peerStore{}) } +func BenchmarkPutDelete1k(b *testing.B) { s.PutDelete1k(b, &peerStore{}) } +func BenchmarkPutDelete1kInfohash(b *testing.B) { s.PutDelete1kInfohash(b, &peerStore{}) } +func BenchmarkPutDelete1kInfohash1k(b *testing.B) { s.PutDelete1kInfohash1k(b, &peerStore{}) } +func BenchmarkDeleteNonexist(b *testing.B) { s.DeleteNonexist(b, &peerStore{}) } +func BenchmarkDeleteNonexist1k(b *testing.B) { s.DeleteNonexist1k(b, &peerStore{}) } +func BenchmarkDeleteNonexist1kInfohash(b *testing.B) { s.DeleteNonexist1kInfohash(b, &peerStore{}) } +func BenchmarkDeleteNonexist1kInfohash1k(b *testing.B) { s.DeleteNonexist1kInfohash1k(b, &peerStore{}) } +func BenchmarkGradDelete(b *testing.B) { s.GradDelete(b, &peerStore{}) } +func BenchmarkGradDelete1k(b *testing.B) { s.GradDelete1k(b, &peerStore{}) } +func BenchmarkGradDelete1kInfohash(b *testing.B) { s.GradDelete1kInfohash(b, &peerStore{}) } +func BenchmarkGradDelete1kInfohash1k(b *testing.B) { 
s.GradDelete1kInfohash1k(b, &peerStore{}) } +func BenchmarkGradNonexist(b *testing.B) { s.GradNonexist(b, &peerStore{}) } +func BenchmarkGradNonexist1k(b *testing.B) { s.GradNonexist1k(b, &peerStore{}) } +func BenchmarkGradNonexist1kInfohash(b *testing.B) { s.GradNonexist1kInfohash(b, &peerStore{}) } +func BenchmarkGradNonexist1kInfohash1k(b *testing.B) { s.GradNonexist1kInfohash1k(b, &peerStore{}) } +func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, &peerStore{}) } +func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, &peerStore{}) } +func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, &peerStore{}) } +func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, &peerStore{}) } diff --git a/storage/storage_bench.go b/storage/storage_bench.go new file mode 100644 index 0000000..c76bd41 --- /dev/null +++ b/storage/storage_bench.go @@ -0,0 +1,288 @@ +package storage + +import ( + "fmt" + "net" + "testing" + + "github.com/jzelinskie/trakr/bittorrent" +) + +type benchData struct { + infohashes [1000]bittorrent.InfoHash + peers [1000]bittorrent.Peer +} + +func generateInfohashes() (a [1000]bittorrent.InfoHash) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = bittorrent.InfoHash([20]byte{b[0], b[1]}) + } + + return +} + +func generatePeers() (a [1000]bittorrent.Peer) { + b := make([]byte, 2) + for i := range a { + b[0] = byte(i) + b[1] = byte(i >> 8) + a[i] = bittorrent.Peer{ + ID: bittorrent.PeerID([20]byte{b[0], b[1]}), + IP: net.ParseIP(fmt.Sprintf("64.%d.%d.64", b[0], b[1])), + Port: uint16(i), + } + } + + return +} + +type executionFunc func(int, PeerStore, *benchData) error +type setupFunc func(PeerStore, *benchData) error + +func runBenchmark(b *testing.B, ps PeerStore, sf setupFunc, ef executionFunc) { + bd := &benchData{generateInfohashes(), generatePeers()} + if sf != nil { + err := sf(ps, bd) + if err != nil { + b.Fatal(err) + } 
+ } + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := ef(i, ps, bd) + if err != nil { + b.Fatal(err) + } + } + b.StopTimer() +} + +func Put(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.PutSeeder(bd.infohashes[0], bd.peers[0]) + }) +} + +func Put1k(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000]) + }) +} + +func Put1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0]) + }) +} + +func Put1kInfohash1k(b *testing.B, ps PeerStore) { + j := 0 + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) + j += 3 + return err + }) +} + +func PutDelete(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutSeeder(bd.infohashes[0], bd.peers[0]) + if err != nil { + return err + } + return ps.DeleteSeeder(bd.infohashes[0], bd.peers[0]) + }) +} + +func PutDelete1k(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000]) + if err != nil { + return err + } + return ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000]) + }) +} + +func PutDelete1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0]) + if err != nil { + } + return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) + }) +} + +func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) { + j := 0 + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) + if err != nil { + 
return err + } + err = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) + j += 3 + return err + }) +} + +func DeleteNonexist(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.DeleteSeeder(bd.infohashes[0], bd.peers[0]) + }) +} + +func DeleteNonexist1k(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000]) + }) +} + +func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) + }) +} + +func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) { + j := 0 + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) + j += 3 + return err + }) +} + +func GradNonexist(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.GraduateLeecher(bd.infohashes[0], bd.peers[0]) + }) +} + +func GradNonexist1k(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000]) + }) +} + +func GradNonexist1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + return ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0]) + }) +} + +func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) { + j := 0 + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[j%1000]) + j += 3 + return err + }) +} + +func GradDelete(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutLeecher(bd.infohashes[0], bd.peers[0]) + if err != 
nil { + return err + } + err = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0]) + if err != nil { + return err + } + return ps.DeleteSeeder(bd.infohashes[0], bd.peers[0]) + }) +} + +func GradDelete1k(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutLeecher(bd.infohashes[0], bd.peers[i%1000]) + if err != nil { + return err + } + err = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000]) + if err != nil { + return err + } + return ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000]) + }) +} + +func GradDelete1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[0]) + if err != nil { + return err + } + err = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0]) + if err != nil { + return err + } + return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) + }) +} + +func GradDelete1kInfohash1k(b *testing.B, ps PeerStore) { + j := 0 + runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[j%1000]) + if err != nil { + return err + } + err = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[j%1000]) + if err != nil { + return err + } + err = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) + j += 3 + return err + }) +} + +func generateAnnounceData(ps PeerStore, bd *benchData) error { + for i := 0; i < 1000; i++ { + for j := 0; j < 1000; j++ { + var err error + if j < 1000/2 { + err = ps.PutLeecher(bd.infohashes[i], bd.peers[j]) + } else { + err = ps.PutSeeder(bd.infohashes[i], bd.peers[j]) + } + if err != nil { + return err + } + } + } + return nil +} + +func AnnounceLeecher(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + _, err := ps.AnnouncePeers(bd.infohashes[0], false, 50, bd.peers[0]) + return err + }) +} + +func 
AnnounceLeecher1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + _, err := ps.AnnouncePeers(bd.infohashes[i%1000], false, 50, bd.peers[0]) + return err + }) +} + +func AnnounceSeeder(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + _, err := ps.AnnouncePeers(bd.infohashes[0], true, 50, bd.peers[0]) + return err + }) +} + +func AnnounceSeeder1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + _, err := ps.AnnouncePeers(bd.infohashes[i%1000], true, 50, bd.peers[0]) + return err + }) +} From 6a451071932b97807bc4070ba83d130d4d357139 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Thu, 11 Aug 2016 20:02:10 -0400 Subject: [PATCH 33/74] make benchmarks parallel, fix memory benchmarks --- storage/memory/peer_store_test.go | 56 +++++----- storage/storage_bench.go | 168 +++++++++++++++++------------- 2 files changed, 129 insertions(+), 95 deletions(-) diff --git a/storage/memory/peer_store_test.go b/storage/memory/peer_store_test.go index df3a0c1..d85be46 100644 --- a/storage/memory/peer_store_test.go +++ b/storage/memory/peer_store_test.go @@ -6,27 +6,35 @@ import ( s "github.com/jzelinskie/trakr/storage" ) -func BenchmarkPut(b *testing.B) { s.Put(b, &peerStore{}) } -func BenchmarkPut1k(b *testing.B) { s.Put1k(b, &peerStore{}) } -func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, &peerStore{}) } -func BenchmarkPut1kInfohash1k(b *testing.B) { s.Put1kInfohash1k(b, &peerStore{}) } -func BenchmarkPutDelete(b *testing.B) { s.PutDelete(b, &peerStore{}) } -func BenchmarkPutDelete1k(b *testing.B) { s.PutDelete1k(b, &peerStore{}) } -func BenchmarkPutDelete1kInfohash(b *testing.B) { s.PutDelete1kInfohash(b, &peerStore{}) } -func BenchmarkPutDelete1kInfohash1k(b *testing.B) { s.PutDelete1kInfohash1k(b, &peerStore{}) } -func 
BenchmarkDeleteNonexist(b *testing.B) { s.DeleteNonexist(b, &peerStore{}) } -func BenchmarkDeleteNonexist1k(b *testing.B) { s.DeleteNonexist1k(b, &peerStore{}) } -func BenchmarkDeleteNonexist1kInfohash(b *testing.B) { s.DeleteNonexist1kInfohash(b, &peerStore{}) } -func BenchmarkDeleteNonexist1kInfohash1k(b *testing.B) { s.DeleteNonexist1kInfohash1k(b, &peerStore{}) } -func BenchmarkGradDelete(b *testing.B) { s.GradDelete(b, &peerStore{}) } -func BenchmarkGradDelete1k(b *testing.B) { s.GradDelete1k(b, &peerStore{}) } -func BenchmarkGradDelete1kInfohash(b *testing.B) { s.GradDelete1kInfohash(b, &peerStore{}) } -func BenchmarkGradDelete1kInfohash1k(b *testing.B) { s.GradDelete1kInfohash1k(b, &peerStore{}) } -func BenchmarkGradNonexist(b *testing.B) { s.GradNonexist(b, &peerStore{}) } -func BenchmarkGradNonexist1k(b *testing.B) { s.GradNonexist1k(b, &peerStore{}) } -func BenchmarkGradNonexist1kInfohash(b *testing.B) { s.GradNonexist1kInfohash(b, &peerStore{}) } -func BenchmarkGradNonexist1kInfohash1k(b *testing.B) { s.GradNonexist1kInfohash1k(b, &peerStore{}) } -func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, &peerStore{}) } -func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, &peerStore{}) } -func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, &peerStore{}) } -func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, &peerStore{}) } +func createNew() s.PeerStore { + ps, err := New(Config{ShardCount: 1024}) + if err != nil { + panic(err) + } + return ps +} + +func BenchmarkPut(b *testing.B) { s.Put(b, createNew()) } +func BenchmarkPut1k(b *testing.B) { s.Put1k(b, createNew()) } +func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, createNew()) } +func BenchmarkPut1kInfohash1k(b *testing.B) { s.Put1kInfohash1k(b, createNew()) } +func BenchmarkPutDelete(b *testing.B) { s.PutDelete(b, createNew()) } +func BenchmarkPutDelete1k(b *testing.B) { s.PutDelete1k(b, 
createNew()) } +func BenchmarkPutDelete1kInfohash(b *testing.B) { s.PutDelete1kInfohash(b, createNew()) } +func BenchmarkPutDelete1kInfohash1k(b *testing.B) { s.PutDelete1kInfohash1k(b, createNew()) } +func BenchmarkDeleteNonexist(b *testing.B) { s.DeleteNonexist(b, createNew()) } +func BenchmarkDeleteNonexist1k(b *testing.B) { s.DeleteNonexist1k(b, createNew()) } +func BenchmarkDeleteNonexist1kInfohash(b *testing.B) { s.DeleteNonexist1kInfohash(b, createNew()) } +func BenchmarkDeleteNonexist1kInfohash1k(b *testing.B) { s.DeleteNonexist1kInfohash1k(b, createNew()) } +func BenchmarkPutGradDelete(b *testing.B) { s.PutGradDelete(b, createNew()) } +func BenchmarkPutGradDelete1k(b *testing.B) { s.PutGradDelete1k(b, createNew()) } +func BenchmarkPutGradDelete1kInfohash(b *testing.B) { s.PutGradDelete1kInfohash(b, createNew()) } +func BenchmarkPutGradDelete1kInfohash1k(b *testing.B) { s.PutGradDelete1kInfohash1k(b, createNew()) } +func BenchmarkGradNonexist(b *testing.B) { s.GradNonexist(b, createNew()) } +func BenchmarkGradNonexist1k(b *testing.B) { s.GradNonexist1k(b, createNew()) } +func BenchmarkGradNonexist1kInfohash(b *testing.B) { s.GradNonexist1kInfohash(b, createNew()) } +func BenchmarkGradNonexist1kInfohash1k(b *testing.B) { s.GradNonexist1kInfohash1k(b, createNew()) } +func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, createNew()) } +func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, createNew()) } +func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, createNew()) } +func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, createNew()) } diff --git a/storage/storage_bench.go b/storage/storage_bench.go index c76bd41..316a425 100644 --- a/storage/storage_bench.go +++ b/storage/storage_bench.go @@ -1,8 +1,10 @@ package storage import ( - "fmt" + "math/rand" "net" + "runtime" + "sync/atomic" "testing" "github.com/jzelinskie/trakr/bittorrent" @@ -14,25 +16,37 @@ type 
benchData struct { } func generateInfohashes() (a [1000]bittorrent.InfoHash) { - b := make([]byte, 2) + r := rand.New(rand.NewSource(0)) for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) - a[i] = bittorrent.InfoHash([20]byte{b[0], b[1]}) + b := [20]byte{} + n, err := r.Read(b[:]) + if err != nil || n != 20 { + panic("unable to create random bytes") + } + a[i] = bittorrent.InfoHash(b) } return } func generatePeers() (a [1000]bittorrent.Peer) { - b := make([]byte, 2) + r := rand.New(rand.NewSource(0)) for i := range a { - b[0] = byte(i) - b[1] = byte(i >> 8) + ip := make([]byte, 4) + n, err := r.Read(ip) + if err != nil || n != 4 { + panic("unable to create random bytes") + } + id := [20]byte{} + n, err = r.Read(id[:]) + if err != nil || n != 20 { + panic("unable to create random bytes") + } + port := uint16(r.Uint32()) a[i] = bittorrent.Peer{ - ID: bittorrent.PeerID([20]byte{b[0], b[1]}), - IP: net.ParseIP(fmt.Sprintf("64.%d.%d.64", b[0], b[1])), - Port: uint16(i), + ID: bittorrent.PeerID(id), + IP: net.IP(ip), + Port: port, } } @@ -42,53 +56,67 @@ func generatePeers() (a [1000]bittorrent.Peer) { type executionFunc func(int, PeerStore, *benchData) error type setupFunc func(PeerStore, *benchData) error -func runBenchmark(b *testing.B, ps PeerStore, sf setupFunc, ef executionFunc) { +func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) { bd := &benchData{generateInfohashes(), generatePeers()} + spacing := int32(1000 / runtime.NumCPU()) if sf != nil { err := sf(ps, bd) if err != nil { b.Fatal(err) } } + offset := int32(0) + b.ResetTimer() - for i := 0; i < b.N; i++ { - err := ef(i, ps, bd) - if err != nil { - b.Fatal(err) + if parallel { + b.RunParallel(func(pb *testing.PB) { + i := int(atomic.AddInt32(&offset, spacing)) + for pb.Next() { + err := ef(i, ps, bd) + if err != nil { + b.Fatal(err) + } + i++ + } + }) + } else { + for i := 0; i < b.N; i++ { + err := ef(i, ps, bd) + if err != nil { + b.Fatal(err) + } } } 
b.StopTimer() } func Put(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { return ps.PutSeeder(bd.infohashes[0], bd.peers[0]) }) } func Put1k(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { return ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000]) }) } func Put1kInfohash(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { return ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0]) }) } func Put1kInfohash1k(b *testing.B, ps PeerStore) { - j := 0 - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) - j += 3 + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) return err }) } func PutDelete(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[0], bd.peers[0]) if err != nil { return err @@ -98,7 +126,7 @@ func PutDelete(b *testing.B, ps PeerStore) { } func PutDelete1k(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000]) if err != nil { return err @@ -108,7 +136,7 @@ func PutDelete1k(b *testing.B, ps PeerStore) { } func PutDelete1kInfohash(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) 
error { + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0]) if err != nil { } @@ -117,74 +145,74 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) { } func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) { - j := 0 - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) if err != nil { return err } - err = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) - j += 3 + err = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) return err }) } func DeleteNonexist(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - return ps.DeleteSeeder(bd.infohashes[0], bd.peers[0]) + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.DeleteSeeder(bd.infohashes[0], bd.peers[0]) + return nil }) } func DeleteNonexist1k(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - return ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000]) + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000]) + return nil }) } func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) + return nil }) } func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) { - j := 0 - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - err := 
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) - j += 3 - return err + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) + return nil }) } func GradNonexist(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - return ps.GraduateLeecher(bd.infohashes[0], bd.peers[0]) + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.GraduateLeecher(bd.infohashes[0], bd.peers[0]) + return nil }) } func GradNonexist1k(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - return ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000]) + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000]) + return nil }) } func GradNonexist1kInfohash(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - return ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0]) + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0]) + return nil }) } func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) { - j := 0 - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - err := ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[j%1000]) - j += 3 - return err + runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { + ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) + return nil }) } -func GradDelete(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { +func PutGradDelete(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutLeecher(bd.infohashes[0], bd.peers[0]) if err != 
nil { return err @@ -197,8 +225,8 @@ func GradDelete(b *testing.B, ps PeerStore) { }) } -func GradDelete1k(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { +func PutGradDelete1k(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutLeecher(bd.infohashes[0], bd.peers[i%1000]) if err != nil { return err @@ -211,8 +239,8 @@ func GradDelete1k(b *testing.B, ps PeerStore) { }) } -func GradDelete1kInfohash(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { +func PutGradDelete1kInfohash(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[0]) if err != nil { return err @@ -225,24 +253,22 @@ func GradDelete1kInfohash(b *testing.B, ps PeerStore) { }) } -func GradDelete1kInfohash1k(b *testing.B, ps PeerStore) { - j := 0 - runBenchmark(b, ps, nil, func(i int, ps PeerStore, bd *benchData) error { - err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[j%1000]) +func PutGradDelete1kInfohash1k(b *testing.B, ps PeerStore) { + runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { + err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) if err != nil { return err } - err = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[j%1000]) + err = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) if err != nil { return err } - err = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[j%1000]) - j += 3 + err = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) return err }) } -func generateAnnounceData(ps PeerStore, bd *benchData) error { +func putPeers(ps PeerStore, bd *benchData) error { for i := 0; i < 1000; i++ { for j := 0; j < 1000; j++ { var err error @@ -260,28 +286,28 @@ func generateAnnounceData(ps PeerStore, bd *benchData) error { } 
func AnnounceLeecher(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[0], false, 50, bd.peers[0]) return err }) } func AnnounceLeecher1kInfohash(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[i%1000], false, 50, bd.peers[0]) return err }) } func AnnounceSeeder(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[0], true, 50, bd.peers[0]) return err }) } func AnnounceSeeder1kInfohash(b *testing.B, ps PeerStore) { - runBenchmark(b, ps, generateAnnounceData, func(i int, ps PeerStore, bd *benchData) error { + runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[i%1000], true, 50, bd.peers[0]) return err }) From e1cf159d9cabaecc5d202e7816bfc99ca4d8e0f0 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Thu, 11 Aug 2016 20:35:39 -0400 Subject: [PATCH 34/74] separate IPv4 and IPv6 swarms on by shards --- storage/memory/peer_store.go | 124 +++++++++++++++-------------------- 1 file changed, 52 insertions(+), 72 deletions(-) diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go index 80aec6b..9aa47fc 100644 --- a/storage/memory/peer_store.go +++ b/storage/memory/peer_store.go @@ -24,10 +24,9 @@ func New(cfg Config) (storage.PeerStore, error) { shardCount = cfg.ShardCount } - shards := make([]*peerShard, shardCount) - for i := 0; i < shardCount; i++ { - shards[i] = &peerShard{} - 
shards[i].swarms = make(map[swarmKey]swarm) + shards := make([]*peerShard, shardCount*2) + for i := 0; i < shardCount*2; i++ { + shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)} } return &peerStore{ @@ -38,23 +37,8 @@ func New(cfg Config) (storage.PeerStore, error) { type serializedPeer string -type swarmKey [21]byte - -func newSwarmKey(ih bittorrent.InfoHash, p bittorrent.Peer) (key swarmKey) { - for i, ihbyte := range ih { - key[i] = ihbyte - } - if len(p.IP) == net.IPv4len { - key[20] = byte(4) - } else { - key[20] = byte(6) - } - - return -} - type peerShard struct { - swarms map[swarmKey]swarm + swarms map[bittorrent.InfoHash]swarm sync.RWMutex } @@ -71,8 +55,12 @@ type peerStore struct { var _ storage.PeerStore = &peerStore{} -func (s *peerStore) shardIndex(infoHash bittorrent.InfoHash) uint32 { - return binary.BigEndian.Uint32(infoHash[:4]) % uint32(len(s.shards)) +func (s *peerStore) shardIndex(infoHash bittorrent.InfoHash, p bittorrent.Peer) uint32 { + idx := binary.BigEndian.Uint32(infoHash[:4]) % uint32(len(s.shards)) + if len(p.IP) == net.IPv6len { + idx += idx + uint32(len(s.shards)/2) + } + return idx } func newPeerKey(p bittorrent.Peer) serializedPeer { @@ -99,20 +87,19 @@ func (s *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error { default: } - sk := newSwarmKey(ih, p) pk := newPeerKey(p) - shard := s.shards[s.shardIndex(ih)] + shard := s.shards[s.shardIndex(ih, p)] shard.Lock() - if _, ok := shard.swarms[sk]; !ok { - shard.swarms[sk] = swarm{ + if _, ok := shard.swarms[ih]; !ok { + shard.swarms[ih] = swarm{ seeders: make(map[serializedPeer]int64), leechers: make(map[serializedPeer]int64), } } - shard.swarms[sk].seeders[pk] = time.Now().UnixNano() + shard.swarms[ih].seeders[pk] = time.Now().UnixNano() shard.Unlock() return nil @@ -125,26 +112,25 @@ func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) erro default: } - sk := newSwarmKey(ih, p) pk := newPeerKey(p) - shard := 
s.shards[s.shardIndex(ih)] + shard := s.shards[s.shardIndex(ih, p)] shard.Lock() - if _, ok := shard.swarms[sk]; !ok { + if _, ok := shard.swarms[ih]; !ok { shard.Unlock() return storage.ErrResourceDoesNotExist } - if _, ok := shard.swarms[sk].seeders[pk]; !ok { + if _, ok := shard.swarms[ih].seeders[pk]; !ok { shard.Unlock() return storage.ErrResourceDoesNotExist } - delete(shard.swarms[sk].seeders, pk) + delete(shard.swarms[ih].seeders, pk) - if len(shard.swarms[sk].seeders)|len(shard.swarms[sk].leechers) == 0 { - delete(shard.swarms, sk) + if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 { + delete(shard.swarms, ih) } shard.Unlock() @@ -158,20 +144,19 @@ func (s *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error default: } - sk := newSwarmKey(ih, p) pk := newPeerKey(p) - shard := s.shards[s.shardIndex(ih)] + shard := s.shards[s.shardIndex(ih, p)] shard.Lock() - if _, ok := shard.swarms[sk]; !ok { - shard.swarms[sk] = swarm{ + if _, ok := shard.swarms[ih]; !ok { + shard.swarms[ih] = swarm{ seeders: make(map[serializedPeer]int64), leechers: make(map[serializedPeer]int64), } } - shard.swarms[sk].leechers[pk] = time.Now().UnixNano() + shard.swarms[ih].leechers[pk] = time.Now().UnixNano() shard.Unlock() return nil @@ -184,26 +169,25 @@ func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) err default: } - sk := newSwarmKey(ih, p) pk := newPeerKey(p) - shard := s.shards[s.shardIndex(ih)] + shard := s.shards[s.shardIndex(ih, p)] shard.Lock() - if _, ok := shard.swarms[sk]; !ok { + if _, ok := shard.swarms[ih]; !ok { shard.Unlock() return storage.ErrResourceDoesNotExist } - if _, ok := shard.swarms[sk].leechers[pk]; !ok { + if _, ok := shard.swarms[ih].leechers[pk]; !ok { shard.Unlock() return storage.ErrResourceDoesNotExist } - delete(shard.swarms[sk].leechers, pk) + delete(shard.swarms[ih].leechers, pk) - if len(shard.swarms[sk].seeders)|len(shard.swarms[sk].leechers) == 0 { - delete(shard.swarms, sk) + 
if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 { + delete(shard.swarms, ih) } shard.Unlock() @@ -217,22 +201,21 @@ func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) e default: } - sk := newSwarmKey(ih, p) pk := newPeerKey(p) - shard := s.shards[s.shardIndex(ih)] + shard := s.shards[s.shardIndex(ih, p)] shard.Lock() - if _, ok := shard.swarms[sk]; !ok { - shard.swarms[sk] = swarm{ + if _, ok := shard.swarms[ih]; !ok { + shard.swarms[ih] = swarm{ seeders: make(map[serializedPeer]int64), leechers: make(map[serializedPeer]int64), } } - delete(shard.swarms[sk].leechers, pk) + delete(shard.swarms[ih].leechers, pk) - shard.swarms[sk].seeders[pk] = time.Now().UnixNano() + shard.swarms[ih].seeders[pk] = time.Now().UnixNano() shard.Unlock() return nil @@ -249,36 +232,36 @@ func (s *peerStore) CollectGarbage(cutoff time.Time) error { cutoffUnix := cutoff.UnixNano() for _, shard := range s.shards { shard.RLock() - var swarmKeys []swarmKey - for sk := range shard.swarms { - swarmKeys = append(swarmKeys, sk) + var infohashes []bittorrent.InfoHash + for ih := range shard.swarms { + infohashes = append(infohashes, ih) } shard.RUnlock() runtime.Gosched() - for _, sk := range swarmKeys { + for _, ih := range infohashes { shard.Lock() - if _, stillExists := shard.swarms[sk]; !stillExists { + if _, stillExists := shard.swarms[ih]; !stillExists { shard.Unlock() runtime.Gosched() continue } - for pk, mtime := range shard.swarms[sk].leechers { + for pk, mtime := range shard.swarms[ih].leechers { if mtime <= cutoffUnix { - delete(shard.swarms[sk].leechers, pk) + delete(shard.swarms[ih].leechers, pk) } } - for pk, mtime := range shard.swarms[sk].seeders { + for pk, mtime := range shard.swarms[ih].seeders { if mtime <= cutoffUnix { - delete(shard.swarms[sk].seeders, pk) + delete(shard.swarms[ih].seeders, pk) } } - if len(shard.swarms[sk].seeders)|len(shard.swarms[sk].leechers) == 0 { - delete(shard.swarms, sk) + if 
len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 { + delete(shard.swarms, ih) } shard.Unlock() @@ -298,19 +281,17 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i default: } - sk := newSwarmKey(ih, announcer) - - shard := s.shards[s.shardIndex(ih)] + shard := s.shards[s.shardIndex(ih, announcer)] shard.RLock() - if _, ok := shard.swarms[sk]; !ok { + if _, ok := shard.swarms[ih]; !ok { shard.RUnlock() return nil, storage.ErrResourceDoesNotExist } if seeder { // Append leechers as possible. - leechers := shard.swarms[sk].leechers + leechers := shard.swarms[ih].leechers for p := range leechers { decodedPeer := decodePeerKey(p) if numWant == 0 { @@ -322,7 +303,7 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i } } else { // Append as many seeders as possible. - seeders := shard.swarms[sk].seeders + seeders := shard.swarms[ih].seeders for p := range seeders { decodedPeer := decodePeerKey(p) if numWant == 0 { @@ -334,7 +315,7 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i } // Append leechers until we reach numWant. 
- leechers := shard.swarms[sk].leechers + leechers := shard.swarms[ih].leechers if numWant > 0 { for p := range leechers { decodedPeer := decodePeerKey(p) @@ -360,8 +341,7 @@ func (s *peerStore) Stop() <-chan error { go func() { shards := make([]*peerShard, len(s.shards)) for i := 0; i < len(s.shards); i++ { - shards[i] = &peerShard{} - shards[i].swarms = make(map[swarmKey]swarm) + shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)} } s.shards = shards close(s.closed) From 94dc902b20123151f653bc7ecf205367af651c64 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Thu, 11 Aug 2016 20:37:31 -0400 Subject: [PATCH 35/74] comment/lint clean PeerStore --- storage/memory/peer_store.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go index 9aa47fc..8a2eeb2 100644 --- a/storage/memory/peer_store.go +++ b/storage/memory/peer_store.go @@ -14,10 +14,14 @@ import ( // TODO(jzelinskie): separate ipv4 and ipv6 swarms +// Config holds the configuration of a memory PeerStore. type Config struct { ShardCount int `yaml:"shard_count"` } +// New creates a new memory PeerStore. +// +// The PeerStore will have at least one shard. 
func New(cfg Config) (storage.PeerStore, error) { shardCount := 1 if cfg.ShardCount > 0 { From d3f153c938c5ae77c9ff9f39c2e39cbb70e567e9 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Thu, 11 Aug 2016 20:39:09 -0400 Subject: [PATCH 36/74] close PeerStore after benchmarks --- storage/storage_bench.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/storage/storage_bench.go b/storage/storage_bench.go index 316a425..70429ae 100644 --- a/storage/storage_bench.go +++ b/storage/storage_bench.go @@ -88,6 +88,11 @@ func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef ex } } b.StopTimer() + + errChan := ps.Stop() + for err := range errChan { + b.Fatal(err) + } } func Put(b *testing.B, ps PeerStore) { From 6fddcb8eeafe77b469c6ec0948793db212e6f6b9 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 15 Aug 2016 23:44:06 -0400 Subject: [PATCH 37/74] remove gc from storage, but implement it in memory --- cmd/trakr/main.go | 16 ++-- example_config.yaml | 4 +- storage/memory/peer_store.go | 147 ++++++++++++++++++++--------------- storage/storage.go | 7 -- 4 files changed, 95 insertions(+), 79 deletions(-) diff --git a/cmd/trakr/main.go b/cmd/trakr/main.go index 3b34214..de146ca 100644 --- a/cmd/trakr/main.go +++ b/cmd/trakr/main.go @@ -22,10 +22,11 @@ import ( type ConfigFile struct { MainConfigBlock struct { + middleware.Config PrometheusAddr string `yaml:"prometheus_addr"` HTTPConfig httpfrontend.Config `yaml:"http"` UDPConfig udpfrontend.Config `yaml:"udp"` - middleware.Config + Storage memory.Config `yaml:"storage"` } `yaml:"trakr"` } @@ -95,15 +96,18 @@ func main() { } }() - // TODO create PeerStore - // TODO create Hooks - logic := middleware.NewLogic(cfg.Config, nil, nil, nil, nil, nil) + // Force the compiler to enforce memory against the storage interface. + peerStore, err := memory.New(cfg.Storage) if err != nil { return err } - // Force the compiler to enforce memory against the storage interface. 
- _, _ = memory.New(memory.Config{1}) + // TODO create PeerStore + // TODO create Hooks + logic := middleware.NewLogic(cfg.Config, peerStore, nil, nil, nil, nil) + if err != nil { + return err + } errChan := make(chan error) closedChan := make(chan struct{}) diff --git a/example_config.yaml b/example_config.yaml index 36e769a..3f89637 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -18,8 +18,8 @@ trakr: name: memory config: shards: 1 - gc_interval: 15m - gc_expiration: 15m + gc_interval: 14m + peer_lifetime: 15m prehooks: - name: jwt diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go index 8a2eeb2..3f7f9c2 100644 --- a/storage/memory/peer_store.go +++ b/storage/memory/peer_store.go @@ -16,27 +16,41 @@ import ( // Config holds the configuration of a memory PeerStore. type Config struct { - ShardCount int `yaml:"shard_count"` + GarbageCollectionInterval time.Duration `yaml:"gc_interval"` + PeerLifetime time.Duration `yaml:"peer_lifetime"` + ShardCount int `yaml:"shard_count"` } -// New creates a new memory PeerStore. -// -// The PeerStore will have at least one shard. +// New creates a new PeerStore backed by memory. 
func New(cfg Config) (storage.PeerStore, error) { shardCount := 1 if cfg.ShardCount > 0 { shardCount = cfg.ShardCount } - shards := make([]*peerShard, shardCount*2) - for i := 0; i < shardCount*2; i++ { - shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)} + ps := &peerStore{ + shards: make([]*peerShard, shardCount*2), + closed: make(chan struct{}), } - return &peerStore{ - shards: shards, - closed: make(chan struct{}), - }, nil + for i := 0; i < shardCount*2; i++ { + ps.shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)} + } + + go func() { + for { + select { + case <-ps.closed: + return + case <-time.After(cfg.GarbageCollectionInterval): + before := time.Now().Add(-cfg.GarbageCollectionInterval) + log.Println("memory: purging peers with no announces since ", before) + ps.collectGarbage(before) + } + } + }() + + return ps, nil } type serializedPeer string @@ -225,59 +239,6 @@ func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) e return nil } -func (s *peerStore) CollectGarbage(cutoff time.Time) error { - select { - case <-s.closed: - panic("attempted to interact with stopped memory store") - default: - } - - log.Printf("memory: collecting garbage. 
Cutoff time: %s", cutoff.String()) - cutoffUnix := cutoff.UnixNano() - for _, shard := range s.shards { - shard.RLock() - var infohashes []bittorrent.InfoHash - for ih := range shard.swarms { - infohashes = append(infohashes, ih) - } - shard.RUnlock() - runtime.Gosched() - - for _, ih := range infohashes { - shard.Lock() - - if _, stillExists := shard.swarms[ih]; !stillExists { - shard.Unlock() - runtime.Gosched() - continue - } - - for pk, mtime := range shard.swarms[ih].leechers { - if mtime <= cutoffUnix { - delete(shard.swarms[ih].leechers, pk) - } - } - - for pk, mtime := range shard.swarms[ih].seeders { - if mtime <= cutoffUnix { - delete(shard.swarms[ih].seeders, pk) - } - } - - if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 { - delete(shard.swarms, ih) - } - - shard.Unlock() - runtime.Gosched() - } - - runtime.Gosched() - } - - return nil -} - func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) { select { case <-s.closed: @@ -340,6 +301,64 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i return } +// collectGarbage deletes all Peers from the PeerStore which are older than the +// cutoff time. +// +// This function must be able to execute while other methods on this interface +// are being executed in parallel. +func (s *peerStore) collectGarbage(cutoff time.Time) error { + select { + case <-s.closed: + panic("attempted to interact with stopped memory store") + default: + } + + log.Printf("memory: collecting garbage. 
Cutoff time: %s", cutoff.String()) + cutoffUnix := cutoff.UnixNano() + for _, shard := range s.shards { + shard.RLock() + var infohashes []bittorrent.InfoHash + for ih := range shard.swarms { + infohashes = append(infohashes, ih) + } + shard.RUnlock() + runtime.Gosched() + + for _, ih := range infohashes { + shard.Lock() + + if _, stillExists := shard.swarms[ih]; !stillExists { + shard.Unlock() + runtime.Gosched() + continue + } + + for pk, mtime := range shard.swarms[ih].leechers { + if mtime <= cutoffUnix { + delete(shard.swarms[ih].leechers, pk) + } + } + + for pk, mtime := range shard.swarms[ih].seeders { + if mtime <= cutoffUnix { + delete(shard.swarms[ih].seeders, pk) + } + } + + if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 { + delete(shard.swarms, ih) + } + + shard.Unlock() + runtime.Gosched() + } + + runtime.Gosched() + } + + return nil +} + func (s *peerStore) Stop() <-chan error { toReturn := make(chan error) go func() { diff --git a/storage/storage.go b/storage/storage.go index fb24a70..fe0618b 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -1,8 +1,6 @@ package storage import ( - "time" - "github.com/jzelinskie/trakr/bittorrent" "github.com/jzelinskie/trakr/stopper" ) @@ -53,11 +51,6 @@ type PeerStore interface { // - if seeder is false, should ideally return more seeders than leechers AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, p bittorrent.Peer) (peers []bittorrent.Peer, err error) - // CollectGarbage deletes all Peers from the PeerStore which are older than - // the cutoff time. This function must be able to execute while other methods - // on this interface are being executed in parallel. - CollectGarbage(cutoff time.Time) error - // Stopper is an interface that expects a Stop method to stops the PeerStore. // For more details see the documentation in the stopper package. 
stopper.Stopper From a553ded043f2cc954d6fa1ed91b337f1899ed35a Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 20:32:38 -0400 Subject: [PATCH 38/74] memory: add max numwant --- example_config.yaml | 9 ++++----- storage/memory/peer_store.go | 17 +++++++++++------ 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/example_config.yaml b/example_config.yaml index 3f89637..182591f 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -15,11 +15,10 @@ trakr: allow_ip_spoofing: false storage: - name: memory - config: - shards: 1 - gc_interval: 14m - peer_lifetime: 15m + gc_interval: 14m + peer_lifetime: 15m + shards: 1 + max_numwant: 100 prehooks: - name: jwt diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go index 3f7f9c2..ab18273 100644 --- a/storage/memory/peer_store.go +++ b/storage/memory/peer_store.go @@ -12,13 +12,12 @@ import ( "github.com/jzelinskie/trakr/storage" ) -// TODO(jzelinskie): separate ipv4 and ipv6 swarms - // Config holds the configuration of a memory PeerStore. type Config struct { GarbageCollectionInterval time.Duration `yaml:"gc_interval"` PeerLifetime time.Duration `yaml:"peer_lifetime"` ShardCount int `yaml:"shard_count"` + MaxNumWant int `yaml:"max_numwant"` } // New creates a new PeerStore backed by memory. 
@@ -29,8 +28,9 @@ func New(cfg Config) (storage.PeerStore, error) { } ps := &peerStore{ - shards: make([]*peerShard, shardCount*2), - closed: make(chan struct{}), + shards: make([]*peerShard, shardCount*2), + closed: make(chan struct{}), + maxNumWant: cfg.MaxNumWant, } for i := 0; i < shardCount*2; i++ { @@ -67,8 +67,9 @@ type swarm struct { } type peerStore struct { - shards []*peerShard - closed chan struct{} + shards []*peerShard + closed chan struct{} + maxNumWant int } var _ storage.PeerStore = &peerStore{} @@ -246,6 +247,10 @@ func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant i default: } + if numWant > s.maxNumWant { + numWant = s.maxNumWant + } + shard := s.shards[s.shardIndex(ih, announcer)] shard.RLock() From 040a3294b13fa93c68daff6f52761858fe0cd54f Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 21:01:58 -0400 Subject: [PATCH 39/74] middleware: add clientwhitelist --- middleware/clientwhitelist/clientwhitelist.go | 44 +++++++++++++++++++ middleware/hooks.go | 4 +- 2 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 middleware/clientwhitelist/clientwhitelist.go diff --git a/middleware/clientwhitelist/clientwhitelist.go b/middleware/clientwhitelist/clientwhitelist.go new file mode 100644 index 0000000..1e06ac3 --- /dev/null +++ b/middleware/clientwhitelist/clientwhitelist.go @@ -0,0 +1,44 @@ +// Package clientwhitelist implements a Hook that fails an Announce if the +// client's PeerID does not begin with any of the approved prefixes. +package clientwhitelist + +import ( + "context" + + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/middleware" +) + +// ClientUnapproved is the error returned when a client's PeerID fails to +// begin with an approved prefix. 
+var ClientUnapproved = bittorrent.ClientError("unapproved client") + +type Hook struct { + approved map[bittorrent.ClientID]struct{} +} + +func NewHook(approved []string) { + h := &hook{ + approved: make(map[bittorrent.ClientID]struct{}), + } + + for _, clientID := range approved { + h.approved[bittorrent.NewClientID(clientID)] = struct{}{} + } + + return h +} + +var _ middleware.Hook = &Hook{} + +func (h *Hook) HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error { + if _, found := h.approved[bittorrent.NewClientID(req.Peer.ID)]; !found { + return ClientUnapproved + } + + return nil +} + +func (h *Hook) HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error { + return nil +} diff --git a/middleware/hooks.go b/middleware/hooks.go index 73797e4..1e7717b 100644 --- a/middleware/hooks.go +++ b/middleware/hooks.go @@ -15,10 +15,10 @@ type Hook interface { type nopHook struct{} -func (nopHook) HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error { +func (nopHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { return nil } -func (nopHook) HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error { +func (nopHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) error { return nil } From cc6614c474d672ad6f42f4cafcd11dace4c19e63 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 21:42:08 -0400 Subject: [PATCH 40/74] rename back to chihaya --- cmd/{trakr => chihaya}/main.go | 14 +++++++------- example_config.yaml | 2 +- frontend/frontend.go | 2 +- frontend/http/frontend.go | 4 ++-- frontend/http/parser.go | 2 +- frontend/http/query_params.go | 2 +- frontend/http/writer.go | 4 ++-- frontend/http/writer_test.go | 2 +- frontend/udp/frontend.go | 8 ++++---- frontend/udp/parser.go | 4 ++-- 
frontend/udp/writer.go | 2 +- middleware/hooks.go | 2 +- middleware/middleware.go | 10 +++++----- storage/memory/peer_store.go | 4 ++-- storage/memory/peer_store_test.go | 2 +- storage/storage.go | 4 ++-- storage/storage_bench.go | 2 +- 17 files changed, 35 insertions(+), 35 deletions(-) rename cmd/{trakr => chihaya}/main.go (92%) diff --git a/cmd/trakr/main.go b/cmd/chihaya/main.go similarity index 92% rename from cmd/trakr/main.go rename to cmd/chihaya/main.go index de146ca..b828042 100644 --- a/cmd/trakr/main.go +++ b/cmd/chihaya/main.go @@ -14,10 +14,10 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v2" - httpfrontend "github.com/jzelinskie/trakr/frontend/http" - udpfrontend "github.com/jzelinskie/trakr/frontend/udp" - "github.com/jzelinskie/trakr/middleware" - "github.com/jzelinskie/trakr/storage/memory" + httpfrontend "github.com/chihaya/chihaya/frontend/http" + udpfrontend "github.com/chihaya/chihaya/frontend/udp" + "github.com/chihaya/chihaya/middleware" + "github.com/chihaya/chihaya/storage/memory" ) type ConfigFile struct { @@ -27,7 +27,7 @@ type ConfigFile struct { HTTPConfig httpfrontend.Config `yaml:"http"` UDPConfig udpfrontend.Config `yaml:"udp"` Storage memory.Config `yaml:"storage"` - } `yaml:"trakr"` + } `yaml:"chihaya"` } // ParseConfigFile returns a new ConfigFile given the path to a YAML @@ -64,7 +64,7 @@ func main() { var cpuProfilePath string var rootCmd = &cobra.Command{ - Use: "trakr", + Use: "chihaya", Short: "BitTorrent Tracker", Long: "A customizible, multi-protocol BitTorrent Tracker", Run: func(cmd *cobra.Command, args []string) { @@ -173,7 +173,7 @@ func main() { }, } - rootCmd.Flags().StringVar(&configFilePath, "config", "/etc/trakr.yaml", "location of configuration file") + rootCmd.Flags().StringVar(&configFilePath, "config", "/etc/chihaya.yaml", "location of configuration file") rootCmd.Flags().StringVarP(&cpuProfilePath, "cpuprofile", "", "", "location to save a CPU profile") if err := rootCmd.Execute(); err != nil { diff 
--git a/example_config.yaml b/example_config.yaml index 182591f..53c207c 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -1,4 +1,4 @@ -trakr: +chihaya: announce_interval: 15m prometheus_addr: localhost:6880 diff --git a/frontend/frontend.go b/frontend/frontend.go index a577d96..c2a103f 100644 --- a/frontend/frontend.go +++ b/frontend/frontend.go @@ -3,7 +3,7 @@ package frontend import ( "golang.org/x/net/context" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) // TrackerLogic is the interface used by a frontend in order to: (1) generate a diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go index f21ddf5..7439923 100644 --- a/frontend/http/frontend.go +++ b/frontend/http/frontend.go @@ -12,7 +12,7 @@ import ( "github.com/tylerb/graceful" "golang.org/x/net/context" - "github.com/jzelinskie/trakr/frontend" + "github.com/chihaya/chihaya/frontend" ) func init() { @@ -22,7 +22,7 @@ func init() { var promResponseDurationMilliseconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Name: "trakr_http_response_duration_milliseconds", + Name: "chihaya_http_response_duration_milliseconds", Help: "The duration of time it takes to receive and write a response to an API request", Buckets: prometheus.ExponentialBuckets(9.375, 2, 10), }, diff --git a/frontend/http/parser.go b/frontend/http/parser.go index 823fe25..7e7674a 100644 --- a/frontend/http/parser.go +++ b/frontend/http/parser.go @@ -4,7 +4,7 @@ import ( "net" "net/http" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) // ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request. 
diff --git a/frontend/http/query_params.go b/frontend/http/query_params.go index 525b8e3..415b4fc 100644 --- a/frontend/http/query_params.go +++ b/frontend/http/query_params.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) // ErrKeyNotFound is returned when a provided key has no value associated with diff --git a/frontend/http/writer.go b/frontend/http/writer.go index 0d1b100..7e0dead 100644 --- a/frontend/http/writer.go +++ b/frontend/http/writer.go @@ -3,8 +3,8 @@ package http import ( "net/http" - "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/frontend/http/bencode" + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/frontend/http/bencode" ) // WriteError communicates an error to a BitTorrent client over HTTP. diff --git a/frontend/http/writer_test.go b/frontend/http/writer_test.go index 522bd5f..cb7d103 100644 --- a/frontend/http/writer_test.go +++ b/frontend/http/writer_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) func TestWriteError(t *testing.T) { diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index 5a4a6b9..a4474b3 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -13,9 +13,9 @@ import ( "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" - "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/frontend" - "github.com/jzelinskie/trakr/frontend/udp/bytepool" + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/frontend" + "github.com/chihaya/chihaya/frontend/udp/bytepool" ) func init() { @@ -25,7 +25,7 @@ func init() { var promResponseDurationMilliseconds = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Name: "trakr_udp_response_duration_milliseconds", + Name: "chihaya_udp_response_duration_milliseconds", Help: 
"The duration of time it takes to receive and write a response to an API request", Buckets: prometheus.ExponentialBuckets(9.375, 2, 10), }, diff --git a/frontend/udp/parser.go b/frontend/udp/parser.go index c771a42..a73fe33 100644 --- a/frontend/udp/parser.go +++ b/frontend/udp/parser.go @@ -4,7 +4,7 @@ import ( "encoding/binary" "net" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) const ( @@ -125,7 +125,7 @@ func handleOptionalParameters(packet []byte) (params bittorrent.Params, err erro return params, errMalformedPacket } - // TODO(jzelinskie): Actually parse the URL Data as described in BEP 41 + // TODO(chihaya): Actually parse the URL Data as described in BEP 41 // into something that fulfills the bittorrent.Params interface. optionStartIndex += 1 + length diff --git a/frontend/udp/writer.go b/frontend/udp/writer.go index 6f04d9e..6055d66 100644 --- a/frontend/udp/writer.go +++ b/frontend/udp/writer.go @@ -7,7 +7,7 @@ import ( "io" "time" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) // WriteError writes the failure reason as a null-terminated string. 
diff --git a/middleware/hooks.go b/middleware/hooks.go index 1e7717b..1c5b30f 100644 --- a/middleware/hooks.go +++ b/middleware/hooks.go @@ -3,7 +3,7 @@ package middleware import ( "golang.org/x/net/context" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) // Hook abstracts the concept of anything that needs to interact with a diff --git a/middleware/middleware.go b/middleware/middleware.go index b9b31d1..6adf4a0 100644 --- a/middleware/middleware.go +++ b/middleware/middleware.go @@ -8,9 +8,9 @@ import ( "golang.org/x/net/context" - "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/frontend" - "github.com/jzelinskie/trakr/storage" + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/frontend" + "github.com/chihaya/chihaya/storage" ) type Config struct { @@ -78,7 +78,7 @@ func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequ func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { for _, h := range l.announcePostHooks { if err := h.HandleAnnounce(ctx, req, resp); err != nil { - log.Println("trakr: post-announce hooks failed:", err.Error()) + log.Println("chihaya: post-announce hooks failed:", err.Error()) return } } @@ -103,7 +103,7 @@ func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { for _, h := range l.scrapePostHooks { if err := h.HandleScrape(ctx, req, resp); err != nil { - log.Println("trakr: post-scrape hooks failed:", err.Error()) + log.Println("chihaya: post-scrape hooks failed:", err.Error()) return } } diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go index ab18273..b53aca3 100644 --- a/storage/memory/peer_store.go +++ b/storage/memory/peer_store.go @@ -8,8 +8,8 @@ import ( "sync" "time" - 
"github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/storage" + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/storage" ) // Config holds the configuration of a memory PeerStore. diff --git a/storage/memory/peer_store_test.go b/storage/memory/peer_store_test.go index d85be46..677fd53 100644 --- a/storage/memory/peer_store_test.go +++ b/storage/memory/peer_store_test.go @@ -3,7 +3,7 @@ package memory import ( "testing" - s "github.com/jzelinskie/trakr/storage" + s "github.com/chihaya/chihaya/storage" ) func createNew() s.PeerStore { diff --git a/storage/storage.go b/storage/storage.go index fe0618b..9e23676 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -1,8 +1,8 @@ package storage import ( - "github.com/jzelinskie/trakr/bittorrent" - "github.com/jzelinskie/trakr/stopper" + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/stopper" ) // ErrResourceDoesNotExist is the error returned by all delete methods in the diff --git a/storage/storage_bench.go b/storage/storage_bench.go index 70429ae..1f0a225 100644 --- a/storage/storage_bench.go +++ b/storage/storage_bench.go @@ -7,7 +7,7 @@ import ( "sync/atomic" "testing" - "github.com/jzelinskie/trakr/bittorrent" + "github.com/chihaya/chihaya/bittorrent" ) type benchData struct { From acf2f4c1f57480c33619dfe05494d84f2eaa8920 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 21:48:39 -0400 Subject: [PATCH 41/74] remove redudant files --- DCO | 36 ------------------------------------ 1 file changed, 36 deletions(-) delete mode 100644 DCO diff --git a/DCO b/DCO deleted file mode 100644 index 716561d..0000000 --- a/DCO +++ /dev/null @@ -1,36 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. From 84a5e22d42363f7b94264e023a3f1f636d1b4803 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 22:26:19 -0400 Subject: [PATCH 42/74] travis: bump to go1.7 --- .travis.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index e924e55..1a523db 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,6 @@ language: go go: -- 1.6 -- tip +- 1.7 sudo: false install: - go get -t ./... 
From fc13031d9629c17e71cc1032f0d37a5253fd81a4 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 22:32:15 -0400 Subject: [PATCH 43/74] move to std context package --- frontend/frontend.go | 2 +- frontend/http/frontend.go | 2 +- frontend/udp/frontend.go | 2 +- middleware/hooks.go | 2 +- middleware/middleware.go | 3 +-- 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/frontend/frontend.go b/frontend/frontend.go index c2a103f..63569c3 100644 --- a/frontend/frontend.go +++ b/frontend/frontend.go @@ -1,7 +1,7 @@ package frontend import ( - "golang.org/x/net/context" + "context" "github.com/chihaya/chihaya/bittorrent" ) diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go index 7439923..48cedf9 100644 --- a/frontend/http/frontend.go +++ b/frontend/http/frontend.go @@ -3,6 +3,7 @@ package http import ( + "context" "net" "net/http" "time" @@ -10,7 +11,6 @@ import ( "github.com/julienschmidt/httprouter" "github.com/prometheus/client_golang/prometheus" "github.com/tylerb/graceful" - "golang.org/x/net/context" "github.com/chihaya/chihaya/frontend" ) diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index a4474b3..fc65c72 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -4,6 +4,7 @@ package udp import ( "bytes" + "context" "encoding/binary" "log" "net" @@ -11,7 +12,6 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" "github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/frontend" diff --git a/middleware/hooks.go b/middleware/hooks.go index 1c5b30f..7a6c5ae 100644 --- a/middleware/hooks.go +++ b/middleware/hooks.go @@ -1,7 +1,7 @@ package middleware import ( - "golang.org/x/net/context" + "context" "github.com/chihaya/chihaya/bittorrent" ) diff --git a/middleware/middleware.go b/middleware/middleware.go index 6adf4a0..18cb64b 100644 --- a/middleware/middleware.go +++ b/middleware/middleware.go @@ -3,11 +3,10 @@ package middleware import 
( + "context" "log" "time" - "golang.org/x/net/context" - "github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/frontend" "github.com/chihaya/chihaya/storage" From 07cc413399c160dc64ec066872808770e1b8a014 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 22:37:29 -0400 Subject: [PATCH 44/74] clientwhitelist: fix compilation issues --- middleware/clientwhitelist/clientwhitelist.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/middleware/clientwhitelist/clientwhitelist.go b/middleware/clientwhitelist/clientwhitelist.go index 1e06ac3..f7aefd4 100644 --- a/middleware/clientwhitelist/clientwhitelist.go +++ b/middleware/clientwhitelist/clientwhitelist.go @@ -13,11 +13,11 @@ import ( // begin with an approved prefix. var ClientUnapproved = bittorrent.ClientError("unapproved client") -type Hook struct { +type hook struct { approved map[bittorrent.ClientID]struct{} } -func NewHook(approved []string) { +func NewHook(approved []string) middleware.Hook { h := &hook{ approved: make(map[bittorrent.ClientID]struct{}), } @@ -29,9 +29,7 @@ func NewHook(approved []string) { return h } -var _ middleware.Hook = &Hook{} - -func (h *Hook) HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error { +func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { if _, found := h.approved[bittorrent.NewClientID(req.Peer.ID)]; !found { return ClientUnapproved } @@ -39,6 +37,6 @@ func (h *Hook) HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bit return nil } -func (h *Hook) HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error { +func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) error { return nil } From 674ab8a4c62360d2cf547d8ef2287e28f079e5ca Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 
23:06:06 -0400 Subject: [PATCH 45/74] NewClientID now takes PeerID instead of string --- bittorrent/client_id.go | 16 ++++++++-------- bittorrent/client_id_test.go | 16 +++++++--------- middleware/clientwhitelist/clientwhitelist.go | 15 +++++++++++---- 3 files changed, 26 insertions(+), 21 deletions(-) diff --git a/bittorrent/client_id.go b/bittorrent/client_id.go index b50be80..840fd75 100644 --- a/bittorrent/client_id.go +++ b/bittorrent/client_id.go @@ -2,21 +2,21 @@ package bittorrent // ClientID represents the part of a PeerID that identifies a Peer's client // software. -type ClientID string +type ClientID [6]byte // NewClientID parses a ClientID from a PeerID. -func NewClientID(peerID string) ClientID { - var clientID string - length := len(peerID) +func NewClientID(pid PeerID) ClientID { + var cid ClientID + length := len(pid) if length >= 6 { - if peerID[0] == '-' { + if pid[0] == '-' { if length >= 7 { - clientID = peerID[1:7] + copy(cid[:], pid[1:7]) } } else { - clientID = peerID[:6] + copy(cid[:], pid[:6]) } } - return ClientID(clientID) + return cid } diff --git a/bittorrent/client_id_test.go b/bittorrent/client_id_test.go index 126f701..ce760fa 100644 --- a/bittorrent/client_id_test.go +++ b/bittorrent/client_id_test.go @@ -1,6 +1,9 @@ package bittorrent -import "testing" +import ( + "bytes" + "testing" +) func TestClientID(t *testing.T) { var clientTable = []struct{ peerID, clientID string }{ @@ -38,17 +41,12 @@ func TestClientID(t *testing.T) { {"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt {"346------SDFknl33408", "346---"}, // TorreTopia {"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod - - {"", ""}, - {"-", ""}, - {"12345", ""}, - {"-12345", ""}, - {"123456", "123456"}, - {"-123456", "123456"}, } for _, tt := range clientTable { - if parsedID := NewClientID(tt.peerID); parsedID != ClientID(tt.clientID) { + clientID := ClientID([]byte(tt.clientID)) + parsedID := NewClientID(PeerIDFromBytes([]byte(tt.peerID))) + if !bytes.Equal([]byte(parsedID), 
[]byte(clientID)) { t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID) } } diff --git a/middleware/clientwhitelist/clientwhitelist.go b/middleware/clientwhitelist/clientwhitelist.go index f7aefd4..3443e05 100644 --- a/middleware/clientwhitelist/clientwhitelist.go +++ b/middleware/clientwhitelist/clientwhitelist.go @@ -4,6 +4,7 @@ package clientwhitelist import ( "context" + "errors" "github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/middleware" @@ -17,16 +18,22 @@ type hook struct { approved map[bittorrent.ClientID]struct{} } -func NewHook(approved []string) middleware.Hook { +func NewHook(approved []string) (middleware.Hook, error) { h := &hook{ approved: make(map[bittorrent.ClientID]struct{}), } - for _, clientID := range approved { - h.approved[bittorrent.NewClientID(clientID)] = struct{}{} + for _, cidString := range approved { + cidBytes := []byte(cidString) + if len(cidBytes) != 6 { + return nil, errors.New("clientID " + cidString + " must be 6 bytes") + } + var cid bittorrent.ClientID + copy(cid[:], cidBytes) + h.approved[cid] = struct{}{} } - return h + return h, nil } func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { From ddd5cbef2ce31722a2068ebb4e9b5603309a74da Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 23:37:40 -0400 Subject: [PATCH 46/74] travis: disable linting --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1a523db..525680b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ script: - go test -v $(go list ./... | grep -v /vendor/) - go vet $(go list ./... | grep -v /vendor/) - diff <(goimports -d $(find . -type f -name '*.go' -not -path "./vendor/*")) <(printf "") -- for d in $(go list ./... | grep -v /vendor/); do diff <(golint $d) <(printf ""); done +#- for d in $(go list ./... 
| grep -v /vendor/); do diff <(golint $d) <(printf ""); done notifications: irc: channels: From 66f76a7d486cc7d7383a176ef4b49d90577caa6d Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 23:41:59 -0400 Subject: [PATCH 47/74] make clientID tests pass --- bittorrent/client_id_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bittorrent/client_id_test.go b/bittorrent/client_id_test.go index ce760fa..991bde1 100644 --- a/bittorrent/client_id_test.go +++ b/bittorrent/client_id_test.go @@ -1,7 +1,6 @@ package bittorrent import ( - "bytes" "testing" ) @@ -44,9 +43,10 @@ func TestClientID(t *testing.T) { } for _, tt := range clientTable { - clientID := ClientID([]byte(tt.clientID)) - parsedID := NewClientID(PeerIDFromBytes([]byte(tt.peerID))) - if !bytes.Equal([]byte(parsedID), []byte(clientID)) { + var clientID ClientID + copy(clientID[:], []byte(tt.clientID)) + parsedID := NewClientID(PeerIDFromString(tt.peerID)) + if parsedID != clientID { t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID) } } From 31a79a1ce2c7a9aa6213a7826081282c7c9fa7e2 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 23:43:09 -0400 Subject: [PATCH 48/74] fix http frontend tests --- frontend/http/writer_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/http/writer_test.go b/frontend/http/writer_test.go index cb7d103..fbbb9b7 100644 --- a/frontend/http/writer_test.go +++ b/frontend/http/writer_test.go @@ -19,7 +19,7 @@ func TestWriteError(t *testing.T) { for _, tt := range table { r := httptest.NewRecorder() - err := writeError(r, bittorrent.ClientError(tt.reason)) + err := WriteError(r, bittorrent.ClientError(tt.reason)) assert.Nil(t, err) assert.Equal(t, r.Body.String(), tt.expected) } @@ -27,7 +27,7 @@ func TestWriteError(t *testing.T) { func TestWriteStatus(t *testing.T) { r := httptest.NewRecorder() - err := writeError(r, bittorrent.ClientError("something is missing")) + err 
:= WriteError(r, bittorrent.ClientError("something is missing")) assert.Nil(t, err) assert.Equal(t, r.Body.String(), "d14:failure reason20:something is missinge") } From eda825dfb085b0e9cbd21534cc3e03b72f2a8788 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Tue, 16 Aug 2016 23:51:24 -0400 Subject: [PATCH 49/74] readme: move some things around [skip ci] --- README.md | 51 ++++++++++++++++++++++++--------------------------- 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 202b809..b966dce 100644 --- a/README.md +++ b/README.md @@ -13,19 +13,37 @@ Chihaya is an open source [BitTorrent tracker] written in [Go]. Differentiating features include: -- Protocol-agnostic, middleware-composed logic -- Low resource consumption and fast, asynchronous request processing -- Unified IPv4 and IPv6 [swarms] +- Protocol-agnostic middleware +- HTTP and UDP frontends +- IPv4 and IPv6 support - [YAML] configuration -- Optional metrics via [Prometheus] +- Metrics via [Prometheus] [releases]: https://github.com/chihaya/chihaya/releases [BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker [Go]: https://golang.org -[swarms]: https://en.wikipedia.org/wiki/Glossary_of_BitTorrent_terms#Swarm [YAML]: http://yaml.org [Prometheus]: http://prometheus.io +## Production Use + +### Facebook + +[Facebook] uses BitTorrent to deploy new versions of their software. +In order to optimize the flow of traffic within their datacenters, Chihaya is configured to prefer peers within the same subnet. +Because Facebook organizes their network such that server racks are allocated IP addresses in the same subnet, the vast majority of deployment traffic never impacts the congested areas of their network. + +[Facebook]: https://facebook.com + +### CoreOS + +[Quay] is a container registry that offers the ability to download containers via BitTorrent in order to speed up large or geographically distant deployments. 
+Announce URLs from Quay's torrent files contain a [JWT] in order to allow Chihaya to verify that an infohash was approved by the registry. +By verifying the infohash, Quay can be sure that only their content is being shared by their tracker. + +[Quay]: https://quay.io +[JWT]: https://jwt.io + ## Development ### Getting Started @@ -53,30 +71,9 @@ For more information read [CONTRIBUTING.md]. [freenode IRC]: http://webchat.freenode.net/?channels=chihaya [CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md -## Production Use - -### Facebook - -[Facebook] uses BitTorrent to deploy new versions of their software. -In order to optimize the flow of traffic within their datacenters, Chihaya is configured to prefer peers within the same subnet. -Because Facebook organizes their network such that server racks are allocated IP addresses in the same subnet, the vast majority of deployment traffic never impacts the congested areas of their network. - -[Facebook]: https://facebook.com - -### CoreOS - -[Quay] is a container registry that offers the ability to download containers via BitTorrent in order to speed up large or geographically distant deployments. -Announce URLs from Quay's torrent files contain a [JWT] in order to allow Chihaya to verify that an infohash was approved by the registry. -By verifying the infohash, Quay can be sure that only their content is being shared by their tracker. - -[Quay]: https://quay.io -[JWT]: https://jwt.io - ## Related projects +- [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs - [OpenTracker](http://erdgeist.org/arts/software/opentracker): a popular BitTorrent tracker written in C - [Ocelot](https://github.com/WhatCD/Ocelot): a private BitTorrent tracker written in C++ -## License - -Chihaya is distributed under the 2-Clause BSD license that can be found in the `LICENSE` file. 
From 62f2a095a1ad10eba03dc71f5c26959c4e5c5445 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Wed, 17 Aug 2016 17:11:11 -0400 Subject: [PATCH 50/74] memory: fix garbage collection, add config check --- storage/memory/peer_store.go | 11 ++++++++++- storage/memory/peer_store_test.go | 4 +++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go index b53aca3..02211a1 100644 --- a/storage/memory/peer_store.go +++ b/storage/memory/peer_store.go @@ -2,6 +2,7 @@ package memory import ( "encoding/binary" + "errors" "log" "net" "runtime" @@ -12,6 +13,10 @@ import ( "github.com/chihaya/chihaya/storage" ) +// ErrInvalidGCInterval is returned for a GarbageCollectionInterval that is +// less than or equal to zero. +var ErrInvalidGCInterval = errors.New("invalid garbage collection interval") + // Config holds the configuration of a memory PeerStore. type Config struct { GarbageCollectionInterval time.Duration `yaml:"gc_interval"` @@ -27,6 +32,10 @@ func New(cfg Config) (storage.PeerStore, error) { shardCount = cfg.ShardCount } + if cfg.GarbageCollectionInterval <= 0 { + return nil, ErrInvalidGCInterval + } + ps := &peerStore{ shards: make([]*peerShard, shardCount*2), closed: make(chan struct{}), @@ -43,7 +52,7 @@ func New(cfg Config) (storage.PeerStore, error) { case <-ps.closed: return case <-time.After(cfg.GarbageCollectionInterval): - before := time.Now().Add(-cfg.GarbageCollectionInterval) + before := time.Now().Add(-cfg.PeerLifetime) log.Println("memory: purging peers with no announces since ", before) ps.collectGarbage(before) } diff --git a/storage/memory/peer_store_test.go b/storage/memory/peer_store_test.go index 677fd53..aed60d2 100644 --- a/storage/memory/peer_store_test.go +++ b/storage/memory/peer_store_test.go @@ -3,11 +3,13 @@ package memory import ( "testing" + "time" + s "github.com/chihaya/chihaya/storage" ) func createNew() s.PeerStore { - ps, err := New(Config{ShardCount: 1024}) + ps, err 
:= New(Config{ShardCount: 1024, GarbageCollectionInterval: 10 * time.Minute}) if err != nil { panic(err) } From 8ebe57a602e19dc4f70193455630e7d221e794a5 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Wed, 17 Aug 2016 19:04:26 -0400 Subject: [PATCH 51/74] udp: fix response encoding --- frontend/udp/writer.go | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/frontend/udp/writer.go b/frontend/udp/writer.go index 6055d66..e3a495f 100644 --- a/frontend/udp/writer.go +++ b/frontend/udp/writer.go @@ -26,32 +26,44 @@ func WriteError(w io.Writer, txID []byte, err error) { // WriteAnnounce encodes an announce response according to BEP 15. func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse) { - writeHeader(w, txID, announceActionID) - binary.Write(w, binary.BigEndian, uint32(resp.Interval/time.Second)) - binary.Write(w, binary.BigEndian, uint32(resp.Incomplete)) - binary.Write(w, binary.BigEndian, uint32(resp.Complete)) + var buf bytes.Buffer + + writeHeader(&buf, txID, announceActionID) + binary.Write(&buf, binary.BigEndian, uint32(resp.Interval/time.Second)) + binary.Write(&buf, binary.BigEndian, uint32(resp.Incomplete)) + binary.Write(&buf, binary.BigEndian, uint32(resp.Complete)) for _, peer := range resp.IPv4Peers { - w.Write(peer.IP) - binary.Write(w, binary.BigEndian, peer.Port) + buf.Write(peer.IP) + binary.Write(&buf, binary.BigEndian, peer.Port) } + + w.Write(buf.Bytes()) } // WriteScrape encodes a scrape response according to BEP 15. 
func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) { - writeHeader(w, txID, scrapeActionID) + var buf bytes.Buffer + + writeHeader(&buf, txID, scrapeActionID) for _, scrape := range resp.Files { - binary.Write(w, binary.BigEndian, scrape.Complete) - binary.Write(w, binary.BigEndian, scrape.Snatches) - binary.Write(w, binary.BigEndian, scrape.Incomplete) + binary.Write(&buf, binary.BigEndian, scrape.Complete) + binary.Write(&buf, binary.BigEndian, scrape.Snatches) + binary.Write(&buf, binary.BigEndian, scrape.Incomplete) } + + w.Write(buf.Bytes()) } // WriteConnectionID encodes a new connection response according to BEP 15. func WriteConnectionID(w io.Writer, txID, connID []byte) { - writeHeader(w, txID, connectActionID) - w.Write(connID) + var buf bytes.Buffer + + writeHeader(&buf, txID, connectActionID) + buf.Write(connID) + + w.Write(buf.Bytes()) } // writeHeader writes the action and transaction ID to the provided response From a4639a1aacdc3d7d5b78783a0d61fd842dba307b Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Wed, 17 Aug 2016 19:04:53 -0400 Subject: [PATCH 52/74] udp: clean up --- frontend/udp/frontend.go | 10 +++------- frontend/udp/parser.go | 1 - 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index fc65c72..1cf3d03 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -6,7 +6,6 @@ import ( "bytes" "context" "encoding/binary" - "log" "net" "sync" "time" @@ -94,13 +93,12 @@ func (t *Frontend) ListenAndServe() error { } defer t.socket.Close() - pool := bytepool.New(256, 2048) + pool := bytepool.New(2048, 2048) for { // Check to see if we need to shutdown. 
select { case <-t.closing: - t.wg.Wait() return nil default: } @@ -124,7 +122,6 @@ func (t *Frontend) ListenAndServe() error { continue } - log.Println("Got UDP Request") t.wg.Add(1) go func() { defer t.wg.Done() @@ -132,11 +129,10 @@ func (t *Frontend) ListenAndServe() error { // Handle the request. start := time.Now() - response, action, err := t.handleRequest( + action, err := t.handleRequest( Request{buffer[:n], addr.IP}, ResponseWriter{t.socket, addr}, ) - log.Printf("Handled UDP Request: %s, %s, %s\n", response, action, err) recordResponseDuration(action, err, time.Since(start)) }() } @@ -162,7 +158,7 @@ func (w ResponseWriter) Write(b []byte) (int, error) { } // handleRequest parses and responds to a UDP Request. -func (t *Frontend) handleRequest(r Request, w ResponseWriter) (response []byte, actionName string, err error) { +func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string, err error) { if len(r.Packet) < 16 { // Malformed, no client packets are less than 16 bytes. // We explicitly return nothing in case this is a DoS attempt. diff --git a/frontend/udp/parser.go b/frontend/udp/parser.go index a73fe33..43dc329 100644 --- a/frontend/udp/parser.go +++ b/frontend/udp/parser.go @@ -12,7 +12,6 @@ const ( announceActionID scrapeActionID errorActionID - announceDualStackActionID ) // Option-Types as described in BEP 41 and BEP 45. From 4b244638838995d15ce59a9986fe262006432920 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Sat, 20 Aug 2016 02:27:24 -0400 Subject: [PATCH 53/74] readme: add architecture section --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index b966dce..062d22d 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,23 @@ Differentiating features include: [YAML]: http://yaml.org [Prometheus]: http://prometheus.io +## Architecture + +### Diagram + +Coming Soon + +### Description + +BitTorrent clients send announce and scrape requests to a _Frontend_. 
+Frontends parse requests and write responses for the particular protocol they implement. +The _TrackerLogic_ interface is used to generate responses for their requests and optionally perform a task after responding to a client. +A configurable chain of _PreHook_ and _PostHook_ middleware is used to construct an instance of TrackerLogic. +PreHooks are middleware that are executed before the response has been written. +The final middleware in a chain of PreHooks ensures the existence of any required response fields by reading out of the configured implementation of the _Storage_ interface. +PostHooks are asynchronous tasks that occur after a response has been delivered to the client. +Request data is written to storage asynchronously in one of these PostHooks. + ## Production Use ### Facebook From 0e0f8e7ad13387ed5322dc62f1fefd81459d8f45 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sat, 20 Aug 2016 10:19:29 -0400 Subject: [PATCH 54/74] cmd/chihaya: clean up --- cmd/chihaya/main.go | 59 ++++++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/cmd/chihaya/main.go b/cmd/chihaya/main.go index b828042..d2f7bbf 100644 --- a/cmd/chihaya/main.go +++ b/cmd/chihaya/main.go @@ -102,71 +102,80 @@ func main() { return err } - // TODO create PeerStore // TODO create Hooks logic := middleware.NewLogic(cfg.Config, peerStore, nil, nil, nil, nil) if err != nil { return err } + shutdown := make(chan struct{}) errChan := make(chan error) - closedChan := make(chan struct{}) - var hFrontend *httpfrontend.Frontend - var uFrontend *udpfrontend.Frontend + var httpFrontend *httpfrontend.Frontend + var udpFrontend *udpfrontend.Frontend if cfg.HTTPConfig.Addr != "" { - // TODO get the real TrackerLogic - hFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig) + httpFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig) go func() { log.Println("started serving HTTP on", cfg.HTTPConfig.Addr) - if err := hFrontend.ListenAndServe();
err != nil { + if err := httpFrontend.ListenAndServe(); err != nil { errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error()) } }() } if cfg.UDPConfig.Addr != "" { - // TODO get the real TrackerLogic - uFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig) + udpFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig) go func() { log.Println("started serving UDP on", cfg.UDPConfig.Addr) - if err := uFrontend.ListenAndServe(); err != nil { + if err := udpFrontend.ListenAndServe(); err != nil { errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error()) } }() } - shutdown := make(chan os.Signal) - signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM) + sigChan := make(chan os.Signal) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) go func() { - <-shutdown - - if uFrontend != nil { - uFrontend.Stop() + select { + case <-sigChan: + case <-shutdown: } - if hFrontend != nil { - hFrontend.Stop() + if udpFrontend != nil { + udpFrontend.Stop() } - // TODO: stop PeerStore + if httpFrontend != nil { + httpFrontend.Stop() + } + + for err := range peerStore.Stop() { + if err != nil { + errChan <- err + } + } close(errChan) - close(closedChan) }() - for err := range errChan { + closed := false + var bufErr error + for err = range errChan { if err != nil { - close(shutdown) - <-closedChan - return err + if !closed { + close(shutdown) + closed = true + } else { + log.Println(bufErr) + } + bufErr = err } } - return nil + return bufErr }(); err != nil { log.Fatal(err) } From 6ba8e475caa99931db7b68d2937d61ab962f5dc7 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Wed, 24 Aug 2016 13:31:36 -0400 Subject: [PATCH 55/74] readme: add diagram --- README.md | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 062d22d..5bda1c2 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,33 @@ Differentiating features include: ### Diagram -Coming Soon +``` + 
┌──────────────────────┐ + │ BitTorrent Client ├┬──┐ + └┬─────────────────────┘│◀─┘ + └──────────────────────┘ + ▲ +┌────────────────┼────────────────────────────────────────────────────┐ +│ ▼ chihaya│ +│ ┌──────────────────────┐ │ +│ │ Frontend ├┐ │ +│ └┬─────────────────────┘│ │ +│ └──────────────────────┘ │ +│ ▲ │ +│ │ │ +│ ▼ │ +│ ┌──────────────────────┐ ┌──────────────────────┐ │ +│ │ PreHook Middleware ├◀───────────│ Storage │ │ +│ └┬─────────────────────┘│ └──────────────────────┘ │ +│ └──────────┬───────────┘ △ │ +│ │ │ │ +│ ▽ │ │ +│ ┌──────────────────────┐ │ │ +│ │ PostHook Middleware ├┐ │ │ +│ └┬─────────────────────┘│───────────────────────┘ │ +│ └──────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────┘ +``` ### Description From 9dc5372796696a7b6cbb2654274dbebb60a09d4c Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Wed, 24 Aug 2016 17:21:06 -0400 Subject: [PATCH 56/74] cmd: pull closure out into its own func Also introduce a config.go. --- cmd/chihaya/config.go | 54 ++++++++++ cmd/chihaya/main.go | 245 ++++++++++++++++++------------------------ 2 files changed, 156 insertions(+), 143 deletions(-) create mode 100644 cmd/chihaya/config.go diff --git a/cmd/chihaya/config.go b/cmd/chihaya/config.go new file mode 100644 index 0000000..26b11e3 --- /dev/null +++ b/cmd/chihaya/config.go @@ -0,0 +1,54 @@ +package main + +import ( + "errors" + "io/ioutil" + "os" + + "gopkg.in/yaml.v2" + + httpfrontend "github.com/chihaya/chihaya/frontend/http" + udpfrontend "github.com/chihaya/chihaya/frontend/udp" + "github.com/chihaya/chihaya/middleware" + "github.com/chihaya/chihaya/storage/memory" +) + +// ConfigFile represents a namespaced YAML configation file. 
+type ConfigFile struct { + MainConfigBlock struct { + middleware.Config + PrometheusAddr string `yaml:"prometheus_addr"` + HTTPConfig httpfrontend.Config `yaml:"http"` + UDPConfig udpfrontend.Config `yaml:"udp"` + Storage memory.Config `yaml:"storage"` + } `yaml:"chihaya"` +} + +// ParseConfigFile returns a new ConfigFile given the path to a YAML +// configuration file. +// +// It supports relative and absolute paths and environment variables. +func ParseConfigFile(path string) (*ConfigFile, error) { + if path == "" { + return nil, errors.New("no config path specified") + } + + f, err := os.Open(os.ExpandEnv(path)) + if err != nil { + return nil, err + } + defer f.Close() + + contents, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + var cfgFile ConfigFile + err = yaml.Unmarshal(contents, &cfgFile) + if err != nil { + return nil, err + } + + return &cfgFile, nil +} diff --git a/cmd/chihaya/main.go b/cmd/chihaya/main.go index d2f7bbf..7faf9b4 100644 --- a/cmd/chihaya/main.go +++ b/cmd/chihaya/main.go @@ -2,7 +2,6 @@ package main import ( "errors" - "io/ioutil" "log" "net/http" "os" @@ -12,7 +11,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" httpfrontend "github.com/chihaya/chihaya/frontend/http" udpfrontend "github.com/chihaya/chihaya/frontend/udp" @@ -20,170 +18,131 @@ import ( "github.com/chihaya/chihaya/storage/memory" ) -type ConfigFile struct { - MainConfigBlock struct { - middleware.Config - PrometheusAddr string `yaml:"prometheus_addr"` - HTTPConfig httpfrontend.Config `yaml:"http"` - UDPConfig udpfrontend.Config `yaml:"udp"` - Storage memory.Config `yaml:"storage"` - } `yaml:"chihaya"` -} - -// ParseConfigFile returns a new ConfigFile given the path to a YAML -// configuration file. -// -// It supports relative and absolute paths and environment variables. 
-func ParseConfigFile(path string) (*ConfigFile, error) { - if path == "" { - return nil, errors.New("no config path specified") +func rootCmdRun(cmd *cobra.Command, args []string) error { + cpuProfilePath, _ := cmd.Flags().GetString("cpuprofile") + if cpuProfilePath != "" { + log.Println("enabled CPU profiling to " + cpuProfilePath) + f, err := os.Create(cpuProfilePath) + if err != nil { + return err + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() } - f, err := os.Open(os.ExpandEnv(path)) + configFilePath, _ := cmd.Flags().GetString("config") + configFile, err := ParseConfigFile(configFilePath) if err != nil { - return nil, err + return errors.New("failed to read config: " + err.Error()) } - defer f.Close() + cfg := configFile.MainConfigBlock - contents, err := ioutil.ReadAll(f) + go func() { + promServer := http.Server{ + Addr: cfg.PrometheusAddr, + Handler: prometheus.Handler(), + } + log.Println("started serving prometheus stats on", cfg.PrometheusAddr) + if err := promServer.ListenAndServe(); err != nil { + log.Fatal(err) + } + }() + + // Force the compiler to enforce memory against the storage interface. 
+ peerStore, err := memory.New(cfg.Storage) if err != nil { - return nil, err + return err } - var cfgFile ConfigFile - err = yaml.Unmarshal(contents, &cfgFile) + // TODO create Hooks + logic := middleware.NewLogic(cfg.Config, peerStore, nil, nil, nil, nil) if err != nil { - return nil, err + return err } - return &cfgFile, nil + shutdown := make(chan struct{}) + errChan := make(chan error) + + var httpFrontend *httpfrontend.Frontend + var udpFrontend *udpfrontend.Frontend + + if cfg.HTTPConfig.Addr != "" { + httpFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig) + + go func() { + log.Println("started serving HTTP on", cfg.HTTPConfig.Addr) + if err := httpFrontend.ListenAndServe(); err != nil { + errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error()) + } + }() + } + + if cfg.UDPConfig.Addr != "" { + udpFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig) + + go func() { + log.Println("started serving UDP on", cfg.UDPConfig.Addr) + if err := udpFrontend.ListenAndServe(); err != nil { + errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error()) + } + }() + } + + sigChan := make(chan os.Signal) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + go func() { + select { + case <-sigChan: + case <-shutdown: + } + + if udpFrontend != nil { + udpFrontend.Stop() + } + + if httpFrontend != nil { + httpFrontend.Stop() + } + + for err := range peerStore.Stop() { + if err != nil { + errChan <- err + } + } + + close(errChan) + }() + + closed := false + var bufErr error + for err = range errChan { + if err != nil { + if !closed { + close(shutdown) + closed = true + } else { + log.Println(bufErr) + } + bufErr = err + } + } + + return bufErr } func main() { - var configFilePath string - var cpuProfilePath string - var rootCmd = &cobra.Command{ Use: "chihaya", Short: "BitTorrent Tracker", Long: "A customizible, multi-protocol BitTorrent Tracker", Run: func(cmd *cobra.Command, args []string) { - if err := func() 
error { - if cpuProfilePath != "" { - log.Println("enabled CPU profiling to " + cpuProfilePath) - f, err := os.Create(cpuProfilePath) - if err != nil { - return err - } - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } - - configFile, err := ParseConfigFile(configFilePath) - if err != nil { - return errors.New("failed to read config: " + err.Error()) - } - cfg := configFile.MainConfigBlock - - go func() { - promServer := http.Server{ - Addr: cfg.PrometheusAddr, - Handler: prometheus.Handler(), - } - log.Println("started serving prometheus stats on", cfg.PrometheusAddr) - if err := promServer.ListenAndServe(); err != nil { - log.Fatal(err) - } - }() - - // Force the compiler to enforce memory against the storage interface. - peerStore, err := memory.New(cfg.Storage) - if err != nil { - return err - } - - // TODO create Hooks - logic := middleware.NewLogic(cfg.Config, peerStore, nil, nil, nil, nil) - if err != nil { - return err - } - - shutdown := make(chan struct{}) - errChan := make(chan error) - - var httpFrontend *httpfrontend.Frontend - var udpFrontend *udpfrontend.Frontend - - if cfg.HTTPConfig.Addr != "" { - httpFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig) - - go func() { - log.Println("started serving HTTP on", cfg.HTTPConfig.Addr) - if err := httpFrontend.ListenAndServe(); err != nil { - errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error()) - } - }() - } - - if cfg.UDPConfig.Addr != "" { - udpFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig) - - go func() { - log.Println("started serving UDP on", cfg.UDPConfig.Addr) - if err := udpFrontend.ListenAndServe(); err != nil { - errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error()) - } - }() - } - - sigChan := make(chan os.Signal) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - go func() { - select { - case <-sigChan: - case <-shutdown: - } - - if udpFrontend != nil { - udpFrontend.Stop() - } - - if httpFrontend != 
nil { - httpFrontend.Stop() - } - - for err := range peerStore.Stop() { - if err != nil { - errChan <- err - } - } - - close(errChan) - }() - - closed := false - var bufErr error - for err = range errChan { - if err != nil { - if !closed { - close(shutdown) - closed = true - } else { - log.Println(bufErr) - } - bufErr = err - } - } - - return bufErr - }(); err != nil { + if err := rootCmdRun(cmd, args); err != nil { log.Fatal(err) } }, } - - rootCmd.Flags().StringVar(&configFilePath, "config", "/etc/chihaya.yaml", "location of configuration file") - rootCmd.Flags().StringVarP(&cpuProfilePath, "cpuprofile", "", "", "location to save a CPU profile") + rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file") + rootCmd.Flags().String("cpuprofile", "", "location to save a CPU profile") if err := rootCmd.Execute(); err != nil { log.Fatal(err) From 6e790eed747c55a2fb2e491f0c1ff00bfedfb346 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Wed, 31 Aug 2016 21:09:34 -0400 Subject: [PATCH 57/74] add initial jwt middleware --- example_config.yaml | 5 +- middleware/jwt/jwt.go | 179 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 182 insertions(+), 2 deletions(-) create mode 100644 middleware/jwt/jwt.go diff --git a/example_config.yaml b/example_config.yaml index 53c207c..908afd9 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -23,9 +23,10 @@ chihaya: prehooks: - name: jwt config: - jwk_set_uri: "" + issuer: https://issuer.com + audience: https://chihaya.issuer.com + jwk_set_uri: https://issuer.com/keys jwk_set_update_interval: 5m - jwt_audience: "" - name: approved_client config: type: whitelist diff --git a/middleware/jwt/jwt.go b/middleware/jwt/jwt.go new file mode 100644 index 0000000..1743c1f --- /dev/null +++ b/middleware/jwt/jwt.go @@ -0,0 +1,179 @@ +// Package jwt implements a Hook that fails an Announce if the client's request +// is missing a valid JSON Web Token. 
+// +// JWTs are validated against the standard claims in RFC7519 along with an +// extra "infohash" claim that verifies the client has access to the Swarm. +// RS256 keys are asynchronously rotated from a provided JWK Set HTTP endpoint. +package jwt + +import ( + "context" + "crypto" + "encoding/json" + "errors" + "log" + "net/http" + "net/url" + "time" + + jc "github.com/SermoDigital/jose/crypto" + "github.com/SermoDigital/jose/jws" + "github.com/SermoDigital/jose/jwt" + "github.com/mendsley/gojwk" + + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/middleware" +) + +var ( + // ErrMissingJWT is returned when a JWT is missing from a request. + ErrMissingJWT = bittorrent.ClientError("unapproved request: missing jwt") + + // ErrInvalidJWT is returned when a JWT fails to verify. + ErrInvalidJWT = bittorrent.ClientError("unapproved request: invalid jwt") +) + +// Config represents all the values required by this middleware to fetch JWKs +// and verify JWTs. +type Config struct { + Issuer string `yaml:"issuer"` + Audience string `yaml:"audience"` + JWKSetURL string `yaml:"jwk_set_url"` + JWKUpdateInterval time.Duration `yaml:"jwk_set_update_interval"` +} + +type hook struct { + cfg Config + publicKeys map[string]crypto.PublicKey + closing chan struct{} +} + +// NewHook returns an instance of the JWT middleware.
+func NewHook(cfg Config) middleware.Hook { + h := &hook{ + cfg: cfg, + publicKeys: map[string]crypto.PublicKey{}, + closing: make(chan struct{}), + } + + go func() { + for { + select { + case <-h.closing: + return + case <-time.After(cfg.JWKUpdateInterval): + resp, err := http.Get(cfg.JWKSetURL) + if err != nil { + log.Println("failed to fetch JWK Set: " + err.Error()) + continue + } + + parsedJWKs := map[string]gojwk.Key{} + err = json.NewDecoder(resp.Body).Decode(&parsedJWKs) + if err != nil { + resp.Body.Close() + log.Println("failed to decode JWK JSON: " + err.Error()) + continue + } + resp.Body.Close() + + keys := map[string]crypto.PublicKey{} + for kid, parsedJWK := range parsedJWKs { + publicKey, err := parsedJWK.DecodePublicKey() + if err != nil { + log.Println("failed to decode JWK into public key: " + err.Error()) + continue + } + keys[kid] = publicKey + } + h.publicKeys = keys + } + } + }() + + return h +} + +func (h *hook) Stop() { + close(h.closing) +} + +func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { + if req.Params == nil { + return ErrMissingJWT + } + + jwtParam, ok := req.Params.String("jwt") + if !ok { + return ErrMissingJWT + } + + if err := validateJWT(req.InfoHash, []byte(jwtParam), h.cfg.Issuer, h.cfg.Audience, h.publicKeys); err != nil { + return ErrInvalidJWT + } + + return nil +} + +func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) error { + // Scrapes don't require any protection. 
+ return nil +} + +func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string, publicKeys map[string]crypto.PublicKey) error { + parsedJWT, err := jws.ParseJWT(jwtBytes) + if err != nil { + return err + } + + claims := parsedJWT.Claims() + if iss, ok := claims.Issuer(); !ok || iss != cfgIss { + return jwt.ErrInvalidISSClaim + } + + if aud, ok := claims.Audience(); !ok || !validAudience(aud, cfgAud) { + return jwt.ErrInvalidAUDClaim + } + + if ihClaim, ok := claims.Get("infohash").(string); !ok || !validInfoHash(ihClaim, ih) { + return errors.New("claim \"infohash\" is invalid") + } + + parsedJWS := parsedJWT.(jws.JWS) + kid, ok := parsedJWS.Protected().Get("kid").(string) + if !ok { + return errors.New("invalid kid") + } + publicKey, ok := publicKeys[kid] + if !ok { + return errors.New("signed by unknown kid") + } + + return parsedJWS.Verify(publicKey, jc.SigningMethodRS256) +} + +func validAudience(aud []string, cfgAud string) bool { + for _, a := range aud { + if a == cfgAud { + return true + } + } + return false +} + +func validInfoHash(claim string, ih bittorrent.InfoHash) bool { + if len(claim) == 20 && bittorrent.InfoHashFromString(claim) == ih { + return true + } + + unescapedClaim, err := url.QueryUnescape(claim) + if err != nil { + return false + } + + if len(unescapedClaim) == 20 && bittorrent.InfoHashFromString(unescapedClaim) == ih { + return true + } + + return false +} From e39da6b4e6cd89833a961480b4980cbeebae06e7 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Wed, 31 Aug 2016 21:09:46 -0400 Subject: [PATCH 58/74] main: add CreateHooks() method for ConfigFile This change simplifies middleware.Logic to having only one list of PreHooks and one list of PostHooks. 
--- cmd/chihaya/config.go | 36 +++++++++++++++++++++++++++++++ cmd/chihaya/main.go | 8 +++++-- middleware/middleware.go | 46 +++++++++++++++------------------------- 3 files changed, 59 insertions(+), 31 deletions(-) diff --git a/cmd/chihaya/config.go b/cmd/chihaya/config.go index 26b11e3..04921e3 100644 --- a/cmd/chihaya/config.go +++ b/cmd/chihaya/config.go @@ -10,9 +10,15 @@ import ( httpfrontend "github.com/chihaya/chihaya/frontend/http" udpfrontend "github.com/chihaya/chihaya/frontend/udp" "github.com/chihaya/chihaya/middleware" + "github.com/chihaya/chihaya/middleware/jwt" "github.com/chihaya/chihaya/storage/memory" ) +type hookConfig struct { + Name string `yaml:"name"` + Config interface{} `yaml:"config"` +} + // ConfigFile represents a namespaced YAML configation file. type ConfigFile struct { MainConfigBlock struct { @@ -21,6 +27,8 @@ type ConfigFile struct { HTTPConfig httpfrontend.Config `yaml:"http"` UDPConfig udpfrontend.Config `yaml:"udp"` Storage memory.Config `yaml:"storage"` + PreHooks []hookConfig `yaml:"prehooks"` + PostHooks []hookConfig `yaml:"posthooks"` } `yaml:"chihaya"` } @@ -52,3 +60,31 @@ func ParseConfigFile(path string) (*ConfigFile, error) { return &cfgFile, nil } + +// CreateHooks creates instances of Hooks for all of the PreHooks and PostHooks +// configured in a ConfigFile. 
+func (cfg ConfigFile) CreateHooks() (preHooks, postHooks []middleware.Hook, err error) { + for _, hookCfg := range cfg.MainConfigBlock.PreHooks { + cfgBytes, err := yaml.Marshal(hookCfg.Config) + if err != nil { + panic("failed to remarshal valid YAML") + } + + switch hookCfg.Name { + case "jwt": + var jwtCfg jwt.Config + err := yaml.Unmarshal(cfgBytes, &jwtCfg) + if err != nil { + return nil, nil, errors.New("invalid JWT middleware config" + err.Error()) + } + preHooks = append(preHooks, jwt.NewHook(jwtCfg)) + } + } + + for _, hookCfg := range cfg.MainConfigBlock.PostHooks { + switch hookCfg.Name { + } + } + + return +} diff --git a/cmd/chihaya/main.go b/cmd/chihaya/main.go index 7faf9b4..3473717 100644 --- a/cmd/chihaya/main.go +++ b/cmd/chihaya/main.go @@ -54,8 +54,12 @@ func rootCmdRun(cmd *cobra.Command, args []string) error { return err } - // TODO create Hooks - logic := middleware.NewLogic(cfg.Config, peerStore, nil, nil, nil, nil) + preHooks, postHooks, err := configFile.CreateHooks() + if err != nil { + return err + } + + logic := middleware.NewLogic(cfg.Config, peerStore, preHooks, postHooks) if err != nil { return err } diff --git a/middleware/middleware.go b/middleware/middleware.go index 18cb64b..0778ce0 100644 --- a/middleware/middleware.go +++ b/middleware/middleware.go @@ -18,30 +18,20 @@ type Config struct { var _ frontend.TrackerLogic = &Logic{} -func NewLogic(config Config, peerStore storage.PeerStore, announcePreHooks, announcePostHooks, scrapePreHooks, scrapePostHooks []Hook) *Logic { +func NewLogic(cfg Config, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic { l := &Logic{ - announceInterval: config.AnnounceInterval, - peerStore: peerStore, - announcePreHooks: announcePreHooks, - announcePostHooks: announcePostHooks, - scrapePreHooks: scrapePreHooks, - scrapePostHooks: scrapePostHooks, + announceInterval: cfg.AnnounceInterval, + peerStore: peerStore, + preHooks: preHooks, + postHooks: postHooks, } - if len(l.announcePreHooks) 
== 0 { - l.announcePreHooks = []Hook{nopHook{}} + if len(l.preHooks) == 0 { + l.preHooks = []Hook{nopHook{}} } - if len(l.announcePostHooks) == 0 { - l.announcePostHooks = []Hook{nopHook{}} - } - - if len(l.scrapePreHooks) == 0 { - l.scrapePreHooks = []Hook{nopHook{}} - } - - if len(l.scrapePostHooks) == 0 { - l.scrapePostHooks = []Hook{nopHook{}} + if len(l.postHooks) == 0 { + l.postHooks = []Hook{nopHook{}} } return l @@ -50,12 +40,10 @@ func NewLogic(config Config, peerStore storage.PeerStore, announcePreHooks, anno // Logic is an implementation of the TrackerLogic that functions by // executing a series of middleware hooks. type Logic struct { - announceInterval time.Duration - peerStore storage.PeerStore - announcePreHooks []Hook - announcePostHooks []Hook - scrapePreHooks []Hook - scrapePostHooks []Hook + announceInterval time.Duration + peerStore storage.PeerStore + preHooks []Hook + postHooks []Hook } // HandleAnnounce generates a response for an Announce. @@ -63,7 +51,7 @@ func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequ resp := &bittorrent.AnnounceResponse{ Interval: l.announceInterval, } - for _, h := range l.announcePreHooks { + for _, h := range l.preHooks { if err := h.HandleAnnounce(ctx, req, resp); err != nil { return nil, err } @@ -75,7 +63,7 @@ func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequ // AfterAnnounce does something with the results of an Announce after it has // been completed. 
func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { - for _, h := range l.announcePostHooks { + for _, h := range l.postHooks { if err := h.HandleAnnounce(ctx, req, resp); err != nil { log.Println("chihaya: post-announce hooks failed:", err.Error()) return @@ -88,7 +76,7 @@ func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) resp := &bittorrent.ScrapeResponse{ Files: make(map[bittorrent.InfoHash]bittorrent.Scrape), } - for _, h := range l.scrapePreHooks { + for _, h := range l.preHooks { if err := h.HandleScrape(ctx, req, resp); err != nil { return nil, err } @@ -100,7 +88,7 @@ func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) // AfterScrape does something with the results of a Scrape after it has been // completed. func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { - for _, h := range l.scrapePostHooks { + for _, h := range l.postHooks { if err := h.HandleScrape(ctx, req, resp); err != nil { log.Println("chihaya: post-scrape hooks failed:", err.Error()) return From 7ca15e99439cb744d0a5f7f549abb6ef99b16837 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 1 Sep 2016 20:30:53 -0400 Subject: [PATCH 59/74] middleware: add blacklist support --- cmd/chihaya/config.go | 14 +++- example_config.yaml | 7 +- middleware/clientapproval/clientapproval.go | 79 +++++++++++++++++++ middleware/clientwhitelist/clientwhitelist.go | 49 ------------ 4 files changed, 96 insertions(+), 53 deletions(-) create mode 100644 middleware/clientapproval/clientapproval.go delete mode 100644 middleware/clientwhitelist/clientwhitelist.go diff --git a/cmd/chihaya/config.go b/cmd/chihaya/config.go index 04921e3..12f3d71 100644 --- a/cmd/chihaya/config.go +++ b/cmd/chihaya/config.go @@ -10,6 +10,7 @@ import ( httpfrontend "github.com/chihaya/chihaya/frontend/http" udpfrontend 
"github.com/chihaya/chihaya/frontend/udp" "github.com/chihaya/chihaya/middleware" + "github.com/chihaya/chihaya/middleware/clientapproval" "github.com/chihaya/chihaya/middleware/jwt" "github.com/chihaya/chihaya/storage/memory" ) @@ -75,9 +76,20 @@ func (cfg ConfigFile) CreateHooks() (preHooks, postHooks []middleware.Hook, err var jwtCfg jwt.Config err := yaml.Unmarshal(cfgBytes, &jwtCfg) if err != nil { - return nil, nil, errors.New("invalid JWT middleware config" + err.Error()) + return nil, nil, errors.New("invalid JWT middleware config: " + err.Error()) } preHooks = append(preHooks, jwt.NewHook(jwtCfg)) + case "client approval": + var caCfg clientapproval.Config + err := yaml.Unmarshal(cfgBytes, &caCfg) + if err != nil { + return nil, nil, errors.New("invalid client approval middleware config: " + err.Error()) + } + hook, err := clientapproval.NewHook(caCfg) + if err != nil { + return nil, nil, errors.New("invalid client approval middleware config: " + err.Error()) + } + preHooks = append(preHooks, hook) } } diff --git a/example_config.yaml b/example_config.yaml index 908afd9..c79cab5 100644 --- a/example_config.yaml +++ b/example_config.yaml @@ -27,11 +27,12 @@ chihaya: audience: https://chihaya.issuer.com jwk_set_uri: https://issuer.com/keys jwk_set_update_interval: 5m - - name: approved_client + - name: client approval config: - type: whitelist - clients: + whitelist: - OP1011 + blacklist: + - OP1012 posthooks: - name: gossip diff --git a/middleware/clientapproval/clientapproval.go b/middleware/clientapproval/clientapproval.go new file mode 100644 index 0000000..22fec00 --- /dev/null +++ b/middleware/clientapproval/clientapproval.go @@ -0,0 +1,79 @@ +// Package clientapproval implements a Hook that fails an Announce based on a +// whitelist or blacklist of BitTorrent client IDs. 
+package clientapproval + +import ( + "context" + "errors" + + "github.com/chihaya/chihaya/bittorrent" + "github.com/chihaya/chihaya/middleware" +) + +// ErrClientUnapproved is the error returned when a client's PeerID is invalid. +var ErrClientUnapproved = bittorrent.ClientError("unapproved client") + +// Config represents all the values required by this middleware to validate +// peers based on their BitTorrent client ID. +type Config struct { + Whitelist []string `yaml:"whitelist"` + Blacklist []string `yaml:"blacklist"` +} + +type hook struct { + approved map[bittorrent.ClientID]struct{} + unapproved map[bittorrent.ClientID]struct{} +} + +// NewHook returns an instance of the client approval middleware. +func NewHook(cfg Config) (middleware.Hook, error) { + h := &hook{ + approved: make(map[bittorrent.ClientID]struct{}), + unapproved: make(map[bittorrent.ClientID]struct{}), + } + + for _, cidString := range cfg.Whitelist { + cidBytes := []byte(cidString) + if len(cidBytes) != 6 { + return nil, errors.New("client ID " + cidString + " must be 6 bytes") + } + var cid bittorrent.ClientID + copy(cid[:], cidBytes) + h.approved[cid] = struct{}{} + } + + for _, cidString := range cfg.Blacklist { + cidBytes := []byte(cidString) + if len(cidBytes) != 6 { + return nil, errors.New("client ID " + cidString + " must be 6 bytes") + } + var cid bittorrent.ClientID + copy(cid[:], cidBytes) + h.unapproved[cid] = struct{}{} + } + + return h, nil +} + +func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { + clientID := bittorrent.NewClientID(req.Peer.ID) + + if len(h.approved) > 0 { + if _, found := h.approved[clientID]; !found { + return ErrClientUnapproved + } + } + + if len(h.unapproved) > 0 { + if _, found := h.unapproved[clientID]; found { + return ErrClientUnapproved + } + } + + return nil +} + +func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp 
*bittorrent.ScrapeResponse) error { + // Scrapes don't require any protection. + return nil +} diff --git a/middleware/clientwhitelist/clientwhitelist.go b/middleware/clientwhitelist/clientwhitelist.go deleted file mode 100644 index 3443e05..0000000 --- a/middleware/clientwhitelist/clientwhitelist.go +++ /dev/null @@ -1,49 +0,0 @@ -// Package clientwhitelist implements a Hook that fails an Announce if the -// client's PeerID does not begin with any of the approved prefixes. -package clientwhitelist - -import ( - "context" - "errors" - - "github.com/chihaya/chihaya/bittorrent" - "github.com/chihaya/chihaya/middleware" -) - -// ClientUnapproved is the error returned when a client's PeerID fails to -// begin with an approved prefix. -var ClientUnapproved = bittorrent.ClientError("unapproved client") - -type hook struct { - approved map[bittorrent.ClientID]struct{} -} - -func NewHook(approved []string) (middleware.Hook, error) { - h := &hook{ - approved: make(map[bittorrent.ClientID]struct{}), - } - - for _, cidString := range approved { - cidBytes := []byte(cidString) - if len(cidBytes) != 6 { - return nil, errors.New("clientID " + cidString + " must be 6 bytes") - } - var cid bittorrent.ClientID - copy(cid[:], cidBytes) - h.approved[cid] = struct{}{} - } - - return h, nil -} - -func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { - if _, found := h.approved[bittorrent.NewClientID(req.Peer.ID)]; !found { - return ClientUnapproved - } - - return nil -} - -func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) error { - return nil -} From 62605706359e0b047249466b224b974cd062418c Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Fri, 2 Sep 2016 15:53:28 -0400 Subject: [PATCH 60/74] http: extract query parser to bittorrent package --- bittorrent/bittorrent.go | 5 - bittorrent/params.go | 192 ++++++++++++++++++ .../params_test.go | 43 ++-- 
frontend/http/parser.go | 4 +- frontend/http/query_params.go | 124 ----------- 5 files changed, 222 insertions(+), 146 deletions(-) create mode 100644 bittorrent/params.go rename frontend/http/query_params_test.go => bittorrent/params_test.go (74%) delete mode 100644 frontend/http/query_params.go diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go index eb18451..75c7289 100644 --- a/bittorrent/bittorrent.go +++ b/bittorrent/bittorrent.go @@ -124,11 +124,6 @@ func (p Peer) Equal(x Peer) bool { return p.EqualEndpoint(x) && p.ID == x.ID } // EqualEndpoint reports whether p and x have the same endpoint. func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP) } -// Params is used to fetch request optional parameters from an Announce. -type Params interface { - String(key string) (string, bool) -} - // ClientError represents an error that should be exposed to the client over // the BitTorrent protocol implementation. type ClientError string diff --git a/bittorrent/params.go b/bittorrent/params.go new file mode 100644 index 0000000..9cb33c6 --- /dev/null +++ b/bittorrent/params.go @@ -0,0 +1,192 @@ +package bittorrent + +import ( + "errors" + "net/url" + "strconv" + "strings" +) + +// Params is used to fetch (optional) request parameters from an Announce. +// For HTTP Announces this includes the request path and parsed query, for UDP +// Announces this is the extracted path and parsed query from optional URLData +// as specified in BEP41. +// +// See ParseURLData for specifics on parsing and limitations. +type Params interface { + // String returns a string parsed from a query. Every key can be + // returned as a string because they are encoded in the URL as strings. + String(key string) (string, bool) + + // RawPath returns the raw path from the request URL. + // The path returned can contain URL encoded data. + // For a request of the form "/announce?port=1234" this would return + // "/announce". 
+ RawPath() string + + // RawQuery returns the raw query from the request URL, excluding the + // delimiter '?'. + // For a request of the form "/announce?port=1234" this would return + // "port=1234" + RawQuery() string +} + +// ErrKeyNotFound is returned when a provided key has no value associated with +// it. +var ErrKeyNotFound = errors.New("query: value for the provided key does not exist") + +// ErrInvalidInfohash is returned when parsing a query encounters an infohash +// with invalid length. +var ErrInvalidInfohash = errors.New("query: invalid infohash") + +// QueryParams parses a URL Query and implements the Params interface with some +// additional helpers. +type QueryParams struct { + path string + query string + params map[string]string + infoHashes []InfoHash +} + +// ParseURLData parses a request URL or UDP URLData as defined in BEP41. +// It expects a concatenated string of the request's path and query parts as +// defined in RFC 3986. As both the udp: and http: scheme used by BitTorrent +// include an authority part the path part must always begin with a slash. +// An example of the expected URLData would be "/announce?port=1234&uploaded=0" +// or "/?auth=0x1337". +// HTTP servers should pass (*http.Request).RequestURI, UDP servers should +// pass the concatenated, unchanged URLData as defined in BEP41. +// +// Note that, in the case of a key occurring multiple times in the query, only +// the last value for that key is kept. +// The only exception to this rule is the key "info_hash" which will attempt to +// parse each value as an InfoHash and return an error if parsing fails. All +// InfoHashes are collected and can later be retrieved by calling the InfoHashes +// method. 
+func ParseURLData(urlData string) (*QueryParams, error) { + var path, query string + + queryDelim := strings.IndexAny(urlData, "?") + if queryDelim == -1 { + path = urlData + } else { + path = urlData[:queryDelim] + query = urlData[queryDelim+1:] + } + + q, err := parseQuery(query) + if err != nil { + return nil, err + } + q.path = path + return q, nil +} + +// parseQuery parses a URL query into QueryParams. +// The query is expected to exclude the delimiting '?'. +func parseQuery(rawQuery string) (*QueryParams, error) { + var ( + keyStart, keyEnd int + valStart, valEnd int + + onKey = true + + q = &QueryParams{ + query: rawQuery, + infoHashes: nil, + params: make(map[string]string), + } + ) + + for i, length := 0, len(rawQuery); i < length; i++ { + separator := rawQuery[i] == '&' || rawQuery[i] == ';' + last := i == length-1 + + if separator || last { + if onKey && !last { + keyStart = i + 1 + continue + } + + if last && !separator && !onKey { + valEnd = i + } + + keyStr, err := url.QueryUnescape(rawQuery[keyStart : keyEnd+1]) + if err != nil { + return nil, err + } + + var valStr string + + if valEnd > 0 { + valStr, err = url.QueryUnescape(rawQuery[valStart : valEnd+1]) + if err != nil { + return nil, err + } + } + + if keyStr == "info_hash" { + if len(valStr) != 20 { + return nil, ErrInvalidInfohash + } + q.infoHashes = append(q.infoHashes, InfoHashFromString(valStr)) + } else { + q.params[strings.ToLower(keyStr)] = valStr + } + + valEnd = 0 + onKey = true + keyStart = i + 1 + + } else if rawQuery[i] == '=' { + onKey = false + valStart = i + 1 + valEnd = 0 + } else if onKey { + keyEnd = i + } else { + valEnd = i + } + } + + return q, nil +} + +// String returns a string parsed from a query. Every key can be returned as a +// string because they are encoded in the URL as strings. +func (qp *QueryParams) String(key string) (string, bool) { + value, ok := qp.params[key] + return value, ok +} + +// Uint64 returns a uint parsed from a query. 
After being called, it is safe to +// cast the uint64 to your desired length. +func (qp *QueryParams) Uint64(key string) (uint64, error) { + str, exists := qp.params[key] + if !exists { + return 0, ErrKeyNotFound + } + + val, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return 0, err + } + + return val, nil +} + +// InfoHashes returns a list of requested infohashes. +func (qp *QueryParams) InfoHashes() []InfoHash { + return qp.infoHashes +} + +// RawPath returns the raw path from the parsed URL. +func (qp *QueryParams) RawPath() string { + return qp.path +} + +// RawQuery returns the raw query from the parsed URL. +func (qp *QueryParams) RawQuery() string { + return qp.query +} diff --git a/frontend/http/query_params_test.go b/bittorrent/params_test.go similarity index 74% rename from frontend/http/query_params_test.go rename to bittorrent/params_test.go index ec9a0d0..36d0819 100644 --- a/frontend/http/query_params_test.go +++ b/bittorrent/params_test.go @@ -1,4 +1,4 @@ -package http +package bittorrent import ( "net/url" @@ -6,11 +6,10 @@ import ( ) var ( - baseAddr = "https://www.subdomain.tracker.com:80/" - testInfoHash = "01234567890123456789" - testPeerID = "-TEST01-6wfG2wk6wWLc" + testPeerID = "-TEST01-6wfG2wk6wWLc" ValidAnnounceArguments = []url.Values{ + {}, {"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}}, {"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}}, {"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}}, @@ -26,7 +25,7 @@ var ( } InvalidQueries = []string{ - baseAddr + "announce/?" + "info_hash=%0%a", + "/announce?" 
+ "info_hash=%0%a", } ) @@ -45,28 +44,42 @@ func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool { return true } -func TestValidQueries(t *testing.T) { +func TestParseEmptyURLData(t *testing.T) { + parsedQuery, err := ParseURLData("") + if err != nil { + t.Fatal(err) + } + if parsedQuery == nil { + t.Fatal("Parsed query must not be nil") + } +} + +func TestParseValidURLData(t *testing.T) { for parseIndex, parseVal := range ValidAnnounceArguments { - parsedQueryObj, err := NewQueryParams(baseAddr + "announce/?" + parseVal.Encode()) + parsedQueryObj, err := ParseURLData("/announce?" + parseVal.Encode()) if err != nil { - t.Error(err) + t.Fatal(err) } if !mapArrayEqual(parseVal, parsedQueryObj.params) { - t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Recieved=%v\n", parseIndex, parseVal, parsedQueryObj.params) + t.Fatalf("Incorrect parse at item %d.\n Expected=%v\n Recieved=%v\n", parseIndex, parseVal, parsedQueryObj.params) + } + + if parsedQueryObj.path != "/announce" { + t.Fatalf("Incorrect path, expected %q, got %q", "/announce", parsedQueryObj.path) } } } -func TestInvalidQueries(t *testing.T) { +func TestParseInvalidURLData(t *testing.T) { for parseIndex, parseStr := range InvalidQueries { - parsedQueryObj, err := NewQueryParams(parseStr) + parsedQueryObj, err := ParseURLData(parseStr) if err == nil { - t.Error("Should have produced error", parseIndex) + t.Fatal("Should have produced error", parseIndex) } if parsedQueryObj != nil { - t.Error("Should be nil after error", parsedQueryObj, parseIndex) + t.Fatal("Should be nil after error", parsedQueryObj, parseIndex) } } } @@ -74,7 +87,7 @@ func TestInvalidQueries(t *testing.T) { func BenchmarkParseQuery(b *testing.B) { for bCount := 0; bCount < b.N; bCount++ { for parseIndex, parseStr := range ValidAnnounceArguments { - parsedQueryObj, err := NewQueryParams(baseAddr + "announce/?" 
+ parseStr.Encode()) + parsedQueryObj, err := parseQuery(parseStr.Encode()) if err != nil { b.Error(err, parseIndex) b.Log(parsedQueryObj) @@ -86,7 +99,7 @@ func BenchmarkParseQuery(b *testing.B) { func BenchmarkURLParseQuery(b *testing.B) { for bCount := 0; bCount < b.N; bCount++ { for parseIndex, parseStr := range ValidAnnounceArguments { - parsedQueryObj, err := url.ParseQuery(baseAddr + "announce/?" + parseStr.Encode()) + parsedQueryObj, err := url.ParseQuery(parseStr.Encode()) if err != nil { b.Error(err, parseIndex) b.Log(parsedQueryObj) diff --git a/frontend/http/parser.go b/frontend/http/parser.go index 7e7674a..d873f40 100644 --- a/frontend/http/parser.go +++ b/frontend/http/parser.go @@ -13,7 +13,7 @@ import ( // If realIPHeader is not empty string, the first value of the HTTP Header with // that name will be used. func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) { - qp, err := NewQueryParams(r.URL.RawQuery) + qp, err := bittorrent.ParseURLData(r.RequestURI) if err != nil { return nil, err } @@ -84,7 +84,7 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) ( // ParseScrape parses an bittorrent.ScrapeRequest from an http.Request. func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) { - qp, err := NewQueryParams(r.URL.RawQuery) + qp, err := bittorrent.ParseURLData(r.RequestURI) if err != nil { return nil, err } diff --git a/frontend/http/query_params.go b/frontend/http/query_params.go deleted file mode 100644 index 415b4fc..0000000 --- a/frontend/http/query_params.go +++ /dev/null @@ -1,124 +0,0 @@ -package http - -import ( - "errors" - "net/url" - "strconv" - "strings" - - "github.com/chihaya/chihaya/bittorrent" -) - -// ErrKeyNotFound is returned when a provided key has no value associated with -// it. 
-var ErrKeyNotFound = errors.New("http: value for the provided key does not exist") - -// ErrInvalidInfohash is returned when parsing a query encounters an infohash -// with invalid length. -var ErrInvalidInfohash = errors.New("http: invalid infohash") - -// QueryParams parses an HTTP Query and implements the bittorrent.Params -// interface with some additional helpers. -type QueryParams struct { - query string - params map[string]string - infoHashes []bittorrent.InfoHash -} - -// NewQueryParams parses a raw URL query. -func NewQueryParams(query string) (*QueryParams, error) { - var ( - keyStart, keyEnd int - valStart, valEnd int - - onKey = true - - q = &QueryParams{ - query: query, - infoHashes: nil, - params: make(map[string]string), - } - ) - - for i, length := 0, len(query); i < length; i++ { - separator := query[i] == '&' || query[i] == ';' || query[i] == '?' - last := i == length-1 - - if separator || last { - if onKey && !last { - keyStart = i + 1 - continue - } - - if last && !separator && !onKey { - valEnd = i - } - - keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1]) - if err != nil { - return nil, err - } - - var valStr string - - if valEnd > 0 { - valStr, err = url.QueryUnescape(query[valStart : valEnd+1]) - if err != nil { - return nil, err - } - } - - if keyStr == "info_hash" { - if len(valStr) != 20 { - return nil, ErrInvalidInfohash - } - q.infoHashes = append(q.infoHashes, bittorrent.InfoHashFromString(valStr)) - } else { - q.params[strings.ToLower(keyStr)] = valStr - } - - valEnd = 0 - onKey = true - keyStart = i + 1 - - } else if query[i] == '=' { - onKey = false - valStart = i + 1 - valEnd = 0 - } else if onKey { - keyEnd = i - } else { - valEnd = i - } - } - - return q, nil -} - -// String returns a string parsed from a query. Every key can be returned as a -// string because they are encoded in the URL as strings. 
-func (qp *QueryParams) String(key string) (string, bool) { - value, ok := qp.params[key] - return value, ok -} - -// Uint64 returns a uint parsed from a query. After being called, it is safe to -// cast the uint64 to your desired length. -func (qp *QueryParams) Uint64(key string) (uint64, error) { - str, exists := qp.params[key] - if !exists { - return 0, ErrKeyNotFound - } - - val, err := strconv.ParseUint(str, 10, 64) - if err != nil { - return 0, err - } - - return val, nil -} - -// InfoHashes returns a list of requested infohashes. -func (qp *QueryParams) InfoHashes() []bittorrent.InfoHash { - return qp.infoHashes -} From c667497c6dd90cb171e45ebdb04cc77b085539d5 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Tue, 30 Aug 2016 22:21:05 -0400 Subject: [PATCH 61/74] udp: implement bep41 --- frontend/udp/parser.go | 78 ++++++++++++++++++++++++------------- frontend/udp/parser_test.go | 71 +++++++++++++++++++++++++++++++++ 2 files changed, 123 insertions(+), 26 deletions(-) create mode 100644 frontend/udp/parser_test.go diff --git a/frontend/udp/parser.go b/frontend/udp/parser.go index 43dc329..3c0fed5 100644 --- a/frontend/udp/parser.go +++ b/frontend/udp/parser.go @@ -1,8 +1,11 @@ package udp import ( + "bytes" "encoding/binary" + "fmt" "net" + "sync" "github.com/chihaya/chihaya/bittorrent" ) @@ -37,11 +40,12 @@ var ( bittorrent.Stopped, } - errMalformedPacket = bittorrent.ClientError("malformed packet") - errMalformedIP = bittorrent.ClientError("malformed IP address") - errMalformedEvent = bittorrent.ClientError("malformed event ID") - errUnknownAction = bittorrent.ClientError("unknown action ID") - errBadConnectionID = bittorrent.ClientError("bad connection ID") + errMalformedPacket = bittorrent.ClientError("malformed packet") + errMalformedIP = bittorrent.ClientError("malformed IP address") + errMalformedEvent = bittorrent.ClientError("malformed event ID") + errUnknownAction = bittorrent.ClientError("unknown action ID") + errBadConnectionID = 
bittorrent.ClientError("bad connection ID") + errUnknownOptionType = bittorrent.ClientError("unknown option type") ) // ParseAnnounce parses an AnnounceRequest from a UDP request. @@ -76,7 +80,7 @@ func ParseAnnounce(r Request, allowIPSpoofing bool) (*bittorrent.AnnounceRequest numWant := binary.BigEndian.Uint32(r.Packet[92:96]) port := binary.BigEndian.Uint16(r.Packet[96:98]) - params, err := handleOptionalParameters(r.Packet) + params, err := handleOptionalParameters(r.Packet[98:]) if err != nil { return nil, err } @@ -97,43 +101,65 @@ func ParseAnnounce(r Request, allowIPSpoofing bool) (*bittorrent.AnnounceRequest }, nil } +type buffer struct { + bytes.Buffer +} + +var bufferFree = sync.Pool{ + New: func() interface{} { return new(buffer) }, +} + +func newBuffer() *buffer { + return bufferFree.Get().(*buffer) +} + +func (b *buffer) free() { + b.Reset() + bufferFree.Put(b) +} + // handleOptionalParameters parses the optional parameters as described in BEP // 41 and updates an announce with the values parsed. 
-func handleOptionalParameters(packet []byte) (params bittorrent.Params, err error) { - if len(packet) <= 98 { - return +func handleOptionalParameters(packet []byte) (bittorrent.Params, error) { + if len(packet) == 0 { + return bittorrent.ParseURLData("") } - optionStartIndex := 98 - for optionStartIndex < len(packet)-1 { - option := packet[optionStartIndex] + var buf = newBuffer() + defer buf.free() + + for i := 0; i < len(packet); { + option := packet[i] switch option { case optionEndOfOptions: - return - + return bittorrent.ParseURLData(buf.String()) case optionNOP: - optionStartIndex++ - + i++ case optionURLData: - if optionStartIndex+1 > len(packet)-1 { - return params, errMalformedPacket + if i+1 >= len(packet) { + return nil, errMalformedPacket } - length := int(packet[optionStartIndex+1]) - if optionStartIndex+1+length > len(packet)-1 { - return params, errMalformedPacket + length := int(packet[i+1]) + if i+2+length > len(packet) { + return nil, errMalformedPacket } - // TODO(chihaya): Actually parse the URL Data as described in BEP 41 - // into something that fulfills the bittorrent.Params interface. + n, err := buf.Write(packet[i+2 : i+2+length]) + if err != nil { + return nil, err + } + if n != length { + return nil, fmt.Errorf("expected to write %d bytes, wrote %d", length, n) + } - optionStartIndex += 1 + length + i += 2 + length default: - return + return nil, errUnknownOptionType } } - return + return bittorrent.ParseURLData(buf.String()) } // ParseScrape parses a ScrapeRequest from a UDP request. 
diff --git a/frontend/udp/parser_test.go b/frontend/udp/parser_test.go new file mode 100644 index 0000000..a6c6b92 --- /dev/null +++ b/frontend/udp/parser_test.go @@ -0,0 +1,71 @@ +package udp + +import "testing" + +var table = []struct { + data []byte + values map[string]string + err error +}{ + { + []byte{0x2, 0x5, '/', '?', 'a', '=', 'b'}, + map[string]string{"a": "b"}, + nil, + }, + { + []byte{0x2, 0x0}, + map[string]string{}, + nil, + }, + { + []byte{0x2, 0x1}, + nil, + errMalformedPacket, + }, + { + []byte{0x2}, + nil, + errMalformedPacket, + }, + { + []byte{0x2, 0x8, '/', 'c', '/', 'd', '?', 'a', '=', 'b'}, + map[string]string{"a": "b"}, + nil, + }, + { + []byte{0x2, 0x2, '/', '?', 0x2, 0x3, 'a', '=', 'b'}, + map[string]string{"a": "b"}, + nil, + }, + { + []byte{0x2, 0x9, '/', '?', 'a', '=', 'b', '%', '2', '0', 'c'}, + map[string]string{"a": "b c"}, + nil, + }, +} + +func TestHandleOptionalParameters(t *testing.T) { + for _, testCase := range table { + params, err := handleOptionalParameters(testCase.data) + if err != testCase.err { + if testCase.err == nil { + t.Fatalf("expected no parsing error for %x but got %s", testCase.data, err) + } else { + t.Fatalf("expected parsing error for %x", testCase.data) + } + } + if testCase.values != nil { + if params == nil { + t.Fatalf("expected values %v for %x", testCase.values, testCase.data) + } else { + for key, want := range testCase.values { + if got, ok := params.String(key); !ok { + t.Fatalf("params missing entry %s for data %x", key, testCase.data) + } else if got != want { + t.Fatalf("expected param %s=%s, but was %s for data %x", key, want, got, testCase.data) + } + } + } + } + } +} From d66ed27dd9e870640cb4db89c59ceef36edfcbc8 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Fri, 2 Sep 2016 16:42:01 -0400 Subject: [PATCH 62/74] udp: add support for opentracker-style IPv6 announces --- frontend/udp/frontend.go | 6 +++--- frontend/udp/parser.go | 22 ++++++++++++++++------ frontend/udp/writer.go | 18 
+++++++++++++++--- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index 1cf3d03..c135787 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -192,11 +192,11 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), t.PrivateKey)) return - case announceActionID: + case announceActionID, announceV6ActionID: actionName = "announce" var req *bittorrent.AnnounceRequest - req, err = ParseAnnounce(r, t.AllowIPSpoofing) + req, err = ParseAnnounce(r, t.AllowIPSpoofing, actionID == announceV6ActionID) if err != nil { WriteError(w, txID, err) return @@ -209,7 +209,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string return } - WriteAnnounce(w, txID, resp) + WriteAnnounce(w, txID, resp, actionID == announceV6ActionID) go t.logic.AfterAnnounce(context.TODO(), req, resp) diff --git a/frontend/udp/parser.go b/frontend/udp/parser.go index 3c0fed5..ccddfff 100644 --- a/frontend/udp/parser.go +++ b/frontend/udp/parser.go @@ -15,6 +15,7 @@ const ( announceActionID scrapeActionID errorActionID + announceV6ActionID ) // Option-Types as described in BEP 41 and BEP 45. @@ -51,8 +52,17 @@ var ( // ParseAnnounce parses an AnnounceRequest from a UDP request. // // If allowIPSpoofing is true, IPs provided via params will be used. 
-func ParseAnnounce(r Request, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) { - if len(r.Packet) < 98 { +// +// If v6 is true the announce will be parsed as an IPv6 announce "the +// opentracker way", see +// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/ +func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceRequest, error) { + ipEnd := 84 + net.IPv4len + if v6 { + ipEnd = 84 + net.IPv6len + } + + if len(r.Packet) < ipEnd+10 { return nil, errMalformedPacket } @@ -68,7 +78,7 @@ func ParseAnnounce(r Request, allowIPSpoofing bool) (*bittorrent.AnnounceRequest } ip := r.IP - ipbytes := r.Packet[84:88] + ipbytes := r.Packet[84:ipEnd] if allowIPSpoofing { ip = net.IP(ipbytes) } @@ -77,10 +87,10 @@ func ParseAnnounce(r Request, allowIPSpoofing bool) (*bittorrent.AnnounceRequest return nil, errMalformedIP } - numWant := binary.BigEndian.Uint32(r.Packet[92:96]) - port := binary.BigEndian.Uint16(r.Packet[96:98]) + numWant := binary.BigEndian.Uint32(r.Packet[ipEnd+4 : ipEnd+8]) + port := binary.BigEndian.Uint16(r.Packet[ipEnd+8 : ipEnd+10]) - params, err := handleOptionalParameters(r.Packet[98:]) + params, err := handleOptionalParameters(r.Packet[ipEnd+10:]) if err != nil { return nil, err } diff --git a/frontend/udp/writer.go b/frontend/udp/writer.go index e3a495f..86eef08 100644 --- a/frontend/udp/writer.go +++ b/frontend/udp/writer.go @@ -25,15 +25,27 @@ func WriteError(w io.Writer, txID []byte, err error) { } // WriteAnnounce encodes an announce response according to BEP 15. -func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse) { +// The peers returned will be resp.IPv6Peers or resp.IPv4Peers, depending on +// whether v6 is set. The action ID will be 4, according to +// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/. 
+func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6 bool) { var buf bytes.Buffer - writeHeader(&buf, txID, announceActionID) + if v6 { + writeHeader(&buf, txID, announceV6ActionID) + } else { + writeHeader(&buf, txID, announceActionID) + } binary.Write(&buf, binary.BigEndian, uint32(resp.Interval/time.Second)) binary.Write(&buf, binary.BigEndian, uint32(resp.Incomplete)) binary.Write(&buf, binary.BigEndian, uint32(resp.Complete)) - for _, peer := range resp.IPv4Peers { + peers := resp.IPv4Peers + if v6 { + peers = resp.IPv6Peers + } + + for _, peer := range peers { buf.Write(peer.IP) binary.Write(&buf, binary.BigEndian, peer.Port) } From 8997d76f1ecf2ad0e1072224a3dc007984c3e488 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Sat, 3 Sep 2016 17:09:00 -0400 Subject: [PATCH 63/74] udp: use free list for response buffers --- frontend/udp/writer.go | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/frontend/udp/writer.go b/frontend/udp/writer.go index 86eef08..d8fc87d 100644 --- a/frontend/udp/writer.go +++ b/frontend/udp/writer.go @@ -1,7 +1,6 @@ package udp import ( - "bytes" "encoding/binary" "fmt" "io" @@ -17,11 +16,12 @@ func WriteError(w io.Writer, txID []byte, err error) { err = fmt.Errorf("internal error occurred: %s", err.Error()) } - var buf bytes.Buffer - writeHeader(&buf, txID, errorActionID) + buf := newBuffer() + writeHeader(buf, txID, errorActionID) buf.WriteString(err.Error()) buf.WriteRune('\000') w.Write(buf.Bytes()) + buf.free() } // WriteAnnounce encodes an announce response according to BEP 15. @@ -29,16 +29,16 @@ func WriteError(w io.Writer, txID []byte, err error) { // whether v6 is set. The action ID will be 4, according to // http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/. 
func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6 bool) { - var buf bytes.Buffer + buf := newBuffer() if v6 { - writeHeader(&buf, txID, announceV6ActionID) + writeHeader(buf, txID, announceV6ActionID) } else { - writeHeader(&buf, txID, announceActionID) + writeHeader(buf, txID, announceActionID) } - binary.Write(&buf, binary.BigEndian, uint32(resp.Interval/time.Second)) - binary.Write(&buf, binary.BigEndian, uint32(resp.Incomplete)) - binary.Write(&buf, binary.BigEndian, uint32(resp.Complete)) + binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second)) + binary.Write(buf, binary.BigEndian, uint32(resp.Incomplete)) + binary.Write(buf, binary.BigEndian, uint32(resp.Complete)) peers := resp.IPv4Peers if v6 { @@ -47,35 +47,38 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, for _, peer := range peers { buf.Write(peer.IP) - binary.Write(&buf, binary.BigEndian, peer.Port) + binary.Write(buf, binary.BigEndian, peer.Port) } w.Write(buf.Bytes()) + buf.free() } // WriteScrape encodes a scrape response according to BEP 15. func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) { - var buf bytes.Buffer + buf := newBuffer() - writeHeader(&buf, txID, scrapeActionID) + writeHeader(buf, txID, scrapeActionID) for _, scrape := range resp.Files { - binary.Write(&buf, binary.BigEndian, scrape.Complete) - binary.Write(&buf, binary.BigEndian, scrape.Snatches) - binary.Write(&buf, binary.BigEndian, scrape.Incomplete) + binary.Write(buf, binary.BigEndian, scrape.Complete) + binary.Write(buf, binary.BigEndian, scrape.Snatches) + binary.Write(buf, binary.BigEndian, scrape.Incomplete) } w.Write(buf.Bytes()) + buf.free() } // WriteConnectionID encodes a new connection response according to BEP 15. 
func WriteConnectionID(w io.Writer, txID, connID []byte) { - var buf bytes.Buffer + buf := newBuffer() - writeHeader(&buf, txID, connectActionID) + writeHeader(buf, txID, connectActionID) buf.Write(connID) w.Write(buf.Bytes()) + buf.free() } // writeHeader writes the action and transaction ID to the provided response From fa328396230d34843192f2952193b478d1f382cc Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 5 Sep 2016 12:10:42 -0400 Subject: [PATCH 64/74] replace std log w/ logrus There still needs to be much more logged with the debug level. --- cmd/chihaya/main.go | 22 ++++++++++++---------- middleware/jwt/jwt.go | 8 ++++---- middleware/middleware.go | 6 +++--- storage/memory/peer_store.go | 6 +++--- 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/cmd/chihaya/main.go b/cmd/chihaya/main.go index 3473717..0cc265d 100644 --- a/cmd/chihaya/main.go +++ b/cmd/chihaya/main.go @@ -2,13 +2,13 @@ package main import ( "errors" - "log" "net/http" "os" "os/signal" "runtime/pprof" "syscall" + log "github.com/Sirupsen/logrus" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" @@ -21,7 +21,7 @@ import ( func rootCmdRun(cmd *cobra.Command, args []string) error { cpuProfilePath, _ := cmd.Flags().GetString("cpuprofile") if cpuProfilePath != "" { - log.Println("enabled CPU profiling to " + cpuProfilePath) + log.Infoln("enabled CPU profiling to", cpuProfilePath) f, err := os.Create(cpuProfilePath) if err != nil { return err @@ -42,26 +42,26 @@ func rootCmdRun(cmd *cobra.Command, args []string) error { Addr: cfg.PrometheusAddr, Handler: prometheus.Handler(), } - log.Println("started serving prometheus stats on", cfg.PrometheusAddr) + log.Infoln("started serving prometheus stats on", cfg.PrometheusAddr) if err := promServer.ListenAndServe(); err != nil { - log.Fatal(err) + log.Fatalln("failed to start prometheus server:", err.Error()) } }() // Force the compiler to enforce memory against the storage interface. 
peerStore, err := memory.New(cfg.Storage) if err != nil { - return err + return errors.New("failed to create memory storage: " + err.Error()) } preHooks, postHooks, err := configFile.CreateHooks() if err != nil { - return err + return errors.New("failed to create hooks: " + err.Error()) } logic := middleware.NewLogic(cfg.Config, peerStore, preHooks, postHooks) if err != nil { - return err + return errors.New("failed to create TrackerLogic: " + err.Error()) } shutdown := make(chan struct{}) @@ -74,7 +74,7 @@ func rootCmdRun(cmd *cobra.Command, args []string) error { httpFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig) go func() { - log.Println("started serving HTTP on", cfg.HTTPConfig.Addr) + log.Infoln("started serving HTTP on", cfg.HTTPConfig.Addr) if err := httpFrontend.ListenAndServe(); err != nil { errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error()) } @@ -85,7 +85,7 @@ func rootCmdRun(cmd *cobra.Command, args []string) error { udpFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig) go func() { - log.Println("started serving UDP on", cfg.UDPConfig.Addr) + log.Infoln("started serving UDP on", cfg.UDPConfig.Addr) if err := udpFrontend.ListenAndServe(); err != nil { errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error()) } @@ -114,6 +114,8 @@ func rootCmdRun(cmd *cobra.Command, args []string) error { } } + // TODO(jzelinskie): stop hooks here + close(errChan) }() @@ -125,7 +127,7 @@ func rootCmdRun(cmd *cobra.Command, args []string) error { close(shutdown) closed = true } else { - log.Println(bufErr) + log.Infoln(bufErr) } bufErr = err } diff --git a/middleware/jwt/jwt.go b/middleware/jwt/jwt.go index 1743c1f..8b9c937 100644 --- a/middleware/jwt/jwt.go +++ b/middleware/jwt/jwt.go @@ -11,7 +11,6 @@ import ( "crypto" "encoding/json" "errors" - "log" "net/http" "net/url" "time" @@ -19,6 +18,7 @@ import ( jc "github.com/SermoDigital/jose/crypto" "github.com/SermoDigital/jose/jws" 
"github.com/SermoDigital/jose/jwt" + log "github.com/Sirupsen/logrus" "github.com/mendsley/gojwk" "github.com/chihaya/chihaya/bittorrent" @@ -64,7 +64,7 @@ func NewHook(cfg Config) middleware.Hook { case <-time.After(cfg.JWKUpdateInterval): resp, err := http.Get(cfg.JWKSetURL) if err != nil { - log.Println("failed to fetch JWK Set: " + err.Error()) + log.Errorln("failed to fetch JWK Set: " + err.Error()) continue } @@ -72,7 +72,7 @@ func NewHook(cfg Config) middleware.Hook { err = json.NewDecoder(resp.Body).Decode(&parsedJWKs) if err != nil { resp.Body.Close() - log.Println("failed to decode JWK JSON: " + err.Error()) + log.Errorln("failed to decode JWK JSON: " + err.Error()) continue } resp.Body.Close() @@ -81,7 +81,7 @@ func NewHook(cfg Config) middleware.Hook { for kid, parsedJWK := range parsedJWKs { publicKey, err := parsedJWK.DecodePublicKey() if err != nil { - log.Println("failed to decode JWK into public key: " + err.Error()) + log.Errorln("failed to decode JWK into public key: " + err.Error()) continue } keys[kid] = publicKey diff --git a/middleware/middleware.go b/middleware/middleware.go index 0778ce0..0c9b86b 100644 --- a/middleware/middleware.go +++ b/middleware/middleware.go @@ -4,9 +4,9 @@ package middleware import ( "context" - "log" "time" + log "github.com/Sirupsen/logrus" "github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/frontend" "github.com/chihaya/chihaya/storage" @@ -65,7 +65,7 @@ func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequ func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { for _, h := range l.postHooks { if err := h.HandleAnnounce(ctx, req, resp); err != nil { - log.Println("chihaya: post-announce hooks failed:", err.Error()) + log.Errorln("chihaya: post-announce hooks failed:", err.Error()) return } } @@ -90,7 +90,7 @@ func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) func (l *Logic) 
AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { for _, h := range l.postHooks { if err := h.HandleScrape(ctx, req, resp); err != nil { - log.Println("chihaya: post-scrape hooks failed:", err.Error()) + log.Errorln("chihaya: post-scrape hooks failed:", err.Error()) return } } diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go index 02211a1..1947d48 100644 --- a/storage/memory/peer_store.go +++ b/storage/memory/peer_store.go @@ -3,12 +3,13 @@ package memory import ( "encoding/binary" "errors" - "log" "net" "runtime" "sync" "time" + log "github.com/Sirupsen/logrus" + "github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/storage" ) @@ -53,7 +54,7 @@ func New(cfg Config) (storage.PeerStore, error) { return case <-time.After(cfg.GarbageCollectionInterval): before := time.Now().Add(-cfg.PeerLifetime) - log.Println("memory: purging peers with no announces since ", before) + log.Debugln("memory: purging peers with no announces since", before) ps.collectGarbage(before) } } @@ -327,7 +328,6 @@ func (s *peerStore) collectGarbage(cutoff time.Time) error { default: } - log.Printf("memory: collecting garbage. 
Cutoff time: %s", cutoff.String()) cutoffUnix := cutoff.UnixNano() for _, shard := range s.shards { shard.RLock() From 486e898ce944822c262a6fbd7217ad0b4eb0257f Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 5 Sep 2016 12:19:54 -0400 Subject: [PATCH 65/74] cmd: add --debug for debug logging --- cmd/chihaya/main.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/chihaya/main.go b/cmd/chihaya/main.go index 0cc265d..2ff789a 100644 --- a/cmd/chihaya/main.go +++ b/cmd/chihaya/main.go @@ -19,6 +19,11 @@ import ( ) func rootCmdRun(cmd *cobra.Command, args []string) error { + debugLog, _ := cmd.Flags().GetBool("debug") + if debugLog { + log.SetLevel(log.DebugLevel) + log.Debugln("debug logging enabled") + } cpuProfilePath, _ := cmd.Flags().GetString("cpuprofile") if cpuProfilePath != "" { log.Infoln("enabled CPU profiling to", cpuProfilePath) @@ -149,6 +154,7 @@ func main() { } rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file") rootCmd.Flags().String("cpuprofile", "", "location to save a CPU profile") + rootCmd.Flags().Bool("debug", false, "enable debug logging") if err := rootCmd.Execute(); err != nil { log.Fatal(err) From 6cd505269ee7bc33a087c97822e7e82d1e017812 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 5 Sep 2016 12:22:49 -0400 Subject: [PATCH 66/74] bt: remove unused Tracker interface --- bittorrent/bittorrent.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/bittorrent/bittorrent.go b/bittorrent/bittorrent.go index eb18451..5f650df 100644 --- a/bittorrent/bittorrent.go +++ b/bittorrent/bittorrent.go @@ -135,9 +135,3 @@ type ClientError string // Error implements the error interface for ClientError. func (c ClientError) Error() string { return string(c) } - -// Tracker represents an implementation of the BitTorrent tracker protocol. 
-type Tracker interface { - ListenAndServe() error - Stop() -} From 093a748457037ed29d3685ecd03e85c86365dbfb Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 5 Sep 2016 12:25:11 -0400 Subject: [PATCH 67/74] frontend: s/ctx.TODO/ctx.Background --- frontend/http/frontend.go | 8 ++++---- frontend/udp/frontend.go | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go index 48cedf9..6f976cf 100644 --- a/frontend/http/frontend.go +++ b/frontend/http/frontend.go @@ -136,7 +136,7 @@ func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr return } - resp, err := t.logic.HandleAnnounce(context.TODO(), req) + resp, err := t.logic.HandleAnnounce(context.Background(), req) if err != nil { WriteError(w, err) return @@ -148,7 +148,7 @@ func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr return } - go t.logic.AfterAnnounce(context.TODO(), req, resp) + go t.logic.AfterAnnounce(context.Background(), req, resp) } // scrapeRoute parses and responds to a Scrape by using t.TrackerLogic. 
@@ -163,7 +163,7 @@ func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou return } - resp, err := t.logic.HandleScrape(context.TODO(), req) + resp, err := t.logic.HandleScrape(context.Background(), req) if err != nil { WriteError(w, err) return @@ -175,5 +175,5 @@ func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou return } - go t.logic.AfterScrape(context.TODO(), req, resp) + go t.logic.AfterScrape(context.Background(), req, resp) } diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index 1cf3d03..757ec13 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -203,7 +203,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string } var resp *bittorrent.AnnounceResponse - resp, err = t.logic.HandleAnnounce(context.TODO(), req) + resp, err = t.logic.HandleAnnounce(context.Background(), req) if err != nil { WriteError(w, txID, err) return @@ -211,7 +211,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string WriteAnnounce(w, txID, resp) - go t.logic.AfterAnnounce(context.TODO(), req, resp) + go t.logic.AfterAnnounce(context.Background(), req, resp) return @@ -226,7 +226,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string } var resp *bittorrent.ScrapeResponse - resp, err = t.logic.HandleScrape(context.TODO(), req) + resp, err = t.logic.HandleScrape(context.Background(), req) if err != nil { WriteError(w, txID, err) return @@ -234,7 +234,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string WriteScrape(w, txID, resp) - go t.logic.AfterScrape(context.TODO(), req, resp) + go t.logic.AfterScrape(context.Background(), req, resp) return From c4706022d7d38ed8292ee0036aeeb1e055381fea Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 5 Sep 2016 12:27:00 -0400 Subject: [PATCH 68/74] udp: remove unnecessary returns in switch stmt --- frontend/udp/frontend.go | 8 
++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index 757ec13..4d0ef58 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -190,7 +190,6 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string } WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), t.PrivateKey)) - return case announceActionID: actionName = "announce" @@ -213,8 +212,6 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string go t.logic.AfterAnnounce(context.Background(), req, resp) - return - case scrapeActionID: actionName = "scrape" @@ -236,11 +233,10 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string go t.logic.AfterScrape(context.Background(), req, resp) - return - default: err = errUnknownAction WriteError(w, txID, err) - return } + + return } From 57ee2d0c9030e2f28ef939bf83c9bb1acc5fb501 Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 5 Sep 2016 12:30:03 -0400 Subject: [PATCH 69/74] bytepool: enforce equal length and cap --- frontend/udp/bytepool/bytepool.go | 6 +++--- frontend/udp/frontend.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/frontend/udp/bytepool/bytepool.go b/frontend/udp/bytepool/bytepool.go index f4ec893..93507a9 100644 --- a/frontend/udp/bytepool/bytepool.go +++ b/frontend/udp/bytepool/bytepool.go @@ -7,11 +7,11 @@ type BytePool struct { sync.Pool } -// New allocates a new BytePool with slices of the provided capacity. -func New(length, capacity int) *BytePool { +// New allocates a new BytePool with slices of equal length and capacity. 
+func New(length int) *BytePool { var bp BytePool bp.Pool.New = func() interface{} { - return make([]byte, length, capacity) + return make([]byte, length, length) } return &bp } diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go index 4d0ef58..2be9a3e 100644 --- a/frontend/udp/frontend.go +++ b/frontend/udp/frontend.go @@ -93,7 +93,7 @@ func (t *Frontend) ListenAndServe() error { } defer t.socket.Close() - pool := bytepool.New(2048, 2048) + pool := bytepool.New(2048) for { // Check to see if we need to shutdown. From d794f92c1438b252d40822952484bffb2d1f7c9d Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Thu, 18 Aug 2016 10:15:07 -0400 Subject: [PATCH 70/74] http: fix encoding of IPv4 peers with 16-byte addresses --- frontend/http/writer.go | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/frontend/http/writer.go b/frontend/http/writer.go index 7e0dead..54b3fe2 100644 --- a/frontend/http/writer.go +++ b/frontend/http/writer.go @@ -36,7 +36,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo // Add the IPv4 peers to the dictionary. for _, peer := range resp.IPv4Peers { - IPv4CompactDict = append(IPv4CompactDict, compact(peer)...) + IPv4CompactDict = append(IPv4CompactDict, compact4(peer)...) } if len(IPv4CompactDict) > 0 { bdict["peers"] = IPv4CompactDict @@ -44,7 +44,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo // Add the IPv6 peers to the dictionary. for _, peer := range resp.IPv6Peers { - IPv6CompactDict = append(IPv6CompactDict, compact(peer)...) + IPv6CompactDict = append(IPv6CompactDict, compact6(peer)...) 
} if len(IPv6CompactDict) > 0 { bdict["peers6"] = IPv6CompactDict @@ -82,8 +82,23 @@ func WriteScrapeResponse(w http.ResponseWriter, resp *bittorrent.ScrapeResponse) }) } -func compact(peer bittorrent.Peer) (buf []byte) { - buf = []byte(peer.IP) +func compact4(peer bittorrent.Peer) (buf []byte) { + if ip := peer.IP.To4(); ip == nil { + panic("non-IPv4 IP for Peer in IPv4Peers") + } else { + buf = []byte(ip) + } + buf = append(buf, byte(peer.Port>>8)) + buf = append(buf, byte(peer.Port&0xff)) + return +} + +func compact6(peer bittorrent.Peer) (buf []byte) { + if ip := peer.IP.To16(); ip == nil { + panic("non-IPv6 IP for Peer in IPv6Peers") + } else { + buf = []byte(ip) + } buf = append(buf, byte(peer.Port>>8)) buf = append(buf, byte(peer.Port&0xff)) return From 146fbedb8652a3628fe42fc2335e2670c8ae3865 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Mon, 5 Sep 2016 14:06:42 -0400 Subject: [PATCH 71/74] general: add missing documentation Fixes #46 --- middleware/middleware.go | 3 ++ storage/storage_bench.go | 98 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/middleware/middleware.go b/middleware/middleware.go index 0c9b86b..b6be862 100644 --- a/middleware/middleware.go +++ b/middleware/middleware.go @@ -12,12 +12,15 @@ import ( "github.com/chihaya/chihaya/storage" ) +// Config holds the configuration common across all middleware. type Config struct { AnnounceInterval time.Duration `yaml:"announce_interval"` } var _ frontend.TrackerLogic = &Logic{} +// NewLogic creates a new instance of a TrackerLogic that executes the provided +// middleware hooks. 
func NewLogic(cfg Config, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic { l := &Logic{ announceInterval: cfg.AnnounceInterval, diff --git a/storage/storage_bench.go b/storage/storage_bench.go index 1f0a225..babab7c 100644 --- a/storage/storage_bench.go +++ b/storage/storage_bench.go @@ -95,24 +95,40 @@ func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef ex } } +// Put benchmarks the PutSeeder method of a PeerStore by repeatedly Putting the +// same Peer for the same InfoHash. +// +// Put can run in parallel. func Put(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { return ps.PutSeeder(bd.infohashes[0], bd.peers[0]) }) } +// Put1k benchmarks the PutSeeder method of a PeerStore by cycling through 1000 +// Peers and Putting them into the swarm of one infohash. +// +// Put1k can run in parallel. func Put1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { return ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000]) }) } +// Put1kInfohash benchmarks the PutSeeder method of a PeerStore by cycling +// through 1000 infohashes and putting the same peer into their swarms. +// +// Put1kInfohash can run in parallel. func Put1kInfohash(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { return ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0]) }) } +// Put1kInfohash1k benchmarks the PutSeeder method of a PeerStore by cycling +// through 1000 infohashes and 1000 Peers and calling Put with them. +// +// Put1kInfohash1k can run in parallel. 
func Put1kInfohash1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) @@ -120,6 +136,10 @@ func Put1kInfohash1k(b *testing.B, ps PeerStore) { }) } +// PutDelete benchmarks the PutSeeder and DeleteSeeder methods of a PeerStore by +// calling PutSeeder followed by DeleteSeeder for one Peer and one infohash. +// +// PutDelete can not run in parallel. func PutDelete(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[0], bd.peers[0]) @@ -130,6 +150,10 @@ func PutDelete(b *testing.B, ps PeerStore) { }) } +// PutDelete1k benchmarks the PutSeeder and DeleteSeeder methods in the same way +// PutDelete does, but with one from 1000 Peers per iteration. +// +// PutDelete1k can not run in parallel. func PutDelete1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000]) @@ -140,6 +164,10 @@ func PutDelete1k(b *testing.B, ps PeerStore) { }) } +// PutDelete1kInfohash behaves like PutDelete1k with 1000 infohashes instead of +// 1000 Peers. +// +// PutDelete1kInfohash can not run in parallel. func PutDelete1kInfohash(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0]) @@ -149,6 +177,10 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) { }) } +// PutDelete1kInfohash1k behaves like PutDelete1k with 1000 infohashes in +// addition to 1000 Peers. +// +// PutDelete1kInfohash1k can not run in parallel. 
func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) @@ -160,6 +192,10 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) { }) } +// DeleteNonexist benchmarks the DeleteSeeder method of a PeerStore by +// attempting to delete a Peer that is nonexistent. +// +// DeleteNonexist can run in parallel. func DeleteNonexist(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.DeleteSeeder(bd.infohashes[0], bd.peers[0]) @@ -167,6 +203,10 @@ func DeleteNonexist(b *testing.B, ps PeerStore) { }) } +// DeleteNonexist1k benchmarks the DeleteSeeder method of a PeerStore by +// attempting to delete one of 1000 nonexistent Peers. +// +// DeleteNonexist can run in parallel. func DeleteNonexist1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000]) @@ -174,6 +214,10 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) { }) } +// DeleteNonexist1kInfohash benchmarks the DeleteSeeder method of a PeerStore by +// attempting to delete one Peer from one of 1000 infohashes. +// +// DeleteNonexist1kInfohash can run in parallel. func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) @@ -181,6 +225,10 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) { }) } +// DeleteNonexist1kInfohash1k benchmarks the Delete method of a PeerStore by +// attempting to delete one of 1000 Peers from one of 1000 Infohashes. +// +// DeleteNonexist1kInfohash1k can run in parallel. 
func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) @@ -188,6 +236,10 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) { }) } +// GradNonexist benchmarks the GraduateLeecher method of a PeerStore by +// attempting to graduate a nonexistent Peer. +// +// GradNonexist can run in parallel. func GradNonexist(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.GraduateLeecher(bd.infohashes[0], bd.peers[0]) @@ -195,6 +247,10 @@ func GradNonexist(b *testing.B, ps PeerStore) { }) } +// GradNonexist1k benchmarks the GraduateLeecher method of a PeerStore by +// attempting to graduate one of 1000 nonexistent Peers. +// +// GradNonexist1k can run in parallel. func GradNonexist1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000]) @@ -202,6 +258,10 @@ func GradNonexist1k(b *testing.B, ps PeerStore) { }) } +// GradNonexist1kInfohash benchmarks the GraduateLeecher method of a PeerStore +// by attempting to graduate a nonexistent Peer for one of 100 Infohashes. +// +// GradNonexist1kInfohash can run in parallel. func GradNonexist1kInfohash(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0]) @@ -209,6 +269,11 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) { }) } +// GradNonexist1kInfohash1k benchmarks the GraduateLeecher method of a PeerStore +// by attempting to graduate one of 1000 nonexistent Peers for one of 1000 +// infohashes. +// +// GradNonexist1kInfohash1k can run in parallel. 
func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) @@ -216,6 +281,11 @@ func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) { }) } +// PutGradDelete benchmarks the PutLeecher, GraduateLeecher and DeleteSeeder +// methods of a PeerStore by adding one leecher to a swarm, promoting it to a +// seeder and deleting the seeder. +// +// PutGradDelete can not run in parallel. func PutGradDelete(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutLeecher(bd.infohashes[0], bd.peers[0]) @@ -230,6 +300,9 @@ func PutGradDelete(b *testing.B, ps PeerStore) { }) } +// PutGradDelete1k behaves like PutGradDelete with one of 1000 Peers. +// +// PutGradDelete1k can not run in parallel. func PutGradDelete1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutLeecher(bd.infohashes[0], bd.peers[i%1000]) @@ -244,6 +317,10 @@ func PutGradDelete1k(b *testing.B, ps PeerStore) { }) } +// PutGradDelete1kInfohash behaves like PutGradDelete with one of 1000 +// infohashes. +// +// PutGradDelete1kInfohash can not run in parallel. func PutGradDelete1kInfohash(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[0]) @@ -258,6 +335,10 @@ func PutGradDelete1kInfohash(b *testing.B, ps PeerStore) { }) } +// PutGradDelete1kInfohash1k behaves like PutGradDelete with one of 1000 Peers +// and one of 1000 infohashes. +// +// PutGradDelete1kInfohash can not run in parallel. 
func PutGradDelete1kInfohash1k(b *testing.B, ps PeerStore) { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) @@ -290,6 +371,11 @@ func putPeers(ps PeerStore, bd *benchData) error { return nil } +// AnnounceLeecher benchmarks the AnnouncePeers method of a PeerStore for +// announcing a leecher. +// The swarm announced to has 500 seeders and 500 leechers. +// +// AnnounceLeecher can run in parallel. func AnnounceLeecher(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[0], false, 50, bd.peers[0]) @@ -297,6 +383,10 @@ func AnnounceLeecher(b *testing.B, ps PeerStore) { }) } +// AnnounceLeecher1kInfohash behaves like AnnounceLeecher with one of 1000 +// infohashes. +// +// AnnounceLeecher1kInfohash can run in parallel. func AnnounceLeecher1kInfohash(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[i%1000], false, 50, bd.peers[0]) @@ -304,6 +394,10 @@ func AnnounceLeecher1kInfohash(b *testing.B, ps PeerStore) { }) } +// AnnounceSeeder behaves like AnnounceLeecher with a seeder instead of a +// leecher. +// +// AnnounceSeeder can run in parallel. func AnnounceSeeder(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[0], true, 50, bd.peers[0]) @@ -311,6 +405,10 @@ func AnnounceSeeder(b *testing.B, ps PeerStore) { }) } +// AnnounceSeeder1kInfohash behaves like AnnounceSeeder with one of 1000 +// infohashes. +// +// AnnounceSeeder1kInfohash can run in parallel. 
func AnnounceSeeder1kInfohash(b *testing.B, ps PeerStore) { runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error { _, err := ps.AnnouncePeers(bd.infohashes[i%1000], true, 50, bd.peers[0]) From c31fa42659eb5c3c2ecb7c501b09136227cf202f Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Mon, 5 Sep 2016 14:06:59 -0400 Subject: [PATCH 72/74] travis: enable linting --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 525680b..1a523db 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ script: - go test -v $(go list ./... | grep -v /vendor/) - go vet $(go list ./... | grep -v /vendor/) - diff <(goimports -d $(find . -type f -name '*.go' -not -path "./vendor/*")) <(printf "") -#- for d in $(go list ./... | grep -v /vendor/); do diff <(golint $d) <(printf ""); done +- for d in $(go list ./... | grep -v /vendor/); do diff <(golint $d) <(printf ""); done notifications: irc: channels: From 49c06aac170c465c91c67973755b1d69ac5783cb Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Mon, 5 Sep 2016 18:18:29 -0400 Subject: [PATCH 73/74] add dockerfile and glide config --- Dockerfile | 13 ++++++++++ glide.lock | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ glide.yaml | 26 ++++++++++++++++++++ 3 files changed, 109 insertions(+) create mode 100644 Dockerfile create mode 100644 glide.lock create mode 100644 glide.yaml diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..84c30f7 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,13 @@ +FROM golang:alpine +MAINTAINER Jimmy Zelinskie + +RUN apk update && apk add curl git +RUN curl https://glide.sh/get | sh + +WORKDIR /go/src/github.com/chihaya/chihaya +ADD . 
/go/src/github.com/chihaya/chihaya +RUN glide install +RUN go install github.com/chihaya/chihaya/cmd/chihaya + +EXPOSE 6880 6881 +ENTRYPOINT ["chihaya"] diff --git a/glide.lock b/glide.lock new file mode 100644 index 0000000..3f7d236 --- /dev/null +++ b/glide.lock @@ -0,0 +1,70 @@ +hash: fe839da75efcf365317b1b5eb04bfa15cd1db10265f4947b8aff78932bf4622e +updated: 2016-09-05T18:13:39.020799284-04:00 +imports: +- name: github.com/beorn7/perks + version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 + subpackages: + - quantile +- name: github.com/golang/protobuf + version: 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a + subpackages: + - proto +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/julienschmidt/httprouter + version: 8c199fb6259ffc1af525cc3ad52ee60ba8359669 +- name: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c + subpackages: + - pbutil +- name: github.com/mendsley/gojwk + version: 4d5ec6e58103388d6cb0d7d72bc72649be4f0504 +- name: github.com/prometheus/client_golang + version: c5b7fccd204277076155f10851dad72b76a49317 + subpackages: + - prometheus +- name: github.com/prometheus/client_model + version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 + subpackages: + - go +- name: github.com/prometheus/common + version: 616e90af75cc300730196d04f3676f838d70414f + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +- name: github.com/SermoDigital/jose + version: 389fea327ef076853db8fae03a0f38e30e6092ab + subpackages: + - crypto + - jws + - jwt +- name: github.com/Sirupsen/logrus + version: 4b6ea7319e214d98c938f12692336f7ca9348d6b +- name: github.com/spf13/cobra + version: 9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744 +- name: github.com/spf13/pflag + version: 103ce5cd2042f2fe629c1957abb64ab3e7f50235 +- name: github.com/tylerb/graceful + version: 
50a48b6e73fcc75b45e22c05b79629a67c79e938 +- name: golang.org/x/sys + version: a646d33e2ee3172a661fc09bca23bb4889a41bc8 + subpackages: + - unix +- name: gopkg.in/yaml.v2 + version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e +testImports: +- name: github.com/davecgh/go-spew + version: 6cf5744a041a0022271cefed95ba843f6d87fd51 + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- name: github.com/stretchr/testify + version: f390dcf405f7b83c997eac1b06768bb9f44dec18 + subpackages: + - assert diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 0000000..4d2a41e --- /dev/null +++ b/glide.yaml @@ -0,0 +1,26 @@ +package: github.com/chihaya/chihaya +import: +- package: github.com/SermoDigital/jose + version: ~1.0.0 + subpackages: + - crypto + - jws + - jwt +- package: github.com/Sirupsen/logrus + version: ~0.10.0 +- package: github.com/julienschmidt/httprouter + version: ~1.1.0 +- package: github.com/mendsley/gojwk +- package: github.com/prometheus/client_golang + version: ~0.8.0 + subpackages: + - prometheus +- package: github.com/spf13/cobra +- package: github.com/tylerb/graceful + version: ~1.2.13 +- package: gopkg.in/yaml.v2 +testImport: +- package: github.com/stretchr/testify + version: ~1.1.3 + subpackages: + - assert From 1ff41d78823449004a10f8a61a4dbe86c320f9a7 Mon Sep 17 00:00:00 2001 From: Leo Balduf Date: Mon, 5 Sep 2016 18:23:10 -0400 Subject: [PATCH 74/74] middleware: make hooks return a context --- middleware/clientapproval/clientapproval.go | 12 +++++----- middleware/hooks.go | 14 ++--------- middleware/jwt/jwt.go | 14 +++++------ middleware/middleware.go | 26 ++++++++------------- 4 files changed, 25 insertions(+), 41 deletions(-) diff --git a/middleware/clientapproval/clientapproval.go b/middleware/clientapproval/clientapproval.go index 22fec00..e09cd62 100644 --- a/middleware/clientapproval/clientapproval.go +++ b/middleware/clientapproval/clientapproval.go @@ 
-55,25 +55,25 @@ func NewHook(cfg Config) (middleware.Hook, error) { return h, nil } -func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { +func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) { clientID := bittorrent.NewClientID(req.Peer.ID) if len(h.approved) > 0 { if _, found := h.approved[clientID]; !found { - return ErrClientUnapproved + return ctx, ErrClientUnapproved } } if len(h.unapproved) > 0 { if _, found := h.unapproved[clientID]; found { - return ErrClientUnapproved + return ctx, ErrClientUnapproved } } - return nil + return ctx, nil } -func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) error { +func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) { // Scrapes don't require any protection. - return nil + return ctx, nil } diff --git a/middleware/hooks.go b/middleware/hooks.go index 7a6c5ae..0cfbeac 100644 --- a/middleware/hooks.go +++ b/middleware/hooks.go @@ -9,16 +9,6 @@ import ( // Hook abstracts the concept of anything that needs to interact with a // BitTorrent client's request and response to a BitTorrent tracker. 
type Hook interface { - HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) error - HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) error -} - -type nopHook struct{} - -func (nopHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { - return nil -} - -func (nopHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) error { - return nil + HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) (context.Context, error) + HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) (context.Context, error) } diff --git a/middleware/jwt/jwt.go b/middleware/jwt/jwt.go index 8b9c937..7fcfe3d 100644 --- a/middleware/jwt/jwt.go +++ b/middleware/jwt/jwt.go @@ -98,26 +98,26 @@ func (h *hook) Stop() { close(h.closing) } -func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { +func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) { if req.Params == nil { - return ErrMissingJWT + return ctx, ErrMissingJWT } jwtParam, ok := req.Params.String("jwt") if !ok { - return ErrMissingJWT + return ctx, ErrMissingJWT } if err := validateJWT(req.InfoHash, []byte(jwtParam), h.cfg.Issuer, h.cfg.Audience, h.publicKeys); err != nil { - return ErrInvalidJWT + return ctx, ErrInvalidJWT } - return nil + return ctx, nil } -func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) error { +func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) { // Scrapes don't require any protection. 
- return nil + return ctx, nil } func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string, publicKeys map[string]crypto.PublicKey) error { diff --git a/middleware/middleware.go b/middleware/middleware.go index b6be862..56cff22 100644 --- a/middleware/middleware.go +++ b/middleware/middleware.go @@ -29,14 +29,6 @@ func NewLogic(cfg Config, peerStore storage.PeerStore, preHooks, postHooks []Hoo postHooks: postHooks, } - if len(l.preHooks) == 0 { - l.preHooks = []Hook{nopHook{}} - } - - if len(l.postHooks) == 0 { - l.postHooks = []Hook{nopHook{}} - } - return l } @@ -50,12 +42,12 @@ type Logic struct { } // HandleAnnounce generates a response for an Announce. -func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) { - resp := &bittorrent.AnnounceResponse{ +func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (resp *bittorrent.AnnounceResponse, err error) { + resp = &bittorrent.AnnounceResponse{ Interval: l.announceInterval, } for _, h := range l.preHooks { - if err := h.HandleAnnounce(ctx, req, resp); err != nil { + if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil { return nil, err } } @@ -66,8 +58,9 @@ func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequ // AfterAnnounce does something with the results of an Announce after it has // been completed. func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) { + var err error for _, h := range l.postHooks { - if err := h.HandleAnnounce(ctx, req, resp); err != nil { + if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil { log.Errorln("chihaya: post-announce hooks failed:", err.Error()) return } @@ -75,12 +68,12 @@ func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceReque } // HandleScrape generates a response for a Scrape. 
-func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) { - resp := &bittorrent.ScrapeResponse{ +func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (resp *bittorrent.ScrapeResponse, err error) { + resp = &bittorrent.ScrapeResponse{ Files: make(map[bittorrent.InfoHash]bittorrent.Scrape), } for _, h := range l.preHooks { - if err := h.HandleScrape(ctx, req, resp); err != nil { + if ctx, err = h.HandleScrape(ctx, req, resp); err != nil { return nil, err } } @@ -91,8 +84,9 @@ func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) // AfterScrape does something with the results of a Scrape after it has been // completed. func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) { + var err error for _, h := range l.postHooks { - if err := h.HandleScrape(ctx, req, resp); err != nil { + if ctx, err = h.HandleScrape(ctx, req, resp); err != nil { log.Errorln("chihaya: post-scrape hooks failed:", err.Error()) return }