Merge pull request #207 from chihaya/refactor

merge refactor into master
This commit is contained in:
mrd0ll4r 2016-09-05 19:33:09 -04:00 committed by GitHub
commit bcdbb04f74
102 changed files with 3146 additions and 6855 deletions

View file

@ -1,7 +1,6 @@
language: go
go:
- 1.6
- tip
- 1.7
sudo: false
install:
- go get -t ./...

View file

@ -1,5 +0,0 @@
# This is the official list of Chihaya authors for copyright purposes, in alphabetical order.
Jimmy Zelinskie <jimmyzelinskie@gmail.com>
Justin Li <jli@j-li.net>

View file

@ -1,27 +1,13 @@
# vim: ft=dockerfile
FROM golang
FROM golang:alpine
MAINTAINER Jimmy Zelinskie <jimmyzelinskie@gmail.com>
# Install glide
WORKDIR /tmp
ADD https://github.com/Masterminds/glide/releases/download/0.10.2/glide-0.10.2-linux-amd64.tar.gz /tmp
RUN tar xvf /tmp/glide-0.10.2-linux-amd64.tar.gz
RUN mv /tmp/linux-amd64/glide /usr/bin/glide
RUN apk update && apk add curl git
RUN curl https://glide.sh/get | sh
# Add files
WORKDIR /go/src/github.com/chihaya/chihaya/
RUN mkdir -p /go/src/github.com/chihaya/chihaya/
# Add source
ADD . .
# Install chihaya
WORKDIR /go/src/github.com/chihaya/chihaya
ADD . /go/src/github.com/chihaya/chihaya
RUN glide install
RUN go install github.com/chihaya/chihaya/cmd/chihaya
# Configuration/environment
VOLUME ["/config"]
EXPOSE 6880-6882
# docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro -e quay.io/jzelinskie/chihaya:latest
ENTRYPOINT ["chihaya", "-config=/config/config.json"]
EXPOSE 6880 6881
ENTRYPOINT ["chihaya"]

3
MAINTAINERS Normal file
View file

@ -0,0 +1,3 @@
Jimmy Zelinskie <jimmyzelinskie@gmail.com> (@jzelinskie) pkg:*
Justin Li <jli@j-li.net> (@pushrax) pkg:*
Leo Balduf <balduf@hm.edu> (@mrd0ll4r) pkg:*

View file

@ -13,19 +13,80 @@ Chihaya is an open source [BitTorrent tracker] written in [Go].
Differentiating features include:
- Protocol-agnostic, middleware-composed logic
- Low resource consumption and fast, asynchronous request processing
- Unified IPv4 and IPv6 [swarms]
- Protocol-agnostic middleware
- HTTP and UDP frontends
- IPv4 and IPv6 support
- [YAML] configuration
- Optional metrics via [Prometheus]
- Metrics via [Prometheus]
[releases]: https://github.com/chihaya/chihaya/releases
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
[Go]: https://golang.org
[swarms]: https://en.wikipedia.org/wiki/Glossary_of_BitTorrent_terms#Swarm
[YAML]: http://yaml.org
[Prometheus]: http://prometheus.io
## Architecture
### Diagram
```
┌──────────────────────┐
│ BitTorrent Client ├┬──┐
└┬─────────────────────┘│◀─┘
└──────────────────────┘
┌────────────────┼────────────────────────────────────────────────────┐
│ ▼ chihaya│
│ ┌──────────────────────┐ │
│ │ Frontend ├┐ │
│ └┬─────────────────────┘│ │
│ └──────────────────────┘ │
│ ▲ │
│ │ │
│ ▼ │
│ ┌──────────────────────┐ ┌──────────────────────┐ │
│ │ PreHook Middleware ├◀───────────│ Storage │ │
│ └┬─────────────────────┘│ └──────────────────────┘ │
│ └──────────┬───────────┘ △ │
│ │ │ │
│ ▽ │ │
│ ┌──────────────────────┐ │ │
│ │ PostHook Middleware ├┐ │ │
│ └┬─────────────────────┘│───────────────────────┘ │
│ └──────────────────────┘ │
└─────────────────────────────────────────────────────────────────────┘
```
### Description
BitTorrent clients send announce and scrape requests to a _Frontend_.
Frontends parse requests and write responses for the particular protocol they implement.
The _TrackerLogic_ interface is used to generate responses for their requests and optionally perform a task after responding to a client.
A configurable chain of _PreHook_ and _PostHook_ middleware is used to construct an instance of TrackerLogic.
PreHooks are middleware that are executed before the response has been written.
The final middleware in a chain of PreHooks ensures the existence of any required response fields by reading out of the configured implementation of the _Storage_ interface.
PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
Request data is written to storage asynchronously in one of these PostHooks.
## Production Use
### Facebook
[Facebook] uses BitTorrent to deploy new versions of their software.
In order to optimize the flow of traffic within their datacenters, Chihaya is configured to prefer peers within the same subnet.
Because Facebook organizes their network such that server racks are allocated IP addresses in the same subnet, the vast majority of deployment traffic never impacts the congested areas of their network.
[Facebook]: https://facebook.com
### CoreOS
[Quay] is a container registry that offers the ability to download containers via BitTorrent in order to speed up large or geographically distant deployments.
Announce URLs from Quay's torrent files contain a [JWT] in order to allow Chihaya to verify that an infohash was approved by the registry.
By verifying the infohash, Quay can be sure that only their content is being shared by their tracker.
[Quay]: https://quay.io
[JWT]: https://jwt.io
## Development
### Getting Started
@ -53,30 +114,9 @@ For more information read [CONTRIBUTING.md].
[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
[CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md
## Production Use
### Facebook
[Facebook] uses BitTorrent to deploy new versions of their software.
In order to optimize the flow of traffic within their datacenters, Chihaya is configured to prefer peers within the same subnet.
Because Facebook organizes their network such that server racks are allocated IP addresses in the same subnet, the vast majority of deployment traffic never impacts the congested areas of their network.
[Facebook]: https://facebook.com
### CoreOS
[Quay] is a container registry that offers the ability to download containers via BitTorrent in order to speed up large or geographically distant deployments.
Announce URLs from Quay's torrent files contain a [JWT] in order to allow Chihaya to verify that an infohash was approved by the registry.
By verifying the infohash, Quay can be sure that only their content is being shared by their tracker.
[Quay]: https://quay.io
[JWT]: https://jwt.io
## Related projects
- [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs
- [OpenTracker](http://erdgeist.org/arts/software/opentracker): a popular BitTorrent tracker written in C
- [Ocelot](https://github.com/WhatCD/Ocelot): a private BitTorrent tracker written in C++
## License
Chihaya is distributed under the 2-Clause BSD license that can be found in the `LICENSE` file.

View file

@ -1,14 +1,11 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.package middleware
package chihaya
// Package bittorrent implements all of the abstractions used to decouple the
// protocol of a BitTorrent tracker from the logic of handling Announces and
// Scrapes.
package bittorrent
import (
"net"
"time"
"github.com/chihaya/chihaya/pkg/event"
)
// PeerID represents a peer ID.
@ -71,41 +68,16 @@ func InfoHashFromString(s string) InfoHash {
// AnnounceRequest represents the parsed parameters from an announce request.
type AnnounceRequest struct {
Event event.Event
InfoHash InfoHash
PeerID PeerID
Event Event
InfoHash InfoHash
Compact bool
NumWant uint32
Left uint64
Downloaded uint64
Uploaded uint64
IPv4, IPv6 net.IP
Port uint16
Compact bool
NumWant int32
Left, Downloaded, Uploaded uint64
Params Params
}
// Peer4 returns a Peer using the IPv4 endpoint of the Announce.
// Note that, if the Announce does not contain an IPv4 address, the IP field of
// the returned Peer can be nil.
func (r *AnnounceRequest) Peer4() Peer {
return Peer{
IP: r.IPv4,
Port: r.Port,
ID: r.PeerID,
}
}
// Peer6 returns a Peer using the IPv6 endpoint of the Announce.
// Note that, if the Announce does not contain an IPv6 address, the IP field of
// the returned Peer can be nil.
func (r *AnnounceRequest) Peer6() Peer {
return Peer{
IP: r.IPv6,
Port: r.Port,
ID: r.PeerID,
}
Peer
Params
}
// AnnounceResponse represents the parameters used to create an announce
@ -133,8 +105,9 @@ type ScrapeResponse struct {
// Scrape represents the state of a swarm that is returned in a scrape response.
type Scrape struct {
Complete int32
Incomplete int32
Snatches uint32
Complete uint32
Incomplete uint32
}
// Peer represents the connection details of a peer that is returned in an
@ -146,16 +119,14 @@ type Peer struct {
}
// Equal reports whether p and x are the same.
func (p Peer) Equal(x Peer) bool {
return p.EqualEndpoint(x) && p.ID == x.ID
}
func (p Peer) Equal(x Peer) bool { return p.EqualEndpoint(x) && p.ID == x.ID }
// EqualEndpoint reports whether p and x have the same endpoint.
func (p Peer) EqualEndpoint(x Peer) bool {
return p.Port == x.Port && p.IP.Equal(x.IP)
}
func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP) }
// Params is used to fetch request parameters.
type Params interface {
String(key string) (string, error)
}
// ClientError represents an error that should be exposed to the client over
// the BitTorrent protocol implementation.
type ClientError string
// Error implements the error interface for ClientError.
func (c ClientError) Error() string { return string(c) }

22
bittorrent/client_id.go Normal file
View file

@ -0,0 +1,22 @@
package bittorrent
// ClientID represents the part of a PeerID that identifies a Peer's client
// software.
type ClientID [6]byte

// NewClientID parses a ClientID from a PeerID.
//
// Azureus-style peer IDs ("-XX1234-...") yield the six bytes following the
// leading dash; any other sufficiently long PeerID yields its first six
// bytes. PeerIDs too short for either form produce the zero ClientID.
func NewClientID(pid PeerID) ClientID {
	var id ClientID
	switch {
	case len(pid) >= 7 && pid[0] == '-':
		copy(id[:], pid[1:7])
	case len(pid) >= 6 && pid[0] != '-':
		copy(id[:], pid[:6])
	}
	return id
}

View file

@ -1,16 +1,11 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package bittorrent
package clientid
import "testing"
import (
"testing"
)
func TestClientID(t *testing.T) {
var clientTable = []struct {
peerID string
clientID string
}{
var clientTable = []struct{ peerID, clientID string }{
{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
{"-BS5820-oy4La2MWGEFj", "BS5820"},
@ -45,17 +40,13 @@ func TestClientID(t *testing.T) {
{"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt
{"346------SDFknl33408", "346---"}, // TorreTopia
{"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod
{"", ""},
{"-", ""},
{"12345", ""},
{"-12345", ""},
{"123456", "123456"},
{"-123456", "123456"},
}
for _, tt := range clientTable {
if parsedID := New(tt.peerID); parsedID != tt.clientID {
var clientID ClientID
copy(clientID[:], []byte(tt.clientID))
parsedID := NewClientID(PeerIDFromString(tt.peerID))
if parsedID != clientID {
t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID)
}
}

View file

@ -1,10 +1,4 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package event implements type-level constraints for dealing with the events
// communicated via BitTorrent announce.
package event
package bittorrent
import (
"errors"
@ -51,8 +45,8 @@ func init() {
}
}
// New returns the proper Event given a string.
func New(eventStr string) (Event, error) {
// NewEvent returns the proper Event given a string.
func NewEvent(eventStr string) (Event, error) {
if e, ok := stringToEvent[strings.ToLower(eventStr)]; ok {
return e, nil
}
@ -66,5 +60,5 @@ func (e Event) String() string {
return name
}
panic("event: event has no associated name")
panic("bittorrent: event has no associated name")
}

View file

@ -1,8 +1,4 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package event
package bittorrent
import (
"testing"
@ -26,7 +22,7 @@ func TestNew(t *testing.T) {
}
for _, tt := range table {
got, err := New(tt.data)
got, err := NewEvent(tt.data)
assert.Equal(t, err, tt.expectedErr, "errors should equal the expected value")
assert.Equal(t, got, tt.expected, "events should equal the expected value")
}

192
bittorrent/params.go Normal file
View file

@ -0,0 +1,192 @@
package bittorrent
import (
"errors"
"net/url"
"strconv"
"strings"
)
// Params is used to fetch (optional) request parameters from an Announce.
// For HTTP Announces this includes the request path and parsed query, for UDP
// Announces this is the extracted path and parsed query from optional URLData
// as specified in BEP41.
//
// See ParseURLData for specifics on parsing and limitations.
type Params interface {
	// String returns a string parsed from a query. Every key can be
	// returned as a string because they are encoded in the URL as strings.
	// The second return value reports whether the key was present.
	String(key string) (string, bool)

	// RawPath returns the raw path from the request URL.
	// The path returned can contain URL encoded data.
	// For a request of the form "/announce?port=1234" this would return
	// "/announce".
	RawPath() string

	// RawQuery returns the raw query from the request URL, excluding the
	// delimiter '?'.
	// For a request of the form "/announce?port=1234" this would return
	// "port=1234".
	RawQuery() string
}
// ErrKeyNotFound is returned when a provided key has no value associated with
// it.
//
// The "bittorrent:" prefix matches this package's name; the former "query:"
// prefix was a leftover from before the refactor (cf. the event panic
// message, which was updated to "bittorrent:" in the same change).
var ErrKeyNotFound = errors.New("bittorrent: value for the provided key does not exist")

// ErrInvalidInfohash is returned when parsing a query encounters an infohash
// with invalid length.
var ErrInvalidInfohash = errors.New("bittorrent: invalid infohash")
// QueryParams parses a URL Query and implements the Params interface with some
// additional helpers.
type QueryParams struct {
	path       string            // raw path component, e.g. "/announce"
	query      string            // raw query component, without the leading '?'
	params     map[string]string // lower-cased keys mapped to unescaped values
	infoHashes []InfoHash        // every "info_hash" value found in the query
}
// ParseURLData parses a request URL or UDP URLData as defined in BEP41.
// It expects a concatenated string of the request's path and query parts as
// defined in RFC 3986. As both the udp: and http: scheme used by BitTorrent
// include an authority part the path part must always begin with a slash.
// An example of the expected URLData would be "/announce?port=1234&uploaded=0"
// or "/?auth=0x1337".
// HTTP servers should pass (*http.Request).RequestURI, UDP servers should
// pass the concatenated, unchanged URLData as defined in BEP41.
//
// Note that, in the case of a key occurring multiple times in the query, only
// the last value for that key is kept.
// The only exception to this rule is the key "info_hash" which will attempt to
// parse each value as an InfoHash and return an error if parsing fails. All
// InfoHashes are collected and can later be retrieved by calling the InfoHashes
// method.
func ParseURLData(urlData string) (*QueryParams, error) {
	var path, query string

	// Split on the first '?': everything before it is the path, everything
	// after it is the raw query. IndexByte suffices for a single delimiter
	// character; IndexAny would needlessly scan a character set.
	queryDelim := strings.IndexByte(urlData, '?')
	if queryDelim == -1 {
		path = urlData
	} else {
		path = urlData[:queryDelim]
		query = urlData[queryDelim+1:]
	}

	q, err := parseQuery(query)
	if err != nil {
		return nil, err
	}
	q.path = path
	return q, nil
}
// parseQuery parses a URL query into QueryParams.
// The query is expected to exclude the delimiting '?'.
//
// The scanner below walks the query one byte at a time, tracking the current
// key span [keyStart, keyEnd] and value span [valStart, valEnd], and flushes
// a key/value pair whenever it reaches a separator ('&' or ';') or the end of
// the string. Keys are lower-cased; keys and values are URL-unescaped.
//
// NOTE(review): a key directly followed by a separator (no '='), e.g. "a" in
// "a&b=c", is skipped rather than stored with an empty value — confirm this
// matches the intended announce semantics.
// NOTE(review): a trailing valueless key after an earlier pair (e.g. "b" in
// "a=1&b") appears to leave keyEnd stale from the previous key, so the slice
// rawQuery[keyStart:keyEnd+1] below can have low > high and panic — verify
// with a test before shipping.
func parseQuery(rawQuery string) (*QueryParams, error) {
	var (
		keyStart, keyEnd int
		valStart, valEnd int

		// onKey is true while scanning a key, false after an '='.
		onKey = true

		q = &QueryParams{
			query:      rawQuery,
			infoHashes: nil,
			params:     make(map[string]string),
		}
	)

	for i, length := 0, len(rawQuery); i < length; i++ {
		separator := rawQuery[i] == '&' || rawQuery[i] == ';'
		last := i == length-1

		if separator || last {
			if onKey && !last {
				// Separator hit while still on a key (empty pair
				// such as "&&", or a key with no '='): drop it and
				// start scanning the next key.
				keyStart = i + 1
				continue
			}

			if last && !separator && !onKey {
				// The final byte belongs to the current value.
				valEnd = i
			}

			// Decode the percent-encoded key.
			keyStr, err := url.QueryUnescape(rawQuery[keyStart : keyEnd+1])
			if err != nil {
				return nil, err
			}

			var valStr string

			if valEnd > 0 {
				// Decode the percent-encoded value, if any was seen.
				valStr, err = url.QueryUnescape(rawQuery[valStart : valEnd+1])
				if err != nil {
					return nil, err
				}
			}

			if keyStr == "info_hash" {
				// info_hash values are accumulated rather than
				// overwritten; each must be exactly 20 bytes.
				if len(valStr) != 20 {
					return nil, ErrInvalidInfohash
				}
				q.infoHashes = append(q.infoHashes, InfoHashFromString(valStr))
			} else {
				// Later occurrences of a key overwrite earlier ones.
				q.params[strings.ToLower(keyStr)] = valStr
			}

			// Reset state for the next key/value pair.
			valEnd = 0
			onKey = true
			keyStart = i + 1

		} else if rawQuery[i] == '=' {
			// Switch from key to value scanning.
			onKey = false
			valStart = i + 1
			valEnd = 0
		} else if onKey {
			keyEnd = i
		} else {
			valEnd = i
		}
	}

	return q, nil
}
// String returns a string parsed from a query. Every key can be returned as a
// string because they are encoded in the URL as strings.
func (qp *QueryParams) String(key string) (string, bool) {
	v, found := qp.params[key]
	return v, found
}

// Uint64 returns a uint parsed from a query. After being called, it is safe to
// cast the uint64 to your desired length.
func (qp *QueryParams) Uint64(key string) (uint64, error) {
	raw, found := qp.params[key]
	if !found {
		return 0, ErrKeyNotFound
	}

	parsed, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return 0, err
	}
	return parsed, nil
}

// InfoHashes returns a list of requested infohashes.
func (qp *QueryParams) InfoHashes() []InfoHash { return qp.infoHashes }

// RawPath returns the raw path from the parsed URL.
func (qp *QueryParams) RawPath() string { return qp.path }

// RawQuery returns the raw query from the parsed URL.
func (qp *QueryParams) RawQuery() string { return qp.query }

109
bittorrent/params_test.go Normal file
View file

@ -0,0 +1,109 @@
package bittorrent
import (
"net/url"
"testing"
)
var (
	// testPeerID is a syntactically valid Azureus-style peer ID used by the
	// announce fixtures below.
	testPeerID = "-TEST01-6wfG2wk6wWLc"

	// ValidAnnounceArguments enumerates query-parameter sets that a
	// well-formed announce request may carry; each entry must parse without
	// error.
	ValidAnnounceArguments = []url.Values{
		{},
		{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}},
		{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}},
		{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}},
		{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}},
		{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}},
		{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
		{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		{"peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		{"peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
		{"peer_id": {""}, "compact": {""}},
	}

	// InvalidQueries holds raw request strings whose query portion is
	// malformed (here: an invalid percent-encoding) and must fail to parse.
	InvalidQueries = []string{
		"/announce?" + "info_hash=%0%a",
	}
)
// mapArrayEqual reports whether boxed and unboxed represent the same mapping,
// where every value in boxed must be a single-element slice whose element
// matches the corresponding unboxed value.
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
	if len(boxed) != len(unboxed) {
		return false
	}

	for k, vs := range boxed {
		// Each boxed entry is expected to hold exactly one element.
		if len(vs) != 1 {
			return false
		}
		if vs[0] != unboxed[k] {
			return false
		}
	}

	return true
}
// TestParseEmptyURLData verifies that an empty URLData string parses cleanly
// into a non-nil QueryParams.
func TestParseEmptyURLData(t *testing.T) {
	parsed, err := ParseURLData("")
	if err != nil {
		t.Fatal(err)
	}
	if parsed == nil {
		t.Fatal("Parsed query must not be nil")
	}
}
// TestParseValidURLData verifies that every fixture in
// ValidAnnounceArguments parses without error, that the parsed parameters
// round-trip the encoded values, and that the path is extracted correctly.
func TestParseValidURLData(t *testing.T) {
	for parseIndex, parseVal := range ValidAnnounceArguments {
		parsedQueryObj, err := ParseURLData("/announce?" + parseVal.Encode())
		if err != nil {
			t.Fatal(err)
		}

		if !mapArrayEqual(parseVal, parsedQueryObj.params) {
			// Fixed typo in failure message: "Recieved" -> "Received".
			t.Fatalf("Incorrect parse at item %d.\n Expected=%v\n Received=%v\n", parseIndex, parseVal, parsedQueryObj.params)
		}

		if parsedQueryObj.path != "/announce" {
			t.Fatalf("Incorrect path, expected %q, got %q", "/announce", parsedQueryObj.path)
		}
	}
}
// TestParseInvalidURLData verifies that every malformed fixture in
// InvalidQueries produces an error and a nil result.
func TestParseInvalidURLData(t *testing.T) {
	for idx, query := range InvalidQueries {
		parsed, err := ParseURLData(query)
		if err == nil {
			t.Fatal("Should have produced error", idx)
		}
		if parsed != nil {
			t.Fatal("Should be nil after error", parsed, idx)
		}
	}
}
// BenchmarkParseQuery measures parseQuery over the valid announce fixtures.
//
// The fixtures are encoded once up front and the timer reset, so the cost of
// url.Values.Encode is excluded from the measurement (the previous version
// re-encoded inside the timed loop, skewing the result).
func BenchmarkParseQuery(b *testing.B) {
	announceStrings := make([]string, 0, len(ValidAnnounceArguments))
	for _, v := range ValidAnnounceArguments {
		announceStrings = append(announceStrings, v.Encode())
	}
	b.ResetTimer()

	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range announceStrings {
			parsedQueryObj, err := parseQuery(parseStr)
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}
// BenchmarkURLParseQuery measures the standard library's url.ParseQuery over
// the same fixtures, as a baseline for BenchmarkParseQuery.
//
// As in BenchmarkParseQuery, the fixtures are encoded once up front and the
// timer reset so url.Values.Encode is excluded from the timed region.
func BenchmarkURLParseQuery(b *testing.B) {
	announceStrings := make([]string, 0, len(ValidAnnounceArguments))
	for _, v := range ValidAnnounceArguments {
		announceStrings = append(announceStrings, v.Encode())
	}
	b.ResetTimer()

	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range announceStrings {
			parsedQueryObj, err := url.ParseQuery(parseStr)
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}

View file

@ -1,45 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package chihaya
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
var (
peers = []struct {
peerID string
ip string
port uint16
}{
{"-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720},
{"-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878},
{"-TR0960-6ep6svaa61r4", "fd45:7856:3dae::48", 2878},
{"-BS5820-oy4La2MWGEFj", "fd0a:29a8:8445::38", 2878},
{"-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 8999},
}
)
func TestPeerEquality(t *testing.T) {
// Build peers from test data.
var builtPeers []Peer
for _, peer := range peers {
builtPeers = append(builtPeers, Peer{
ID: PeerIDFromString(peer.peerID),
IP: net.ParseIP(peer.ip),
Port: peer.port,
})
}
assert.True(t, builtPeers[0].Equal(builtPeers[0]))
assert.False(t, builtPeers[0].Equal(builtPeers[1]))
assert.True(t, builtPeers[1].Equal(builtPeers[1]))
assert.False(t, builtPeers[1].Equal(builtPeers[2]))
assert.False(t, builtPeers[1].Equal(builtPeers[3]))
assert.False(t, builtPeers[1].Equal(builtPeers[4]))
}

102
cmd/chihaya/config.go Normal file
View file

@ -0,0 +1,102 @@
package main
import (
"errors"
"io/ioutil"
"os"
"gopkg.in/yaml.v2"
httpfrontend "github.com/chihaya/chihaya/frontend/http"
udpfrontend "github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/middleware/clientapproval"
"github.com/chihaya/chihaya/middleware/jwt"
"github.com/chihaya/chihaya/storage/memory"
)
// hookConfig is the generic YAML layout of a single middleware hook entry:
// a name selecting the hook implementation, and an untyped config block that
// CreateHooks re-marshals into the hook's own config struct.
type hookConfig struct {
	Name   string      `yaml:"name"`
	Config interface{} `yaml:"config"`
}
// ConfigFile represents a namespaced YAML configuration file.
type ConfigFile struct {
	// MainConfigBlock mirrors the top-level "chihaya" key of the YAML file,
	// embedding the middleware config and grouping the frontend, storage,
	// and hook sections.
	MainConfigBlock struct {
		middleware.Config
		PrometheusAddr string              `yaml:"prometheus_addr"`
		HTTPConfig     httpfrontend.Config `yaml:"http"`
		UDPConfig      udpfrontend.Config  `yaml:"udp"`
		Storage        memory.Config       `yaml:"storage"`
		PreHooks       []hookConfig        `yaml:"prehooks"`
		PostHooks      []hookConfig        `yaml:"posthooks"`
	} `yaml:"chihaya"`
}
// ParseConfigFile returns a new ConfigFile given the path to a YAML
// configuration file.
//
// It supports relative and absolute paths and environment variables.
func ParseConfigFile(path string) (*ConfigFile, error) {
	if path == "" {
		return nil, errors.New("no config path specified")
	}

	// ReadFile opens, fully reads, and closes the file, replacing the
	// previous manual Open/ReadAll/defer-Close sequence.
	contents, err := ioutil.ReadFile(os.ExpandEnv(path))
	if err != nil {
		return nil, err
	}

	var cfgFile ConfigFile
	err = yaml.Unmarshal(contents, &cfgFile)
	if err != nil {
		return nil, err
	}

	return &cfgFile, nil
}
// CreateHooks creates instances of Hooks for all of the PreHooks and PostHooks
// configured in a ConfigFile.
//
// NOTE(review): a hook name matching no case below is silently skipped, so a
// typo in the config produces no error — confirm this is intended.
func (cfg ConfigFile) CreateHooks() (preHooks, postHooks []middleware.Hook, err error) {
	for _, hookCfg := range cfg.MainConfigBlock.PreHooks {
		// Round-trip the untyped config block through YAML so it can be
		// unmarshalled into each hook's strongly-typed config struct.
		cfgBytes, err := yaml.Marshal(hookCfg.Config)
		if err != nil {
			panic("failed to remarshal valid YAML")
		}

		switch hookCfg.Name {
		case "jwt":
			var jwtCfg jwt.Config
			err := yaml.Unmarshal(cfgBytes, &jwtCfg)
			if err != nil {
				return nil, nil, errors.New("invalid JWT middleware config: " + err.Error())
			}
			preHooks = append(preHooks, jwt.NewHook(jwtCfg))
		case "client approval":
			var caCfg clientapproval.Config
			err := yaml.Unmarshal(cfgBytes, &caCfg)
			if err != nil {
				return nil, nil, errors.New("invalid client approval middleware config: " + err.Error())
			}
			hook, err := clientapproval.NewHook(caCfg)
			if err != nil {
				return nil, nil, errors.New("invalid client approval middleware config: " + err.Error())
			}
			preHooks = append(preHooks, hook)
		}
	}

	// No PostHook implementations exist yet: the switch has no cases, so
	// postHooks is always returned nil.
	for _, hookCfg := range cfg.MainConfigBlock.PostHooks {
		switch hookCfg.Name {
		}
	}

	return
}

View file

@ -1,77 +1,162 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package main
import (
"flag"
"log"
"errors"
"net/http"
"os"
"os/signal"
"runtime/pprof"
"syscall"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server"
"github.com/chihaya/chihaya/tracker"
log "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/cobra"
// Servers
_ "github.com/chihaya/chihaya/server/http"
_ "github.com/chihaya/chihaya/server/prometheus"
_ "github.com/chihaya/chihaya/server/store"
_ "github.com/chihaya/chihaya/server/store/memory"
// Middleware
_ "github.com/chihaya/chihaya/middleware/deniability"
_ "github.com/chihaya/chihaya/middleware/varinterval"
_ "github.com/chihaya/chihaya/server/store/middleware/client"
_ "github.com/chihaya/chihaya/server/store/middleware/infohash"
_ "github.com/chihaya/chihaya/server/store/middleware/ip"
_ "github.com/chihaya/chihaya/server/store/middleware/response"
_ "github.com/chihaya/chihaya/server/store/middleware/swarm"
httpfrontend "github.com/chihaya/chihaya/frontend/http"
udpfrontend "github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/storage/memory"
)
var (
configPath string
cpuprofile string
)
func init() {
flag.StringVar(&configPath, "config", "", "path to the configuration file")
flag.StringVar(&cpuprofile, "cpuprofile", "", "path to cpu profile output")
}
func main() {
flag.Parse()
if cpuprofile != "" {
log.Println("profiling...")
f, err := os.Create(cpuprofile)
func rootCmdRun(cmd *cobra.Command, args []string) error {
debugLog, _ := cmd.Flags().GetBool("debug")
if debugLog {
log.SetLevel(log.DebugLevel)
log.Debugln("debug logging enabled")
}
cpuProfilePath, _ := cmd.Flags().GetString("cpuprofile")
if cpuProfilePath != "" {
log.Infoln("enabled CPU profiling to", cpuProfilePath)
f, err := os.Create(cpuProfilePath)
if err != nil {
log.Fatal(err)
return err
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
}
cfg, err := chihaya.OpenConfigFile(configPath)
configFilePath, _ := cmd.Flags().GetString("config")
configFile, err := ParseConfigFile(configFilePath)
if err != nil {
log.Fatal("failed to load config: " + err.Error())
return errors.New("failed to read config: " + err.Error())
}
cfg := configFile.MainConfigBlock
go func() {
promServer := http.Server{
Addr: cfg.PrometheusAddr,
Handler: prometheus.Handler(),
}
log.Infoln("started serving prometheus stats on", cfg.PrometheusAddr)
if err := promServer.ListenAndServe(); err != nil {
log.Fatalln("failed to start prometheus server:", err.Error())
}
}()
// Force the compiler to enforce memory against the storage interface.
peerStore, err := memory.New(cfg.Storage)
if err != nil {
return errors.New("failed to create memory storage: " + err.Error())
}
tkr, err := tracker.NewTracker(&cfg.Tracker)
preHooks, postHooks, err := configFile.CreateHooks()
if err != nil {
log.Fatal("failed to create tracker: " + err.Error())
return errors.New("failed to create hooks: " + err.Error())
}
pool, err := server.StartPool(cfg.Servers, tkr)
logic := middleware.NewLogic(cfg.Config, peerStore, preHooks, postHooks)
if err != nil {
log.Fatal("failed to create server pool: " + err.Error())
return errors.New("failed to create TrackerLogic: " + err.Error())
}
shutdown := make(chan os.Signal)
signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM)
<-shutdown
pool.Stop()
shutdown := make(chan struct{})
errChan := make(chan error)
var httpFrontend *httpfrontend.Frontend
var udpFrontend *udpfrontend.Frontend
if cfg.HTTPConfig.Addr != "" {
httpFrontend = httpfrontend.NewFrontend(logic, cfg.HTTPConfig)
go func() {
log.Infoln("started serving HTTP on", cfg.HTTPConfig.Addr)
if err := httpFrontend.ListenAndServe(); err != nil {
errChan <- errors.New("failed to cleanly shutdown HTTP frontend: " + err.Error())
}
}()
}
if cfg.UDPConfig.Addr != "" {
udpFrontend = udpfrontend.NewFrontend(logic, cfg.UDPConfig)
go func() {
log.Infoln("started serving UDP on", cfg.UDPConfig.Addr)
if err := udpFrontend.ListenAndServe(); err != nil {
errChan <- errors.New("failed to cleanly shutdown UDP frontend: " + err.Error())
}
}()
}
sigChan := make(chan os.Signal)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
select {
case <-sigChan:
case <-shutdown:
}
if udpFrontend != nil {
udpFrontend.Stop()
}
if httpFrontend != nil {
httpFrontend.Stop()
}
for err := range peerStore.Stop() {
if err != nil {
errChan <- err
}
}
// TODO(jzelinskie): stop hooks here
close(errChan)
}()
closed := false
var bufErr error
for err = range errChan {
if err != nil {
if !closed {
close(shutdown)
closed = true
} else {
log.Infoln(bufErr)
}
bufErr = err
}
}
return bufErr
}
func main() {
var rootCmd = &cobra.Command{
Use: "chihaya",
Short: "BitTorrent Tracker",
Long: "A customizible, multi-protocol BitTorrent Tracker",
Run: func(cmd *cobra.Command, args []string) {
if err := rootCmdRun(cmd, args); err != nil {
log.Fatal(err)
}
},
}
rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
rootCmd.Flags().String("cpuprofile", "", "location to save a CPU profile")
rootCmd.Flags().Bool("debug", false, "enable debug logging")
if err := rootCmd.Execute(); err != nil {
log.Fatal(err)
}
}

View file

@ -1,98 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package chihaya
import (
"io"
"io/ioutil"
"os"
"time"
"gopkg.in/yaml.v2"
)
// DefaultConfig is a sane configuration used as a fallback or for testing.
var DefaultConfig = Config{
Tracker: TrackerConfig{
AnnounceInterval: 30 * time.Minute,
MinAnnounceInterval: 20 * time.Minute,
AnnounceMiddleware: []MiddlewareConfig{},
ScrapeMiddleware: []MiddlewareConfig{},
},
Servers: []ServerConfig{},
}
// Config represents the global configuration of a chihaya binary.
type Config struct {
Tracker TrackerConfig `yaml:"tracker"`
Servers []ServerConfig `yaml:"servers"`
}
// TrackerConfig represents the configuration of protocol-agnostic BitTorrent
// Tracker used by Servers started by chihaya.
type TrackerConfig struct {
AnnounceInterval time.Duration `yaml:"announce"`
MinAnnounceInterval time.Duration `yaml:"min_announce"`
AnnounceMiddleware []MiddlewareConfig `yaml:"announce_middleware"`
ScrapeMiddleware []MiddlewareConfig `yaml:"scrape_middleware"`
}
// MiddlewareConfig represents the configuration of a middleware used by
// the tracker.
type MiddlewareConfig struct {
Name string `yaml:"name"`
Config interface{} `yaml:"config"`
}
// ServerConfig represents the configuration of the Servers started by chihaya.
type ServerConfig struct {
Name string `yaml:"name"`
Config interface{} `yaml:"config"`
}
// ConfigFile represents a YAML configuration file that namespaces all chihaya
// configuration under the "chihaya" namespace.
type ConfigFile struct {
Chihaya Config `yaml:"chihaya"`
}
// DecodeConfigFile unmarshals an io.Reader into a new Config.
func DecodeConfigFile(r io.Reader) (*Config, error) {
contents, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
cfgFile := &ConfigFile{}
err = yaml.Unmarshal(contents, cfgFile)
if err != nil {
return nil, err
}
return &cfgFile.Chihaya, nil
}
// OpenConfigFile returns a new Config given the path to a YAML configuration
// file.
// It supports relative and absolute paths and environment variables.
// Given "", it returns DefaultConfig.
func OpenConfigFile(path string) (*Config, error) {
	if path == "" {
		return &DefaultConfig, nil
	}

	// Environment variables in the path (e.g. $HOME) are expanded first.
	file, err := os.Open(os.ExpandEnv(path))
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return DecodeConfigFile(file)
}

View file

@ -1,61 +0,0 @@
# Copyright 2016 The Chihaya Authors. All rights reserved.
# Use of this source code is governed by the BSD 2-Clause license,
# which can be found in the LICENSE file.
chihaya:
tracker:
announce: 10m
min_announce: 5m
announce_middleware:
# - name: ip_blacklist
# - name: ip_whitelist
# - name: client_blacklist
# - name: client_whitelist
# - name: infohash_blacklist
# - name: infohash_whitelist
# - name: varinterval
# - name: deniability
- name: store_swarm_interaction
- name: store_response
scrape_middleware:
# - name: infohash_blacklist
# config:
# mode: block
- name: store_response
servers:
- name: store
config:
addr: localhost:6880
request_timeout: 10s
read_timeout: 10s
write_timeout: 10s
client_store:
name: memory
ip_store:
name: memory
string_store:
name: memory
peer_store:
name: memory
config:
gcAfter: 30m
shards: 1
- name: prometheus
config:
addr: localhost:6881
shutdown_timeout: 10s
read_timeout: 10s
write_timeout: 10s
- name: http
config:
addr: localhost:6882
request_timeout: 10s
read_timeout: 10s
write_timeout: 10s
# - name: udp
# config:
# addr: localhost:6883

40
example_config.yaml Normal file
View file

@ -0,0 +1,40 @@
chihaya:
announce_interval: 15m
prometheus_addr: localhost:6880
http:
addr: 0.0.0.0:6881
allow_ip_spoofing: false
real_ip_header: x-real-ip
read_timeout: 5s
write_timeout: 5s
request_timeout: 5s
udp:
addr: 0.0.0.0:6881
allow_ip_spoofing: false
storage:
gc_interval: 14m
peer_lifetime: 15m
shards: 1
max_numwant: 100
prehooks:
- name: jwt
config:
issuer: https://issuer.com
audience: https://chihaya.issuer.com
jwk_set_uri: https://issuer.com/keys
jwk_set_update_interval: 5m
- name: client approval
config:
whitelist:
- OP1011
blacklist:
- OP1012
posthooks:
- name: gossip
config:
boostrap_node: 127.0.0.1:6881

25
frontend/frontend.go Normal file
View file

@ -0,0 +1,25 @@
package frontend
import (
"context"
"github.com/chihaya/chihaya/bittorrent"
)
// TrackerLogic is the interface used by a frontend in order to: (1) generate a
// response from a parsed request, and (2) asynchronously observe anything
// after the response has been delivered to the client.
type TrackerLogic interface {
	// HandleAnnounce generates a response for an Announce.
	HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error)

	// AfterAnnounce does something with the results of an Announce after it
	// has been completed. It has no way to report failure.
	AfterAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse)

	// HandleScrape generates a response for a Scrape.
	HandleScrape(context.Context, *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error)

	// AfterScrape does something with the results of a Scrape after it has been completed.
	// It has no way to report failure.
	AfterScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse)
}

View file

@ -1,7 +1,3 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package bencode implements bencoding of data as defined in BEP 3 using
// type assertion over reflection for performance.
package bencode

View file

@ -1,7 +1,3 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package bencode
import (

View file

@ -1,7 +1,3 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package bencode
import (

View file

@ -1,7 +1,3 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package bencode
import (

View file

@ -1,7 +1,3 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package bencode
import (

179
frontend/http/frontend.go Normal file
View file

@ -0,0 +1,179 @@
// Package http implements a BitTorrent frontend via the HTTP protocol as
// described in BEP 3 and BEP 23.
package http
import (
"context"
"net"
"net/http"
"time"
"github.com/julienschmidt/httprouter"
"github.com/prometheus/client_golang/prometheus"
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya/frontend"
)
func init() {
	prometheus.MustRegister(promResponseDurationMilliseconds)
	// Record a dummy observation so the histogram vector exposes a labeled
	// series before the first real request arrives.
	recordResponseDuration("action", nil, time.Second)
}
// promResponseDurationMilliseconds is a histogram of HTTP response latencies,
// labeled by the action handled and the error string (empty on success).
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "chihaya_http_response_duration_milliseconds",
		Help: "The duration of time it takes to receive and write a response to an API request",
		// 10 exponential buckets starting at 9.375ms, doubling each time.
		Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
	},
	[]string{"action", "error"},
)
// recordResponseDuration records the duration of time to respond to a Request
// in milliseconds, labeling the observation with the action and the error
// string (empty when err is nil).
func recordResponseDuration(action string, err error, duration time.Duration) {
	label := ""
	if err != nil {
		label = err.Error()
	}

	ms := float64(duration.Nanoseconds()) / float64(time.Millisecond)
	promResponseDurationMilliseconds.WithLabelValues(action, label).Observe(ms)
}
// Config represents all of the configurable options for an HTTP BitTorrent
// Frontend.
type Config struct {
	Addr            string        // TCP address to listen on
	ReadTimeout     time.Duration // http.Server read timeout
	WriteTimeout    time.Duration // http.Server write timeout
	RequestTimeout  time.Duration // grace period for in-flight requests on shutdown
	AllowIPSpoofing bool          // honor client-supplied IPs from query params
	RealIPHeader    string        // HTTP header to consult for the client's real IP
}
// Frontend holds the state of an HTTP BitTorrent Frontend.
type Frontend struct {
	grace *net.Conn
	Config
}
// NewFrontend allocates a new instance of a Frontend.
// The returned Frontend is inert until ListenAndServe is called.
func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend {
	f := &Frontend{Config: cfg}
	f.logic = logic
	return f
}
// Stop provides a thread-safe way to shutdown a currently running Tracker.
// It initiates a graceful stop and blocks until the server has finished.
// NOTE(review): t.grace is only assigned in ListenAndServe; calling Stop
// before the frontend has started would dereference a nil pointer — confirm
// callers always start first.
func (t *Frontend) Stop() {
	t.grace.Stop(t.grace.Timeout)
	<-t.grace.StopChan()
}
// handler builds the HTTP router serving the two BitTorrent endpoints.
func (t *Frontend) handler() http.Handler {
	r := httprouter.New()
	r.GET("/announce", t.announceRoute)
	r.GET("/scrape", t.scrapeRoute)
	return r
}
// ListenAndServe listens on the TCP network address t.Addr and blocks serving
// BitTorrent requests until t.Stop() is called or an error is returned.
func (t *Frontend) ListenAndServe() error {
	t.grace = &graceful.Server{
		Server: &http.Server{
			Addr:         t.Addr,
			Handler:      t.handler(),
			ReadTimeout:  t.ReadTimeout,
			WriteTimeout: t.WriteTimeout,
		},
		// RequestTimeout doubles as the shutdown grace period: Stop()
		// passes t.grace.Timeout back into grace.Stop.
		Timeout:          t.RequestTimeout,
		NoSignalHandling: true,
		ConnState: func(conn net.Conn, state http.ConnState) {
			switch state {
			case http.StateNew:
				//stats.RecordEvent(stats.AcceptedConnection)

			case http.StateClosed:
				//stats.RecordEvent(stats.ClosedConnection)

			case http.StateHijacked:
				// No route hijacks connections, so this is a programmer error.
				panic("http: connection impossibly hijacked")

			// Ignore the following cases.
			case http.StateActive, http.StateIdle:

			default:
				panic("http: connection transitioned to unknown state")
			}
		},
	}
	// NOTE(review): keep-alives are disabled, presumably because tracker
	// clients issue single requests — confirm this is intended.
	t.grace.SetKeepAlivesEnabled(false)

	if err := t.grace.ListenAndServe(); err != nil {
		// An "accept" OpError is the expected result of the listener being
		// closed during graceful shutdown; anything else is fatal.
		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
			panic("http: failed to gracefully run HTTP server: " + err.Error())
		}
	}
	return nil
}
// announceRoute parses and responds to an Announce by using t.TrackerLogic.
// The response duration (and any error) is recorded as a metric.
func (t *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	var err error
	start := time.Now()
	// Wrap the metric call in a closure: a plain
	// `defer recordResponseDuration(..., err, time.Since(start))` would
	// evaluate its arguments at defer time, recording a nil error and a
	// near-zero duration for every request.
	defer func() { recordResponseDuration("announce", err, time.Since(start)) }()

	req, err := ParseAnnounce(r, t.RealIPHeader, t.AllowIPSpoofing)
	if err != nil {
		WriteError(w, err)
		return
	}

	resp, err := t.logic.HandleAnnounce(context.Background(), req)
	if err != nil {
		WriteError(w, err)
		return
	}

	err = WriteAnnounceResponse(w, resp)
	if err != nil {
		WriteError(w, err)
		return
	}

	// Post-processing runs asynchronously; the response is already written.
	go t.logic.AfterAnnounce(context.Background(), req, resp)
}
// scrapeRoute parses and responds to a Scrape by using t.TrackerLogic.
// The response duration (and any error) is recorded as a metric.
func (t *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	var err error
	start := time.Now()
	// Wrap the metric call in a closure: a plain
	// `defer recordResponseDuration(..., err, time.Since(start))` would
	// evaluate its arguments at defer time, recording a nil error and a
	// near-zero duration for every request.
	defer func() { recordResponseDuration("scrape", err, time.Since(start)) }()

	req, err := ParseScrape(r)
	if err != nil {
		WriteError(w, err)
		return
	}

	resp, err := t.logic.HandleScrape(context.Background(), req)
	if err != nil {
		WriteError(w, err)
		return
	}

	err = WriteScrapeResponse(w, resp)
	if err != nil {
		WriteError(w, err)
		return
	}

	// Post-processing runs asynchronously; the response is already written.
	go t.logic.AfterScrape(context.Background(), req, resp)
}

137
frontend/http/parser.go Normal file
View file

@ -0,0 +1,137 @@
package http
import (
"net"
"net/http"
"github.com/chihaya/chihaya/bittorrent"
)
// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
//
// If allowIPSpoofing is true, IPs provided via params will be used.
// If realIPHeader is not empty string, the first value of the HTTP Header with
// that name will be used.
func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) {
	qp, err := bittorrent.ParseURLData(r.RequestURI)
	if err != nil {
		return nil, err
	}

	request := &bittorrent.AnnounceRequest{Params: qp}

	// An absent event parameter yields eventStr == ""; NewEvent presumably
	// maps that to the "none" event — confirm in the bittorrent package.
	eventStr, _ := qp.String("event")
	request.Event, err = bittorrent.NewEvent(eventStr)
	if err != nil {
		return nil, bittorrent.ClientError("failed to provide valid client event")
	}

	// Any non-empty, non-"0" value requests a compact response (BEP 23).
	compactStr, _ := qp.String("compact")
	request.Compact = compactStr != "" && compactStr != "0"

	// Exactly one info_hash is allowed on an announce.
	infoHashes := qp.InfoHashes()
	if len(infoHashes) < 1 {
		return nil, bittorrent.ClientError("no info_hash parameter supplied")
	}
	if len(infoHashes) > 1 {
		return nil, bittorrent.ClientError("multiple info_hash parameters supplied")
	}
	request.InfoHash = infoHashes[0]

	// peer_id must be exactly 20 bytes.
	peerID, ok := qp.String("peer_id")
	if !ok {
		return nil, bittorrent.ClientError("failed to parse parameter: peer_id")
	}
	if len(peerID) != 20 {
		return nil, bittorrent.ClientError("failed to provide valid peer_id")
	}
	request.Peer.ID = bittorrent.PeerIDFromString(peerID)

	// Transfer statistics are mandatory.
	request.Left, err = qp.Uint64("left")
	if err != nil {
		return nil, bittorrent.ClientError("failed to parse parameter: left")
	}

	request.Downloaded, err = qp.Uint64("downloaded")
	if err != nil {
		return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
	}

	request.Uploaded, err = qp.Uint64("uploaded")
	if err != nil {
		return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
	}

	// NOTE(review): numwant is treated as mandatory here, but many clients
	// omit it — confirm this is intended.
	numwant, err := qp.Uint64("numwant")
	if err != nil {
		return nil, bittorrent.ClientError("failed to parse parameter: numwant")
	}
	request.NumWant = uint32(numwant)

	port, err := qp.Uint64("port")
	if err != nil {
		return nil, bittorrent.ClientError("failed to parse parameter: port")
	}
	request.Peer.Port = uint16(port)

	// Resolve the peer's IP from params, header, or the connection itself.
	request.Peer.IP = requestedIP(r, qp, realIPHeader, allowIPSpoofing)
	if request.Peer.IP == nil {
		return nil, bittorrent.ClientError("failed to parse peer IP address")
	}

	return request, nil
}
// ParseScrape parses an bittorrent.ScrapeRequest from an http.Request.
// At least one info_hash parameter must be present; multiple are allowed.
func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) {
	qp, err := bittorrent.ParseURLData(r.RequestURI)
	if err != nil {
		return nil, err
	}

	hashes := qp.InfoHashes()
	if len(hashes) < 1 {
		return nil, bittorrent.ClientError("no info_hash parameter supplied")
	}

	return &bittorrent.ScrapeRequest{
		InfoHashes: hashes,
		Params:     qp,
	}, nil
}
// requestedIP determines the IP address for a BitTorrent client request.
//
// If allowIPSpoofing is true, IPs provided via params will be used.
// If realIPHeader is not empty string, the first value of the HTTP Header with
// that name will be used.
// Falls back to the IP of the connection's remote address. The result may be
// nil if no candidate parses as a valid IP.
func requestedIP(r *http.Request, p bittorrent.Params, realIPHeader string, allowIPSpoofing bool) net.IP {
	if allowIPSpoofing {
		// The first IP parameter present wins, even if it fails to parse.
		for _, param := range []string{"ip", "ipv4", "ipv6"} {
			if ipstr, ok := p.String(param); ok {
				return net.ParseIP(ipstr)
			}
		}
	}

	if realIPHeader != "" {
		// Use Header.Get rather than indexing the map directly: Go stores
		// header keys in canonical form (e.g. "X-Real-Ip"), so a configured
		// value like "x-real-ip" would never match as a raw map key.
		if ipstr := r.Header.Get(realIPHeader); ipstr != "" {
			return net.ParseIP(ipstr)
		}
	}

	host, _, _ := net.SplitHostPort(r.RemoteAddr)
	return net.ParseIP(host)
}

View file

@ -1,20 +1,16 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"net/http"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/bencode"
"github.com/chihaya/chihaya/tracker"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend/http/bencode"
)
func writeError(w http.ResponseWriter, err error) error {
// WriteError communicates an error to a BitTorrent client over HTTP.
func WriteError(w http.ResponseWriter, err error) error {
message := "internal server error"
if _, clientErr := err.(tracker.ClientError); clientErr {
if _, clientErr := err.(bittorrent.ClientError); clientErr {
message = err.Error()
}
@ -24,7 +20,9 @@ func writeError(w http.ResponseWriter, err error) error {
})
}
func writeAnnounceResponse(w http.ResponseWriter, resp *chihaya.AnnounceResponse) error {
// WriteAnnounceResponse communicates the results of an Announce to a
// BitTorrent client over HTTP.
func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceResponse) error {
bdict := bencode.Dict{
"complete": resp.Complete,
"incomplete": resp.Incomplete,
@ -38,7 +36,7 @@ func writeAnnounceResponse(w http.ResponseWriter, resp *chihaya.AnnounceResponse
// Add the IPv4 peers to the dictionary.
for _, peer := range resp.IPv4Peers {
IPv4CompactDict = append(IPv4CompactDict, compact(peer)...)
IPv4CompactDict = append(IPv4CompactDict, compact4(peer)...)
}
if len(IPv4CompactDict) > 0 {
bdict["peers"] = IPv4CompactDict
@ -46,7 +44,7 @@ func writeAnnounceResponse(w http.ResponseWriter, resp *chihaya.AnnounceResponse
// Add the IPv6 peers to the dictionary.
for _, peer := range resp.IPv6Peers {
IPv6CompactDict = append(IPv6CompactDict, compact(peer)...)
IPv6CompactDict = append(IPv6CompactDict, compact6(peer)...)
}
if len(IPv6CompactDict) > 0 {
bdict["peers6"] = IPv6CompactDict
@ -68,7 +66,9 @@ func writeAnnounceResponse(w http.ResponseWriter, resp *chihaya.AnnounceResponse
return bencode.NewEncoder(w).Encode(bdict)
}
func writeScrapeResponse(w http.ResponseWriter, resp *chihaya.ScrapeResponse) error {
// WriteScrapeResponse communicates the results of a Scrape to a BitTorrent
// client over HTTP.
func WriteScrapeResponse(w http.ResponseWriter, resp *bittorrent.ScrapeResponse) error {
filesDict := bencode.NewDict()
for infohash, scrape := range resp.Files {
filesDict[string(infohash[:])] = bencode.Dict{
@ -82,14 +82,29 @@ func writeScrapeResponse(w http.ResponseWriter, resp *chihaya.ScrapeResponse) er
})
}
func compact(peer chihaya.Peer) (buf []byte) {
buf = []byte(peer.IP)
func compact4(peer bittorrent.Peer) (buf []byte) {
if ip := peer.IP.To4(); ip == nil {
panic("non-IPv4 IP for Peer in IPv4Peers")
} else {
buf = []byte(ip)
}
buf = append(buf, byte(peer.Port>>8))
buf = append(buf, byte(peer.Port&0xff))
return
}
func dict(peer chihaya.Peer) bencode.Dict {
func compact6(peer bittorrent.Peer) (buf []byte) {
if ip := peer.IP.To16(); ip == nil {
panic("non-IPv6 IP for Peer in IPv6Peers")
} else {
buf = []byte(ip)
}
buf = append(buf, byte(peer.Port>>8))
buf = append(buf, byte(peer.Port&0xff))
return
}
func dict(peer bittorrent.Peer) bencode.Dict {
return bencode.Dict{
"peer id": string(peer.ID[:]),
"ip": peer.IP.String(),

View file

@ -1,15 +1,12 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"net/http/httptest"
"testing"
"github.com/chihaya/chihaya/tracker"
"github.com/stretchr/testify/assert"
"github.com/chihaya/chihaya/bittorrent"
)
func TestWriteError(t *testing.T) {
@ -22,7 +19,7 @@ func TestWriteError(t *testing.T) {
for _, tt := range table {
r := httptest.NewRecorder()
err := writeError(r, tracker.ClientError(tt.reason))
err := WriteError(r, bittorrent.ClientError(tt.reason))
assert.Nil(t, err)
assert.Equal(t, r.Body.String(), tt.expected)
}
@ -30,7 +27,7 @@ func TestWriteError(t *testing.T) {
func TestWriteStatus(t *testing.T) {
r := httptest.NewRecorder()
err := writeError(r, tracker.ClientError("something is missing"))
err := WriteError(r, bittorrent.ClientError("something is missing"))
assert.Nil(t, err)
assert.Equal(t, r.Body.String(), "d14:failure reason20:something is missinge")
}

View file

@ -0,0 +1,34 @@
package bytepool
import "sync"
// BytePool is a cached pool of reusable byte slices, all of the same size.
type BytePool struct {
	sync.Pool
}

// New allocates a new BytePool with slices of equal length and capacity.
func New(length int) *BytePool {
	var bp BytePool
	bp.Pool.New = func() interface{} {
		// make([]byte, length) already sets cap == len; the previous
		// explicit capacity argument (length, length) was redundant.
		return make([]byte, length)
	}
	return &bp
}

// Get returns a byte slice from the pool.
// The slice is zeroed and has the full length given to New.
func (bp *BytePool) Get() []byte {
	return bp.Pool.Get().([]byte)
}

// Put returns a byte slice to the pool after zeroing it.
func (bp *BytePool) Put(b []byte) {
	// Restore the slice to its full capacity so the next Get hands out a
	// full-length buffer even if the caller sub-sliced it.
	b = b[:cap(b)]

	// Zero out the bytes.
	// Apparently this specific expression is optimized by the compiler, see
	// github.com/golang/go/issues/5373.
	for i := range b {
		b[i] = 0
	}
	bp.Pool.Put(b)
}

View file

@ -0,0 +1,50 @@
package udp
import (
"crypto/hmac"
"crypto/sha256"
"encoding/binary"
"net"
"time"
)
// ttl is the number of seconds a connection ID should be valid according to
// BEP 15.
const ttl = 2 * time.Minute
// NewConnectionID creates a new 8 byte connection identifier for UDP packets
// as described by BEP 15.
//
// The first 4 bytes of the connection identifier is a unix timestamp and the
// last 4 bytes are a truncated HMAC token created from the aforementioned
// unix timestamp and the source IP address of the UDP packet.
//
// Truncated HMAC is known to be safe for 2^(-n) where n is the size in bits
// of the truncated HMAC token. In this use case we have 32 bits, thus a
// forgery probability of approximately 1 in 4 billion.
func NewConnectionID(ip net.IP, now time.Time, key string) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint32(buf, uint32(now.UTC().Unix()))
mac := hmac.New(sha256.New, []byte(key))
mac.Write(buf[:4])
mac.Write(ip)
macBytes := mac.Sum(nil)[:4]
copy(buf[4:], macBytes)
return buf
}
// ValidConnectionID determines whether a connection identifier is legitimate.
func ValidConnectionID(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration, key string) bool {
ts := time.Unix(int64(binary.BigEndian.Uint32(connectionID[:4])), 0)
if now.After(ts.Add(ttl)) || ts.After(now.Add(maxClockSkew)) {
return false
}
mac := hmac.New(sha256.New, []byte(key))
mac.Write(connectionID[:4])
mac.Write(ip)
expectedMAC := mac.Sum(nil)[:4]
return hmac.Equal(expectedMAC, connectionID[4:])
}

View file

@ -0,0 +1,29 @@
package udp
import (
"net"
"testing"
"time"
)
// golden is a table of connection-ID scenarios: an ID minted at createdAt
// must (or must not) validate at time now for the given source IP and key.
var golden = []struct {
	createdAt int64
	now       int64
	ip        string
	key       string
	valid     bool
}{
	{0, 1, "127.0.0.1", "", true},       // fresh ID is accepted
	{0, 420420, "127.0.0.1", "", false}, // far past the TTL
	// NOTE(review): net.ParseIP("[::]") returns nil (brackets are not
	// accepted), so this case exercises a nil IP rather than ::.
	{0, 0, "[::]", "", true},
}
// TestVerification mints a connection ID for each golden entry and checks
// that ValidConnectionID agrees with the expected validity at time `now`,
// using a one-minute clock-skew allowance.
func TestVerification(t *testing.T) {
	for _, tt := range golden {
		cid := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
		got := ValidConnectionID(cid, net.ParseIP(tt.ip), time.Unix(tt.now, 0), time.Minute, tt.key)
		if got != tt.valid {
			t.Errorf("expected validity: %t got validity: %t", tt.valid, got)
		}
	}
}

242
frontend/udp/frontend.go Normal file
View file

@ -0,0 +1,242 @@
// Package udp implements a BitTorrent tracker via the UDP protocol as
// described in BEP 15.
package udp
import (
"bytes"
"context"
"encoding/binary"
"net"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/frontend/udp/bytepool"
)
func init() {
	prometheus.MustRegister(promResponseDurationMilliseconds)
	// Record a dummy observation so the histogram vector exposes a labeled
	// series before the first real request arrives.
	recordResponseDuration("action", nil, time.Second)
}
// promResponseDurationMilliseconds is a histogram of UDP response latencies,
// labeled by the action handled and the error string (empty on success).
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "chihaya_udp_response_duration_milliseconds",
		Help: "The duration of time it takes to receive and write a response to an API request",
		// 10 exponential buckets starting at 9.375ms, doubling each time.
		Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
	},
	[]string{"action", "error"},
)
// recordResponseDuration records the duration of time to respond to a UDP
// Request in milliseconds, labeling the observation with the action and the
// error string (empty when err is nil).
func recordResponseDuration(action string, err error, duration time.Duration) {
	label := ""
	if err != nil {
		label = err.Error()
	}

	observer := promResponseDurationMilliseconds.WithLabelValues(action, label)
	observer.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
// Config represents all of the configurable options for a UDP BitTorrent
// Tracker.
type Config struct {
	Addr            string        // UDP address to listen on
	PrivateKey      string        // HMAC key used to mint and verify connection IDs
	MaxClockSkew    time.Duration // tolerated future-dating of connection IDs
	AllowIPSpoofing bool          // honor client-supplied IPs from the packet body
}
// Frontend holds the state of a UDP BitTorrent Frontend.
type Frontend struct {
	socket  *net.UDPConn   // bound by ListenAndServe
	closing chan struct{}  // closed by Stop to signal shutdown
	wg      sync.WaitGroup // tracks in-flight request handlers

	logic frontend.TrackerLogic

	Config
}
// NewFrontend allocates a new instance of a Frontend.
// The returned Frontend is inert until ListenAndServe is called.
func NewFrontend(logic frontend.TrackerLogic, cfg Config) *Frontend {
	f := &Frontend{
		Config:  cfg,
		closing: make(chan struct{}),
	}
	f.logic = logic
	return f
}
// Stop provides a thread-safe way to shutdown a currently running Frontend.
// It signals shutdown, interrupts any blocked read by expiring the socket's
// read deadline, and waits for in-flight handlers to finish.
// NOTE(review): calling Stop twice would panic (double close), and calling it
// before ListenAndServe would dereference a nil socket — confirm callers.
func (t *Frontend) Stop() {
	close(t.closing)
	t.socket.SetReadDeadline(time.Now())
	t.wg.Wait()
}
// ListenAndServe listens on the UDP network address t.Addr and blocks serving
// BitTorrent requests until t.Stop() is called or an error is returned.
// Each packet is handled on its own goroutine; packet buffers are pooled.
func (t *Frontend) ListenAndServe() error {
	udpAddr, err := net.ResolveUDPAddr("udp", t.Addr)
	if err != nil {
		return err
	}

	t.socket, err = net.ListenUDP("udp", udpAddr)
	if err != nil {
		return err
	}
	defer t.socket.Close()

	// NOTE(review): packets larger than 2048 bytes are truncated by the
	// read below — confirm this bound is acceptable for large scrapes.
	pool := bytepool.New(2048)

	for {
		// Check to see if we need to shutdown.
		select {
		case <-t.closing:
			return nil
		default:
		}

		// Read a UDP packet into a reusable buffer.
		// The one-second deadline guarantees the closing channel is
		// re-checked at least once per second.
		buffer := pool.Get()
		t.socket.SetReadDeadline(time.Now().Add(time.Second))
		n, addr, err := t.socket.ReadFromUDP(buffer)
		if err != nil {
			pool.Put(buffer)
			if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
				// A temporary failure is not fatal; just pretend it never happened.
				continue
			}
			return err
		}

		// We got nothin'
		if n == 0 {
			pool.Put(buffer)
			continue
		}

		// Ownership of buffer transfers to the handler goroutine, which
		// returns it to the pool when the request has been handled.
		t.wg.Add(1)
		go func() {
			defer t.wg.Done()
			defer pool.Put(buffer)

			// Handle the request.
			start := time.Now()
			action, err := t.handleRequest(
				Request{buffer[:n], addr.IP},
				ResponseWriter{t.socket, addr},
			)
			recordResponseDuration(action, err, time.Since(start))
		}()
	}
}
// Request represents a UDP payload received by a Tracker.
type Request struct {
	Packet []byte // raw datagram contents
	IP     net.IP // source address of the datagram
}
// ResponseWriter implements the ability to respond to a Request via the
// io.Writer interface by sending a datagram back to the request's source.
type ResponseWriter struct {
	socket *net.UDPConn
	addr   *net.UDPAddr
}
// Write implements the io.Writer interface for a ResponseWriter.
// It forwards the result of the underlying UDP write so callers can observe
// short writes or socket errors instead of having them silently discarded
// (the previous implementation always returned len(b), nil).
func (w ResponseWriter) Write(b []byte) (int, error) {
	return w.socket.WriteToUDP(b, w.addr)
}
// handleRequest parses and responds to a UDP Request.
// It returns the name of the action performed (used for metrics) and any
// error that occurred while handling it.
func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string, err error) {
	if len(r.Packet) < 16 {
		// Malformed, no client packets are less than 16 bytes.
		// We explicitly return nothing in case this is a DoS attempt.
		err = errMalformedPacket
		return
	}

	// Parse the headers of the UDP packet.
	connID := r.Packet[0:8]
	actionID := binary.BigEndian.Uint32(r.Packet[8:12])
	txID := r.Packet[12:16]

	// If this isn't requesting a new connection ID and the connection ID is
	// invalid, then fail.
	if actionID != connectActionID && !ValidConnectionID(connID, r.IP, time.Now(), t.MaxClockSkew, t.PrivateKey) {
		err = errBadConnectionID
		WriteError(w, txID, err)
		return
	}

	// Handle the requested action.
	switch actionID {
	case connectActionID:
		actionName = "connect"

		// A connect request must carry the protocol's magic constant.
		if !bytes.Equal(connID, initialConnectionID) {
			err = errMalformedPacket
			return
		}

		WriteConnectionID(w, txID, NewConnectionID(r.IP, time.Now(), t.PrivateKey))

	case announceActionID, announceV6ActionID:
		actionName = "announce"

		var req *bittorrent.AnnounceRequest
		req, err = ParseAnnounce(r, t.AllowIPSpoofing, actionID == announceV6ActionID)
		if err != nil {
			WriteError(w, txID, err)
			return
		}

		var resp *bittorrent.AnnounceResponse
		resp, err = t.logic.HandleAnnounce(context.Background(), req)
		if err != nil {
			WriteError(w, txID, err)
			return
		}

		WriteAnnounce(w, txID, resp, actionID == announceV6ActionID)

		// Post-hooks run asynchronously; the response is already on the wire.
		go t.logic.AfterAnnounce(context.Background(), req, resp)

	case scrapeActionID:
		actionName = "scrape"

		var req *bittorrent.ScrapeRequest
		req, err = ParseScrape(r)
		if err != nil {
			WriteError(w, txID, err)
			return
		}

		var resp *bittorrent.ScrapeResponse
		resp, err = t.logic.HandleScrape(context.Background(), req)
		if err != nil {
			WriteError(w, txID, err)
			return
		}

		WriteScrape(w, txID, resp)

		go t.logic.AfterScrape(context.Background(), req, resp)

	default:
		err = errUnknownAction
		WriteError(w, txID, err)
	}

	return
}

199
frontend/udp/parser.go Normal file
View file

@ -0,0 +1,199 @@
package udp
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"sync"
"github.com/chihaya/chihaya/bittorrent"
)
// Action IDs used in UDP packet headers as described by BEP 15
// (connect=0, announce=1, scrape=2, error=3); announceV6ActionID (4) is the
// opentracker IPv6 extension referenced in ParseAnnounce's documentation.
const (
	connectActionID uint32 = iota
	announceActionID
	scrapeActionID
	errorActionID
	announceV6ActionID
)
// Option-Types as described in BEP 41 and BEP 45.
const (
	optionEndOfOptions byte = 0x0 // terminates the option list
	optionNOP               = 0x1 // padding byte; skipped
	optionURLData           = 0x2 // length-prefixed chunk of URL data
)
var (
	// initialConnectionID is the magic initial connection ID specified by BEP 15.
	initialConnectionID = []byte{0, 0, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80}

	// emptyIPs are the value of an IP field that has been left blank.
	// NOTE(review): neither is referenced in this file's visible code —
	// confirm they are still needed.
	emptyIPv4 = []byte{0, 0, 0, 0}
	emptyIPv6 = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

	// eventIDs map values described in BEP 15 to Events.
	eventIDs = []bittorrent.Event{
		bittorrent.None,
		bittorrent.Completed,
		bittorrent.Started,
		bittorrent.Stopped,
	}

	// Sentinel client errors returned by the UDP parsers and handlers.
	errMalformedPacket   = bittorrent.ClientError("malformed packet")
	errMalformedIP       = bittorrent.ClientError("malformed IP address")
	errMalformedEvent    = bittorrent.ClientError("malformed event ID")
	errUnknownAction     = bittorrent.ClientError("unknown action ID")
	errBadConnectionID   = bittorrent.ClientError("bad connection ID")
	errUnknownOptionType = bittorrent.ClientError("unknown option type")
)
// ParseAnnounce parses an AnnounceRequest from a UDP request.
//
// If allowIPSpoofing is true, IPs provided via params will be used.
//
// If v6 is true the announce will be parsed as an IPv6 announce "the
// opentracker way", see
// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceRequest, error) {
	// The fixed-size portion is 84 bytes plus the IP field, followed by 10
	// bytes (key[4], numwant[4], port[2]).
	ipEnd := 84 + net.IPv4len
	if v6 {
		ipEnd = 84 + net.IPv6len
	}

	if len(r.Packet) < ipEnd+10 {
		return nil, errMalformedPacket
	}

	infohash := r.Packet[16:36]
	peerID := r.Packet[36:56]
	downloaded := binary.BigEndian.Uint64(r.Packet[56:64])
	left := binary.BigEndian.Uint64(r.Packet[64:72])
	uploaded := binary.BigEndian.Uint64(r.Packet[72:80])

	eventID := int(r.Packet[83])
	if eventID >= len(eventIDs) {
		return nil, errMalformedEvent
	}

	ip := r.IP
	if allowIPSpoofing {
		// Copy the address out of the packet rather than aliasing it: the
		// packet buffer is pooled and reused once this request has been
		// handled, while the returned request (and its Peer.IP) may outlive
		// it — e.g. in asynchronous post-hooks.
		ip = make(net.IP, ipEnd-84)
		copy(ip, r.Packet[84:ipEnd])
	}
	if !allowIPSpoofing && r.IP == nil {
		// We have no IP address to fallback on.
		return nil, errMalformedIP
	}

	numWant := binary.BigEndian.Uint32(r.Packet[ipEnd+4 : ipEnd+8])
	port := binary.BigEndian.Uint16(r.Packet[ipEnd+8 : ipEnd+10])

	// Anything after the fixed-size fields is BEP 41 optional parameters.
	params, err := handleOptionalParameters(r.Packet[ipEnd+10:])
	if err != nil {
		return nil, err
	}

	return &bittorrent.AnnounceRequest{
		Event:      eventIDs[eventID],
		InfoHash:   bittorrent.InfoHashFromBytes(infohash),
		NumWant:    numWant, // already a uint32; the old uint32() conversion was redundant
		Left:       left,
		Downloaded: downloaded,
		Uploaded:   uploaded,
		Peer: bittorrent.Peer{
			ID:   bittorrent.PeerIDFromBytes(peerID),
			IP:   ip,
			Port: port,
		},
		Params: params,
	}, nil
}
// buffer embeds a bytes.Buffer so response buffers can be cycled through a
// sync.Pool instead of being reallocated per packet.
type buffer struct {
	bytes.Buffer
}

// bufferFree is the pool of reusable response buffers.
var bufferFree = sync.Pool{
	New: func() interface{} { return new(buffer) },
}

// newBuffer fetches a buffer from the pool; callers must release it with free.
func newBuffer() *buffer {
	return bufferFree.Get().(*buffer)
}

// free resets the buffer and returns it to the pool for reuse.
func (b *buffer) free() {
	b.Reset()
	bufferFree.Put(b)
}
// handleOptionalParameters parses the optional parameters as described in BEP
// 41 and updates an announce with the values parsed.
// URLData chunks are concatenated and then parsed as a single URL string;
// a packet without an explicit end-of-options marker is still accepted.
func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
	if len(packet) == 0 {
		return bittorrent.ParseURLData("")
	}

	// Accumulate URLData payloads into a pooled buffer.
	var buf = newBuffer()
	defer buf.free()

	for i := 0; i < len(packet); {
		option := packet[i]
		switch option {
		case optionEndOfOptions:
			return bittorrent.ParseURLData(buf.String())
		case optionNOP:
			i++
		case optionURLData:
			// URLData is TLV-encoded: a 1-byte length follows the
			// option byte, then that many bytes of payload.
			if i+1 >= len(packet) {
				return nil, errMalformedPacket
			}
			length := int(packet[i+1])
			if i+2+length > len(packet) {
				return nil, errMalformedPacket
			}
			n, err := buf.Write(packet[i+2 : i+2+length])
			if err != nil {
				return nil, err
			}
			if n != length {
				return nil, fmt.Errorf("expected to write %d bytes, wrote %d", length, n)
			}
			i += 2 + length
		default:
			return nil, errUnknownOptionType
		}
	}

	return bittorrent.ParseURLData(buf.String())
}
// ParseScrape parses a ScrapeRequest from a UDP request.
// After the 16-byte header, the packet must consist of a whole number of
// 20-byte infohashes (at least one).
func ParseScrape(r Request) (*bittorrent.ScrapeRequest, error) {
	// If a scrape isn't at least 36 bytes long, it's malformed.
	if len(r.Packet) < 36 {
		return nil, errMalformedPacket
	}

	// Skip past the initial headers and check that the bytes left equal the
	// length of a valid list of infohashes.
	body := r.Packet[16:]
	if len(body)%20 != 0 {
		return nil, errMalformedPacket
	}

	// Collect every 20-byte infohash from the remainder of the packet.
	infohashes := make([]bittorrent.InfoHash, 0, len(body)/20)
	for i := 0; i+20 <= len(body); i += 20 {
		infohashes = append(infohashes, bittorrent.InfoHashFromBytes(body[i:i+20]))
	}

	return &bittorrent.ScrapeRequest{
		InfoHashes: infohashes,
	}, nil
}

View file

@ -0,0 +1,71 @@
package udp
import "testing"
// table pairs BEP 41 option blobs with the URL parameters they should parse
// into; a nil values map means only the error is checked.
var table = []struct {
	data   []byte
	values map[string]string
	err    error
}{
	{
		// URLData(len 5): "/?a=b".
		[]byte{0x2, 0x5, '/', '?', 'a', '=', 'b'},
		map[string]string{"a": "b"},
		nil,
	},
	{
		// URLData with zero length: no parameters at all.
		[]byte{0x2, 0x0},
		map[string]string{},
		nil,
	},
	{
		// Length byte claims 1 byte of payload but none follows.
		[]byte{0x2, 0x1},
		nil,
		errMalformedPacket,
	},
	{
		// Option byte with no length byte at all.
		[]byte{0x2},
		nil,
		errMalformedPacket,
	},
	{
		// URLData including a path before the query string.
		[]byte{0x2, 0x8, '/', 'c', '/', 'd', '?', 'a', '=', 'b'},
		map[string]string{"a": "b"},
		nil,
	},
	{
		// Query split across two URLData options; chunks are concatenated.
		[]byte{0x2, 0x2, '/', '?', 0x2, 0x3, 'a', '=', 'b'},
		map[string]string{"a": "b"},
		nil,
	},
	{
		// Percent-encoded space inside the value.
		[]byte{0x2, 0x9, '/', '?', 'a', '=', 'b', '%', '2', '0', 'c'},
		map[string]string{"a": "b c"},
		nil,
	},
}
// TestHandleOptionalParameters runs handleOptionalParameters against every
// entry in table, checking both the returned error and, when expected values
// are given, each parsed key/value pair.
func TestHandleOptionalParameters(t *testing.T) {
	for _, testCase := range table {
		params, err := handleOptionalParameters(testCase.data)
		if err != testCase.err {
			if testCase.err == nil {
				t.Fatalf("expected no parsing error for %x but got %s", testCase.data, err)
			} else {
				t.Fatalf("expected parsing error for %x", testCase.data)
			}
		}
		if testCase.values != nil {
			if params == nil {
				t.Fatalf("expected values %v for %x", testCase.values, testCase.data)
			} else {
				// Only the expected keys are verified; extra parsed
				// keys would not fail the test.
				for key, want := range testCase.values {
					if got, ok := params.String(key); !ok {
						t.Fatalf("params missing entry %s for data %x", key, testCase.data)
					} else if got != want {
						t.Fatalf("expected param %s=%s, but was %s for data %x", key, want, got, testCase.data)
					}
				}
			}
		}
	}
}

89
frontend/udp/writer.go Normal file
View file

@ -0,0 +1,89 @@
package udp
import (
"encoding/binary"
"fmt"
"io"
"time"
"github.com/chihaya/chihaya/bittorrent"
)
// WriteError writes the failure reason as a null-terminated string.
// Errors that are not client errors are prefixed so operators can tell an
// internal failure from client misbehavior.
func WriteError(w io.Writer, txID []byte, err error) {
	// If the client wasn't at fault, acknowledge it.
	if _, ok := err.(bittorrent.ClientError); !ok {
		err = fmt.Errorf("internal error occurred: %s", err.Error())
	}

	buf := newBuffer()
	defer buf.free()

	writeHeader(buf, txID, errorActionID)
	buf.WriteString(err.Error())
	buf.WriteRune('\000')
	w.Write(buf.Bytes())
}
// WriteAnnounce encodes an announce response according to BEP 15.
// The peers returned will be resp.IPv6Peers or resp.IPv4Peers, depending on
// whether v6 is set. The action ID will be 4, according to
// http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/.
func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6 bool) {
	buf := newBuffer()
	if v6 {
		writeHeader(buf, txID, announceV6ActionID)
	} else {
		writeHeader(buf, txID, announceActionID)
	}
	binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
	binary.Write(buf, binary.BigEndian, uint32(resp.Incomplete))
	binary.Write(buf, binary.BigEndian, uint32(resp.Complete))

	peers := resp.IPv4Peers
	if v6 {
		peers = resp.IPv6Peers
	}

	for _, peer := range peers {
		// Normalize the address to exactly 4 (or 16) bytes: a net.IP can
		// hold an IPv4 address in 16-byte form, and writing it raw would
		// corrupt the fixed-width compact peer encoding. This mirrors the
		// compact4/compact6 normalization in the HTTP frontend's writer.
		ip := peer.IP.To4()
		if v6 {
			ip = peer.IP.To16()
		}
		if ip == nil {
			// Skip peers whose address doesn't fit this address family
			// rather than emit a malformed packet.
			continue
		}
		buf.Write(ip)
		binary.Write(buf, binary.BigEndian, peer.Port)
	}

	w.Write(buf.Bytes())
	buf.free()
}
// WriteScrape encodes a scrape response according to BEP 15.
func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
	buf := newBuffer()
	defer buf.free()

	writeHeader(buf, txID, scrapeActionID)

	// Note: resp.Files is a map, so file order in the response is arbitrary.
	for _, file := range resp.Files {
		binary.Write(buf, binary.BigEndian, file.Complete)
		binary.Write(buf, binary.BigEndian, file.Snatches)
		binary.Write(buf, binary.BigEndian, file.Incomplete)
	}

	w.Write(buf.Bytes())
}
// WriteConnectionID encodes a new connection response according to BEP 15.
func WriteConnectionID(w io.Writer, txID, connID []byte) {
	buf := newBuffer()
	defer buf.free()

	writeHeader(buf, txID, connectActionID)
	buf.Write(connID)
	w.Write(buf.Bytes())
}
// writeHeader writes the action and transaction ID to the provided response
// buffer.
func writeHeader(w io.Writer, txID []byte, action uint32) {
binary.Write(w, binary.BigEndian, action)
w.Write(txID)
}

58
glide.lock generated
View file

@ -1,24 +1,26 @@
hash: e7d2be6c361fe6fe6242b56e502829e8a72733f9ff0aa57443c9397c3488174f
updated: 2016-05-21T17:58:26.448148976-04:00
hash: fe839da75efcf365317b1b5eb04bfa15cd1db10265f4947b8aff78932bf4622e
updated: 2016-09-05T18:13:39.020799284-04:00
imports:
- name: github.com/beorn7/perks
version: 3ac7bf7a47d159a033b107610db8a1b6575507a4
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
subpackages:
- quantile
- name: github.com/golang/protobuf
version: cd85f19845cc96cc6e5269c894d8cd3c67e9ed83
version: 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a
subpackages:
- proto
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/julienschmidt/httprouter
version: 77366a47451a56bb3ba682481eed85b64fea14e8
version: 8c199fb6259ffc1af525cc3ad52ee60ba8359669
- name: github.com/matttproud/golang_protobuf_extensions
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
subpackages:
- pbutil
- name: github.com/mrd0ll4r/netmatch
version: af335c21c765757f2649dbf1d3d43f77eb6c4eb8
- name: github.com/mendsley/gojwk
version: 4d5ec6e58103388d6cb0d7d72bc72649be4f0504
- name: github.com/prometheus/client_golang
version: d38f1ef46f0d78136db3e585f7ebe1bcc3476f73
version: c5b7fccd204277076155f10851dad72b76a49317
subpackages:
- prometheus
- name: github.com/prometheus/client_model
@ -26,19 +28,43 @@ imports:
subpackages:
- go
- name: github.com/prometheus/common
version: a715f9d07a512e8339f70a275ace0e67c0f9a65f
version: 616e90af75cc300730196d04f3676f838d70414f
subpackages:
- expfmt
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
- name: github.com/tylerb/graceful
version: 9a3d4236b03bb5d26f7951134d248f9d5510d599
- name: golang.org/x/net
version: 0c607074acd38c5f23d1344dfe74c977464d1257
- name: github.com/SermoDigital/jose
version: 389fea327ef076853db8fae03a0f38e30e6092ab
subpackages:
- netutil
- crypto
- jws
- jwt
- name: github.com/Sirupsen/logrus
version: 4b6ea7319e214d98c938f12692336f7ca9348d6b
- name: github.com/spf13/cobra
version: 9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744
- name: github.com/spf13/pflag
version: 103ce5cd2042f2fe629c1957abb64ab3e7f50235
- name: github.com/tylerb/graceful
version: 50a48b6e73fcc75b45e22c05b79629a67c79e938
- name: golang.org/x/sys
version: a646d33e2ee3172a661fc09bca23bb4889a41bc8
subpackages:
- unix
- name: gopkg.in/yaml.v2
version: a83829b6f1293c91addabc89d0571c246397bbf4
devImports: []
version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e
testImports:
- name: github.com/davecgh/go-spew
version: 6cf5744a041a0022271cefed95ba843f6d87fd51
subpackages:
- spew
- name: github.com/pmezard/go-difflib
version: 792786c7400a136282c1664665ae0a8db921c6c2
subpackages:
- difflib
- name: github.com/stretchr/testify
version: f390dcf405f7b83c997eac1b06768bb9f44dec18
subpackages:
- assert

View file

@ -1,9 +1,26 @@
package: github.com/chihaya/chihaya
import:
- package: github.com/SermoDigital/jose
version: ~1.0.0
subpackages:
- crypto
- jws
- jwt
- package: github.com/Sirupsen/logrus
version: ~0.10.0
- package: github.com/julienschmidt/httprouter
- package: github.com/mrd0ll4r/netmatch
version: ~1.1.0
- package: github.com/mendsley/gojwk
- package: github.com/prometheus/client_golang
version: ~0.8.0
subpackages:
- prometheus
- package: github.com/spf13/cobra
- package: github.com/tylerb/graceful
version: ~1.2.13
- package: gopkg.in/yaml.v2
testImport:
- package: github.com/stretchr/testify
version: ~1.1.3
subpackages:
- assert

View file

@ -0,0 +1,79 @@
// Package clientapproval implements a Hook that fails an Announce based on a
// whitelist or blacklist of BitTorrent client IDs.
package clientapproval
import (
"context"
"errors"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// ErrClientUnapproved is the error returned when a client's PeerID is invalid.
var ErrClientUnapproved = bittorrent.ClientError("unapproved client")
// Config represents all the values required by this middleware to validate
// peers based on their BitTorrent client ID.
type Config struct {
	// Whitelist, when non-empty, is the set of 6-byte client IDs allowed to
	// announce; every other client is rejected.
	Whitelist []string `yaml:"whitelist"`
	// Blacklist, when non-empty, is the set of 6-byte client IDs that are
	// rejected.
	Blacklist []string `yaml:"blacklist"`
}
// hook holds the parsed whitelist and blacklist as sets keyed by ClientID.
type hook struct {
	// approved is the parsed whitelist; empty means "no whitelist".
	approved map[bittorrent.ClientID]struct{}
	// unapproved is the parsed blacklist; empty means "no blacklist".
	unapproved map[bittorrent.ClientID]struct{}
}
// NewHook returns an instance of the client approval middleware.
func NewHook(cfg Config) (middleware.Hook, error) {
h := &hook{
approved: make(map[bittorrent.ClientID]struct{}),
unapproved: make(map[bittorrent.ClientID]struct{}),
}
for _, cidString := range cfg.Whitelist {
cidBytes := []byte(cidString)
if len(cidBytes) != 6 {
return nil, errors.New("client ID " + cidString + " must be 6 bytes")
}
var cid bittorrent.ClientID
copy(cid[:], cidBytes)
h.approved[cid] = struct{}{}
}
for _, cidString := range cfg.Blacklist {
cidBytes := []byte(cidString)
if len(cidBytes) != 6 {
return nil, errors.New("client ID " + cidString + " must be 6 bytes")
}
var cid bittorrent.ClientID
copy(cid[:], cidBytes)
h.unapproved[cid] = struct{}{}
}
return h, nil
}
// HandleAnnounce rejects an announce when a whitelist is configured and the
// client is not on it, or when a blacklist is configured and the client is on
// it.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	clientID := bittorrent.NewClientID(req.Peer.ID)

	_, whitelisted := h.approved[clientID]
	if len(h.approved) > 0 && !whitelisted {
		return ctx, ErrClientUnapproved
	}

	_, blacklisted := h.unapproved[clientID]
	if len(h.unapproved) > 0 && blacklisted {
		return ctx, ErrClientUnapproved
	}

	return ctx, nil
}
// HandleScrape is a no-op: scrape requests are not filtered by client ID.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes don't require any protection.
	return ctx, nil
}

View file

@ -1,39 +0,0 @@
## Deniability Middleware
This package provides the announce middleware `deniability` which inserts ghost peers into announce responses to achieve plausible deniability.
### Functionality
This middleware will choose random announces and modify the list of peers returned.
A random number of randomly generated peers will be inserted at random positions into the list of peers.
As soon as the list of peers exceeds `numWant`, peers will be replaced rather than inserted.
Note that if a response is picked for augmentation, both IPv4 and IPv6 peers will be modified, in case they are not empty.
Also note that the IP address for the generated peer consists of bytes in the range [1,254].
### Configuration
This middleware provides the following parameters for configuration:
- `modify_response_probability` (float, >0, <= 1) indicates the probability by which a response will be augmented with random peers.
- `max_random_peers` (int, >0) sets an upper boundary (inclusive) for the amount of peers added.
- `prefix` (string, 20 characters at most) sets the prefix for generated peer IDs.
The peer ID will be padded to 20 bytes using a random string of alphanumeric characters.
- `min_port` (int, >0, <=65535) sets a lower boundary for the port for generated peers.
- `max_port` (int, >0, <=65536, > `min_port`) sets an upper boundary for the port for generated peers.
An example config might look like this:
chihaya:
tracker:
announce_middleware:
- name: deniability
config:
modify_response_probability: 0.2
max_random_peers: 5
prefix: -AZ2060-
min_port: 40000
max_port: 60000
For more information about peer IDs and their prefixes, see [this wiki entry](https://wiki.theory.org/BitTorrentSpecification#peer_id).

View file

@ -1,46 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package deniability
import (
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
)
// Config represents the configuration for the deniability middleware.
type Config struct {
// ModifyResponseProbability is the probability by which a response will
// be augmented with random peers.
ModifyResponseProbability float32 `yaml:"modify_response_probability"`
// MaxRandomPeers is the amount of peers that will be added at most.
MaxRandomPeers int `yaml:"max_random_peers"`
// Prefix is the prefix to be used for peer IDs.
Prefix string `yaml:"prefix"`
// MinPort is the minimum port (inclusive) for the generated peer.
MinPort int `yaml:"min_port"`
// MaxPort is the maximum port (exclusive) for the generated peer.
MaxPort int `yaml:"max_port"`
}
// newConfig parses the given MiddlewareConfig as a deniability.Config.
func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) {
bytes, err := yaml.Marshal(mwcfg.Config)
if err != nil {
return nil, err
}
var cfg Config
err = yaml.Unmarshal(bytes, &cfg)
if err != nil {
return nil, err
}
return &cfg, nil
}

View file

@ -1,63 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package deniability
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
)
type configTestData struct {
modifyProbability string
maxNewPeers string
prefix string
minPort string
maxPort string
err bool
expected Config
}
var (
configTemplate = `
name: foo
config:
modify_response_probability: %s
max_random_peers: %s
prefix: %s
min_port: %s
max_port: %s`
configData = []configTestData{
{"1.0", "5", "abc", "2000", "3000", false, Config{1.0, 5, "abc", 2000, 3000}},
{"a", "a", "12", "a", "a", true, Config{}},
}
)
func TestNewConfig(t *testing.T) {
var mwconfig chihaya.MiddlewareConfig
cfg, err := newConfig(mwconfig)
assert.Nil(t, err)
assert.NotNil(t, cfg)
for _, test := range configData {
config := fmt.Sprintf(configTemplate, test.modifyProbability, test.maxNewPeers, test.prefix, test.minPort, test.maxPort)
err = yaml.Unmarshal([]byte(config), &mwconfig)
assert.Nil(t, err)
cfg, err = newConfig(mwconfig)
if test.err {
assert.NotNil(t, err)
continue
}
assert.Nil(t, err)
assert.Equal(t, test.expected, *cfg)
}
}

View file

@ -1,121 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package deniability
import (
"errors"
"math/rand"
"time"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/random"
"github.com/chihaya/chihaya/tracker"
)
func init() {
tracker.RegisterAnnounceMiddlewareConstructor("deniability", constructor)
}
type deniabilityMiddleware struct {
cfg *Config
r *rand.Rand
}
// constructor provides a middleware constructor that returns a middleware to
// insert peers into the peer lists returned as a response to an announce.
//
// It returns an error if the config provided is either syntactically or
// semantically incorrect.
func constructor(c chihaya.MiddlewareConfig) (tracker.AnnounceMiddleware, error) {
	cfg, err := newConfig(c)
	if err != nil {
		return nil, err
	}

	// The check enforces 0 < probability <= 1, i.e. the interval (0,1].
	// (The previous message incorrectly described the range as "[0,1)".)
	if cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1 {
		return nil, errors.New("modify_response_probability must be in (0,1]")
	}

	if cfg.MaxRandomPeers <= 0 {
		return nil, errors.New("max_random_peers must be > 0")
	}

	if cfg.MinPort <= 0 {
		return nil, errors.New("min_port must not be <= 0")
	}

	if cfg.MaxPort > 65536 {
		return nil, errors.New("max_port must not be > 65536")
	}

	if cfg.MinPort >= cfg.MaxPort {
		return nil, errors.New("max_port must not be <= min_port")
	}

	if len(cfg.Prefix) > 20 {
		return nil, errors.New("prefix must not be longer than 20 bytes")
	}

	mw := deniabilityMiddleware{
		cfg: cfg,
		r:   rand.New(rand.NewSource(time.Now().UnixNano())),
	}

	return mw.modifyResponse, nil
}
func (mw *deniabilityMiddleware) modifyResponse(next tracker.AnnounceHandler) tracker.AnnounceHandler {
return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
err := next(cfg, req, resp)
if err != nil {
return err
}
if mw.cfg.ModifyResponseProbability == 1 || mw.r.Float32() < mw.cfg.ModifyResponseProbability {
numNewPeers := mw.r.Intn(mw.cfg.MaxRandomPeers) + 1
for i := 0; i < numNewPeers; i++ {
if len(resp.IPv6Peers) > 0 {
if len(resp.IPv6Peers) >= int(req.NumWant) {
mw.replacePeer(resp.IPv6Peers, true)
} else {
resp.IPv6Peers = mw.insertPeer(resp.IPv6Peers, true)
}
}
if len(resp.IPv4Peers) > 0 {
if len(resp.IPv4Peers) >= int(req.NumWant) {
mw.replacePeer(resp.IPv4Peers, false)
} else {
resp.IPv4Peers = mw.insertPeer(resp.IPv4Peers, false)
}
}
}
}
return nil
}
}
// replacePeer replaces a peer from a random position within the given slice
// of peers with a randomly generated one.
//
// replacePeer panics if len(peers) == 0.
func (mw *deniabilityMiddleware) replacePeer(peers []chihaya.Peer, v6 bool) {
peers[mw.r.Intn(len(peers))] = random.Peer(mw.r, mw.cfg.Prefix, v6, mw.cfg.MinPort, mw.cfg.MaxPort)
}
// insertPeer inserts a randomly generated peer at a random position into the
// given slice and returns the new slice.
func (mw *deniabilityMiddleware) insertPeer(peers []chihaya.Peer, v6 bool) []chihaya.Peer {
pos := 0
if len(peers) > 0 {
pos = mw.r.Intn(len(peers))
}
peers = append(peers, chihaya.Peer{})
copy(peers[pos+1:], peers[pos:])
peers[pos] = random.Peer(mw.r, mw.cfg.Prefix, v6, mw.cfg.MinPort, mw.cfg.MaxPort)
return peers
}

View file

@ -1,110 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package deniability
import (
"fmt"
"math/rand"
"net"
"testing"
"github.com/stretchr/testify/assert"
"github.com/chihaya/chihaya"
)
type constructorTestData struct {
cfg Config
error bool
}
var constructorData = []constructorTestData{
{Config{1.0, 10, "abc", 1024, 1025}, false},
{Config{1.1, 10, "abc", 1024, 1025}, true},
{Config{0, 10, "abc", 1024, 1025}, true},
{Config{1.0, 0, "abc", 1024, 1025}, true},
{Config{1.0, 10, "01234567890123456789_", 1024, 1025}, true},
{Config{1.0, 10, "abc", 0, 1025}, true},
{Config{1.0, 10, "abc", 1024, 0}, true},
{Config{1.0, 10, "abc", 1024, 65537}, true},
}
func TestReplacePeer(t *testing.T) {
cfg := Config{
Prefix: "abc",
MinPort: 1024,
MaxPort: 1025,
}
mw := deniabilityMiddleware{
r: rand.New(rand.NewSource(0)),
cfg: &cfg,
}
peer := chihaya.Peer{
ID: chihaya.PeerID([20]byte{}),
Port: 2000,
IP: net.ParseIP("10.150.255.23"),
}
peers := []chihaya.Peer{peer}
mw.replacePeer(peers, false)
assert.Equal(t, 1, len(peers))
assert.Equal(t, "abc", string(peers[0].ID[:3]))
assert.Equal(t, uint16(1024), peers[0].Port)
assert.NotNil(t, peers[0].IP.To4())
mw.replacePeer(peers, true)
assert.Equal(t, 1, len(peers))
assert.Equal(t, "abc", string(peers[0].ID[:3]))
assert.Equal(t, uint16(1024), peers[0].Port)
assert.Nil(t, peers[0].IP.To4())
peers = []chihaya.Peer{peer, peer}
mw.replacePeer(peers, true)
assert.True(t, (peers[0].Port == peer.Port) != (peers[1].Port == peer.Port), "not exactly one peer was replaced")
}
func TestInsertPeer(t *testing.T) {
cfg := Config{
Prefix: "abc",
MinPort: 1024,
MaxPort: 1025,
}
mw := deniabilityMiddleware{
r: rand.New(rand.NewSource(0)),
cfg: &cfg,
}
peer := chihaya.Peer{
ID: chihaya.PeerID([20]byte{}),
Port: 2000,
IP: net.ParseIP("10.150.255.23"),
}
var peers []chihaya.Peer
peers = mw.insertPeer(peers, false)
assert.Equal(t, 1, len(peers))
assert.Equal(t, uint16(1024), peers[0].Port)
assert.Equal(t, "abc", string(peers[0].ID[:3]))
assert.NotNil(t, peers[0].IP.To4())
peers = []chihaya.Peer{peer, peer}
peers = mw.insertPeer(peers, true)
assert.Equal(t, 3, len(peers))
}
func TestConstructor(t *testing.T) {
for _, tt := range constructorData {
_, err := constructor(chihaya.MiddlewareConfig{
Config: tt.cfg,
})
if tt.error {
assert.NotNil(t, err, fmt.Sprintf("error expected for %+v", tt.cfg))
} else {
assert.Nil(t, err, fmt.Sprintf("no error expected for %+v", tt.cfg))
}
}
}

14
middleware/hooks.go Normal file
View file

@ -0,0 +1,14 @@
package middleware
import (
"context"
"github.com/chihaya/chihaya/bittorrent"
)
// Hook abstracts the concept of anything that needs to interact with a
// BitTorrent client's request and response to a BitTorrent tracker.
type Hook interface {
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) (context.Context, error)
HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) (context.Context, error)
}

179
middleware/jwt/jwt.go Normal file
View file

@ -0,0 +1,179 @@
// Package jwt implements a Hook that fails an Announce if the client's request
// is missing a valid JSON Web Token.
//
// JWTs are validated against the standard claims in RFC7519 along with an
// extra "infohash" claim that verifies the client has access to the Swarm.
// RS256 keys are asynchronously rotated from a provided JWK Set HTTP endpoint.
package jwt
import (
"context"
"crypto"
"encoding/json"
"errors"
"net/http"
"net/url"
"time"
jc "github.com/SermoDigital/jose/crypto"
"github.com/SermoDigital/jose/jws"
"github.com/SermoDigital/jose/jwt"
log "github.com/Sirupsen/logrus"
"github.com/mendsley/gojwk"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
var (
// ErrMissingJWT is returned when a JWT is missing from a request.
ErrMissingJWT = bittorrent.ClientError("unapproved request: missing jwt")
// ErrInvalidJWT is returned when a JWT fails to verify.
ErrInvalidJWT = bittorrent.ClientError("unapproved request: invalid jwt")
)
// Config represents all the values required by this middleware to fetch JWKs
// and verify JWTs.
type Config struct {
	// Issuer is the expected value of the JWT "iss" claim.
	Issuer string `yaml:"issuer"`
	// Audience is the value that must appear in the JWT "aud" claim list.
	Audience string `yaml:"audience"`
	// JWKSetURL is the HTTP endpoint the JWK Set is fetched from.
	JWKSetURL string `yaml:"jwk_set_url"`
	// JWKUpdateInterval is the delay between JWK Set refreshes.
	JWKUpdateInterval time.Duration `yaml:"jwk_set_update_interval"`
}
// hook holds the middleware configuration and the current set of verification
// keys fetched from the JWK Set endpoint.
type hook struct {
	cfg Config
	// publicKeys maps JWK "kid" values to their decoded public keys.
	// NOTE(review): the background goroutine started in NewHook replaces
	// this map while HandleAnnounce may be reading it, with no
	// synchronization — confirm whether this data race is acceptable.
	publicKeys map[string]crypto.PublicKey
	// closing, when closed via Stop, terminates the JWK update goroutine.
	closing chan struct{}
}
// NewHook returns an instance of the JWT middleware.
//
// It starts a background goroutine that re-fetches the JWK Set from
// cfg.JWKSetURL every cfg.JWKUpdateInterval until Stop is called.
func NewHook(cfg Config) middleware.Hook {
	h := &hook{
		cfg:        cfg,
		publicKeys: map[string]crypto.PublicKey{},
		closing:    make(chan struct{}),
	}

	go func() {
		for {
			select {
			case <-h.closing:
				return
			case <-time.After(cfg.JWKUpdateInterval):
				h.updateKeys()
			}
		}
	}()

	return h
}

// updateKeys fetches the JWK Set and replaces the hook's public key map.
// Any failure is logged and leaves the previously fetched keys in place.
//
// NOTE(review): the map is swapped without synchronization while
// HandleAnnounce may read it concurrently — confirm this race is acceptable.
func (h *hook) updateKeys() {
	resp, err := http.Get(h.cfg.JWKSetURL)
	if err != nil {
		log.Errorln("failed to fetch JWK Set: " + err.Error())
		return
	}
	defer resp.Body.Close()

	parsedJWKs := map[string]gojwk.Key{}
	if err := json.NewDecoder(resp.Body).Decode(&parsedJWKs); err != nil {
		log.Errorln("failed to decode JWK JSON: " + err.Error())
		return
	}

	keys := map[string]crypto.PublicKey{}
	for kid, parsedJWK := range parsedJWKs {
		publicKey, err := parsedJWK.DecodePublicKey()
		if err != nil {
			log.Errorln("failed to decode JWK into public key: " + err.Error())
			continue
		}
		keys[kid] = publicKey
	}

	h.publicKeys = keys
}
// Stop terminates the background goroutine that periodically refreshes the
// JWK Set.
func (h *hook) Stop() {
	close(h.closing)
}
// HandleAnnounce rejects an announce whose "jwt" parameter is missing
// (ErrMissingJWT) or fails validation against the configured issuer,
// audience, requested infohash, and known keys (ErrInvalidJWT).
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	var jwtParam string
	ok := false
	if req.Params != nil {
		jwtParam, ok = req.Params.String("jwt")
	}
	if !ok {
		return ctx, ErrMissingJWT
	}

	if validateJWT(req.InfoHash, []byte(jwtParam), h.cfg.Issuer, h.cfg.Audience, h.publicKeys) != nil {
		return ctx, ErrInvalidJWT
	}
	return ctx, nil
}
// HandleScrape is a no-op: scrape requests do not require a JWT.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes don't require any protection.
	return ctx, nil
}
// validateJWT parses and verifies a JWT for the given infohash.
//
// To be accepted, the token must parse as a JWT, carry an "iss" claim equal
// to cfgIss, an "aud" claim list containing cfgAud, and an "infohash" claim
// matching ih, and its signature must verify as RS256 under the key selected
// by the token's "kid" header from publicKeys.
func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string, publicKeys map[string]crypto.PublicKey) error {
	parsedJWT, err := jws.ParseJWT(jwtBytes)
	if err != nil {
		return err
	}

	claims := parsedJWT.Claims()

	// The issuer claim must be present and match the configured issuer.
	if iss, ok := claims.Issuer(); !ok || iss != cfgIss {
		return jwt.ErrInvalidISSClaim
	}

	// The audience list must contain the configured audience.
	if aud, ok := claims.Audience(); !ok || !validAudience(aud, cfgAud) {
		return jwt.ErrInvalidAUDClaim
	}

	// The custom "infohash" claim must match the infohash being announced.
	if ihClaim, ok := claims.Get("infohash").(string); !ok || !validInfoHash(ihClaim, ih) {
		return errors.New("claim \"infohash\" is invalid")
	}

	// Select the verification key by the token's protected "kid" header.
	parsedJWS := parsedJWT.(jws.JWS)
	kid, ok := parsedJWS.Protected().Get("kid").(string)
	if !ok {
		return errors.New("invalid kid")
	}
	publicKey, ok := publicKeys[kid]
	if !ok {
		return errors.New("signed by unknown kid")
	}

	// Only RS256 signatures are accepted.
	return parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
}
// validAudience reports whether cfgAud appears in the audience list aud.
func validAudience(aud []string, cfgAud string) bool {
	found := false
	for i := 0; !found && i < len(aud); i++ {
		found = aud[i] == cfgAud
	}
	return found
}
// validInfoHash reports whether the "infohash" claim matches ih, either
// directly or after URL query-unescaping the claim.
func validInfoHash(claim string, ih bittorrent.InfoHash) bool {
	matches := func(s string) bool {
		return len(s) == 20 && bittorrent.InfoHashFromString(s) == ih
	}

	if matches(claim) {
		return true
	}

	unescaped, err := url.QueryUnescape(claim)
	return err == nil && matches(unescaped)
}

94
middleware/middleware.go Normal file
View file

@ -0,0 +1,94 @@
// Package middleware implements the TrackerLogic interface by executing
// a series of middleware hooks.
package middleware
import (
"context"
"time"
log "github.com/Sirupsen/logrus"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/storage"
)
// Config holds the configuration common across all middleware.
type Config struct {
	// AnnounceInterval is the interval reported to clients in announce
	// responses.
	AnnounceInterval time.Duration `yaml:"announce_interval"`
}
var _ frontend.TrackerLogic = &Logic{}
// NewLogic creates a new instance of a TrackerLogic that executes the provided
// middleware hooks.
func NewLogic(cfg Config, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic {
	return &Logic{
		announceInterval: cfg.AnnounceInterval,
		peerStore:        peerStore,
		preHooks:         preHooks,
		postHooks:        postHooks,
	}
}
// Logic is an implementation of the TrackerLogic that functions by
// executing a series of middleware hooks.
type Logic struct {
	// announceInterval is the interval reported in announce responses.
	announceInterval time.Duration
	// peerStore holds swarm state.
	// NOTE(review): peerStore is not referenced by the methods visible
	// here — confirm where it is consumed.
	peerStore storage.PeerStore
	// preHooks run before a response is returned; a hook error aborts the
	// request.
	preHooks []Hook
	// postHooks run after a request completes; their errors are only
	// logged.
	postHooks []Hook
}
// HandleAnnounce generates a response for an Announce by running every
// pre-hook in order; the first hook error aborts the request.
func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (*bittorrent.AnnounceResponse, error) {
	resp := &bittorrent.AnnounceResponse{Interval: l.announceInterval}

	var err error
	for _, h := range l.preHooks {
		ctx, err = h.HandleAnnounce(ctx, req, resp)
		if err != nil {
			return nil, err
		}
	}

	return resp, nil
}
// AfterAnnounce runs every post-hook on a completed Announce; the first hook
// error stops the chain and is logged rather than returned.
func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) {
	for _, h := range l.postHooks {
		var err error
		if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
			log.Errorln("chihaya: post-announce hooks failed:", err.Error())
			return
		}
	}
}
// HandleScrape generates a response for a Scrape by running every pre-hook in
// order; the first hook error aborts the request.
func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (*bittorrent.ScrapeResponse, error) {
	resp := &bittorrent.ScrapeResponse{
		Files: make(map[bittorrent.InfoHash]bittorrent.Scrape),
	}

	var err error
	for _, h := range l.preHooks {
		ctx, err = h.HandleScrape(ctx, req, resp)
		if err != nil {
			return nil, err
		}
	}

	return resp, nil
}
// AfterScrape runs every post-hook on a completed Scrape; the first hook
// error stops the chain and is logged rather than returned.
func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) {
	for _, h := range l.postHooks {
		var err error
		if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
			log.Errorln("chihaya: post-scrape hooks failed:", err.Error())
			return
		}
	}
}

View file

@ -1,34 +0,0 @@
## Announce Interval Variation Middleware
This package provides the announce middleware `varinterval` which randomizes the announce interval.
### Functionality
This middleware will choose random announces and modify the `interval` and `min_interval` fields.
A random number of seconds will be added to the `interval` field and, if desired, also to the `min_interval` field.
Note that if a response is picked for modification and `min_interval` should be changed as well, both `interval` and `min_interval` will be modified by the same amount.
### Use Case
Use this middleware to avoid recurring load spikes on the tracker.
By randomizing the announce interval, load spikes will flatten out after a few cycles.
### Configuration
This middleware provides the following parameters for configuration:
- `modify_response_probability` (float, >0, <= 1) indicates the probability by which a response will be chosen to have its announce intervals modified.
- `max_increase_delta` (int, >0) sets an upper boundary (inclusive) for the amount of seconds added.
- `modify_min_interval` (boolean) whether to modify the `min_interval` field as well.
An example config might look like this:
chihaya:
tracker:
announce_middleware:
- name: varinterval
config:
modify_response_probability: 0.2
max_increase_delta: 60
modify_min_interval: true

View file

@ -1,43 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package varinterval
import (
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
)
// Config represents the configuration for the varinterval middleware.
type Config struct {
// ModifyResponseProbability is the probability by which a response will
// be modified.
ModifyResponseProbability float32 `yaml:"modify_response_probability"`
// MaxIncreaseDelta is the amount of seconds that will be added at most.
MaxIncreaseDelta int `yaml:"max_increase_delta"`
// ModifyMinInterval specifies whether min_interval should be increased
// as well.
ModifyMinInterval bool `yaml:"modify_min_interval"`
}
// newConfig parses the given MiddlewareConfig as a varinterval.Config.
//
// The contents of the config are not checked.
func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) {
bytes, err := yaml.Marshal(mwcfg.Config)
if err != nil {
return nil, err
}
var cfg Config
err = yaml.Unmarshal(bytes, &cfg)
if err != nil {
return nil, err
}
return &cfg, nil
}

View file

@ -1,59 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package varinterval
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
)
type configTestData struct {
modifyProbability string
maxIncreaseDelta string
modifyMinInterval string
err bool
expected Config
}
var (
configTemplate = `
name: foo
config:
modify_response_probability: %s
max_increase_delta: %s
modify_min_interval: %s`
configData = []configTestData{
{"1.0", "60", "false", false, Config{1.0, 60, false}},
{"a", "60", "false", true, Config{}},
}
)
func TestNewConfig(t *testing.T) {
var mwconfig chihaya.MiddlewareConfig
cfg, err := newConfig(mwconfig)
assert.Nil(t, err)
assert.NotNil(t, cfg)
for _, test := range configData {
config := fmt.Sprintf(configTemplate, test.modifyProbability, test.maxIncreaseDelta, test.modifyMinInterval)
err = yaml.Unmarshal([]byte(config), &mwconfig)
assert.Nil(t, err)
cfg, err = newConfig(mwconfig)
if test.err {
assert.NotNil(t, err)
continue
}
assert.Nil(t, err)
assert.Equal(t, test.expected, *cfg)
}
}

View file

@ -1,70 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package varinterval
import (
"errors"
"math/rand"
"time"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/tracker"
)
func init() {
tracker.RegisterAnnounceMiddlewareConstructor("varinterval", constructor)
}
type varintervalMiddleware struct {
cfg *Config
r *rand.Rand
}
// constructor provides a middleware constructor that returns a middleware to
// insert a variation into announce intervals.
//
// It returns an error if the config provided is either syntactically or
// semantically incorrect.
func constructor(c chihaya.MiddlewareConfig) (tracker.AnnounceMiddleware, error) {
	cfg, err := newConfig(c)
	if err != nil {
		return nil, err
	}

	// The check enforces 0 < probability <= 1, i.e. the interval (0,1].
	// (The previous message incorrectly described the range as "[0,1)".)
	if cfg.ModifyResponseProbability <= 0 || cfg.ModifyResponseProbability > 1 {
		return nil, errors.New("modify_response_probability must be in (0,1]")
	}

	if cfg.MaxIncreaseDelta <= 0 {
		return nil, errors.New("max_increase_delta must be > 0")
	}

	mw := varintervalMiddleware{
		cfg: cfg,
		r:   rand.New(rand.NewSource(time.Now().UnixNano())),
	}

	return mw.modifyResponse, nil
}
func (mw *varintervalMiddleware) modifyResponse(next tracker.AnnounceHandler) tracker.AnnounceHandler {
return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
err := next(cfg, req, resp)
if err != nil {
return err
}
if mw.cfg.ModifyResponseProbability == 1 || mw.r.Float32() < mw.cfg.ModifyResponseProbability {
addSeconds := time.Duration(mw.r.Intn(mw.cfg.MaxIncreaseDelta)+1) * time.Second
resp.Interval += addSeconds
if mw.cfg.ModifyMinInterval {
resp.MinInterval += addSeconds
}
}
return nil
}
}

View file

@ -1,66 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package varinterval
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/tracker"
)
type constructorTestData struct {
cfg Config
error bool
}
var constructorData = []constructorTestData{
{Config{1.0, 10, false}, false},
{Config{1.1, 10, false}, true},
{Config{0, 10, true}, true},
{Config{1.0, 0, false}, true},
}
func TestConstructor(t *testing.T) {
for _, tt := range constructorData {
_, err := constructor(chihaya.MiddlewareConfig{
Config: tt.cfg,
})
if tt.error {
assert.NotNil(t, err, fmt.Sprintf("error expected for %+v", tt.cfg))
} else {
assert.Nil(t, err, fmt.Sprintf("no error expected for %+v", tt.cfg))
}
}
}
func TestModifyResponse(t *testing.T) {
var (
achain tracker.AnnounceChain
req chihaya.AnnounceRequest
resp chihaya.AnnounceResponse
)
mw, err := constructor(chihaya.MiddlewareConfig{
Config: Config{
ModifyResponseProbability: 1.0,
MaxIncreaseDelta: 10,
ModifyMinInterval: true,
},
})
assert.Nil(t, err)
achain.Append(mw)
handler := achain.Handler()
err = handler(nil, &req, &resp)
assert.Nil(t, err)
assert.True(t, resp.Interval > 0, "interval should have been increased")
assert.True(t, resp.MinInterval > 0, "min_interval should have been increased")
}

View file

@ -1,23 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package clientid implements the parsing of BitTorrent ClientIDs from
// BitTorrent PeerIDs.
package clientid
// New returns the part of a PeerID that identifies a peer's client software.
//
// Azureus-style IDs ("-XX1234-...") yield the six characters after the
// leading dash; Shadow-style IDs yield the first six characters. IDs too
// short to carry a client identifier yield the empty string.
func New(peerID string) (clientID string) {
	if len(peerID) < 6 {
		return ""
	}

	if peerID[0] != '-' {
		// Shadow style: the identifier is the leading six characters.
		return peerID[:6]
	}

	if len(peerID) >= 7 {
		// Azureus style: skip the dash, take the next six characters.
		return peerID[1:7]
	}
	return ""
}

View file

@ -1,74 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package random
import (
"math/rand"
"net"
"github.com/chihaya/chihaya"
)
// Peer generates a random chihaya.Peer.
//
// prefix is the prefix to use for the peer ID. If len(prefix) > 20, it will be
// truncated to 20 characters. If len(prefix) < 20, it will be padded with an
// alphanumeric random string to have 20 characters.
//
// v6 indicates whether an IPv6 address should be generated.
// Regardless of the length of the generated IP address, its bytes will have
// values in [1,254].
//
// minPort and maxPort describe the range for the randomly generated port, where
// minPort <= port < maxPort.
// minPort and maxPort will be checked and altered so that
// 1 <= minPort <= maxPort <= 65536.
// If minPort == maxPort, port will be set to minPort.
//
// NOTE(review): when minPort == maxPort == 65536 the uint16 conversion below
// truncates the port to 0 — confirm whether 65536 is actually a usable bound.
func Peer(r *rand.Rand, prefix string, v6 bool, minPort, maxPort int) chihaya.Peer {
	var (
		port uint16
		ip   net.IP
	)
	// Clamp the port range into [1, 65536] and truncate over-long prefixes.
	if minPort <= 0 {
		minPort = 1
	}
	if maxPort > 65536 {
		maxPort = 65536
	}
	if maxPort < minPort {
		maxPort = minPort
	}
	if len(prefix) > 20 {
		prefix = prefix[:20]
	}
	if minPort == maxPort {
		port = uint16(minPort)
	} else {
		// Uniform draw in [minPort, maxPort).
		port = uint16(r.Int63()%int64(maxPort-minPort)) + uint16(minPort)
	}
	// Allocate 16 bytes for IPv6, 4 for IPv4.
	if v6 {
		b := make([]byte, 16)
		ip = net.IP(b)
	} else {
		b := make([]byte, 4)
		ip = net.IP(b)
	}
	// Fill every byte with a value in [1, 254] (never 0 or 255).
	for i := range ip {
		b := r.Intn(254) + 1
		ip[i] = byte(b)
	}
	// Pad the prefix up to exactly 20 characters with random alphanumerics.
	prefix = prefix + AlphaNumericString(r, 20-len(prefix))
	return chihaya.Peer{
		ID:   chihaya.PeerIDFromString(prefix),
		Port: port,
		IP:   ip,
	}
}

View file

@ -1,43 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package random
import (
"math/rand"
"net"
"testing"
"github.com/stretchr/testify/assert"
)
// TestPeer exercises Peer for IPv4 and IPv6 generation, peer-ID prefix
// handling, the minPort == maxPort case, and clamping of invalid port ranges.
func TestPeer(t *testing.T) {
	rng := rand.New(rand.NewSource(0))

	const (
		minPort = 2000
		maxPort = 2010
	)

	for i := 0; i < 100; i++ {
		peer := Peer(rng, "", false, minPort, maxPort)
		assert.Equal(t, 20, len(peer.ID))
		assert.True(t, peer.Port >= uint16(minPort) && peer.Port < uint16(maxPort))
		assert.NotNil(t, peer.IP.To4())
	}

	for i := 0; i < 100; i++ {
		peer := Peer(rng, "", true, minPort, maxPort)
		assert.Equal(t, 20, len(peer.ID))
		assert.True(t, peer.Port >= uint16(minPort) && peer.Port < uint16(maxPort))
		assert.True(t, len(peer.IP) == net.IPv6len)
	}

	// Exact-length prefix with a single-port range.
	peer := Peer(rng, "abcdefghijklmnopqrst", false, 2000, 2000)
	assert.Equal(t, "abcdefghijklmnopqrst", string(peer.ID[:]))
	assert.Equal(t, uint16(2000), peer.Port)

	// Over-long prefix and an invalid (negative) port range.
	peer = Peer(rng, "abcdefghijklmnopqrstUVWXYZ", true, -10, -5)
	assert.Equal(t, "abcdefghijklmnopqrst", string(peer.ID[:]))
	assert.True(t, peer.Port >= uint16(1) && peer.Port <= uint16(65535))
}

View file

@ -1,26 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package random
import "math/rand"
// AlphaNumeric is an alphabet with all lower- and uppercase letters and
// numbers (62 characters total).
const AlphaNumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
// AlphaNumericString is a shorthand for String(r, l, AlphaNumeric): it draws
// l random characters from the alphanumeric alphabet using source r.
func AlphaNumericString(r rand.Source, l int) string {
	s := String(r, l, AlphaNumeric)
	return s
}
// String generates a random string of length l, containing only runes from
// the alphabet, using the random source r.
//
// Each character is chosen by reducing one Int63 draw modulo the alphabet
// size, so the same seed always reproduces the same string.
func String(r rand.Source, l int, alphabet string) string {
	out := make([]byte, l)
	size := int64(len(alphabet))
	for i := 0; i < l; i++ {
		out[i] = alphabet[r.Int63()%size]
	}
	return string(out)
}

View file

@ -1,30 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package random
import (
"math/rand"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestAlphaNumericString checks length handling and that every generated
// character belongs to the AlphaNumeric alphabet.
func TestAlphaNumericString(t *testing.T) {
	src := rand.NewSource(0)

	empty := AlphaNumericString(src, 0)
	assert.Equal(t, 0, len(empty))

	ten := AlphaNumericString(src, 10)
	assert.Equal(t, 10, len(ten))

	for i := 0; i < 100; i++ {
		for _, c := range AlphaNumericString(src, 10) {
			assert.True(t, strings.Contains(AlphaNumeric, string(c)))
		}
	}
}

View file

@ -1,38 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"time"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
)
// httpConfig holds the configuration for the HTTP tracker frontend.
type httpConfig struct {
	// Addr is the address (host:port) the HTTP server listens on.
	Addr string `yaml:"addr"`
	// RequestTimeout bounds how long a graceful shutdown waits for in-flight
	// requests to drain.
	RequestTimeout time.Duration `yaml:"request_timeout"`
	ReadTimeout    time.Duration `yaml:"read_timeout"`
	WriteTimeout   time.Duration `yaml:"write_timeout"`
	// AllowIPSpoofing honors client-supplied ip/ipv4/ipv6 query parameters.
	AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
	// DualStackedPeers keeps collecting until both an IPv4 and an IPv6
	// address have been found for a request.
	DualStackedPeers bool `yaml:"dual_stacked_peers"`
	// RealIPHeader names a trusted reverse-proxy header to read the
	// client IP from instead of the connection's remote address.
	RealIPHeader string `yaml:"real_ip_header"`
}
// newHTTPConfig extracts the HTTP-specific options from a generic
// chihaya.ServerConfig by round-tripping its Config through YAML.
func newHTTPConfig(srvcfg *chihaya.ServerConfig) (*httpConfig, error) {
	raw, err := yaml.Marshal(srvcfg.Config)
	if err != nil {
		return nil, err
	}

	cfg := new(httpConfig)
	if err = yaml.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}

View file

@ -1,133 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package query implements a simple, fast URL parser designed to be used to
// parse parameters sent from BitTorrent clients. The last value of a key wins,
// except for they key "info_hash".
package query
import (
"errors"
"net/url"
"strconv"
"strings"
"github.com/chihaya/chihaya"
)
// ErrKeyNotFound is returned when a provided key has no value associated with
// it.
var ErrKeyNotFound = errors.New("query: value for the provided key does not exist")
// ErrInvalidInfohash is returned when parsing a query encounters an infohash
// with invalid length.
var ErrInvalidInfohash = errors.New("query: invalid infohash")
// Query represents a parsed URL.Query.
type Query struct {
	// query is the raw, unparsed query string.
	query string
	// params maps lowercased keys to their (last) unescaped value.
	params map[string]string
	// infoHashes accumulates every "info_hash" value, in order of appearance.
	infoHashes []chihaya.InfoHash
}
// New parses a raw URL query.
//
// The string is scanned once; pairs are split on '&', ';' and '?'. For every
// key except "info_hash" the last value wins; "info_hash" values accumulate
// and must each be exactly 20 bytes long.
func New(query string) (*Query, error) {
	var (
		// [keyStart, keyEnd] and [valStart, valEnd] bracket the current pair.
		keyStart, keyEnd int
		valStart, valEnd int
		// onKey tracks whether the scanner is before or after '='.
		onKey = true
		q = &Query{
			query:      query,
			infoHashes: nil,
			params:     make(map[string]string),
		}
	)
	for i, length := 0, len(query); i < length; i++ {
		separator := query[i] == '&' || query[i] == ';' || query[i] == '?'
		last := i == length-1
		if separator || last {
			// Separator while still scanning a key (empty segment or a bare
			// key with no '='): restart the key scan after it.
			if onKey && !last {
				keyStart = i + 1
				continue
			}
			// The final character belongs to the value unless it is itself a
			// separator.
			if last && !separator && !onKey {
				valEnd = i
			}
			keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])
			if err != nil {
				return nil, err
			}
			// valEnd == 0 means no value was seen; leave valStr empty.
			var valStr string
			if valEnd > 0 {
				valStr, err = url.QueryUnescape(query[valStart : valEnd+1])
				if err != nil {
					return nil, err
				}
			}
			if keyStr == "info_hash" {
				if len(valStr) != 20 {
					return nil, ErrInvalidInfohash
				}
				q.infoHashes = append(q.infoHashes, chihaya.InfoHashFromString(valStr))
			} else {
				// Keys are case-insensitive; values keep their case.
				q.params[strings.ToLower(keyStr)] = valStr
			}
			// Reset state for the next pair.
			valEnd = 0
			onKey = true
			keyStart = i + 1
		} else if query[i] == '=' {
			onKey = false
			valStart = i + 1
			valEnd = 0
		} else if onKey {
			keyEnd = i
		} else {
			valEnd = i
		}
	}
	return q, nil
}
// String returns the value stored for key. Every key can be returned as a
// string because query values arrive URL-encoded as strings.
// It returns ErrKeyNotFound when the key is absent.
func (q *Query) String(key string) (string, error) {
	if val, ok := q.params[key]; ok {
		return val, nil
	}
	return "", ErrKeyNotFound
}
// Uint64 returns the value stored for key parsed as an unsigned integer.
// After being called, it is safe to cast the uint64 to a narrower width.
// It returns ErrKeyNotFound when the key is absent, or the strconv error
// when the value does not parse.
func (q *Query) Uint64(key string) (uint64, error) {
	raw, ok := q.params[key]
	if !ok {
		return 0, ErrKeyNotFound
	}

	parsed, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return 0, err
	}
	return parsed, nil
}
// InfoHashes returns a list of requested infohashes, in the order they
// appeared in the query.
func (q *Query) InfoHashes() []chihaya.InfoHash {
	return q.infoHashes
}

View file

@ -1,100 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package query
import (
"net/url"
"testing"
)
var (
	baseAddr     = "https://www.subdomain.tracker.com:80/"
	testInfoHash = "01234567890123456789"
	testPeerID   = "-TEST01-6wfG2wk6wWLc"

	// ValidAnnounceArguments is a table of well-formed announce parameter
	// sets, covering optional parameters, events, keys and escaped values.
	ValidAnnounceArguments = []url.Values{
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}},
		url.Values{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
		url.Values{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
		url.Values{"peer_id": {""}, "compact": {""}},
	}

	// InvalidQueries holds raw URLs that must fail to parse (here: a bad
	// percent-escape in the info_hash value).
	InvalidQueries = []string{
		baseAddr + "announce/?" + "info_hash=%0%a",
	}
)
// mapArrayEqual reports whether boxed (url.Values-style, expected to hold
// exactly one value per key) and unboxed contain the same key/value pairs.
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
	if len(boxed) != len(unboxed) {
		return false
	}

	for key, values := range boxed {
		// Always expect the box to hold exactly one element.
		if len(values) != 1 {
			return false
		}
		if values[0] != unboxed[key] {
			return false
		}
	}

	return true
}
// TestValidQueries ensures every well-formed announce query parses without
// error and that the parsed parameters match the input values exactly.
func TestValidQueries(t *testing.T) {
	for parseIndex, parseVal := range ValidAnnounceArguments {
		parsedQueryObj, err := New(baseAddr + "announce/?" + parseVal.Encode())
		if err != nil {
			t.Error(err)
		}

		if !mapArrayEqual(parseVal, parsedQueryObj.params) {
			// Fixed misspelling "Recieved" in the failure message.
			t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Received=%v\n", parseIndex, parseVal, parsedQueryObj.params)
		}
	}
}
// TestInvalidQueries ensures malformed queries produce an error and a nil
// Query.
func TestInvalidQueries(t *testing.T) {
	for i, raw := range InvalidQueries {
		parsed, err := New(raw)

		if err == nil {
			t.Error("Should have produced error", i)
		}
		if parsed != nil {
			t.Error("Should be nil after error", parsed, i)
		}
	}
}
// BenchmarkParseQuery measures the custom query parser over the full set of
// valid announce argument combinations.
func BenchmarkParseQuery(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for idx, args := range ValidAnnounceArguments {
			parsed, err := New(baseAddr + "announce/?" + args.Encode())
			if err != nil {
				b.Error(err, idx)
				b.Log(parsed)
			}
		}
	}
}
// BenchmarkURLParseQuery measures net/url.ParseQuery on the same parameter
// sets, as a standard-library baseline for BenchmarkParseQuery.
//
// Previously the full URL (scheme, host and path included) was passed to
// url.ParseQuery, which expects only the raw query component; that made the
// two benchmarks measure non-comparable work and produced garbage keys.
func BenchmarkURLParseQuery(b *testing.B) {
	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range ValidAnnounceArguments {
			parsedQueryObj, err := url.ParseQuery(parseStr.Encode())
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}

View file

@ -1,183 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"net"
"net/http"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/event"
"github.com/chihaya/chihaya/server/http/query"
"github.com/chihaya/chihaya/tracker"
)
// announceRequest parses an HTTP GET into a chihaya.AnnounceRequest.
//
// Missing or malformed parameters are reported as tracker.ClientError so the
// failure can be serialized back to the client.
func announceRequest(r *http.Request, cfg *httpConfig) (*chihaya.AnnounceRequest, error) {
	q, err := query.New(r.URL.RawQuery)
	if err != nil {
		return nil, err
	}
	request := &chihaya.AnnounceRequest{Params: q}
	// "event" is optional; absence means a regular announce.
	eventStr, err := q.String("event")
	if err == query.ErrKeyNotFound {
		eventStr = ""
	} else if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: event")
	}
	request.Event, err = event.New(eventStr)
	if err != nil {
		return nil, tracker.ClientError("failed to provide valid client event")
	}
	// Any compact value other than "" or "0" requests a compact peer list.
	compactStr, _ := q.String("compact")
	request.Compact = compactStr != "" && compactStr != "0"
	// Announces must carry exactly one info_hash.
	infoHashes := q.InfoHashes()
	if len(infoHashes) < 1 {
		return nil, tracker.ClientError("no info_hash parameter supplied")
	}
	if len(infoHashes) > 1 {
		return nil, tracker.ClientError("multiple info_hash parameters supplied")
	}
	request.InfoHash = infoHashes[0]
	// peer_id must be exactly 20 bytes long.
	peerID, err := q.String("peer_id")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: peer_id")
	}
	if len(peerID) != 20 {
		return nil, tracker.ClientError("failed to provide valid peer_id")
	}
	request.PeerID = chihaya.PeerIDFromString(peerID)
	// Transfer statistics; all three are mandatory.
	request.Left, err = q.Uint64("left")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: left")
	}
	request.Downloaded, err = q.Uint64("downloaded")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: downloaded")
	}
	request.Uploaded, err = q.Uint64("uploaded")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: uploaded")
	}
	// numwant is optional; a missing value leaves NumWant at 0.
	numwant, _ := q.Uint64("numwant")
	request.NumWant = int32(numwant)
	port, err := q.Uint64("port")
	if err != nil {
		return nil, tracker.ClientError("failed to parse parameter: port")
	}
	request.Port = uint16(port)
	// Resolve the client's IPv4 and/or IPv6 address from query parameters,
	// headers, or the connection, depending on configuration.
	v4, v6, err := requestedIP(q, r, cfg)
	if err != nil {
		return nil, tracker.ClientError("failed to parse remote IP")
	}
	request.IPv4 = v4
	request.IPv6 = v6
	return request, nil
}
// scrapeRequest parses an HTTP GET into a chihaya.ScrapeRequest.
// At least one info_hash parameter is required.
func scrapeRequest(r *http.Request, cfg *httpConfig) (*chihaya.ScrapeRequest, error) {
	q, err := query.New(r.URL.RawQuery)
	if err != nil {
		return nil, err
	}

	infoHashes := q.InfoHashes()
	if len(infoHashes) < 1 {
		return nil, tracker.ClientError("no info_hash parameter supplied")
	}

	return &chihaya.ScrapeRequest{
		InfoHashes: infoHashes,
		Params:     q,
	}, nil
}
// requestedIP returns the IP address for a request. If there are multiple in
// the request, one IPv4 and one IPv6 will be returned.
//
// Sources are consulted in order: client-supplied query parameters (only when
// AllowIPSpoofing is set), then the configured RealIPHeader, then the
// connection's remote address. Collection stops as soon as getIPs reports
// enough addresses have been found.
func requestedIP(p chihaya.Params, r *http.Request, cfg *httpConfig) (v4, v6 net.IP, err error) {
	var done bool
	// Client-supplied addresses, only honored when spoofing is allowed.
	if cfg.AllowIPSpoofing {
		if str, e := p.String("ip"); e == nil {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}
		if str, e := p.String("ipv4"); e == nil {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}
		if str, e := p.String("ipv6"); e == nil {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}
	}
	if cfg.RealIPHeader != "" {
		// A configured real-IP header is trusted exclusively; the remote
		// address is not consulted in this branch.
		if xRealIPs, ok := r.Header[cfg.RealIPHeader]; ok {
			if v4, v6, done = getIPs(string(xRealIPs[0]), v4, v6, cfg); done {
				return
			}
		}
	} else {
		// Fall back to loopback when the request carries no remote address.
		if r.RemoteAddr == "" && v4 == nil {
			if v4, v6, done = getIPs("127.0.0.1", v4, v6, cfg); done {
				return
			}
		}
		if v4, v6, done = getIPs(r.RemoteAddr, v4, v6, cfg); done {
			return
		}
	}
	// Nothing parseable anywhere: report a client error.
	if v4 == nil && v6 == nil {
		err = tracker.ClientError("failed to parse IP address")
	}
	return
}
// getIPs parses ipstr (either a bare address or host:port) and fills in
// whichever of ipv4/ipv6 is still nil. The returned bool reports whether
// enough addresses have been collected: both families when DualStackedPeers
// is set, otherwise either one.
func getIPs(ipstr string, ipv4, ipv6 net.IP, cfg *httpConfig) (net.IP, net.IP, bool) {
	host, _, err := net.SplitHostPort(ipstr)
	if err != nil {
		// Not host:port — treat the whole string as the address.
		host = ipstr
	}

	if ip := net.ParseIP(host); ip != nil {
		if v4 := ip.To4(); ipv4 == nil && v4 != nil {
			ipv4 = v4
		} else if ipv6 == nil && v4 == nil {
			ipv6 = ip
		}
	}

	if cfg.DualStackedPeers {
		return ipv4, ipv6, ipv4 != nil && ipv6 != nil
	}
	return ipv4, ipv6, ipv4 != nil || ipv6 != nil
}

View file

@ -1,133 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"errors"
"log"
"net"
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server"
"github.com/chihaya/chihaya/tracker"
)
// init registers this package's server constructor under the name "http".
func init() {
	server.Register("http", constructor)
}
// constructor builds an HTTP tracker server from a generic server
// configuration. It fails when the embedded config cannot be decoded.
func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {
	cfg, err := newHTTPConfig(srvcfg)
	if err != nil {
		return nil, errors.New("http: invalid config: " + err.Error())
	}

	srv := &httpServer{
		cfg: cfg,
		tkr: tkr,
	}
	return srv, nil
}
// httpServer is the HTTP frontend serving announces and scrapes.
type httpServer struct {
	cfg *httpConfig
	tkr *tracker.Tracker
	// grace is set by Start and used by Stop for graceful shutdown.
	grace *graceful.Server
}
// Start runs the server and blocks until it has exited.
//
// It panics if the server exits unexpectedly.
func (s *httpServer) Start() {
	s.grace = &graceful.Server{
		Server: &http.Server{
			Addr:         s.cfg.Addr,
			Handler:      s.routes(),
			ReadTimeout:  s.cfg.ReadTimeout,
			WriteTimeout: s.cfg.WriteTimeout,
		},
		// RequestTimeout bounds how long shutdown waits for in-flight work.
		Timeout:          s.cfg.RequestTimeout,
		NoSignalHandling: true,
		ConnState: func(conn net.Conn, state http.ConnState) {
			switch state {
			case http.StateNew:
				//stats.RecordEvent(stats.AcceptedConnection)
			case http.StateClosed:
				//stats.RecordEvent(stats.ClosedConnection)
			case http.StateHijacked:
				// No handler hijacks connections, so this must be a bug.
				panic("http: connection impossibly hijacked")
			// Ignore the following cases.
			case http.StateActive, http.StateIdle:
			default:
				panic("http: connection transitioned to unknown state")
			}
		},
	}
	s.grace.SetKeepAlivesEnabled(false)
	// A graceful shutdown surfaces as an OpError on "accept"; any other
	// error is treated as fatal.
	if err := s.grace.ListenAndServe(); err != nil {
		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
			log.Printf("Failed to gracefully run HTTP server: %s", err.Error())
			panic(err)
		}
	}
	log.Println("HTTP server shut down cleanly")
}
// Stop stops the server and blocks until the server has exited.
func (s *httpServer) Stop() {
	// Initiate a graceful shutdown, then wait for it to complete.
	s.grace.Stop(s.grace.Timeout)
	<-s.grace.StopChan()
}
// routes builds the router mapping the two tracker endpoints to their
// handlers.
func (s *httpServer) routes() *httprouter.Router {
	router := httprouter.New()
	router.GET("/announce", s.serveAnnounce)
	router.GET("/scrape", s.serveScrape)
	return router
}
// serveAnnounce handles GET /announce: parse the request, delegate to the
// tracker, and serialize the response.
func (s *httpServer) serveAnnounce(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
	req, err := announceRequest(r, s.cfg)
	if err != nil {
		writeError(w, err)
		return
	}

	resp, err := s.tkr.HandleAnnounce(req)
	if err != nil {
		writeError(w, err)
		return
	}

	if err = writeAnnounceResponse(w, resp); err != nil {
		log.Println("error serializing response", err)
	}
}
// serveScrape handles GET /scrape: parse the request, delegate to the
// tracker, and serialize the response.
//
// NOTE(review): unlike serveAnnounce, any result of writeScrapeResponse is
// discarded here — confirm its signature and consider logging a failure.
func (s *httpServer) serveScrape(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
	req, err := scrapeRequest(r, s.cfg)
	if err != nil {
		writeError(w, err)
		return
	}

	resp, err := s.tkr.HandleScrape(req)
	if err != nil {
		writeError(w, err)
		return
	}

	writeScrapeResponse(w, resp)
}

View file

@ -1,49 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package server
import (
"sync"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/tracker"
)
// Pool represents a running pool of servers.
type Pool struct {
	// servers holds every server started by StartPool, for later shutdown.
	servers []Server
	// wg counts running server goroutines; Stop waits on it.
	wg sync.WaitGroup
}
// StartPool creates a new pool of servers specified by the provided
// configuration and runs them.
//
// Each server runs in its own goroutine; StartPool returns once all of them
// have been launched. On the first constructor error it returns immediately,
// leaving already-started servers running.
func StartPool(cfgs []chihaya.ServerConfig, tkr *tracker.Tracker) (*Pool, error) {
	var toReturn Pool

	for _, cfg := range cfgs {
		// Pin a per-iteration copy: &cfg escapes into the constructor, and
		// before Go 1.22 the range variable is reused across iterations, so
		// a constructor that retained the pointer would see later configs.
		cfg := cfg
		srv, err := New(&cfg, tkr)
		if err != nil {
			return nil, err
		}

		toReturn.wg.Add(1)
		go func(srv Server) {
			defer toReturn.wg.Done()
			srv.Start()
		}(srv)

		toReturn.servers = append(toReturn.servers, srv)
	}

	return &toReturn, nil
}
// Stop safely shuts down a pool of servers and waits until every server
// goroutine has exited.
func (p *Pool) Stop() {
	for _, s := range p.servers {
		s.Stop()
	}
	p.wg.Wait()
}

View file

@ -1,103 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package prometheus implements a chihaya Server for serving metrics to
// Prometheus.
package prometheus
import (
"errors"
"log"
"net"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/tylerb/graceful"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server"
"github.com/chihaya/chihaya/tracker"
)
// init registers this package's server constructor under the name
// "prometheus".
func init() {
	server.Register("prometheus", constructor)
}
// constructor builds a Prometheus metrics server from a generic server
// configuration. The tracker handle is not needed and goes unused.
func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {
	cfg, err := NewServerConfig(srvcfg)
	if err != nil {
		return nil, errors.New("prometheus: invalid config: " + err.Error())
	}

	srv := &Server{cfg: cfg}
	return srv, nil
}
// ServerConfig represents the configuration options for a
// PrometheusServer.
type ServerConfig struct {
	// Addr is the address (host:port) the metrics endpoint listens on.
	Addr string `yaml:"addr"`
	// ShutdownTimeout bounds how long Stop waits for in-flight requests.
	ShutdownTimeout time.Duration `yaml:"shutdown_timeout"`
	ReadTimeout     time.Duration `yaml:"read_timeout"`
	WriteTimeout    time.Duration `yaml:"write_timeout"`
}
// NewServerConfig extracts the prometheus-specific options from a generic
// chihaya.ServerConfig by round-tripping its Config through YAML.
func NewServerConfig(srvcfg *chihaya.ServerConfig) (*ServerConfig, error) {
	raw, err := yaml.Marshal(srvcfg.Config)
	if err != nil {
		return nil, err
	}

	cfg := new(ServerConfig)
	if err = yaml.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
// Server implements a chihaya Server for serving metrics to Prometheus.
type Server struct {
	cfg *ServerConfig
	// grace is set by Start and used by Stop for graceful shutdown.
	grace *graceful.Server
}
// Compile-time check that Server satisfies server.Server.
var _ server.Server = &Server{}
// Start starts the prometheus server and blocks until it exits.
//
// It panics if the server exits unexpectedly.
func (s *Server) Start() {
	s.grace = &graceful.Server{
		Server: &http.Server{
			Addr: s.cfg.Addr,
			// The stock Prometheus handler exposes the metrics endpoint.
			Handler:      prometheus.Handler(),
			ReadTimeout:  s.cfg.ReadTimeout,
			WriteTimeout: s.cfg.WriteTimeout,
		},
		Timeout:          s.cfg.ShutdownTimeout,
		NoSignalHandling: true,
	}
	// A graceful shutdown surfaces as an OpError on "accept"; any other
	// error is treated as fatal.
	if err := s.grace.ListenAndServe(); err != nil {
		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
			log.Printf("Failed to gracefully run Prometheus server: %s", err.Error())
			panic(err)
		}
	}
	log.Println("Prometheus server shut down cleanly")
}
// Stop stops the prometheus server and blocks until it exits.
func (s *Server) Stop() {
	// Initiate a graceful shutdown, then wait for it to complete.
	s.grace.Stop(s.cfg.ShutdownTimeout)
	<-s.grace.StopChan()
}

View file

@ -1,56 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package server implements an abstraction over servers meant to be run .
// alongside a tracker.
//
// Servers may be implementations of different transport protocols or have their
// own custom behavior.
package server
import (
"fmt"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/tracker"
)
// constructors maps registered server names to their Constructor functions.
var constructors = make(map[string]Constructor)
// Constructor is a function that creates a new Server.
type Constructor func(*chihaya.ServerConfig, *tracker.Tracker) (Server, error)
// Register makes a Constructor available by the provided name.
//
// If this function is called twice with the same name or if the Constructor is
// nil, it panics.
func Register(name string, con Constructor) {
	if con == nil {
		panic("server: could not register nil Constructor")
	}

	_, dup := constructors[name]
	if dup {
		panic("server: could not register duplicate Constructor: " + name)
	}

	constructors[name] = con
}
// New creates a Server specified by a configuration. It fails when no
// Constructor has been registered under cfg.Name.
func New(cfg *chihaya.ServerConfig, tkr *tracker.Tracker) (Server, error) {
	if con, ok := constructors[cfg.Name]; ok {
		return con(cfg, tkr)
	}
	return nil, fmt.Errorf("server: unknown Constructor %q (forgotten import?)", cfg.Name)
}
// Server represents one instance of a server accessing the tracker.
type Server interface {
	// Start starts a server and blocks until the server exits.
	//
	// It should panic if the server exits unexpectedly.
	Start()
	// Stop stops a server and blocks until the server exits.
	Stop()
}

View file

@ -1,43 +0,0 @@
## The store Package
The `store` package offers a storage interface and middlewares sufficient to run a public tracker based on it.
### Architecture
The store consists of three parts:
- A set of interfaces, tests based on these interfaces and the store logic, unifying these interfaces into the store
- Drivers, implementing the store interfaces and
- Middleware that depends on the store
The store interfaces are `IPStore`, `PeerStore` and `StringStore`.
During runtime, each of them will be implemented by a driver.
Even though all different drivers for one interface provide the same functionality, their behaviour can be very different.
For example: The memory implementation keeps all state in-memory — this is very fast, but not persistent; it loses its state on every restart.
A database-backed driver on the other hand could provide persistence, at the cost of performance.
The pluggable design of Chihaya allows for the different interfaces to use different drivers.
For example: A typical use case of the `StringStore` is to provide blacklists or whitelists for infohashes/client IDs/....
You'd typically want these lists to be persistent, so you'd choose a driver that provides persistence.
The `PeerStore` on the other hand rarely needs to be persistent, as all peer state will be restored after one announce interval.
You'd therefore typically choose a very performant but non-persistent driver for the `PeerStore`.
### Testing
The main store package also contains a set of tests and benchmarks for drivers.
Both use the store interfaces and can work with any driver that implements these interfaces.
The tests verify that the driver behaves as specified by the interface and its documentation.
The benchmarks can be used to compare performance of a wide range of operations on the interfaces.
This makes it very easy to implement a new driver:
All functions that are part of the store interfaces can be tested easily with the tests that come with the store package.
Generally the memory implementation can be used as a guideline for implementing new drivers.
Both benchmarks and tests require a clean state to work correctly.
All of the test and benchmark functions therefore take a `*DriverConfig` as a parameter, this should be used to configure the driver in a way that it provides a clean state for every test or benchmark.
For example: Imagine a file-based driver that achieves persistence by storing its state in a file.
It must then be possible to provide the location of this file in the `DriverConfig`, so that every different benchmark gets to work with a new file.
Most benchmarks come in two flavors: The "normal" version and the "1K" version.
A normal benchmark uses the same value over and over again to benchmark one operation.
A 1K benchmark uses a different value from a set of 1000 values for every iteration, this can show caching effects, if the driver uses them.
The 1K benchmarks require a little more computation to select the values and thus typically yield slightly lower results even for a "perfect" cache, i.e. the memory implementation.

View file

@ -1,93 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package store
import (
"fmt"
"net"
"github.com/chihaya/chihaya/pkg/stopper"
)
var ipStoreDrivers = make(map[string]IPStoreDriver)
// IPStore represents an interface for manipulating IPs and IP ranges.
type IPStore interface {
	// AddIP adds a single IP address to the IPStore.
	AddIP(ip net.IP) error
	// AddNetwork adds a range of IP addresses, denoted by a network in CIDR
	// notation, to the IPStore.
	AddNetwork(network string) error
	// HasIP returns whether the given IP address is contained in the IPStore
	// or belongs to any of the stored networks.
	HasIP(ip net.IP) (bool, error)
	// HasAnyIP returns whether any of the given IP addresses are contained
	// in the IPStore or belongs to any of the stored networks.
	HasAnyIP(ips []net.IP) (bool, error)
	// HasAllIPs returns whether all of the given IP addresses are
	// contained in the IPStore or belongs to any of the stored networks.
	HasAllIPs(ips []net.IP) (bool, error)
	// RemoveIP removes a single IP address from the IPStore.
	//
	// This will not remove the given address from any networks it belongs to
	// that are stored in the IPStore.
	//
	// Returns ErrResourceDoesNotExist if the given IP address is not
	// contained in the store.
	RemoveIP(ip net.IP) error
	// RemoveNetwork removes a range of IP addresses that was previously
	// added through AddNetwork.
	//
	// The given network must not, as a string, match the previously added
	// network, but rather denote the same network, e.g. if the network
	// 192.168.22.255/24 was added, removing the network 192.168.22.123/24
	// will succeed.
	//
	// Returns ErrResourceDoesNotExist if the given network is not
	// contained in the store.
	RemoveNetwork(network string) error
	// Stopper provides the Stop method that stops the IPStore.
	// Stop should shut down the IPStore in a separate goroutine and send
	// an error to the channel if the shutdown failed. If the shutdown
	// was successful, the channel is to be closed.
	stopper.Stopper
}
// IPStoreDriver represents an interface for creating a handle to the
// storage of IPs.
type IPStoreDriver interface {
	// New creates an IPStore from the given driver configuration.
	New(*DriverConfig) (IPStore, error)
}
// RegisterIPStoreDriver makes a driver available by the provided name.
//
// If this function is called twice with the same name or if the driver is nil,
// it panics.
func RegisterIPStoreDriver(name string, driver IPStoreDriver) {
	if driver == nil {
		panic("store: could not register nil IPStoreDriver")
	}

	_, dup := ipStoreDrivers[name]
	if dup {
		panic("store: could not register duplicate IPStoreDriver: " + name)
	}

	ipStoreDrivers[name] = driver
}
// OpenIPStore returns an IPStore specified by a configuration.
// It fails when no driver is registered under cfg.Name.
func OpenIPStore(cfg *DriverConfig) (IPStore, error) {
	driver, ok := ipStoreDrivers[cfg.Name]
	if !ok {
		// Report the driver's name, not the whole config struct: %q on a
		// *DriverConfig prints garbage. This also matches the other registry
		// lookups in the project (e.g. server.New).
		return nil, fmt.Errorf("store: unknown IPStoreDriver %q (forgotten import?)", cfg.Name)
	}
	return driver.New(cfg)
}

View file

@ -1,225 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package memory
import (
"net"
"sync"
"github.com/mrd0ll4r/netmatch"
"github.com/chihaya/chihaya/server/store"
)
// init registers the in-memory IPStore driver under the name "memory".
func init() {
	store.RegisterIPStoreDriver("memory", &ipStoreDriver{})
}
// ipStoreDriver is the factory for in-memory IPStores.
type ipStoreDriver struct{}
// New creates an empty, ready-to-use in-memory IPStore.
// The driver configuration is ignored; this driver has no options.
func (d *ipStoreDriver) New(_ *store.DriverConfig) (store.IPStore, error) {
	return &ipStore{
		ips:      make(map[[16]byte]struct{}),
		networks: netmatch.New(),
		closed:   make(chan struct{}),
	}, nil
}
// ipStore implements store.IPStore using an in-memory map of byte arrays and
// a trie-like structure.
type ipStore struct {
	// ips holds single addresses, keyed by their 16-byte (v4-in-v6) form.
	ips map[[16]byte]struct{}
	// networks holds CIDR ranges in a prefix-matching trie.
	networks *netmatch.Trie
	// closed is closed by Stop; every method panics once it is closed.
	closed chan struct{}
	sync.RWMutex
}
var (
	// Compile-time check that ipStore satisfies store.IPStore.
	_ store.IPStore = &ipStore{}
	// v4InV6Prefix is the 12-byte prefix of the IPv4-in-IPv6 range
	// (::ffff:0:0/96), used to map IPv4 keys into the IPv6 key space.
	v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff}
)
// key converts an IP address to a [16]byte usable as a map key (net.IP is a
// []byte and cannot key a map). A 4-byte IPv4 address is mapped into the
// IPv4-in-IPv6 range via v4InV6Prefix so both families share one key space.
func key(ip net.IP) [16]byte {
	var k [16]byte

	if len(ip) == net.IPv4len {
		copy(k[:], v4InV6Prefix)
		copy(k[12:], ip)
		return k
	}

	copy(k[:], ip)
	return k
}
// AddNetwork adds a CIDR network to the store's trie.
// It panics if the store has already been stopped.
func (s *ipStore) AddNetwork(network string) error {
	key, length, err := netmatch.ParseNetwork(network)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	// Interacting with a stopped store is a programming error.
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}
	return s.networks.Add(key, length)
}
// AddIP adds a single IP address to the store.
// It panics if the store has already been stopped.
func (s *ipStore) AddIP(ip net.IP) error {
	s.Lock()
	defer s.Unlock()
	// Interacting with a stopped store is a programming error.
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}
	s.ips[key(ip)] = struct{}{}
	return nil
}
// HasIP reports whether ip is contained in the store, either as a single
// address or within one of the stored networks.
// It panics if the store has already been stopped.
func (s *ipStore) HasIP(ip net.IP) (bool, error) {
	key := key(ip)
	s.RLock()
	defer s.RUnlock()
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}
	// Exact-address match first, then fall back to the network trie.
	_, ok := s.ips[key]
	if ok {
		return true, nil
	}
	match, err := s.networks.Match(key)
	if err != nil {
		return false, err
	}
	return match, nil
}
// HasAnyIP reports whether at least one of ips is contained in the store,
// either as a single address or within a stored network.
// It is vacuously false for an empty slice.
// It panics if the store has already been stopped.
func (s *ipStore) HasAnyIP(ips []net.IP) (bool, error) {
	s.RLock()
	defer s.RUnlock()
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}
	for _, ip := range ips {
		key := key(ip)
		// Exact-address match first, then the network trie.
		if _, ok := s.ips[key]; ok {
			return true, nil
		}
		match, err := s.networks.Match(key)
		if err != nil {
			return false, err
		}
		if match {
			return true, nil
		}
	}
	return false, nil
}
// HasAllIPs reports whether every one of ips is contained in the store,
// either as a single address or within a stored network.
// It is vacuously true for an empty slice.
// It panics if the store has already been stopped.
func (s *ipStore) HasAllIPs(ips []net.IP) (bool, error) {
	s.RLock()
	defer s.RUnlock()
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}
	for _, ip := range ips {
		key := key(ip)
		// Only consult the trie when the exact-address set misses.
		if _, ok := s.ips[key]; !ok {
			match, err := s.networks.Match(key)
			if err != nil {
				return false, err
			}
			if !match {
				return false, nil
			}
		}
	}
	return true, nil
}
// RemoveIP deletes a single IP address from the store.
// It returns store.ErrResourceDoesNotExist if the address was not present
// and panics if the store has already been stopped.
func (s *ipStore) RemoveIP(ip net.IP) error {
	k := key(ip)

	s.Lock()
	defer s.Unlock()

	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	if _, found := s.ips[k]; !found {
		return store.ErrResourceDoesNotExist
	}
	delete(s.ips, k)
	return nil
}
// RemoveNetwork deletes a CIDR network mask from the store.
// It returns store.ErrResourceDoesNotExist if the network was not present
// and panics if the store has already been stopped.
func (s *ipStore) RemoveNetwork(network string) error {
	key, length, err := netmatch.ParseNetwork(network)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}
	// Translate the trie's sentinel error into the store-level error so
	// callers need not depend on netmatch. (The original guarded this with
	// a redundant `err != nil &&`.)
	if err = s.networks.Remove(key, length); err == netmatch.ErrNotContained {
		return store.ErrResourceDoesNotExist
	}
	return err
}
// Stop asynchronously shuts the store down, dropping all stored data.
// The returned channel is closed when shutdown completes; any further
// method call on the store panics.
func (s *ipStore) Stop() <-chan error {
	toReturn := make(chan error)
	go func() {
		s.Lock()
		defer s.Unlock()
		// Replace the containers so their memory can be reclaimed.
		s.ips = make(map[[16]byte]struct{})
		s.networks = netmatch.New()
		close(s.closed)
		close(toReturn)
	}()
	return toReturn
}

View file

@ -1,200 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package memory
import (
"net"
"testing"
"github.com/chihaya/chihaya/server/store"
"github.com/stretchr/testify/require"
)
var (
	// Sample addresses for TestKey; the IPv6 address's hex groups spell
	// out the expected key bytes (0x0c=12, 0x22=34, 0x38=56, 0x4e=78, 0x68=104).
	v6  = net.ParseIP("0c22:384e:0:0c22:384e::68")
	v4  = net.ParseIP("12.13.14.15")       // 16-byte (v4-in-v6) form
	v4s = net.ParseIP("12.13.14.15").To4() // 4-byte form of the same address

	ipStoreTester      = store.PrepareIPStoreTester(&ipStoreDriver{})
	ipStoreBenchmarker = store.PrepareIPStoreBenchmarker(&ipStoreDriver{})
	ipStoreTestConfig  = &store.DriverConfig{}
)
// TestKey checks that key() canonicalizes 4-byte and 16-byte forms of the
// same address into identical 16-byte map keys.
func TestKey(t *testing.T) {
	var table = []struct {
		input    net.IP
		expected [16]byte
	}{
		{v6, [16]byte{12, 34, 56, 78, 0, 0, 12, 34, 56, 78, 0, 0, 0, 0, 0, 104}},
		{v4, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 12, 13, 14, 15}},  // IPv4 in IPv6 prefix
		{v4s, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 12, 13, 14, 15}}, // is equal to the one above, should produce equal output
	}

	for _, tt := range table {
		got := key(tt.input)
		// require.Equal takes (t, expected, actual); the original call had
		// the two reversed, which garbles failure messages.
		require.Equal(t, tt.expected, got)
	}
}
// The tests below delegate to the generic IPStore conformance suite, run
// against this package's in-memory driver.
func TestIPStore(t *testing.T) {
	ipStoreTester.TestIPStore(t, ipStoreTestConfig)
}

func TestHasAllHasAny(t *testing.T) {
	ipStoreTester.TestHasAllHasAny(t, ipStoreTestConfig)
}

func TestNetworks(t *testing.T) {
	ipStoreTester.TestNetworks(t, ipStoreTestConfig)
}

func TestHasAllHasAnyNetworks(t *testing.T) {
	ipStoreTester.TestHasAllHasAnyNetworks(t, ipStoreTestConfig)
}

// Single-address benchmarks (IPv4/IPv6).
func BenchmarkIPStore_AddV4(b *testing.B) {
	ipStoreBenchmarker.AddV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddV6(b *testing.B) {
	ipStoreBenchmarker.AddV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupV4(b *testing.B) {
	ipStoreBenchmarker.LookupV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupV6(b *testing.B) {
	ipStoreBenchmarker.LookupV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemoveV4(b *testing.B) {
	ipStoreBenchmarker.AddRemoveV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemoveV6(b *testing.B) {
	ipStoreBenchmarker.AddRemoveV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupNonExistV4(b *testing.B) {
	ipStoreBenchmarker.LookupNonExistV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupNonExistV6(b *testing.B) {
	ipStoreBenchmarker.LookupNonExistV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExistV4(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExistV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExistV6(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExistV6(b, ipStoreTestConfig)
}

// CIDR-network benchmarks.
func BenchmarkIPStore_AddV4Network(b *testing.B) {
	ipStoreBenchmarker.AddV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddV6Network(b *testing.B) {
	ipStoreBenchmarker.AddV6Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupV4Network(b *testing.B) {
	ipStoreBenchmarker.LookupV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupV6Network(b *testing.B) {
	ipStoreBenchmarker.LookupV6Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemoveV4Network(b *testing.B) {
	ipStoreBenchmarker.AddRemoveV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemoveV6Network(b *testing.B) {
	ipStoreBenchmarker.AddRemoveV6Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExistV4Network(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExistV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExistV6Network(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExistV6Network(b, ipStoreTestConfig)
}

// Batched (1000-entry) address benchmarks.
func BenchmarkIPStore_Add1KV4(b *testing.B) {
	ipStoreBenchmarker.Add1KV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_Add1KV6(b *testing.B) {
	ipStoreBenchmarker.Add1KV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_Lookup1KV4(b *testing.B) {
	ipStoreBenchmarker.Lookup1KV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_Lookup1KV6(b *testing.B) {
	ipStoreBenchmarker.Lookup1KV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemove1KV4(b *testing.B) {
	ipStoreBenchmarker.AddRemove1KV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemove1KV6(b *testing.B) {
	ipStoreBenchmarker.AddRemove1KV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupNonExist1KV4(b *testing.B) {
	ipStoreBenchmarker.LookupNonExist1KV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_LookupNonExist1KV6(b *testing.B) {
	ipStoreBenchmarker.LookupNonExist1KV6(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExist1KV4(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExist1KV4(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExist1KV6(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExist1KV6(b, ipStoreTestConfig)
}

// Batched (1000-entry) network benchmarks.
func BenchmarkIPStore_Add1KV4Network(b *testing.B) {
	ipStoreBenchmarker.Add1KV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_Add1KV6Network(b *testing.B) {
	ipStoreBenchmarker.Add1KV6Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_Lookup1KV4Network(b *testing.B) {
	ipStoreBenchmarker.Lookup1KV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_Lookup1KV6Network(b *testing.B) {
	ipStoreBenchmarker.Lookup1KV6Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemove1KV4Network(b *testing.B) {
	ipStoreBenchmarker.AddRemove1KV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_AddRemove1KV6Network(b *testing.B) {
	ipStoreBenchmarker.AddRemove1KV6Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExist1KV4Network(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExist1KV4Network(b, ipStoreTestConfig)
}

func BenchmarkIPStore_RemoveNonExist1KV6Network(b *testing.B) {
	ipStoreBenchmarker.RemoveNonExist1KV6Network(b, ipStoreTestConfig)
}

View file

@ -1,478 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package memory
import (
"encoding/binary"
"log"
"net"
"runtime"
"sync"
"time"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server/store"
)
// init registers this in-memory implementation as the "memory" PeerStore
// driver.
func init() {
	store.RegisterPeerStoreDriver("memory", &peerStoreDriver{})
}

// peerStoreDriver constructs in-memory PeerStore instances.
type peerStoreDriver struct{}

// New creates a peerStore with the configured number of shards, each
// holding an independently-locked map of swarms.
func (d *peerStoreDriver) New(storecfg *store.DriverConfig) (store.PeerStore, error) {
	cfg, err := newPeerStoreConfig(storecfg)
	if err != nil {
		return nil, err
	}

	shards := make([]*peerShard, cfg.Shards)
	for i := 0; i < cfg.Shards; i++ {
		shards[i] = &peerShard{}
		shards[i].swarms = make(map[chihaya.InfoHash]swarm)
	}
	return &peerStore{
		shards: shards,
		closed: make(chan struct{}),
	}, nil
}

// peerStoreConfig is the driver-specific configuration for the memory
// PeerStore.
type peerStoreConfig struct {
	Shards int `yaml:"shards"` // number of lock shards; defaults to 1
}

// newPeerStoreConfig extracts a peerStoreConfig from the generic driver
// config by round-tripping it through YAML.
func newPeerStoreConfig(storecfg *store.DriverConfig) (*peerStoreConfig, error) {
	bytes, err := yaml.Marshal(storecfg.Config)
	if err != nil {
		return nil, err
	}
	var cfg peerStoreConfig
	err = yaml.Unmarshal(bytes, &cfg)
	if err != nil {
		return nil, err
	}
	// Guard against zero or negative shard counts in the config.
	if cfg.Shards < 1 {
		cfg.Shards = 1
	}
	return &cfg, nil
}
// serializedPeer is a compact byte encoding of a chihaya.Peer:
// 20 bytes peer ID, 2 bytes big-endian port, then the raw IP bytes
// (see peerKey / decodePeerKey).
type serializedPeer string

// peerShard is one lock-protected slice of the infohash space.
type peerShard struct {
	swarms map[chihaya.InfoHash]swarm
	sync.RWMutex
}

type swarm struct {
	// map serialized peer to mtime
	seeders  map[serializedPeer]int64
	leechers map[serializedPeer]int64
}

// peerStore implements store.PeerStore with sharded in-memory maps.
type peerStore struct {
	shards []*peerShard
	closed chan struct{} // closed by Stop; later calls panic
}

// Compile-time check that peerStore satisfies store.PeerStore.
var _ store.PeerStore = &peerStore{}
// shardIndex maps an infohash to the shard responsible for it, using the
// hash's first four bytes modulo the shard count.
func (s *peerStore) shardIndex(infoHash chihaya.InfoHash) uint32 {
	return binary.BigEndian.Uint32(infoHash[:4]) % uint32(len(s.shards))
}

// peerKey serializes a peer into the compact form used as a map key:
// 20-byte ID, 2-byte big-endian port, raw IP bytes.
func peerKey(p chihaya.Peer) serializedPeer {
	b := make([]byte, 20+2+len(p.IP))
	copy(b[:20], p.ID[:])
	binary.BigEndian.PutUint16(b[20:22], p.Port)
	copy(b[22:], p.IP)

	return serializedPeer(b)
}

// decodePeerKey is the inverse of peerKey.
func decodePeerKey(pk serializedPeer) chihaya.Peer {
	return chihaya.Peer{
		ID:   chihaya.PeerIDFromString(string(pk[:20])),
		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
		IP:   net.IP(pk[22:]),
	}
}
// PutSeeder adds the given peer to the seeders of the swarm for infoHash,
// creating the swarm if it does not exist yet. The current time is
// recorded as the peer's mtime for later garbage collection.
// Panics if the store has been stopped.
func (s *peerStore) PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	shard.Lock()

	if _, ok := shard.swarms[infoHash]; !ok {
		shard.swarms[infoHash] = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
	}

	shard.swarms[infoHash].seeders[peerKey(p)] = time.Now().UnixNano()

	shard.Unlock()
	return nil
}
// DeleteSeeder removes the given peer from the seeders of the swarm for
// infoHash. It returns store.ErrResourceDoesNotExist if the swarm or peer
// does not exist, and drops the swarm entirely once both member maps are
// empty. Panics if the store has been stopped.
func (s *peerStore) DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	pk := peerKey(p)
	shard.Lock()

	if _, ok := shard.swarms[infoHash]; !ok {
		shard.Unlock()
		return store.ErrResourceDoesNotExist
	}

	if _, ok := shard.swarms[infoHash].seeders[pk]; !ok {
		shard.Unlock()
		return store.ErrResourceDoesNotExist
	}

	delete(shard.swarms[infoHash].seeders, pk)

	// OR of the two lengths is zero only if both maps are empty.
	if len(shard.swarms[infoHash].seeders)|len(shard.swarms[infoHash].leechers) == 0 {
		delete(shard.swarms, infoHash)
	}

	shard.Unlock()
	return nil
}
// PutLeecher adds the given peer to the leechers of the swarm for infoHash,
// creating the swarm if it does not exist yet. The current time is recorded
// as the peer's mtime for later garbage collection.
// Panics if the store has been stopped.
func (s *peerStore) PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	shard.Lock()
	defer shard.Unlock()

	// The swarm value holds maps, so the copy below shares them.
	sw, ok := shard.swarms[infoHash]
	if !ok {
		sw = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
		shard.swarms[infoHash] = sw
	}
	sw.leechers[peerKey(p)] = time.Now().UnixNano()
	return nil
}
// DeleteLeecher removes the given peer from the leechers of the swarm for
// infoHash. It returns store.ErrResourceDoesNotExist if the swarm or peer
// does not exist, and drops the swarm entirely once both member maps are
// empty. Panics if the store has been stopped.
func (s *peerStore) DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	pk := peerKey(p)
	shard.Lock()

	if _, ok := shard.swarms[infoHash]; !ok {
		shard.Unlock()
		return store.ErrResourceDoesNotExist
	}

	if _, ok := shard.swarms[infoHash].leechers[pk]; !ok {
		shard.Unlock()
		return store.ErrResourceDoesNotExist
	}

	delete(shard.swarms[infoHash].leechers, pk)

	// OR of the two lengths is zero only if both maps are empty.
	if len(shard.swarms[infoHash].seeders)|len(shard.swarms[infoHash].leechers) == 0 {
		delete(shard.swarms, infoHash)
	}

	shard.Unlock()
	return nil
}
// GraduateLeecher moves the given peer from the leechers to the seeders of
// the swarm for infoHash, creating the swarm if necessary. The peer need
// not have been a leecher beforehand. Panics if the store has been stopped.
func (s *peerStore) GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	key := peerKey(p)

	shard := s.shards[s.shardIndex(infoHash)]
	shard.Lock()

	if _, ok := shard.swarms[infoHash]; !ok {
		shard.swarms[infoHash] = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
	}

	// Deleting a key that is absent is a no-op, so no existence check.
	delete(shard.swarms[infoHash].leechers, key)

	shard.swarms[infoHash].seeders[key] = time.Now().UnixNano()

	shard.Unlock()
	return nil
}
// CollectGarbage removes all peers whose mtime is at or before cutoff and
// deletes swarms that become empty as a result.
//
// It snapshots each shard's infohashes under a read lock, then processes
// one infohash at a time under the write lock, calling runtime.Gosched in
// between so concurrent request handling is not starved for long.
// Panics if the store has been stopped.
func (s *peerStore) CollectGarbage(cutoff time.Time) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	log.Printf("memory: collecting garbage. Cutoff time: %s", cutoff.String())
	cutoffUnix := cutoff.UnixNano()
	for _, shard := range s.shards {
		// Snapshot the keys so the write lock need not be held while
		// walking the whole shard.
		shard.RLock()
		var infohashes []chihaya.InfoHash
		for key := range shard.swarms {
			infohashes = append(infohashes, key)
		}
		shard.RUnlock()
		runtime.Gosched()

		for _, infohash := range infohashes {
			shard.Lock()

			// A snapshotted infohash may have been deleted concurrently;
			// ranging over the zero-value swarm's nil maps is then a no-op.
			for peerKey, mtime := range shard.swarms[infohash].leechers {
				if mtime <= cutoffUnix {
					delete(shard.swarms[infohash].leechers, peerKey)
				}
			}

			for peerKey, mtime := range shard.swarms[infohash].seeders {
				if mtime <= cutoffUnix {
					delete(shard.swarms[infohash].seeders, peerKey)
				}
			}

			// OR of the two lengths is zero only if both maps are empty.
			if len(shard.swarms[infohash].seeders)|len(shard.swarms[infohash].leechers) == 0 {
				delete(shard.swarms, infohash)
			}

			shard.Unlock()
			runtime.Gosched()
		}

		runtime.Gosched()
	}

	return nil
}
// AnnouncePeers assembles up to numWant peers for an announce response,
// split into IPv4 (peers) and IPv6 (peers6) lists.
//
// A seeder is answered with leechers only; a leecher is answered with
// seeders first and then with other leechers, skipping the announcing peer
// itself (peer4/peer6) in the leecher pass.
// NOTE(review): the seeder branch does not filter out the announcing peer —
// presumably acceptable because a seeder is not in the leecher map; confirm.
//
// Returns store.ErrResourceDoesNotExist if no swarm exists for infoHash.
// Panics if the store has been stopped.
func (s *peerStore) AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers, peers6 []chihaya.Peer, err error) {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	shard.RLock()

	if _, ok := shard.swarms[infoHash]; !ok {
		shard.RUnlock()
		return nil, nil, store.ErrResourceDoesNotExist
	}

	if seeder {
		// Append leechers as possible.
		leechers := shard.swarms[infoHash].leechers
		for p := range leechers {
			decodedPeer := decodePeerKey(p)
			if numWant == 0 {
				break
			}

			if decodedPeer.IP.To4() == nil {
				peers6 = append(peers6, decodedPeer)
			} else {
				peers = append(peers, decodedPeer)
			}
			numWant--
		}
	} else {
		// Append as many seeders as possible.
		seeders := shard.swarms[infoHash].seeders
		for p := range seeders {
			decodedPeer := decodePeerKey(p)
			if numWant == 0 {
				break
			}

			if decodedPeer.IP.To4() == nil {
				peers6 = append(peers6, decodedPeer)
			} else {
				peers = append(peers, decodedPeer)
			}
			numWant--
		}

		// Append leechers until we reach numWant.
		leechers := shard.swarms[infoHash].leechers
		if numWant > 0 {
			for p := range leechers {
				decodedPeer := decodePeerKey(p)
				if numWant == 0 {
					break
				}

				if decodedPeer.IP.To4() == nil {
					// Never hand a leecher back its own address.
					if decodedPeer.Equal(peer6) {
						continue
					}
					peers6 = append(peers6, decodedPeer)
				} else {
					if decodedPeer.Equal(peer4) {
						continue
					}
					peers = append(peers, decodedPeer)
				}
				numWant--
			}
		}
	}

	shard.RUnlock()
	return
}
// GetSeeders returns all seeders of the swarm for infoHash, split into
// IPv4 (peers) and IPv6 (peers6) lists. It returns
// store.ErrResourceDoesNotExist if no swarm exists for infoHash.
// Panics if the store has been stopped.
func (s *peerStore) GetSeeders(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	shard.RLock()

	if _, ok := shard.swarms[infoHash]; !ok {
		shard.RUnlock()
		return nil, nil, store.ErrResourceDoesNotExist
	}

	seeders := shard.swarms[infoHash].seeders
	for p := range seeders {
		decodedPeer := decodePeerKey(p)
		if decodedPeer.IP.To4() == nil {
			peers6 = append(peers6, decodedPeer)
		} else {
			peers = append(peers, decodedPeer)
		}
	}

	shard.RUnlock()
	return
}
// GetLeechers returns all leechers of the swarm for infoHash, split into
// IPv4 (peers) and IPv6 (peers6) lists. It returns
// store.ErrResourceDoesNotExist if no swarm exists for infoHash.
// Panics if the store has been stopped.
func (s *peerStore) GetLeechers(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error) {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	shard.RLock()
	defer shard.RUnlock()

	sw, ok := shard.swarms[infoHash]
	if !ok {
		return nil, nil, store.ErrResourceDoesNotExist
	}

	for pk := range sw.leechers {
		peer := decodePeerKey(pk)
		if peer.IP.To4() != nil {
			peers = append(peers, peer)
		} else {
			peers6 = append(peers6, peer)
		}
	}
	return peers, peers6, nil
}
// NumSeeders reports the number of seeders in the swarm for infoHash, or 0
// if no such swarm exists. Panics if the store has been stopped.
func (s *peerStore) NumSeeders(infoHash chihaya.InfoHash) int {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	shard.RLock()
	defer shard.RUnlock()

	// A missing infohash yields the zero-value swarm, whose nil seeders
	// map has length 0 — the same answer the explicit check gave.
	return len(shard.swarms[infoHash].seeders)
}

// NumLeechers reports the number of leechers in the swarm for infoHash, or
// 0 if no such swarm exists. Panics if the store has been stopped.
func (s *peerStore) NumLeechers(infoHash chihaya.InfoHash) int {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	shard := s.shards[s.shardIndex(infoHash)]
	shard.RLock()
	defer shard.RUnlock()

	return len(shard.swarms[infoHash].leechers)
}
// Stop asynchronously shuts the store down, replacing all shards with
// fresh empty ones so their memory can be reclaimed. The returned channel
// is closed on completion; any further method call panics.
func (s *peerStore) Stop() <-chan error {
	toReturn := make(chan error)
	go func() {
		shards := make([]*peerShard, len(s.shards))
		for i := 0; i < len(s.shards); i++ {
			shards[i] = &peerShard{}
			shards[i].swarms = make(map[chihaya.InfoHash]swarm)
		}
		s.shards = shards
		close(s.closed)
		close(toReturn)
	}()
	return toReturn
}

View file

@ -1,142 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package memory
import (
"testing"
"github.com/chihaya/chihaya/server/store"
)
var (
	// Generic PeerStore conformance/benchmark suites, bound to this
	// package's in-memory driver.
	peerStoreTester      = store.PreparePeerStoreTester(&peerStoreDriver{})
	peerStoreBenchmarker = store.PreparePeerStoreBenchmarker(&peerStoreDriver{})
	peerStoreTestConfig  = &store.DriverConfig{}
)

// init fixes the shard count to 1 so test runs are deterministic.
func init() {
	unmarshalledConfig := struct {
		Shards int
	}{
		1,
	}

	peerStoreTestConfig.Config = unmarshalledConfig
}

func TestPeerStore(t *testing.T) {
	peerStoreTester.TestPeerStore(t, peerStoreTestConfig)
}

// Seeder insertion/removal benchmarks.
func BenchmarkPeerStore_PutSeeder(b *testing.B) {
	peerStoreBenchmarker.PutSeeder(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutSeeder1KInfohash(b *testing.B) {
	peerStoreBenchmarker.PutSeeder1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutSeeder1KSeeders(b *testing.B) {
	peerStoreBenchmarker.PutSeeder1KSeeders(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutSeeder1KInfohash1KSeeders(b *testing.B) {
	peerStoreBenchmarker.PutSeeder1KInfohash1KSeeders(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutDeleteSeeder(b *testing.B) {
	peerStoreBenchmarker.PutDeleteSeeder(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutDeleteSeeder1KInfohash(b *testing.B) {
	peerStoreBenchmarker.PutDeleteSeeder1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutDeleteSeeder1KSeeders(b *testing.B) {
	peerStoreBenchmarker.PutDeleteSeeder1KSeeders(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutDeleteSeeder1KInfohash1KSeeders(b *testing.B) {
	peerStoreBenchmarker.PutDeleteSeeder1KInfohash1KSeeders(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_DeleteSeederNonExist(b *testing.B) {
	peerStoreBenchmarker.DeleteSeederNonExist(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash(b *testing.B) {
	peerStoreBenchmarker.DeleteSeederNonExist1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_DeleteSeederNonExist1KSeeders(b *testing.B) {
	peerStoreBenchmarker.DeleteSeederNonExist1KSeeders(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_DeleteSeederNonExist1KInfohash1KSeeders(b *testing.B) {
	peerStoreBenchmarker.DeleteSeederNonExist1KInfohash1KSeeders(b, peerStoreTestConfig)
}
// Leecher lifecycle benchmarks. Four of these were previously named
// "…1KSeeders"/"…1KInfohash1KSeeders" although they invoke the *leecher*
// variants of the benchmarker; the names now match the work performed.
func BenchmarkPeerStore_PutGraduateDeleteLeecher(b *testing.B) {
	peerStoreBenchmarker.PutGraduateDeleteLeecher(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash(b *testing.B) {
	peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutGraduateDeleteLeecher1KLeechers(b *testing.B) {
	peerStoreBenchmarker.PutGraduateDeleteLeecher1KLeechers(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_PutGraduateDeleteLeecher1KInfohash1KLeechers(b *testing.B) {
	peerStoreBenchmarker.PutGraduateDeleteLeecher1KInfohash1KLeechers(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_GraduateLeecherNonExist(b *testing.B) {
	peerStoreBenchmarker.GraduateLeecherNonExist(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash(b *testing.B) {
	peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_GraduateLeecherNonExist1KLeechers(b *testing.B) {
	peerStoreBenchmarker.GraduateLeecherNonExist1KLeechers(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_GraduateLeecherNonExist1KInfohash1KLeechers(b *testing.B) {
	peerStoreBenchmarker.GraduateLeecherNonExist1KInfohash1KLeechers(b, peerStoreTestConfig)
}
// Announce and read-path benchmarks.
func BenchmarkPeerStore_AnnouncePeers(b *testing.B) {
	peerStoreBenchmarker.AnnouncePeers(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_AnnouncePeers1KInfohash(b *testing.B) {
	peerStoreBenchmarker.AnnouncePeers1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_AnnouncePeersSeeder(b *testing.B) {
	peerStoreBenchmarker.AnnouncePeersSeeder(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_AnnouncePeersSeeder1KInfohash(b *testing.B) {
	peerStoreBenchmarker.AnnouncePeersSeeder1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_GetSeeders(b *testing.B) {
	peerStoreBenchmarker.GetSeeders(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_GetSeeders1KInfohash(b *testing.B) {
	peerStoreBenchmarker.GetSeeders1KInfohash(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_NumSeeders(b *testing.B) {
	peerStoreBenchmarker.NumSeeders(b, peerStoreTestConfig)
}

func BenchmarkPeerStore_NumSeeders1KInfohash(b *testing.B) {
	peerStoreBenchmarker.NumSeeders1KInfohash(b, peerStoreTestConfig)
}

View file

@ -1,93 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package memory
import (
"sync"
"github.com/chihaya/chihaya/server/store"
)
// init registers this in-memory implementation as the "memory" StringStore
// driver.
func init() {
	store.RegisterStringStoreDriver("memory", &stringStoreDriver{})
}

// stringStoreDriver constructs in-memory StringStore instances.
type stringStoreDriver struct{}

// New returns an empty, started stringStore. The DriverConfig is ignored —
// this implementation has no tunable options.
func (d *stringStoreDriver) New(_ *store.DriverConfig) (store.StringStore, error) {
	return &stringStore{
		strings: make(map[string]struct{}),
		closed:  make(chan struct{}),
	}, nil
}

// stringStore implements store.StringStore with a mutex-guarded string set.
type stringStore struct {
	strings map[string]struct{} // set membership map
	closed  chan struct{}       // closed by Stop; later calls panic
	sync.RWMutex
}

var _ store.StringStore = &stringStore{}
// PutString inserts s into the store. Inserting an existing string is a
// no-op. It panics if the store has already been stopped.
func (ss *stringStore) PutString(s string) error {
	ss.Lock()
	defer ss.Unlock()
	// Non-blocking read: only succeeds after Stop has closed the channel.
	select {
	case <-ss.closed:
		panic("attempted to interact with stopped store")
	default:
	}
	ss.strings[s] = struct{}{}
	return nil
}
// HasString reports whether s is contained in the store.
// It panics if the store has already been stopped.
func (ss *stringStore) HasString(s string) (bool, error) {
	ss.RLock()
	defer ss.RUnlock()

	select {
	case <-ss.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	if _, found := ss.strings[s]; found {
		return true, nil
	}
	return false, nil
}
// RemoveString deletes s from the store.
// It returns store.ErrResourceDoesNotExist if s was not present and panics
// if the store has already been stopped.
func (ss *stringStore) RemoveString(s string) error {
	ss.Lock()
	defer ss.Unlock()

	select {
	case <-ss.closed:
		panic("attempted to interact with stopped store")
	default:
	}

	_, found := ss.strings[s]
	if !found {
		return store.ErrResourceDoesNotExist
	}
	delete(ss.strings, s)
	return nil
}
// Stop asynchronously shuts the store down, dropping all stored strings.
// The returned channel is closed when shutdown completes; any further
// method call panics.
func (ss *stringStore) Stop() <-chan error {
	toReturn := make(chan error)
	go func() {
		ss.Lock()
		defer ss.Unlock()
		// Replace the map so its memory can be reclaimed.
		ss.strings = make(map[string]struct{})
		close(ss.closed)
		close(toReturn)
	}()
	return toReturn
}

View file

@ -1,101 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package memory
import (
"testing"
"github.com/chihaya/chihaya/server/store"
)
var (
	// Generic StringStore conformance/benchmark suites, bound to this
	// package's in-memory driver.
	stringStoreTester      = store.PrepareStringStoreTester(&stringStoreDriver{})
	stringStoreBenchmarker = store.PrepareStringStoreBenchmarker(&stringStoreDriver{})
	stringStoreTestConfig  = &store.DriverConfig{}
)

func TestStringStore(t *testing.T) {
	stringStoreTester.TestStringStore(t, stringStoreTestConfig)
}

// Single-string benchmarks (short and long inputs).
func BenchmarkStringStore_AddShort(b *testing.B) {
	stringStoreBenchmarker.AddShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_AddLong(b *testing.B) {
	stringStoreBenchmarker.AddLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_LookupShort(b *testing.B) {
	stringStoreBenchmarker.LookupShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_LookupLong(b *testing.B) {
	stringStoreBenchmarker.LookupLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_AddRemoveShort(b *testing.B) {
	stringStoreBenchmarker.AddRemoveShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_AddRemoveLong(b *testing.B) {
	stringStoreBenchmarker.AddRemoveLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_LookupNonExistShort(b *testing.B) {
	stringStoreBenchmarker.LookupNonExistShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_LookupNonExistLong(b *testing.B) {
	stringStoreBenchmarker.LookupNonExistLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_RemoveNonExistShort(b *testing.B) {
	stringStoreBenchmarker.RemoveNonExistShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_RemoveNonExistLong(b *testing.B) {
	stringStoreBenchmarker.RemoveNonExistLong(b, stringStoreTestConfig)
}

// Batched (1000-entry) benchmarks.
func BenchmarkStringStore_Add1KShort(b *testing.B) {
	stringStoreBenchmarker.Add1KShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_Add1KLong(b *testing.B) {
	stringStoreBenchmarker.Add1KLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_Lookup1KShort(b *testing.B) {
	stringStoreBenchmarker.Lookup1KShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_Lookup1KLong(b *testing.B) {
	stringStoreBenchmarker.Lookup1KLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_AddRemove1KShort(b *testing.B) {
	stringStoreBenchmarker.AddRemove1KShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_AddRemove1KLong(b *testing.B) {
	stringStoreBenchmarker.AddRemove1KLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_LookupNonExist1KShort(b *testing.B) {
	stringStoreBenchmarker.LookupNonExist1KShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_LookupNonExist1KLong(b *testing.B) {
	stringStoreBenchmarker.LookupNonExist1KLong(b, stringStoreTestConfig)
}

func BenchmarkStringStore_RemoveNonExist1KShort(b *testing.B) {
	stringStoreBenchmarker.RemoveNonExist1KShort(b, stringStoreTestConfig)
}

func BenchmarkStringStore_RemoveNonExist1KLong(b *testing.B) {
	stringStoreBenchmarker.RemoveNonExist1KLong(b, stringStoreTestConfig)
}

View file

@ -1,25 +0,0 @@
## Client Blacklisting/Whitelisting Middlewares
This package provides the announce middlewares `client_whitelist` and `client_blacklist` for blacklisting or whitelisting clients for announces.
### `client_blacklist`
The `client_blacklist` middleware uses all clientIDs stored in the `StringStore` to blacklist, i.e. block announces.
The clientID part of the peerID of an announce is matched against the `StringStore`, if it's contained within the `StringStore`, the announce is aborted.
### `client_whitelist`
The `client_whitelist` middleware uses all clientIDs stored in the `StringStore` to whitelist, i.e. allow announces.
The clientID part of the peerID of an announce is matched against the `StringStore`, if it's _not_ contained within the `StringStore`, the announce is aborted.
### Important things to notice
Both middlewares operate on announce requests only.
Both middlewares use the same `StringStore`.
It is therefore not advised to have both the `client_blacklist` and the `client_whitelist` middleware running.
(If you add a clientID to the `StringStore`, it will be used for both blacklisting and whitelisting.
If your store contains no clientIDs, no announces will be blocked by the blacklist, but all announces will be blocked by the whitelist.
If your store contains all clientIDs, no announces will be blocked by the whitelist, but all announces will be blocked by the blacklist.)

View file

@ -1,34 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package client
import (
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/clientid"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// init registers the blacklist middleware under the name "client_blacklist".
func init() {
	tracker.RegisterAnnounceMiddleware("client_blacklist", blacklistAnnounceClient)
}

// ErrBlacklistedClient is returned by an announce middleware if the announcing
// Client is blacklisted.
var ErrBlacklistedClient = tracker.ClientError("client blacklisted")

// blacklistAnnounceClient provides a middleware that only allows Clients to
// announce that are not stored in the StringStore.
//
// The client ID is derived from the announcing peer's peer ID and looked up
// under the PrefixClient prefix.
func blacklistAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		blacklisted, err := store.MustGetStore().HasString(PrefixClient + clientid.New(string(req.PeerID[:])))
		// Early returns instead of an else-if chain after a return.
		if err != nil {
			return err
		}
		if blacklisted {
			return ErrBlacklistedClient
		}
		return next(cfg, req, resp)
	}
}

View file

@ -1,37 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package client
import (
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/clientid"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// init registers the whitelist middleware under the name "client_whitelist".
func init() {
	tracker.RegisterAnnounceMiddleware("client_whitelist", whitelistAnnounceClient)
}

// PrefixClient is the prefix to be used for client peer IDs.
const PrefixClient = "c-"

// ErrNotWhitelistedClient is returned by an announce middleware if the
// announcing Client is not whitelisted.
var ErrNotWhitelistedClient = tracker.ClientError("client not whitelisted")

// whitelistAnnounceClient provides a middleware that only allows Clients to
// announce that are stored in the StringStore.
func whitelistAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		whitelisted, err := store.MustGetStore().HasString(PrefixClient + clientid.New(string(req.PeerID[:])))
		// Early returns instead of an else-if chain after a return.
		if err != nil {
			return err
		}
		if !whitelisted {
			return ErrNotWhitelistedClient
		}
		return next(cfg, req, resp)
	}
}

View file

@ -1,69 +0,0 @@
## Infohash Blacklisting/Whitelisting Middlewares
This package provides the middleware `infohash_blacklist` and `infohash_whitelist` for blacklisting or whitelisting infohashes.
It also provides the configurable scrape middleware `infohash_blacklist` and `infohash_whitelist` for blacklisting or whitelisting infohashes.
### `infohash_blacklist`
#### For Announces
The `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to blacklist, i.e. block announces.
#### For Scrapes
The configurable `infohash_blacklist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to blacklist scrape requests.
The scrape middleware has two modes of operation: _Block_ and _Filter_.
- _Block_ will drop a scrape request if it contains a blacklisted infohash.
- _Filter_ will filter all blacklisted infohashes from a scrape request, potentially leaving behind an empty scrape request.
**IMPORTANT**: This mode **does not work with UDP servers**.
See the configuration section for information about how to configure the scrape middleware.
### `infohash_whitelist`
#### For Announces
The `infohash_whitelist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to whitelist, i.e. allow announces.
#### For Scrapes
The configurable `infohash_whitelist` middleware uses all infohashes stored in the `StringStore` with the `PrefixInfohash` prefix to whitelist scrape requests.
The scrape middleware has two modes of operation: _Block_ and _Filter_.
- _Block_ will drop a scrape request if it contains a non-whitelisted infohash.
- _Filter_ will filter all non-whitelisted infohashes from a scrape request, potentially leaving behind an empty scrape request.
**IMPORTANT**: This mode **does not work with UDP servers**.
See the configuration section for information about how to configure the scrape middleware.
### Important things to notice
Both blacklist and whitelist middleware use the same `StringStore`.
It is therefore not advised to have both the `infohash_blacklist` and the `infohash_whitelist` announce or scrape middleware running.
(If you add an infohash to the `StringStore`, it will be used for blacklisting and whitelisting.
If your store contains no infohashes, no announces/scrapes will be blocked by the blacklist, but all will be blocked by the whitelist.
If your store contains all infohashes, no announces/scrapes will be blocked by the whitelist, but all will be blocked by the blacklist.)
Also note that the announce and scrape middleware both use the same `StringStore`.
It is therefore not possible to use different infohashes for black-/whitelisting on announces and scrape requests.
### Configuration
The scrape middleware is configurable.
The configuration uses a single required parameter `mode` to determine the mode of operation for the middleware.
An example configuration might look like this:
chihaya:
tracker:
scrape_middleware:
- name: infohash_blacklist
config:
mode: block
`mode` accepts two values: `block` and `filter`.
**IMPORTANT**: The `filter` mode **does not work with UDP servers**.

View file

@ -1,106 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package infohash
import (
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// init registers the blacklist middleware for announces and scrapes under
// the "infohash_blacklist" name and wires mustGetStore to the store server's
// StringStore.
func init() {
	tracker.RegisterAnnounceMiddleware("infohash_blacklist", blacklistAnnounceInfohash)
	tracker.RegisterScrapeMiddlewareConstructor("infohash_blacklist", blacklistScrapeInfohash)
	// Indirection so tests can substitute a mock StringStore.
	mustGetStore = func() store.StringStore {
		return store.MustGetStore().StringStore
	}
}

// ErrBlockedInfohash is returned by a middleware if any of the infohashes
// contained in an announce or scrape are disallowed.
var ErrBlockedInfohash = tracker.ClientError("disallowed infohash")

// mustGetStore returns the StringStore used for black-/whitelisting.
// Set in init; replaced by tests with a mock.
var mustGetStore func() store.StringStore
// blacklistAnnounceInfohash is a middleware that rejects any announce whose
// infohash is present in the blacklist StringStore; all other announces are
// passed on to the next handler.
func blacklistAnnounceInfohash(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		found, err := mustGetStore().HasString(PrefixInfohash + string(req.InfoHash[:]))
		if err != nil {
			return err
		}
		if found {
			return ErrBlockedInfohash
		}
		return next(cfg, req, resp)
	}
}
// blacklistScrapeInfohash provides a middleware constructor for a middleware
// that blocks or filters scrape requests based on the infohashes scraped.
//
// The middleware works in two modes: block and filter.
// The block mode blocks a scrape completely if any of the infohashes is
// disallowed.
// The filter mode filters any disallowed infohashes from the scrape,
// potentially leaving an empty scrape.
//
// ErrUnknownMode is returned if the Mode specified in the config is unknown.
func blacklistScrapeInfohash(c chihaya.MiddlewareConfig) (tracker.ScrapeMiddleware, error) {
	cfg, err := newConfig(c)
	if err != nil {
		return nil, err
	}

	switch cfg.Mode {
	case ModeFilter:
		return blacklistFilterScrape, nil
	case ModeBlock:
		return blacklistBlockScrape, nil
	default:
		// newConfig already rejects unknown modes, but if that invariant
		// ever breaks, return the documented error instead of panicking.
		return nil, ErrUnknownMode
	}
}
// blacklistFilterScrape filters all blacklisted infohashes out of a scrape
// request, potentially leaving an empty request, then calls the next handler.
func blacklistFilterScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) error {
		storage := mustGetStore()

		// Filter in place, reusing the backing array. The previous
		// swap-with-last loop kept ranging over the original slice with
		// stale indices after shrinking req.InfoHashes, which indexed out
		// of range (panic) when two blacklisted infohashes appeared
		// consecutively.
		kept := req.InfoHashes[:0]
		for _, ih := range req.InfoHashes {
			blacklisted, err := storage.HasString(PrefixInfohash + string(ih[:]))
			if err != nil {
				return err
			}
			if !blacklisted {
				kept = append(kept, ih)
			}
		}
		req.InfoHashes = kept

		return next(cfg, req, resp)
	}
}
// blacklistBlockScrape rejects a scrape request in its entirety if any one of
// its infohashes is present in the blacklist StringStore.
func blacklistBlockScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) error {
		storage := mustGetStore()
		for _, infoHash := range req.InfoHashes {
			found, err := storage.HasString(PrefixInfohash + string(infoHash[:]))
			if err != nil {
				return err
			}
			if found {
				return ErrBlockedInfohash
			}
		}
		return next(cfg, req, resp)
	}
}

View file

@ -1,140 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package infohash
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/stopper"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// storeMock is an in-memory StringStore used to test the middleware without
// a running store server.
type storeMock struct {
	strings map[string]struct{}
}

// PutString records s in the mock's set.
func (ss *storeMock) PutString(s string) error {
	ss.strings[s] = struct{}{}
	return nil
}

// HasString reports whether s was previously put into the mock.
func (ss *storeMock) HasString(s string) (bool, error) {
	_, ok := ss.strings[s]
	return ok, nil
}

// RemoveString deletes s from the mock's set.
func (ss *storeMock) RemoveString(s string) error {
	delete(ss.strings, s)
	return nil
}

// Stop satisfies stopper.Stopper; the mock is considered already stopped.
func (ss *storeMock) Stop() <-chan error {
	return stopper.AlreadyStopped
}

// mock is the shared StringStore instance used by all tests in this file.
var mock store.StringStore = &storeMock{
	strings: make(map[string]struct{}),
}

// Two distinct fixture infohashes; ih1 is added to the store in TestASetUp,
// ih2 is not.
var (
	ih1 = chihaya.InfoHash([20]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
	ih2 = chihaya.InfoHash([20]byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
)
// TestASetUp replaces mustGetStore with the mock and seeds it with ih1.
// The "A" in the name makes it sort first; the later tests in this file rely
// on this shared state.
func TestASetUp(t *testing.T) {
	mustGetStore = func() store.StringStore {
		return mock
	}
	mustGetStore().PutString(PrefixInfohash + string(ih1[:]))
}

// TestBlacklistAnnounceMiddleware checks that announces for the listed ih1
// are rejected while the zero-value infohash and ih2 pass through.
func TestBlacklistAnnounceMiddleware(t *testing.T) {
	var (
		achain tracker.AnnounceChain
		req    chihaya.AnnounceRequest
		resp   chihaya.AnnounceResponse
	)
	achain.Append(blacklistAnnounceInfohash)
	handler := achain.Handler()

	// The zero-value infohash is not blacklisted.
	err := handler(nil, &req, &resp)
	assert.Nil(t, err)

	// ih1 was blacklisted in TestASetUp.
	req.InfoHash = chihaya.InfoHash(ih1)
	err = handler(nil, &req, &resp)
	assert.Equal(t, ErrBlockedInfohash, err)

	// ih2 is not blacklisted.
	req.InfoHash = chihaya.InfoHash(ih2)
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
}
// TestBlacklistScrapeMiddlewareBlock checks block mode: a scrape containing
// the blacklisted ih1 is rejected outright, one with only ih2 passes.
func TestBlacklistScrapeMiddlewareBlock(t *testing.T) {
	var (
		schain tracker.ScrapeChain
		req    chihaya.ScrapeRequest
		resp   chihaya.ScrapeResponse
	)

	mw, err := blacklistScrapeInfohash(chihaya.MiddlewareConfig{
		Name: "blacklist_infohash",
		Config: Config{
			Mode: ModeBlock,
		},
	})
	assert.Nil(t, err)
	schain.Append(mw)
	handler := schain.Handler()

	// An empty scrape passes.
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)

	// Any blacklisted infohash blocks the whole scrape.
	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
	err = handler(nil, &req, &resp)
	assert.Equal(t, ErrBlockedInfohash, err)

	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih2)}
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
}

// TestBlacklistScrapeMiddlewareFilter checks filter mode: the blacklisted ih1
// is stripped from the request while ih2 is kept.
func TestBlacklistScrapeMiddlewareFilter(t *testing.T) {
	var (
		schain tracker.ScrapeChain
		req    chihaya.ScrapeRequest
		resp   chihaya.ScrapeResponse
	)

	mw, err := blacklistScrapeInfohash(chihaya.MiddlewareConfig{
		Name: "blacklist_infohash",
		Config: Config{
			Mode: ModeFilter,
		},
	})
	assert.Nil(t, err)
	schain.Append(mw)
	handler := schain.Handler()

	// An empty scrape passes untouched.
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)

	// ih1 is filtered out, ih2 remains.
	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
	assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih2)}, req.InfoHashes)

	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih2)}
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
}

View file

@ -1,56 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package infohash
import (
"errors"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
)
// ErrUnknownMode is returned by a MiddlewareConstructor if the Mode specified
// in the configuration is unknown.
var ErrUnknownMode = errors.New("unknown mode")

// Mode represents the mode of operation for an infohash scrape middleware.
type Mode string

const (
	// ModeFilter makes the middleware filter disallowed infohashes from a
	// scrape request.
	ModeFilter = Mode("filter")

	// ModeBlock makes the middleware block a scrape request if it contains
	// at least one disallowed infohash.
	ModeBlock = Mode("block")
)

// Config represents the configuration for an infohash scrape middleware.
type Config struct {
	// Mode selects the middleware behaviour: ModeBlock or ModeFilter.
	Mode Mode `yaml:"mode"`
}
// newConfig parses the given MiddlewareConfig as an infohash.Config.
// ErrUnknownMode is returned if the mode is unknown.
func newConfig(mwcfg chihaya.MiddlewareConfig) (*Config, error) {
	// Round-trip the untyped config through YAML to decode it strictly
	// into a Config.
	raw, err := yaml.Marshal(mwcfg.Config)
	if err != nil {
		return nil, err
	}

	cfg := new(Config)
	if err = yaml.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}

	switch cfg.Mode {
	case ModeBlock, ModeFilter:
		return cfg, nil
	default:
		return nil, ErrUnknownMode
	}
}

View file

@ -1,56 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package infohash
import (
"fmt"
"testing"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
"github.com/stretchr/testify/assert"
)
var (
	// configTemplate renders a one-entry middleware config for the tests.
	configTemplate = `name: foo
config:
  %s: %s`

	// data enumerates YAML key/value pairs, whether parsing must fail, and
	// the Mode expected on success.
	data = []testData{
		{"mode", "block", false, ModeBlock},
		{"mode", "filter", false, ModeFilter},
		{"some", "stuff", true, ModeBlock},
	}
)

// testData is a single table entry for TestNewConfig.
type testData struct {
	key      string // YAML key rendered into the template
	value    string // YAML value rendered into the template
	err      bool   // whether newConfig is expected to fail
	expected Mode   // expected Mode when err is false
}

// TestNewConfig checks that newConfig rejects an empty config and handles
// each table entry above correctly.
func TestNewConfig(t *testing.T) {
	var mwconfig chihaya.MiddlewareConfig

	// An empty MiddlewareConfig carries no valid mode and must be rejected.
	cfg, err := newConfig(mwconfig)
	assert.NotNil(t, err)
	assert.Nil(t, cfg)

	for _, test := range data {
		config := fmt.Sprintf(configTemplate, test.key, test.value)
		err = yaml.Unmarshal([]byte(config), &mwconfig)
		assert.Nil(t, err)

		cfg, err = newConfig(mwconfig)
		if test.err {
			assert.NotNil(t, err)
			continue
		}
		assert.Nil(t, err)
		assert.Equal(t, test.expected, cfg.Mode)
	}
}

View file

@ -1,99 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package infohash
import (
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/tracker"
)
// init registers the whitelist middleware for announces and scrapes under
// the "infohash_whitelist" name.
func init() {
	tracker.RegisterAnnounceMiddleware("infohash_whitelist", whitelistAnnounceInfohash)
	tracker.RegisterScrapeMiddlewareConstructor("infohash_whitelist", whitelistScrapeInfohash)
}

// PrefixInfohash is the prefix to be used for infohashes stored in the
// StringStore, to keep them separate from other stored strings.
const PrefixInfohash = "ih-"
// whitelistAnnounceInfohash provides a middleware that only allows announces
// for infohashes that are stored in a StringStore.
// (NOTE(review): the previous comment said "not stored", which describes the
// blacklist; the code below rejects infohashes absent from the store.)
func whitelistAnnounceInfohash(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		whitelisted, err := mustGetStore().HasString(PrefixInfohash + string(req.InfoHash[:]))

		if err != nil {
			return err
		} else if !whitelisted {
			return ErrBlockedInfohash
		}
		return next(cfg, req, resp)
	}
}
// whitelistScrapeInfohash provides a middleware constructor for a middleware
// that blocks or filters scrape requests based on the infohashes scraped.
//
// The middleware works in two modes: block and filter.
// The block mode blocks a scrape completely if any of the infohashes is
// disallowed.
// The filter mode filters any disallowed infohashes from the scrape,
// potentially leaving an empty scrape.
//
// ErrUnknownMode is returned if the Mode specified in the config is unknown.
func whitelistScrapeInfohash(c chihaya.MiddlewareConfig) (tracker.ScrapeMiddleware, error) {
	cfg, err := newConfig(c)
	if err != nil {
		return nil, err
	}

	switch cfg.Mode {
	case ModeFilter:
		return whitelistFilterScrape, nil
	case ModeBlock:
		return whitelistBlockScrape, nil
	default:
		// newConfig already rejects unknown modes, but if that invariant
		// ever breaks, return the documented error instead of panicking.
		return nil, ErrUnknownMode
	}
}
// whitelistFilterScrape filters all non-whitelisted infohashes out of a
// scrape request, potentially leaving an empty request, then calls the next
// handler.
func whitelistFilterScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) error {
		storage := mustGetStore()

		// Filter in place, reusing the backing array. The previous
		// swap-with-last loop kept ranging over the original slice with
		// stale indices after shrinking req.InfoHashes, which indexed out
		// of range (panic) when two disallowed infohashes appeared
		// consecutively.
		kept := req.InfoHashes[:0]
		for _, ih := range req.InfoHashes {
			whitelisted, err := storage.HasString(PrefixInfohash + string(ih[:]))
			if err != nil {
				return err
			}
			if whitelisted {
				kept = append(kept, ih)
			}
		}
		req.InfoHashes = kept

		return next(cfg, req, resp)
	}
}
// whitelistBlockScrape rejects a scrape request in its entirety if any one of
// its infohashes is missing from the whitelist StringStore.
func whitelistBlockScrape(next tracker.ScrapeHandler) tracker.ScrapeHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) error {
		storage := mustGetStore()
		for _, infoHash := range req.InfoHashes {
			allowed, err := storage.HasString(PrefixInfohash + string(infoHash[:]))
			if err != nil {
				return err
			}
			if !allowed {
				return ErrBlockedInfohash
			}
		}
		return next(cfg, req, resp)
	}
}

View file

@ -1,96 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package infohash
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/tracker"
)
// TestWhitelistAnnounceMiddleware checks that only announces for the
// whitelisted ih1 pass; the zero-value infohash and ih2 are rejected.
func TestWhitelistAnnounceMiddleware(t *testing.T) {
	var (
		achain tracker.AnnounceChain
		req    chihaya.AnnounceRequest
		resp   chihaya.AnnounceResponse
	)
	achain.Append(whitelistAnnounceInfohash)
	handler := achain.Handler()

	// The zero-value infohash is not whitelisted.
	err := handler(nil, &req, &resp)
	assert.Equal(t, ErrBlockedInfohash, err)

	req.InfoHash = chihaya.InfoHash(ih2)
	err = handler(nil, &req, &resp)
	assert.Equal(t, ErrBlockedInfohash, err)

	// ih1 was whitelisted in TestASetUp.
	req.InfoHash = chihaya.InfoHash(ih1)
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
}

// TestWhitelistScrapeMiddlewareBlock checks block mode: a scrape containing
// the unlisted ih2 is rejected, one with only ih1 passes.
func TestWhitelistScrapeMiddlewareBlock(t *testing.T) {
	var (
		schain tracker.ScrapeChain
		req    chihaya.ScrapeRequest
		resp   chihaya.ScrapeResponse
	)

	mw, err := whitelistScrapeInfohash(chihaya.MiddlewareConfig{
		Name: "whitelist_infohash",
		Config: Config{
			Mode: ModeBlock,
		},
	})
	assert.Nil(t, err)
	schain.Append(mw)
	handler := schain.Handler()

	// An empty scrape passes.
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)

	// Any non-whitelisted infohash blocks the whole scrape.
	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
	err = handler(nil, &req, &resp)
	assert.Equal(t, ErrBlockedInfohash, err)

	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1)}
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
}

// TestWhitelistScrapeMiddlewareFilter checks filter mode: the unlisted ih2 is
// stripped from the request while the whitelisted ih1 is kept.
func TestWhitelistScrapeMiddlewareFilter(t *testing.T) {
	var (
		schain tracker.ScrapeChain
		req    chihaya.ScrapeRequest
		resp   chihaya.ScrapeResponse
	)

	mw, err := whitelistScrapeInfohash(chihaya.MiddlewareConfig{
		Name: "whitelist_infohash",
		Config: Config{
			Mode: ModeFilter,
		},
	})
	assert.Nil(t, err)
	schain.Append(mw)
	handler := schain.Handler()

	// An empty scrape passes untouched.
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)

	// ih2 is filtered out, ih1 remains.
	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1), chihaya.InfoHash(ih2)}
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
	assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih1)}, req.InfoHashes)

	req.InfoHashes = []chihaya.InfoHash{chihaya.InfoHash(ih1)}
	err = handler(nil, &req, &resp)
	assert.Nil(t, err)
	assert.Equal(t, []chihaya.InfoHash{chihaya.InfoHash(ih1)}, req.InfoHashes)
}

View file

@ -1,32 +0,0 @@
## IP Blacklisting/Whitelisting Middlewares
This package provides the announce middlewares `ip_blacklist` and `ip_whitelist` for blacklisting or whitelisting IP addresses and networks for announces.
### `ip_blacklist`
The `ip_blacklist` middleware uses all IP addresses and networks stored in the `IPStore` to blacklist, i.e. block announces.
Both the IPv4 and the IPv6 addresses contained in the announce are matched against the `IPStore`.
If one or both of the two are contained in the `IPStore`, the announce will be rejected _completely_.
### `ip_whitelist`
The `ip_whitelist` middleware uses all IP addresses and networks stored in the `IPStore` to whitelist, i.e. allow announces.
If present, both the IPv4 and the IPv6 addresses contained in the announce are matched against the `IPStore`.
Only if all IP addresses present in the announce are also present in the `IPStore` will the announce be allowed; otherwise it will be rejected _completely_.
### Important things to notice
Both middlewares operate on announce requests only.
The middlewares will check the IPv4 and IPv6 IPs a client announces to the tracker against an `IPStore`.
Normally the IP address embedded in the announce is the public IP address of the machine the client is running on.
Note however, that a client can override this behaviour by specifying an IP address in the announce itself.
_This middleware does not (dis)allow announces coming from certain IP addresses, but announces containing certain IP addresses_.
Always keep that in mind.
Both middlewares use the same `IPStore`.
It is therefore not advised to have both the `ip_blacklist` and the `ip_whitelist` middleware running.
(If you add an IP address or network to the `IPStore`, it will be used for blacklisting and whitelisting.
If your store contains no addresses, no announces will be blocked by the blacklist, but all announces will be blocked by the whitelist.
If your store contains all addresses, no announces will be blocked by the whitelist, but all announces will be blocked by the blacklist.)

View file

@ -1,47 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package ip
import (
"net"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// init registers the IP blacklist middleware under the "ip_blacklist" name.
func init() {
	tracker.RegisterAnnounceMiddleware("ip_blacklist", blacklistAnnounceIP)
}

// ErrBlockedIP is returned by an announce middleware if any of the announcing
// IPs is disallowed.
var ErrBlockedIP = tracker.ClientError("disallowed IP address")
// blacklistAnnounceIP provides a middleware that only allows IPs to announce
// that are not stored in an IPStore.
//
// If either of the announce's IPv4/IPv6 addresses matches the store, the
// whole announce is rejected with ErrBlockedIP.
func blacklistAnnounceIP(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		blacklisted := false
		storage := store.MustGetStore()

		// We have to check explicitly if they are present, because someone
		// could have added a <nil> net.IP to the store.
		// NOTE(review): if both IPv4 and IPv6 are nil, the final branch
		// still queries HasIP with a nil IP — confirm callers guarantee at
		// least one address is set.
		if req.IPv6 != nil && req.IPv4 != nil {
			blacklisted, err = storage.HasAnyIP([]net.IP{req.IPv4, req.IPv6})
		} else if req.IPv4 != nil {
			blacklisted, err = storage.HasIP(req.IPv4)
		} else {
			blacklisted, err = storage.HasIP(req.IPv6)
		}

		if err != nil {
			return err
		} else if blacklisted {
			return ErrBlockedIP
		}
		return next(cfg, req, resp)
	}
}

View file

@ -1,43 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package ip
import (
"net"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// init registers the IP whitelist middleware under the "ip_whitelist" name.
func init() {
	tracker.RegisterAnnounceMiddleware("ip_whitelist", whitelistAnnounceIP)
}
// whitelistAnnounceIP provides a middleware that only allows IPs to announce
// that are stored in an IPStore. If any address present in the announce is
// missing from the store, the announce is rejected with ErrBlockedIP.
func whitelistAnnounceIP(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		storage := store.MustGetStore()

		var (
			whitelisted bool
			err         error
		)

		// Check presence explicitly, because someone could have added a
		// <nil> net.IP to the store.
		switch {
		case req.IPv4 != nil && req.IPv6 != nil:
			whitelisted, err = storage.HasAllIPs([]net.IP{req.IPv4, req.IPv6})
		case req.IPv4 != nil:
			whitelisted, err = storage.HasIP(req.IPv4)
		default:
			whitelisted, err = storage.HasIP(req.IPv6)
		}

		if err != nil {
			return err
		}
		if !whitelisted {
			return ErrBlockedIP
		}
		return next(cfg, req, resp)
	}
}

View file

@ -1,11 +0,0 @@
## Response Middleware
This package provides the final response for a chain of middleware using the `store` package.
### `store_response`
The `store_response` middleware uses the peer data stored in the peerStore to create a response for the request.
### Important things to notice
This middleware is very basic, and may not do everything that you require.

View file

@ -1,59 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package response
import (
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// init registers the response middleware for both announces and scrapes
// under the "store_response" name.
func init() {
	tracker.RegisterAnnounceMiddleware("store_response", responseAnnounceClient)
	tracker.RegisterScrapeMiddleware("store_response", responseScrapeClient)
}

// FailedToRetrievePeers represents an error returned when attempting to
// fetch peers from the store.
type FailedToRetrievePeers string

// Error satisfies the error interface for FailedToRetrievePeers.
func (f FailedToRetrievePeers) Error() string { return string(f) }
// responseAnnounceClient provides a middleware to make a response to an
// announce based on the current request.
//
// It fills in the intervals, seeder/leecher counts and the peer lists from
// the store. It assumes the store was already updated for this announce
// (see the store_swarm_interaction middleware).
func responseAnnounceClient(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		storage := store.MustGetStore()

		resp.Interval = cfg.AnnounceInterval
		resp.MinInterval = cfg.MinAnnounceInterval
		resp.Compact = req.Compact
		resp.Complete = int32(storage.NumSeeders(req.InfoHash))
		resp.Incomplete = int32(storage.NumLeechers(req.InfoHash))
		// req.Left == 0 marks the announcing peer as a seeder.
		resp.IPv4Peers, resp.IPv6Peers, err = storage.AnnouncePeers(req.InfoHash, req.Left == 0, int(req.NumWant), req.Peer4(), req.Peer6())
		if err != nil {
			return FailedToRetrievePeers(err.Error())
		}

		return next(cfg, req, resp)
	}
}
// responseScrapeClient provides a middleware to make a response to a
// scrape based on the current request.
//
// It fills resp.Files with the seeder/leecher counts for every scraped
// infohash.
func responseScrapeClient(next tracker.ScrapeHandler) tracker.ScrapeHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) (err error) {
		storage := store.MustGetStore()

		// Guard against an uninitialized response: assigning into a nil
		// map panics.
		if resp.Files == nil {
			resp.Files = make(map[chihaya.InfoHash]chihaya.Scrape, len(req.InfoHashes))
		}

		for _, infoHash := range req.InfoHashes {
			resp.Files[infoHash] = chihaya.Scrape{
				Complete:   int32(storage.NumSeeders(infoHash)),
				Incomplete: int32(storage.NumLeechers(infoHash)),
			}
		}

		return next(cfg, req, resp)
	}
}

View file

@ -1,12 +0,0 @@
## Swarm Interaction Middleware
This package provides the announce middleware that modifies peer data stored in the `store` package.
### `store_swarm_interaction`
The `store_swarm_interaction` middleware updates the data stored in the `peerStore` based on the announce.
### Important things to notice
It is recommended to have this middleware run before the `store_response` middleware.
The `store_response` middleware assumes the store to be already updated by the announce.

View file

@ -1,75 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package response
import (
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/event"
"github.com/chihaya/chihaya/server/store"
"github.com/chihaya/chihaya/tracker"
)
// init registers the swarm-interaction middleware under the
// "store_swarm_interaction" name.
func init() {
	tracker.RegisterAnnounceMiddleware("store_swarm_interaction", announceSwarmInteraction)
}

// FailedSwarmInteraction represents an error that indicates that the
// interaction of a peer with a swarm failed.
type FailedSwarmInteraction string

// Error satisfies the error interface for FailedSwarmInteraction.
func (f FailedSwarmInteraction) Error() string { return string(f) }
// announceSwarmInteraction provides a middleware that manages swarm
// interactions for a peer based on the announce.
//
// The update is applied separately for the peer's IPv4 and IPv6 addresses,
// whichever are present.
func announceSwarmInteraction(next tracker.AnnounceHandler) tracker.AnnounceHandler {
	return func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) (err error) {
		if req.IPv4 != nil {
			err = updatePeerStore(req, req.Peer4())
			if err != nil {
				return FailedSwarmInteraction(err.Error())
			}
		}

		if req.IPv6 != nil {
			err = updatePeerStore(req, req.Peer6())
			if err != nil {
				return FailedSwarmInteraction(err.Error())
			}
		}

		return next(cfg, req, resp)
	}
}
// updatePeerStore applies the announce's event to the peer store for a single
// peer (called once for the IPv4 and once for the IPv6 peer).
func updatePeerStore(req *chihaya.AnnounceRequest, peer chihaya.Peer) (err error) {
	storage := store.MustGetStore()

	switch {
	case req.Event == event.Stopped:
		// Remove the peer from both swarms; it is fine if it was never
		// present in one of them.
		err = storage.DeleteSeeder(req.InfoHash, peer)
		if err != nil && err != store.ErrResourceDoesNotExist {
			return err
		}

		err = storage.DeleteLeecher(req.InfoHash, peer)
		if err != nil && err != store.ErrResourceDoesNotExist {
			return err
		}
	case req.Event == event.Completed || req.Left == 0:
		// The peer finished downloading: promote it to a seeder.
		err = storage.GraduateLeecher(req.InfoHash, peer)
		if err != nil {
			return err
		}
	default:
		// Regular announce: (re-)insert the peer as a leecher.
		err = storage.PutLeecher(req.InfoHash, peer)
		if err != nil {
			return err
		}
	}

	return nil
}

View file

@ -1,103 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package store
import (
"fmt"
"time"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/stopper"
)
// peerStoreDrivers holds all registered PeerStore drivers, keyed by name.
var peerStoreDrivers = make(map[string]PeerStoreDriver)

// PeerStore represents an interface for manipulating peers.
type PeerStore interface {
	// PutSeeder adds a seeder for the infoHash to the PeerStore.
	PutSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error
	// DeleteSeeder removes a seeder for the infoHash from the PeerStore.
	//
	// Returns ErrResourceDoesNotExist if the infoHash or peer does not
	// exist.
	DeleteSeeder(infoHash chihaya.InfoHash, p chihaya.Peer) error

	// PutLeecher adds a leecher for the infoHash to the PeerStore.
	PutLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error
	// DeleteLeecher removes a leecher for the infoHash from the PeerStore.
	//
	// Returns ErrResourceDoesNotExist if the infoHash or peer does not
	// exist.
	DeleteLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error

	// GraduateLeecher promotes a peer from a leecher to a seeder for the
	// infoHash within the PeerStore.
	//
	// If the given Peer is not a leecher, it will still be added to the
	// list of seeders and no error will be returned.
	GraduateLeecher(infoHash chihaya.InfoHash, p chihaya.Peer) error

	// AnnouncePeers returns a list of both IPv4, and IPv6 peers for an
	// announce.
	//
	// If seeder is true then the peers returned will only be leechers, the
	// amount of leechers returned will be the smaller value of numWant or
	// the available leechers.
	// If it is false then seeders will be returned up until numWant or the
	// available seeders, whichever is smaller. If the available seeders is
	// less than numWant then peers are returned until numWant or they run out.
	AnnouncePeers(infoHash chihaya.InfoHash, seeder bool, numWant int, peer4, peer6 chihaya.Peer) (peers, peers6 []chihaya.Peer, err error)

	// CollectGarbage deletes peers from the peerStore which are older than the
	// cutoff time.
	CollectGarbage(cutoff time.Time) error

	// GetSeeders gets all the seeders for a particular infoHash.
	GetSeeders(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error)
	// GetLeechers gets all the leechers for a particular infoHash.
	GetLeechers(infoHash chihaya.InfoHash) (peers, peers6 []chihaya.Peer, err error)

	// NumSeeders gets the amount of seeders for a particular infoHash.
	NumSeeders(infoHash chihaya.InfoHash) int
	// NumLeechers gets the amount of leechers for a particular infoHash.
	NumLeechers(infoHash chihaya.InfoHash) int

	// Stopper provides the Stop method that stops the PeerStore.
	// Stop should shut down the PeerStore in a separate goroutine and send
	// an error to the channel if the shutdown failed. If the shutdown
	// was successful, the channel is to be closed.
	stopper.Stopper
}

// PeerStoreDriver represents an interface for creating a handle to the storage
// of peers.
type PeerStoreDriver interface {
	New(*DriverConfig) (PeerStore, error)
}
// RegisterPeerStoreDriver makes a driver available by the provided name.
//
// If this function is called twice with the same name or if the driver is nil,
// it panics.
func RegisterPeerStoreDriver(name string, driver PeerStoreDriver) {
	switch {
	case driver == nil:
		panic("storage: could not register nil PeerStoreDriver")
	case peerStoreDrivers[name] != nil:
		panic("storage: could not register duplicate PeerStoreDriver: " + name)
	}
	peerStoreDrivers[name] = driver
}
// OpenPeerStore returns a PeerStore specified by a configuration.
//
// An error is returned if no driver is registered under cfg.Name.
func OpenPeerStore(cfg *DriverConfig) (PeerStore, error) {
	driver, ok := peerStoreDrivers[cfg.Name]
	if !ok {
		// Report the unknown driver by its name; the original passed the
		// whole *DriverConfig struct to %q, producing garbage output.
		return nil, fmt.Errorf("storage: unknown PeerStoreDriver %q (forgotten import?)", cfg.Name)
	}

	return driver.New(cfg)
}

View file

@ -1,142 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package store
import (
"errors"
"log"
"time"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya"
"github.com/chihaya/chihaya/pkg/stopper"
"github.com/chihaya/chihaya/server"
"github.com/chihaya/chihaya/tracker"
)
// theStore is the process-wide store server singleton, created lazily by
// constructor.
var theStore *Store

// init registers the store server under the "store" name.
func init() {
	server.Register("store", constructor)
}

// ErrResourceDoesNotExist is the error returned by all delete methods in the
// store if the requested resource does not exist.
var ErrResourceDoesNotExist = errors.New("resource does not exist")
// constructor builds (or returns the already-built) store server.
//
// The store is a singleton: the first call parses the config and opens the
// peer, IP and string store drivers; subsequent calls return the same
// instance regardless of the config passed.
func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {
	if theStore == nil {
		cfg, err := newConfig(srvcfg)
		if err != nil {
			return nil, errors.New("store: invalid store config: " + err.Error())
		}

		theStore = &Store{
			cfg:      cfg,
			tkr:      tkr,
			shutdown: make(chan struct{}),
			sg:       stopper.NewStopGroup(),
		}

		// Open each driver and add it to the stop group so Stop can shut
		// them all down.
		ps, err := OpenPeerStore(&cfg.PeerStore)
		if err != nil {
			return nil, err
		}
		theStore.sg.Add(ps)

		ips, err := OpenIPStore(&cfg.IPStore)
		if err != nil {
			return nil, err
		}
		theStore.sg.Add(ips)

		ss, err := OpenStringStore(&cfg.StringStore)
		if err != nil {
			return nil, err
		}
		theStore.sg.Add(ss)

		theStore.PeerStore = ps
		theStore.IPStore = ips
		theStore.StringStore = ss
	}
	return theStore, nil
}
// Config represents the configuration for the store.
type Config struct {
	Addr           string        `yaml:"addr"`
	RequestTimeout time.Duration `yaml:"request_timeout"`
	ReadTimeout    time.Duration `yaml:"read_timeout"`
	WriteTimeout   time.Duration `yaml:"write_timeout"`
	GCAfter        time.Duration `yaml:"gc_after"`
	PeerStore      DriverConfig  `yaml:"peer_store"`
	IPStore        DriverConfig  `yaml:"ip_store"`
	StringStore    DriverConfig  `yaml:"string_store"`
}

// DriverConfig represents the configuration for a store driver.
type DriverConfig struct {
	Name   string      `yaml:"name"`
	Config interface{} `yaml:"config"`
}

// newConfig extracts the store Config from a generic server config by
// round-tripping its untyped Config field through YAML.
func newConfig(srvcfg *chihaya.ServerConfig) (*Config, error) {
	bytes, err := yaml.Marshal(srvcfg.Config)
	if err != nil {
		return nil, err
	}

	var cfg Config
	err = yaml.Unmarshal(bytes, &cfg)
	if err != nil {
		return nil, err
	}
	return &cfg, nil
}
// MustGetStore is used by middleware to access the store.
//
// This function calls log.Fatal if a server hasn't been already created by
// the server package.
func MustGetStore() *Store {
	if theStore == nil {
		log.Fatal("store middleware used without store server")
	}
	return theStore
}

// Store provides storage for a tracker.
type Store struct {
	cfg      *Config
	tkr      *tracker.Tracker
	shutdown chan struct{} // closed by Stop to unblock Start
	sg       *stopper.StopGroup

	// The embedded stores expose their methods directly on *Store.
	PeerStore
	IPStore
	StringStore
}

// Start starts the store drivers and blocks until all of them exit.
func (s *Store) Start() {
	<-s.shutdown
}
// Stop stops the store drivers and waits for them to exit, logging any
// errors they report, then unblocks Start.
func (s *Store) Stop() {
	// Named errs (not "errors") to avoid shadowing the stdlib errors
	// package imported by this file.
	errs := s.sg.Stop()
	if len(errs) == 0 {
		log.Println("Store server shut down cleanly")
	} else {
		log.Println("Store server: failed to shutdown drivers")
		for _, err := range errs {
			log.Println(err.Error())
		}
	}
	close(s.shutdown)
}

File diff suppressed because it is too large Load diff

View file

@ -1,526 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package store
import (
"testing"
"net"
"time"
"github.com/chihaya/chihaya"
"github.com/stretchr/testify/require"
)
// StringStoreTester is a collection of tests for a StringStore driver.
// Every benchmark expects a new, clean storage. Every benchmark should be
// called with a DriverConfig that ensures this.
type StringStoreTester interface {
	TestStringStore(*testing.T, *DriverConfig)
}

// Compile-time check that stringStoreTester implements StringStoreTester.
var _ StringStoreTester = &stringStoreTester{}

// stringStoreTester carries the fixture strings and the driver under test.
type stringStoreTester struct {
	s1, s2 string
	driver StringStoreDriver
}

// PrepareStringStoreTester prepares a reusable suite for StringStore driver
// tests.
func PrepareStringStoreTester(driver StringStoreDriver) StringStoreTester {
	return &stringStoreTester{
		s1:     "abc",
		s2:     "def",
		driver: driver,
	}
}
// TestStringStore exercises the full string lifecycle: absence by default,
// put, idempotent re-put, and removal, followed by a clean shutdown.
func (s *stringStoreTester) TestStringStore(t *testing.T, cfg *DriverConfig) {
	ss, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, ss)

	// A fresh store contains neither fixture string.
	has, err := ss.HasString(s.s1)
	require.Nil(t, err)
	require.False(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.False(t, has)

	// Removing an absent string must fail.
	err = ss.RemoveString(s.s1)
	require.NotNil(t, err)

	// After putting s1, only s1 is present.
	err = ss.PutString(s.s1)
	require.Nil(t, err)

	has, err = ss.HasString(s.s1)
	require.Nil(t, err)
	require.True(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.False(t, has)

	// Putting an already-present string must succeed (idempotent).
	err = ss.PutString(s.s1)
	require.Nil(t, err)

	err = ss.PutString(s.s2)
	require.Nil(t, err)

	has, err = ss.HasString(s.s1)
	require.Nil(t, err)
	require.True(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.True(t, has)

	// Remove both and verify the store is empty again.
	err = ss.RemoveString(s.s1)
	require.Nil(t, err)

	err = ss.RemoveString(s.s2)
	require.Nil(t, err)

	has, err = ss.HasString(s.s1)
	require.Nil(t, err)
	require.False(t, has)

	has, err = ss.HasString(s.s2)
	require.Nil(t, err)
	require.False(t, has)

	errChan := ss.Stop()
	err = <-errChan
	require.Nil(t, err, "StringStore shutdown must not fail")
}
// IPStoreTester is a collection of tests for an IPStore driver.
// Every benchmark expects a new, clean storage. Every benchmark should be
// called with a DriverConfig that ensures this.
type IPStoreTester interface {
	TestIPStore(*testing.T, *DriverConfig)
	TestHasAllHasAny(*testing.T, *DriverConfig)
	TestNetworks(*testing.T, *DriverConfig)
	TestHasAllHasAnyNetworks(*testing.T, *DriverConfig)
}

// Compile-time check that ipStoreTester implements IPStoreTester.
var _ IPStoreTester = &ipStoreTester{}

// ipStoreTester carries the fixture addresses and networks for the suite.
type ipStoreTester struct {
	v6, v4, v4s    net.IP // v4s is the 4-byte To4() form of v4
	net1, net2     string // CIDR fixtures
	inNet1, inNet2 net.IP // addresses inside net1 / net2 respectively
	excluded       net.IP // address inside neither network
	driver         IPStoreDriver
}
// PrepareIPStoreTester prepares a reusable suite for IPStore driver
// tests.
func PrepareIPStoreTester(driver IPStoreDriver) IPStoreTester {
	return &ipStoreTester{
		v6: net.ParseIP("0c22:384e:0:0c22:384e::68"),
		v4: net.ParseIP("12.13.14.15"),
		// Same address as v4, but in its 4-byte representation.
		v4s:      net.ParseIP("12.13.14.15").To4(),
		net1:     "192.168.22.255/24",
		net2:     "192.168.23.255/24",
		inNet1:   net.ParseIP("192.168.22.22"),
		inNet2:   net.ParseIP("192.168.23.23"),
		excluded: net.ParseIP("10.154.243.22"),
		driver:   driver,
	}
}
// TestIPStore tests adding, querying and removing individual IPv4/IPv6
// addresses, including that the 4-byte and 16-byte forms of the same IPv4
// address are treated as equal.
func (s *ipStoreTester) TestIPStore(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, is)

	// check default state
	found, err := is.HasIP(s.v4)
	require.Nil(t, err)
	require.False(t, found)

	// check IPv4: both representations of the same address must match.
	err = is.AddIP(s.v4)
	require.Nil(t, err)

	found, err = is.HasIP(s.v4)
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasIP(s.v4s)
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasIP(s.v6)
	require.Nil(t, err)
	require.False(t, found)

	// check removes: removing an absent IP fails; removing via the 4-byte
	// form removes the address added in 16-byte form.
	err = is.RemoveIP(s.v6)
	require.NotNil(t, err)

	err = is.RemoveIP(s.v4s)
	require.Nil(t, err)

	found, err = is.HasIP(s.v4)
	require.Nil(t, err)
	require.False(t, found)

	// check IPv6 add/remove round trip.
	err = is.AddIP(s.v6)
	require.Nil(t, err)

	found, err = is.HasIP(s.v6)
	require.Nil(t, err)
	require.True(t, found)

	err = is.RemoveIP(s.v6)
	require.Nil(t, err)

	found, err = is.HasIP(s.v6)
	require.Nil(t, err)
	require.False(t, found)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}
// TestHasAllHasAny tests the set queries HasAnyIP (at least one member
// present) and HasAllIPs (every member present), including the nil-slice
// edge cases.
func (s *ipStoreTester) TestHasAllHasAny(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, is)

	// Vacuous truth: "any of nothing" is false, "all of nothing" is true.
	found, err := is.HasAnyIP(nil)
	require.Nil(t, err)
	require.False(t, found)

	found, err = is.HasAllIPs(nil)
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v6})
	require.Nil(t, err)
	require.False(t, found)

	// With only v4 stored: any{v6,v4} holds, all{v6,v4} does not.
	err = is.AddIP(s.v4)
	require.Nil(t, err)

	found, err = is.HasAnyIP([]net.IP{s.v6, s.v4})
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v6, s.v4})
	require.Nil(t, err)
	require.False(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v4})
	require.Nil(t, err)
	require.True(t, found)

	// Duplicates in the query slice must not affect the result.
	err = is.AddIP(s.v6)
	require.Nil(t, err)

	found, err = is.HasAnyIP([]net.IP{s.v6, s.v6})
	require.Nil(t, err)
	require.True(t, found)

	found, err = is.HasAllIPs([]net.IP{s.v6, s.v6})
	require.Nil(t, err)
	require.True(t, found)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}
// TestNetworks tests adding and removing CIDR networks and that addresses
// inside a stored network match via HasIP. Empty network strings must be
// rejected by both AddNetwork and RemoveNetwork.
func (s *ipStoreTester) TestNetworks(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, is)

	// Nothing matches on a fresh store.
	match, err := is.HasIP(s.inNet1)
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasIP(s.inNet2)
	require.Nil(t, err)
	require.False(t, match)

	// Invalid (empty) CIDR strings must fail.
	err = is.AddNetwork("")
	require.NotNil(t, err)

	err = is.RemoveNetwork("")
	require.NotNil(t, err)

	// Adding net1 makes only addresses inside net1 match.
	err = is.AddNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasIP(s.inNet1)
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasIP(s.inNet2)
	require.Nil(t, err)
	require.False(t, match)

	// Removing a network that was never added fails; removing net1 clears it.
	err = is.RemoveNetwork(s.net2)
	require.NotNil(t, err)

	err = is.RemoveNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasIP(s.inNet1)
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasIP(s.inNet2)
	require.Nil(t, err)
	require.False(t, match)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}
// TestHasAllHasAnyNetworks tests HasAnyIP/HasAllIPs against stored CIDR
// networks rather than individual addresses.
func (s *ipStoreTester) TestHasAllHasAnyNetworks(t *testing.T, cfg *DriverConfig) {
	is, err := s.driver.New(cfg)
	require.Nil(t, err)
	// Fixed: previously asserted NotNil on the tester receiver `s` instead
	// of the freshly created store `is`, so a nil store went undetected.
	require.NotNil(t, is)

	// Nothing matches on a fresh store.
	match, err := is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.False(t, match)

	// With only net1 stored: any{inNet1,inNet2} holds, all does not.
	err = is.AddNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	// With both networks stored, only the excluded address keeps HasAllIPs false.
	err = is.AddNetwork(s.net2)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2, s.excluded})
	require.Nil(t, err)
	require.False(t, match)

	// Removing net1 drops inNet1 from the match set.
	err = is.RemoveNetwork(s.net1)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.True(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	// Removing net2 empties the store again.
	err = is.RemoveNetwork(s.net2)
	require.Nil(t, err)

	match, err = is.HasAnyIP([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	match, err = is.HasAllIPs([]net.IP{s.inNet1, s.inNet2})
	require.Nil(t, err)
	require.False(t, match)

	errChan := is.Stop()
	err = <-errChan
	require.Nil(t, err, "IPStore shutdown must not fail")
}
// PeerStoreTester is a collection of tests for a PeerStore driver.
// Every benchmark expects a new, clean storage. Every benchmark should be
// called with a DriverConfig that ensures this.
type PeerStoreTester interface {
	// CompareEndpoints sets the function used to compare peers to a
	// comparison that only compares endpoints and omits PeerIDs.
	CompareEndpoints()

	TestPeerStore(*testing.T, *DriverConfig)
}

// Compile-time check that peerStoreTester implements PeerStoreTester.
var _ PeerStoreTester = &peerStoreTester{}

// peerStoreTester pairs the driver under test with the peer-equality
// predicate used by the assertions.
type peerStoreTester struct {
	driver       PeerStoreDriver
	equalityFunc func(a, b chihaya.Peer) bool
}
// PreparePeerStoreTester prepares a reusable suite for PeerStore driver
// tests.
// The tester will use PeerIDs and endpoints to compare peers.
func PreparePeerStoreTester(driver PeerStoreDriver) PeerStoreTester {
	return &peerStoreTester{
		driver:       driver,
		equalityFunc: func(a, b chihaya.Peer) bool { return a.Equal(b) },
	}
}

// CompareEndpoints switches the equality predicate to chihaya.Peer's
// EqualEndpoint, which omits PeerIDs from the comparison.
func (pt *peerStoreTester) CompareEndpoints() {
	pt.equalityFunc = func(a, b chihaya.Peer) bool { return a.EqualEndpoint(b) }
}
// peerInSlice reports whether peer occurs in peers according to the
// configured equality predicate.
func (pt *peerStoreTester) peerInSlice(peer chihaya.Peer, peers []chihaya.Peer) bool {
	for i := range peers {
		if pt.equalityFunc(peer, peers[i]) {
			return true
		}
	}
	return false
}
// TestPeerStore exercises the PeerStore contract end to end: puts, gets,
// counts, deletes, leecher graduation, announcing and garbage collection.
func (pt *peerStoreTester) TestPeerStore(t *testing.T, cfg *DriverConfig) {
	var (
		hash = chihaya.InfoHash([20]byte{})

		// Fixture: 4 leechers and 6 seeders, mixed IPv4 and IPv6.
		peers = []struct {
			seeder bool
			peerID string
			ip     string
			port   uint16
		}{
			{false, "-AZ3034-6wfG2wk6wWLc", "250.183.81.177", 5720},
			{false, "-AZ3042-6ozMq5q6Q3NX", "38.241.13.19", 4833},
			{false, "-BS5820-oy4La2MWGEFj", "fd45:7856:3dae::48", 2878},
			{false, "-AR6360-6oZyyMWoOOBe", "fd0a:29a8:8445::38", 3167},
			{true, "-AG2083-s1hiF8vGAAg0", "231.231.49.173", 1453},
			{true, "-AG3003-lEl2Mm4NEO4n", "254.99.84.77", 7032},
			{true, "-MR1100-00HS~T7*65rm", "211.229.100.129", 2614},
			{true, "-LK0140-ATIV~nbEQAMr", "fdad:c435:bf79::12", 4114},
			{true, "-KT2210-347143496631", "fdda:1b35:7d6e::9", 6179},
			{true, "-TR0960-6ep6svaa61r4", "fd7f:78f0:4c77::55", 4727},
		}
	)

	s, err := pt.driver.New(cfg)
	require.Nil(t, err)
	require.NotNil(t, s)

	// Insert every fixture peer into the appropriate role.
	for _, p := range peers {
		// Construct chihaya.Peer from test data.
		peer := chihaya.Peer{
			ID:   chihaya.PeerIDFromString(p.peerID),
			IP:   net.ParseIP(p.ip),
			Port: p.port,
		}

		if p.seeder {
			err = s.PutSeeder(hash, peer)
		} else {
			err = s.PutLeecher(hash, peer)
		}
		require.Nil(t, err)
	}

	// Counts must equal the sum of both returned peer sets.
	leechers1, leechers61, err := s.GetLeechers(hash)
	require.Nil(t, err)
	require.NotEmpty(t, leechers1)
	require.NotEmpty(t, leechers61)
	num := s.NumLeechers(hash)
	require.Equal(t, len(leechers1)+len(leechers61), num)

	seeders1, seeders61, err := s.GetSeeders(hash)
	require.Nil(t, err)
	require.NotEmpty(t, seeders1)
	require.NotEmpty(t, seeders61)
	num = s.NumSeeders(hash)
	require.Equal(t, len(seeders1)+len(seeders61), num)

	leechers := append(leechers1, leechers61...)
	seeders := append(seeders1, seeders61...)

	// Every fixture peer must appear in the matching set, then be deletable.
	for _, p := range peers {
		// Construct chihaya.Peer from test data.
		peer := chihaya.Peer{
			ID:   chihaya.PeerIDFromString(p.peerID),
			IP:   net.ParseIP(p.ip),
			Port: p.port,
		}

		if p.seeder {
			require.True(t, pt.peerInSlice(peer, seeders))
		} else {
			require.True(t, pt.peerInSlice(peer, leechers))
		}

		if p.seeder {
			err = s.DeleteSeeder(hash, peer)
		} else {
			err = s.DeleteLeecher(hash, peer)
		}
		require.Nil(t, err)
	}

	require.Zero(t, s.NumLeechers(hash))
	require.Zero(t, s.NumSeeders(hash))

	// Re-add all the peers to the peerStore.
	// NOTE(review): the Put errors are ignored here, unlike in the first
	// insertion loop — consider asserting them as well.
	for _, p := range peers {
		// Construct chihaya.Peer from test data.
		peer := chihaya.Peer{
			ID:   chihaya.PeerIDFromString(p.peerID),
			IP:   net.ParseIP(p.ip),
			Port: p.port,
		}
		if p.seeder {
			s.PutSeeder(hash, peer)
		} else {
			s.PutLeecher(hash, peer)
		}
	}

	// Check that there are 6 seeders, and 4 leechers.
	require.Equal(t, 6, s.NumSeeders(hash))
	require.Equal(t, 4, s.NumLeechers(hash))
	peer := chihaya.Peer{
		ID:   chihaya.PeerIDFromString(peers[0].peerID),
		IP:   net.ParseIP(peers[0].ip),
		Port: peers[0].port,
	}
	err = s.GraduateLeecher(hash, peer)
	require.Nil(t, err)
	// Check that there are 7 seeders, and 3 leechers after graduating a
	// leecher to a seeder.
	require.Equal(t, 7, s.NumSeeders(hash))
	require.Equal(t, 3, s.NumLeechers(hash))

	_, _, err = s.AnnouncePeers(hash, true, 5, peer, chihaya.Peer{})
	// Only test if it works, do not test the slices returned. They change
	// depending on the driver.
	require.Nil(t, err)

	// Garbage collecting with a cutoff of "now" must purge every peer.
	err = s.CollectGarbage(time.Now())
	require.Nil(t, err)
	require.Equal(t, 0, s.NumLeechers(hash))
	require.Equal(t, 0, s.NumSeeders(hash))

	errChan := s.Stop()
	err = <-errChan
	require.Nil(t, err, "PeerStore shutdown must not fail")
}

View file

@ -1,64 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package store
import (
"fmt"
"github.com/chihaya/chihaya/pkg/stopper"
)
// stringStoreDrivers holds every registered StringStoreDriver, keyed by the
// name passed to RegisterStringStoreDriver.
var stringStoreDrivers = make(map[string]StringStoreDriver)

// StringStore represents an interface for manipulating strings.
type StringStore interface {
	// PutString adds the given string to the StringStore.
	PutString(s string) error

	// HasString returns whether or not the StringStore contains the given
	// string.
	HasString(s string) (bool, error)

	// RemoveString removes the string from the string store.
	// Returns ErrResourceDoesNotExist if the given string is not contained
	// in the store.
	RemoveString(s string) error

	// Stopper provides the Stop method that stops the StringStore.
	// Stop should shut down the StringStore in a separate goroutine and send
	// an error to the channel if the shutdown failed. If the shutdown
	// was successful, the channel is to be closed.
	stopper.Stopper
}

// StringStoreDriver represents an interface for creating a handle to the
// storage of strings.
type StringStoreDriver interface {
	New(*DriverConfig) (StringStore, error)
}
// RegisterStringStoreDriver makes a driver available by the provided name.
//
// If this function is called twice with the same name or if the driver is nil,
// it panics.
func RegisterStringStoreDriver(name string, driver StringStoreDriver) {
	switch {
	case driver == nil:
		panic("store: could not register nil StringStoreDriver")
	case stringStoreDrivers[name] != nil:
		panic("store: could not register duplicate StringStoreDriver: " + name)
	}
	stringStoreDrivers[name] = driver
}
// OpenStringStore returns a StringStore specified by a configuration.
//
// An error is returned if no driver was registered under cfg.Name (usually a
// missing blank import of the driver package).
func OpenStringStore(cfg *DriverConfig) (StringStore, error) {
	driver, ok := stringStoreDrivers[cfg.Name]
	if !ok {
		// Report the driver name, not the whole config struct, so the message
		// reads e.g. `unknown StringStoreDriver "memory"`.
		return nil, fmt.Errorf("store: unknown StringStoreDriver %q (forgotten import?)", cfg.Name)
	}

	return driver.New(cfg)
}

View file

@ -1,18 +1,14 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package stopper
import (
"sync"
)
// AlreadyStopped is a closed error channel to be used by StopperFuncs when
// AlreadyStopped is a closed error channel to be used by Funcs when
// an element was already stopped.
var AlreadyStopped <-chan error
// AlreadyStoppedFunc is a StopperFunc that returns AlreadyStopped.
// AlreadyStoppedFunc is a Func that returns AlreadyStopped.
var AlreadyStoppedFunc = func() <-chan error { return AlreadyStopped }
func init() {
@ -34,17 +30,17 @@ type Stopper interface {
// StopGroup is a group that can be stopped.
type StopGroup struct {
stoppables []StopperFunc
stoppables []Func
stoppablesLock sync.Mutex
}
// StopperFunc is a function that can be used to provide a clean shutdown.
type StopperFunc func() <-chan error
// Func is a function that can be used to provide a clean shutdown.
type Func func() <-chan error
// NewStopGroup creates a new StopGroup.
func NewStopGroup() *StopGroup {
return &StopGroup{
stoppables: make([]StopperFunc, 0),
stoppables: make([]Func, 0),
}
}
@ -57,9 +53,9 @@ func (cg *StopGroup) Add(toAdd Stopper) {
cg.stoppables = append(cg.stoppables, toAdd.Stop)
}
// AddFunc adds a StopperFunc to the StopGroup.
// On the next call to Stop(), the StopperFunc will be called.
func (cg *StopGroup) AddFunc(toAddFunc StopperFunc) {
// AddFunc adds a Func to the StopGroup.
// On the next call to Stop(), the Func will be called.
func (cg *StopGroup) AddFunc(toAddFunc Func) {
cg.stoppablesLock.Lock()
defer cg.stoppablesLock.Unlock()

View file

@ -0,0 +1,388 @@
package memory
import (
"encoding/binary"
"errors"
"net"
"runtime"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/storage"
)
// ErrInvalidGCInterval is returned for a GarbageCollectionInterval that is
// less than or equal to zero.
var ErrInvalidGCInterval = errors.New("invalid garbage collection interval")

// Config holds the configuration of a memory PeerStore.
type Config struct {
	// GarbageCollectionInterval is the pause between two garbage collection
	// runs; New rejects values <= 0.
	GarbageCollectionInterval time.Duration `yaml:"gc_interval"`
	// PeerLifetime is how long a peer may go without announcing before it is
	// eligible for garbage collection.
	PeerLifetime time.Duration `yaml:"peer_lifetime"`
	// ShardCount is the requested number of logical shards; values <= 0
	// fall back to 1.
	ShardCount int `yaml:"shard_count"`
	// MaxNumWant caps the number of peers returned by AnnouncePeers.
	MaxNumWant int `yaml:"max_numwant"`
}
// New creates a new PeerStore backed by memory.
//
// It allocates twice cfg.ShardCount shards (shardIndex adds an offset of
// half the slice length for IPv6 peers) and starts a background goroutine
// that periodically purges peers which have not announced within
// cfg.PeerLifetime. The goroutine exits when the store's closed channel is
// closed by Stop.
//
// Returns ErrInvalidGCInterval if cfg.GarbageCollectionInterval <= 0.
// NOTE(review): cfg.PeerLifetime is not validated; a zero lifetime would
// make every GC run purge all peers — confirm this is intended.
func New(cfg Config) (storage.PeerStore, error) {
	shardCount := 1
	if cfg.ShardCount > 0 {
		shardCount = cfg.ShardCount
	}

	if cfg.GarbageCollectionInterval <= 0 {
		return nil, ErrInvalidGCInterval
	}

	ps := &peerStore{
		shards:     make([]*peerShard, shardCount*2),
		closed:     make(chan struct{}),
		maxNumWant: cfg.MaxNumWant,
	}

	for i := 0; i < shardCount*2; i++ {
		ps.shards[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
	}

	// Background garbage collector; stops when the store is stopped.
	go func() {
		for {
			select {
			case <-ps.closed:
				return
			case <-time.After(cfg.GarbageCollectionInterval):
				before := time.Now().Add(-cfg.PeerLifetime)
				log.Debugln("memory: purging peers with no announces since", before)
				ps.collectGarbage(before)
			}
		}
	}()

	return ps, nil
}
// serializedPeer is a compact binary encoding of a peer, as produced by
// newPeerKey: 20-byte peer ID | 2-byte big-endian port | 4- or 16-byte IP.
type serializedPeer string

// peerShard is one lock-guarded partition of the swarm map.
type peerShard struct {
	swarms map[bittorrent.InfoHash]swarm
	sync.RWMutex
}

// swarm holds the peers of a single infohash.
type swarm struct {
	// map serialized peer to mtime
	seeders  map[serializedPeer]int64
	leechers map[serializedPeer]int64
}

// peerStore is the in-memory implementation of storage.PeerStore.
type peerStore struct {
	shards     []*peerShard  // sharded by infohash; see shardIndex
	closed     chan struct{} // closed by Stop; methods panic afterwards
	maxNumWant int
}

// Compile-time check that peerStore implements storage.PeerStore.
var _ storage.PeerStore = &peerStore{}
// shardIndex returns the index of the shard responsible for the given
// infohash and peer. The first half of the shard slice serves IPv4 peers and
// the second half serves IPv6 peers.
//
// Fixed: the previous computation took the hash modulo the full slice length
// and then did `idx += idx + len/2` for IPv6, which doubles idx and can
// produce an index beyond len(s.shards), panicking on lookup.
func (s *peerStore) shardIndex(infoHash bittorrent.InfoHash, p bittorrent.Peer) uint32 {
	idx := binary.BigEndian.Uint32(infoHash[:4]) % uint32(len(s.shards)/2)
	if len(p.IP) == net.IPv6len {
		idx += uint32(len(s.shards) / 2)
	}
	return idx
}
// newPeerKey serializes a peer into the compact key format:
// 20-byte ID | 2-byte big-endian port | raw IP bytes.
func newPeerKey(p bittorrent.Peer) serializedPeer {
	buf := make([]byte, 22+len(p.IP))
	copy(buf, p.ID[:])
	binary.BigEndian.PutUint16(buf[20:22], p.Port)
	copy(buf[22:], p.IP)

	return serializedPeer(buf)
}
// decodePeerKey is the inverse of newPeerKey.
func decodePeerKey(pk serializedPeer) bittorrent.Peer {
	raw := []byte(pk)

	return bittorrent.Peer{
		ID:   bittorrent.PeerIDFromString(string(raw[:20])),
		Port: binary.BigEndian.Uint16(raw[20:22]),
		IP:   net.IP(raw[22:]),
	}
}
// PutSeeder records p as a seeder of ih, creating the swarm if necessary,
// and stamps it with the current time. Panics if the store was stopped.
func (s *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	shard := s.shards[s.shardIndex(ih, p)]
	shard.Lock()
	defer shard.Unlock()

	sw, ok := shard.swarms[ih]
	if !ok {
		sw = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
		shard.swarms[ih] = sw
	}
	sw.seeders[newPeerKey(p)] = time.Now().UnixNano()

	return nil
}
// DeleteSeeder removes p from the seeders of ih, dropping the swarm entirely
// once both peer sets are empty. Returns storage.ErrResourceDoesNotExist if
// the swarm or peer is unknown. Panics if the store was stopped.
func (s *peerStore) DeleteSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)
	shard := s.shards[s.shardIndex(ih, p)]
	shard.Lock()
	defer shard.Unlock()

	sw, ok := shard.swarms[ih]
	if !ok {
		return storage.ErrResourceDoesNotExist
	}
	if _, ok := sw.seeders[pk]; !ok {
		return storage.ErrResourceDoesNotExist
	}

	delete(sw.seeders, pk)
	if len(sw.seeders) == 0 && len(sw.leechers) == 0 {
		delete(shard.swarms, ih)
	}

	return nil
}
// PutLeecher records p as a leecher of ih, creating the swarm if necessary,
// and stamps it with the current time. Panics if the store was stopped.
func (s *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	shard := s.shards[s.shardIndex(ih, p)]
	shard.Lock()
	defer shard.Unlock()

	sw, ok := shard.swarms[ih]
	if !ok {
		sw = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
		shard.swarms[ih] = sw
	}
	sw.leechers[newPeerKey(p)] = time.Now().UnixNano()

	return nil
}
// DeleteLeecher removes p from the leechers of ih, dropping the swarm
// entirely once both peer sets are empty. Returns
// storage.ErrResourceDoesNotExist if the swarm or peer is unknown. Panics if
// the store was stopped.
func (s *peerStore) DeleteLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)
	shard := s.shards[s.shardIndex(ih, p)]
	shard.Lock()
	defer shard.Unlock()

	sw, ok := shard.swarms[ih]
	if !ok {
		return storage.ErrResourceDoesNotExist
	}
	if _, ok := sw.leechers[pk]; !ok {
		return storage.ErrResourceDoesNotExist
	}

	delete(sw.leechers, pk)
	if len(sw.seeders) == 0 && len(sw.leechers) == 0 {
		delete(shard.swarms, ih)
	}

	return nil
}
// GraduateLeecher promotes p from leecher to seeder in the swarm of ih. If p
// was not a leecher it is simply added as a seeder. Panics if the store was
// stopped.
func (s *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	pk := newPeerKey(p)
	shard := s.shards[s.shardIndex(ih, p)]
	shard.Lock()
	defer shard.Unlock()

	sw, ok := shard.swarms[ih]
	if !ok {
		sw = swarm{
			seeders:  make(map[serializedPeer]int64),
			leechers: make(map[serializedPeer]int64),
		}
		shard.swarms[ih] = sw
	}

	// Promotion: drop the leecher entry (no-op if absent) and record the
	// peer as a seeder with a fresh timestamp.
	delete(sw.leechers, pk)
	sw.seeders[pk] = time.Now().UnixNano()

	return nil
}
// AnnouncePeers returns up to numWant peers from the swarm of ih, preferring
// the opposite role of the announcer: seeders receive leechers; leechers
// receive seeders first, topped up with other leechers.
//
// numWant is capped at the store's configured maxNumWant. Returns
// storage.ErrResourceDoesNotExist if the swarm is unknown. Panics if the
// store was stopped.
//
// NOTE(review): only the final leecher loop skips the announcer itself; the
// seeder-branch loop and the seeder loop do not — confirm whether announcers
// should ever see themselves in the reply.
func (s *peerStore) AnnouncePeers(ih bittorrent.InfoHash, seeder bool, numWant int, announcer bittorrent.Peer) (peers []bittorrent.Peer, err error) {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	if numWant > s.maxNumWant {
		numWant = s.maxNumWant
	}

	shard := s.shards[s.shardIndex(ih, announcer)]
	shard.RLock()

	if _, ok := shard.swarms[ih]; !ok {
		shard.RUnlock()
		return nil, storage.ErrResourceDoesNotExist
	}

	if seeder {
		// Append as many leechers as possible.
		leechers := shard.swarms[ih].leechers
		for p := range leechers {
			decodedPeer := decodePeerKey(p)
			if numWant == 0 {
				break
			}

			peers = append(peers, decodedPeer)
			numWant--
		}
	} else {
		// Append as many seeders as possible.
		seeders := shard.swarms[ih].seeders
		for p := range seeders {
			decodedPeer := decodePeerKey(p)
			if numWant == 0 {
				break
			}

			peers = append(peers, decodedPeer)
			numWant--
		}

		// Append leechers until we reach numWant.
		leechers := shard.swarms[ih].leechers
		if numWant > 0 {
			for p := range leechers {
				decodedPeer := decodePeerKey(p)
				if numWant == 0 {
					break
				}

				// Never hand a leecher back its own endpoint.
				if decodedPeer.Equal(announcer) {
					continue
				}

				peers = append(peers, decodedPeer)
				numWant--
			}
		}
	}

	shard.RUnlock()
	return
}
// collectGarbage deletes all Peers from the PeerStore which are older than the
// cutoff time.
//
// This function must be able to execute while other methods on this interface
// are being executed in parallel.
//
// It snapshots each shard's infohashes under a read lock, then revisits each
// swarm under a short write lock, yielding the scheduler between swarms to
// keep concurrent announces responsive.
func (s *peerStore) collectGarbage(cutoff time.Time) error {
	select {
	case <-s.closed:
		panic("attempted to interact with stopped memory store")
	default:
	}

	cutoffUnix := cutoff.UnixNano()
	for _, shard := range s.shards {
		// Snapshot the keys so the write-locked phase below stays brief.
		shard.RLock()
		var infohashes []bittorrent.InfoHash
		for ih := range shard.swarms {
			infohashes = append(infohashes, ih)
		}
		shard.RUnlock()
		runtime.Gosched()

		for _, ih := range infohashes {
			shard.Lock()

			// The swarm may have been removed since the snapshot.
			if _, stillExists := shard.swarms[ih]; !stillExists {
				shard.Unlock()
				runtime.Gosched()
				continue
			}

			// Purge peers whose last announce is at or before the cutoff.
			for pk, mtime := range shard.swarms[ih].leechers {
				if mtime <= cutoffUnix {
					delete(shard.swarms[ih].leechers, pk)
				}
			}

			for pk, mtime := range shard.swarms[ih].seeders {
				if mtime <= cutoffUnix {
					delete(shard.swarms[ih].seeders, pk)
				}
			}

			// Drop the swarm entirely once both peer sets are empty.
			if len(shard.swarms[ih].seeders)|len(shard.swarms[ih].leechers) == 0 {
				delete(shard.swarms, ih)
			}

			shard.Unlock()
			runtime.Gosched()
		}

		runtime.Gosched()
	}

	return nil
}
// Stop asynchronously discards all stored data and signals shutdown. The
// returned channel is closed once the store has been torn down.
func (s *peerStore) Stop() <-chan error {
	done := make(chan error)
	go func() {
		// Swap in fresh, empty shards so the old data becomes garbage
		// collectable, then signal shutdown.
		fresh := make([]*peerShard, len(s.shards))
		for i := range fresh {
			fresh[i] = &peerShard{swarms: make(map[bittorrent.InfoHash]swarm)}
		}
		s.shards = fresh
		close(s.closed)
		close(done)
	}()
	return done
}

View file

@ -0,0 +1,42 @@
package memory
import (
"testing"
"time"
s "github.com/chihaya/chihaya/storage"
)
// createNew returns a fresh memory-backed PeerStore for benchmarking,
// panicking on configuration errors.
func createNew() s.PeerStore {
	cfg := Config{ShardCount: 1024, GarbageCollectionInterval: 10 * time.Minute}

	ps, err := New(cfg)
	if err != nil {
		panic(err)
	}
	return ps
}
// The benchmarks below delegate to the shared PeerStore benchmark suite in
// the storage package, each running against a fresh memory-backed store.
func BenchmarkPut(b *testing.B) { s.Put(b, createNew()) }

func BenchmarkPut1k(b *testing.B) { s.Put1k(b, createNew()) }

func BenchmarkPut1kInfohash(b *testing.B) { s.Put1kInfohash(b, createNew()) }

func BenchmarkPut1kInfohash1k(b *testing.B) { s.Put1kInfohash1k(b, createNew()) }

func BenchmarkPutDelete(b *testing.B) { s.PutDelete(b, createNew()) }

func BenchmarkPutDelete1k(b *testing.B) { s.PutDelete1k(b, createNew()) }

func BenchmarkPutDelete1kInfohash(b *testing.B) { s.PutDelete1kInfohash(b, createNew()) }

func BenchmarkPutDelete1kInfohash1k(b *testing.B) { s.PutDelete1kInfohash1k(b, createNew()) }

func BenchmarkDeleteNonexist(b *testing.B) { s.DeleteNonexist(b, createNew()) }

func BenchmarkDeleteNonexist1k(b *testing.B) { s.DeleteNonexist1k(b, createNew()) }

func BenchmarkDeleteNonexist1kInfohash(b *testing.B) { s.DeleteNonexist1kInfohash(b, createNew()) }

func BenchmarkDeleteNonexist1kInfohash1k(b *testing.B) { s.DeleteNonexist1kInfohash1k(b, createNew()) }

func BenchmarkPutGradDelete(b *testing.B) { s.PutGradDelete(b, createNew()) }

func BenchmarkPutGradDelete1k(b *testing.B) { s.PutGradDelete1k(b, createNew()) }

func BenchmarkPutGradDelete1kInfohash(b *testing.B) { s.PutGradDelete1kInfohash(b, createNew()) }

func BenchmarkPutGradDelete1kInfohash1k(b *testing.B) { s.PutGradDelete1kInfohash1k(b, createNew()) }

func BenchmarkGradNonexist(b *testing.B) { s.GradNonexist(b, createNew()) }

func BenchmarkGradNonexist1k(b *testing.B) { s.GradNonexist1k(b, createNew()) }

func BenchmarkGradNonexist1kInfohash(b *testing.B) { s.GradNonexist1kInfohash(b, createNew()) }

func BenchmarkGradNonexist1kInfohash1k(b *testing.B) { s.GradNonexist1kInfohash1k(b, createNew()) }

func BenchmarkAnnounceLeecher(b *testing.B) { s.AnnounceLeecher(b, createNew()) }

func BenchmarkAnnounceLeecher1kInfohash(b *testing.B) { s.AnnounceLeecher1kInfohash(b, createNew()) }

func BenchmarkAnnounceSeeder(b *testing.B) { s.AnnounceSeeder(b, createNew()) }

func BenchmarkAnnounceSeeder1kInfohash(b *testing.B) { s.AnnounceSeeder1kInfohash(b, createNew()) }

57
storage/storage.go Normal file
View file

@ -0,0 +1,57 @@
package storage
import (
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/stopper"
)
// ErrResourceDoesNotExist is the error returned by all delete methods in the
// store if the requested resource does not exist.
var ErrResourceDoesNotExist = bittorrent.ClientError("resource does not exist")

// PeerStore is an interface that abstracts the interactions of storing and
// manipulating Peers such that it can be implemented for various data stores.
type PeerStore interface {
	// PutSeeder adds a Seeder to the Swarm identified by the provided infoHash.
	PutSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error

	// DeleteSeeder removes a Seeder from the Swarm identified by the provided
	// infoHash.
	//
	// If the Swarm or Peer does not exist, this function should return
	// ErrResourceDoesNotExist.
	DeleteSeeder(infoHash bittorrent.InfoHash, p bittorrent.Peer) error

	// PutLeecher adds a Leecher to the Swarm identified by the provided
	// infoHash.
	PutLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error

	// DeleteLeecher removes a Leecher from the Swarm identified by the provided
	// infoHash.
	//
	// If the Swarm or Peer does not exist, this function should return
	// ErrResourceDoesNotExist.
	DeleteLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error

	// GraduateLeecher promotes a Leecher to a Seeder in the Swarm identified by
	// the provided infoHash.
	//
	// If the given Peer is not present as a Leecher, add the Peer as a Seeder
	// and return no error.
	GraduateLeecher(infoHash bittorrent.InfoHash, p bittorrent.Peer) error

	// AnnouncePeers is a best effort attempt to return Peers from the Swarm
	// identified by the provided infoHash. The returned Peers are required to be
	// either all IPv4 or all IPv6.
	//
	// The returned Peers should strive to be:
	// - as close to length equal to numWant as possible without going over
	// - all IPv4 or all IPv6 depending on the provided peer
	// - if seeder is true, should ideally return more leechers than seeders
	// - if seeder is false, should ideally return more seeders than leechers
	AnnouncePeers(infoHash bittorrent.InfoHash, seeder bool, numWant int, p bittorrent.Peer) (peers []bittorrent.Peer, err error)

	// Stopper is an interface that expects a Stop method that stops the
	// PeerStore. For more details see the documentation in the stopper package.
	stopper.Stopper
}

417
storage/storage_bench.go Normal file
View file

@ -0,0 +1,417 @@
package storage
import (
"math/rand"
"net"
"runtime"
"sync/atomic"
"testing"
"github.com/chihaya/chihaya/bittorrent"
)
// benchData is the shared fixture for all PeerStore benchmarks: 1000
// deterministically generated infohashes and 1000 peers.
type benchData struct {
	infohashes [1000]bittorrent.InfoHash
	peers      [1000]bittorrent.Peer
}
// generateInfohashes deterministically creates 1000 pseudo-random infohashes
// from a fixed seed.
func generateInfohashes() (a [1000]bittorrent.InfoHash) {
	r := rand.New(rand.NewSource(0))

	for i := range a {
		var raw [20]byte
		if n, err := r.Read(raw[:]); err != nil || n != 20 {
			panic("unable to create random bytes")
		}
		a[i] = bittorrent.InfoHash(raw)
	}

	return
}
// generatePeers deterministically creates 1000 pseudo-random peers with
// 4-byte IPs, 20-byte IDs and arbitrary ports, from a fixed seed.
func generatePeers() (a [1000]bittorrent.Peer) {
	r := rand.New(rand.NewSource(0))

	for i := range a {
		ip := make([]byte, 4)
		if n, err := r.Read(ip); err != nil || n != 4 {
			panic("unable to create random bytes")
		}

		var id [20]byte
		if n, err := r.Read(id[:]); err != nil || n != 20 {
			panic("unable to create random bytes")
		}

		a[i] = bittorrent.Peer{
			ID:   bittorrent.PeerID(id),
			IP:   net.IP(ip),
			Port: uint16(r.Uint32()),
		}
	}

	return
}
// executionFunc performs one benchmark iteration; i is the iteration index.
type executionFunc func(int, PeerStore, *benchData) error

// setupFunc seeds the PeerStore before the benchmark timer starts.
type setupFunc func(PeerStore, *benchData) error

// runBenchmark drives ef over ps, optionally in parallel, after running the
// optional setup function sf with deterministic fixture data.
//
// NOTE(review): in the parallel path b.Fatal is invoked from RunParallel
// worker goroutines, which the testing package documents as unsupported —
// consider b.Error plus an early return instead.
func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
	bd := &benchData{generateInfohashes(), generatePeers()}
	// Offset each parallel worker's starting index so workers touch
	// different fixture entries.
	spacing := int32(1000 / runtime.NumCPU())
	if sf != nil {
		err := sf(ps, bd)
		if err != nil {
			b.Fatal(err)
		}
	}
	offset := int32(0)

	b.ResetTimer()
	if parallel {
		b.RunParallel(func(pb *testing.PB) {
			i := int(atomic.AddInt32(&offset, spacing))
			for pb.Next() {
				err := ef(i, ps, bd)
				if err != nil {
					b.Fatal(err)
				}
				i++
			}
		})
	} else {
		for i := 0; i < b.N; i++ {
			err := ef(i, ps, bd)
			if err != nil {
				b.Fatal(err)
			}
		}
	}
	b.StopTimer()

	// Stop the store; any shutdown errors fail the benchmark.
	errChan := ps.Stop()
	for err := range errChan {
		b.Fatal(err)
	}
}
// Put benchmarks the PutSeeder method of a PeerStore by repeatedly Putting the
// same Peer for the same InfoHash.
//
// Put can run in parallel.
func Put(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(_ int, ps PeerStore, bd *benchData) error {
		return ps.PutSeeder(bd.infohashes[0], bd.peers[0])
	})
}

// Put1k benchmarks the PutSeeder method of a PeerStore by cycling through 1000
// Peers and Putting them into the swarm of one infohash.
//
// Put1k can run in parallel.
func Put1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		return ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000])
	})
}

// Put1kInfohash benchmarks the PutSeeder method of a PeerStore by cycling
// through 1000 infohashes and putting the same peer into their swarms.
//
// Put1kInfohash can run in parallel.
func Put1kInfohash(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		return ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
	})
}

// Put1kInfohash1k benchmarks the PutSeeder method of a PeerStore by cycling
// through 1000 infohashes and 1000 Peers and calling Put with them.
//
// Put1kInfohash1k can run in parallel.
func Put1kInfohash1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		return ps.PutSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
	})
}
// PutDelete benchmarks the PutSeeder and DeleteSeeder methods of a PeerStore by
// calling PutSeeder followed by DeleteSeeder for one Peer and one infohash.
//
// PutDelete can not run in parallel.
func PutDelete(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(_ int, ps PeerStore, bd *benchData) error {
		if err := ps.PutSeeder(bd.infohashes[0], bd.peers[0]); err != nil {
			return err
		}
		return ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
	})
}

// PutDelete1k benchmarks the PutSeeder and DeleteSeeder methods in the same way
// PutDelete does, but with one from 1000 Peers per iteration.
//
// PutDelete1k can not run in parallel.
func PutDelete1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
		if err := ps.PutSeeder(bd.infohashes[0], bd.peers[i%1000]); err != nil {
			return err
		}
		return ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
	})
}
// PutDelete1kInfohash behaves like PutDelete1k with 1000 infohashes instead of
// 1000 Peers.
//
// PutDelete1kInfohash can not run in parallel.
func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
		err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
		if err != nil {
			// Fixed: this branch was previously empty, silently swallowing
			// PutSeeder errors; propagate like the sibling benchmarks do.
			return err
		}
		return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
	})
}
// PutDelete1kInfohash1k behaves like PutDelete1k with 1000 infohashes in
// addition to 1000 Peers.
//
// PutDelete1kInfohash1k can not run in parallel.
func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
		if err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]); err != nil {
			return err
		}
		return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
	})
}
// DeleteNonexist benchmarks the DeleteSeeder method of a PeerStore by
// attempting to delete a Peer that is nonexistent.
//
// DeleteNonexist can run in parallel.
func DeleteNonexist(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The delete is expected to fail; the error is deliberately ignored.
		_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
		return nil
	})
}
// DeleteNonexist1k benchmarks the DeleteSeeder method of a PeerStore by
// attempting to delete one of 1000 nonexistent Peers.
//
// DeleteNonexist1k can run in parallel.
func DeleteNonexist1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The delete is expected to fail; the error is deliberately ignored.
		ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
		return nil
	})
}
// DeleteNonexist1kInfohash benchmarks the DeleteSeeder method of a PeerStore by
// attempting to delete one Peer from one of 1000 infohashes.
//
// DeleteNonexist1kInfohash can run in parallel.
func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The delete is expected to fail; the error is deliberately ignored.
		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
		return nil
	})
}
// DeleteNonexist1kInfohash1k benchmarks the Delete method of a PeerStore by
// attempting to delete one of 1000 Peers from one of 1000 Infohashes.
//
// DeleteNonexist1kInfohash1k can run in parallel.
func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The delete is expected to fail; the error is deliberately ignored.
		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
		return nil
	})
}
// GradNonexist benchmarks the GraduateLeecher method of a PeerStore by
// attempting to graduate a nonexistent Peer.
//
// GradNonexist can run in parallel.
func GradNonexist(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The graduation is expected to fail; the error is deliberately ignored.
		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
		return nil
	})
}
// GradNonexist1k benchmarks the GraduateLeecher method of a PeerStore by
// attempting to graduate one of 1000 nonexistent Peers.
//
// GradNonexist1k can run in parallel.
func GradNonexist1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The graduation is expected to fail; the error is deliberately ignored.
		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
		return nil
	})
}
// GradNonexist1kInfohash benchmarks the GraduateLeecher method of a PeerStore
// by attempting to graduate a nonexistent Peer for one of 1000 Infohashes.
//
// GradNonexist1kInfohash can run in parallel.
func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The graduation is expected to fail; the error is deliberately ignored.
		ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
		return nil
	})
}
// GradNonexist1kInfohash1k benchmarks the GraduateLeecher method of a PeerStore
// by attempting to graduate one of 1000 nonexistent Peers for one of 1000
// infohashes.
//
// GradNonexist1kInfohash1k can run in parallel.
func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
		// The graduation is expected to fail; the error is deliberately ignored.
		_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
		return nil
	})
}
// PutGradDelete benchmarks the PutLeecher, GraduateLeecher and DeleteSeeder
// methods of a PeerStore by adding one leecher to a swarm, promoting it to a
// seeder and deleting the seeder.
//
// PutGradDelete can not run in parallel.
func PutGradDelete(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
		if err := ps.PutLeecher(bd.infohashes[0], bd.peers[0]); err != nil {
			return err
		}
		if err := ps.GraduateLeecher(bd.infohashes[0], bd.peers[0]); err != nil {
			return err
		}
		return ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
	})
}
// PutGradDelete1k behaves like PutGradDelete with one of 1000 Peers.
//
// PutGradDelete1k can not run in parallel.
func PutGradDelete1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
		peer := bd.peers[i%1000]
		if err := ps.PutLeecher(bd.infohashes[0], peer); err != nil {
			return err
		}
		if err := ps.GraduateLeecher(bd.infohashes[0], peer); err != nil {
			return err
		}
		return ps.DeleteSeeder(bd.infohashes[0], peer)
	})
}
// PutGradDelete1kInfohash behaves like PutGradDelete with one of 1000
// infohashes.
//
// PutGradDelete1kInfohash can not run in parallel.
func PutGradDelete1kInfohash(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
		ih := bd.infohashes[i%1000]
		if err := ps.PutLeecher(ih, bd.peers[0]); err != nil {
			return err
		}
		if err := ps.GraduateLeecher(ih, bd.peers[0]); err != nil {
			return err
		}
		return ps.DeleteSeeder(ih, bd.peers[0])
	})
}
// PutGradDelete1kInfohash1k behaves like PutGradDelete with one of 1000 Peers
// and one of 1000 infohashes.
//
// PutGradDelete1kInfohash1k can not run in parallel.
func PutGradDelete1kInfohash1k(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
		err := ps.PutLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
		if err != nil {
			return err
		}
		err = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
		if err != nil {
			return err
		}
		err = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
		return err
	})
}
// putPeers seeds a PeerStore with 1000 swarms, each holding 500 leechers and
// 500 seeders, for use as fixture data by the announce benchmarks.
func putPeers(ps PeerStore, bd *benchData) error {
	for i := 0; i < 1000; i++ {
		for j := 0; j < 1000; j++ {
			// The first half of each swarm's peers are leechers, the rest
			// are seeders.
			put := ps.PutSeeder
			if j < 500 {
				put = ps.PutLeecher
			}
			if err := put(bd.infohashes[i], bd.peers[j]); err != nil {
				return err
			}
		}
	}
	return nil
}
// AnnounceLeecher benchmarks the AnnouncePeers method of a PeerStore for
// announcing a leecher.
// The swarm announced to has 500 seeders and 500 leechers.
//
// AnnounceLeecher can run in parallel.
func AnnounceLeecher(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
		// seeder=false announces the peer as a leecher; 50 is the number of
		// peers requested.
		if _, err := ps.AnnouncePeers(bd.infohashes[0], false, 50, bd.peers[0]); err != nil {
			return err
		}
		return nil
	})
}
// AnnounceLeecher1kInfohash behaves like AnnounceLeecher with one of 1000
// infohashes.
//
// AnnounceLeecher1kInfohash can run in parallel.
func AnnounceLeecher1kInfohash(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
		if _, err := ps.AnnouncePeers(bd.infohashes[i%1000], false, 50, bd.peers[0]); err != nil {
			return err
		}
		return nil
	})
}
// AnnounceSeeder behaves like AnnounceLeecher with a seeder instead of a
// leecher.
//
// AnnounceSeeder can run in parallel.
func AnnounceSeeder(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
		// seeder=true announces the peer as a seeder.
		if _, err := ps.AnnouncePeers(bd.infohashes[0], true, 50, bd.peers[0]); err != nil {
			return err
		}
		return nil
	})
}
// AnnounceSeeder1kInfohash behaves like AnnounceSeeder with one of 1000
// infohashes.
//
// AnnounceSeeder1kInfohash can run in parallel.
func AnnounceSeeder1kInfohash(b *testing.B, ps PeerStore) {
	runBenchmark(b, ps, true, putPeers, func(i int, ps PeerStore, bd *benchData) error {
		if _, err := ps.AnnouncePeers(bd.infohashes[i%1000], true, 50, bd.peers[0]); err != nil {
			return err
		}
		return nil
	})
}

View file

@ -1,140 +0,0 @@
// Copyright 2016 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package tracker
import "github.com/chihaya/chihaya"
// AnnounceHandler is a function that operates on an AnnounceResponse before it
// has been delivered to a client.
type AnnounceHandler func(*chihaya.TrackerConfig, *chihaya.AnnounceRequest, *chihaya.AnnounceResponse) error
// AnnounceMiddleware is a higher-order function used to implement the chaining
// of AnnounceHandlers: it wraps the next handler and returns a new one.
type AnnounceMiddleware func(AnnounceHandler) AnnounceHandler
// AnnounceMiddlewareConstructor is a function that creates a new
// AnnounceMiddleware from a MiddlewareConfig.
type AnnounceMiddlewareConstructor func(chihaya.MiddlewareConfig) (AnnounceMiddleware, error)
// AnnounceChain is a chain of AnnounceMiddlewares, applied in insertion order.
type AnnounceChain struct{ mw []AnnounceMiddleware }
// Append appends AnnounceMiddlewares to the AnnounceChain.
func (c *AnnounceChain) Append(mw ...AnnounceMiddleware) {
	for _, m := range mw {
		c.mw = append(c.mw, m)
	}
}
// Handler builds an AnnounceChain into an AnnounceHandler.
func (c *AnnounceChain) Handler() AnnounceHandler {
	// Start from a terminal no-op handler and wrap it with each middleware
	// from last to first, so that mw[0] executes first at call time.
	var h AnnounceHandler = func(cfg *chihaya.TrackerConfig, req *chihaya.AnnounceRequest, resp *chihaya.AnnounceResponse) error {
		return nil
	}
	for i := len(c.mw); i > 0; i-- {
		h = c.mw[i-1](h)
	}
	return h
}
// announceMiddlewareConstructors is the global registry of named
// AnnounceMiddleware constructors, populated by the Register* functions below.
var announceMiddlewareConstructors = make(map[string]AnnounceMiddlewareConstructor)
// RegisterAnnounceMiddlewareConstructor makes a configurable middleware
// globally available under the provided name.
//
// If this function is called twice with the same name or if the constructor is
// nil, it panics.
func RegisterAnnounceMiddlewareConstructor(name string, ctor AnnounceMiddlewareConstructor) {
	if ctor == nil {
		panic("tracker: could not register nil AnnounceMiddlewareConstructor")
	}
	if _, exists := announceMiddlewareConstructors[name]; exists {
		panic("tracker: could not register duplicate AnnounceMiddleware: " + name)
	}
	announceMiddlewareConstructors[name] = ctor
}
// RegisterAnnounceMiddleware makes a middleware globally available under the
// provided name.
//
// This function is intended to register middleware that has no configuration.
// If this function is called twice with the same name or if the middleware is
// nil, it panics.
func RegisterAnnounceMiddleware(name string, mw AnnounceMiddleware) {
	if mw == nil {
		panic("tracker: could not register nil AnnounceMiddleware")
	}
	// Wrap the configuration-free middleware in a constructor that ignores
	// its MiddlewareConfig.
	ctor := func(_ chihaya.MiddlewareConfig) (AnnounceMiddleware, error) {
		return mw, nil
	}
	RegisterAnnounceMiddlewareConstructor(name, ctor)
}
// ScrapeHandler is a function that operates on a ScrapeResponse before it has
// been delivered to a client.
type ScrapeHandler func(*chihaya.TrackerConfig, *chihaya.ScrapeRequest, *chihaya.ScrapeResponse) error
// ScrapeMiddleware is a higher-order function used to implement the chaining
// of ScrapeHandlers: it wraps the next handler and returns a new one.
type ScrapeMiddleware func(ScrapeHandler) ScrapeHandler
// ScrapeMiddlewareConstructor is a function that creates a new
// ScrapeMiddleware from a MiddlewareConfig.
type ScrapeMiddlewareConstructor func(chihaya.MiddlewareConfig) (ScrapeMiddleware, error)
// ScrapeChain is a chain of ScrapeMiddlewares, applied in insertion order.
type ScrapeChain struct{ mw []ScrapeMiddleware }
// Append appends ScrapeMiddlewares to the ScrapeChain.
func (c *ScrapeChain) Append(mw ...ScrapeMiddleware) {
	for _, m := range mw {
		c.mw = append(c.mw, m)
	}
}
// Handler builds the ScrapeChain into a ScrapeHandler.
func (c *ScrapeChain) Handler() ScrapeHandler {
	// Start from a terminal no-op handler and wrap it with each middleware
	// from last to first, so that mw[0] executes first at call time.
	var h ScrapeHandler = func(cfg *chihaya.TrackerConfig, req *chihaya.ScrapeRequest, resp *chihaya.ScrapeResponse) error {
		return nil
	}
	for i := len(c.mw); i > 0; i-- {
		h = c.mw[i-1](h)
	}
	return h
}
// scrapeMiddlewareConstructors is the global registry of named
// ScrapeMiddleware constructors, populated by the Register* functions below.
var scrapeMiddlewareConstructors = make(map[string]ScrapeMiddlewareConstructor)
// RegisterScrapeMiddlewareConstructor makes a configurable middleware globally
// available under the provided name.
//
// If this function is called twice with the same name or if the constructor is
// nil, it panics.
func RegisterScrapeMiddlewareConstructor(name string, ctor ScrapeMiddlewareConstructor) {
	if ctor == nil {
		panic("tracker: could not register nil ScrapeMiddlewareConstructor")
	}
	if _, exists := scrapeMiddlewareConstructors[name]; exists {
		panic("tracker: could not register duplicate ScrapeMiddleware: " + name)
	}
	scrapeMiddlewareConstructors[name] = ctor
}
// RegisterScrapeMiddleware makes a middleware globally available under the
// provided name.
//
// This function is intended to register middleware that has no configuration.
// If this function is called twice with the same name or if the middleware is
// nil, it panics.
func RegisterScrapeMiddleware(name string, mw ScrapeMiddleware) {
	if mw == nil {
		panic("tracker: could not register nil ScrapeMiddleware")
	}
	// Wrap the configuration-free middleware in a constructor that ignores
	// its MiddlewareConfig.
	ctor := func(_ chihaya.MiddlewareConfig) (ScrapeMiddleware, error) {
		return mw, nil
	}
	RegisterScrapeMiddlewareConstructor(name, ctor)
}

Some files were not shown because too many files have changed in this diff Show more