clean the slate!

Jimmy Zelinskie 2016-01-25 00:39:16 -05:00
parent e37f453b34
commit 5c27c960f0
42 changed files with 0 additions and 5076 deletions

@@ -1,5 +0,0 @@
# This is the official list of Chihaya authors for copyright purposes, in alphabetical order.
Jimmy Zelinskie <jimmyzelinskie@gmail.com>
Justin Li <jli@j-li.net>

@@ -1,237 +0,0 @@
# Configuration
Chihaya's behaviour is customized by setting up a JSON configuration file.
Available keys are as follows:
##### `httpListenAddr`
type: string
default: "localhost:6881"
The listen address for the HTTP server. If only a port is specified, the tracker will listen on all interfaces. If left empty, the tracker will not run a HTTP endpoint.
##### `httpRequestTimeout`
type: duration
default: "4s"
The duration to allow outstanding requests to survive before forcefully terminating them.
##### `httpReadTimeout`
type: duration
default: "4s"
The maximum duration before timing out read of the request.
##### `httpWriteTimeout`
type: duration
default: "4s"
The maximum duration before timing out write of the request.
##### `httpListenLimit`
type: integer
default: 0
Limits the number of outstanding requests. Set to `0` to disable.
##### `udpListenAddr`
type: string
default: "localhost:6881"
The listen address for the UDP server. If only a port is specified, the tracker will listen on all interfaces. If left empty, the tracker will not run a UDP endpoint.
##### `createOnAnnounce`
type: bool
default: true
Whether to register new torrents with the tracker when any client announces (`true`), or to return an error if the torrent doesn't exist (`false`).
##### `purgeInactiveTorrents`
type: bool
default: true
Whether torrents should be forgotten when there are no active peers.
##### `announce`
type: duration
default: "30m"
The announce `interval` value sent to clients. This specifies how long clients should wait between regular announces.
##### `minAnnounce`
type: duration
default: "30m"
The announce `min_interval` value sent to clients. This theoretically specifies the minimum allowed time between announces, but most clients don't really respect it.
##### `defaultNumWant`
type: integer
default: 50
The default maximum number of peers to return if the client has not requested a specific number.
##### `allowIPSpoofing`
type: bool
default: true
Whether peers are allowed to set their own IP via the various supported methods, or whether these are ignored. This must be enabled for dual-stack IP support, since there is no other way to determine both IPs of a peer.
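With spoofing enabled, a client may pass its addresses explicitly as announce query parameters named `ip`, `ipv4`, and `ipv6` (see `http/tracker.go`). A hypothetical request fragment, using placeholder addresses and omitting the other required parameters:

```
/announce?ipv4=203.0.113.7&ipv6=2001:db8::7&port=6881
```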
##### `dualStackedPeers`
type: bool
default: true
Whether peers may have both an IPv4 and an IPv6 address; if disabled, only one IP per peer will be used.
##### `realIPHeader`
type: string
default: blank
An optional HTTP header indicating the upstream IP, for example `X-Forwarded-For` or `X-Real-IP`. Use this when running the tracker behind a reverse proxy.
##### `respectAF`
type: bool
default: false
Whether responses should only include peers of the same address family as the announcing peer, or if peers of any family may be returned (i.e. both IPv4 and IPv6).
##### `clientWhitelistEnabled`
type: bool
default: false
Enables the peer ID whitelist.
##### `clientWhitelist`
type: array of strings
default: []
List of peer ID prefixes to allow if `clientWhitelistEnabled` is set to true.
##### `torrentMapShards`
type: integer
default: 1
Number of internal torrent maps to use. Leave this at 1 in general; raising it can improve performance when there are many unique torrents and few peers per torrent.
##### `reapInterval`
type: duration
default: "60s"
Interval at which a search for inactive peers should be performed.
##### `reapRatio`
type: float64
default: 1.25
Peers are considered inactive if they haven't announced for `reapRatio * minAnnounce`. For example, with the defaults (`reapRatio` of 1.25 and `minAnnounce` of 15m), a peer is reaped after roughly 18 minutes and 45 seconds without announcing.
##### `apiListenAddr`
type: string
default: "localhost:6880"
The listen address for the HTTP API. If only a port is specified, the tracker will listen on all interfaces. If left empty, the tracker will not run the HTTP API.
##### `apiRequestTimeout`
type: duration
default: "4s"
The duration to allow outstanding requests to survive before forcefully terminating them.
##### `apiReadTimeout`
type: duration
default: "4s"
The maximum duration before timing out read of the request.
##### `apiWriteTimeout`
type: duration
default: "4s"
The maximum duration before timing out write of the request.
##### `apiListenLimit`
type: integer
default: 0
Limits the number of outstanding requests. Set to `0` to disable.
##### `driver`
type: string
default: "noop"
Sets the backend driver to load. The included `"noop"` driver provides no functionality.
##### `statsBufferSize`
type: integer
default: 0
The size of the event-queues for statistics.
##### `includeMemStats`
type: bool
default: true
Whether to include information about memory in the statistics.
##### `verboseMemStats`
type: bool
default: false
Whether the information about memory should be verbose.
##### `memStatsInterval`
type: duration
default: "5s"
Interval at which to collect statistics about memory.
##### `jwkSetURI`
type: string
default: ""
If this string is not empty, then the tracker will attempt to use JWTs to validate infohashes before announces. The format for the JSON at this endpoint can be found at [the RFC for JWKs](https://tools.ietf.org/html/draft-ietf-jose-json-web-key-41#page-10) with the addition of an "issuer" key. Simply stated, this feature requires two fields at this JSON endpoint: "keys" and "issuer". "keys" is a list of JWKs that can be used to validate JWTs and "issuer" should match the "iss" claim in the JWT. The lifetime of a JWK is based upon standard HTTP caching headers and falls back to 5 minutes if no cache headers are provided.
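A minimal sketch of the JSON such an endpoint might serve, assuming a single RSA signing key (the values here are placeholders; the key parameters depend on your key type):

```json
{
  "issuer": "https://auth.example.com/",
  "keys": [
    {
      "kty": "RSA",
      "kid": "example-key-1",
      "use": "sig",
      "n": "<base64url-encoded modulus>",
      "e": "AQAB"
    }
  ]
}
```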
##### `jwkSetUpdateInterval`
type: duration
default: "5m"
The interval at which keys are refreshed from `jwkSetURI`. Because the fallback lifetime for keys without cache headers is 5 minutes, this value should not be set below 5 minutes unless you know your `jwkSetURI` endpoint sets caching headers.
##### `jwtAudience`
type: string
default: ""
The audience (`aud`) claim that is used to validate JWTs.

@@ -1,77 +0,0 @@
## Communication
Currently, real-time conversation happens in [#chihaya] on [freenode].
We are currently attempting to have more information available on GitHub.
[#chihaya]: http://webchat.freenode.net?channels=chihaya
[freenode]: http://freenode.net
## Pull request procedure
Please don't write massive patches without prior communication, as it will most
likely lead to confusion and time wasted for everyone. However, small
unannounced fixes are always welcome!
Pull requests will be treated as "review requests", and we will give feedback
on [style] and substance that we expect to see addressed before merging.
Changes contributed via pull request should focus on a single issue at a time.
We will not accept pull requests that try to "sneak" unrelated changes in.
The average contribution flow is as follows:
- Create a topic branch from where you want to base your work. This is usually `master`.
- Make commits of logical units.
- Make sure your commit messages are in the [proper format].
- Push your changes to a topic branch in your fork of the repository.
- Submit a pull request.
- Your PR will be reviewed and merged by one of the maintainers.
Any new files should include the license header found at the top of every
source file.
[style]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md#style
[proper format]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md#commit-messages
## Style
### Go
The project follows idiomatic [Go conventions] for style. If you're just
starting out writing Go, you can check out this [meta-package] that documents
idiomatic style decisions you will find in open source Go code.
[Go conventions]: https://github.com/golang/go/wiki/CodeReviewComments
[meta-package]: https://github.com/jzelinskie/conventions
### Commit Messages
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line states what changed, and the
body of the commit describes why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.

@@ -1,33 +0,0 @@
# vim: ft=dockerfile
FROM golang
MAINTAINER Jimmy Zelinskie <jimmyzelinskie@gmail.com>
# Add files
WORKDIR /go/src/github.com/chihaya/chihaya/
RUN mkdir -p /go/src/github.com/chihaya/chihaya/
# Dependencies
RUN go get github.com/tools/godep
ADD Godeps /go/src/github.com/chihaya/chihaya/Godeps
RUN godep restore
# Add source
ADD *.go /go/src/github.com/chihaya/chihaya/
ADD api /go/src/github.com/chihaya/chihaya/api
ADD cmd /go/src/github.com/chihaya/chihaya/cmd
ADD config /go/src/github.com/chihaya/chihaya/config
ADD http /go/src/github.com/chihaya/chihaya/http
ADD stats /go/src/github.com/chihaya/chihaya/stats
ADD tracker /go/src/github.com/chihaya/chihaya/tracker
ADD udp /go/src/github.com/chihaya/chihaya/udp
# Install
RUN go install github.com/chihaya/chihaya/cmd/chihaya
# Configuration/environment
VOLUME ["/config"]
EXPOSE 6880-6882
# docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro quay.io/jzelinskie/chihaya:latest -v=5
ENTRYPOINT ["chihaya", "-config=/config/config.json", "-logtostderr=true"]
CMD ["-v=5"]

Godeps/Godeps.json (generated)
@@ -1,57 +0,0 @@
{
"ImportPath": "github.com/chihaya/chihaya",
"GoVersion": "go1.5.1",
"Deps": [
{
"ImportPath": "github.com/chihaya/bencode",
"Rev": "3c485a8d166ff6a79baba90c2c2da01c8348e930"
},
{
"ImportPath": "github.com/coreos/go-oidc/http",
"Rev": "ec2746d2ccb220e81c41b0b0cb2d4a1cc23f7950"
},
{
"ImportPath": "github.com/coreos/go-oidc/jose",
"Rev": "ec2746d2ccb220e81c41b0b0cb2d4a1cc23f7950"
},
{
"ImportPath": "github.com/coreos/go-systemd/journal",
"Comment": "v4-36-gdd4f6b8",
"Rev": "dd4f6b87c2a80813d1a01790344322da19ff195e"
},
{
"ImportPath": "github.com/coreos/pkg/capnslog",
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
},
{
"ImportPath": "github.com/golang/glog",
"Rev": "fca8c8854093a154ff1eb580aae10276ad6b1b5f"
},
{
"ImportPath": "github.com/julienschmidt/httprouter",
"Comment": "v1.1-14-g21439ef",
"Rev": "21439ef4d70ba4f3e2a5ed9249e7b03af4019b40"
},
{
"ImportPath": "github.com/pushrax/bufferpool",
"Rev": "7d6e1653dee10a165d1f357f3a57bc8031e9621b"
},
{
"ImportPath": "github.com/pushrax/faststats",
"Rev": "0fc2c5e41a187240ffaa09320eea7df9f8071388"
},
{
"ImportPath": "github.com/pushrax/flatjson",
"Rev": "86044f1c998d49053e13293029414ddb63f3a422"
},
{
"ImportPath": "github.com/tylerb/graceful",
"Comment": "v1.2.3",
"Rev": "48afeb21e2fcbcff0f30bd5ad6b97747b0fae38e"
},
{
"ImportPath": "golang.org/x/net/netutil",
"Rev": "520af5de654dc4dd4f0f65aa40e66dbbd9043df1"
}
]
}

Godeps/Readme (generated)
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

LICENSE
@@ -1,24 +0,0 @@
Chihaya is released under a BSD 2-Clause license, reproduced below.
Copyright (c) 2015, The Chihaya Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,99 +0,0 @@
# Chihaya
[![GoDoc](https://godoc.org/github.com/chihaya/chihaya?status.svg)](https://godoc.org/github.com/chihaya/chihaya)
[![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
[![Build Status](https://api.travis-ci.org/chihaya/chihaya.svg?branch=master)](https://travis-ci.org/chihaya/chihaya)
[![Docker Repository on Quay.io](https://quay.io/repository/jzelinskie/chihaya/status "Docker Repository on Quay.io")](https://quay.io/repository/jzelinskie/chihaya)
Chihaya is a high-performance [BitTorrent tracker] written in the Go
programming language. It is still heavily under development and the current
`master` branch should probably not be used in production
(unless you know what you're doing).
Current features include:
- Public tracker feature-set with full compatibility with what exists of the BitTorrent spec
- Low resource consumption, and fast, asynchronous request processing
- Full IPv6 support, including handling for dual-stacked peers
- Extensive metrics for visibility into the tracker and swarm's performance
- Ability to prioritize peers in local subnets to reduce backbone contention
- JWT Validation to approve the usage of a given infohash.
Planned features include:
- Private tracker feature-set with compatibility for a [Gazelle]-like deployment (WIP)
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
[gazelle]: https://github.com/whatcd/gazelle
## When would I use Chihaya?
Chihaya is eventually meant for every kind of BitTorrent tracker deployment.
Chihaya has been used to replace instances of [opentracker] and also instances of [ocelot].
Chihaya handles torrent announces and scrapes in memory.
However, using a backend driver, Chihaya can also asynchronously provide deltas to maintain a set of persistent data without throttling a database.
This is particularly useful behavior for private tracker use-cases.
[opentracker]: http://erdgeist.org/arts/software/opentracker
[ocelot]: https://github.com/WhatCD/Ocelot
## Running Chihaya
### Configuration
Copy [`example_config.json`] to your choice of location, and update the values as required.
An explanation of the available keys can be found in [CONFIGURATION.md].
[`example_config.json`]: https://github.com/chihaya/chihaya/blob/master/example_config.json
[CONFIGURATION.md]: https://github.com/chihaya/chihaya/blob/master/CONFIGURATION.md
### Docker
```sh
$ docker pull quay.io/jzelinskie/chihaya:latest
$ export CHIHAYA_LOG_LEVEL=5 # most verbose, and the default
$ docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro quay.io/jzelinskie/chihaya:latest -v=$CHIHAYA_LOG_LEVEL
```
## Developing Chihaya
### Building & Installing
Chihaya requires Go 1.5+ (preferably the latest stable Go), [Godep], and a previously set up [Go environment].
[Godep]: https://github.com/tools/godep
[Go environment]: https://golang.org/doc/code.html
```sh
$ export GOPATH=$PWD/chihaya
$ git clone https://github.com/chihaya/chihaya.git chihaya/src/github.com/chihaya/chihaya
$ cd chihaya/src/github.com/chihaya/chihaya/cmd/chihaya/
$ godep restore
$ go install github.com/chihaya/chihaya/cmd/chihaya
```
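Once installed, the tracker can be started by pointing it at a configuration file. A typical invocation (the `-config` flag is defined in `chihaya.go`, and `-logtostderr`/`-v` are glog flags, as used in the Dockerfile):

```sh
$ $GOPATH/bin/chihaya -config=/path/to/config.json -logtostderr=true -v=5
```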
### Testing
Chihaya has end-to-end test coverage for announces in addition to unit tests for isolated components.
To run the tests, use:
```sh
$ cd $GOPATH/src/github.com/chihaya/chihaya
$ godep go test -v ./...
```
There is also a set of benchmarks for performance-critical sections of Chihaya.
These can be run similarly:
```sh
$ cd $GOPATH/src/github.com/chihaya/chihaya
$ godep go test -v ./... -bench .
```
### Contributing
See [CONTRIBUTING.md] for guidelines to contributing to the project.
Feel free to make issues or ask questions.
Our maintainers are also always idle in #chihaya on freenode.
[CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md

@@ -1,162 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package api implements a RESTful HTTP JSON API server for a BitTorrent
// tracker.
package api
import (
"net"
"net/http"
"time"
"github.com/golang/glog"
"github.com/julienschmidt/httprouter"
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
)
// Server represents an API server for a torrent tracker.
type Server struct {
config *config.Config
tracker *tracker.Tracker
grace *graceful.Server
stopping bool
}
// NewServer returns a new API server for a given configuration and tracker
// instance.
func NewServer(cfg *config.Config, tkr *tracker.Tracker) *Server {
return &Server{
config: cfg,
tracker: tkr,
}
}
// Stop cleanly shuts down the server.
func (s *Server) Stop() {
if !s.stopping {
s.grace.Stop(s.grace.Timeout)
}
}
// Serve runs an API server, blocking until the server has shut down.
func (s *Server) Serve() {
glog.V(0).Info("Starting API on ", s.config.APIConfig.ListenAddr)
if s.config.APIConfig.ListenLimit != 0 {
glog.V(0).Info("Limiting connections to ", s.config.APIConfig.ListenLimit)
}
grace := &graceful.Server{
Timeout: s.config.APIConfig.RequestTimeout.Duration,
ConnState: s.connState,
ListenLimit: s.config.APIConfig.ListenLimit,
NoSignalHandling: true,
Server: &http.Server{
Addr: s.config.APIConfig.ListenAddr,
Handler: newRouter(s),
ReadTimeout: s.config.APIConfig.ReadTimeout.Duration,
WriteTimeout: s.config.APIConfig.WriteTimeout.Duration,
},
}
s.grace = grace
grace.SetKeepAlivesEnabled(false)
grace.ShutdownInitiated = func() { s.stopping = true }
if err := grace.ListenAndServe(); err != nil {
if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
glog.Errorf("Failed to gracefully run API server: %s", err.Error())
return
}
}
glog.Info("API server shut down cleanly")
}
// newRouter returns a router with all the routes.
func newRouter(s *Server) *httprouter.Router {
r := httprouter.New()
if s.config.ClientWhitelistEnabled {
r.GET("/clients/:clientID", makeHandler(s.getClient))
r.PUT("/clients/:clientID", makeHandler(s.putClient))
r.DELETE("/clients/:clientID", makeHandler(s.delClient))
}
r.GET("/torrents/:infohash", makeHandler(s.getTorrent))
r.PUT("/torrents/:infohash", makeHandler(s.putTorrent))
r.DELETE("/torrents/:infohash", makeHandler(s.delTorrent))
r.GET("/check", makeHandler(s.check))
r.GET("/stats", makeHandler(s.stats))
return r
}
// connState is used by graceful in order to gracefully shutdown. It also
// keeps track of connection stats.
func (s *Server) connState(conn net.Conn, state http.ConnState) {
switch state {
case http.StateNew:
stats.RecordEvent(stats.AcceptedConnection)
case http.StateClosed:
stats.RecordEvent(stats.ClosedConnection)
case http.StateHijacked:
panic("connection impossibly hijacked")
// Ignore the following cases.
case http.StateActive, http.StateIdle:
default:
glog.Errorf("Connection transitioned to unknown state %s (%d)", state, state)
}
}
// ResponseHandler is an HTTP handler that returns a status code.
type ResponseHandler func(http.ResponseWriter, *http.Request, httprouter.Params) (int, error)
// makeHandler wraps our ResponseHandlers while timing requests, collecting
// stats, logging, and handling errors.
func makeHandler(handler ResponseHandler) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
start := time.Now()
httpCode, err := handler(w, r, p)
duration := time.Since(start)
var msg string
if err != nil {
msg = err.Error()
} else if httpCode != http.StatusOK {
msg = http.StatusText(httpCode)
}
if len(msg) > 0 {
http.Error(w, msg, httpCode)
stats.RecordEvent(stats.ErroredRequest)
}
if len(msg) > 0 || glog.V(2) {
reqString := r.URL.Path + " " + r.RemoteAddr
if glog.V(3) {
reqString = r.URL.RequestURI() + " " + r.RemoteAddr
}
if len(msg) > 0 {
glog.Errorf("[API - %9s] %s (%d - %s)", duration, reqString, httpCode, msg)
} else {
glog.Infof("[API - %9s] %s (%d)", duration, reqString, httpCode)
}
}
stats.RecordEvent(stats.HandledRequest)
stats.RecordTiming(stats.ResponseTime, duration)
}
}

@@ -1,120 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package api
import (
"encoding/json"
"net/http"
"net/url"
"runtime"
"github.com/julienschmidt/httprouter"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
const jsonContentType = "application/json; charset=UTF-8"
func handleError(err error) (int, error) {
if err == nil {
return http.StatusOK, nil
} else if _, ok := err.(models.NotFoundError); ok {
stats.RecordEvent(stats.ClientError)
return http.StatusNotFound, nil
} else if _, ok := err.(models.ClientError); ok {
stats.RecordEvent(stats.ClientError)
return http.StatusBadRequest, nil
}
return http.StatusInternalServerError, err
}
func (s *Server) check(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
_, err := w.Write([]byte("STILL-ALIVE"))
return handleError(err)
}
func (s *Server) stats(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
w.Header().Set("Content-Type", jsonContentType)
var err error
var val interface{}
query := r.URL.Query()
stats.DefaultStats.GoRoutines = runtime.NumGoroutine()
if _, flatten := query["flatten"]; flatten {
val = stats.DefaultStats.Flattened()
} else {
val = stats.DefaultStats
}
if _, pretty := query["pretty"]; pretty {
var buf []byte
buf, err = json.MarshalIndent(val, "", " ")
if err == nil {
_, err = w.Write(buf)
}
} else {
err = json.NewEncoder(w).Encode(val)
}
return handleError(err)
}
func (s *Server) getTorrent(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
infohash, err := url.QueryUnescape(p.ByName("infohash"))
if err != nil {
return http.StatusNotFound, err
}
torrent, err := s.tracker.FindTorrent(infohash)
if err != nil {
return handleError(err)
}
w.Header().Set("Content-Type", jsonContentType)
e := json.NewEncoder(w)
return handleError(e.Encode(torrent))
}
func (s *Server) putTorrent(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
var torrent models.Torrent
err := json.NewDecoder(r.Body).Decode(&torrent)
if err != nil {
return http.StatusBadRequest, err
}
s.tracker.PutTorrent(&torrent)
return http.StatusOK, nil
}
func (s *Server) delTorrent(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
infohash, err := url.QueryUnescape(p.ByName("infohash"))
if err != nil {
return http.StatusNotFound, err
}
s.tracker.DeleteTorrent(infohash)
return http.StatusOK, nil
}
func (s *Server) getClient(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
if err := s.tracker.ClientApproved(p.ByName("clientID")); err != nil {
return http.StatusNotFound, err
}
return http.StatusOK, nil
}
func (s *Server) putClient(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
s.tracker.PutClient(p.ByName("clientID"))
return http.StatusOK, nil
}
func (s *Server) delClient(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
s.tracker.DeleteClient(p.ByName("clientID"))
return http.StatusOK, nil
}

@@ -1,121 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package chihaya implements the ability to boot the Chihaya BitTorrent
// tracker with your own imports that can dynamically register additional
// functionality.
package chihaya
import (
"flag"
"os"
"os/signal"
"runtime"
"sync"
"syscall"
"github.com/golang/glog"
"github.com/chihaya/chihaya/api"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/http"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
"github.com/chihaya/chihaya/udp"
)
var (
maxProcs int
configPath string
)
func init() {
flag.IntVar(&maxProcs, "maxprocs", runtime.NumCPU(), "maximum parallel threads")
flag.StringVar(&configPath, "config", "", "path to the configuration file")
}
type server interface {
Serve()
Stop()
}
// Boot starts Chihaya. By exporting this function, anyone can import their own
// custom drivers into their own package main and then call chihaya.Boot.
func Boot() {
defer glog.Flush()
flag.Parse()
runtime.GOMAXPROCS(maxProcs)
glog.V(1).Info("Set max threads to ", maxProcs)
debugBoot()
defer debugShutdown()
cfg, err := config.Open(configPath)
if err != nil {
glog.Fatalf("Failed to parse configuration file: %s\n", err)
}
if cfg == &config.DefaultConfig {
glog.V(1).Info("Using default config")
} else {
glog.V(1).Infof("Loaded config file: %s", configPath)
}
stats.DefaultStats = stats.New(cfg.StatsConfig)
tkr, err := tracker.New(cfg)
if err != nil {
glog.Fatal("New: ", err)
}
var servers []server
if cfg.APIConfig.ListenAddr != "" {
servers = append(servers, api.NewServer(cfg, tkr))
}
if cfg.HTTPConfig.ListenAddr != "" {
servers = append(servers, http.NewServer(cfg, tkr))
}
if cfg.UDPConfig.ListenAddr != "" {
servers = append(servers, udp.NewServer(cfg, tkr))
}
var wg sync.WaitGroup
for _, srv := range servers {
wg.Add(1)
// If you don't explicitly pass the server, every goroutine captures the
// last server in the list.
go func(srv server) {
defer wg.Done()
srv.Serve()
}(srv)
}
shutdown := make(chan os.Signal)
signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM)
go func() {
wg.Wait()
signal.Stop(shutdown)
close(shutdown)
}()
<-shutdown
glog.Info("Shutting down...")
for _, srv := range servers {
srv.Stop()
}
<-shutdown
if err := tkr.Close(); err != nil {
glog.Errorf("Failed to shut down tracker cleanly: %s", err.Error())
}
}

@@ -1,11 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package main
import "github.com/chihaya/chihaya"
func main() {
chihaya.Boot()
}

@@ -1,193 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package config implements the configuration for a BitTorrent tracker
package config
import (
"encoding/json"
"io"
"os"
"time"
)
// Duration wraps a time.Duration and adds JSON marshalling.
type Duration struct{ time.Duration }
// MarshalJSON transforms a duration into JSON.
func (d *Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.String())
}
// UnmarshalJSON transforms JSON into a Duration.
func (d *Duration) UnmarshalJSON(b []byte) error {
var str string
// Return unmarshalling errors rather than silently discarding them.
if err := json.Unmarshal(b, &str); err != nil {
return err
}
parsed, err := time.ParseDuration(str)
d.Duration = parsed
return err
}
// SubnetConfig is the configuration used to specify if local peers should be
// given a preference when responding to an announce.
type SubnetConfig struct {
PreferredSubnet bool `json:"preferredSubnet,omitempty"`
PreferredIPv4Subnet int `json:"preferredIPv4Subnet,omitempty"`
PreferredIPv6Subnet int `json:"preferredIPv6Subnet,omitempty"`
}
// NetConfig is the configuration used to tune networking behaviour.
type NetConfig struct {
AllowIPSpoofing bool `json:"allowIPSpoofing"`
DualStackedPeers bool `json:"dualStackedPeers"`
RealIPHeader string `json:"realIPHeader"`
RespectAF bool `json:"respectAF"`
SubnetConfig
}
// StatsConfig is the configuration used to record runtime statistics.
type StatsConfig struct {
BufferSize int `json:"statsBufferSize"`
IncludeMem bool `json:"includeMemStats"`
VerboseMem bool `json:"verboseMemStats"`
MemUpdateInterval Duration `json:"memStatsInterval"`
}
// WhitelistConfig is the configuration used to enable and store a whitelist of
// acceptable torrent client peer ID prefixes.
type WhitelistConfig struct {
ClientWhitelistEnabled bool `json:"clientWhitelistEnabled"`
ClientWhitelist []string `json:"clientWhitelist,omitempty"`
}
// TrackerConfig is the configuration for tracker functionality.
type TrackerConfig struct {
CreateOnAnnounce bool `json:"createOnAnnounce"`
PurgeInactiveTorrents bool `json:"purgeInactiveTorrents"`
Announce Duration `json:"announce"`
MinAnnounce Duration `json:"minAnnounce"`
ReapInterval Duration `json:"reapInterval"`
ReapRatio float64 `json:"reapRatio"`
NumWantFallback int `json:"defaultNumWant"`
TorrentMapShards int `json:"torrentMapShards"`
JWKSetURI string `json:"jwkSetURI"`
JWKSetUpdateInterval Duration `json:"jwkSetUpdateInterval"`
JWTAudience string `json:"jwtAudience"`
NetConfig
WhitelistConfig
}
// APIConfig is the configuration for an HTTP JSON API server.
type APIConfig struct {
ListenAddr string `json:"apiListenAddr"`
RequestTimeout Duration `json:"apiRequestTimeout"`
ReadTimeout Duration `json:"apiReadTimeout"`
WriteTimeout Duration `json:"apiWriteTimeout"`
ListenLimit int `json:"apiListenLimit"`
}
// HTTPConfig is the configuration for the HTTP protocol.
type HTTPConfig struct {
ListenAddr string `json:"httpListenAddr"`
RequestTimeout Duration `json:"httpRequestTimeout"`
ReadTimeout Duration `json:"httpReadTimeout"`
WriteTimeout Duration `json:"httpWriteTimeout"`
ListenLimit int `json:"httpListenLimit"`
}
// UDPConfig is the configuration for the UDP protocol.
type UDPConfig struct {
ListenAddr string `json:"udpListenAddr"`
ReadBufferSize int `json:"udpReadBufferSize"`
}
// Config is the global configuration for an instance of Chihaya.
type Config struct {
TrackerConfig
APIConfig
HTTPConfig
UDPConfig
StatsConfig
}
// DefaultConfig is a configuration that can be used as a fallback value.
var DefaultConfig = Config{
TrackerConfig: TrackerConfig{
CreateOnAnnounce: true,
PurgeInactiveTorrents: true,
Announce: Duration{30 * time.Minute},
MinAnnounce: Duration{15 * time.Minute},
ReapInterval: Duration{60 * time.Second},
ReapRatio: 1.25,
NumWantFallback: 50,
TorrentMapShards: 1,
JWKSetURI: "",
JWKSetUpdateInterval: Duration{5 * time.Minute},
JWTAudience: "",
NetConfig: NetConfig{
AllowIPSpoofing: true,
DualStackedPeers: true,
RespectAF: false,
},
WhitelistConfig: WhitelistConfig{
ClientWhitelistEnabled: false,
},
},
APIConfig: APIConfig{
ListenAddr: "localhost:6880",
RequestTimeout: Duration{10 * time.Second},
ReadTimeout: Duration{10 * time.Second},
WriteTimeout: Duration{10 * time.Second},
},
HTTPConfig: HTTPConfig{
ListenAddr: "localhost:6881",
RequestTimeout: Duration{10 * time.Second},
ReadTimeout: Duration{10 * time.Second},
WriteTimeout: Duration{10 * time.Second},
},
UDPConfig: UDPConfig{
ListenAddr: "localhost:6882",
},
StatsConfig: StatsConfig{
BufferSize: 0,
IncludeMem: true,
VerboseMem: false,
MemUpdateInterval: Duration{5 * time.Second},
},
}
// Open is a shortcut to open a file, read it, and generate a Config.
// It supports relative and absolute paths. Given "", it returns DefaultConfig.
func Open(path string) (*Config, error) {
if path == "" {
return &DefaultConfig, nil
}
f, err := os.Open(os.ExpandEnv(path))
if err != nil {
return nil, err
}
defer f.Close()
conf, err := Decode(f)
if err != nil {
return nil, err
}
return conf, nil
}
// Decode wraps an io.Reader in a JSON decoder and decodes it into a *Config.
func Decode(r io.Reader) (*Config, error) {
conf := DefaultConfig
err := json.NewDecoder(r).Decode(&conf)
return &conf, err
}

@@ -1,56 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package chihaya
import (
"flag"
"net/http"
"os"
"runtime/pprof"
_ "net/http/pprof"
"github.com/golang/glog"
)
var (
profile string
debugAddr string
profileFile *os.File
)
func init() {
flag.StringVar(&profile, "profile", "", "if non-empty, path to write CPU profiling data")
flag.StringVar(&debugAddr, "debug", "", "if non-empty, address to serve debug data")
}
func debugBoot() {
var err error
if debugAddr != "" {
go func() {
glog.Info("Starting debug HTTP on ", debugAddr)
glog.Fatal(http.ListenAndServe(debugAddr, nil))
}()
}
if profile != "" {
profileFile, err = os.Create(profile)
if err != nil {
glog.Fatalf("Failed to create profile file: %s\n", err)
}
pprof.StartCPUProfile(profileFile)
glog.Info("Started profiling")
}
}
func debugShutdown() {
if profileFile != nil {
profileFile.Close()
pprof.StopCPUProfile()
glog.Info("Stopped profiling")
}
}

@@ -1,35 +0,0 @@
{
"createOnAnnounce": true,
"purgeInactiveTorrents": true,
"announce": "30m",
"minAnnounce": "15m",
"reapInterval": "60s",
"reapRatio": 1.25,
"defaultNumWant": 50,
"torrentMapShards": 1,
"jwkSetURI": "",
"jwkSetUpdateInterval": "5m",
"jwtAudience": "",
"allowIPSpoofing": true,
"dualStackedPeers": true,
"realIPHeader": "",
"respectAF": false,
"clientWhitelistEnabled": false,
"clientWhitelist": ["OP1011"],
"apiListenAddr": "localhost:6880",
"apiRequestTimeout": "4s",
"apiReadTimeout": "4s",
"apiWriteTimeout": "4s",
"apiListenLimit": 0,
"udpListenAddr": "localhost:6881",
"httpListenAddr": "localhost:6881",
"httpRequestTimeout": "4s",
"httpReadTimeout": "4s",
"httpWriteTimeout": "4s",
"httpListenLimit": 0,
"driver": "noop",
"statsBufferSize": 0,
"includeMemStats": true,
"verboseMemStats": false,
"memStatsInterval": "5s"
}

@@ -1,299 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"net/http/httptest"
"reflect"
"strconv"
"testing"
"time"
"github.com/chihaya/bencode"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/tracker"
"github.com/chihaya/chihaya/tracker/models"
)
func TestPublicAnnounce(t *testing.T) {
srv, err := setupTracker(nil, nil)
if err != nil {
t.Fatal(err)
}
defer srv.Close()
peer1 := makePeerParams("peer1", true)
peer2 := makePeerParams("peer2", true)
peer3 := makePeerParams("peer3", false)
peer1["event"] = "started"
expected := makeResponse(1, 0, peer1)
checkAnnounce(peer1, expected, srv, t)
expected = makeResponse(2, 0, peer2)
checkAnnounce(peer2, expected, srv, t)
expected = makeResponse(2, 1, peer1, peer2)
checkAnnounce(peer3, expected, srv, t)
peer1["event"] = "stopped"
expected = makeResponse(1, 1, nil)
checkAnnounce(peer1, expected, srv, t)
expected = makeResponse(1, 1, peer2)
checkAnnounce(peer3, expected, srv, t)
}
func TestTorrentPurging(t *testing.T) {
tkr, err := tracker.New(&config.DefaultConfig)
if err != nil {
t.Fatalf("failed to create new tracker instance: %s", err)
}
srv, err := setupTracker(nil, tkr)
if err != nil {
t.Fatal(err)
}
defer srv.Close()
// Add one seeder.
peer := makePeerParams("peer1", true)
announce(peer, srv)
// Make sure the torrent was created.
_, err = tkr.FindTorrent(infoHash)
if err != nil {
t.Fatalf("expected torrent to exist after announce: %s", err)
}
// Remove seeder.
peer = makePeerParams("peer1", true)
peer["event"] = "stopped"
announce(peer, srv)
_, err = tkr.FindTorrent(infoHash)
if err != models.ErrTorrentDNE {
t.Fatalf("expected torrent to have been purged: %s", err)
}
}
func TestStalePeerPurging(t *testing.T) {
cfg := config.DefaultConfig
cfg.MinAnnounce = config.Duration{10 * time.Millisecond}
cfg.ReapInterval = config.Duration{10 * time.Millisecond}
tkr, err := tracker.New(&cfg)
if err != nil {
t.Fatalf("failed to create new tracker instance: %s", err)
}
srv, err := setupTracker(&cfg, tkr)
if err != nil {
t.Fatal(err)
}
defer srv.Close()
// Add one seeder.
peer1 := makePeerParams("peer1", true)
announce(peer1, srv)
// Make sure the torrent was created.
_, err = tkr.FindTorrent(infoHash)
if err != nil {
t.Fatalf("expected torrent to exist after announce: %s", err)
}
// Add a leecher.
peer2 := makePeerParams("peer2", false)
expected := makeResponse(1, 1, peer1)
expected["min interval"] = int64(0)
checkAnnounce(peer2, expected, srv, t)
// Let them both expire.
time.Sleep(30 * time.Millisecond)
_, err = tkr.FindTorrent(infoHash)
if err != models.ErrTorrentDNE {
t.Fatalf("expected torrent to have been purged: %s", err)
}
}
func TestPreferredSubnet(t *testing.T) {
cfg := config.DefaultConfig
cfg.PreferredSubnet = true
cfg.PreferredIPv4Subnet = 8
cfg.PreferredIPv6Subnet = 16
cfg.DualStackedPeers = false
srv, err := setupTracker(&cfg, nil)
if err != nil {
t.Fatal(err)
}
defer srv.Close()
peerA1 := makePeerParams("peerA1", false, "44.0.0.1")
peerA2 := makePeerParams("peerA2", false, "44.0.0.2")
peerA3 := makePeerParams("peerA3", false, "44.0.0.3")
peerA4 := makePeerParams("peerA4", false, "44.0.0.4")
peerB1 := makePeerParams("peerB1", false, "45.0.0.1")
peerB2 := makePeerParams("peerB2", false, "45.0.0.2")
peerC1 := makePeerParams("peerC1", false, "fc01::1")
peerC2 := makePeerParams("peerC2", false, "fc01::2")
peerC3 := makePeerParams("peerC3", false, "fc01::3")
peerD1 := makePeerParams("peerD1", false, "fc02::1")
peerD2 := makePeerParams("peerD2", false, "fc02::2")
expected := makeResponse(0, 1, peerA1)
checkAnnounce(peerA1, expected, srv, t)
expected = makeResponse(0, 2, peerA1)
checkAnnounce(peerA2, expected, srv, t)
expected = makeResponse(0, 3, peerA1, peerA2)
checkAnnounce(peerB1, expected, srv, t)
peerB2["numwant"] = "1"
expected = makeResponse(0, 4, peerB1)
checkAnnounce(peerB2, expected, srv, t)
checkAnnounce(peerB2, expected, srv, t)
peerA3["numwant"] = "2"
expected = makeResponse(0, 5, peerA1, peerA2)
checkAnnounce(peerA3, expected, srv, t)
checkAnnounce(peerA3, expected, srv, t)
peerA4["numwant"] = "3"
expected = makeResponse(0, 6, peerA1, peerA2, peerA3)
checkAnnounce(peerA4, expected, srv, t)
checkAnnounce(peerA4, expected, srv, t)
expected = makeResponse(0, 7, peerA1, peerA2, peerA3, peerA4, peerB1, peerB2)
checkAnnounce(peerC1, expected, srv, t)
peerC2["numwant"] = "1"
expected = makeResponse(0, 8, peerC1)
checkAnnounce(peerC2, expected, srv, t)
checkAnnounce(peerC2, expected, srv, t)
peerC3["numwant"] = "2"
expected = makeResponse(0, 9, peerC1, peerC2)
checkAnnounce(peerC3, expected, srv, t)
checkAnnounce(peerC3, expected, srv, t)
expected = makeResponse(0, 10, peerA1, peerA2, peerA3, peerA4, peerB1, peerB2, peerC1, peerC2, peerC3)
checkAnnounce(peerD1, expected, srv, t)
peerD2["numwant"] = "1"
expected = makeResponse(0, 11, peerD1)
checkAnnounce(peerD2, expected, srv, t)
checkAnnounce(peerD2, expected, srv, t)
}
func TestCompactAnnounce(t *testing.T) {
srv, err := setupTracker(nil, nil)
if err != nil {
t.Fatal(err)
}
defer srv.Close()
compact := "\xff\x09\x7f\x05\x04\xd2"
ip := "255.9.127.5" // Use the same IP for all of them so we don't have to worry about order.
peer1 := makePeerParams("peer1", false, ip)
peer1["compact"] = "1"
peer2 := makePeerParams("peer2", false, ip)
peer2["compact"] = "1"
peer3 := makePeerParams("peer3", false, ip)
peer3["compact"] = "1"
expected := makeResponse(0, 1)
expected["peers"] = compact
checkAnnounce(peer1, expected, srv, t)
expected = makeResponse(0, 2)
expected["peers"] = compact
checkAnnounce(peer2, expected, srv, t)
expected = makeResponse(0, 3)
expected["peers"] = compact + compact
checkAnnounce(peer3, expected, srv, t)
}
func makePeerParams(id string, seed bool, extra ...string) params {
left := "1"
if seed {
left = "0"
}
ip := "10.0.0.1"
if len(extra) >= 1 {
ip = extra[0]
}
return params{
"info_hash": infoHash,
"peer_id": id,
"ip": ip,
"port": "1234",
"uploaded": "0",
"downloaded": "0",
"left": left,
"compact": "0",
"numwant": "50",
}
}
func peerFromParams(peer params) bencode.Dict {
port, _ := strconv.ParseInt(peer["port"], 10, 64)
return bencode.Dict{
"peer id": peer["peer_id"],
"ip": peer["ip"],
"port": port,
}
}
func makeResponse(seeders, leechers int64, peers ...params) bencode.Dict {
dict := bencode.Dict{
"complete": seeders,
"incomplete": leechers,
"interval": int64(1800),
"min interval": int64(900),
}
if !(len(peers) == 1 && peers[0] == nil) {
peerList := bencode.List{}
for _, peer := range peers {
peerList = append(peerList, peerFromParams(peer))
}
dict["peers"] = peerList
}
return dict
}
func checkAnnounce(p params, expected interface{}, srv *httptest.Server, t *testing.T) bool {
body, err := announce(p, srv)
if err != nil {
t.Error(err)
return false
}
if e, ok := expected.(bencode.Dict); ok {
sortPeersInResponse(e)
}
got, err := bencode.Unmarshal(body)
if e, ok := got.(bencode.Dict); ok {
sortPeersInResponse(e)
}
if !reflect.DeepEqual(got, expected) {
t.Errorf("\ngot: %#v\nwanted: %#v", got, expected)
return false
}
return true
}

@@ -1,152 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package http implements a BitTorrent tracker over the HTTP protocol as per
// BEP 3.
package http
import (
"net"
"net/http"
"time"
"github.com/golang/glog"
"github.com/julienschmidt/httprouter"
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
)
// ResponseHandler is an HTTP handler that returns a status code.
type ResponseHandler func(http.ResponseWriter, *http.Request, httprouter.Params) (int, error)
// Server represents an HTTP serving torrent tracker.
type Server struct {
config *config.Config
tracker *tracker.Tracker
grace *graceful.Server
stopping bool
}
// makeHandler wraps our ResponseHandlers while timing requests, collecting
// stats, logging, and handling errors.
func makeHandler(handler ResponseHandler) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
start := time.Now()
httpCode, err := handler(w, r, p)
duration := time.Since(start)
var msg string
if err != nil {
msg = err.Error()
} else if httpCode != http.StatusOK {
msg = http.StatusText(httpCode)
}
if len(msg) > 0 {
http.Error(w, msg, httpCode)
stats.RecordEvent(stats.ErroredRequest)
}
if len(msg) > 0 || glog.V(2) {
reqString := r.URL.Path + " " + r.RemoteAddr
if glog.V(3) {
reqString = r.URL.RequestURI() + " " + r.RemoteAddr
}
if len(msg) > 0 {
glog.Errorf("[HTTP - %9s] %s (%d - %s)", duration, reqString, httpCode, msg)
} else {
glog.Infof("[HTTP - %9s] %s (%d)", duration, reqString, httpCode)
}
}
stats.RecordEvent(stats.HandledRequest)
stats.RecordTiming(stats.ResponseTime, duration)
}
}
// newRouter returns a router with all the routes.
func newRouter(s *Server) *httprouter.Router {
r := httprouter.New()
r.GET("/announce", makeHandler(s.serveAnnounce))
r.GET("/scrape", makeHandler(s.serveScrape))
return r
}
// connState is used by graceful in order to gracefully shutdown. It also
// keeps track of connection stats.
func (s *Server) connState(conn net.Conn, state http.ConnState) {
switch state {
case http.StateNew:
stats.RecordEvent(stats.AcceptedConnection)
case http.StateClosed:
stats.RecordEvent(stats.ClosedConnection)
case http.StateHijacked:
panic("connection impossibly hijacked")
// Ignore the following cases.
case http.StateActive, http.StateIdle:
default:
glog.Errorf("Connection transitioned to unknown state %s (%d)", state, state)
}
}
// Serve runs an HTTP server, blocking until the server has shut down.
func (s *Server) Serve() {
glog.V(0).Info("Starting HTTP on ", s.config.HTTPConfig.ListenAddr)
if s.config.HTTPConfig.ListenLimit != 0 {
glog.V(0).Info("Limiting connections to ", s.config.HTTPConfig.ListenLimit)
}
grace := &graceful.Server{
Timeout: s.config.HTTPConfig.RequestTimeout.Duration,
ConnState: s.connState,
ListenLimit: s.config.HTTPConfig.ListenLimit,
NoSignalHandling: true,
Server: &http.Server{
Addr: s.config.HTTPConfig.ListenAddr,
Handler: newRouter(s),
ReadTimeout: s.config.HTTPConfig.ReadTimeout.Duration,
WriteTimeout: s.config.HTTPConfig.WriteTimeout.Duration,
},
}
s.grace = grace
grace.SetKeepAlivesEnabled(false)
grace.ShutdownInitiated = func() { s.stopping = true }
if err := grace.ListenAndServe(); err != nil {
if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
glog.Errorf("Failed to gracefully run HTTP server: %s", err.Error())
return
}
}
glog.Info("HTTP server shut down cleanly")
}
// Stop cleanly shuts down the server.
func (s *Server) Stop() {
if !s.stopping {
s.grace.Stop(s.grace.Timeout)
}
}
// NewServer returns a new HTTP server for a given configuration and tracker.
func NewServer(cfg *config.Config, tkr *tracker.Tracker) *Server {
return &Server{
config: cfg,
tracker: tkr,
}
}

@@ -1,90 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"github.com/chihaya/bencode"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
)
type params map[string]string
var infoHash = string([]byte{0x89, 0xd4, 0xbc, 0x52, 0x11, 0x16, 0xca, 0x1d, 0x42, 0xa2, 0xf3, 0x0d, 0x1f, 0x27, 0x4d, 0x94, 0xe4, 0x68, 0x1d, 0xaf})
func init() {
stats.DefaultStats = stats.New(config.StatsConfig{})
}
func setupTracker(cfg *config.Config, tkr *tracker.Tracker) (*httptest.Server, error) {
if cfg == nil {
cfg = &config.DefaultConfig
}
if tkr == nil {
var err error
tkr, err = tracker.New(cfg)
if err != nil {
return nil, err
}
}
return createServer(tkr, cfg)
}
func createServer(tkr *tracker.Tracker, cfg *config.Config) (*httptest.Server, error) {
srv := &Server{
config: cfg,
tracker: tkr,
}
return httptest.NewServer(newRouter(srv)), nil
}
func announce(p params, srv *httptest.Server) ([]byte, error) {
values := &url.Values{}
for k, v := range p {
values.Add(k, v)
}
body, _, err := fetchPath(srv.URL + "/announce?" + values.Encode())
return body, err
}
func fetchPath(path string) ([]byte, int, error) {
response, err := http.Get(path)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(response.Body)
response.Body.Close()
return body, response.StatusCode, err
}
type peerList bencode.List
func (p peerList) Len() int {
return len(p)
}
func (p peerList) Less(i, j int) bool {
return p[i].(bencode.Dict)["peer id"].(string) < p[j].(bencode.Dict)["peer id"].(string)
}
func (p peerList) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func sortPeersInResponse(dict bencode.Dict) {
if peers, ok := dict["peers"].(bencode.List); ok {
sort.Stable(peerList(peers))
}
}

@@ -1,112 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package query implements a faster single-purpose URL Query parser.
package query
import (
"errors"
"net/url"
"strconv"
"strings"
)
// Query represents a parsed URL.Query.
type Query struct {
Infohashes []string
Params map[string]string
}
// New parses a raw url query.
func New(query string) (*Query, error) {
var (
keyStart, keyEnd int
valStart, valEnd int
firstInfohash string
onKey = true
hasInfohash = false
q = &Query{
Infohashes: nil,
Params: make(map[string]string),
}
)
for i, length := 0, len(query); i < length; i++ {
separator := query[i] == '&' || query[i] == ';' || query[i] == '?'
last := i == length-1
if separator || last {
if onKey && !last {
keyStart = i + 1
continue
}
if last && !separator && !onKey {
valEnd = i
}
keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])
if err != nil {
return nil, err
}
var valStr string
if valEnd > 0 {
valStr, err = url.QueryUnescape(query[valStart : valEnd+1])
if err != nil {
return nil, err
}
}
q.Params[strings.ToLower(keyStr)] = valStr
if keyStr == "info_hash" {
if hasInfohash {
// Multiple infohashes
if q.Infohashes == nil {
q.Infohashes = []string{firstInfohash}
}
q.Infohashes = append(q.Infohashes, valStr)
} else {
firstInfohash = valStr
hasInfohash = true
}
}
valEnd = 0
onKey = true
keyStart = i + 1
} else if query[i] == '=' {
onKey = false
valStart = i + 1
valEnd = 0
} else if onKey {
keyEnd = i
} else {
valEnd = i
}
}
return q, nil
}
// Uint64 is a helper to obtain a uint of any length from a Query. After being
// called, you can safely cast the uint64 to your desired length.
func (q *Query) Uint64(key string) (uint64, error) {
str, exists := q.Params[key]
if !exists {
return 0, errors.New("value does not exist for key: " + key)
}
val, err := strconv.ParseUint(str, 10, 64)
if err != nil {
return 0, err
}
return val, nil
}

@@ -1,100 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package query
import (
"net/url"
"testing"
)
var (
baseAddr = "https://www.subdomain.tracker.com:80/"
testInfoHash = "01234567890123456789"
testPeerID = "-TEST01-6wfG2wk6wWLc"
ValidAnnounceArguments = []url.Values{
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
url.Values{"info_hash": {testInfoHash}, "peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
url.Values{"info_hash": {""}, "peer_id": {""}, "compact": {""}},
}
InvalidQueries = []string{
baseAddr + "announce/?" + "info_hash=%0%a",
}
)
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
if len(boxed) != len(unboxed) {
return false
}
for mapKey, mapVal := range boxed {
// Always expect box to hold only one element
if len(mapVal) != 1 || mapVal[0] != unboxed[mapKey] {
return false
}
}
return true
}
func TestValidQueries(t *testing.T) {
for parseIndex, parseVal := range ValidAnnounceArguments {
parsedQueryObj, err := New(baseAddr + "announce/?" + parseVal.Encode())
if err != nil {
t.Error(err)
}
if !mapArrayEqual(parseVal, parsedQueryObj.Params) {
t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Recieved=%v\n", parseIndex, parseVal, parsedQueryObj.Params)
}
}
}
func TestInvalidQueries(t *testing.T) {
for parseIndex, parseStr := range InvalidQueries {
parsedQueryObj, err := New(parseStr)
if err == nil {
t.Error("Should have produced error", parseIndex)
}
if parsedQueryObj != nil {
t.Error("Should be nil after error", parsedQueryObj, parseIndex)
}
}
}
func BenchmarkParseQuery(b *testing.B) {
for bCount := 0; bCount < b.N; bCount++ {
for parseIndex, parseStr := range ValidAnnounceArguments {
parsedQueryObj, err := New(baseAddr + "announce/?" + parseStr.Encode())
if err != nil {
b.Error(err, parseIndex)
b.Log(parsedQueryObj)
}
}
}
}
func BenchmarkURLParseQuery(b *testing.B) {
for bCount := 0; bCount < b.N; bCount++ {
for parseIndex, parseStr := range ValidAnnounceArguments {
parsedQueryObj, err := url.ParseQuery(baseAddr + "announce/?" + parseStr.Encode())
if err != nil {
b.Error(err, parseIndex)
b.Log(parsedQueryObj)
}
}
}
}

@@ -1,46 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
func handleTorrentError(err error, w *Writer) (int, error) {
if err == nil {
return http.StatusOK, nil
} else if models.IsPublicError(err) {
w.WriteError(err)
stats.RecordEvent(stats.ClientError)
return http.StatusOK, nil
}
return http.StatusInternalServerError, err
}
func (s *Server) serveAnnounce(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
writer := &Writer{w}
ann, err := s.newAnnounce(r, p)
if err != nil {
return handleTorrentError(err, writer)
}
return handleTorrentError(s.tracker.HandleAnnounce(ann, writer), writer)
}
func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
writer := &Writer{w}
scrape, err := s.newScrape(r, p)
if err != nil {
return handleTorrentError(err, writer)
}
return handleTorrentError(s.tracker.HandleScrape(scrape, writer), writer)
}

@@ -1,98 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"testing"
"github.com/chihaya/bencode"
)
func TestPublicScrape(t *testing.T) {
srv, err := setupTracker(nil, nil)
if err != nil {
t.Fatal(err)
}
defer srv.Close()
scrapeParams := params{"info_hash": infoHash}
// Add one seeder.
peer := makePeerParams("peer1", true)
announce(peer, srv)
checkScrape(scrapeParams, makeScrapeResponse(1, 0, 0), srv, t)
// Add another seeder.
peer = makePeerParams("peer2", true)
announce(peer, srv)
checkScrape(scrapeParams, makeScrapeResponse(2, 0, 0), srv, t)
// Add a leecher.
peer = makePeerParams("peer3", false)
announce(peer, srv)
checkScrape(scrapeParams, makeScrapeResponse(2, 1, 0), srv, t)
// Remove seeder.
peer = makePeerParams("peer1", true)
peer["event"] = "stopped"
announce(peer, srv)
checkScrape(scrapeParams, makeScrapeResponse(1, 1, 0), srv, t)
// Complete torrent.
peer = makePeerParams("peer3", true)
peer["event"] = "complete"
announce(peer, srv)
checkScrape(scrapeParams, makeScrapeResponse(2, 0, 0), srv, t)
}
func makeScrapeResponse(seeders, leechers, downloaded int64) bencode.Dict {
return bencode.Dict{
"files": bencode.Dict{
infoHash: bencode.Dict{
"complete": seeders,
"incomplete": leechers,
"downloaded": downloaded,
},
},
}
}
func checkScrape(p params, expected interface{}, srv *httptest.Server, t *testing.T) bool {
values := &url.Values{}
for k, v := range p {
values.Add(k, v)
}
response, err := http.Get(srv.URL + "/scrape?" + values.Encode())
if err != nil {
t.Error(err)
return false
}
body, err := ioutil.ReadAll(response.Body)
response.Body.Close()
if err != nil {
t.Error(err)
return false
}
got, err := bencode.Unmarshal(body)
if !reflect.DeepEqual(got, expected) {
t.Errorf("\ngot: %#v\nwanted: %#v", got, expected)
return false
}
return true
}

@@ -1,197 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"errors"
"net"
"net/http"
"strconv"
"github.com/julienschmidt/httprouter"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/http/query"
"github.com/chihaya/chihaya/tracker/models"
)
// newAnnounce parses an HTTP request and generates a models.Announce.
func (s *Server) newAnnounce(r *http.Request, p httprouter.Params) (*models.Announce, error) {
q, err := query.New(r.URL.RawQuery)
if err != nil {
return nil, err
}
compact := q.Params["compact"] != "0"
	event := q.Params["event"]
numWant := requestedPeerCount(q, s.config.NumWantFallback)
infohash, exists := q.Params["info_hash"]
if !exists {
return nil, models.ErrMalformedRequest
}
peerID, exists := q.Params["peer_id"]
if !exists {
return nil, models.ErrMalformedRequest
}
jwt, exists := q.Params["jwt"]
if s.config.JWKSetURI != "" && !exists {
return nil, models.ErrMalformedRequest
}
port, err := q.Uint64("port")
if err != nil {
return nil, models.ErrMalformedRequest
}
left, err := q.Uint64("left")
if err != nil {
return nil, models.ErrMalformedRequest
}
ipv4, ipv6, err := requestedIP(q, r, &s.config.NetConfig)
if err != nil {
return nil, models.ErrMalformedRequest
}
	ipv4Endpoint := models.Endpoint{IP: ipv4, Port: uint16(port)}
	ipv6Endpoint := models.Endpoint{IP: ipv6, Port: uint16(port)}
downloaded, err := q.Uint64("downloaded")
if err != nil {
return nil, models.ErrMalformedRequest
}
uploaded, err := q.Uint64("uploaded")
if err != nil {
return nil, models.ErrMalformedRequest
}
return &models.Announce{
Config: s.config,
Compact: compact,
Downloaded: downloaded,
Event: event,
IPv4: ipv4Endpoint,
IPv6: ipv6Endpoint,
Infohash: infohash,
Left: left,
NumWant: numWant,
PeerID: peerID,
Uploaded: uploaded,
JWT: jwt,
}, nil
}
// newScrape parses an HTTP request and generates a models.Scrape.
func (s *Server) newScrape(r *http.Request, p httprouter.Params) (*models.Scrape, error) {
q, err := query.New(r.URL.RawQuery)
if err != nil {
return nil, err
}
if q.Infohashes == nil {
if _, exists := q.Params["info_hash"]; !exists {
// There aren't any infohashes.
return nil, models.ErrMalformedRequest
}
q.Infohashes = []string{q.Params["info_hash"]}
}
return &models.Scrape{
Config: s.config,
Infohashes: q.Infohashes,
}, nil
}
// requestedPeerCount returns the wanted peer count or the provided fallback.
func requestedPeerCount(q *query.Query, fallback int) int {
if numWantStr, exists := q.Params["numwant"]; exists {
numWant, err := strconv.Atoi(numWantStr)
if err != nil {
return fallback
}
return numWant
}
return fallback
}
// requestedIP returns the IP address for a request. If there are multiple in
// the request, one IPv4 and one IPv6 will be returned.
func requestedIP(q *query.Query, r *http.Request, cfg *config.NetConfig) (v4, v6 net.IP, err error) {
var done bool
if cfg.AllowIPSpoofing {
if str, ok := q.Params["ip"]; ok {
if v4, v6, done = getIPs(str, v4, v6, cfg); done {
return
}
}
if str, ok := q.Params["ipv4"]; ok {
if v4, v6, done = getIPs(str, v4, v6, cfg); done {
return
}
}
if str, ok := q.Params["ipv6"]; ok {
if v4, v6, done = getIPs(str, v4, v6, cfg); done {
return
}
}
}
if cfg.RealIPHeader != "" {
if xRealIPs, ok := r.Header[cfg.RealIPHeader]; ok {
if v4, v6, done = getIPs(string(xRealIPs[0]), v4, v6, cfg); done {
return
}
}
} else {
if r.RemoteAddr == "" && v4 == nil {
if v4, v6, done = getIPs("127.0.0.1", v4, v6, cfg); done {
return
}
}
if v4, v6, done = getIPs(r.RemoteAddr, v4, v6, cfg); done {
return
}
}
if v4 == nil && v6 == nil {
err = errors.New("failed to parse IP address")
}
return
}
func getIPs(ipstr string, ipv4, ipv6 net.IP, cfg *config.NetConfig) (net.IP, net.IP, bool) {
host, _, err := net.SplitHostPort(ipstr)
if err != nil {
host = ipstr
}
if ip := net.ParseIP(host); ip != nil {
ipTo4 := ip.To4()
if ipv4 == nil && ipTo4 != nil {
ipv4 = ipTo4
} else if ipv6 == nil && ipTo4 == nil {
ipv6 = ip
}
}
var done bool
if cfg.DualStackedPeers {
done = ipv4 != nil && ipv6 != nil
} else {
done = ipv4 != nil || ipv6 != nil
}
return ipv4, ipv6, done
}
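// exampleRequestIPs is a minimal illustrative sketch (hypothetical helper, not
// referenced elsewhere): getIPs accepts both "host:port" strings such as
// http.Request.RemoteAddr and bare IP literals, and in dual-stack mode it keeps
// collecting until one address of each family has been seen.
func exampleRequestIPs(cfg *config.NetConfig) (net.IP, net.IP) {
	v4, v6, _ := getIPs("1.2.3.4:51413", nil, nil, cfg)
	v4, v6, _ = getIPs("2001:db8::1", v4, v6, cfg)
	return v4, v6
}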

View file

@ -1,118 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"bytes"
"net/http"
"github.com/chihaya/bencode"
"github.com/chihaya/chihaya/tracker/models"
)
// Writer implements the tracker.Writer interface for the HTTP protocol.
type Writer struct {
http.ResponseWriter
}
// WriteError writes a bencode dict with a failure reason.
func (w *Writer) WriteError(err error) error {
bencoder := bencode.NewEncoder(w)
return bencoder.Encode(bencode.Dict{
"failure reason": err.Error(),
})
}
// WriteAnnounce writes a bencode dict representation of an AnnounceResponse.
func (w *Writer) WriteAnnounce(res *models.AnnounceResponse) error {
dict := bencode.Dict{
"complete": res.Complete,
"incomplete": res.Incomplete,
"interval": res.Interval,
"min interval": res.MinInterval,
}
if res.Compact {
if res.IPv4Peers != nil {
dict["peers"] = compactPeers(false, res.IPv4Peers)
}
if res.IPv6Peers != nil {
compact := compactPeers(true, res.IPv6Peers)
// Don't bother writing the IPv6 field if there is no value.
if len(compact) > 0 {
dict["peers6"] = compact
}
}
} else if res.IPv4Peers != nil || res.IPv6Peers != nil {
dict["peers"] = peersList(res.IPv4Peers, res.IPv6Peers)
}
bencoder := bencode.NewEncoder(w)
return bencoder.Encode(dict)
}
// WriteScrape writes a bencode dict representation of a ScrapeResponse.
func (w *Writer) WriteScrape(res *models.ScrapeResponse) error {
dict := bencode.Dict{
"files": filesDict(res.Files),
}
bencoder := bencode.NewEncoder(w)
return bencoder.Encode(dict)
}
func compactPeers(ipv6 bool, peers models.PeerList) []byte {
	var compactPeers bytes.Buffer
	// The compact wire format is identical for both address families: the raw
	// IP bytes followed by the port in big-endian byte order.
	for _, peer := range peers {
		compactPeers.Write(peer.IP)
		compactPeers.Write([]byte{byte(peer.Port >> 8), byte(peer.Port & 0xff)})
	}
	return compactPeers.Bytes()
}
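// exampleCompactPeer is a minimal illustrative sketch (hypothetical helper, not
// referenced elsewhere): an IPv4 peer at 1.2.3.4:6881 occupies exactly six
// bytes in the compact encoding, four IP bytes plus a big-endian port.
func exampleCompactPeer() []byte {
	peer := models.Peer{Endpoint: models.Endpoint{IP: []byte{1, 2, 3, 4}, Port: 6881}}
	return compactPeers(false, models.PeerList{peer}) // []byte{1, 2, 3, 4, 0x1a, 0xe1}
}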
func peersList(ipv4s, ipv6s models.PeerList) (peers []bencode.Dict) {
for _, peer := range ipv4s {
peers = append(peers, peerDict(&peer, false))
}
for _, peer := range ipv6s {
peers = append(peers, peerDict(&peer, true))
}
return peers
}
func peerDict(peer *models.Peer, ipv6 bool) bencode.Dict {
return bencode.Dict{
"ip": peer.IP.String(),
"peer id": peer.ID,
"port": peer.Port,
}
}
func filesDict(torrents []*models.Torrent) bencode.Dict {
d := bencode.NewDict()
for _, torrent := range torrents {
d[torrent.Infohash] = torrentDict(torrent)
}
return d
}
func torrentDict(torrent *models.Torrent) bencode.Dict {
return bencode.Dict{
"complete": torrent.Seeders.Len(),
"incomplete": torrent.Leechers.Len(),
"downloaded": torrent.Snatches,
}
}

View file

@ -1,78 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package stats
import "runtime"
// BasicMemStats includes a few of the fields from runtime.MemStats suitable for
// general logging.
type BasicMemStats struct {
// General statistics.
Alloc uint64 // bytes allocated and still in use
TotalAlloc uint64 // bytes allocated (even if freed)
Sys uint64 // bytes obtained from system (sum of XxxSys in runtime)
Lookups uint64 // number of pointer lookups
Mallocs uint64 // number of mallocs
Frees uint64 // number of frees
// Main allocation heap statistics.
HeapAlloc uint64 // bytes allocated and still in use
HeapSys uint64 // bytes obtained from system
HeapIdle uint64 // bytes in idle spans
HeapInuse uint64 // bytes in non-idle span
HeapReleased uint64 // bytes released to the OS
HeapObjects uint64 // total number of allocated objects
// Garbage collector statistics.
PauseTotalNs uint64
LatestPauseNs uint64
}
type memStatsPlaceholder interface{}
// MemStatsWrapper wraps runtime.MemStats with an optionally less verbose JSON
// representation. The JSON field names correspond exactly to the runtime field
// names to avoid reimplementing the entire struct.
type MemStatsWrapper struct {
memStatsPlaceholder `json:"Memory"`
basic *BasicMemStats
cache *runtime.MemStats
}
func NewMemStatsWrapper(verbose bool) *MemStatsWrapper {
stats := &MemStatsWrapper{cache: &runtime.MemStats{}}
if verbose {
stats.memStatsPlaceholder = stats.cache
} else {
stats.basic = &BasicMemStats{}
stats.memStatsPlaceholder = stats.basic
}
return stats
}
// Update fetches the current memstats from runtime and resets the cache.
func (s *MemStatsWrapper) Update() {
runtime.ReadMemStats(s.cache)
if s.basic != nil {
// Gross, but any decent editor can generate this in a couple commands.
s.basic.Alloc = s.cache.Alloc
s.basic.TotalAlloc = s.cache.TotalAlloc
s.basic.Sys = s.cache.Sys
s.basic.Lookups = s.cache.Lookups
s.basic.Mallocs = s.cache.Mallocs
s.basic.Frees = s.cache.Frees
s.basic.HeapAlloc = s.cache.HeapAlloc
s.basic.HeapSys = s.cache.HeapSys
s.basic.HeapIdle = s.cache.HeapIdle
s.basic.HeapInuse = s.cache.HeapInuse
s.basic.HeapReleased = s.cache.HeapReleased
s.basic.HeapObjects = s.cache.HeapObjects
s.basic.PauseTotalNs = s.cache.PauseTotalNs
s.basic.LatestPauseNs = s.cache.PauseNs[(s.cache.NumGC+255)%256]
}
}

View file

@ -1,291 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package stats implements a means of tracking processing statistics for a
// BitTorrent tracker.
package stats
import (
"time"
"github.com/pushrax/faststats"
"github.com/pushrax/flatjson"
"github.com/chihaya/chihaya/config"
)
const (
Announce = iota
Scrape
Completed
NewLeech
DeletedLeech
ReapedLeech
NewSeed
DeletedSeed
ReapedSeed
NewTorrent
DeletedTorrent
ReapedTorrent
AcceptedConnection
ClosedConnection
HandledRequest
ErroredRequest
ClientError
ResponseTime
)
// DefaultStats is a default instance of stats tracking that uses an unbuffered
// channel for broadcasting events unless specified otherwise via a command
// line flag.
var DefaultStats *Stats
type PeerClassStats struct {
Current int64 // Current peer count.
Joined uint64 // Peers that announced.
Left uint64 // Peers that paused or stopped.
Reaped uint64 // Peers cleaned up after inactivity.
}
type PeerStats struct {
PeerClassStats `json:"Peers"` // Stats for all peers.
Seeds PeerClassStats // Stats for seeds only.
Completed uint64 // Number of transitions from leech to seed.
}
type PercentileTimes struct {
P50 *faststats.Percentile
P90 *faststats.Percentile
P95 *faststats.Percentile
}
type Stats struct {
Started time.Time // Time at which Chihaya was booted.
OpenConnections int64 `json:"connectionsOpen"`
ConnectionsAccepted uint64 `json:"connectionsAccepted"`
BytesTransmitted uint64 `json:"bytesTransmitted"`
GoRoutines int `json:"runtimeGoRoutines"`
RequestsHandled uint64 `json:"requestsHandled"`
RequestsErrored uint64 `json:"requestsErrored"`
ClientErrors uint64 `json:"requestsBad"`
ResponseTime PercentileTimes
Announces uint64 `json:"trackerAnnounces"`
Scrapes uint64 `json:"trackerScrapes"`
TorrentsSize uint64 `json:"torrentsSize"`
TorrentsAdded uint64 `json:"torrentsAdded"`
TorrentsRemoved uint64 `json:"torrentsRemoved"`
TorrentsReaped uint64 `json:"torrentsReaped"`
IPv4Peers PeerStats `json:"peersIPv4"`
IPv6Peers PeerStats `json:"peersIPv6"`
*MemStatsWrapper `json:",omitempty"`
events chan int
ipv4PeerEvents chan int
ipv6PeerEvents chan int
responseTimeEvents chan time.Duration
recordMemStats <-chan time.Time
flattened flatjson.Map
}
func New(cfg config.StatsConfig) *Stats {
s := &Stats{
Started: time.Now(),
events: make(chan int, cfg.BufferSize),
GoRoutines: 0,
ipv4PeerEvents: make(chan int, cfg.BufferSize),
ipv6PeerEvents: make(chan int, cfg.BufferSize),
responseTimeEvents: make(chan time.Duration, cfg.BufferSize),
ResponseTime: PercentileTimes{
P50: faststats.NewPercentile(0.5),
P90: faststats.NewPercentile(0.9),
P95: faststats.NewPercentile(0.95),
},
}
if cfg.IncludeMem {
s.MemStatsWrapper = NewMemStatsWrapper(cfg.VerboseMem)
s.recordMemStats = time.After(cfg.MemUpdateInterval.Duration)
}
s.flattened = flatjson.Flatten(s)
go s.handleEvents()
return s
}
func (s *Stats) Flattened() flatjson.Map {
return s.flattened
}
func (s *Stats) Close() {
close(s.events)
}
func (s *Stats) Uptime() time.Duration {
return time.Since(s.Started)
}
func (s *Stats) RecordEvent(event int) {
s.events <- event
}
func (s *Stats) RecordPeerEvent(event int, ipv6 bool) {
if ipv6 {
s.ipv6PeerEvents <- event
} else {
s.ipv4PeerEvents <- event
}
}
func (s *Stats) RecordTiming(event int, duration time.Duration) {
switch event {
case ResponseTime:
s.responseTimeEvents <- duration
default:
panic("stats: RecordTiming called with an unknown event")
}
}
func (s *Stats) handleEvents() {
for {
select {
case event := <-s.events:
s.handleEvent(event)
case event := <-s.ipv4PeerEvents:
s.handlePeerEvent(&s.IPv4Peers, event)
case event := <-s.ipv6PeerEvents:
s.handlePeerEvent(&s.IPv6Peers, event)
case duration := <-s.responseTimeEvents:
f := float64(duration) / float64(time.Millisecond)
s.ResponseTime.P50.AddSample(f)
s.ResponseTime.P90.AddSample(f)
s.ResponseTime.P95.AddSample(f)
case <-s.recordMemStats:
s.MemStatsWrapper.Update()
}
}
}
func (s *Stats) handleEvent(event int) {
switch event {
case Announce:
s.Announces++
case Scrape:
s.Scrapes++
case NewTorrent:
s.TorrentsAdded++
s.TorrentsSize++
case DeletedTorrent:
s.TorrentsRemoved++
s.TorrentsSize--
case ReapedTorrent:
s.TorrentsReaped++
s.TorrentsSize--
case AcceptedConnection:
s.ConnectionsAccepted++
s.OpenConnections++
case ClosedConnection:
s.OpenConnections--
case HandledRequest:
s.RequestsHandled++
case ClientError:
s.ClientErrors++
case ErroredRequest:
s.RequestsErrored++
default:
panic("stats: RecordEvent called with an unknown event")
}
}
func (s *Stats) handlePeerEvent(ps *PeerStats, event int) {
switch event {
case Completed:
ps.Completed++
ps.Seeds.Current++
case NewLeech:
ps.Joined++
ps.Current++
case DeletedLeech:
ps.Left++
ps.Current--
case ReapedLeech:
ps.Reaped++
ps.Current--
case NewSeed:
ps.Seeds.Joined++
ps.Seeds.Current++
ps.Joined++
ps.Current++
case DeletedSeed:
ps.Seeds.Left++
ps.Seeds.Current--
ps.Left++
ps.Current--
case ReapedSeed:
ps.Seeds.Reaped++
ps.Seeds.Current--
ps.Reaped++
ps.Current--
default:
panic("stats: RecordPeerEvent called with an unknown event")
}
}
// RecordEvent broadcasts an event to the default stats queue.
func RecordEvent(event int) {
if DefaultStats != nil {
DefaultStats.RecordEvent(event)
}
}
// RecordPeerEvent broadcasts a peer event to the default stats queue.
func RecordPeerEvent(event int, ipv6 bool) {
if DefaultStats != nil {
DefaultStats.RecordPeerEvent(event, ipv6)
}
}
// RecordTiming broadcasts a timing event to the default stats queue.
func RecordTiming(event int, duration time.Duration) {
if DefaultStats != nil {
DefaultStats.RecordTiming(event, duration)
}
}
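// exampleRecordRequest is a minimal illustrative sketch (hypothetical helper,
// not referenced elsewhere): a transport typically pairs the counter events
// above with a response-time sample once a request has been served.
func exampleRecordRequest(start time.Time) {
	RecordEvent(Announce)
	RecordEvent(HandledRequest)
	RecordTiming(ResponseTime, time.Since(start))
}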

View file

@ -1,241 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package tracker
import (
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
// HandleAnnounce encapsulates all of the logic of handling a BitTorrent
// client's Announce without being coupled to any transport protocol.
func (tkr *Tracker) HandleAnnounce(ann *models.Announce, w Writer) (err error) {
if tkr.Config.ClientWhitelistEnabled {
if err = tkr.ClientApproved(ann.ClientID()); err != nil {
return err
}
}
if tkr.Config.JWKSetURI != "" {
err := tkr.validateJWT(ann.JWT, ann.Infohash)
if err != nil {
return err
}
}
torrent, err := tkr.FindTorrent(ann.Infohash)
if err == models.ErrTorrentDNE && tkr.Config.CreateOnAnnounce {
torrent = &models.Torrent{
Infohash: ann.Infohash,
Seeders: models.NewPeerMap(true, tkr.Config),
Leechers: models.NewPeerMap(false, tkr.Config),
}
tkr.PutTorrent(torrent)
stats.RecordEvent(stats.NewTorrent)
} else if err != nil {
return err
}
ann.BuildPeer(torrent)
_, err = tkr.updateSwarm(ann)
if err != nil {
return err
}
_, err = tkr.handleEvent(ann)
if err != nil {
return err
}
if tkr.Config.PurgeInactiveTorrents && torrent.PeerCount() == 0 {
// Rather than deleting the torrent explicitly, let the tracker driver
// ensure there are no race conditions.
tkr.PurgeInactiveTorrent(torrent.Infohash)
stats.RecordEvent(stats.DeletedTorrent)
}
stats.RecordEvent(stats.Announce)
return w.WriteAnnounce(newAnnounceResponse(ann))
}
// updateSwarm handles the changes to a torrent's swarm given an announce.
func (tkr *Tracker) updateSwarm(ann *models.Announce) (created bool, err error) {
var createdv4, createdv6 bool
tkr.TouchTorrent(ann.Torrent.Infohash)
if ann.HasIPv4() {
createdv4, err = tkr.updatePeer(ann, ann.PeerV4)
if err != nil {
return
}
}
if ann.HasIPv6() {
createdv6, err = tkr.updatePeer(ann, ann.PeerV6)
if err != nil {
return
}
}
return createdv4 || createdv6, nil
}
func (tkr *Tracker) updatePeer(ann *models.Announce, peer *models.Peer) (created bool, err error) {
	// Operate on the per-address-family peer passed in by updateSwarm.
	p, t := peer, ann.Torrent
switch {
case t.Seeders.Contains(p.Key()):
err = tkr.PutSeeder(t.Infohash, p)
if err != nil {
return
}
case t.Leechers.Contains(p.Key()):
err = tkr.PutLeecher(t.Infohash, p)
if err != nil {
return
}
default:
if ann.Left == 0 {
err = tkr.PutSeeder(t.Infohash, p)
if err != nil {
return
}
stats.RecordPeerEvent(stats.NewSeed, p.HasIPv6())
} else {
err = tkr.PutLeecher(t.Infohash, p)
if err != nil {
return
}
stats.RecordPeerEvent(stats.NewLeech, p.HasIPv6())
}
created = true
}
return
}
// handleEvent checks to see whether an announce has an event and if it does,
// properly handles that event.
func (tkr *Tracker) handleEvent(ann *models.Announce) (snatched bool, err error) {
var snatchedv4, snatchedv6 bool
if ann.HasIPv4() {
snatchedv4, err = tkr.handlePeerEvent(ann, ann.PeerV4)
if err != nil {
return
}
}
if ann.HasIPv6() {
snatchedv6, err = tkr.handlePeerEvent(ann, ann.PeerV6)
if err != nil {
return
}
}
if snatchedv4 || snatchedv6 {
err = tkr.IncrementTorrentSnatches(ann.Torrent.Infohash)
if err != nil {
return
}
ann.Torrent.Snatches++
return true, nil
}
return false, nil
}
func (tkr *Tracker) handlePeerEvent(ann *models.Announce, p *models.Peer) (snatched bool, err error) {
	// p is the per-address-family peer passed in by handleEvent.
	t := ann.Torrent
switch {
case ann.Event == "stopped" || ann.Event == "paused":
// updateSwarm checks if the peer is active on the torrent,
// so one of these branches must be followed.
if t.Seeders.Contains(p.Key()) {
err = tkr.DeleteSeeder(t.Infohash, p)
if err != nil {
return
}
stats.RecordPeerEvent(stats.DeletedSeed, p.HasIPv6())
} else if t.Leechers.Contains(p.Key()) {
err = tkr.DeleteLeecher(t.Infohash, p)
if err != nil {
return
}
stats.RecordPeerEvent(stats.DeletedLeech, p.HasIPv6())
}
case t.Leechers.Contains(p.Key()) && (ann.Event == "completed" || ann.Left == 0):
// A leecher has completed or this is the first time we've seen them since
// they've completed.
err = tkr.leecherFinished(t, p)
if err != nil {
return
}
// Only mark as snatched if we receive the completed event.
if ann.Event == "completed" {
snatched = true
}
}
return
}
// leecherFinished moves a peer from the leeching pool to the seeder pool.
func (tkr *Tracker) leecherFinished(t *models.Torrent, p *models.Peer) error {
if err := tkr.DeleteLeecher(t.Infohash, p); err != nil {
return err
}
if err := tkr.PutSeeder(t.Infohash, p); err != nil {
return err
}
stats.RecordPeerEvent(stats.Completed, p.HasIPv6())
return nil
}
func newAnnounceResponse(ann *models.Announce) *models.AnnounceResponse {
seedCount := ann.Torrent.Seeders.Len()
leechCount := ann.Torrent.Leechers.Len()
res := &models.AnnounceResponse{
Announce: ann,
Complete: seedCount,
Incomplete: leechCount,
Interval: ann.Config.Announce.Duration,
MinInterval: ann.Config.MinAnnounce.Duration,
Compact: ann.Compact,
}
if ann.NumWant > 0 && ann.Event != "stopped" && ann.Event != "paused" {
res.IPv4Peers, res.IPv6Peers = getPeers(ann)
if len(res.IPv4Peers)+len(res.IPv6Peers) == 0 {
models.AppendPeer(&res.IPv4Peers, &res.IPv6Peers, ann, ann.Peer)
}
}
return res
}
// getPeers returns lists of IPv4 and IPv6 peers on a given torrent, sized
// according to the wanted parameter.
func getPeers(ann *models.Announce) (ipv4s, ipv6s models.PeerList) {
ipv4s, ipv6s = models.PeerList{}, models.PeerList{}
if ann.Left == 0 {
// If they're seeding, give them only leechers.
return ann.Torrent.Leechers.AppendPeers(ipv4s, ipv6s, ann, ann.NumWant)
}
// If they're leeching, prioritize giving them seeders.
ipv4s, ipv6s = ann.Torrent.Seeders.AppendPeers(ipv4s, ipv6s, ann, ann.NumWant)
return ann.Torrent.Leechers.AppendPeers(ipv4s, ipv6s, ann, ann.NumWant-len(ipv4s)-len(ipv6s))
}

View file

@ -1,147 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package tracker
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"time"
oidchttp "github.com/coreos/go-oidc/http"
"github.com/coreos/go-oidc/jose"
"github.com/golang/glog"
)
const jwkTTLFallback = 5 * time.Minute
func (tkr *Tracker) updateJWKSetForever() {
defer tkr.shutdownWG.Done()
client := &http.Client{Timeout: 5 * time.Second}
// Get initial JWK Set.
err := tkr.updateJWKSet(client)
if err != nil {
glog.Warningf("Failed to get initial JWK Set: %s", err)
}
for {
select {
case <-tkr.shuttingDown:
return
case <-time.After(tkr.Config.JWKSetUpdateInterval.Duration):
err = tkr.updateJWKSet(client)
if err != nil {
glog.Warningf("Failed to update JWK Set: %s", err)
}
}
}
}
type jwkSet struct {
Keys []jose.JWK `json:"keys"`
Issuer string `json:"issuer"`
validUntil time.Time
}
func (tkr *Tracker) updateJWKSet(client *http.Client) error {
glog.Info("Attemping to update JWK Set")
resp, err := client.Get(tkr.Config.JWKSetURI)
if err != nil {
return err
}
defer resp.Body.Close()
var jwks jwkSet
err = json.NewDecoder(resp.Body).Decode(&jwks)
if err != nil {
return err
}
if len(jwks.Keys) == 0 {
return errors.New("Failed to find any keys from JWK Set URI")
}
if jwks.Issuer == "" {
return errors.New("Failed to find any issuer from JWK Set URI")
}
ttl, _, _ := oidchttp.Cacheable(resp.Header)
if ttl == 0 {
ttl = jwkTTLFallback
}
jwks.validUntil = time.Now().Add(ttl)
tkr.jwkSet = jwks
glog.Info("Successfully updated JWK Set")
return nil
}
func validateJWTSignature(jwt *jose.JWT, jwkSet *jwkSet) (bool, error) {
for _, jwk := range jwkSet.Keys {
v, err := jose.NewVerifier(jwk)
if err != nil {
return false, err
}
if err := v.Verify(jwt.Signature, []byte(jwt.Data())); err == nil {
return true, nil
}
}
return false, nil
}
func (tkr *Tracker) validateJWT(jwtStr, infohash string) error {
jwkSet := tkr.jwkSet
if time.Now().After(jwkSet.validUntil) {
return fmt.Errorf("Failed verify JWT due to stale JWK Set")
}
jwt, err := jose.ParseJWT(jwtStr)
if err != nil {
return err
}
validated, err := validateJWTSignature(&jwt, &jwkSet)
if err != nil {
return err
} else if !validated {
return errors.New("Failed to verify JWT with all available verifiers")
}
claims, err := jwt.Claims()
if err != nil {
return err
}
if claimedIssuer, ok, err := claims.StringClaim("iss"); claimedIssuer != jwkSet.Issuer || err != nil || !ok {
return errors.New("Failed to validate JWT issuer claim")
}
if claimedAudience, ok, err := claims.StringClaim("aud"); claimedAudience != tkr.Config.JWTAudience || err != nil || !ok {
return errors.New("Failed to validate JWT audience claim")
}
claimedInfohash, ok, err := claims.StringClaim("infohash")
if err != nil || !ok {
return errors.New("Failed to validate JWT infohash claim")
}
unescapedInfohash, err := url.QueryUnescape(claimedInfohash)
if err != nil {
return errors.New("Failed to unescape JWT infohash claim")
}
if unescapedInfohash != infohash {
return errors.New("Failed to match infohash claim with requested infohash")
}
return nil
}
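// exampleJWTClaims is a minimal illustrative sketch (hypothetical values, not
// referenced elsewhere): validateJWT expects at least these three claims, with
// the infohash claim URL-escaped exactly as it appears in the announce query.
var exampleJWTClaims = map[string]string{
	"iss":      "https://issuer.example.com",
	"aud":      "example-tracker",
	"infohash": "%89%d4%bc%52%11%16%ca%1d%42%a2%f3%0d%1f%27%4d%94%e4%68%61%ac",
}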

View file

@ -1,227 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package models implements the common data types used throughout a BitTorrent
// tracker.
package models
import (
"net"
"strings"
"time"
"github.com/chihaya/chihaya/config"
)
var (
// ErrMalformedRequest is returned when a request does not contain the
// required parameters needed to create a model.
ErrMalformedRequest = ClientError("malformed request")
// ErrBadRequest is returned when a request is invalid in the peer's
// current state. For example, announcing a "completed" event while
// not a leecher or a "stopped" event while not active.
ErrBadRequest = ClientError("bad request")
// ErrTorrentDNE is returned when a torrent does not exist.
ErrTorrentDNE = NotFoundError("torrent does not exist")
// ErrClientUnapproved is returned when a clientID is not in the whitelist.
ErrClientUnapproved = ClientError("client is not approved")
)
type ClientError string
type NotFoundError ClientError
type ProtocolError ClientError
func (e ClientError) Error() string { return string(e) }
func (e NotFoundError) Error() string { return string(e) }
func (e ProtocolError) Error() string { return string(e) }
// IsPublicError determines whether an error should be propagated to the client.
func IsPublicError(err error) bool {
_, cl := err.(ClientError)
_, nf := err.(NotFoundError)
_, pc := err.(ProtocolError)
return cl || nf || pc
}
// PeerList represents a list of peers: either seeders or leechers.
type PeerList []Peer
// PeerKey is the key used to uniquely identify a peer in a swarm.
type PeerKey string
// NewPeerKey creates a properly formatted PeerKey.
func NewPeerKey(peerID string, ip net.IP) PeerKey {
return PeerKey(peerID + "//" + ip.String())
}
// IP parses and returns the IP address for a given PeerKey.
func (pk PeerKey) IP() net.IP {
ip := net.ParseIP(strings.Split(string(pk), "//")[1])
if rval := ip.To4(); rval != nil {
return rval
}
return ip
}
// PeerID returns the PeerID section of a PeerKey.
func (pk PeerKey) PeerID() string {
return strings.Split(string(pk), "//")[0]
}
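// examplePeerKey is a minimal illustrative sketch (hypothetical helper, not
// referenced elsewhere): a PeerKey is simply "<peerID>//<ip>", so both parts
// can be recovered from it later.
func examplePeerKey() (string, net.IP) {
	pk := NewPeerKey("-UT2300-abcdefghijkl", net.ParseIP("1.2.3.4"))
	return pk.PeerID(), pk.IP() // "-UT2300-abcdefghijkl", 1.2.3.4
}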
// Endpoint is an IP and port pair.
//
// IP always has length net.IPv4len if IPv4, and net.IPv6len if IPv6.
type Endpoint struct {
IP net.IP `json:"ip"`
Port uint16 `json:"port"`
}
// Peer represents a participant in a BitTorrent swarm.
type Peer struct {
ID string `json:"id"`
Uploaded uint64 `json:"uploaded"`
Downloaded uint64 `json:"downloaded"`
Left uint64 `json:"left"`
LastAnnounce int64 `json:"lastAnnounce"`
Endpoint
}
// HasIPv4 determines if a peer's IP address can be represented as an IPv4
// address.
func (p *Peer) HasIPv4() bool {
return !p.HasIPv6()
}
// HasIPv6 determines if a peer's IP address can be represented as an IPv6
// address.
func (p *Peer) HasIPv6() bool {
return len(p.IP) == net.IPv6len
}
// Key returns a PeerKey for the given peer.
func (p *Peer) Key() PeerKey {
return NewPeerKey(p.ID, p.IP)
}
// Torrent represents a BitTorrent swarm and its metadata.
type Torrent struct {
Infohash string `json:"infohash"`
Snatches uint64 `json:"snatches"`
LastAction int64 `json:"lastAction"`
Seeders *PeerMap `json:"seeders"`
Leechers *PeerMap `json:"leechers"`
}
// PeerCount returns the total number of peers connected on this Torrent.
func (t *Torrent) PeerCount() int {
return t.Seeders.Len() + t.Leechers.Len()
}
// Announce is an Announce by a Peer.
type Announce struct {
Config *config.Config `json:"config"`
Compact bool `json:"compact"`
Downloaded uint64 `json:"downloaded"`
Event string `json:"event"`
IPv4 Endpoint `json:"ipv4"`
IPv6 Endpoint `json:"ipv6"`
Infohash string `json:"infohash"`
Left uint64 `json:"left"`
NumWant int `json:"numwant"`
PeerID string `json:"peer_id"`
Uploaded uint64 `json:"uploaded"`
JWT string `json:"jwt"`
Torrent *Torrent `json:"-"`
Peer *Peer `json:"-"`
PeerV4 *Peer `json:"-"` // Only valid if HasIPv4() is true.
PeerV6 *Peer `json:"-"` // Only valid if HasIPv6() is true.
}
// ClientID returns the part of a PeerID that identifies a Peer's client
// software.
func (a *Announce) ClientID() (clientID string) {
length := len(a.PeerID)
if length >= 6 {
if a.PeerID[0] == '-' {
if length >= 7 {
clientID = a.PeerID[1:7]
}
} else {
clientID = a.PeerID[:6]
}
}
return
}
// HasIPv4 determines whether or not an announce has an IPv4 endpoint.
func (a *Announce) HasIPv4() bool {
return a.IPv4.IP != nil
}
// HasIPv6 determines whether or not an announce has an IPv6 endpoint.
func (a *Announce) HasIPv6() bool {
return a.IPv6.IP != nil
}
// BuildPeer creates the Peer representation of an Announce. BuildPeer creates
// one peer for each IP in the announce, and panics if there are none.
func (a *Announce) BuildPeer(t *Torrent) {
a.Peer = &Peer{
ID: a.PeerID,
Uploaded: a.Uploaded,
Downloaded: a.Downloaded,
Left: a.Left,
LastAnnounce: time.Now().Unix(),
}
if t != nil {
a.Torrent = t
}
if a.HasIPv4() && a.HasIPv6() {
a.PeerV4 = a.Peer
a.PeerV4.Endpoint = a.IPv4
peer6 := *a.Peer
a.PeerV6 = &peer6
a.PeerV6.Endpoint = a.IPv6
} else if a.HasIPv4() {
a.PeerV4 = a.Peer
a.PeerV4.Endpoint = a.IPv4
} else if a.HasIPv6() {
a.PeerV6 = a.Peer
a.PeerV6.Endpoint = a.IPv6
} else {
panic("models: announce must have an IP")
}
return
}
// AnnounceResponse contains the information needed to fulfill an announce.
type AnnounceResponse struct {
Announce *Announce
Complete, Incomplete int
Interval, MinInterval time.Duration
IPv4Peers, IPv6Peers PeerList
Compact bool
}
// Scrape is a Scrape by a Peer.
type Scrape struct {
Config *config.Config `json:"config"`
Infohashes []string
}
// ScrapeResponse contains the information needed to fulfill a scrape.
type ScrapeResponse struct {
Files []*Torrent
}

View file

@ -1,64 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package models
import "testing"
type PeerClientPair struct {
announce Announce
clientID string
}
var TestClients = []PeerClientPair{
{Announce{PeerID: "-AZ3034-6wfG2wk6wWLc"}, "AZ3034"},
{Announce{PeerID: "-AZ3042-6ozMq5q6Q3NX"}, "AZ3042"},
{Announce{PeerID: "-BS5820-oy4La2MWGEFj"}, "BS5820"},
{Announce{PeerID: "-AR6360-6oZyyMWoOOBe"}, "AR6360"},
{Announce{PeerID: "-AG2083-s1hiF8vGAAg0"}, "AG2083"},
{Announce{PeerID: "-AG3003-lEl2Mm4NEO4n"}, "AG3003"},
{Announce{PeerID: "-MR1100-00HS~T7*65rm"}, "MR1100"},
{Announce{PeerID: "-LK0140-ATIV~nbEQAMr"}, "LK0140"},
{Announce{PeerID: "-KT2210-347143496631"}, "KT2210"},
{Announce{PeerID: "-TR0960-6ep6svaa61r4"}, "TR0960"},
{Announce{PeerID: "-XX1150-dv220cotgj4d"}, "XX1150"},
{Announce{PeerID: "-AZ2504-192gwethivju"}, "AZ2504"},
{Announce{PeerID: "-KT4310-3L4UvarKuqIu"}, "KT4310"},
{Announce{PeerID: "-AZ2060-0xJQ02d4309O"}, "AZ2060"},
{Announce{PeerID: "-BD0300-2nkdf08Jd890"}, "BD0300"},
{Announce{PeerID: "-A~0010-a9mn9DFkj39J"}, "A~0010"},
{Announce{PeerID: "-UT2300-MNu93JKnm930"}, "UT2300"},
{Announce{PeerID: "-UT2300-KT4310KT4301"}, "UT2300"},
{Announce{PeerID: "T03A0----f089kjsdf6e"}, "T03A0-"},
{Announce{PeerID: "S58B-----nKl34GoNb75"}, "S58B--"},
{Announce{PeerID: "M4-4-0--9aa757Efd5Bl"}, "M4-4-0"},
{Announce{PeerID: "AZ2500BTeYUzyabAfo6U"}, "AZ2500"}, // BitTyrant
{Announce{PeerID: "exbc0JdSklm834kj9Udf"}, "exbc0J"}, // Old BitComet
{Announce{PeerID: "FUTB0L84j542mVc84jkd"}, "FUTB0L"}, // Alt BitComet
{Announce{PeerID: "XBT054d-8602Jn83NnF9"}, "XBT054"}, // XBT
{Announce{PeerID: "OP1011affbecbfabeefb"}, "OP1011"}, // Opera
{Announce{PeerID: "-ML2.7.2-kgjjfkd9762"}, "ML2.7."}, // MLDonkey
{Announce{PeerID: "-BOWA0C-SDLFJWEIORNM"}, "BOWA0C"}, // Bits on Wheels
{Announce{PeerID: "Q1-0-0--dsn34DFn9083"}, "Q1-0-0"}, // Queen Bee
{Announce{PeerID: "Q1-10-0-Yoiumn39BDfO"}, "Q1-10-"}, // Queen Bee Alt
{Announce{PeerID: "346------SDFknl33408"}, "346---"}, // TorreTopia
{Announce{PeerID: "QVOD0054ABFFEDCCDEDB"}, "QVOD00"}, // Qvod
{Announce{PeerID: ""}, ""},
{Announce{PeerID: "-"}, ""},
{Announce{PeerID: "12345"}, ""},
{Announce{PeerID: "-12345"}, ""},
{Announce{PeerID: "123456"}, "123456"},
{Announce{PeerID: "-123456"}, "123456"},
}
func TestClientID(t *testing.T) {
for _, pair := range TestClients {
if parsedID := pair.announce.ClientID(); parsedID != pair.clientID {
t.Error("Incorrectly parsed peer ID", pair.announce.PeerID, "as", parsedID)
}
}
}

View file

@ -1,205 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package models
import (
"net"
"sync"
"sync/atomic"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
)
// PeerMap is a thread-safe map from PeerKeys to Peers. When PreferredSubnet is
// enabled, it is a thread-safe map of maps from masked IPs to PeerKeys to Peers.
type PeerMap struct {
Peers map[string]map[PeerKey]Peer `json:"peers"`
Seeders bool `json:"seeders"`
Config config.SubnetConfig `json:"config"`
Size int32 `json:"size"`
sync.RWMutex
}
// NewPeerMap initializes the map for a new PeerMap.
func NewPeerMap(seeders bool, cfg *config.Config) *PeerMap {
pm := &PeerMap{
Peers: make(map[string]map[PeerKey]Peer),
Seeders: seeders,
Config: cfg.NetConfig.SubnetConfig,
}
if !pm.Config.PreferredSubnet {
pm.Peers[""] = make(map[PeerKey]Peer)
}
return pm
}
// Contains is true if a peer is contained within a PeerMap.
func (pm *PeerMap) Contains(pk PeerKey) bool {
pm.RLock()
defer pm.RUnlock()
if pm.Config.PreferredSubnet {
maskedIP := pm.mask(pk.IP())
peers, exists := pm.Peers[maskedIP]
if !exists {
return false
}
_, exists = peers[pk]
return exists
}
_, exists := pm.Peers[""][pk]
return exists
}
func (pm *PeerMap) mask(ip net.IP) string {
if !pm.Config.PreferredSubnet {
return ""
}
var maskedIP net.IP
if len(ip) == net.IPv6len {
maskedIP = ip.Mask(net.CIDRMask(pm.Config.PreferredIPv6Subnet, 128))
} else {
maskedIP = ip.Mask(net.CIDRMask(pm.Config.PreferredIPv4Subnet, 32))
}
return maskedIP.String()
}
// LookUp is a thread-safe read from a PeerMap.
func (pm *PeerMap) LookUp(pk PeerKey) (peer Peer, exists bool) {
pm.RLock()
defer pm.RUnlock()
maskedIP := pm.mask(pk.IP())
peers, exists := pm.Peers[maskedIP]
if !exists {
return Peer{}, false
}
peer, exists = peers[pk]
return
}
// Put is a thread-safe write to a PeerMap.
func (pm *PeerMap) Put(p Peer) {
pm.Lock()
defer pm.Unlock()
maskedIP := pm.mask(p.IP)
_, exists := pm.Peers[maskedIP]
if !exists {
pm.Peers[maskedIP] = make(map[PeerKey]Peer)
}
_, exists = pm.Peers[maskedIP][p.Key()]
if !exists {
atomic.AddInt32(&(pm.Size), 1)
}
pm.Peers[maskedIP][p.Key()] = p
}
// Delete is a thread-safe delete from a PeerMap.
func (pm *PeerMap) Delete(pk PeerKey) {
pm.Lock()
defer pm.Unlock()
maskedIP := pm.mask(pk.IP())
_, exists := pm.Peers[maskedIP][pk]
if exists {
atomic.AddInt32(&(pm.Size), -1)
delete(pm.Peers[maskedIP], pk)
}
}
// Len returns the number of peers within a PeerMap.
func (pm *PeerMap) Len() int {
return int(atomic.LoadInt32(&pm.Size))
}
// Purge iterates over all of the peers within a PeerMap and deletes them if
// they are older than the provided time.
func (pm *PeerMap) Purge(unixtime int64) {
pm.Lock()
defer pm.Unlock()
for _, subnetmap := range pm.Peers {
for key, peer := range subnetmap {
if peer.LastAnnounce <= unixtime {
atomic.AddInt32(&(pm.Size), -1)
delete(subnetmap, key)
if pm.Seeders {
stats.RecordPeerEvent(stats.ReapedSeed, peer.HasIPv6())
} else {
stats.RecordPeerEvent(stats.ReapedLeech, peer.HasIPv6())
}
}
}
}
}
// AppendPeers adds peers to given IPv4 or IPv6 lists.
func (pm *PeerMap) AppendPeers(ipv4s, ipv6s PeerList, ann *Announce, wanted int) (PeerList, PeerList) {
maskedIP := pm.mask(ann.Peer.IP)
pm.RLock()
defer pm.RUnlock()
count := 0
// Attempt to append all the peers in the same subnet.
for _, peer := range pm.Peers[maskedIP] {
if count >= wanted {
break
} else if peersEquivalent(&peer, ann.Peer) {
continue
} else {
count += AppendPeer(&ipv4s, &ipv6s, ann, &peer)
}
}
// Add any more peers out of the other subnets.
for subnet, peers := range pm.Peers {
if subnet == maskedIP {
continue
} else {
for _, peer := range peers {
if count >= wanted {
break
} else if peersEquivalent(&peer, ann.Peer) {
continue
} else {
count += AppendPeer(&ipv4s, &ipv6s, ann, &peer)
}
}
}
}
return ipv4s, ipv6s
}
// AppendPeer adds a peer to its corresponding peerlist.
func AppendPeer(ipv4s, ipv6s *PeerList, ann *Announce, peer *Peer) int {
if ann.HasIPv6() && peer.HasIPv6() {
*ipv6s = append(*ipv6s, *peer)
return 1
} else if ann.Config.RespectAF && ann.HasIPv4() && peer.HasIPv4() {
*ipv4s = append(*ipv4s, *peer)
return 1
} else if !ann.Config.RespectAF && peer.HasIPv4() {
*ipv4s = append(*ipv4s, *peer)
return 1
}
return 0
}
// peersEquivalent checks if two peers represent the same entity.
func peersEquivalent(a, b *Peer) bool {
return a.ID == b.ID
}

View file

@ -1,28 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package tracker
import (
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
// HandleScrape encapsulates all the logic of handling a BitTorrent client's
// scrape without being coupled to any transport protocol.
func (tkr *Tracker) HandleScrape(scrape *models.Scrape, w Writer) (err error) {
var torrents []*models.Torrent
for _, infohash := range scrape.Infohashes {
torrent, err := tkr.FindTorrent(infohash)
if err != nil {
return err
}
torrents = append(torrents, torrent)
}
stats.RecordEvent(stats.Scrape)
return w.WriteScrape(&models.ScrapeResponse{
Files: torrents,
})
}

View file

@ -1,274 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package tracker
import (
"hash/fnv"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
type Torrents struct {
torrents map[string]*models.Torrent
sync.RWMutex
}
type Storage struct {
shards []Torrents
size int32
clients map[string]bool
clientsM sync.RWMutex
}
func NewStorage(cfg *config.Config) *Storage {
s := &Storage{
shards: make([]Torrents, cfg.TorrentMapShards),
clients: make(map[string]bool),
}
for i := range s.shards {
s.shards[i].torrents = make(map[string]*models.Torrent)
}
return s
}
func (s *Storage) Len() int {
return int(atomic.LoadInt32(&s.size))
}
func (s *Storage) getShardIndex(infohash string) uint32 {
idx := fnv.New32()
idx.Write([]byte(infohash))
return idx.Sum32() % uint32(len(s.shards))
}
func (s *Storage) getTorrentShard(infohash string, readonly bool) *Torrents {
shardindex := s.getShardIndex(infohash)
if readonly {
s.shards[shardindex].RLock()
} else {
s.shards[shardindex].Lock()
}
return &s.shards[shardindex]
}
func (s *Storage) TouchTorrent(infohash string) error {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return models.ErrTorrentDNE
}
torrent.LastAction = time.Now().Unix()
return nil
}
func (s *Storage) FindTorrent(infohash string) (*models.Torrent, error) {
shard := s.getTorrentShard(infohash, true)
defer shard.RUnlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return nil, models.ErrTorrentDNE
}
torrentCopy := *torrent
return &torrentCopy, nil
}
func (s *Storage) PutTorrent(torrent *models.Torrent) {
shard := s.getTorrentShard(torrent.Infohash, false)
defer shard.Unlock()
_, exists := shard.torrents[torrent.Infohash]
if !exists {
atomic.AddInt32(&s.size, 1)
}
torrentCopy := *torrent
shard.torrents[torrent.Infohash] = &torrentCopy
}
func (s *Storage) DeleteTorrent(infohash string) {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
if _, exists := shard.torrents[infohash]; exists {
atomic.AddInt32(&s.size, -1)
delete(shard.torrents, infohash)
}
}
func (s *Storage) IncrementTorrentSnatches(infohash string) error {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return models.ErrTorrentDNE
}
torrent.Snatches++
return nil
}
func (s *Storage) PutLeecher(infohash string, p *models.Peer) error {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return models.ErrTorrentDNE
}
torrent.Leechers.Put(*p)
return nil
}
func (s *Storage) DeleteLeecher(infohash string, p *models.Peer) error {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return models.ErrTorrentDNE
}
torrent.Leechers.Delete(p.Key())
return nil
}
func (s *Storage) PutSeeder(infohash string, p *models.Peer) error {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return models.ErrTorrentDNE
}
torrent.Seeders.Put(*p)
return nil
}
func (s *Storage) DeleteSeeder(infohash string, p *models.Peer) error {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return models.ErrTorrentDNE
}
torrent.Seeders.Delete(p.Key())
return nil
}
func (s *Storage) PurgeInactiveTorrent(infohash string) error {
shard := s.getTorrentShard(infohash, false)
defer shard.Unlock()
torrent, exists := shard.torrents[infohash]
if !exists {
return models.ErrTorrentDNE
}
if torrent.PeerCount() == 0 {
atomic.AddInt32(&s.size, -1)
delete(shard.torrents, infohash)
}
return nil
}
func (s *Storage) PurgeInactivePeers(purgeEmptyTorrents bool, before time.Time) error {
unixtime := before.Unix()
// Build a list of keys to process.
index := 0
maxkeys := s.Len()
keys := make([]string, maxkeys)
for i := range s.shards {
shard := &s.shards[i]
shard.RLock()
for infohash := range shard.torrents {
keys[index] = infohash
index++
if index >= maxkeys {
break
}
}
shard.RUnlock()
if index >= maxkeys {
break
}
}
// Process the keys while allowing other goroutines to run.
for _, infohash := range keys {
runtime.Gosched()
shard := s.getTorrentShard(infohash, false)
torrent := shard.torrents[infohash]
if torrent == nil {
// The torrent has already been deleted since keys were computed.
shard.Unlock()
continue
}
torrent.Seeders.Purge(unixtime)
torrent.Leechers.Purge(unixtime)
peers := torrent.PeerCount()
shard.Unlock()
if purgeEmptyTorrents && peers == 0 {
s.PurgeInactiveTorrent(infohash)
stats.RecordEvent(stats.ReapedTorrent)
}
}
return nil
}
func (s *Storage) ClientApproved(peerID string) error {
s.clientsM.RLock()
defer s.clientsM.RUnlock()
_, exists := s.clients[peerID]
if !exists {
return models.ErrClientUnapproved
}
return nil
}
func (s *Storage) PutClient(peerID string) {
s.clientsM.Lock()
defer s.clientsM.Unlock()
s.clients[peerID] = true
}
func (s *Storage) DeleteClient(peerID string) {
s.clientsM.Lock()
defer s.clientsM.Unlock()
delete(s.clients, peerID)
}
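// exampleStorageUsage is a minimal illustrative sketch (hypothetical helper,
// not referenced elsewhere): the typical write path registers a torrent once
// and then attaches peers to it under its infohash.
func exampleStorageUsage(s *Storage, t *models.Torrent, p *models.Peer) error {
	s.PutTorrent(t)
	if err := s.PutLeecher(t.Infohash, p); err != nil {
		return err
	}
	return s.TouchTorrent(t.Infohash)
}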

View file

@ -1,109 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package tracker provides a generic interface for manipulating a
// BitTorrent tracker's fast-moving data.
package tracker
import (
"sync"
"time"
"github.com/golang/glog"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/tracker/models"
)
// Tracker represents the logic necessary to service BitTorrent announces,
// independently of the underlying data transports used.
type Tracker struct {
Config *config.Config
jwkSet jwkSet
shuttingDown chan struct{}
shutdownWG sync.WaitGroup
*Storage
}
// New creates a new Tracker, and opens any necessary connections.
// Maintenance routines are automatically spawned in the background.
func New(cfg *config.Config) (*Tracker, error) {
tkr := &Tracker{
Config: cfg,
Storage: NewStorage(cfg),
shuttingDown: make(chan struct{}),
}
glog.Info("Starting garbage collection goroutine")
tkr.shutdownWG.Add(1)
go tkr.purgeInactivePeers(
cfg.PurgeInactiveTorrents,
time.Duration(float64(cfg.MinAnnounce.Duration)*cfg.ReapRatio),
cfg.ReapInterval.Duration,
)
if tkr.Config.JWKSetURI != "" {
glog.Info("Starting JWK Set update goroutine")
tkr.shutdownWG.Add(1)
go tkr.updateJWKSetForever()
}
if cfg.ClientWhitelistEnabled {
tkr.LoadApprovedClients(cfg.ClientWhitelist)
}
return tkr, nil
}
// Close gracefully shuts down a Tracker by closing any database connections.
func (tkr *Tracker) Close() error {
close(tkr.shuttingDown)
tkr.shutdownWG.Wait()
return nil
}
// LoadApprovedClients loads a list of client IDs into the tracker's storage.
func (tkr *Tracker) LoadApprovedClients(clients []string) {
for _, client := range clients {
tkr.PutClient(client)
}
}
// Writer serializes a tracker's responses, and is implemented for each
// response transport used by the tracker. Only one of these may be called
// per request, and only once.
//
// Note: data passed into any of these functions will not contain sensitive
// information, so it may be passed back to the client freely.
type Writer interface {
WriteError(err error) error
WriteAnnounce(*models.AnnounceResponse) error
WriteScrape(*models.ScrapeResponse) error
}
// purgeInactivePeers periodically walks the torrent database and removes
// peers that haven't announced recently.
func (tkr *Tracker) purgeInactivePeers(purgeEmptyTorrents bool, threshold, interval time.Duration) {
defer tkr.shutdownWG.Done()
for {
select {
case <-tkr.shuttingDown:
return
case <-time.After(interval):
before := time.Now().Add(-threshold)
glog.V(0).Infof("Purging peers with no announces since %s", before)
err := tkr.PurgeInactivePeers(purgeEmptyTorrents, before)
if err != nil {
glog.Errorf("Error purging torrents: %s", err)
}
}
}
}

View file

@ -1,87 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package udp
import (
"bytes"
"encoding/binary"
"net"
"testing"
"github.com/chihaya/chihaya/config"
)
func requestAnnounce(sock *net.UDPConn, connID []byte, hash string) ([]byte, error) {
txID := makeTransactionID()
peerID := []byte("-UT2210-b4a2h9a9f5c4")
var request []byte
request = append(request, connID...)
request = append(request, announceAction...)
request = append(request, txID...)
request = append(request, []byte(hash)...)
request = append(request, peerID...)
request = append(request, make([]byte, 8)...) // Downloaded
request = append(request, make([]byte, 8)...) // Left
request = append(request, make([]byte, 8)...) // Uploaded
request = append(request, make([]byte, 4)...) // Event
request = append(request, make([]byte, 4)...) // IP
request = append(request, make([]byte, 4)...) // Key
request = append(request, make([]byte, 4)...) // NumWant
request = append(request, make([]byte, 2)...) // Port
return doRequest(sock, request, txID)
}
func TestAnnounce(t *testing.T) {
srv, done, err := setupTracker(&config.DefaultConfig)
if err != nil {
t.Fatal(err)
}
_, sock, err := setupSocket()
if err != nil {
t.Fatal(err)
}
connID, err := requestConnectionID(sock)
if err != nil {
t.Fatal(err)
}
announce, err := requestAnnounce(sock, connID, "aaaaaaaaaaaaaaaaaaaa")
if err != nil {
t.Fatal(err)
}
// Parse the response.
var action, txID, interval, leechers, seeders uint32
buf := bytes.NewReader(announce)
binary.Read(buf, binary.BigEndian, &action)
binary.Read(buf, binary.BigEndian, &txID)
binary.Read(buf, binary.BigEndian, &interval)
binary.Read(buf, binary.BigEndian, &leechers)
binary.Read(buf, binary.BigEndian, &seeders)
if action != uint32(announceActionID) {
t.Fatal("expected announce action")
}
if interval != uint32(config.DefaultConfig.Announce.Seconds()) {
t.Fatal("incorrect interval")
}
if leechers != uint32(0) {
t.Fatal("incorrect leecher count")
}
// We're the only seeder.
if seeders != uint32(1) {
t.Fatal("incorrect seeder count")
}
srv.Stop()
<-done
}

View file

@ -1,90 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package udp
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"net"
)
// ConnectionIDGenerator represents the logic to generate 64-bit UDP
// connection IDs from peer IP addresses.
type ConnectionIDGenerator struct {
iv, iv2 []byte
block cipher.Block
}
// NewConnectionIDGenerator creates a ConnectionIDGenerator and generates its
// AES key and first initialization vector.
func NewConnectionIDGenerator() (gen *ConnectionIDGenerator, err error) {
gen = &ConnectionIDGenerator{}
key := make([]byte, 16)
_, err = rand.Read(key)
if err != nil {
return
}
gen.block, err = aes.NewCipher(key)
if err != nil {
return
}
err = gen.NewIV()
return
}
// Generate returns the 64-bit connection ID for an IP
func (g *ConnectionIDGenerator) Generate(ip net.IP) []byte {
return g.generate(ip, g.iv)
}
func (g *ConnectionIDGenerator) generate(ip net.IP, iv []byte) []byte {
for len(ip) < 8 {
		ip = append(ip, ip...) // Repeat the address until there are at least 8 bytes.
}
ct := make([]byte, 16)
stream := cipher.NewCFBDecrypter(g.block, iv)
stream.XORKeyStream(ct, ip)
for i := len(ip) - 1; i >= 8; i-- {
ct[i-8] ^= ct[i]
}
return ct[:8]
}
// Matches checks if the given connection ID matches an IP with the current or
// previous initialization vectors.
func (g *ConnectionIDGenerator) Matches(id []byte, ip net.IP) bool {
if expected := g.generate(ip, g.iv); bytes.Equal(id, expected) {
return true
}
if iv2 := g.iv2; iv2 != nil {
if expected := g.generate(ip, iv2); bytes.Equal(id, expected) {
return true
}
}
return false
}
// NewIV generates a new initialization vector and rotates the current one.
func (g *ConnectionIDGenerator) NewIV() error {
newiv := make([]byte, 16)
if _, err := rand.Read(newiv); err != nil {
return err
}
g.iv2 = g.iv
g.iv = newiv
return nil
}
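// exampleIVRotation is a minimal illustrative sketch (hypothetical helper, not
// referenced elsewhere): IDs issued under the current IV continue to match for
// one rotation, so clients with in-flight connection IDs survive a NewIV call.
func exampleIVRotation(ip net.IP) (bool, error) {
	gen, err := NewConnectionIDGenerator()
	if err != nil {
		return false, err
	}
	id := gen.Generate(ip)
	if err := gen.NewIV(); err != nil {
		return false, err
	}
	return gen.Matches(id, ip), nil // true: the previous IV is still accepted
}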

View file

@ -1,70 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package udp
import (
"bytes"
"net"
"testing"
)
func TestInitReturnsNoError(t *testing.T) {
if _, err := NewConnectionIDGenerator(); err != nil {
t.Error("Init returned", err)
}
}
func testGenerateConnectionID(t *testing.T, ip net.IP) {
gen, _ := NewConnectionIDGenerator()
id1 := gen.Generate(ip)
id2 := gen.Generate(ip)
if !bytes.Equal(id1, id2) {
t.Errorf("Connection ID mismatch: %x != %x", id1, id2)
}
if len(id1) != 8 {
t.Errorf("Connection ID had length: %d != 8", len(id1))
}
if bytes.Count(id1, []byte{0}) == 8 {
t.Errorf("Connection ID was 0")
}
}
func TestGenerateConnectionIDIPv4(t *testing.T) {
testGenerateConnectionID(t, net.ParseIP("192.168.1.123").To4())
}
func TestGenerateConnectionIDIPv6(t *testing.T) {
testGenerateConnectionID(t, net.ParseIP("1:2:3:4::5:6"))
}
func TestMatchesWorksWithPreviousIV(t *testing.T) {
gen, _ := NewConnectionIDGenerator()
ip := net.ParseIP("192.168.1.123").To4()
id1 := gen.Generate(ip)
if !gen.Matches(id1, ip) {
t.Errorf("Connection ID mismatch for current IV")
}
gen.NewIV()
if !gen.Matches(id1, ip) {
t.Errorf("Connection ID mismatch for previous IV")
}
id2 := gen.Generate(ip)
gen.NewIV()
if gen.Matches(id1, ip) {
t.Errorf("Connection ID matched for discarded IV")
}
if !gen.Matches(id2, ip) {
t.Errorf("Connection ID mismatch for previous IV")
}
}

View file

@ -1,267 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package udp
import (
"bytes"
"encoding/binary"
"errors"
"net"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
const (
connectActionID uint32 = iota
announceActionID
scrapeActionID
errorActionID
announceDualStackActionID
)
// Option-Types described in BEP41 and BEP45.
const (
optionEndOfOptions byte = 0x0
optionNOP = 0x1
optionURLData = 0x2
optionIPv6 = 0x3
)
var (
// initialConnectionID is the magic initial connection ID specified by BEP 15.
initialConnectionID = []byte{0, 0, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80}
// emptyIPs are the value of an IP field that has been left blank.
emptyIPv4 = []byte{0, 0, 0, 0}
emptyIPv6 = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
// eventIDs map IDs to event names.
eventIDs = []string{
"",
"completed",
"started",
"stopped",
}
errMalformedPacket = models.ProtocolError("malformed packet")
errMalformedIP = models.ProtocolError("malformed IP address")
errMalformedEvent = models.ProtocolError("malformed event ID")
errUnknownAction = models.ProtocolError("unknown action ID")
errBadConnectionID = errors.New("bad connection ID")
)
// handleTorrentError writes err to w if err is a models.ClientError.
func handleTorrentError(err error, w *Writer) {
if err == nil {
return
}
if models.IsPublicError(err) {
w.WriteError(err)
stats.RecordEvent(stats.ClientError)
}
}
// handlePacket decodes and processes one UDP request, returning the response.
func (s *Server) handlePacket(packet []byte, addr *net.UDPAddr) (response []byte, actionName string, err error) {
if len(packet) < 16 {
		// Malformed: no valid client packet is shorter than 16 bytes.
// We explicitly return nothing in case this is a DoS attempt.
err = errMalformedPacket
return
}
connID := packet[0:8]
action := binary.BigEndian.Uint32(packet[8:12])
transactionID := packet[12:16]
writer := &Writer{
buf: new(bytes.Buffer),
connectionID: connID,
transactionID: transactionID,
}
defer func() { response = writer.buf.Bytes() }()
if action != 0 && !s.connIDGen.Matches(connID, addr.IP) {
err = errBadConnectionID
writer.WriteError(err)
return
}
switch action {
case connectActionID:
actionName = "connect"
if !bytes.Equal(connID, initialConnectionID) {
err = errMalformedPacket
return
}
writer.writeHeader(0)
writer.buf.Write(s.connIDGen.Generate(addr.IP))
case announceActionID:
actionName = "announce"
var ann *models.Announce
ann, err = s.newAnnounce(packet, addr.IP)
if err == nil {
err = s.tracker.HandleAnnounce(ann, writer)
}
handleTorrentError(err, writer)
case scrapeActionID:
actionName = "scrape"
var scrape *models.Scrape
scrape, err = s.newScrape(packet)
if err == nil {
err = s.tracker.HandleScrape(scrape, writer)
}
handleTorrentError(err, writer)
default:
err = errUnknownAction
}
return
}
// newAnnounce decodes one announce packet, returning a models.Announce.
func (s *Server) newAnnounce(packet []byte, ip net.IP) (*models.Announce, error) {
if len(packet) < 98 {
return nil, errMalformedPacket
}
infohash := packet[16:36]
peerID := packet[36:56]
downloaded := binary.BigEndian.Uint64(packet[56:64])
left := binary.BigEndian.Uint64(packet[64:72])
uploaded := binary.BigEndian.Uint64(packet[72:80])
eventID := packet[83]
if eventID > 3 {
return nil, errMalformedEvent
}
ipv4bytes := packet[84:88]
if s.config.AllowIPSpoofing && !bytes.Equal(ipv4bytes, emptyIPv4) {
		// The field holds four raw address bytes, not text; copy them out of
		// the pooled packet buffer.
		ip = append(net.IP(nil), ipv4bytes...)
}
if ip == nil {
return nil, errMalformedIP
} else if ipv4 := ip.To4(); ipv4 != nil {
ip = ipv4
}
numWant := binary.BigEndian.Uint32(packet[92:96])
port := binary.BigEndian.Uint16(packet[96:98])
announce := &models.Announce{
Config: s.config,
Downloaded: downloaded,
Event: eventIDs[eventID],
IPv4: models.Endpoint{
IP: ip,
Port: port,
},
Infohash: string(infohash),
Left: left,
NumWant: int(numWant),
PeerID: string(peerID),
Uploaded: uploaded,
}
if err := s.handleOptionalParameters(packet, announce); err != nil {
return nil, err
}
return announce, nil
}
// handleOptionalParameters parses the optional parameters as described in BEP41
// and updates an announce with the values parsed.
func (s *Server) handleOptionalParameters(packet []byte, announce *models.Announce) error {
if len(packet) > 98 {
optionStartIndex := 98
for optionStartIndex < len(packet)-1 {
option := packet[optionStartIndex]
switch option {
case optionEndOfOptions:
return nil
case optionNOP:
optionStartIndex++
case optionURLData:
if optionStartIndex+1 > len(packet)-1 {
return errMalformedPacket
}
length := int(packet[optionStartIndex+1])
if optionStartIndex+1+length > len(packet)-1 {
return errMalformedPacket
}
// TODO: Actually parse the URL Data as described in BEP41.
optionStartIndex += 1 + length
case optionIPv6:
if optionStartIndex+19 > len(packet)-1 {
return errMalformedPacket
}
ipv6bytes := packet[optionStartIndex+1 : optionStartIndex+17]
if s.config.AllowIPSpoofing && !bytes.Equal(ipv6bytes, emptyIPv6) {
					// The option carries 16 raw address bytes, not text; copy
					// them out of the pooled packet buffer.
					announce.IPv6.IP = append(net.IP(nil), ipv6bytes...)
announce.IPv6.Port = binary.BigEndian.Uint16(packet[optionStartIndex+17 : optionStartIndex+19])
if announce.IPv6.IP == nil {
return errMalformedIP
}
}
optionStartIndex += 19
default:
return nil
}
}
}
	// There were no optional parameters to parse.
return nil
}
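// exampleIPv6Option is a minimal illustrative sketch (hypothetical helper, not
// referenced elsewhere): the IPv6 option handled above is encoded as the option
// ID followed by 16 raw address bytes and a 2-byte big-endian port, assuming
// ip is a valid address.
func exampleIPv6Option(packet []byte, ip net.IP, port uint16) []byte {
	packet = append(packet, optionIPv6)
	packet = append(packet, ip.To16()...)
	return append(packet, byte(port>>8), byte(port))
}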
// newScrape decodes one announce packet, returning a models.Scrape.
func (s *Server) newScrape(packet []byte) (*models.Scrape, error) {
if len(packet) < 36 {
return nil, errMalformedPacket
}
var infohashes []string
packet = packet[16:]
if len(packet)%20 != 0 {
return nil, errMalformedPacket
}
for len(packet) >= 20 {
infohash := packet[:20]
infohashes = append(infohashes, string(infohash))
packet = packet[20:]
}
return &models.Scrape{
Config: s.config,
Infohashes: infohashes,
}, nil
}

View file

@ -1,77 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package udp
import (
"bytes"
"fmt"
"net"
"testing"
"github.com/chihaya/chihaya/config"
)
func doRequest(sock *net.UDPConn, request, txID []byte) ([]byte, error) {
response := make([]byte, 1024)
n, err := sendRequest(sock, request, response)
if err != nil {
return nil, err
}
if !bytes.Equal(response[4:8], txID) {
return nil, fmt.Errorf("transaction ID mismatch")
}
return response[:n], nil
}
func requestScrape(sock *net.UDPConn, connID []byte, hashes []string) ([]byte, error) {
txID := makeTransactionID()
var request []byte
request = append(request, connID...)
request = append(request, scrapeAction...)
request = append(request, txID...)
for _, hash := range hashes {
request = append(request, []byte(hash)...)
}
return doRequest(sock, request, txID)
}
func TestScrapeEmpty(t *testing.T) {
srv, done, err := setupTracker(&config.DefaultConfig)
if err != nil {
t.Fatal(err)
}
_, sock, err := setupSocket()
if err != nil {
t.Fatal(err)
}
connID, err := requestConnectionID(sock)
if err != nil {
t.Fatal(err)
}
scrape, err := requestScrape(sock, connID, []string{"aaaaaaaaaaaaaaaaaaaa"})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(scrape[:4], errorAction) {
t.Error("expected error response")
}
if string(scrape[8:]) != "torrent does not exist\000" {
t.Error("expected torrent to not exist")
}
srv.Stop()
<-done
}

View file

@ -1,148 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package udp implements a BitTorrent tracker over the UDP protocol as per
// BEP 15.
package udp
import (
"errors"
"net"
"sync"
"time"
"github.com/golang/glog"
"github.com/pushrax/bufferpool"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/tracker"
)
// Server represents a UDP torrent tracker.
type Server struct {
config *config.Config
tracker *tracker.Tracker
sock *net.UDPConn
connIDGen *ConnectionIDGenerator
closing chan struct{}
booting chan struct{}
wg sync.WaitGroup
}
func (s *Server) serve() error {
if s.sock != nil {
return errors.New("server already booted")
}
udpAddr, err := net.ResolveUDPAddr("udp", s.config.UDPConfig.ListenAddr)
if err != nil {
close(s.booting)
return err
}
sock, err := net.ListenUDP("udp", udpAddr)
if err != nil {
close(s.booting)
return err
}
defer sock.Close()
if s.config.UDPConfig.ReadBufferSize > 0 {
sock.SetReadBuffer(s.config.UDPConfig.ReadBufferSize)
}
pool := bufferpool.New(1000, 2048)
s.sock = sock
close(s.booting)
for {
select {
case <-s.closing:
return nil
default:
}
buffer := pool.TakeSlice()
sock.SetReadDeadline(time.Now().Add(time.Second))
n, addr, err := sock.ReadFromUDP(buffer)
if err != nil {
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
pool.GiveSlice(buffer)
continue
}
return err
}
s.wg.Add(1)
go func() {
defer s.wg.Done()
start := time.Now()
response, action, err := s.handlePacket(buffer[:n], addr)
defer pool.GiveSlice(buffer)
duration := time.Since(start)
if len(response) > 0 {
sock.WriteToUDP(response, addr)
}
if glog.V(2) {
if err != nil {
glog.Infof("[UDP - %9s] %s %s (%s)", duration, action, addr, err)
} else {
glog.Infof("[UDP - %9s] %s %s", duration, action, addr)
}
}
}()
}
}
// Serve runs a UDP server, blocking until the server has shut down.
func (s *Server) Serve() {
glog.V(0).Info("Starting UDP on ", s.config.UDPConfig.ListenAddr)
s.wg.Add(1)
go func() {
defer s.wg.Done()
for {
select {
case <-s.closing:
return
case <-time.After(time.Hour):
s.connIDGen.NewIV()
}
}
}()
if err := s.serve(); err != nil {
glog.Errorf("Failed to run UDP server: %s", err.Error())
} else {
glog.Info("UDP server shut down cleanly")
}
}
// Stop cleanly shuts down the server.
func (s *Server) Stop() {
close(s.closing)
s.sock.SetReadDeadline(time.Now())
s.wg.Wait()
}
// NewServer returns a new UDP server for a given configuration and tracker.
func NewServer(cfg *config.Config, tkr *tracker.Tracker) *Server {
gen, err := NewConnectionIDGenerator()
if err != nil {
panic(err)
}
return &Server{
config: cfg,
tracker: tkr,
connIDGen: gen,
closing: make(chan struct{}),
booting: make(chan struct{}),
}
}
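// serveSketch is an illustrative sketch, not used by the tracker itself, of
// how NewServer, Serve, and Stop fit together; it assumes config.DefaultConfig
// is an acceptable configuration and elides real error handling.
func serveSketch() {
    tkr, err := tracker.New(&config.DefaultConfig)
    if err != nil {
        panic(err)
    }
    srv := NewServer(&config.DefaultConfig, tkr)
    go srv.Serve() // blocks until Stop is called
    <-srv.booting  // wait until the UDP socket has been bound
    // Announce and scrape traffic is handled while the server runs.
    srv.Stop() // unblocks the read loop and waits for in-flight handlers
}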

View file

@ -1,129 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package udp
import (
"bytes"
"crypto/rand"
"fmt"
"net"
"testing"
"time"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/tracker"
)
var (
connectAction = []byte{0, 0, 0, byte(connectActionID)}
announceAction = []byte{0, 0, 0, byte(announceActionID)}
scrapeAction = []byte{0, 0, 0, byte(scrapeActionID)}
errorAction = []byte{0, 0, 0, byte(errorActionID)}
)
func setupTracker(cfg *config.Config) (*Server, chan struct{}, error) {
tkr, err := tracker.New(cfg)
if err != nil {
return nil, nil, err
}
srv := NewServer(cfg, tkr)
done := make(chan struct{})
go func() {
if err := srv.serve(); err != nil {
panic(err)
}
close(done)
}()
<-srv.booting
return srv, done, nil
}
func setupSocket() (*net.UDPAddr, *net.UDPConn, error) {
srvAddr, err := net.ResolveUDPAddr("udp", config.DefaultConfig.UDPConfig.ListenAddr)
if err != nil {
return nil, nil, err
}
sock, err := net.DialUDP("udp", nil, srvAddr)
if err != nil {
return nil, nil, err
}
return srvAddr, sock, nil
}
func makeTransactionID() []byte {
out := make([]byte, 4)
rand.Read(out)
return out
}
func sendRequest(sock *net.UDPConn, request, response []byte) (int, error) {
if _, err := sock.Write(request); err != nil {
return 0, err
}
sock.SetReadDeadline(time.Now().Add(time.Second))
n, err := sock.Read(response)
if err != nil {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
return 0, fmt.Errorf("no response from tracker: %s", err)
}
}
return n, err
}
func requestConnectionID(sock *net.UDPConn) ([]byte, error) {
txID := makeTransactionID()
request := []byte{}
request = append(request, initialConnectionID...)
request = append(request, connectAction...)
request = append(request, txID...)
response := make([]byte, 1024)
n, err := sendRequest(sock, request, response)
if err != nil {
return nil, err
}
if n != 16 {
return nil, fmt.Errorf("packet length mismatch: %d != 16", n)
}
if !bytes.Equal(response[4:8], txID) {
return nil, fmt.Errorf("transaction ID mismatch")
}
if !bytes.Equal(response[0:4], connectAction) {
return nil, fmt.Errorf("action mismatch")
}
return response[8:16], nil
}
func TestRequestConnectionID(t *testing.T) {
srv, done, err := setupTracker(&config.DefaultConfig)
if err != nil {
t.Fatal(err)
}
_, sock, err := setupSocket()
if err != nil {
t.Fatal(err)
}
if _, err = requestConnectionID(sock); err != nil {
t.Fatal(err)
}
srv.Stop()
<-done
}

View file

@ -1,97 +0,0 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package udp
import (
"bytes"
"encoding/binary"
"time"
"github.com/chihaya/chihaya/tracker/models"
)
// Writer implements the tracker.Writer interface for the UDP protocol.
type Writer struct {
buf *bytes.Buffer
connectionID []byte
transactionID []byte
}
// WriteError writes the failure reason as a null-terminated string.
func (w *Writer) WriteError(err error) error {
w.writeHeader(errorActionID)
w.buf.WriteString(err.Error())
w.buf.WriteRune('\000')
return nil
}
// WriteAnnounce encodes an announce response by selecting the proper announce
// format based on the BitTorrent spec.
func (w *Writer) WriteAnnounce(resp *models.AnnounceResponse) (err error) {
if resp.Announce.HasIPv6() {
err = w.WriteAnnounceIPv6(resp)
} else {
err = w.WriteAnnounceIPv4(resp)
}
return
}
// WriteAnnounceIPv6 encodes an announce response according to BEP45.
func (w *Writer) WriteAnnounceIPv6(resp *models.AnnounceResponse) error {
w.writeHeader(announceDualStackActionID)
binary.Write(w.buf, binary.BigEndian, uint32(resp.Interval/time.Second))
binary.Write(w.buf, binary.BigEndian, uint32(resp.Incomplete))
binary.Write(w.buf, binary.BigEndian, uint32(resp.Complete))
binary.Write(w.buf, binary.BigEndian, uint32(len(resp.IPv4Peers)))
binary.Write(w.buf, binary.BigEndian, uint32(len(resp.IPv6Peers)))
for _, peer := range resp.IPv4Peers {
w.buf.Write(peer.IP)
binary.Write(w.buf, binary.BigEndian, peer.Port)
}
for _, peer := range resp.IPv6Peers {
w.buf.Write(peer.IP)
binary.Write(w.buf, binary.BigEndian, peer.Port)
}
return nil
}
// WriteAnnounceIPv4 encodes an announce response according to BEP15.
func (w *Writer) WriteAnnounceIPv4(resp *models.AnnounceResponse) error {
w.writeHeader(announceActionID)
binary.Write(w.buf, binary.BigEndian, uint32(resp.Interval/time.Second))
binary.Write(w.buf, binary.BigEndian, uint32(resp.Incomplete))
binary.Write(w.buf, binary.BigEndian, uint32(resp.Complete))
for _, peer := range resp.IPv4Peers {
w.buf.Write(peer.IP)
binary.Write(w.buf, binary.BigEndian, peer.Port)
}
return nil
}
// WriteScrape encodes a scrape response according to BEP15.
func (w *Writer) WriteScrape(resp *models.ScrapeResponse) error {
w.writeHeader(scrapeActionID)
for _, torrent := range resp.Files {
binary.Write(w.buf, binary.BigEndian, uint32(torrent.Seeders.Len()))
binary.Write(w.buf, binary.BigEndian, uint32(torrent.Snatches))
binary.Write(w.buf, binary.BigEndian, uint32(torrent.Leechers.Len()))
}
return nil
}
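// Each torrent in the scrape response written above occupies twelve bytes
// after the 8-byte header: seeders, completed (snatches), and leechers as
// 32-bit big-endian integers. decodeScrapeEntry is a hypothetical helper that
// only illustrates this layout.
func decodeScrapeEntry(resp []byte, i int) (seeders, completed, leechers uint32) {
    offset := 8 + i*12
    seeders = binary.BigEndian.Uint32(resp[offset : offset+4])
    completed = binary.BigEndian.Uint32(resp[offset+4 : offset+8])
    leechers = binary.BigEndian.Uint32(resp[offset+8 : offset+12])
    return
}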
// writeHeader writes the action and transaction ID to the response.
func (w *Writer) writeHeader(action uint32) {
binary.Write(w.buf, binary.BigEndian, action)
w.buf.Write(w.transactionID)
}
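// decodeAnnounceIPv4Response is a hypothetical helper sketching the layout
// produced by WriteAnnounceIPv4: an 8-byte header (action, transaction ID),
// three 32-bit counters, then one 6-byte entry per peer (IPv4 address and
// port). It assumes the net package is imported here and that the caller has
// already validated the action and transaction ID.
func decodeAnnounceIPv4Response(resp []byte) (interval, leechers, seeders uint32, peers []models.Endpoint) {
    interval = binary.BigEndian.Uint32(resp[8:12])
    leechers = binary.BigEndian.Uint32(resp[12:16])
    seeders = binary.BigEndian.Uint32(resp[16:20])
    for offset := 20; offset+6 <= len(resp); offset += 6 {
        peers = append(peers, models.Endpoint{
            IP:   net.IP(resp[offset : offset+4]),
            Port: binary.BigEndian.Uint16(resp[offset+4 : offset+6]),
        })
    }
    return
}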