Merge pull request #363 from jzelinskie/req-san

Request Sanitizer via library

commit 80558648d7
10 changed files with 253 additions and 165 deletions
@@ -78,18 +78,39 @@ func (i InfoHash) String() string {
 // AnnounceRequest represents the parsed parameters from an announce request.
 type AnnounceRequest struct {
 	Event    Event
 	InfoHash InfoHash
 	Compact  bool
-	NumWant    uint32
-	Left       uint64
-	Downloaded uint64
-	Uploaded   uint64
+	EventProvided   bool
+	NumWantProvided bool
+	IPProvided      bool
+	NumWant         uint32
+	Left            uint64
+	Downloaded      uint64
+	Uploaded        uint64

 	Peer
 	Params
 }
+
+// LogFields renders the current response as a set of log fields.
+func (r AnnounceRequest) LogFields() log.Fields {
+	return log.Fields{
+		"event":           r.Event,
+		"infoHash":        r.InfoHash,
+		"compact":         r.Compact,
+		"eventProvided":   r.EventProvided,
+		"numWantProvided": r.NumWantProvided,
+		"ipProvided":      r.IPProvided,
+		"numWant":         r.NumWant,
+		"left":            r.Left,
+		"downloaded":      r.Downloaded,
+		"uploaded":        r.Uploaded,
+		"peer":            r.Peer,
+		"params":          r.Params,
+	}
+}

 // AnnounceResponse represents the parameters used to create an announce
 // response.
 type AnnounceResponse struct {

@@ -102,15 +123,15 @@ type AnnounceResponse struct {
 	IPv6Peers []Peer
 }

-// LogFields renders the current response as a set of Logrus fields.
-func (ar AnnounceResponse) LogFields() log.Fields {
+// LogFields renders the current response as a set of log fields.
+func (r AnnounceResponse) LogFields() log.Fields {
 	return log.Fields{
-		"compact":     ar.Compact,
-		"complete":    ar.Complete,
-		"interval":    ar.Interval,
-		"minInterval": ar.MinInterval,
-		"ipv4Peers":   ar.IPv4Peers,
-		"ipv6Peers":   ar.IPv6Peers,
+		"compact":     r.Compact,
+		"complete":    r.Complete,
+		"interval":    r.Interval,
+		"minInterval": r.MinInterval,
+		"ipv4Peers":   r.IPv4Peers,
+		"ipv6Peers":   r.IPv6Peers,
 	}
 }

@@ -121,6 +142,15 @@ type ScrapeRequest struct {
 	Params Params
 }

+// LogFields renders the current response as a set of log fields.
+func (r ScrapeRequest) LogFields() log.Fields {
+	return log.Fields{
+		"addressFamily": r.AddressFamily,
+		"infoHashes":    r.InfoHashes,
+		"params":        r.Params,
+	}
+}
+
 // ScrapeResponse represents the parameters used to create a scrape response.
 //
 // The Scrapes must be in the same order as the InfoHashes in the corresponding

@@ -147,6 +177,17 @@ type Scrape struct {
 // AddressFamily is the address family of an IP address.
 type AddressFamily uint8

+func (af AddressFamily) String() string {
+	switch af {
+	case IPv4:
+		return "IPv4"
+	case IPv6:
+		return "IPv6"
+	default:
+		panic("tried to print unknown AddressFamily")
+	}
+}
+
 // AddressFamily constants.
 const (
 	IPv4 AddressFamily = iota

@@ -159,6 +200,10 @@ type IP struct {
 	AddressFamily
 }

+func (ip IP) String() string {
+	return ip.IP.String()
+}
+
 // Peer represents the connection details of a peer that is returned in an
 // announce response.
 type Peer struct {
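As context for the hunks above, here is a small hypothetical sketch (not part of the diff) of how the new *Provided flags and LogFields are meant to be read together: a frontend records whether the client actually sent a value, and LogFields exposes both the values and the flags. The concrete values are invented.

package main

import (
	"fmt"

	"github.com/chihaya/chihaya/bittorrent"
)

func main() {
	// The client omitted both event and numwant, so the frontend leaves the
	// *Provided flags false and SanitizeAnnounce later fills in the defaults.
	req := bittorrent.AnnounceRequest{
		Event:           bittorrent.None,
		EventProvided:   false,
		NumWant:         0,
		NumWantProvided: false,
	}

	for k, v := range req.LogFields() {
		fmt.Printf("%s=%v\n", k, v)
	}
}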
bittorrent/sanitize.go (new file, 48 lines)

@@ -0,0 +1,48 @@
+package bittorrent
+
+import (
+	"net"
+
+	"github.com/chihaya/chihaya/pkg/log"
+)
+
+// ErrInvalidIP indicates an invalid IP for an Announce.
+var ErrInvalidIP = ClientError("invalid IP")
+
+// SanitizeAnnounce enforces a max and default NumWant and coerces the peer's
+// IP address into the proper format.
+func SanitizeAnnounce(r *AnnounceRequest, maxNumWant, defaultNumWant uint32) error {
+	if !r.NumWantProvided {
+		r.NumWant = defaultNumWant
+	} else if r.NumWant > maxNumWant {
+		r.NumWant = maxNumWant
+	}
+
+	if ip := r.Peer.IP.To4(); ip != nil {
+		r.Peer.IP.IP = ip
+		r.Peer.IP.AddressFamily = IPv4
+	} else if len(r.Peer.IP.IP) == net.IPv6len { // implies r.Peer.IP.To4() == nil
+		r.Peer.IP.AddressFamily = IPv6
+	} else {
+		return ErrInvalidIP
+	}
+
+	log.Debug("sanitized announce", r, log.Fields{
+		"maxNumWant":     maxNumWant,
+		"defaultNumWant": defaultNumWant,
+	})
+	return nil
+}
+
+// SanitizeScrape enforces a max number of infohashes for a single scrape
+// request.
+func SanitizeScrape(r *ScrapeRequest, maxScrapeInfoHashes uint32) error {
+	if len(r.InfoHashes) > int(maxScrapeInfoHashes) {
+		r.InfoHashes = r.InfoHashes[:maxScrapeInfoHashes]
+	}
+
+	log.Debug("sanitized scrape", r, log.Fields{
+		"maxScrapeInfoHashes": maxScrapeInfoHashes,
+	})
+	return nil
+}
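The sanitizers can also be exercised on their own. Below is a minimal hypothetical sketch (not part of this change) showing how SanitizeAnnounce clamps an over-large NumWant and classifies the peer's address family; the concrete IP and limits are made up, and maxNumWant/defaultNumWant mirror the new example config values.

package main

import (
	"fmt"
	"net"

	"github.com/chihaya/chihaya/bittorrent"
)

func main() {
	// numwant was provided by the client but exceeds the operator's limit.
	req := &bittorrent.AnnounceRequest{
		NumWant:         500,
		NumWantProvided: true,
		Peer: bittorrent.Peer{
			IP: bittorrent.IP{IP: net.ParseIP("192.0.2.7")}, // illustrative address
		},
	}

	if err := bittorrent.SanitizeAnnounce(req, 100, 50); err != nil {
		panic(err)
	}
	fmt.Println(req.NumWant, req.Peer.IP.AddressFamily) // 100 IPv4
}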
@@ -24,14 +24,6 @@ chihaya:
     # BitTorrent traffic.
     addr: "0.0.0.0:6881"

-    # When enabled, the IP address used to connect to the tracker will not
-    # override the value clients advertise as their IP address.
-    allow_ip_spoofing: false
-
-    # The HTTP Header containing the IP address of the client.
-    # This is only necessary if using a reverse proxy.
-    real_ip_header: "x-real-ip"
-
     # The path to the required files to listen via HTTPS.
     tls_cert_path: ""
     tls_key_path: ""

@@ -44,6 +36,23 @@ chihaya:
     # Disabling this should increase performance/decrease load.
     enable_request_timing: false

+    # When enabled, the IP address used to connect to the tracker will not
+    # override the value clients advertise as their IP address.
+    allow_ip_spoofing: false
+
+    # The HTTP Header containing the IP address of the client.
+    # This is only necessary if using a reverse proxy.
+    real_ip_header: "x-real-ip"
+
+    # The maximum number of peers returned for an individual request.
+    max_numwant: 100
+
+    # The default number of peers returned for an individual request.
+    default_numwant: 50
+
+    # The maximum number of infohashes that can be scraped in one request.
+    max_scrape_infohashes: 50
+
   # This block defines configuration for the tracker's UDP interface.
   # If you do not wish to run this, delete this section.
   udp:

@@ -51,10 +60,6 @@ chihaya:
     # BitTorrent traffic.
     addr: "0.0.0.0:6881"

-    # When enabled, the IP address used to connect to the tracker will not
-    # override the value clients advertise as their IP address.
-    allow_ip_spoofing: false
-
     # The leeway for a timestamp on a connection ID.
     max_clock_skew: 10s

@@ -65,6 +70,20 @@ chihaya:
     # Disabling this should increase performance/decrease load.
     enable_request_timing: false

+    # When enabled, the IP address used to connect to the tracker will not
+    # override the value clients advertise as their IP address.
+    allow_ip_spoofing: false
+
+    # The maximum number of peers returned for an individual request.
+    max_numwant: 100
+
+    # The default number of peers returned for an individual request.
+    default_numwant: 50
+
+    # The maximum number of infohashes that can be scraped in one request.
+    max_scrape_infohashes: 50
+
   # This block defines configuration used for the storage of peer data.
   storage:
     name: memory
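Because the HTTP frontend's Config now embeds ParseOptions with `yaml:",inline"` (see the parser changes below), the keys moved above land directly on the frontend config. The following hypothetical sketch (not part of the diff) shows that wiring, assuming the gopkg.in/yaml.v2 package the project already depends on; the YAML snippet is trimmed to the relevant keys.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"

	httpfrontend "github.com/chihaya/chihaya/frontend/http"
)

const raw = `
addr: "0.0.0.0:6881"
allow_ip_spoofing: false
real_ip_header: "x-real-ip"
max_numwant: 100
default_numwant: 50
max_scrape_infohashes: 50
`

func main() {
	var cfg httpfrontend.Config
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	// The inlined ParseOptions picks up the new keys, so they can be handed
	// straight to ParseAnnounce/ParseScrape via cfg.ParseOptions.
	fmt.Println(cfg.Addr, cfg.MaxNumWant, cfg.DefaultNumWant, cfg.MaxScrapeInfoHashes)
}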
@@ -21,9 +21,6 @@ func init() {
 	prometheus.MustRegister(promResponseDurationMilliseconds)
 }

-// ErrInvalidIP indicates an invalid IP.
-var ErrInvalidIP = bittorrent.ClientError("invalid IP")
-
 var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 	prometheus.HistogramOpts{
 		Name: "chihaya_http_response_duration_milliseconds",

@@ -65,11 +62,10 @@ type Config struct {
 	Addr                string        `yaml:"addr"`
 	ReadTimeout         time.Duration `yaml:"read_timeout"`
 	WriteTimeout        time.Duration `yaml:"write_timeout"`
-	AllowIPSpoofing     bool          `yaml:"allow_ip_spoofing"`
-	RealIPHeader        string        `yaml:"real_ip_header"`
 	TLSCertPath         string        `yaml:"tls_cert_path"`
 	TLSKeyPath          string        `yaml:"tls_key_path"`
 	EnableRequestTiming bool          `yaml:"enable_request_timing"`
+	ParseOptions        `yaml:",inline"`
 }

 // LogFields renders the current config as a set of Logrus fields.

@@ -78,11 +74,14 @@ func (cfg Config) LogFields() log.Fields {
 		"addr":         cfg.Addr,
 		"readTimeout":  cfg.ReadTimeout,
 		"writeTimeout": cfg.WriteTimeout,
-		"allowIPSpoofing": cfg.AllowIPSpoofing,
-		"realIPHeader":    cfg.RealIPHeader,
 		"tlsCertPath":         cfg.TLSCertPath,
 		"tlsKeyPath":          cfg.TLSKeyPath,
 		"enableRequestTiming": cfg.EnableRequestTiming,
+		"allowIPSpoofing":     cfg.AllowIPSpoofing,
+		"realIPHeader":        cfg.RealIPHeader,
+		"maxNumWant":          cfg.MaxNumWant,
+		"defaultNumWant":      cfg.DefaultNumWant,
+		"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
 	}
 }

@@ -219,7 +218,7 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr
 		}
 	}()

-	req, err := ParseAnnounce(r, f.RealIPHeader, f.AllowIPSpoofing)
+	req, err := ParseAnnounce(r, f.ParseOptions)
 	if err != nil {
 		WriteError(w, err)
 		return

@@ -258,7 +257,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou
 		}
 	}()

-	req, err := ParseScrape(r)
+	req, err := ParseScrape(r, f.ParseOptions)
 	if err != nil {
 		WriteError(w, err)
 		return

@@ -278,7 +277,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou
 		req.AddressFamily = bittorrent.IPv6
 	} else {
 		log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
-		WriteError(w, ErrInvalidIP)
+		WriteError(w, bittorrent.ErrInvalidIP)
 		return
 	}
 	af = new(bittorrent.AddressFamily)
@@ -7,12 +7,21 @@ import (
 	"github.com/chihaya/chihaya/bittorrent"
 )

-// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
+// ParseOptions is the configuration used to parse an Announce Request.
 //
-// If allowIPSpoofing is true, IPs provided via params will be used.
-// If realIPHeader is not empty string, the first value of the HTTP Header with
+// If AllowIPSpoofing is true, IPs provided via BitTorrent params will be used.
+// If RealIPHeader is not empty string, the value of the first HTTP Header with
 // that name will be used.
-func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (*bittorrent.AnnounceRequest, error) {
+type ParseOptions struct {
+	AllowIPSpoofing     bool   `yaml:"allow_ip_spoofing"`
+	RealIPHeader        string `yaml:"real_ip_header"`
+	MaxNumWant          uint32 `yaml:"max_numwant"`
+	DefaultNumWant      uint32 `yaml:"default_numwant"`
+	MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
+}
+
+// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
+func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
 	qp, err := bittorrent.ParseURLData(r.RequestURI)
 	if err != nil {
 		return nil, err

@@ -20,15 +29,23 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (

 	request := &bittorrent.AnnounceRequest{Params: qp}

-	eventStr, _ := qp.String("event")
-	request.Event, err = bittorrent.NewEvent(eventStr)
-	if err != nil {
-		return nil, bittorrent.ClientError("failed to provide valid client event")
+	// Attempt to parse the event from the request.
+	var eventStr string
+	eventStr, request.EventProvided = qp.String("event")
+	if request.EventProvided {
+		request.Event, err = bittorrent.NewEvent(eventStr)
+		if err != nil {
+			return nil, bittorrent.ClientError("failed to provide valid client event")
+		}
+	} else {
+		request.Event = bittorrent.None
 	}

+	// Determine if the client expects a compact response.
 	compactStr, _ := qp.String("compact")
 	request.Compact = compactStr != "" && compactStr != "0"

+	// Parse the infohash from the request.
 	infoHashes := qp.InfoHashes()
 	if len(infoHashes) < 1 {
 		return nil, bittorrent.ClientError("no info_hash parameter supplied")

@@ -38,6 +55,7 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
 	}
 	request.InfoHash = infoHashes[0]

+	// Parse the PeerID from the request.
 	peerID, ok := qp.String("peer_id")
 	if !ok {
 		return nil, bittorrent.ClientError("failed to parse parameter: peer_id")

@@ -47,43 +65,55 @@ func ParseAnnounce(r *http.Request, realIPHeader string, allowIPSpoofing bool) (
 	}
 	request.Peer.ID = bittorrent.PeerIDFromString(peerID)

+	// Determine the number of remaining bytes for the client.
 	request.Left, err = qp.Uint64("left")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: left")
 	}

+	// Determine the number of bytes downloaded by the client.
 	request.Downloaded, err = qp.Uint64("downloaded")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
 	}

+	// Determine the number of bytes shared by the client.
 	request.Uploaded, err = qp.Uint64("uploaded")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
 	}

+	// Determine the number of peers the client wants in the response.
 	numwant, err := qp.Uint64("numwant")
 	if err != nil && err != bittorrent.ErrKeyNotFound {
 		return nil, bittorrent.ClientError("failed to parse parameter: numwant")
 	}
+	// If there were no errors, the user actually provided the numwant.
+	request.NumWantProvided = err == nil
 	request.NumWant = uint32(numwant)

+	// Parse the port where the client is listening.
 	port, err := qp.Uint64("port")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: port")
 	}
 	request.Peer.Port = uint16(port)

-	request.Peer.IP.IP = requestedIP(r, qp, realIPHeader, allowIPSpoofing)
+	// Parse the IP address where the client is listening.
+	request.Peer.IP.IP, request.IPProvided = requestedIP(r, qp, opts)
 	if request.Peer.IP.IP == nil {
 		return nil, bittorrent.ClientError("failed to parse peer IP address")
 	}

+	if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
+		return nil, err
+	}
+
 	return request, nil
 }

 // ParseScrape parses an bittorrent.ScrapeRequest from an http.Request.
-func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) {
+func ParseScrape(r *http.Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
 	qp, err := bittorrent.ParseURLData(r.RequestURI)
 	if err != nil {
 		return nil, err

@@ -99,39 +129,35 @@ func ParseScrape(r *http.Request) (*bittorrent.ScrapeRequest, error) {
 		Params: qp,
 	}

+	if err := bittorrent.SanitizeScrape(request, opts.MaxScrapeInfoHashes); err != nil {
+		return nil, err
+	}
+
 	return request, nil
 }

 // requestedIP determines the IP address for a BitTorrent client request.
-//
-// If allowIPSpoofing is true, IPs provided via params will be used.
-// If realIPHeader is not empty string, the first value of the HTTP Header with
-// that name will be used.
-func requestedIP(r *http.Request, p bittorrent.Params, realIPHeader string, allowIPSpoofing bool) net.IP {
-	if allowIPSpoofing {
+func requestedIP(r *http.Request, p bittorrent.Params, opts ParseOptions) (ip net.IP, provided bool) {
+	if opts.AllowIPSpoofing {
 		if ipstr, ok := p.String("ip"); ok {
-			ip := net.ParseIP(ipstr)
-			return ip
+			return net.ParseIP(ipstr), true
 		}

 		if ipstr, ok := p.String("ipv4"); ok {
-			ip := net.ParseIP(ipstr)
-			return ip
+			return net.ParseIP(ipstr), true
 		}

 		if ipstr, ok := p.String("ipv6"); ok {
-			ip := net.ParseIP(ipstr)
-			return ip
+			return net.ParseIP(ipstr), true
 		}
 	}

-	if realIPHeader != "" {
-		if ips, ok := r.Header[realIPHeader]; ok && len(ips) > 0 {
-			ip := net.ParseIP(ips[0])
-			return ip
+	if opts.RealIPHeader != "" {
+		if ips, ok := r.Header[opts.RealIPHeader]; ok && len(ips) > 0 {
+			return net.ParseIP(ips[0]), false
 		}
 	}

 	host, _, _ := net.SplitHostPort(r.RemoteAddr)
-	return net.ParseIP(host)
+	return net.ParseIP(host), false
 }
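To see the new HTTP flow end to end, here is a small hypothetical sketch (not part of the diff) that feeds a synthetic announce through ParseAnnounce with a ParseOptions value. The query values and limits are invented, and httptest supplies the RemoteAddr that requestedIP falls back to.

package main

import (
	"fmt"
	"net/http/httptest"

	httpfrontend "github.com/chihaya/chihaya/frontend/http"
)

func main() {
	// Invented query values; info_hash and peer_id must be exactly 20 bytes.
	uri := "/announce?info_hash=aaaaaaaaaaaaaaaaaaaa&peer_id=-XX0000-000000000000" +
		"&left=0&downloaded=0&uploaded=0&port=6881&numwant=500"
	r := httptest.NewRequest("GET", uri, nil) // httptest fills in RemoteAddr (192.0.2.1:1234)

	opts := httpfrontend.ParseOptions{
		MaxNumWant:     100,
		DefaultNumWant: 50,
	}

	req, err := httpfrontend.ParseAnnounce(r, opts)
	if err != nil {
		panic(err)
	}
	// numwant=500 exceeds MaxNumWant, so SanitizeAnnounce clamps it to 100,
	// and the peer IP falls back to RemoteAddr since spoofing is disabled.
	fmt.Println(req.NumWant, req.Peer.IP)
}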
@@ -27,9 +27,6 @@ func init() {
 	prometheus.MustRegister(promResponseDurationMilliseconds)
 }

-// ErrInvalidIP indicates an invalid IP.
-var ErrInvalidIP = bittorrent.ClientError("invalid IP")
-
 var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 	prometheus.HistogramOpts{
 		Name: "chihaya_udp_response_duration_milliseconds",

@@ -71,8 +68,8 @@ type Config struct {
 	Addr                string        `yaml:"addr"`
 	PrivateKey          string        `yaml:"private_key"`
 	MaxClockSkew        time.Duration `yaml:"max_clock_skew"`
-	AllowIPSpoofing     bool          `yaml:"allow_ip_spoofing"`
 	EnableRequestTiming bool          `yaml:"enable_request_timing"`
+	ParseOptions        `yaml:",inline"`
 }

 // LogFields renders the current config as a set of Logrus fields.

@@ -81,8 +78,11 @@ func (cfg Config) LogFields() log.Fields {
 		"addr":         cfg.Addr,
 		"privateKey":   cfg.PrivateKey,
 		"maxClockSkew": cfg.MaxClockSkew,
-		"allowIPSpoofing": cfg.AllowIPSpoofing,
 		"enableRequestTiming": cfg.EnableRequestTiming,
+		"allowIPSpoofing":     cfg.AllowIPSpoofing,
+		"maxNumWant":          cfg.MaxNumWant,
+		"defaultNumWant":      cfg.DefaultNumWant,
+		"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
 	}
 }

@@ -279,7 +279,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
 		actionName = "announce"

 		var req *bittorrent.AnnounceRequest
-		req, err = ParseAnnounce(r, t.AllowIPSpoofing, actionID == announceV6ActionID)
+		req, err = ParseAnnounce(r, actionID == announceV6ActionID, t.ParseOptions)
 		if err != nil {
 			WriteError(w, txID, err)
 			return

@@ -303,7 +303,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
 		actionName = "scrape"

 		var req *bittorrent.ScrapeRequest
-		req, err = ParseScrape(r)
+		req, err = ParseScrape(r, t.ParseOptions)
 		if err != nil {
 			WriteError(w, txID, err)
 			return

@@ -315,7 +315,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
 		req.AddressFamily = bittorrent.IPv6
 	} else {
 		log.Error("udp: invalid IP: neither v4 nor v6", log.Fields{"IP": r.IP})
-		WriteError(w, txID, ErrInvalidIP)
+		WriteError(w, txID, bittorrent.ErrInvalidIP)
 		return
 	}
 	af = new(bittorrent.AddressFamily)
@@ -45,14 +45,21 @@
 	errUnknownOptionType = bittorrent.ClientError("unknown option type")
 )

+// ParseOptions is the configuration used to parse an Announce Request.
+//
+// If AllowIPSpoofing is true, IPs provided via params will be used.
+type ParseOptions struct {
+	AllowIPSpoofing     bool   `yaml:"allow_ip_spoofing"`
+	MaxNumWant          uint32 `yaml:"max_numwant"`
+	DefaultNumWant      uint32 `yaml:"default_numwant"`
+	MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
+}
+
 // ParseAnnounce parses an AnnounceRequest from a UDP request.
 //
-// If allowIPSpoofing is true, IPs provided via params will be used.
-//
-// If v6 is true the announce will be parsed as an IPv6 announce "the
-// opentracker way", see
+// If v6 is true, the announce is parsed the "opentracker way":
 // http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
-func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceRequest, error) {
+func ParseAnnounce(r Request, v6 bool, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
 	ipEnd := 84 + net.IPv4len
 	if v6 {
 		ipEnd = 84 + net.IPv6len

@@ -74,12 +81,14 @@ func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceReq
 	}

 	ip := r.IP
+	ipProvided := false
 	ipbytes := r.Packet[84:ipEnd]
-	if allowIPSpoofing {
+	if opts.AllowIPSpoofing {
 		// Make sure the bytes are copied to a new slice.
 		copy(ip, net.IP(ipbytes))
+		ipProvided = true
 	}
-	if !allowIPSpoofing && r.IP == nil {
+	if !opts.AllowIPSpoofing && r.IP == nil {
 		// We have no IP address to fallback on.
 		return nil, errMalformedIP
 	}

@@ -92,20 +101,29 @@ func ParseAnnounce(r Request, allowIPSpoofing, v6 bool) (*bittorrent.AnnounceReq
 		return nil, err
 	}

-	return &bittorrent.AnnounceRequest{
+	request := &bittorrent.AnnounceRequest{
 		Event:      eventIDs[eventID],
 		InfoHash:   bittorrent.InfoHashFromBytes(infohash),
 		NumWant:    uint32(numWant),
 		Left:       left,
 		Downloaded: downloaded,
 		Uploaded:   uploaded,
+		IPProvided:      ipProvided,
+		NumWantProvided: true,
+		EventProvided:   true,
 		Peer: bittorrent.Peer{
 			ID:   bittorrent.PeerIDFromBytes(peerID),
 			IP:   bittorrent.IP{IP: ip},
 			Port: port,
 		},
 		Params: params,
-	}, nil
+	}
+
+	if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
+		return nil, err
+	}
+
+	return request, nil
 }

 type buffer struct {

@@ -170,7 +188,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
 }

 // ParseScrape parses a ScrapeRequest from a UDP request.
-func ParseScrape(r Request) (*bittorrent.ScrapeRequest, error) {
+func ParseScrape(r Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
 	// If a scrape isn't at least 36 bytes long, it's malformed.
 	if len(r.Packet) < 36 {
 		return nil, errMalformedPacket

@@ -190,7 +208,11 @@ func ParseScrape(r Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
 		r.Packet = r.Packet[20:]
 	}

-	return &bittorrent.ScrapeRequest{
-		InfoHashes: infohashes,
-	}, nil
+	// Sanitize the request.
+	request := &bittorrent.ScrapeRequest{InfoHashes: infohashes}
+	if err := bittorrent.SanitizeScrape(request, opts.MaxScrapeInfoHashes); err != nil {
+		return nil, err
+	}
+
+	return request, nil
 }
@@ -2,8 +2,6 @@ package middleware

 import (
 	"context"
-	"errors"
-	"net"

 	"github.com/chihaya/chihaya/bittorrent"
 	"github.com/chihaya/chihaya/storage"

@@ -67,56 +65,6 @@ func (h *swarmInteractionHook) HandleScrape(ctx context.Context, _ *bittorrent.S
 	return ctx, nil
 }

-// ErrInvalidIP indicates an invalid IP for an Announce.
-var ErrInvalidIP = errors.New("invalid IP")
-
-// sanitizationHook enforces semantic assumptions about requests that may have
-// not been accounted for in a tracker frontend.
-//
-// The SanitizationHook performs the following checks:
-// - maxNumWant: Checks whether the numWant parameter of an announce is below
-//   a limit. Sets it to the limit if the value is higher.
-// - defaultNumWant: Checks whether the numWant parameter of an announce is
-//   zero. Sets it to the default if it is.
-// - IP sanitization: Checks whether the announcing Peer's IP address is either
-//   IPv4 or IPv6. Returns ErrInvalidIP if the address is neither IPv4 nor
-//   IPv6. Sets the Peer.AddressFamily field accordingly. Truncates IPv4
-//   addresses to have a length of 4 bytes.
-type sanitizationHook struct {
-	maxNumWant          uint32
-	defaultNumWant      uint32
-	maxScrapeInfoHashes uint32
-}
-
-func (h *sanitizationHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
-	if req.NumWant > h.maxNumWant {
-		req.NumWant = h.maxNumWant
-	}
-
-	if req.NumWant == 0 {
-		req.NumWant = h.defaultNumWant
-	}
-
-	if ip := req.Peer.IP.To4(); ip != nil {
-		req.Peer.IP.IP = ip
-		req.Peer.IP.AddressFamily = bittorrent.IPv4
-	} else if len(req.Peer.IP.IP) == net.IPv6len { // implies req.Peer.IP.To4() == nil
-		req.Peer.IP.AddressFamily = bittorrent.IPv6
-	} else {
-		return ctx, ErrInvalidIP
-	}
-
-	return ctx, nil
-}
-
-func (h *sanitizationHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
-	if len(req.InfoHashes) > int(h.maxScrapeInfoHashes) {
-		req.InfoHashes = req.InfoHashes[:h.maxScrapeInfoHashes]
-	}
-
-	return ctx, nil
-}
-
 type skipResponseHook struct{}

 // SkipResponseHookKey is a key for the context of an Announce or Scrape to
@@ -15,10 +15,7 @@ import (

 // Config holds the configuration common across all middleware.
 type Config struct {
 	AnnounceInterval time.Duration `yaml:"announce_interval"`
-	MaxNumWant          uint32 `yaml:"max_numwant"`
-	DefaultNumWant      uint32 `yaml:"default_numwant"`
-	MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
 }

 var _ frontend.TrackerLogic = &Logic{}

@@ -26,17 +23,12 @@ var _ frontend.TrackerLogic = &Logic{}
 // NewLogic creates a new instance of a TrackerLogic that executes the provided
 // middleware hooks.
 func NewLogic(cfg Config, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic {
-	l := &Logic{
+	return &Logic{
 		announceInterval: cfg.AnnounceInterval,
 		peerStore:        peerStore,
-		preHooks:         []Hook{&sanitizationHook{cfg.MaxNumWant, cfg.DefaultNumWant, cfg.MaxScrapeInfoHashes}},
+		preHooks:         append(preHooks, &responseHook{store: peerStore}),
 		postHooks:        append(postHooks, &swarmInteractionHook{store: peerStore}),
 	}
-
-	l.preHooks = append(l.preHooks, preHooks...)
-	l.preHooks = append(l.preHooks, &responseHook{store: peerStore})
-
-	return l
 }

 // Logic is an implementation of the TrackerLogic that functions by
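Since sanitization is no longer registered as an implicit pre-hook, constructing the tracker logic is now a plain composition of the caller's hooks. A minimal hypothetical sketch (not part of the diff) of the new wiring; the nil PeerStore and hook slices are placeholders:

package main

import (
	"time"

	"github.com/chihaya/chihaya/middleware"
	"github.com/chihaya/chihaya/storage"
)

func main() {
	// Placeholder; a real deployment would construct e.g. the memory PeerStore.
	var peerStore storage.PeerStore

	// MaxNumWant, DefaultNumWant, and MaxScrapeInfoHashes no longer live here;
	// they moved to the HTTP/UDP frontends' ParseOptions.
	cfg := middleware.Config{AnnounceInterval: 30 * time.Minute}

	logic := middleware.NewLogic(cfg, peerStore, nil /* preHooks */, nil /* postHooks */)
	_ = logic
}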
@@ -80,15 +80,4 @@ func BenchmarkHookOverhead(b *testing.B) {
 			benchHookListV6(b, nopHooks)
 		})
 	}
-
-	var sanHooks hookList
-	for i := 1; i < 4; i++ {
-		sanHooks = append(sanHooks, &sanitizationHook{maxNumWant: 50})
-		b.Run(fmt.Sprintf("%dsanitation-v4", i), func(b *testing.B) {
-			benchHookListV4(b, sanHooks)
-		})
-		b.Run(fmt.Sprintf("%dsanitation-v6", i), func(b *testing.B) {
-			benchHookListV6(b, sanHooks)
-		})
-	}
 }