pkg/log: create wrapper around logrus

This commit is contained in:
Leo Balduf 2017-06-20 14:58:44 +02:00
parent 153ad325b7
commit 8ed171b0ea
13 changed files with 211 additions and 77 deletions

View file

@ -7,7 +7,7 @@ import (
"net" "net"
"time" "time"
log "github.com/Sirupsen/logrus" "github.com/chihaya/chihaya/pkg/log"
) )
// PeerID represents a peer ID. // PeerID represents a peer ID.

View file

@ -6,7 +6,7 @@ import (
"strconv" "strconv"
"strings" "strings"
log "github.com/Sirupsen/logrus" "github.com/chihaya/chihaya/pkg/log"
) )
// Params is used to fetch (optional) request parameters from an Announce. // Params is used to fetch (optional) request parameters from an Announce.
@ -123,7 +123,7 @@ func parseQuery(query string) (q *QueryParams, err error) {
// But frontends record these errors to prometheus, which generates // But frontends record these errors to prometheus, which generates
// a lot of time series. // a lot of time series.
// We log it here for debugging instead. // We log it here for debugging instead.
log.WithFields(log.Fields{"error": err}).Debug("failed to unescape query param key") log.Debug("failed to unescape query param key", log.Err(err))
return nil, ErrInvalidQueryEscape return nil, ErrInvalidQueryEscape
} }
value, err = url.QueryUnescape(value) value, err = url.QueryUnescape(value)
@ -132,7 +132,7 @@ func parseQuery(query string) (q *QueryParams, err error) {
// But frontends record these errors to prometheus, which generates // But frontends record these errors to prometheus, which generates
// a lot of time series. // a lot of time series.
// We log it here for debugging instead. // We log it here for debugging instead.
log.WithFields(log.Fields{"error": err}).Debug("failed to unescape query param value") log.Debug("failed to unescape query param value", log.Err(err))
return nil, ErrInvalidQueryEscape return nil, ErrInvalidQueryEscape
} }

View file

@ -8,12 +8,13 @@ import (
"strings" "strings"
"syscall" "syscall"
log "github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/chihaya/chihaya/frontend/http" "github.com/chihaya/chihaya/frontend/http"
"github.com/chihaya/chihaya/frontend/udp" "github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware" "github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/prometheus" "github.com/chihaya/chihaya/pkg/prometheus"
"github.com/chihaya/chihaya/pkg/stop" "github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/storage" "github.com/chihaya/chihaya/storage"
@ -48,7 +49,7 @@ func (r *Run) Start(ps storage.PeerStore) error {
r.sg = stop.NewGroup() r.sg = stop.NewGroup()
log.WithFields(log.Fields{"addr": cfg.PrometheusAddr}).Info("starting Prometheus server") log.Info("starting Prometheus server", log.Fields{"addr": cfg.PrometheusAddr})
r.sg.Add(prometheus.NewServer(cfg.PrometheusAddr)) r.sg.Add(prometheus.NewServer(cfg.PrometheusAddr))
if ps == nil { if ps == nil {
@ -56,7 +57,7 @@ func (r *Run) Start(ps storage.PeerStore) error {
if err != nil { if err != nil {
return errors.New("failed to create memory storage: " + err.Error()) return errors.New("failed to create memory storage: " + err.Error())
} }
log.WithFields(ps.LogFields()).Info("started storage") log.Info("started storage", ps.LogFields())
} }
r.peerStore = ps r.peerStore = ps
@ -64,14 +65,14 @@ func (r *Run) Start(ps storage.PeerStore) error {
if err != nil { if err != nil {
return errors.New("failed to validate hook config: " + err.Error()) return errors.New("failed to validate hook config: " + err.Error())
} }
log.WithFields(log.Fields{ log.Info("starting middleware", log.Fields{
"preHooks": cfg.PreHooks.Names(), "preHooks": cfg.PreHooks.Names(),
"postHooks": cfg.PostHooks.Names(), "postHooks": cfg.PostHooks.Names(),
}).Info("starting middleware") })
r.logic = middleware.NewLogic(cfg.Config, r.peerStore, preHooks, postHooks) r.logic = middleware.NewLogic(cfg.Config, r.peerStore, preHooks, postHooks)
if cfg.HTTPConfig.Addr != "" { if cfg.HTTPConfig.Addr != "" {
log.WithFields(cfg.HTTPConfig.LogFields()).Info("starting HTTP frontend") log.Info("starting HTTP frontend", cfg.HTTPConfig.LogFields())
httpfe, err := http.NewFrontend(r.logic, cfg.HTTPConfig) httpfe, err := http.NewFrontend(r.logic, cfg.HTTPConfig)
if err != nil { if err != nil {
return err return err
@ -80,7 +81,7 @@ func (r *Run) Start(ps storage.PeerStore) error {
} }
if cfg.UDPConfig.Addr != "" { if cfg.UDPConfig.Addr != "" {
log.WithFields(cfg.UDPConfig.LogFields()).Info("starting UDP frontend") log.Info("starting UDP frontend", cfg.UDPConfig.LogFields())
udpfe, err := udp.NewFrontend(r.logic, cfg.UDPConfig) udpfe, err := udp.NewFrontend(r.logic, cfg.UDPConfig)
if err != nil { if err != nil {
return err return err
@ -176,7 +177,7 @@ func main() {
return err return err
} }
if jsonLog { if jsonLog {
log.SetFormatter(&log.JSONFormatter{}) log.SetFormatter(&logrus.JSONFormatter{})
} }
debugLog, err := cmd.Flags().GetBool("debug") debugLog, err := cmd.Flags().GetBool("debug")
@ -185,7 +186,7 @@ func main() {
} }
if debugLog { if debugLog {
log.Info("enabling debug logging") log.Info("enabling debug logging")
log.SetLevel(log.DebugLevel) log.SetDebug(true)
} }
cpuProfilePath, err := cmd.Flags().GetString("cpuprofile") cpuProfilePath, err := cmd.Flags().GetString("cpuprofile")
@ -193,7 +194,7 @@ func main() {
return err return err
} }
if cpuProfilePath != "" { if cpuProfilePath != "" {
log.WithFields(log.Fields{"path": cpuProfilePath}).Info("enabling CPU profiling") log.Info("enabling CPU profiling", log.Fields{"path": cpuProfilePath})
f, err := os.Create(cpuProfilePath) f, err := os.Create(cpuProfilePath)
if err != nil { if err != nil {
return err return err

View file

@ -9,12 +9,12 @@ import (
"net/http" "net/http"
"time" "time"
log "github.com/Sirupsen/logrus"
"github.com/julienschmidt/httprouter" "github.com/julienschmidt/httprouter"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend" "github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/pkg/log"
) )
func init() { func init() {
@ -117,7 +117,7 @@ func NewFrontend(logic frontend.TrackerLogic, cfg Config) (*Frontend, error) {
go func() { go func() {
if err := f.listenAndServe(); err != nil { if err := f.listenAndServe(); err != nil {
log.Fatal("failed while serving http: " + err.Error()) log.Fatal("failed while serving http", log.Err(err))
} }
}() }()
@ -230,7 +230,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou
host, _, err := net.SplitHostPort(r.RemoteAddr) host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil { if err != nil {
log.Errorln("http: unable to determine remote address for scrape:", err) log.Error("http: unable to determine remote address for scrape", log.Err(err))
WriteError(w, err) WriteError(w, err)
return return
} }
@ -241,7 +241,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprou
} else if len(reqIP) == net.IPv6len { // implies reqIP.To4() == nil } else if len(reqIP) == net.IPv6len { // implies reqIP.To4() == nil
req.AddressFamily = bittorrent.IPv6 req.AddressFamily = bittorrent.IPv6
} else { } else {
log.Errorln("http: invalid IP: neither v4 nor v6, RemoteAddr was", r.RemoteAddr) log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
WriteError(w, ErrInvalidIP) WriteError(w, ErrInvalidIP)
return return
} }

View file

@ -3,10 +3,9 @@ package http
import ( import (
"net/http" "net/http"
log "github.com/Sirupsen/logrus"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend/http/bencode" "github.com/chihaya/chihaya/frontend/http/bencode"
"github.com/chihaya/chihaya/pkg/log"
) )
// WriteError communicates an error to a BitTorrent client over HTTP. // WriteError communicates an error to a BitTorrent client over HTTP.
@ -15,7 +14,7 @@ func WriteError(w http.ResponseWriter, err error) error {
if _, clientErr := err.(bittorrent.ClientError); clientErr { if _, clientErr := err.(bittorrent.ClientError); clientErr {
message = err.Error() message = err.Error()
} else { } else {
log.Errorf("http: internal error: %s", err) log.Error("http: internal error", log.Err(err))
} }
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)

View file

@ -11,12 +11,12 @@ import (
"sync" "sync"
"time" "time"
log "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend" "github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/frontend/udp/bytepool" "github.com/chihaya/chihaya/frontend/udp/bytepool"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop" "github.com/chihaya/chihaya/pkg/stop"
) )
@ -107,7 +107,7 @@ func NewFrontend(logic frontend.TrackerLogic, cfg Config) (*Frontend, error) {
} }
cfg.PrivateKey = string(pkeyRunes) cfg.PrivateKey = string(pkeyRunes)
log.Warn("UDP private key was not provided, using generated key: ", cfg.PrivateKey) log.Warn("UDP private key was not provided, using generated key", log.Fields{"key": cfg.PrivateKey})
} }
f := &Frontend{ f := &Frontend{
@ -118,7 +118,7 @@ func NewFrontend(logic frontend.TrackerLogic, cfg Config) (*Frontend, error) {
go func() { go func() {
if err := f.listenAndServe(); err != nil { if err := f.listenAndServe(); err != nil {
log.Fatal("failed while serving udp: " + err.Error()) log.Fatal("failed while serving udp", log.Err(err))
} }
}() }()
@ -313,7 +313,7 @@ func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string
} else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil } else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil
req.AddressFamily = bittorrent.IPv6 req.AddressFamily = bittorrent.IPv6
} else { } else {
log.Errorln("udp: invalid IP: neither v4 nor v6, IP was", r.IP) log.Error("udp: invalid IP: neither v4 nor v6", log.Fields{"IP": r.IP})
WriteError(w, txID, ErrInvalidIP) WriteError(w, txID, ErrInvalidIP)
return return
} }

View file

@ -19,11 +19,11 @@ import (
jc "github.com/SermoDigital/jose/crypto" jc "github.com/SermoDigital/jose/crypto"
"github.com/SermoDigital/jose/jws" "github.com/SermoDigital/jose/jws"
"github.com/SermoDigital/jose/jwt" "github.com/SermoDigital/jose/jwt"
log "github.com/Sirupsen/logrus"
"github.com/mendsley/gojwk" "github.com/mendsley/gojwk"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware" "github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop" "github.com/chihaya/chihaya/pkg/stop"
) )
@ -44,6 +44,16 @@ type Config struct {
JWKUpdateInterval time.Duration `yaml:"jwk_set_update_interval"` JWKUpdateInterval time.Duration `yaml:"jwk_set_update_interval"`
} }
// LogFields implements log.Fielder for a Config.
func (cfg Config) LogFields() log.Fields {
return log.Fields{
"issuer": cfg.Issuer,
"audience": cfg.Audience,
"JWKSetURL": cfg.JWKSetURL,
"JWKUpdateInterval": cfg.JWKUpdateInterval,
}
}
type hook struct { type hook struct {
cfg Config cfg Config
publicKeys map[string]crypto.PublicKey publicKeys map[string]crypto.PublicKey
@ -52,7 +62,7 @@ type hook struct {
// NewHook returns an instance of the JWT middleware. // NewHook returns an instance of the JWT middleware.
func NewHook(cfg Config) (middleware.Hook, error) { func NewHook(cfg Config) (middleware.Hook, error) {
log.Debugf("creating new JWT middleware with config: %#v", cfg) log.Debug("creating new JWT middleware", cfg)
h := &hook{ h := &hook{
cfg: cfg, cfg: cfg,
publicKeys: map[string]crypto.PublicKey{}, publicKeys: map[string]crypto.PublicKey{},
@ -83,7 +93,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
func (h *hook) updateKeys() error { func (h *hook) updateKeys() error {
resp, err := http.Get(h.cfg.JWKSetURL) resp, err := http.Get(h.cfg.JWKSetURL)
if err != nil { if err != nil {
log.Errorln("failed to fetch JWK Set: " + err.Error()) log.Error("failed to fetch JWK Set", log.Err(err))
return err return err
} }
@ -91,7 +101,7 @@ func (h *hook) updateKeys() error {
err = json.NewDecoder(resp.Body).Decode(&parsedJWKs) err = json.NewDecoder(resp.Body).Decode(&parsedJWKs)
if err != nil { if err != nil {
resp.Body.Close() resp.Body.Close()
log.Errorln("failed to decode JWK JSON: " + err.Error()) log.Error("failed to decode JWK JSON", log.Err(err))
return err return err
} }
resp.Body.Close() resp.Body.Close()
@ -100,7 +110,7 @@ func (h *hook) updateKeys() error {
for _, parsedJWK := range parsedJWKs.Keys { for _, parsedJWK := range parsedJWKs.Keys {
publicKey, err := parsedJWK.DecodePublicKey() publicKey, err := parsedJWK.DecodePublicKey()
if err != nil { if err != nil {
log.Errorln("failed to decode JWK into public key: " + err.Error()) log.Error("failed to decode JWK into public key", log.Err(err))
return err return err
} }
keys[parsedJWK.Kid] = publicKey keys[parsedJWK.Kid] = publicKey
@ -156,55 +166,53 @@ func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string,
claims := parsedJWT.Claims() claims := parsedJWT.Claims()
if iss, ok := claims.Issuer(); !ok || iss != cfgIss { if iss, ok := claims.Issuer(); !ok || iss != cfgIss {
log.WithFields(log.Fields{ log.Debug("unequal or missing issuer when validating JWT", log.Fields{
"exists": ok, "exists": ok,
"claim": iss, "claim": iss,
"config": cfgIss, "config": cfgIss,
}).Debugln("unequal or missing issuer when validating JWT") })
return jwt.ErrInvalidISSClaim return jwt.ErrInvalidISSClaim
} }
if auds, ok := claims.Audience(); !ok || !in(cfgAud, auds) { if auds, ok := claims.Audience(); !ok || !in(cfgAud, auds) {
log.WithFields(log.Fields{ log.Debug("unequal or missing audience when validating JWT", log.Fields{
"exists": ok, "exists": ok,
"claim": strings.Join(auds, ","), "claim": strings.Join(auds, ","),
"config": cfgAud, "config": cfgAud,
}).Debugln("unequal or missing audience when validating JWT") })
return jwt.ErrInvalidAUDClaim return jwt.ErrInvalidAUDClaim
} }
ihHex := hex.EncodeToString(ih[:]) ihHex := hex.EncodeToString(ih[:])
if ihClaim, ok := claims.Get("infohash").(string); !ok || ihClaim != ihHex { if ihClaim, ok := claims.Get("infohash").(string); !ok || ihClaim != ihHex {
log.WithFields(log.Fields{ log.Debug("unequal or missing infohash when validating JWT", log.Fields{
"exists": ok, "exists": ok,
"claim": ihClaim, "claim": ihClaim,
"request": ihHex, "request": ihHex,
}).Debugln("unequal or missing infohash when validating JWT") })
return errors.New("claim \"infohash\" is invalid") return errors.New("claim \"infohash\" is invalid")
} }
parsedJWS := parsedJWT.(jws.JWS) parsedJWS := parsedJWT.(jws.JWS)
kid, ok := parsedJWS.Protected().Get("kid").(string) kid, ok := parsedJWS.Protected().Get("kid").(string)
if !ok { if !ok {
log.WithFields(log.Fields{ log.Debug("missing kid when validating JWT", log.Fields{
"exists": ok, "exists": ok,
"claim": kid, "claim": kid,
}).Debugln("missing kid when validating JWT") })
return errors.New("invalid kid") return errors.New("invalid kid")
} }
publicKey, ok := publicKeys[kid] publicKey, ok := publicKeys[kid]
if !ok { if !ok {
log.WithFields(log.Fields{ log.Debug("missing public key for kid when validating JWT", log.Fields{
"kid": kid, "kid": kid,
}).Debugln("missing public key for kid when validating JWT") })
return errors.New("signed by unknown kid") return errors.New("signed by unknown kid")
} }
err = parsedJWS.Verify(publicKey, jc.SigningMethodRS256) err = parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
if err != nil { if err != nil {
log.WithFields(log.Fields{ log.Debug("failed to verify signature of JWT", log.Err(err))
"err": err,
}).Debugln("failed to verify signature of JWT")
return err return err
} }

View file

@ -6,9 +6,9 @@ import (
"context" "context"
"time" "time"
log "github.com/Sirupsen/logrus"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend" "github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop" "github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/storage" "github.com/chihaya/chihaya/storage"
) )
@ -61,7 +61,7 @@ func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequ
} }
} }
log.WithFields(resp.LogFields()).Debug("generated announce response") log.Debug("generated announce response", resp)
return ctx, resp, nil return ctx, resp, nil
} }
@ -71,7 +71,7 @@ func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceReque
var err error var err error
for _, h := range l.postHooks { for _, h := range l.postHooks {
if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil { if ctx, err = h.HandleAnnounce(ctx, req, resp); err != nil {
log.Errorln("chihaya: post-announce hooks failed:", err.Error()) log.Error("post-announce hooks failed", log.Err(err))
return return
} }
} }
@ -88,7 +88,7 @@ func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest)
} }
} }
log.WithFields(resp.LogFields()).Debug("generated scrape response") log.Debug("generated scrape response", resp)
return ctx, resp, nil return ctx, resp, nil
} }
@ -98,7 +98,7 @@ func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest,
var err error var err error
for _, h := range l.postHooks { for _, h := range l.postHooks {
if ctx, err = h.HandleScrape(ctx, req, resp); err != nil { if ctx, err = h.HandleScrape(ctx, req, resp); err != nil {
log.Errorln("chihaya: post-scrape hooks failed:", err.Error()) log.Error("post-scrape hooks failed", log.Err(err))
return return
} }
} }

124
pkg/log/log.go Normal file
View file

@ -0,0 +1,124 @@
// Package log adds a thin wrapper around logrus to improve non-debug logging
// performance.
package log
import (
"fmt"
"github.com/Sirupsen/logrus"
)
var (
	// l is the package-global logrus logger that every helper in this
	// package writes to.
	l = logrus.New()
	// debug gates the Debug helper; it is toggled via SetDebug.
	// NOTE(review): read and written without synchronization — assumes
	// SetDebug is only called during startup, before concurrent logging
	// begins. Confirm callers, or switch to sync/atomic if that ever changes.
	debug = false
)
// SetDebug enables or disables debug-level logging for this package.
//
// NOTE(review): the underlying flag is not synchronized; this appears
// intended to be called once during startup, before goroutines start
// logging — confirm with callers.
func SetDebug(enabled bool) {
	debug = enabled
}
// SetFormatter replaces the formatter used by the package-global logger.
func SetFormatter(f logrus.Formatter) {
	l.Formatter = f
}
// Fields is a map of logging fields, keyed by field name.
type Fields map[string]interface{}

// LogFields implements Fielder for Fields by returning the receiver
// itself, so a plain Fields value can be passed anywhere a Fielder is
// expected.
func (f Fields) LogFields() Fields {
	return f
}

// A Fielder provides Fields for structured logging via its LogFields
// method.
type Fielder interface {
	LogFields() Fields
}

// errFielder adapts an error to the Fielder interface.
type errFielder struct {
	wrapped error
}

// LogFields exposes the wrapped error's message under the "error" key.
func (ef errFielder) LogFields() Fields {
	return Fields{"error": ef.wrapped.Error()}
}

// Err wraps an error in a Fielder so it can be handed to the logging
// helpers in this package as a structured field.
func Err(e error) Fielder {
	return errFielder{wrapped: e}
}
// mergeFielders merges the Fields of multiple Fielders into one
// logrus.Fields map.
// Fields from the first Fielder keep their keys unchanged; Fields from
// subsequent Fielders are prefixed with "%d.", starting from 1, to avoid
// key collisions.
//
// The result is a freshly allocated map: the maps returned by the
// individual Fielders are never written to. (Fields.LogFields returns the
// receiver map itself, so mutating it here — as a previous version did —
// would silently corrupt the caller's Fields value.)
//
// If the first Fielder is nil, nil is returned and any remaining Fielders
// are ignored.
//
// must be called with len(fielders) > 0
func mergeFielders(fielders ...Fielder) logrus.Fields {
	if fielders[0] == nil {
		return nil
	}

	// Copy rather than alias the first Fielder's map; see doc comment.
	merged := make(Fields)
	for k, v := range fielders[0].LogFields() {
		merged[k] = v
	}

	for i := 1; i < len(fielders); i++ {
		if fielders[i] == nil {
			continue
		}
		prefix := fmt.Sprint(i, ".")
		for k, v := range fielders[i].LogFields() {
			merged[prefix+k] = v
		}
	}

	return logrus.Fields(merged)
}
// Debug logs v at the debug level, together with the given Fielders'
// fields. It is a no-op unless debug logging has been enabled via
// SetDebug, which keeps field merging off the hot path in production.
func Debug(v interface{}, fielders ...Fielder) {
	if !debug {
		return
	}
	if len(fielders) == 0 {
		l.Debug(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Debug(v)
}
// Info logs v at the info level, together with the given Fielders' fields.
func Info(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Info(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Info(v)
}
// Warn logs v at the warning level, together with the given Fielders'
// fields.
func Warn(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Warn(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Warn(v)
}
// Error logs v at the error level, together with the given Fielders'
// fields.
func Error(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Error(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Error(v)
}
// Fatal logs v at the fatal level, together with the given Fielders'
// fields, then exits the process with a non-zero status code (via
// logrus's Fatal).
func Fatal(v interface{}, fielders ...Fielder) {
	if len(fielders) == 0 {
		l.Fatal(v)
		return
	}
	l.WithFields(mergeFielders(fielders...)).Fatal(v)
}

View file

@ -6,7 +6,7 @@ import (
"context" "context"
"net/http" "net/http"
log "github.com/Sirupsen/logrus" "github.com/chihaya/chihaya/pkg/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -42,7 +42,7 @@ func NewServer(addr string) *Server {
go func() { go func() {
if err := s.srv.ListenAndServe(); err != http.ErrServerClosed { if err := s.srv.ListenAndServe(); err != http.ErrServerClosed {
log.Fatal("failed while serving prometheus: " + err.Error()) log.Fatal("failed while serving prometheus", log.Err(err))
} }
}() }()

View file

@ -10,10 +10,10 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
log "github.com/Sirupsen/logrus"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/storage" "github.com/chihaya/chihaya/storage"
) )
@ -77,40 +77,41 @@ func (cfg Config) LogFields() log.Fields {
// This function warns to the logger when a value is changed. // This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config { func (cfg Config) Validate() Config {
validcfg := cfg validcfg := cfg
if cfg.ShardCount <= 0 { if cfg.ShardCount <= 0 {
validcfg.ShardCount = defaultShardCount validcfg.ShardCount = defaultShardCount
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".ShardCount", "name": Name + ".ShardCount",
"provided": cfg.ShardCount, "provided": cfg.ShardCount,
"default": validcfg.ShardCount, "default": validcfg.ShardCount,
}).Warnln("falling back to default configuration") })
} }
if cfg.GarbageCollectionInterval <= 0 { if cfg.GarbageCollectionInterval <= 0 {
validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".GarbageCollectionInterval", "name": Name + ".GarbageCollectionInterval",
"provided": cfg.GarbageCollectionInterval, "provided": cfg.GarbageCollectionInterval,
"default": validcfg.GarbageCollectionInterval, "default": validcfg.GarbageCollectionInterval,
}).Warnln("falling back to default configuration") })
} }
if cfg.PrometheusReportingInterval <= 0 { if cfg.PrometheusReportingInterval <= 0 {
validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".PrometheusReportingInterval", "name": Name + ".PrometheusReportingInterval",
"provided": cfg.PrometheusReportingInterval, "provided": cfg.PrometheusReportingInterval,
"default": validcfg.PrometheusReportingInterval, "default": validcfg.PrometheusReportingInterval,
}).Warnln("falling back to default configuration") })
} }
if cfg.PeerLifetime <= 0 { if cfg.PeerLifetime <= 0 {
validcfg.PeerLifetime = defaultPeerLifetime validcfg.PeerLifetime = defaultPeerLifetime
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".PeerLifetime", "name": Name + ".PeerLifetime",
"provided": cfg.PeerLifetime, "provided": cfg.PeerLifetime,
"default": validcfg.PeerLifetime, "default": validcfg.PeerLifetime,
}).Warnln("falling back to default configuration") })
} }
return validcfg return validcfg
@ -139,7 +140,7 @@ func New(provided Config) (storage.PeerStore, error) {
return return
case <-time.After(cfg.GarbageCollectionInterval): case <-time.After(cfg.GarbageCollectionInterval):
before := time.Now().Add(-cfg.PeerLifetime) before := time.Now().Add(-cfg.PeerLifetime)
log.Debugln("memory: purging peers with no announces since", before) log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
ps.collectGarbage(before) ps.collectGarbage(before)
} }
} }
@ -174,7 +175,7 @@ func New(provided Config) (storage.PeerStore, error) {
case <-t.C: case <-t.C:
before := time.Now() before := time.Now()
ps.populateProm() ps.populateProm()
log.Debugf("memory: populateProm() took %s", time.Since(before)) log.Debug("storage: populateProm() finished", log.Fields{"timeTaken": time.Since(before)})
} }
} }
}() }()

View file

@ -11,10 +11,10 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
log "github.com/Sirupsen/logrus"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/storage" "github.com/chihaya/chihaya/storage"
) )
@ -82,40 +82,42 @@ func (cfg Config) LogFields() log.Fields {
// This function warns to the logger when a value is changed. // This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config { func (cfg Config) Validate() Config {
validcfg := cfg validcfg := cfg
if cfg.ShardCount <= 0 { if cfg.ShardCount <= 0 {
validcfg.ShardCount = defaultShardCount validcfg.ShardCount = defaultShardCount
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".ShardCount", "name": Name + ".ShardCount",
"provided": cfg.ShardCount, "provided": cfg.ShardCount,
"default": validcfg.ShardCount, "default": validcfg.ShardCount,
}).Warnln("falling back to default configuration") })
} }
if cfg.GarbageCollectionInterval <= 0 { if cfg.GarbageCollectionInterval <= 0 {
validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval validcfg.GarbageCollectionInterval = defaultGarbageCollectionInterval
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".GarbageCollectionInterval", "name": Name + ".GarbageCollectionInterval",
"provided": cfg.GarbageCollectionInterval, "provided": cfg.GarbageCollectionInterval,
"default": validcfg.GarbageCollectionInterval, "default": validcfg.GarbageCollectionInterval,
}).Warnln("falling back to default configuration") })
} }
if cfg.PrometheusReportingInterval <= 0 { if cfg.PrometheusReportingInterval <= 0 {
validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval validcfg.PrometheusReportingInterval = defaultPrometheusReportingInterval
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".PrometheusReportingInterval", "name": Name + ".PrometheusReportingInterval",
"provided": cfg.PrometheusReportingInterval, "provided": cfg.PrometheusReportingInterval,
"default": validcfg.PrometheusReportingInterval, "default": validcfg.PrometheusReportingInterval,
}).Warnln("falling back to default configuration") })
} }
if cfg.PeerLifetime <= 0 { if cfg.PeerLifetime <= 0 {
validcfg.PeerLifetime = defaultPeerLifetime validcfg.PeerLifetime = defaultPeerLifetime
log.WithFields(log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".PeerLifetime", "name": Name + ".PeerLifetime",
"provided": cfg.PeerLifetime, "provided": cfg.PeerLifetime,
"default": validcfg.PeerLifetime, "default": validcfg.PeerLifetime,
}).Warnln("falling back to default configuration") })
} }
return validcfg return validcfg
@ -147,7 +149,7 @@ func New(provided Config) (storage.PeerStore, error) {
return return
case <-time.After(cfg.GarbageCollectionInterval): case <-time.After(cfg.GarbageCollectionInterval):
before := time.Now().Add(-cfg.PeerLifetime) before := time.Now().Add(-cfg.PeerLifetime)
log.Debugln("memory: purging peers with no announces since", before) log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
ps.collectGarbage(before) ps.collectGarbage(before)
} }
} }
@ -182,7 +184,7 @@ func New(provided Config) (storage.PeerStore, error) {
case <-t.C: case <-t.C:
before := time.Now() before := time.Now()
ps.populateProm() ps.populateProm()
log.Debugf("memory: populateProm() took %s", time.Since(before)) log.Debug("storage: populateProm() finished", log.Fields{"timeTaken": time.Since(before)})
} }
} }
}() }()

View file

@ -4,9 +4,8 @@ import (
"errors" "errors"
"sync" "sync"
log "github.com/Sirupsen/logrus"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop" "github.com/chihaya/chihaya/pkg/stop"
) )
@ -84,14 +83,14 @@ type PeerStore interface {
// returned. // returned.
ScrapeSwarm(infoHash bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) bittorrent.Scrape ScrapeSwarm(infoHash bittorrent.InfoHash, addressFamily bittorrent.AddressFamily) bittorrent.Scrape
// stop is an interface that expects a Stop method to stop the // stop.Stopper is an interface that expects a Stop method to stop the
// PeerStore. // PeerStore.
// For more details see the documentation in the stop package. // For more details see the documentation in the stop package.
stop.Stopper stop.Stopper
// LogFields returns a loggable version of the data used to configure and // log.Fielder returns a loggable version of the data used to configure and
// operate a particular peer store. // operate a particular peer store.
LogFields() log.Fields log.Fielder
} }
// RegisterDriver makes a Driver available by the provided name. // RegisterDriver makes a Driver available by the provided name.