consistent logging

Jimmy Zelinskie 2014-06-26 17:10:39 -04:00
parent 8775764fb6
commit caba155b5d
5 changed files with 51 additions and 30 deletions
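
The pattern this commit converges on is glog-style leveled logging: unconditional log.Info/log.Infof calls (and their "chihaya: " prefixes) are replaced with verbosity-gated log.V(n) calls, with the chattiest per-request detail behind V(5). A minimal standalone sketch of that pattern follows; the values are illustrative placeholders, not code from this repository:

    package main

    import (
        "flag"

        log "github.com/golang/glog" // same import alias used in the diffs below
    )

    func main() {
        // glog registers its flags (-v, -logtostderr, -vmodule, ...) on the
        // standard flag set; they take effect once flag.Parse runs.
        flag.Parse()

        // Cheap lifecycle detail goes behind a low verbosity level.
        log.V(1).Info("using default config")

        // Chatty per-request detail is guarded with an if so the arguments are
        // only built when -v is at least 5.
        ip, userID, torrentID := "203.0.113.7", "42", "deadbeef" // placeholder values
        if log.V(5) {
            log.Infof("announce: ip: %s, user: %s, torrent: %s", ip, userID, torrentID)
        }
    }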


@@ -88,7 +88,7 @@ func New() *Config {
 // New.
 func Open(path string) (*Config, error) {
     if path == "" {
-        log.Info("chihaya: using default configuration")
+        log.V(1).Info("using default config")
         return New(), nil
     }
@@ -102,6 +102,7 @@ func Open(path string) (*Config, error) {
     if err != nil {
         return nil, err
     }
 
+    log.V(1).Infof("loaded config file: %s", path)
     return conf, nil
 }

main.go (19 lines changed)

@@ -37,25 +37,24 @@ func main() {
     if profile {
         f, err := os.Create("chihaya.cpu")
         if err != nil {
-            log.Fatalf("chihaya: failed to create profile file: %s\n", err)
+            log.Fatalf("failed to create profile file: %s\n", err)
         }
         defer f.Close()
         pprof.StartCPUProfile(f)
-        log.Info("chihaya: started profiling")
+        log.V(1).Info("started profiling")
     }
 
     // Load the config file.
     conf, err := config.Open(configPath)
     if err != nil {
-        log.Fatalf("chihaya: failed to parse configuration file: %s\n", err)
+        log.Fatalf("failed to parse configuration file: %s\n", err)
     }
-    log.Infoln("chihaya: succesfully loaded config")
 
     // Create a new server.
     s, err := server.New(conf)
     if err != nil {
-        log.Fatalf("chihaya: failed to create server: %s\n", err)
+        log.Fatalf("failed to create server: %s\n", err)
     }
 
     // Spawn a goroutine to handle interrupts and safely shut down.
@@ -64,19 +63,19 @@ func main() {
         signal.Notify(interrupts, os.Interrupt)
         <-interrupts
-        log.Info("chihaya: caught interrupt, shutting down...")
+        log.V(1).Info("caught interrupt, shutting down...")
 
         if profile {
             pprof.StopCPUProfile()
-            log.Info("chihaya: stopped profiling")
+            log.V(1).Info("stopped profiling")
         }
 
         err := s.Stop()
         if err != nil {
-            log.Fatalf("chihaya: failed to shutdown cleanly: %s", err)
+            log.Fatalf("failed to shutdown cleanly: %s", err)
         }
-        log.Info("chihaya: shutdown cleanly")
+        log.V(1).Info("shutdown cleanly")
 
         <-interrupts
@@ -87,6 +86,6 @@ func main() {
     // Start the server listening and handling requests.
     err = s.ListenAndServe()
     if err != nil {
-        log.Fatalf("chihaya: failed to start server: %s\n", err)
+        log.Fatalf("failed to start server: %s\n", err)
     }
 }


@@ -72,7 +72,16 @@ func (s Server) serveAnnounce(w http.ResponseWriter, r *http.Request) {
     writeAnnounceResponse(w, announce, user, torrent)
-    log.Infof("chihaya: handled announce from %s", announce.IP)
     w.(http.Flusher).Flush()
+
+    if log.V(5) {
+        log.Infof(
+            "announce: ip: %s, user: %s, torrent: %s",
+            announce.IP,
+            user.ID,
+            torrent.ID,
+        )
+    }
 }
 
 func updateTorrent(c tracker.Conn, a *models.Announce, p *models.Peer, t *models.Torrent) (created bool, err error) {
@@ -174,12 +183,13 @@ func writeAnnounceResponse(w io.Writer, a *models.Announce, u *models.User, t *m
     bencoder.Encode("peers")
 
     var peerCount int
-    if a.Compact {
-        if a.Left == 0 {
-            peerCount = minInt(a.NumWant, leechCount)
-        } else {
-            peerCount = minInt(a.NumWant, leechCount+seedCount-1)
-        }
-
+    if a.Left == 0 {
+        peerCount = minInt(a.NumWant, leechCount)
+    } else {
+        peerCount = minInt(a.NumWant, leechCount+seedCount-1)
+    }
+
+    if a.Compact {
         // 6 is the number of bytes 1 compact peer takes up.
         bencoder.Encode(strconv.Itoa(peerCount * 6))
         bencoder.Encode(":")
@@ -187,17 +197,13 @@ func writeAnnounceResponse(w io.Writer, a *models.Announce, u *models.User, t *m
         bencoder.Encode("l")
     }
 
-    var count int
     if a.Left == 0 {
         // If they're seeding, give them only leechers
-        count = writePeers(w, u, t.Leechers, a.NumWant, a.Compact)
+        writePeers(w, u, t.Leechers, peerCount, a.Compact)
     } else {
         // If they're leeching, prioritize giving them seeders
-        count += writePeers(w, u, t.Seeders, a.NumWant, a.Compact)
-        count += writePeers(w, u, t.Leechers, a.NumWant-count, a.Compact)
-    }
-
-    if a.Compact && peerCount != count {
-        log.Errorf("calculated peer count (%d) != real count (%d)", peerCount, count)
+        count := writePeers(w, u, t.Seeders, peerCount, a.Compact)
+        writePeers(w, u, t.Leechers, peerCount-count, a.Compact)
     }
 
     if !a.Compact {
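
A side note on the peerCount * 6 arithmetic kept as context above: in the BitTorrent compact peer format, each IPv4 peer occupies exactly 6 bytes (4 address bytes plus a 2-byte big-endian port), so the bencoded string length is peerCount * 6. Below is a hedged sketch of that packing, not the repository's writePeers implementation:

    package main

    import (
        "encoding/binary"
        "fmt"
        "net"
    )

    // compactPeer packs an IPv4 address and port into the 6-byte compact form:
    // 4 address bytes followed by a 2-byte big-endian (network order) port.
    func compactPeer(ip net.IP, port uint16) ([]byte, bool) {
        v4 := ip.To4()
        if v4 == nil {
            return nil, false // the compact form only covers IPv4 peers
        }
        buf := make([]byte, 6)
        copy(buf[:4], v4)
        binary.BigEndian.PutUint16(buf[4:], port)
        return buf, true
    }

    func main() {
        if b, ok := compactPeer(net.ParseIP("203.0.113.7"), 51413); ok {
            fmt.Printf("% x\n", b) // cb 00 71 07 c8 d5
        }
    }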


@@ -7,6 +7,7 @@ package server
 import (
     "io"
     "net/http"
+    "strings"
 
     log "github.com/golang/glog"
@@ -26,15 +27,19 @@ func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request) {
         fail(err, w, r)
     }
 
+    var user *models.User
     if s.conf.Private {
-        _, err = conn.FindUser(scrape.Passkey)
+        user, err = conn.FindUser(scrape.Passkey)
         if err != nil {
             fail(err, w, r)
             return
         }
     }
 
-    var torrents []*models.Torrent
+    var (
+        torrents   []*models.Torrent
+        torrentIDs []string
+    )
 
     for _, infohash := range scrape.Infohashes {
         torrent, err := conn.FindTorrent(infohash)
         if err != nil {
@@ -42,6 +47,7 @@ func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request) {
             return
         }
         torrents = append(torrents, torrent)
+        torrentIDs = append(torrentIDs, string(torrent.ID))
     }
 
     bencoder := bencode.NewEncoder(w)
@@ -52,9 +58,14 @@ func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request) {
     }
     bencoder.Encode("e")
 
-    log.V(3).Infof("chihaya: handled scrape from %s", r.RemoteAddr)
     w.(http.Flusher).Flush()
+
+    log.V(5).Infof(
+        "scrape: ip: %s user: %s torrents: %s",
+        r.RemoteAddr,
+        user.ID,
+        strings.Join(torrentIDs, ", "),
+    )
 }
 
 func writeTorrentStatus(w io.Writer, t *models.Torrent) {


@@ -128,7 +128,11 @@ func fail(err error, w http.ResponseWriter, r *http.Request) {
     length, _ := io.WriteString(w, msg)
     w.Header().Add("Content-Length", string(length))
-    log.V(2).Infof("chihaya: handled failure: %s from %s ", errmsg, r.RemoteAddr)
     w.(http.Flusher).Flush()
+
+    log.V(5).Infof(
+        "failed request: ip: %s failure: %s",
+        r.RemoteAddr,
+        errmsg,
+    )
 }
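
Because every remaining informational line now sits behind log.V(...), the tracker stays quiet by default; the extra detail only appears when glog's verbosity is raised at startup, for example (the binary name here is illustrative; -logtostderr and -v are glog's standard flags):

    ./chihaya -logtostderr -v=5

With -v=1 the lifecycle messages show up, and -v=5 additionally surfaces the per-announce, per-scrape, and per-failure lines added above.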