Merge pull request #316 from mrd0ll4r/remove-time-now

Remove most calls to time.Now

Commit 1cc0738cbe, 4 changed files with 100 additions and 32 deletions
@@ -40,6 +40,10 @@ chihaya:
     read_timeout: 5s
     write_timeout: 5s
 
+    # Whether to time requests.
+    # Disabling this should increase performance/decrease load.
+    enable_request_timing: false
+
   # This block defines configuration for the tracker's UDP interface.
   # If you do not wish to run this, delete this section.
   udp:

@@ -57,6 +61,10 @@ chihaya:
     # The key used to encrypt connection IDs.
     private_key: "paste a random string here that will be used to hmac connection IDs"
 
+    # Whether to time requests.
+    # Disabling this should increase performance/decrease load.
+    enable_request_timing: false
+
   # This block defines configuration used for the storage of peer data.
   storage:
     # The frequency which stale peers are removed.

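As an aside, the new option is plain snake_case YAML, so it reaches the frontend Config structs (changed in the next hunks) through a yaml struct tag. Below is a minimal sketch of that mapping, assuming gopkg.in/yaml.v2 for parsing; the frontendConfig type and its fields are illustrative stand-ins, not the project's actual Config:

// Illustrative sketch only: shows how enable_request_timing maps onto a Go
// struct field via a yaml tag. Not chihaya's real config loader.
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type frontendConfig struct {
	ReadTimeout         string `yaml:"read_timeout"`
	WriteTimeout        string `yaml:"write_timeout"`
	EnableRequestTiming bool   `yaml:"enable_request_timing"`
}

func main() {
	raw := []byte("read_timeout: 5s\nwrite_timeout: 5s\nenable_request_timing: false\n")

	var cfg frontendConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.EnableRequestTiming) // false unless the operator opts in
}
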
@@ -69,6 +69,7 @@ type Config struct {
 	RealIPHeader        string `yaml:"real_ip_header"`
 	TLSCertPath         string `yaml:"tls_cert_path"`
 	TLSKeyPath          string `yaml:"tls_key_path"`
+	EnableRequestTiming bool   `yaml:"enable_request_timing"`
 }
 
 // LogFields renders the current config as a set of Logrus fields.

@@ -81,6 +82,7 @@ func (cfg Config) LogFields() log.Fields {
 		"realIPHeader":        cfg.RealIPHeader,
 		"tlsCertPath":         cfg.TLSCertPath,
 		"tlsKeyPath":          cfg.TLSKeyPath,
+		"enableRequestTiming": cfg.EnableRequestTiming,
 	}
 }

@@ -168,9 +170,18 @@ func (f *Frontend) listenAndServe() error {
 // announceRoute parses and responds to an Announce.
 func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
 	var err error
-	start := time.Now()
+	var start time.Time
+	if f.EnableRequestTiming {
+		start = time.Now()
+	}
 	var af *bittorrent.AddressFamily
-	defer func() { recordResponseDuration("announce", af, err, time.Since(start)) }()
+	defer func() {
+		if f.EnableRequestTiming {
+			recordResponseDuration("announce", af, err, time.Since(start))
+		} else {
+			recordResponseDuration("announce", af, err, time.Duration(0))
+		}
+	}()
 
 	req, err := ParseAnnounce(r, f.RealIPHeader, f.AllowIPSpoofing)
 	if err != nil {

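The same guard recurs in the scrape route and the UDP frontend below: skip the time.Now() call when timing is disabled, but still record a zero duration, presumably so the underlying metric's sample count keeps advancing even though no latency is measured. The following standalone sketch shows that pattern in isolation; observe and timedSection are illustrative names standing in for whatever sink recordResponseDuration wraps, not code from this PR:

// Illustrative sketch only. timedSection avoids reading the wall clock when
// timing is disabled, while still reporting a zero duration so the
// observation count keeps incrementing.
package main

import (
	"fmt"
	"time"
)

// observe is a stand-in for a metrics sink such as a latency histogram.
func observe(route string, d time.Duration) {
	fmt.Printf("%s took %v\n", route, d)
}

// timedSection returns a function to defer. If timing is disabled it never
// calls time.Now and always reports a zero duration.
func timedSection(route string, enabled bool) func() {
	if !enabled {
		return func() { observe(route, 0) }
	}
	start := time.Now()
	return func() { observe(route, time.Since(start)) }
}

func main() {
	defer timedSection("announce", true)()
	time.Sleep(10 * time.Millisecond) // simulate request handling
}
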
@@ -198,9 +209,18 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, _ httpr
 // scrapeRoute parses and responds to a Scrape.
 func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
 	var err error
-	start := time.Now()
+	var start time.Time
+	if f.EnableRequestTiming {
+		start = time.Now()
+	}
 	var af *bittorrent.AddressFamily
-	defer func() { recordResponseDuration("scrape", af, err, time.Since(start)) }()
+	defer func() {
+		if f.EnableRequestTiming {
+			recordResponseDuration("scrape", af, err, time.Since(start))
+		} else {
+			recordResponseDuration("scrape", af, err, time.Duration(0))
+		}
+	}()
 
 	req, err := ParseScrape(r)
 	if err != nil {

@@ -71,6 +71,7 @@ type Config struct {
 	PrivateKey          string        `yaml:"private_key"`
 	MaxClockSkew        time.Duration `yaml:"max_clock_skew"`
 	AllowIPSpoofing     bool          `yaml:"allow_ip_spoofing"`
+	EnableRequestTiming bool          `yaml:"enable_request_timing"`
 }
 
 // LogFields renders the current config as a set of Logrus fields.

@@ -80,6 +81,7 @@ func (cfg Config) LogFields() log.Fields {
 		"privateKey":          cfg.PrivateKey,
 		"maxClockSkew":        cfg.MaxClockSkew,
 		"allowIPSpoofing":     cfg.AllowIPSpoofing,
+		"enableRequestTiming": cfg.EnableRequestTiming,
 	}
 }

@@ -201,13 +203,20 @@ func (t *Frontend) listenAndServe() error {
 			}
 
 			// Handle the request.
-			start := time.Now()
+			var start time.Time
+			if t.EnableRequestTiming {
+				start = time.Now()
+			}
 			action, af, err := t.handleRequest(
 				// Make sure the IP is copied, not referenced.
 				Request{buffer[:n], append([]byte{}, addr.IP...)},
 				ResponseWriter{t.socket, addr},
 			)
-			recordResponseDuration(action, af, err, time.Since(start))
+			if t.EnableRequestTiming {
+				recordResponseDuration(action, af, err, time.Since(start))
+			} else {
+				recordResponseDuration(action, af, err, time.Duration(0))
+			}
 		}()
 	}
 }

@@ -6,6 +6,7 @@ import (
 	"net"
 	"runtime"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	log "github.com/Sirupsen/logrus"

@@ -99,6 +100,25 @@ func New(cfg Config) (storage.PeerStore, error) {
 		}
 	}()
 
+	ps.wg.Add(1)
+	go func() {
+		defer ps.wg.Done()
+		t := time.NewTicker(1 * time.Second)
+		for {
+			select {
+			case <-ps.closing:
+				t.Stop()
+				select {
+				case <-t.C:
+				default:
+				}
+				return
+			case now := <-t.C:
+				ps.setClock(now.UnixNano())
+			}
+		}
+	}()
+
 	return ps, nil
 }

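One subtlety in the clock goroutine above: (*time.Ticker).Stop does not close the ticker's channel, and a tick that fired just before Stop may still be sitting buffered in t.C, so the inner non-blocking select drains it before the goroutine returns. A self-contained illustration of the same idiom, with illustrative values:

// Illustrative only: stop a ticker and drain any tick that was already
// buffered in its channel, since (*time.Ticker).Stop does not close t.C.
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTicker(10 * time.Millisecond)
	time.Sleep(25 * time.Millisecond) // let at least one tick fire

	t.Stop()
	select {
	case <-t.C: // discard a tick that may already be buffered
		fmt.Println("drained a pending tick")
	default:
		fmt.Println("no pending tick")
	}
}
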
@@ -118,11 +138,22 @@ type swarm struct {
 type peerStore struct {
 	shards  []*peerShard
 	closing chan struct{}
+
+	// clock stores the current time nanoseconds, updated every second.
+	// Must be accessed atomically!
+	clock int64
+
 	wg sync.WaitGroup
 }
 
 var _ storage.PeerStore = &peerStore{}
 
+func (ps *peerStore) getClock() int64 {
+	return atomic.LoadInt64(&ps.clock)
+}
+
+func (ps *peerStore) setClock(to int64) {
+	atomic.StoreInt64(&ps.clock, to)
+}
+
 func (ps *peerStore) shardIndex(infoHash bittorrent.InfoHash, af bittorrent.AddressFamily) uint32 {
 	// There are twice the amount of shards specified by the user, the first
 	// half is dedicated to IPv4 swarms and the second half is dedicated to

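getClock and setClock turn every timestamp into a single atomic load or store against a value the background goroutine refreshes once per second, instead of a fresh time.Now().UnixNano() on every peer insertion. The rough micro-benchmark below is illustrative only and not part of this PR; cachedClock is a stand-in for peerStore's clock field, and the file would be saved as a _test.go file:

// Illustrative benchmark sketch: compares reading a cached, atomically
// updated clock against calling time.Now().UnixNano() directly.
// Run with: go test -bench=.
package clockbench

import (
	"sync/atomic"
	"testing"
	"time"
)

// cachedClock mimics peerStore's clock field: Unix nanoseconds that a
// background goroutine would refresh once per second.
type cachedClock struct {
	ns int64
}

func (c *cachedClock) get() int64   { return atomic.LoadInt64(&c.ns) }
func (c *cachedClock) set(to int64) { atomic.StoreInt64(&c.ns, to) }

var sink int64 // keeps the compiler from optimizing the loops away

func BenchmarkTimeNow(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = time.Now().UnixNano()
	}
}

func BenchmarkCachedClock(b *testing.B) {
	c := &cachedClock{}
	c.set(time.Now().UnixNano())
	for i := 0; i < b.N; i++ {
		sink = c.get()
	}
}

The trade-off, visible in the hunks that follow, is that stored peer timestamps can now be up to about one second stale, which should be negligible when stale peers are only expired after intervals far longer than a second.
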
@@ -181,7 +212,7 @@ func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error
 		recordInfohashesDelta(1)
 	}
 
-	shard.swarms[ih].seeders[pk] = time.Now().UnixNano()
+	shard.swarms[ih].seeders[pk] = ps.getClock()
 
 	shard.Unlock()
 	return nil

@@ -240,7 +271,7 @@ func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
 		recordInfohashesDelta(1)
 	}
 
-	shard.swarms[ih].leechers[pk] = time.Now().UnixNano()
+	shard.swarms[ih].leechers[pk] = ps.getClock()
 
 	shard.Unlock()
 	return nil

@@ -301,7 +332,7 @@ func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer)
 	delete(shard.swarms[ih].leechers, pk)
 
-	shard.swarms[ih].seeders[pk] = time.Now().UnixNano()
+	shard.swarms[ih].seeders[pk] = ps.getClock()
 
 	shard.Unlock()
 	return nil