fixed some linting errors

found them using

```
gometalinter --skip=vendor --disable-all --enable=megacheck --enable=deadcode --enable=ineffassign --enable=interfacer --enable=errcheck ./...
```
Alex Grintsvayg 2018-08-07 11:38:55 -04:00
parent d9b4c0f94d
commit 0a54d4da56
6 changed files with 29 additions and 25 deletions
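
The changes below come down to two patterns these linters report: errcheck flags error return values that are silently dropped, and megacheck flags wrappers that check an error (or a bool) only to return it unchanged. A minimal sketch of both, using a hypothetical `doWork` helper and before/after function names that are not from this repo:

```go
package main

import "log"

// doWork is a hypothetical stand-in for calls like bencode.DecodeBytes
// or node.Connect; it exists only for this sketch.
func doWork() error { return nil }

// before: checking err just to return it (or nil) adds nothing,
// so megacheck suggests returning the call directly.
func connectBefore() error {
	err := doWork()
	if err != nil {
		return err
	}
	return nil
}

// after: the simplified form used throughout this commit.
func connectAfter() error {
	return doWork()
}

// before: errcheck flags the dropped error return.
func shutdownBefore() {
	doWork() // error ignored
}

// after: handle the error, or at least log it when there is
// nothing better to do with it.
func shutdownAfter() {
	if err := doWork(); err != nil {
		log.Println(err)
	}
}

func main() {
	_ = connectBefore()
	_ = connectAfter()
	shutdownBefore()
	shutdownAfter()
}
```

The boolean version of the same simplification is what collapses `if len(cf.activeContacts) >= bucketSize { return true }; return false` into a single return in the contact finder below.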

View file

@@ -108,12 +108,7 @@ func (c *Contact) UnmarshalBencode(b []byte) error {
 		return errors.Err("invalid IP")
 	}
 
-	err = bencode.DecodeBytes(raw[2], &c.Port)
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return bencode.DecodeBytes(raw[2], &c.Port)
 }
 
 func sortByDistance(contacts []Contact, target bits.Bitmap) {

View file

@@ -70,11 +70,7 @@ func (dht *DHT) connect(conn UDPConn) error {
 	dht.node = NewNode(contact.ID)
 	dht.tokenCache = newTokenCache(dht.node, tokenSecretRotationInterval)
 
-	err = dht.node.Connect(conn)
-	if err != nil {
-		return err
-	}
-	return nil
+	return dht.node.Connect(conn)
 }
 
 // Start starts the dht

View file

@@ -57,7 +57,11 @@ func (dht *DHT) runAnnouncer() {
 	defer dht.grp.Done()
 	limiter := rate.NewLimiter(rate.Limit(dht.conf.AnnounceRate), dht.conf.AnnounceRate)
 	for {
-		limiter.Wait(context.Background()) // TODO: should use grp.ctx somehow? so when grp is closed, wait returns
+		err := limiter.Wait(context.Background()) // TODO: should use grp.ctx somehow? so when grp is closed, wait returns
+		if err != nil {
+			log.Error(errors.Prefix("rate limiter", err))
+			continue
+		}
 		select {
 		case limitCh <- time.Now():
 		case <-dht.grp.Ch():

View file

@@ -250,10 +250,6 @@ func (cf *contactFinder) probe(cycleID string) *Contact {
 	return cf.closest(res.Contacts...)
 }
 
-func (cf *contactFinder) probeClosestOutstanding() {
-
-}
-
 // appendNewToShortlist appends any new contacts to the shortlist and sorts it by distance
 // contacts that have already been added to the shortlist in the past are ignored
 func (cf *contactFinder) appendNewToShortlist(contacts []Contact) {
@@ -320,11 +316,7 @@ func (cf *contactFinder) isSearchFinished() bool {
 	cf.activeContactsMutex.Lock()
 	defer cf.activeContactsMutex.Unlock()
 
-	if len(cf.activeContacts) >= bucketSize {
-		return true
-	}
-
-	return false
+	return len(cf.activeContacts) >= bucketSize
 }
 
 func (cf *contactFinder) debug(format string, args ...interface{}) {

View file

@@ -263,14 +263,20 @@ func (rt *routingTable) Update(c Contact) {
 		}
 	}
 
-	b.UpdatePeer(peer{Contact: c, Distance: rt.id.Xor(c.ID)}, true)
+	err := b.UpdatePeer(peer{Contact: c, Distance: rt.id.Xor(c.ID)}, true)
+	if err != nil {
+		log.Error(err)
+	}
 }
 
 // Fresh refreshes a contact if its already in the routing table
 func (rt *routingTable) Fresh(c Contact) {
 	rt.mu.RLock()
 	defer rt.mu.RUnlock()
-	rt.bucketFor(c.ID).UpdatePeer(peer{Contact: c, Distance: rt.id.Xor(c.ID)}, false)
+	err := rt.bucketFor(c.ID).UpdatePeer(peer{Contact: c, Distance: rt.id.Xor(c.ID)}, false)
+	if err != nil {
+		log.Error(err)
+	}
 }
 
 // FailContact marks a contact as having failed, and removes it if it failed too many times

View file

@@ -110,6 +110,9 @@ func (rpc *rpcReceiver) IterativeFindValue(r *http.Request, args *RpcIterativeFi
 		return err
 	}
 	foundContacts, found, err := FindContacts(rpc.dht.node, key, false, nil)
+	if err != nil {
+		return err
+	}
 	result.Contacts = foundContacts
 	result.FoundValue = found
 	return nil
@@ -153,7 +156,11 @@ func (dht *DHT) runRPCServer(port int) {
 	s := rpc2.NewServer()
 	s.RegisterCodec(json.NewCodec(), "application/json")
 	s.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8")
-	s.RegisterService(&rpcReceiver{dht: dht}, "rpc")
+	err := s.RegisterService(&rpcReceiver{dht: dht}, "rpc")
+	if err != nil {
+		log.Error(errors.Prefix("registering rpc service", err))
+		return
+	}
 
 	handler := mux.NewRouter()
 	handler.Handle("/", s)
@@ -171,6 +178,10 @@ func (dht *DHT) runRPCServer(port int) {
 	}()
 
 	<-dht.grp.Ch()
-	server.Shutdown(context.Background())
+	err = server.Shutdown(context.Background())
+	if err != nil {
+		log.Error(errors.Prefix("shutting down rpc service", err))
+		return
+	}
 	wg.Wait()
 }