diff --git a/.golangci-lint.yaml b/.golangci-lint.yaml
new file mode 100644
index 0000000..15f497c
--- /dev/null
+++ b/.golangci-lint.yaml
@@ -0,0 +1,41 @@
+---
+run:
+  timeout: "5m"
+output:
+  sort-results: true
+linters-settings:
+  goimports:
+    local-prefixes: "github.com/chihaya/chihaya"
+linters:
+  enable:
+    - "bidichk"
+    - "bodyclose"
+    - "deadcode"
+    - "errcheck"
+    - "errname"
+    - "errorlint"
+    - "gofumpt"
+    - "goimports"
+    - "goprintffuncname"
+    - "gosec"
+    - "gosimple"
+    - "govet"
+    - "ifshort"
+    - "importas"
+    - "ineffassign"
+    - "makezero"
+    - "prealloc"
+    - "predeclared"
+    - "promlinter"
+    - "revive"
+    - "rowserrcheck"
+    - "staticcheck"
+    - "structcheck"
+    - "stylecheck"
+    - "tenv"
+    - "typecheck"
+    - "unconvert"
+    - "unused"
+    - "varcheck"
+    - "wastedassign"
+    - "whitespace"
diff --git a/bittorrent/params_test.go b/bittorrent/params_test.go
index 5e6c70c..dced055 100644
--- a/bittorrent/params_test.go
+++ b/bittorrent/params_test.go
@@ -92,7 +92,7 @@ func TestParseInvalidURLData(t *testing.T) {
 
 func TestParseShouldNotPanicURLData(t *testing.T) {
 	for _, parseStr := range shouldNotPanicQueries {
-		ParseURLData(parseStr)
+		_, _ = ParseURLData(parseStr)
 	}
 }
 
diff --git a/frontend/http/bencode/decoder_test.go b/frontend/http/bencode/decoder_test.go
index ebb0378..2aaf736 100644
--- a/frontend/http/bencode/decoder_test.go
+++ b/frontend/http/bencode/decoder_test.go
@@ -46,8 +46,8 @@ func BenchmarkUnmarshalScalar(b *testing.B) {
 	d2 := NewDecoder(&bufferLoop{"i42e"})
 
 	for i := 0; i < b.N; i++ {
-		d1.Decode()
-		d2.Decode()
+		_, _ = d1.Decode()
+		_, _ = d2.Decode()
 	}
 }
 
@@ -79,6 +79,6 @@ func BenchmarkUnmarshalLarge(b *testing.B) {
 	dec := NewDecoder(&bufferLoop{string(buf)})
 
 	for i := 0; i < b.N; i++ {
-		dec.Decode()
+		_, _ = dec.Decode()
 	}
 }
diff --git a/frontend/http/bencode/encoder_test.go b/frontend/http/bencode/encoder_test.go
index b173f3c..3842486 100644
--- a/frontend/http/bencode/encoder_test.go
+++ b/frontend/http/bencode/encoder_test.go
@@ -50,8 +50,8 @@ func BenchmarkMarshalScalar(b *testing.B) {
 	encoder := NewEncoder(buf)
 
 	for i := 0; i < b.N; i++ {
-		encoder.Encode("test")
-		encoder.Encode(123)
+		_ = encoder.Encode("test")
+		_ = encoder.Encode(123)
 	}
 }
 
@@ -67,6 +67,6 @@ func BenchmarkMarshalLarge(b *testing.B) {
 	encoder := NewEncoder(buf)
 
 	for i := 0; i < b.N; i++ {
-		encoder.Encode(data)
+		_ = encoder.Encode(data)
 	}
 }
diff --git a/frontend/http/frontend.go b/frontend/http/frontend.go
index 556476f..2704788 100644
--- a/frontend/http/frontend.go
+++ b/frontend/http/frontend.go
@@ -317,7 +317,7 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
 
 	req, err := ParseAnnounce(r, f.ParseOptions)
 	if err != nil {
-		WriteError(w, err)
+		_ = WriteError(w, err)
 		return
 	}
 	af = new(bittorrent.AddressFamily)
@@ -326,14 +326,14 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
 	ctx := injectRouteParamsToContext(context.Background(), ps)
 	ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
 	if err != nil {
-		WriteError(w, err)
+		_ = WriteError(w, err)
 		return
 	}
 
 	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 	err = WriteAnnounceResponse(w, resp)
 	if err != nil {
-		WriteError(w, err)
+		_ = WriteError(w, err)
 		return
 	}
 
@@ -358,14 +358,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
 
 	req, err := ParseScrape(r, f.ParseOptions)
 	if err != nil {
-		WriteError(w, err)
+		_ = WriteError(w, err)
 		return
 	}
 
 	host, _, err := net.SplitHostPort(r.RemoteAddr)
 	if err != nil {
 		log.Error("http: unable to determine remote address for scrape", log.Err(err))
-		WriteError(w, err)
+		_ = WriteError(w, err)
 		return
 	}
 
@@ -376,7 +376,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
 		req.AddressFamily = bittorrent.IPv6
 	} else {
 		log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
-		WriteError(w, bittorrent.ErrInvalidIP)
+		_ = WriteError(w, bittorrent.ErrInvalidIP)
 		return
 	}
 	af = new(bittorrent.AddressFamily)
@@ -385,14 +385,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
 	ctx := injectRouteParamsToContext(context.Background(), ps)
 	ctx, resp, err := f.logic.HandleScrape(ctx, req)
 	if err != nil {
-		WriteError(w, err)
+		_ = WriteError(w, err)
 		return
 	}
 
 	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 	err = WriteScrapeResponse(w, resp)
 	if err != nil {
-		WriteError(w, err)
+		_ = WriteError(w, err)
 		return
 	}
 
diff --git a/frontend/udp/bytepool/bytepool.go b/frontend/udp/bytepool/bytepool.go
index 93507a9..148247b 100644
--- a/frontend/udp/bytepool/bytepool.go
+++ b/frontend/udp/bytepool/bytepool.go
@@ -11,7 +11,7 @@ type BytePool struct {
 func New(length int) *BytePool {
 	var bp BytePool
 	bp.Pool.New = func() interface{} {
-		return make([]byte, length, length)
+		return make([]byte, length)
 	}
 	return &bp
 }
diff --git a/frontend/udp/frontend.go b/frontend/udp/frontend.go
index 98f8b75..695e8ec 100644
--- a/frontend/udp/frontend.go
+++ b/frontend/udp/frontend.go
@@ -148,7 +148,7 @@ func (t *Frontend) Stop() stop.Result {
 	c := make(stop.Channel)
 	go func() {
 		close(t.closing)
-		t.socket.SetReadDeadline(time.Now())
+		_ = t.socket.SetReadDeadline(time.Now())
 		t.wg.Wait()
 		c.Done(t.socket.Close())
 	}()
@@ -244,7 +244,7 @@ type ResponseWriter struct {
 
 // Write implements the io.Writer interface for a ResponseWriter.
 func (w ResponseWriter) Write(b []byte) (int, error) {
-	w.socket.WriteToUDP(b, w.addr)
+	_, _ = w.socket.WriteToUDP(b, w.addr)
 	return len(b), nil
 }
 
diff --git a/frontend/udp/parser.go b/frontend/udp/parser.go
index 96ea9d8..85a5eb1 100644
--- a/frontend/udp/parser.go
+++ b/frontend/udp/parser.go
@@ -24,8 +24,8 @@ const (
 // Option-Types as described in BEP 41 and BEP 45.
 const (
 	optionEndOfOptions byte = 0x0
-	optionNOP               = 0x1
-	optionURLData           = 0x2
+	optionNOP          byte = 0x1
+	optionURLData      byte = 0x2
 )
 
 var (
@@ -161,7 +161,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
 		return bittorrent.ParseURLData("")
 	}
 
-	var buf = newBuffer()
+	buf := newBuffer()
 	defer buf.free()
 
 	for i := 0; i < len(packet); {
diff --git a/frontend/udp/writer.go b/frontend/udp/writer.go
index c23619f..a26db64 100644
--- a/frontend/udp/writer.go
+++ b/frontend/udp/writer.go
@@ -20,7 +20,7 @@ func WriteError(w io.Writer, txID []byte, err error) {
 	writeHeader(buf, txID, errorActionID)
 	buf.WriteString(err.Error())
 	buf.WriteRune('\000')
-	w.Write(buf.Bytes())
+	_, _ = w.Write(buf.Bytes())
 	buf.free()
 }
 
@@ -37,9 +37,9 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
 	} else {
 		writeHeader(buf, txID, announceActionID)
 	}
-	binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
-	binary.Write(buf, binary.BigEndian, resp.Incomplete)
-	binary.Write(buf, binary.BigEndian, resp.Complete)
+	_ = binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
+	_ = binary.Write(buf, binary.BigEndian, resp.Incomplete)
+	_ = binary.Write(buf, binary.BigEndian, resp.Complete)
 
 	peers := resp.IPv4Peers
 	if v6Peers {
@@ -48,10 +48,10 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
 
 	for _, peer := range peers {
 		buf.Write(peer.IP.IP)
-		binary.Write(buf, binary.BigEndian, peer.Port)
+		_ = binary.Write(buf, binary.BigEndian, peer.Port)
 	}
 
-	w.Write(buf.Bytes())
+	_, _ = w.Write(buf.Bytes())
 	buf.free()
 }
 
@@ -62,12 +62,12 @@ func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
 	writeHeader(buf, txID, scrapeActionID)
 
 	for _, scrape := range resp.Files {
-		binary.Write(buf, binary.BigEndian, scrape.Complete)
-		binary.Write(buf, binary.BigEndian, scrape.Snatches)
-		binary.Write(buf, binary.BigEndian, scrape.Incomplete)
+		_ = binary.Write(buf, binary.BigEndian, scrape.Complete)
+		_ = binary.Write(buf, binary.BigEndian, scrape.Snatches)
+		_ = binary.Write(buf, binary.BigEndian, scrape.Incomplete)
 	}
 
-	w.Write(buf.Bytes())
+	_, _ = w.Write(buf.Bytes())
 	buf.free()
 }
 
@@ -78,13 +78,13 @@ func WriteConnectionID(w io.Writer, txID, connID []byte) {
 	writeHeader(buf, txID, connectActionID)
 	buf.Write(connID)
 
-	w.Write(buf.Bytes())
+	_, _ = w.Write(buf.Bytes())
 	buf.free()
 }
 
 // writeHeader writes the action and transaction ID to the provided response
 // buffer.
 func writeHeader(w io.Writer, txID []byte, action uint32) {
-	binary.Write(w, binary.BigEndian, action)
-	w.Write(txID)
+	_ = binary.Write(w, binary.BigEndian, action)
+	_, _ = w.Write(txID)
 }
diff --git a/middleware/jwt/jwt.go b/middleware/jwt/jwt.go
index a7541ac..f8c760a 100644
--- a/middleware/jwt/jwt.go
+++ b/middleware/jwt/jwt.go
@@ -105,7 +105,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
 				return
 			case <-time.After(cfg.JWKUpdateInterval):
 				log.Debug("performing fetch of JWKs")
-				h.updateKeys()
+				_ = h.updateKeys()
 			}
 		}
 	}()
diff --git a/storage/memory/peer_store.go b/storage/memory/peer_store.go
index a3358b2..5e95e31 100644
--- a/storage/memory/peer_store.go
+++ b/storage/memory/peer_store.go
@@ -142,7 +142,7 @@ func New(provided Config) (storage.PeerStore, error) {
 			case <-time.After(cfg.GarbageCollectionInterval):
 				before := time.Now().Add(-cfg.PeerLifetime)
 				log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
-				ps.collectGarbage(before)
+				_ = ps.collectGarbage(before)
 			}
 		}
 	}()
@@ -183,7 +183,8 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
 	peer := bittorrent.Peer{
 		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
 		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
-		IP:   bittorrent.IP{IP: net.IP(pk[22:])}}
+		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
+	}
 
 	if ip := peer.IP.To4(); ip != nil {
 		peer.IP.IP = ip
diff --git a/storage/redis/peer_store.go b/storage/redis/peer_store.go
index 59c2413..fbcf54a 100644
--- a/storage/redis/peer_store.go
+++ b/storage/redis/peer_store.go
@@ -245,7 +245,8 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
 	peer := bittorrent.Peer{
 		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
 		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
-		IP:   bittorrent.IP{IP: net.IP(pk[22:])}}
+		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
+	}
 
 	if ip := peer.IP.To4(); ip != nil {
 		peer.IP.IP = ip
@@ -356,9 +357,9 @@ func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	conn.Send("MULTI")
-	conn.Send("HSET", encodedSeederInfoHash, pk, ct)
-	conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
+	_ = conn.Send("MULTI")
+	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
+	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err
@@ -437,9 +438,9 @@ func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	conn.Send("MULTI")
-	conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
-	conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
+	_ = conn.Send("MULTI")
+	_ = conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
+	_ = conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err
@@ -509,10 +510,10 @@ func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer)
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	conn.Send("MULTI")
-	conn.Send("HDEL", encodedLeecherInfoHash, pk)
-	conn.Send("HSET", encodedSeederInfoHash, pk, ct)
-	conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
+	_ = conn.Send("MULTI")
+	_ = conn.Send("HDEL", encodedLeecherInfoHash, pk)
+	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
+	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err
@@ -782,10 +783,10 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
 				// in other words, it's removed automatically after `HDEL` the last field.
 				//_, err := conn.Do("DEL", ihStr)
 
-				conn.Send("MULTI")
-				conn.Send("HDEL", group, ihStr)
+				_ = conn.Send("MULTI")
+				_ = conn.Send("HDEL", group, ihStr)
 				if isSeeder {
-					conn.Send("DECR", ps.infohashCountKey(group))
+					_ = conn.Send("DECR", ps.infohashCountKey(group))
 				}
 				_, err = redis.Values(conn.Do("EXEC"))
 				if err != nil && err != redis.ErrNil {
diff --git a/storage/storage_bench.go b/storage/storage_bench.go
index 5f8e8ce..dee1f6f 100644
--- a/storage/storage_bench.go
+++ b/storage/storage_bench.go
@@ -53,8 +53,10 @@ func generatePeers() (a [1000]bittorrent.Peer) {
 	return
 }
 
-type executionFunc func(int, PeerStore, *benchData) error
-type setupFunc func(PeerStore, *benchData) error
+type (
+	executionFunc func(int, PeerStore, *benchData) error
+	setupFunc     func(PeerStore, *benchData) error
+)
 
 func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
 	bd := &benchData{generateInfohashes(), generatePeers()}
@@ -185,6 +187,7 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
 		err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
 		if err != nil {
+			return err
 		}
 		return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
 	})
@@ -211,7 +214,7 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
 // DeleteNonexist can run in parallel.
 func DeleteNonexist(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
+		_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
 		return nil
 	})
 }
@@ -222,7 +225,7 @@ func DeleteNonexist(b *testing.B, ps PeerStore) {
 // DeleteNonexist can run in parallel.
 func DeleteNonexist1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
+		_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
 		return nil
 	})
 }
@@ -233,7 +236,7 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) {
 // DeleteNonexist1kInfohash can run in parallel.
 func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
+		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
 		return nil
 	})
 }
@@ -244,7 +247,7 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
 // DeleteNonexist1kInfohash1k can run in parallel.
 func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
+		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
 		return nil
 	})
 }
@@ -255,7 +258,7 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 // GradNonexist can run in parallel.
 func GradNonexist(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
+		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
 		return nil
 	})
 }
@@ -266,7 +269,7 @@ func GradNonexist(b *testing.B, ps PeerStore) {
 // GradNonexist1k can run in parallel.
 func GradNonexist1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
+		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
 		return nil
 	})
 }
@@ -277,7 +280,7 @@ func GradNonexist1k(b *testing.B, ps PeerStore) {
 // GradNonexist1kInfohash can run in parallel.
 func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
+		_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
 		return nil
 	})
 }
@@ -289,7 +292,7 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
 // GradNonexist1kInfohash1k can run in parallel.
 func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
+		_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
 		return nil
 	})
 }
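A minimal sketch of exercising the new lint configuration locally (hypothetical invocation: golangci-lint only auto-discovers configs named .golangci.yml, .golangci.yaml, .golangci.toml, or .golangci.json, so the non-default filename added above would have to be passed explicitly with --config):

    golangci-lint run --config .golangci-lint.yaml ./...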