Merge pull request #549 from jzelinskie/fix-vet

.github: split workflows into build/lint, add new linters

Commit 828edb8fd8 · 48 changed files with 533 additions and 358 deletions
.github/FUNDING.yml (vendored, 4 changes)

@@ -1 +1,3 @@
-github: [ jzelinskie ]
+---
+github:
+  - "jzelinskie"
.github/dependabot.yml (vendored, 37 changes)

@@ -1,22 +1,23 @@
+---
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
       interval: "monthly"
     labels:
      - "component/dependencies"

   - package-ecosystem: "gomod"
     directory: "/"
     schedule:
       interval: "monthly"
     labels:
       - "component/dependencies"

   - package-ecosystem: "docker"
     directory: "/"
     schedule:
       interval: "monthly"
     labels:
       - "component/dependencies"
.github/workflows/CI.yaml (vendored, deleted, 115 lines)

@@ -1,115 +0,0 @@
-name: CI
-on:
-  # See the documentation for more intricate event dispatch here:
-  # https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#on
-  push:
-    branches:
-      - "!dependabot/*"
-      - "*"
-  pull_request:
-    branches:
-      - "*"
-jobs:
-  build:
-    name: Build & Lint
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: Build
-        run: go build -v ./cmd/...
-      - name: Vet
-        run: go vet ./...
-      - name: Imports
-        uses: Jerome1337/goimports-action@v1.0.3
-      - name: Format
-        uses: Jerome1337/gofmt-action@v1.0.4
-      - name: Lint
-        uses: Jerome1337/golint-action@v1.0.2
-
-  unit:
-    name: Unit Tests
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: Unit Tests
-        run: go test -v -race $(go list ./...)
-
-  e2e-mem:
-    name: E2E Tests (Memory Storage)
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: End-to-End Test
-        run: |
-          go install ./cmd/chihaya
-          cat ./dist/example_config.yaml
-          chihaya --config=./dist/example_config.yaml --debug &
-          pid=$!
-          sleep 2
-          chihaya e2e --debug
-          kill $pid
-
-  e2e-redis:
-    name: E2E Tests (Redis Storage)
-    runs-on: ubuntu-latest
-    services:
-      redis:
-        image: redis
-        ports: ["6379:6379"]
-        options: --entrypoint redis-server
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: Configure redis storage
-        run: |
-          curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
-          chmod +x faq-linux-amd64
-          ./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
-          cat ./dist/example_redis_config.yaml
-      - name: End-to-End Test
-        run: |
-          go install ./cmd/chihaya
-          chihaya --config=./dist/example_redis_config.yaml --debug &
-          pid=$!
-          sleep 2
-          chihaya e2e --debug
-          kill $pid
-
-  dist:
-    name: Helm Template
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Install Helm
-        uses: engineerd/configurator@v0.0.5
-        with:
-          name: helm
-          pathInArchive: linux-amd64/helm
-          fromGitHubReleases: true
-          repo: helm/helm
-          version: ^v3
-          urlTemplate: https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz
-          token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Helm Template
-        working-directory: ./dist/helm/chihaya
-        run: helm template . --debug
.github/workflows/build.yaml (vendored, new file, 112 lines)

@@ -0,0 +1,112 @@
+---
+name: "Build & Test"
+on:
+  push:
+    branches:
+      - "!dependabot/*"
+      - "main"
+  pull_request:
+    branches: ["*"]
+jobs:
+  build:
+    name: "Go Build"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Build"
+        run: "go build ./cmd/..."
+
+  unit:
+    name: "Run Unit Tests"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Run `go test`"
+        run: "go test -race ./..."
+
+  e2e-mem:
+    name: "E2E Memory Tests"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Install and configure chihaya"
+        run: |
+          go install ./cmd/chihaya
+          cat ./dist/example_config.yaml
+      - name: "Run end-to-end tests"
+        run: |
+          chihaya --config=./dist/example_config.yaml --debug &
+          pid=$!
+          sleep 2
+          chihaya e2e --debug
+          kill $pid
+
+  e2e-redis:
+    name: "E2E Redis Tests"
+    runs-on: "ubuntu-latest"
+    services:
+      redis:
+        image: "redis"
+        ports: ["6379:6379"]
+        options: "--entrypoint redis-server"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Install and configure chihaya"
+        run: |
+          go install ./cmd/chihaya
+          curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
+          chmod +x faq-linux-amd64
+          ./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
+          cat ./dist/example_redis_config.yaml
+      - name: "Run end-to-end tests"
+        run: |
+          chihaya --config=./dist/example_redis_config.yaml --debug &
+          pid=$!
+          sleep 2
+          chihaya e2e --debug
+          kill $pid
+
+  image-build:
+    name: "Docker Build"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "docker/setup-qemu-action@v1"
+      - uses: "docker/setup-buildx-action@v1"
+        with:
+          driver-opts: "image=moby/buildkit:master"
+      - uses: "docker/build-push-action@v1"
+        with:
+          push: false
+          tags: "latest"
+
+  helm:
+    name: "Helm Template"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - name: "Install Helm"
+        uses: "engineerd/configurator@v0.0.5"
+        with:
+          name: "helm"
+          pathInArchive: "linux-amd64/helm"
+          fromGitHubReleases: true
+          repo: "helm/helm"
+          version: "^v3"
+          urlTemplate: "https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz"
+          token: "${{ secrets.GITHUB_TOKEN }}"
+      - name: "Run `helm template`"
+        working-directory: "./dist/helm/chihaya"
+        run: "helm template . --debug"
.github/workflows/lint.yaml (vendored, new file, 86 lines)

@@ -0,0 +1,86 @@
+---
+name: "Lint"
+on:
+  push:
+    branches:
+      - "!dependabot/*"
+      - "main"
+  pull_request:
+    branches: ["*"]
+jobs:
+  go-mod-tidy:
+    name: "Lint Go Modules"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Run `go mod tidy`"
+        run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'"
+
+  go-fmt:
+    name: "Format Go"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Install gofumpt"
+        run: "go install mvdan.cc/gofumpt@latest"
+      - name: "Run `gofumpt`"
+        run: |
+          GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | xargs gofumpt -d)"
+          if [ -n "$GOFUMPT_OUTPUT" ]; then
+            echo "The following files are not correctly formatted:"
+            echo "${GOFUMPT_OUTPUT}"
+            exit 1
+          fi
+
+  go-lint:
+    name: "Lint Go"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - uses: "golangci/golangci-lint-action@v2"
+        with:
+          version: "v1.43"
+          skip-go-installation: true
+          skip-pkg-cache: true
+          skip-build-cache: false
+
+  extra-lint:
+    name: "Lint YAML & Markdown"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "bewuethr/yamllint-action@v1.1.1"
+        with:
+          config-file: ".yamllint"
+      - uses: "nosborn/github-action-markdown-cli@v2.0.0"
+        with:
+          files: "."
+          config_file: ".markdownlint.yaml"
+
+  codeql:
+    name: "Analyze with CodeQL"
+    runs-on: "ubuntu-latest"
+    permissions:
+      actions: "read"
+      contents: "read"
+      security-events: "write"
+    strategy:
+      fail-fast: false
+      matrix:
+        language: ["go"]
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "github/codeql-action/init@v1"
+        with:
+          languages: "${{ matrix.language }}"
+      - uses: "github/codeql-action/autobuild@v1"
+      - uses: "github/codeql-action/analyze@v1"
.golangci.yaml (new file, 50 lines)

@@ -0,0 +1,50 @@
+---
+run:
+  timeout: "5m"
+output:
+  sort-results: true
+linters-settings:
+  goimports:
+    local-prefixes: "github.com/chihaya/chihaya"
+  gosec:
+    excludes:
+      - "G404"  # Allow the usage of math/rand
+linters:
+  enable:
+    - "bidichk"
+    - "bodyclose"
+    - "deadcode"
+    - "errcheck"
+    - "errname"
+    - "errorlint"
+    - "gofumpt"
+    - "goimports"
+    - "goprintffuncname"
+    - "gosec"
+    - "gosimple"
+    - "govet"
+    - "ifshort"
+    - "importas"
+    - "ineffassign"
+    - "makezero"
+    - "prealloc"
+    - "predeclared"
+    - "revive"
+    - "rowserrcheck"
+    - "staticcheck"
+    - "structcheck"
+    - "stylecheck"
+    - "tenv"
+    - "typecheck"
+    - "unconvert"
+    - "unused"
+    - "varcheck"
+    - "wastedassign"
+    - "whitespace"
+issues:
+  include:
+    - "EXC0012"  # Exported should have comment
+    - "EXC0012"  # Exported should have comment
+    - "EXC0013"  # Package comment should be of form
+    - "EXC0014"  # Comment on exported should be of form
+    - "EXC0015"  # Should have a package comment
.markdownlint.yaml (new file, 3 lines)

@@ -0,0 +1,3 @@
+---
+line-length: false
+no-hard-tabs: false
.yamllint (new file, 11 lines)

@@ -0,0 +1,11 @@
+# vim: ft=yaml
+---
+yaml-files:
+  - "*.yaml"
+  - "*.yml"
+  - ".yamllint"
+ignore: "dist/helm/"
+extends: "default"
+rules:
+  quoted-strings: "enable"
+  line-length: "disable"
CONTRIBUTING.md

@@ -1,3 +1,5 @@
 # How to Contribute
 
+## Discussion
+
 Long-term discussion and bug reports are maintained via [GitHub Issues].

@@ -55,7 +57,7 @@ All files should have `gofmt` executed on them and code should strive to have fu
 We follow a rough convention for commit messages that is designed to answer two questions: what changed and why.
 The subject line should feature the what and the body of the commit should describe the why.
 
-```
+```git
 scripts: add the test-cluster command
 
 this uses tmux to setup a test cluster that you can easily kill and

@@ -66,7 +68,7 @@ Fixes #38
 
 The format can be described more formally as follows:
 
-```
+```git
 <subsystem>: <what changed>
 <BLANK LINE>
 <why this change was made>
README.md (18 changes)

@@ -1,7 +1,7 @@
 # Chihaya
 
-[![Build Status](https://github.com/chihaya/chihaya/workflows/Build%20&%20Test/badge.svg)](https://github.com/chihaya/chihaya/actions)
-[![Docker Repository on Quay](https://quay.io/repository/jzelinskie/chihaya/status "Docker Repository on Quay")](https://quay.io/repository/jzelinskie/chihaya)
+[![Build Status](https://github.com/chihaya/chihaya/workflows/Build%20&%20Test/badge.svg?branch=main)](https://github.com/chihaya/chihaya/actions)
+[![Container Image](https://img.shields.io/badge/container-image-blue)](https://quay.io/repository/jzelinskie/chihaya?tab=tags)
 [![GoDoc](https://godoc.org/github.com/chihaya/chihaya?status.svg)](https://godoc.org/github.com/chihaya/chihaya)
 [![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
 [![IRC Channel](https://img.shields.io/badge/irc-%23chihaya-blue.svg "IRC Channel")](https://web.libera.chat/#chihaya)

@@ -57,10 +57,10 @@ For more information read [CONTRIBUTING.md].
 In order to compile the project, the [latest stable version of Go] and knowledge of a [working Go environment] are required.
 
 ```sh
-$ git clone git@github.com:chihaya/chihaya.git
-$ cd chihaya
-$ go build ./cmd/chihaya
-$ ./chihaya --help
+git clone git@github.com:chihaya/chihaya.git
+cd chihaya
+go build ./cmd/chihaya
+./chihaya --help
 ```
 
 [latest stable version of Go]: https://golang.org/dl

@@ -79,15 +79,15 @@ The following will run all tests and benchmarks.
 Removing `-bench` will just run unit tests.
 
 ```sh
-$ go test -bench $(go list ./...)
+go test -bench $(go list ./...)
 ```
 
 The Chihaya executable contains a command to end-to-end test a BitTorrent tracker.
 See
 
 ```sh
-$ chihaya --help
+chihaya --help
 ```
 
 ### Configuration
@@ -5,7 +5,7 @@ import (
 )
 
 func TestClientID(t *testing.T) {
-    var clientTable = []struct{ peerID, clientID string }{
+    clientTable := []struct{ peerID, clientID string }{
        {"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
        {"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
        {"-BS5820-oy4La2MWGEFj", "BS5820"},
@@ -8,7 +8,7 @@ import (
 )
 
 func TestNew(t *testing.T) {
-    var table = []struct {
+    table := []struct {
        data        string
        expected    Event
        expectedErr error
@@ -187,15 +187,15 @@ func (qp *QueryParams) String(key string) (string, bool) {
    return value, ok
 }
 
-// Uint64 returns a uint parsed from a query. After being called, it is safe to
+// Uint returns a uint parsed from a query. After being called, it is safe to
 // cast the uint64 to your desired length.
-func (qp *QueryParams) Uint64(key string) (uint64, error) {
+func (qp *QueryParams) Uint(key string, bitSize int) (uint64, error) {
    str, exists := qp.params[key]
    if !exists {
        return 0, ErrKeyNotFound
    }
 
-    val, err := strconv.ParseUint(str, 10, 64)
+    val, err := strconv.ParseUint(str, 10, bitSize)
    if err != nil {
        return 0, err
    }
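The `bitSize` argument above is passed straight through to `strconv.ParseUint`, which rejects any value that does not fit the requested width, so the range check becomes part of parsing and the later narrowing cast is safe. A minimal standalone sketch of the same idea (`parsePort` is illustrative, not the tracker's API):

```go
package main

import (
	"fmt"
	"strconv"
)

// parsePort mirrors the pattern above: ParseUint with bitSize 16 enforces
// the 0..65535 range during parsing, so the uint16 cast cannot truncate.
func parsePort(s string) (uint16, error) {
	v, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		return 0, err
	}
	return uint16(v), nil
}

func main() {
	p, err := parsePort("6881")
	fmt.Println(p, err) // 6881 <nil>

	_, err = parsePort("70000") // does not fit in 16 bits
	fmt.Println(err)            // strconv.ParseUint: ... value out of range
}
```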
@@ -92,7 +92,7 @@ func TestParseInvalidURLData(t *testing.T) {
 
 func TestParseShouldNotPanicURLData(t *testing.T) {
    for _, parseStr := range shouldNotPanicQueries {
-        ParseURLData(parseStr)
+        _, _ = ParseURLData(parseStr)
    }
 }
@@ -1,8 +1,8 @@
 package main
 
 import (
+    "context"
    "errors"
-    "os"
    "os/signal"
    "runtime"
    "strings"

@@ -100,7 +100,7 @@ func (r *Run) Start(ps storage.PeerStore) error {
 }
 
 func combineErrors(prefix string, errs []error) error {
-    var errStrs []string
+    errStrs := make([]string, 0, len(errs))
    for _, err := range errs {
        errStrs = append(errStrs, err.Error())
    }

@@ -144,15 +144,13 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
        return err
    }
 
-    quit := make(chan os.Signal)
-    signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
-
-    reload := makeReloadChan()
+    ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+    reload, _ := signal.NotifyContext(context.Background(), ReloadSignals...)
 
    for {
        select {
-        case <-reload:
-            log.Info("reloading; received SIGUSR1")
+        case <-reload.Done():
+            log.Info("reloading; received reload signal")
            peerStore, err := r.Stop(true)
            if err != nil {
                return err

@@ -161,8 +159,8 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
            if err := r.Start(peerStore); err != nil {
                return err
            }
-        case <-quit:
-            log.Info("shutting down; received SIGINT/SIGTERM")
+        case <-ctx.Done():
+            log.Info("shutting down; received shutdown signal")
            if _, err := r.Stop(false); err != nil {
                return err
            }

@@ -210,7 +208,7 @@ func RootPostRunCmdFunc(cmd *cobra.Command, args []string) error {
 }
 
 func main() {
-    var rootCmd = &cobra.Command{
+    rootCmd := &cobra.Command{
        Use:   "chihaya",
        Short: "BitTorrent Tracker",
        Long:  "A customizable, multi-protocol BitTorrent Tracker",

@@ -229,7 +227,7 @@ func main() {
 
    rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
 
-    var e2eCmd = &cobra.Command{
+    e2eCmd := &cobra.Command{
        Use:   "e2e",
        Short: "exec e2e tests",
        Long:  "Execute the Chihaya end-to-end test suite",
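`signal.NotifyContext` (Go 1.16+) folds the make-channel/`signal.Notify` boilerplate into a context whose `Done` channel closes on the first matching signal, which is what the hunk above switches to. A minimal sketch of the pattern (the diff discards the returned stop function; this sketch defers it instead):

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// ctx.Done() closes when SIGINT or SIGTERM arrives; stop releases the
	// signal registration once we no longer care.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	select {
	case <-ctx.Done():
		fmt.Println("shutting down; received shutdown signal")
	case <-time.After(5 * time.Second):
		fmt.Println("no signal within 5s")
	}
}
```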
@@ -1,15 +1,15 @@
+//go:build darwin || freebsd || linux || netbsd || openbsd || dragonfly || solaris
 // +build darwin freebsd linux netbsd openbsd dragonfly solaris
 
 package main
 
 import (
    "os"
-    "os/signal"
    "syscall"
 )
 
-func makeReloadChan() <-chan os.Signal {
-    reload := make(chan os.Signal)
-    signal.Notify(reload, syscall.SIGUSR1)
-    return reload
+// ReloadSignals are the signals that the current OS will send to the process
+// when a configuration reload is requested.
+var ReloadSignals = []os.Signal{
+    syscall.SIGUSR1,
 }
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package main

@@ -8,8 +9,6 @@ import (
    "syscall"
 )
 
-func makeReloadChan() <-chan os.Signal {
-    reload := make(chan os.Signal)
-    signal.Notify(reload, syscall.SIGHUP)
-    return reload
+var ReloadSignals = []os.Signal{
+    syscall.SIGHUP,
 }
dist/example_config.yaml (vendored, 81 changes)

@@ -1,15 +1,16 @@
+---
 chihaya:
   # The interval communicated with BitTorrent clients informing them how
   # frequently they should announce in between client events.
-  announce_interval: 30m
+  announce_interval: "30m"
 
   # The interval communicated with BitTorrent clients informing them of the
   # minimal duration between announces.
-  min_announce_interval: 15m
+  min_announce_interval: "15m"
 
   # The network interface that will bind to an HTTP endpoint that can be
   # scraped by programs collecting metrics.
   #
   # /metrics serves metrics in the Prometheus format
   # /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
   metrics_addr: "0.0.0.0:6880"

@@ -30,14 +31,14 @@ chihaya:
     tls_key_path: ""
 
     # The timeout durations for HTTP requests.
-    read_timeout: 5s
-    write_timeout: 5s
+    read_timeout: "5s"
+    write_timeout: "5s"
 
     # When true, persistent connections will be allowed. Generally this is not
     # useful for a public tracker, but helps performance in some cases (use of
     # a reverse proxy, or when there are few clients issuing many requests).
     enable_keepalive: false
-    idle_timeout: 30s
+    idle_timeout: "30s"
 
     # Whether to time requests.
     # Disabling this should increase performance/decrease load.

@@ -88,7 +89,7 @@ chihaya:
     addr: "0.0.0.0:6969"
 
     # The leeway for a timestamp on a connection ID.
-    max_clock_skew: 10s
+    max_clock_skew: "10s"
 
     # The key used to encrypt connection IDs.
     private_key: "paste a random string here that will be used to hmac connection IDs"

@@ -113,17 +114,17 @@ chihaya:
 
   # This block defines configuration used for the storage of peer data.
   storage:
-    name: memory
+    name: "memory"
     config:
       # The frequency which stale peers are removed.
       # This balances between
       #  - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
       #  - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
-      gc_interval: 3m
+      gc_interval: "3m"
 
       # The amount of time until a peer is considered stale.
       # To avoid churn, keep this slightly larger than `announce_interval`
-      peer_lifetime: 31m
+      peer_lifetime: "31m"
 
       # The number of partitions data will be divided into in order to provide a
       # higher degree of parallelism.

@@ -131,7 +132,7 @@ chihaya:
 
       # The interval at which metrics about the number of infohashes and peers
       # are collected and posted to Prometheus.
-      prometheus_reporting_interval: 1s
+      prometheus_reporting_interval: "1s"
 
   # This block defines configuration used for redis storage.
   # storage:

@@ -141,56 +142,56 @@ chihaya:
   #   # This balances between
   #   #  - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
   #   #  - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
-  #   gc_interval: 3m
+  #   gc_interval: "3m"
 
   #   # The interval at which metrics about the number of infohashes and peers
   #   # are collected and posted to Prometheus.
-  #   prometheus_reporting_interval: 1s
+  #   prometheus_reporting_interval: "1s"
 
   #   # The amount of time until a peer is considered stale.
   #   # To avoid churn, keep this slightly larger than `announce_interval`
-  #   peer_lifetime: 31m
+  #   peer_lifetime: "31m"
 
   #   # The address of redis storage.
   #   redis_broker: "redis://pwd@127.0.0.1:6379/0"
 
   #   # The timeout for reading a command reply from redis.
-  #   redis_read_timeout: 15s
+  #   redis_read_timeout: "15s"
 
   #   # The timeout for writing a command to redis.
-  #   redis_write_timeout: 15s
+  #   redis_write_timeout: "15s"
 
   #   # The timeout for connecting to redis server.
-  #   redis_connect_timeout: 15s
+  #   redis_connect_timeout: "15s"
 
   # This block defines configuration used for middleware executed before a
   # response has been returned to a BitTorrent client.
   prehooks:
-  #- name: jwt
-  #  options:
-  #    issuer: "https://issuer.com"
-  #    audience: "https://chihaya.issuer.com"
-  #    jwk_set_url: "https://issuer.com/keys"
-  #    jwk_set_update_interval: 5m
+  # - name: "jwt"
+  #   options:
+  #     issuer: "https://issuer.com"
+  #     audience: "https://chihaya.issuer.com"
+  #     jwk_set_url: "https://issuer.com/keys"
+  #     jwk_set_update_interval: "5m"
 
-  #- name: client approval
-  #  options:
-  #    whitelist:
-  #    - "OP1011"
-  #    blacklist:
-  #    - "OP1012"
+  # - name: "client approval"
+  #   options:
+  #     whitelist:
+  #       - "OP1011"
+  #     blacklist:
+  #       - "OP1012"
 
-  #- name: interval variation
-  #  options:
-  #    modify_response_probability: 0.2
-  #    max_increase_delta: 60
-  #    modify_min_interval: true
+  # - name: "interval variation"
+  #   options:
+  #     modify_response_probability: 0.2
+  #     max_increase_delta: 60
+  #     modify_min_interval: true
 
   # This block defines configuration used for torrent approval, it requires to be given
   # hashes for whitelist or for blacklist. Hashes are hexadecimal-encoded.
-  #- name: torrent approval
-  #  options:
-  #    whitelist:
-  #    - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
-  #    blacklist:
-  #    - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"
+  # - name: "torrent approval"
+  #   options:
+  #     whitelist:
+  #       - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
+  #     blacklist:
+  #       - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"
dist/prometheus/prometheus.yaml (vendored, 11 changes)

@@ -1,11 +1,12 @@
+---
 global:
-  scrape_interval: 5s
-  evaluation_interval: 5s
+  scrape_interval: "5s"
+  evaluation_interval: "5s"
 
 # A scrape configuration containing exactly one endpoint to scrape:
 scrape_configs:
   # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'local-chihaya' # you can name this however you want
-    scrape_interval: 5s # optionally override the global scrape_interval
+  - job_name: "local-chihaya"  # you can name this however you want
+    scrape_interval: "5s"  # optionally override the global scrape_interval
     static_configs:
-      - targets: ['localhost:6881'] # provide the address of chihaya's prometheus endpoint
+      - targets: ["localhost:6881"]  # provide the address of chihaya's prometheus endpoint
@@ -1,6 +1,6 @@
 # Architecture
 
-### Overview
+## Overview
 
 BitTorrent clients send Announce and Scrape requests to a _Frontend_.
 Frontends parse requests and write responses for the particular protocol they implement.

@@ -11,6 +11,6 @@ After all PreHooks have executed, any missing response fields that are required
 PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
 Because they are unnecessary for generating a response, updates to the Storage for a particular request are done asynchronously in a PostHook.
 
-### Diagram
+## Diagram
 
 ![architecture diagram](…)
@@ -44,11 +44,11 @@ The typical control flow of handling announces, in more detail, is:
 6. Send the response to the Client.
 7. Pass the request and response to the `TrackerLogic`'s `AfterAnnounce` or `AfterScrape` method.
 8. Finish, accept next request.
-9. For invalid requests or errors during processing: Send an error response to the client.
-  This step may be skipped for suspected denial-of-service attacks.
-  The error response may contain information about the cause of the error.
-  Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
-  Then finish, and accept the next request.
+9. For invalid requests or errors during processing: Send an error response to the client.
+   This step may be skipped for suspected denial-of-service attacks.
+   The error response may contain information about the cause of the error.
+   Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
+   Then finish, and accept the next request.

@@ -62,20 +62,22 @@ Frontends may provide runtime metrics, such as the number of requests or their d
 Metrics must be reported using [Prometheus].
 
 A frontend should provide at least the following metrics:
 
 - The number of valid and invalid requests handled
 - The average time it takes to handle a single request.
-    This request timing should be made optional using a config entry.
+  This request timing should be made optional using a config entry.
 
 Requests should be separated by type, i.e. Scrapes, Announces, and other protocol-specific requests.
 If the frontend serves multiple transports or networks, metrics for them should be separable.
 
 It is recommended to publish one Prometheus `HistogramVec` with:
 
 - A name like `chihaya_PROTOCOL_response_duration_milliseconds`
 - A value holding the duration in milliseconds of the reported request
 - Labels for:
-    - `action` (= `announce`, `scrape`, ...)
-    - `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
-    - `error` (= A textual representation of the error encountered during processing.)
+  - `action` (= `announce`, `scrape`, ...)
+  - `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
+  - `error` (= A textual representation of the error encountered during processing.)
+
 Because `error` is expected to hold the textual representation of any error that occurred during the request, great care must be taken to ensure all error messages are static.
 `error` must not contain any information directly taken from the request, e.g. the value of an invalid parameter.
 This would cause this dimension of prometheus to explode, which slows down prometheus clients and reporters.
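A minimal sketch of such a `HistogramVec` using the Prometheus Go client (the metric and label names follow the recommendation above; the actual frontends define their own):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var responseDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "chihaya_udp_response_duration_milliseconds",
		Help: "Histogram of response times for UDP requests.",
	},
	[]string{"action", "address_family", "error"},
)

func init() { prometheus.MustRegister(responseDuration) }

// observe records one request; errString must be static, never derived from
// request data, to keep the label cardinality bounded as warned above.
func observe(action, addressFamily, errString string, d time.Duration) {
	responseDuration.
		WithLabelValues(action, addressFamily, errString).
		Observe(float64(d.Nanoseconds()) / float64(time.Millisecond))
}
```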
@ -106,4 +108,4 @@ This way, a PreHook can communicate with a PostHook by setting a context value.
|
|||
[BEP 3]: http://bittorrent.org/beps/bep_0003.html
|
||||
[BEP 15]: http://bittorrent.org/beps/bep_0015.html
|
||||
[Prometheus]: https://prometheus.io/
|
||||
[old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
|
||||
[old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
|
||||
|
|
@@ -59,7 +59,7 @@ All the InfoHashes (swarms) are also stored in a redis hash, with IP family as t
 
 Here is an example:
 
-```
+```yaml
 - IPv4
   - IPv4_S_<infohash 1>: <modification time>
   - IPv4_L_<infohash 1>: <modification time>

@@ -73,15 +73,14 @@ Here is an example:
   - <peer 3 key>: <modification time>
 ```
 
-
 In this case, prometheus would record two swarms, three seeders, and one leecher.
 These three keys per address family are used to record the count of swarms, seeders, and leechers.
 
-```
+```yaml
 - IPv4_infohash_count: 2
 - IPv4_S_count: 3
 - IPv4_L_count: 1
 ```
 
-Note: IPv4_infohash_count has a different meaning compared to the `memory` storage:
+Note: `IPv4_infohash_count` has a different meaning compared to the `memory` storage:
 It represents the number of infohashes reported by seeder, meaning that infohashes without seeders are not counted.
@@ -46,8 +46,8 @@ func BenchmarkUnmarshalScalar(b *testing.B) {
    d2 := NewDecoder(&bufferLoop{"i42e"})
 
    for i := 0; i < b.N; i++ {
-        d1.Decode()
-        d2.Decode()
+        _, _ = d1.Decode()
+        _, _ = d2.Decode()
    }
 }

@@ -79,6 +79,6 @@ func BenchmarkUnmarshalLarge(b *testing.B) {
    dec := NewDecoder(&bufferLoop{string(buf)})
 
    for i := 0; i < b.N; i++ {
-        dec.Decode()
+        _, _ = dec.Decode()
    }
 }
@@ -66,7 +66,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
        err = marshalInt(w, int64(v))
 
    case int64:
-        err = marshalInt(w, int64(v))
+        err = marshalInt(w, v)
 
    case uint:
        err = marshalUint(w, uint64(v))

@@ -78,7 +78,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
        err = marshalUint(w, uint64(v))
 
    case uint64:
-        err = marshalUint(w, uint64(v))
+        err = marshalUint(w, v)
 
    case time.Duration: // Assume seconds
        err = marshalInt(w, int64(v/time.Second))

@@ -90,7 +90,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
        err = marshalList(w, v)
 
    case []Dict:
-        var interfaceSlice = make([]interface{}, len(v))
+        interfaceSlice := make([]interface{}, len(v))
        for i, d := range v {
            interfaceSlice[i] = d
        }
@@ -50,8 +50,8 @@ func BenchmarkMarshalScalar(b *testing.B) {
    encoder := NewEncoder(buf)
 
    for i := 0; i < b.N; i++ {
-        encoder.Encode("test")
-        encoder.Encode(123)
+        _ = encoder.Encode("test")
+        _ = encoder.Encode(123)
    }
 }

@@ -67,6 +67,6 @@ func BenchmarkMarshalLarge(b *testing.B) {
    encoder := NewEncoder(buf)
 
    for i := 0; i < b.N; i++ {
-        encoder.Encode(data)
+        _ = encoder.Encode(data)
    }
 }
@@ -164,6 +164,7 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
    if cfg.TLSCertPath != "" && cfg.TLSKeyPath != "" {
+        var err error
        f.tlsCfg = &tls.Config{
            MinVersion:   tls.VersionTLS12,
            Certificates: make([]tls.Certificate, 1),
        }
        f.tlsCfg.Certificates[0], err = tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)

@@ -265,7 +266,7 @@ func (f *Frontend) serveHTTP(l net.Listener) error {
    f.srv.SetKeepAlivesEnabled(f.EnableKeepAlive)
 
    // Start the HTTP server.
-    if err := f.srv.Serve(l); err != http.ErrServerClosed {
+    if err := f.srv.Serve(l); !errors.Is(err, http.ErrServerClosed) {
        return err
    }
    return nil

@@ -285,7 +286,7 @@ func (f *Frontend) serveHTTPS(l net.Listener) error {
    f.tlsSrv.SetKeepAlivesEnabled(f.EnableKeepAlive)
 
    // Start the HTTP server.
-    if err := f.tlsSrv.ServeTLS(l, "", ""); err != http.ErrServerClosed {
+    if err := f.tlsSrv.ServeTLS(l, "", ""); !errors.Is(err, http.ErrServerClosed) {
        return err
    }
    return nil

@@ -317,7 +318,7 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
 
    req, err := ParseAnnounce(r, f.ParseOptions)
    if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
        return
    }
    af = new(bittorrent.AddressFamily)

@@ -326,14 +327,14 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
    ctx := injectRouteParamsToContext(context.Background(), ps)
    ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
    if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
        return
    }
 
    w.Header().Set("Content-Type", "text/plain; charset=utf-8")
    err = WriteAnnounceResponse(w, resp)
    if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
        return
    }

@@ -358,14 +359,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
 
    req, err := ParseScrape(r, f.ParseOptions)
    if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
        return
    }
 
    host, _, err := net.SplitHostPort(r.RemoteAddr)
    if err != nil {
        log.Error("http: unable to determine remote address for scrape", log.Err(err))
-        WriteError(w, err)
+        _ = WriteError(w, err)
        return
    }

@@ -376,7 +377,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
        req.AddressFamily = bittorrent.IPv6
    } else {
        log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
-        WriteError(w, bittorrent.ErrInvalidIP)
+        _ = WriteError(w, bittorrent.ErrInvalidIP)
        return
    }
    af = new(bittorrent.AddressFamily)

@@ -385,14 +386,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
    ctx := injectRouteParamsToContext(context.Background(), ps)
    ctx, resp, err := f.logic.HandleScrape(ctx, req)
    if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
        return
    }
 
    w.Header().Set("Content-Type", "text/plain; charset=utf-8")
    err = WriteScrapeResponse(w, resp)
    if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
        return
    }
@@ -1,6 +1,7 @@
 package http
 
 import (
+    "errors"
    "net"
    "net/http"
 

@@ -73,26 +74,26 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
    request.Peer.ID = bittorrent.PeerIDFromString(peerID)
 
    // Determine the number of remaining bytes for the client.
-    request.Left, err = qp.Uint64("left")
+    request.Left, err = qp.Uint("left", 64)
    if err != nil {
        return nil, bittorrent.ClientError("failed to parse parameter: left")
    }
 
    // Determine the number of bytes downloaded by the client.
-    request.Downloaded, err = qp.Uint64("downloaded")
+    request.Downloaded, err = qp.Uint("downloaded", 64)
    if err != nil {
        return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
    }
 
    // Determine the number of bytes shared by the client.
-    request.Uploaded, err = qp.Uint64("uploaded")
+    request.Uploaded, err = qp.Uint("uploaded", 64)
    if err != nil {
        return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
    }
 
    // Determine the number of peers the client wants in the response.
-    numwant, err := qp.Uint64("numwant")
-    if err != nil && err != bittorrent.ErrKeyNotFound {
+    numwant, err := qp.Uint("numwant", 32)
+    if err != nil && !errors.Is(err, bittorrent.ErrKeyNotFound) {
        return nil, bittorrent.ClientError("failed to parse parameter: numwant")
    }
    // If there were no errors, the user actually provided the numwant.

@@ -100,7 +101,7 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
    request.NumWant = uint32(numwant)
 
    // Parse the port where the client is listening.
-    port, err := qp.Uint64("port")
+    port, err := qp.Uint("port", 16)
    if err != nil {
        return nil, bittorrent.ClientError("failed to parse parameter: port")
    }
@@ -1,6 +1,7 @@
 package http
 
 import (
+    "errors"
    "time"
 
    "github.com/prometheus/client_golang/prometheus"

@@ -26,8 +27,9 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
    var errString string
    if err != nil {
-        if _, ok := err.(bittorrent.ClientError); ok {
-            errString = err.Error()
+        var clientErr bittorrent.ClientError
+        if errors.As(err, &clientErr) {
+            errString = clientErr.Error()
        } else {
            errString = "internal error"
        }
@@ -1,6 +1,7 @@
 package http
 
 import (
+    "errors"
    "net/http"
 
    "github.com/chihaya/chihaya/bittorrent"

@@ -11,8 +12,9 @@ import (
 // WriteError communicates an error to a BitTorrent client over HTTP.
 func WriteError(w http.ResponseWriter, err error) error {
    message := "internal server error"
-    if _, clientErr := err.(bittorrent.ClientError); clientErr {
-        message = err.Error()
+    var clientErr bittorrent.ClientError
+    if errors.As(err, &clientErr) {
+        message = clientErr.Error()
    } else {
        log.Error("http: internal error", log.Err(err))
    }

@@ -57,7 +59,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
    }
 
    // Add the peers to the dictionary.
-    var peers []bencode.Dict
+    peers := make([]bencode.Dict, 0, len(resp.IPv4Peers)+len(resp.IPv6Peers))
    for _, peer := range resp.IPv4Peers {
        peers = append(peers, dict(peer))
    }
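`errors.As` walks the whole wrap chain looking for a value of the target type, so a `ClientError` stays recognizable even after being wrapped with `%w` somewhere upstream, whereas the old type assertion only inspected the outermost error. A self-contained sketch of the pattern (this `ClientError` is a stand-in, not the tracker's actual definition):

```go
package main

import (
	"errors"
	"fmt"
)

// ClientError marks failures caused by the client, mirroring the string-based
// error type used above.
type ClientError string

func (c ClientError) Error() string { return string(c) }

// message decides what is safe to show a client: client-caused errors are
// explained, everything else is hidden behind a generic message.
func message(err error) string {
	var clientErr ClientError
	if errors.As(err, &clientErr) {
		return clientErr.Error()
	}
	return "internal server error"
}

func main() {
	wrapped := fmt.Errorf("handling announce: %w", ClientError("invalid info_hash"))
	fmt.Println(message(wrapped))            // invalid info_hash
	fmt.Println(message(errors.New("boom"))) // internal server error
}
```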
@@ -11,7 +11,7 @@ import (
 )
 
 func TestWriteError(t *testing.T) {
-    var table = []struct {
+    table := []struct {
        reason, expected string
    }{
        {"hello world", "d14:failure reason11:hello worlde"},

@@ -29,7 +29,7 @@ func TestWriteError(t *testing.T) {
 }
 
 func TestWriteStatus(t *testing.T) {
-    var table = []struct {
+    table := []struct {
        reason, expected string
    }{
        {"something is missing", "d14:failure reason20:something is missinge"},
@@ -11,24 +11,27 @@ type BytePool struct {
 func New(length int) *BytePool {
    var bp BytePool
    bp.Pool.New = func() interface{} {
-        return make([]byte, length, length)
+        b := make([]byte, length)
+        return &b
    }
    return &bp
 }
 
 // Get returns a byte slice from the pool.
-func (bp *BytePool) Get() []byte {
-    return bp.Pool.Get().([]byte)
+func (bp *BytePool) Get() *[]byte {
+    return bp.Pool.Get().(*[]byte)
 }
 
 // Put returns a byte slice to the pool.
-func (bp *BytePool) Put(b []byte) {
-    b = b[:cap(b)]
+func (bp *BytePool) Put(b *[]byte) {
+    *b = (*b)[:cap(*b)]
 
    // Zero out the bytes.
-    // Apparently this specific expression is optimized by the compiler, see
-    // github.com/golang/go/issues/5373.
-    for i := range b {
-        b[i] = 0
+    // This specific expression is optimized by the compiler:
+    // https://github.com/golang/go/issues/5373.
+    for i := range *b {
+        (*b)[i] = 0
    }
 
    bp.Pool.Put(b)
 }
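Switching the pool to `*[]byte` addresses staticcheck's SA6002: a bare slice placed into `sync.Pool`'s `interface{}` has its header copied to the heap on every `Put`, causing an allocation the pool was meant to avoid, while a pointer does not. A minimal standalone version of the same pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// pool hands out fixed-size buffers via pointers so Put does not allocate
// (storing a bare []byte in an interface{} would copy its header to the heap).
var pool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 2048)
		return &b
	},
}

func main() {
	buf := pool.Get().(*[]byte)
	n := copy(*buf, "packet bytes")
	fmt.Println(n, len(*buf)) // 12 2048

	// Restore full length and return the same allocation for reuse.
	*buf = (*buf)[:cap(*buf)]
	pool.Put(buf)
}
```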
@@ -6,6 +6,7 @@ import (
    "bytes"
    "context"
    "encoding/binary"
+    "errors"
    "fmt"
    "math/rand"
    "net"

@@ -123,8 +124,7 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
        },
    }
 
-    err := f.listen()
-    if err != nil {
+    if err := f.listen(); err != nil {
        return nil, err
    }

@@ -148,7 +148,7 @@ func (t *Frontend) Stop() stop.Result {
    c := make(stop.Channel)
    go func() {
        close(t.closing)
-        t.socket.SetReadDeadline(time.Now())
+        _ = t.socket.SetReadDeadline(time.Now())
        t.wg.Wait()
        c.Done(t.socket.Close())
    }()

@@ -185,10 +185,11 @@ func (t *Frontend) serve() error {
 
        // Read a UDP packet into a reusable buffer.
        buffer := pool.Get()
-        n, addr, err := t.socket.ReadFromUDP(buffer)
+        n, addr, err := t.socket.ReadFromUDP(*buffer)
        if err != nil {
            pool.Put(buffer)
-            if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
+            var netErr net.Error
+            if errors.As(err, &netErr); netErr.Temporary() {
                // A temporary failure is not fatal; just pretend it never happened.
                continue
            }

@@ -217,7 +218,7 @@ func (t *Frontend) serve() error {
        }
        action, af, err := t.handleRequest(
            // Make sure the IP is copied, not referenced.
-            Request{buffer[:n], append([]byte{}, addr.IP...)},
+            Request{(*buffer)[:n], append([]byte{}, addr.IP...)},
            ResponseWriter{t.socket, addr},
        )
        if t.EnableRequestTiming {

@@ -244,7 +245,7 @@ type ResponseWriter struct {
 
 // Write implements the io.Writer interface for a ResponseWriter.
 func (w ResponseWriter) Write(b []byte) (int, error) {
-    w.socket.WriteToUDP(b, w.addr)
+    _, _ = w.socket.WriteToUDP(b, w.addr)
    return len(b), nil
 }
@@ -24,8 +24,8 @@ const (
 // Option-Types as described in BEP 41 and BEP 45.
 const (
    optionEndOfOptions byte = 0x0
-    optionNOP               = 0x1
-    optionURLData           = 0x2
+    optionNOP          byte = 0x1
+    optionURLData      byte = 0x2
 )
 
 var (

@@ -115,7 +115,7 @@ func ParseAnnounce(r Request, v6Action bool, opts ParseOptions) (*bittorrent.Ann
    request := &bittorrent.AnnounceRequest{
        Event:      eventIDs[eventID],
        InfoHash:   bittorrent.InfoHashFromBytes(infohash),
-        NumWant:    uint32(numWant),
+        NumWant:    numWant,
        Left:       left,
        Downloaded: downloaded,
        Uploaded:   uploaded,

@@ -161,7 +161,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
        return bittorrent.ParseURLData("")
    }
 
-    var buf = newBuffer()
+    buf := newBuffer()
    defer buf.free()
 
    for i := 0; i < len(packet); {
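The const-block fix above matters because a constant with its own initializer does not inherit the type of the previous entry; only an entirely omitted expression (as with `iota` continuation) does, so `optionNOP = 0x1` was an untyped constant rather than a `byte`. A compact illustration:

```go
package main

import "fmt"

const (
	a byte = 0x0 // explicitly typed byte
	b      = 0x1 // own initializer, no type: a new untyped constant (defaults to int)
	c byte = 0x2 // explicitly typed again, as in the fixed const block
)

func main() {
	fmt.Printf("%T %T %T\n", a, b, c) // uint8 int uint8
}
```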
@@ -1,6 +1,7 @@
 package udp
 
 import (
+    "errors"
    "fmt"
    "testing"
 )

@@ -51,7 +52,7 @@ func TestHandleOptionalParameters(t *testing.T) {
    for _, tt := range table {
        t.Run(fmt.Sprintf("%#v as %#v", tt.data, tt.values), func(t *testing.T) {
            params, err := handleOptionalParameters(tt.data)
-            if err != tt.err {
+            if !errors.Is(err, tt.err) {
                if tt.err == nil {
                    t.Fatalf("expected no parsing error for %x but got %s", tt.data, err)
                } else {
@@ -1,6 +1,7 @@
 package udp
 
 import (
+    "errors"
    "time"
 
    "github.com/prometheus/client_golang/prometheus"

@@ -26,8 +27,9 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
    var errString string
    if err != nil {
-        if _, ok := err.(bittorrent.ClientError); ok {
-            errString = err.Error()
+        var clientErr bittorrent.ClientError
+        if errors.As(err, &clientErr) {
+            errString = clientErr.Error()
        } else {
            errString = "internal error"
        }
@@ -2,6 +2,7 @@ package udp
 
 import (
    "encoding/binary"
+    "errors"
    "fmt"
    "io"
    "time"

@@ -12,15 +13,16 @@ import (
 // WriteError writes the failure reason as a null-terminated string.
 func WriteError(w io.Writer, txID []byte, err error) {
    // If the client wasn't at fault, acknowledge it.
-    if _, ok := err.(bittorrent.ClientError); !ok {
-        err = fmt.Errorf("internal error occurred: %s", err.Error())
+    var clientErr bittorrent.ClientError
+    if !errors.As(err, &clientErr) {
+        err = fmt.Errorf("internal error occurred: %w", err)
    }
 
    buf := newBuffer()
    writeHeader(buf, txID, errorActionID)
    buf.WriteString(err.Error())
    buf.WriteRune('\000')
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
    buf.free()
 }

@@ -37,9 +39,9 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
    } else {
        writeHeader(buf, txID, announceActionID)
    }
-    binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
-    binary.Write(buf, binary.BigEndian, resp.Incomplete)
-    binary.Write(buf, binary.BigEndian, resp.Complete)
+    _ = binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
+    _ = binary.Write(buf, binary.BigEndian, resp.Incomplete)
+    _ = binary.Write(buf, binary.BigEndian, resp.Complete)
 
    peers := resp.IPv4Peers
    if v6Peers {

@@ -48,10 +50,10 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
 
    for _, peer := range peers {
        buf.Write(peer.IP.IP)
-        binary.Write(buf, binary.BigEndian, peer.Port)
+        _ = binary.Write(buf, binary.BigEndian, peer.Port)
    }
 
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
    buf.free()
 }

@@ -62,12 +64,12 @@ func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
    writeHeader(buf, txID, scrapeActionID)
 
    for _, scrape := range resp.Files {
-        binary.Write(buf, binary.BigEndian, scrape.Complete)
-        binary.Write(buf, binary.BigEndian, scrape.Snatches)
-        binary.Write(buf, binary.BigEndian, scrape.Incomplete)
+        _ = binary.Write(buf, binary.BigEndian, scrape.Complete)
+        _ = binary.Write(buf, binary.BigEndian, scrape.Snatches)
+        _ = binary.Write(buf, binary.BigEndian, scrape.Incomplete)
    }
 
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
    buf.free()
 }

@@ -78,13 +80,13 @@ func WriteConnectionID(w io.Writer, txID, connID []byte) {
    writeHeader(buf, txID, connectActionID)
    buf.Write(connID)
 
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
    buf.free()
 }
 
 // writeHeader writes the action and transaction ID to the provided response
 // buffer.
 func writeHeader(w io.Writer, txID []byte, action uint32) {
-    binary.Write(w, binary.BigEndian, action)
-    w.Write(txID)
+    _ = binary.Write(w, binary.BigEndian, action)
+    _, _ = w.Write(txID)
 }
@@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
    var cfg Config
    err := yaml.Unmarshal(optionBytes, &cfg)
    if err != nil {
-        return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
+        return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
    }
 
    return NewHook(cfg)
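The `%w` verb (Go 1.13+), used throughout the middleware drivers above, records the wrapped error so `errors.Is` and `errors.As` can still match it later, whereas `%s` flattens it to text and breaks the chain. A small sketch of the difference (the sentinel and names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errBadOptions = errors.New("bad options") // illustrative sentinel

func main() {
	flattened := fmt.Errorf("invalid options for middleware %s: %s", "jwt", errBadOptions)
	wrapped := fmt.Errorf("invalid options for middleware %s: %w", "jwt", errBadOptions)

	fmt.Println(errors.Is(flattened, errBadOptions)) // false: only the text survives
	fmt.Println(errors.Is(wrapped, errBadOptions))   // true: the chain is preserved
}
```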
@@ -2,6 +2,7 @@ package middleware
 
 import (
    "context"
+    "errors"
 
    "github.com/chihaya/chihaya/bittorrent"
    "github.com/chihaya/chihaya/storage"

@@ -37,12 +38,12 @@ func (h *swarmInteractionHook) HandleAnnounce(ctx context.Context, req *bittorre
    switch {
    case req.Event == bittorrent.Stopped:
        err = h.store.DeleteSeeder(req.InfoHash, req.Peer)
-        if err != nil && err != storage.ErrResourceDoesNotExist {
+        if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
            return ctx, err
        }
 
        err = h.store.DeleteLeecher(req.InfoHash, req.Peer)
-        if err != nil && err != storage.ErrResourceDoesNotExist {
+        if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
            return ctx, err
        }
    case req.Event == bittorrent.Completed:

@@ -106,7 +107,7 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
 func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error {
    seeding := req.Left == 0
    peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer)
-    if err != nil && err != storage.ErrResourceDoesNotExist {
+    if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
        return err
    }
@@ -44,7 +44,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
    var cfg Config
    err := yaml.Unmarshal(optionBytes, &cfg)
    if err != nil {
-        return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
+        return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
    }
 
    return NewHook(cfg)

@@ -93,8 +93,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
    }
 
    log.Debug("performing initial fetch of JWKs")
-    err := h.updateKeys()
-    if err != nil {
+    if err := h.updateKeys(); err != nil {
        return nil, errors.New("failed to fetch initial JWK Set: " + err.Error())
    }

@@ -105,7 +104,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
                return
            case <-time.After(cfg.JWKUpdateInterval):
                log.Debug("performing fetch of JWKs")
-                h.updateKeys()
+                _ = h.updateKeys()
            }
        }
    }()
@@ -11,7 +11,7 @@ import (
 //
 // Calling DeriveEntropyFromRequest multiple times yields the same values.
 func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) {
-    v0 := binary.BigEndian.Uint64([]byte(req.InfoHash[:8])) + binary.BigEndian.Uint64([]byte(req.InfoHash[8:16]))
-    v1 := binary.BigEndian.Uint64([]byte(req.Peer.ID[:8])) + binary.BigEndian.Uint64([]byte(req.Peer.ID[8:16]))
+    v0 := binary.BigEndian.Uint64(req.InfoHash[:8]) + binary.BigEndian.Uint64(req.InfoHash[8:16])
+    v1 := binary.BigEndian.Uint64(req.Peer.ID[:8]) + binary.BigEndian.Uint64(req.Peer.ID[8:16])
    return v0, v1
 }
@@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
    var cfg Config
    err := yaml.Unmarshal(optionBytes, &cfg)
    if err != nil {
-        return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
+        return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
    }
 
    return NewHook(cfg)
@@ -29,7 +29,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
    var cfg Config
    err := yaml.Unmarshal(optionBytes, &cfg)
    if err != nil {
-        return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
+        return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
    }
 
    return NewHook(cfg)

@@ -77,8 +77,7 @@ type hook struct {
 // NewHook creates a middleware to randomly modify the announce interval from
 // the given config.
 func NewHook(cfg Config) (middleware.Hook, error) {
-    err := checkConfig(cfg)
-    if err != nil {
+    if err := checkConfig(cfg); err != nil {
        return nil, err
    }

@@ -96,12 +95,12 @@ func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceReque
    if h.cfg.ModifyResponseProbability == 1 || p < h.cfg.ModifyResponseProbability {
        // Generate the increase delta.
        v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta)
-        addSeconds := time.Duration(v+1) * time.Second
+        deltaDuration := time.Duration(v+1) * time.Second
 
-        resp.Interval += addSeconds
+        resp.Interval += deltaDuration
 
        if h.cfg.ModifyMinInterval {
-            resp.MinInterval += addSeconds
+            resp.MinInterval += deltaDuration
        }
 
        return ctx, nil
@@ -4,6 +4,7 @@ package metrics
 
 import (
    "context"
+    "errors"
    "net/http"
    "net/http/pprof"

@@ -49,7 +50,7 @@ func NewServer(addr string) *Server {
    }
 
    go func() {
-        if err := s.srv.ListenAndServe(); err != http.ErrServerClosed {
+        if err := s.srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
            log.Fatal("failed while serving prometheus", log.Err(err))
        }
    }()
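`http.Server` returns the sentinel `http.ErrServerClosed` after `Shutdown` or `Close`, so treating it as a failure would log a spurious fatal on every clean exit; `errors.Is` keeps the comparison correct even if the error is ever wrapped. A runnable sketch of the pattern (address and timing are demo-only assumptions):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:6880"}

	go func() {
		// ListenAndServe always returns a non-nil error; ErrServerClosed
		// just means Shutdown was called and is not a real failure.
		if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
			fmt.Println("serve failed:", err)
		}
	}()

	time.Sleep(100 * time.Millisecond) // let the listener start (demo only)
	_ = srv.Shutdown(context.Background())
	fmt.Println("clean shutdown")
}
```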
|
@@ -4,6 +4,7 @@ package memory
 
 import (
 	"encoding/binary"
+	"math"
 	"net"
 	"runtime"
 	"sync"
@@ -79,7 +80,7 @@ func (cfg Config) LogFields() log.Fields {
 func (cfg Config) Validate() Config {
 	validcfg := cfg
 
-	if cfg.ShardCount <= 0 {
+	if cfg.ShardCount <= 0 || cfg.ShardCount > (math.MaxInt/2) {
 		validcfg.ShardCount = defaultShardCount
 		log.Warn("falling back to default configuration", log.Fields{
 			"name": Name + ".ShardCount",
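The added upper bound keeps ShardCount from overflowing downstream arithmetic; the memory store appears to size its shard slice at twice the configured count (one set of shards per address family), which would explain math.MaxInt/2 as the ceiling, though that doubling is an inference rather than something this hunk shows. A sketch of the validation logic, with an illustrative default:

package main

import (
	"fmt"
	"math"
)

const defaultShardCount = 1024 // illustrative; check the store's actual default

// validShardCount mirrors the bounds in the hunk above.
// math.MaxInt is the platform int's maximum (available since Go 1.17).
func validShardCount(n int) int {
	if n <= 0 || n > (math.MaxInt/2) {
		return defaultShardCount
	}
	return n
}

func main() {
	fmt.Println(validShardCount(-1))          // falls back: non-positive
	fmt.Println(validShardCount(math.MaxInt)) // falls back: would overflow when doubled
	fmt.Println(validShardCount(4096))        // kept as-is
}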
@@ -142,7 +143,7 @@ func New(provided Config) (storage.PeerStore, error) {
 			case <-time.After(cfg.GarbageCollectionInterval):
 				before := time.Now().Add(-cfg.PeerLifetime)
 				log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
-				ps.collectGarbage(before)
+				_ = ps.collectGarbage(before)
 			}
 		}
 	}()
@@ -183,7 +184,8 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
 	peer := bittorrent.Peer{
 		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
 		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
-		IP:   bittorrent.IP{IP: net.IP(pk[22:])}}
+		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
+	}
 
 	if ip := peer.IP.To4(); ip != nil {
 		peer.IP.IP = ip
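Both decodePeerKey variants (here and in the redis store below) read the same flat key layout: bytes 0-19 are the peer ID, bytes 20-21 a big-endian port, and the remainder the IP, normalized to 4 bytes when it is IPv4. A round-trip sketch under that layout (the encoder here is inferred from the decoder, not quoted from the repository):

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// encodePeerKey packs a peer as: 20-byte ID | 2-byte big-endian port | IP.
// The layout is inferred from decodePeerKey in the hunks above.
func encodePeerKey(id [20]byte, port uint16, ip net.IP) string {
	b := make([]byte, 22+len(ip))
	copy(b[:20], id[:])
	binary.BigEndian.PutUint16(b[20:22], port)
	copy(b[22:], ip)
	return string(b)
}

func main() {
	var id [20]byte
	copy(id[:], "-CH0001-abcdefghijkl") // placeholder peer ID

	pk := encodePeerKey(id, 6881, net.ParseIP("203.0.113.7").To4())

	// Decode side, matching the diff: fixed offsets, then To4 normalization.
	port := binary.BigEndian.Uint16([]byte(pk[20:22]))
	ip := net.IP(pk[22:])
	fmt.Println(port, ip) // 6881 203.0.113.7
}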
@@ -25,6 +25,7 @@ package redis
 
 import (
 	"encoding/binary"
+	"errors"
 	"net"
 	"strconv"
 	"sync"
@@ -245,7 +246,8 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
 	peer := bittorrent.Peer{
 		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
 		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
-		IP:   bittorrent.IP{IP: net.IP(pk[22:])}}
+		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
+	}
 
 	if ip := peer.IP.To4(); ip != nil {
 		peer.IP.IP = ip
@@ -300,7 +302,7 @@ func (ps *peerStore) populateProm() {
 	defer conn.Close()
 
 	for _, group := range ps.groups() {
-		if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && err != redis.ErrNil {
+		if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
 			log.Error("storage: GET counter failure", log.Fields{
 				"key":   ps.infohashCountKey(group),
 				"error": err,
@@ -308,7 +310,7 @@ func (ps *peerStore) populateProm() {
 		} else {
 			numInfohashes += n
 		}
-		if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && err != redis.ErrNil {
+		if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
 			log.Error("storage: GET counter failure", log.Fields{
 				"key":   ps.seederCountKey(group),
 				"error": err,
@@ -316,7 +318,7 @@ func (ps *peerStore) populateProm() {
 		} else {
 			numSeeders += n
 		}
-		if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && err != redis.ErrNil {
+		if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
 			log.Error("storage: GET counter failure", log.Fields{
 				"key":   ps.leecherCountKey(group),
 				"error": err,
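redigo returns the sentinel redis.ErrNil when a key is absent, and these counters may legitimately not exist yet, so a nil reply is not a failure. errors.Is behaves identically to != for a bare sentinel but keeps working if some layer wraps the error. A server-free demonstration with a local stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

// errNil stands in for redigo's redis.ErrNil sentinel.
var errNil = errors.New("redigo: nil returned")

func getCounter() (int64, error) {
	// Simulate a missing key whose error was wrapped with %w along the way;
	// a plain != comparison against the sentinel would miss this.
	return 0, fmt.Errorf("GET counter: %w", errNil)
}

func main() {
	n, err := getCounter()
	if err != nil && !errors.Is(err, errNil) {
		fmt.Println("real failure:", err)
		return
	}
	// A nil reply just means "key absent"; treat the counter as zero.
	fmt.Println("counter:", n)
}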
@@ -356,9 +358,9 @@ func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	conn.Send("MULTI")
-	conn.Send("HSET", encodedSeederInfoHash, pk, ct)
-	conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
+	_ = conn.Send("MULTI")
+	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
+	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err
@@ -437,9 +439,9 @@ func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	conn.Send("MULTI")
-	conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
-	conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
+	_ = conn.Send("MULTI")
+	_ = conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
+	_ = conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err
@@ -509,10 +511,10 @@ func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer)
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	conn.Send("MULTI")
-	conn.Send("HDEL", encodedLeecherInfoHash, pk)
-	conn.Send("HSET", encodedSeederInfoHash, pk, ct)
-	conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
+	_ = conn.Send("MULTI")
+	_ = conn.Send("HDEL", encodedLeecherInfoHash, pk)
+	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
+	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err
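All three hunks use redigo's pipelined transaction shape: Send only queues a command in the client-side buffer (its error would be a local write failure, hence the explicit discard), while Do("EXEC") flushes the MULTI block and returns every queued command's reply. A hedged sketch of the same shape, assuming a Redis server on localhost:6379 and purely illustrative key names:

package main

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

func main() {
	// Assumes a reachable Redis server; adjust the address as needed.
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Queue a transaction client-side. Send only buffers bytes, so its
	// error is discarded; any server-side failure comes back from EXEC.
	_ = conn.Send("MULTI")
	_ = conn.Send("HSET", "swarm:example", "peer1", 1700000000)
	_ = conn.Send("HSET", "swarm:counts", "swarm:example", 1700000000)

	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		panic(err)
	}
	fmt.Println("EXEC replies:", reply) // one integer per queued HSET
}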
@@ -782,13 +784,13 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
 				// in other words, it's removed automatically after `HDEL` the last field.
 				//_, err := conn.Do("DEL", ihStr)
 
-				conn.Send("MULTI")
-				conn.Send("HDEL", group, ihStr)
+				_ = conn.Send("MULTI")
+				_ = conn.Send("HDEL", group, ihStr)
 				if isSeeder {
-					conn.Send("DECR", ps.infohashCountKey(group))
+					_ = conn.Send("DECR", ps.infohashCountKey(group))
 				}
 				_, err = redis.Values(conn.Do("EXEC"))
-				if err != nil && err != redis.ErrNil {
+				if err != nil && !errors.Is(err, redis.ErrNil) {
 					log.Error("storage: Redis EXEC failure", log.Fields{
 						"group":    group,
 						"infohash": ihStr,
@@ -796,7 +798,7 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
 				})
 			}
 		} else {
-			if _, err = conn.Do("UNWATCH"); err != nil && err != redis.ErrNil {
+			if _, err = conn.Do("UNWATCH"); err != nil && !errors.Is(err, redis.ErrNil) {
 				log.Error("storage: Redis UNWATCH failure", log.Fields{"error": err})
 			}
 		}
@@ -23,7 +23,8 @@ func createNew() s.PeerStore {
 		RedisBroker:         redisURL,
 		RedisReadTimeout:    10 * time.Second,
 		RedisWriteTimeout:   10 * time.Second,
-		RedisConnectTimeout: 10 * time.Second})
+		RedisConnectTimeout: 10 * time.Second,
+	})
 	if err != nil {
 		panic(err)
 	}
@@ -69,7 +69,7 @@ func (rc *redisConnector) NewPool() *redis.Pool {
 		},
 		// PINGs connections that have been idle more than 10 seconds
 		TestOnBorrow: func(c redis.Conn, t time.Time) error {
-			if time.Since(t) < time.Duration(10*time.Second) {
+			if time.Since(t) < 10*time.Second {
 				return nil
 			}
 			_, err := c.Do("PING")
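time.Second is already a time.Duration, so 10*time.Second needs no time.Duration(...) conversion; the surrounding TestOnBorrow hook is redigo's standard way to health-check idle pooled connections before handing them out. A pool sketch in the same shape (address and sizes are illustrative):

package main

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

func newPool(addr string) *redis.Pool {
	return &redis.Pool{
		MaxIdle:     10,
		IdleTimeout: 240 * time.Second,
		Dial:        func() (redis.Conn, error) { return redis.Dial("tcp", addr) },
		// Only PING connections that have sat idle for 10s or more;
		// 10*time.Second is already a time.Duration, no conversion needed.
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			if time.Since(t) < 10*time.Second {
				return nil
			}
			_, err := c.Do("PING")
			return err
		},
	}
}

func main() {
	pool := newPool("localhost:6379") // assumes a local Redis
	defer pool.Close()

	conn := pool.Get()
	defer conn.Close()
	_, _ = conn.Do("PING")
}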
@@ -80,7 +80,7 @@ func (rc *redisConnector) NewPool() *redis.Pool {
 
 // Open a new Redis connection
 func (rc *redisConnector) open() (redis.Conn, error) {
-	var opts = []redis.DialOption{
+	opts := []redis.DialOption{
 		redis.DialDatabase(rc.URL.DB),
 		redis.DialReadTimeout(rc.ReadTimeout),
 		redis.DialWriteTimeout(rc.WriteTimeout),
@@ -119,7 +119,7 @@ func parseRedisURL(target string) (*redisURL, error) {
 		return nil, errors.New("no redis scheme found")
 	}
 
-	db := 0 //default redis db
+	db := 0 // default redis db
 	parts := strings.Split(u.Path, "/")
 	if len(parts) != 1 {
 		db, err = strconv.Atoi(parts[1])
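The touched comment sits in the helper that extracts the database index from a redis:// URL path; for example redis://localhost:6379/2 selects DB 2, and a bare host falls back to 0. A simplified, self-contained take on that parsing (the function name and error text are illustrative):

package main

import (
	"fmt"
	"net/url"
	"strconv"
	"strings"
)

// parseDB pulls the database index out of a redis:// URL path, defaulting
// to 0 when no path segment is present.
func parseDB(target string) (int, error) {
	u, err := url.Parse(target)
	if err != nil {
		return 0, err
	}

	db := 0 // default redis db
	parts := strings.Split(u.Path, "/")
	if len(parts) != 1 {
		if db, err = strconv.Atoi(parts[1]); err != nil {
			return 0, fmt.Errorf("invalid redis db: %w", err)
		}
	}
	return db, nil
}

func main() {
	db, err := parseDB("redis://localhost:6379/2")
	fmt.Println(db, err) // 2 <nil>
}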
@@ -53,8 +53,10 @@ func generatePeers() (a [1000]bittorrent.Peer) {
 	return
 }
 
-type executionFunc func(int, PeerStore, *benchData) error
-type setupFunc func(PeerStore, *benchData) error
+type (
+	executionFunc func(int, PeerStore, *benchData) error
+	setupFunc     func(PeerStore, *benchData) error
+)
 
 func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
 	bd := &benchData{generateInfohashes(), generatePeers()}
@@ -185,6 +187,7 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
 		err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
 		if err != nil {
 			return err
 		}
+
 		return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
 	})
@@ -211,7 +214,7 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
 // DeleteNonexist can run in parallel.
 func DeleteNonexist(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
+		_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
 		return nil
 	})
 }
@@ -222,7 +225,7 @@ func DeleteNonexist(b *testing.B, ps PeerStore) {
 // DeleteNonexist can run in parallel.
 func DeleteNonexist1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
+		_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
 		return nil
 	})
 }
@@ -233,7 +236,7 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) {
 // DeleteNonexist1kInfohash can run in parallel.
 func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
+		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
 		return nil
 	})
 }
@@ -244,7 +247,7 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
 // DeleteNonexist1kInfohash1k can run in parallel.
 func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
+		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
 		return nil
 	})
 }
@@ -255,7 +258,7 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 // GradNonexist can run in parallel.
 func GradNonexist(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
+		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
 		return nil
 	})
 }
@@ -266,7 +269,7 @@ func GradNonexist(b *testing.B, ps PeerStore) {
 // GradNonexist1k can run in parallel.
 func GradNonexist1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
+		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
 		return nil
 	})
 }
@@ -277,7 +280,7 @@ func GradNonexist1k(b *testing.B, ps PeerStore) {
 // GradNonexist1kInfohash can run in parallel.
 func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
+		_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
 		return nil
 	})
 }
@@ -289,7 +292,7 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
 // GradNonexist1kInfohash1k can run in parallel.
 func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
+		_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
 		return nil
 	})
 }
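The eight benchmark hunks above are the same mechanical change: these Delete*/Grad* benchmarks measure the cost of operations that are expected to fail (the peer or infohash does not exist), so the error is discarded explicitly rather than silently, satisfying errcheck without altering what is timed. A minimal benchmark in the same spirit, against a hypothetical stand-in store rather than the repository's PeerStore interface:

package storage_test

import (
	"errors"
	"testing"
)

// fakeStore is a stand-in for the store under benchmark; DeleteSeeder
// fails for unknown peers, just like the nonexistent-peer benchmarks expect.
type fakeStore struct{}

func (fakeStore) DeleteSeeder(infohash, peer string) error {
	return errors.New("peer does not exist")
}

func BenchmarkDeleteNonexist(b *testing.B) {
	ps := fakeStore{}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// The failure is the point of the benchmark, so the error is
		// discarded explicitly rather than silently ignored.
		_ = ps.DeleteSeeder("infohash0", "peer0")
	}
}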