Compare commits: main...dependabot

1 commit · 9cbeb49ec4
55 changed files with 708 additions and 1568 deletions

Changed paths: .github, .golangci.yaml, .markdownlint.yaml, .yamllint, CONTRIBUTING.md, LICENSE, README.md, bittorrent, cmd/chihaya, dist, docs, frontend, go.mod, go.sum, middleware, pkg/metrics, storage

.github/FUNDING.yml (vendored) — 4 changes

@@ -1,3 +1 @@
----
-github:
-  - "jzelinskie"
+github: [ jzelinskie ]

.github/dependabot.yml (vendored) — 26 changes

@@ -1,23 +1,7 @@
----
 version: 2
 updates:
-  - package-ecosystem: "github-actions"
+  - package-ecosystem: gomod
     directory: "/"
     schedule:
-      interval: "monthly"
-    labels:
-      - "component/dependencies"
-
-  - package-ecosystem: "gomod"
-    directory: "/"
-    schedule:
-      interval: "monthly"
-    labels:
-      - "component/dependencies"
-
-  - package-ecosystem: "docker"
-    directory: "/"
-    schedule:
-      interval: "monthly"
-    labels:
-      - "component/dependencies"
+      interval: monthly
+    open-pull-requests-limit: 10

.github/workflows/CI.yaml (vendored, new file) — 115 additions

@@ -0,0 +1,115 @@
+name: CI
+on:
+  # See the documentation for more intricate event dispatch here:
+  # https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#on
+  push:
+    branches:
+      - "!dependabot/*"
+      - "*"
+  pull_request:
+    branches:
+      - "*"
+jobs:
+  build:
+    name: Build & Lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Setup
+        uses: actions/setup-go@v2
+        with:
+          go-version: ^1.15
+      - name: Build
+        run: go build -v ./cmd/...
+      - name: Vet
+        run: go vet ./...
+      - name: Imports
+        uses: Jerome1337/goimports-action@v1.0.2
+      - name: Format
+        uses: Jerome1337/gofmt-action@v1.0.2
+      - name: Lint
+        uses: Jerome1337/golint-action@v1.0.2
+
+  unit:
+    name: Unit Tests
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Setup
+        uses: actions/setup-go@v2
+        with:
+          go-version: ^1.15
+      - name: Unit Tests
+        run: go test -v -race $(go list ./...)
+
+  e2e-mem:
+    name: E2E Tests (Memory Storage)
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Setup
+        uses: actions/setup-go@v2
+        with:
+          go-version: ^1.15
+      - name: End-to-End Test
+        run: |
+          go install ./cmd/chihaya
+          cat ./dist/example_config.yaml
+          chihaya --config=./dist/example_config.yaml --debug &
+          pid=$!
+          sleep 2
+          chihaya e2e --debug
+          kill $pid
+
+  e2e-redis:
+    name: E2E Tests (Redis Storage)
+    runs-on: ubuntu-latest
+    services:
+      redis:
+        image: redis
+        ports: ["6379:6379"]
+        options: --entrypoint redis-server
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Setup
+        uses: actions/setup-go@v2
+        with:
+          go-version: ^1.15
+      - name: Configure redis storage
+        run: |
+          curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
+          chmod +x faq-linux-amd64
+          ./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
+          cat ./dist/example_redis_config.yaml
+      - name: End-to-End Test
+        run: |
+          go install ./cmd/chihaya
+          chihaya --config=./dist/example_redis_config.yaml --debug &
+          pid=$!
+          sleep 2
+          chihaya e2e --debug
+          kill $pid
+
+  dist:
+    name: Helm Template
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Install Helm
+        uses: engineerd/configurator@v0.0.5
+        with:
+          name: helm
+          pathInArchive: linux-amd64/helm
+          fromGitHubReleases: true
+          repo: helm/helm
+          version: ^v3
+          urlTemplate: https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Helm Template
+        working-directory: ./dist/helm/chihaya
+        run: helm template . --debug

.github/workflows/build.yaml (vendored, deleted) — 112 deletions

@@ -1,112 +0,0 @@
----
-name: "Build & Test"
-on:
-  push:
-    branches:
-      - "!dependabot/*"
-      - "main"
-  pull_request:
-    branches: ["*"]
-jobs:
-  build:
-    name: "Go Build"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "actions/setup-go@v2"
-        with:
-          go-version: "^1.17"
-      - name: "Build"
-        run: "go build ./cmd/..."
-
-  unit:
-    name: "Run Unit Tests"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "actions/setup-go@v2"
-        with:
-          go-version: "^1.17"
-      - name: "Run `go test`"
-        run: "go test -race ./..."
-
-  e2e-mem:
-    name: "E2E Memory Tests"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "actions/setup-go@v2"
-        with:
-          go-version: "^1.17"
-      - name: "Install and configure chihaya"
-        run: |
-          go install ./cmd/chihaya
-          cat ./dist/example_config.yaml
-      - name: "Run end-to-end tests"
-        run: |
-          chihaya --config=./dist/example_config.yaml --debug &
-          pid=$!
-          sleep 2
-          chihaya e2e --debug
-          kill $pid
-
-  e2e-redis:
-    name: "E2E Redis Tests"
-    runs-on: "ubuntu-latest"
-    services:
-      redis:
-        image: "redis"
-        ports: ["6379:6379"]
-        options: "--entrypoint redis-server"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "actions/setup-go@v2"
-        with:
-          go-version: "^1.17"
-      - name: "Install and configure chihaya"
-        run: |
-          go install ./cmd/chihaya
-          curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
-          chmod +x faq-linux-amd64
-          ./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
-          cat ./dist/example_redis_config.yaml
-      - name: "Run end-to-end tests"
-        run: |
-          chihaya --config=./dist/example_redis_config.yaml --debug &
-          pid=$!
-          sleep 2
-          chihaya e2e --debug
-          kill $pid
-
-  image-build:
-    name: "Docker Build"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "docker/setup-qemu-action@v1"
-      - uses: "docker/setup-buildx-action@v1"
-        with:
-          driver-opts: "image=moby/buildkit:master"
-      - uses: "docker/build-push-action@v1"
-        with:
-          push: false
-          tags: "latest"
-
-  helm:
-    name: "Helm Template"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - name: "Install Helm"
-        uses: "engineerd/configurator@v0.0.5"
-        with:
-          name: "helm"
-          pathInArchive: "linux-amd64/helm"
-          fromGitHubReleases: true
-          repo: "helm/helm"
-          version: "^v3"
-          urlTemplate: "https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz"
-          token: "${{ secrets.GITHUB_TOKEN }}"
-      - name: "Run `helm template`"
-        working-directory: "./dist/helm/chihaya"
-        run: "helm template . --debug"

.github/workflows/lint.yaml (vendored, deleted) — 86 deletions

@@ -1,86 +0,0 @@
----
-name: "Lint"
-on:
-  push:
-    branches:
-      - "!dependabot/*"
-      - "main"
-  pull_request:
-    branches: ["*"]
-jobs:
-  go-mod-tidy:
-    name: "Lint Go Modules"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "actions/setup-go@v2"
-        with:
-          go-version: "^1.17"
-      - name: "Run `go mod tidy`"
-        run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'"
-
-  go-fmt:
-    name: "Format Go"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "actions/setup-go@v2"
-        with:
-          go-version: "^1.17"
-      - name: "Install gofumpt"
-        run: "go install mvdan.cc/gofumpt@latest"
-      - name: "Run `gofumpt`"
-        run: |
-          GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | xargs gofumpt -d)"
-          if [ -n "$GOFUMPT_OUTPUT" ]; then
-            echo "The following files are not correctly formatted:"
-            echo "${GOFUMPT_OUTPUT}"
-            exit 1
-          fi
-
-  go-lint:
-    name: "Lint Go"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "actions/setup-go@v2"
-        with:
-          go-version: "^1.17"
-      - uses: "golangci/golangci-lint-action@v2"
-        with:
-          version: "v1.43"
-          skip-go-installation: true
-          skip-pkg-cache: true
-          skip-build-cache: false
-
-  extra-lint:
-    name: "Lint YAML & Markdown"
-    runs-on: "ubuntu-latest"
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "bewuethr/yamllint-action@v1.1.1"
-        with:
-          config-file: ".yamllint"
-      - uses: "nosborn/github-action-markdown-cli@v2.0.0"
-        with:
-          files: "."
-          config_file: ".markdownlint.yaml"
-
-  codeql:
-    name: "Analyze with CodeQL"
-    runs-on: "ubuntu-latest"
-    permissions:
-      actions: "read"
-      contents: "read"
-      security-events: "write"
-    strategy:
-      fail-fast: false
-      matrix:
-        language: ["go"]
-    steps:
-      - uses: "actions/checkout@v2"
-      - uses: "github/codeql-action/init@v1"
-        with:
-          languages: "${{ matrix.language }}"
-      - uses: "github/codeql-action/autobuild@v1"
-      - uses: "github/codeql-action/analyze@v1"

.golangci.yaml

@@ -1,50 +0,0 @@
----
-run:
-  timeout: "5m"
-output:
-  sort-results: true
-linters-settings:
-  goimports:
-    local-prefixes: "github.com/chihaya/chihaya"
-  gosec:
-    excludes:
-      - "G404" # Allow the usage of math/rand
-linters:
-  enable:
-    - "bidichk"
-    - "bodyclose"
-    - "deadcode"
-    - "errcheck"
-    - "errname"
-    - "errorlint"
-    - "gofumpt"
-    - "goimports"
-    - "goprintffuncname"
-    - "gosec"
-    - "gosimple"
-    - "govet"
-    - "ifshort"
-    - "importas"
-    - "ineffassign"
-    - "makezero"
-    - "prealloc"
-    - "predeclared"
-    - "revive"
-    - "rowserrcheck"
-    - "staticcheck"
-    - "structcheck"
-    - "stylecheck"
-    - "tenv"
-    - "typecheck"
-    - "unconvert"
-    - "unused"
-    - "varcheck"
-    - "wastedassign"
-    - "whitespace"
-issues:
-  include:
-    - "EXC0012" # Exported should have comment
-    - "EXC0012" # Exported should have comment
-    - "EXC0013" # Package comment should be of form
-    - "EXC0014" # Comment on exported should be of form
-    - "EXC0015" # Should have a package comment

.markdownlint.yaml

@@ -1,3 +0,0 @@
----
-line-length: false
-no-hard-tabs: false

.yamllint — 11 deletions

@@ -1,11 +0,0 @@
-# vim: ft=yaml
----
-yaml-files:
-  - "*.yaml"
-  - "*.yml"
-  - ".yamllint"
-ignore: "dist/helm/"
-extends: "default"
-rules:
-  quoted-strings: "enable"
-  line-length: "disable"

CONTRIBUTING.md

@@ -1,3 +1,78 @@
-## Contributing to LBRY
+## Discussion
 
-https://lbry.tech/contribute
+Long-term discussion and bug reports are maintained via [GitHub Issues].
+Code review is done via [GitHub Pull Requests].
+Real-time discussion is done via [freenode IRC].
+
+[GitHub Issues]: https://github.com/chihaya/chihaya/issues
+[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
+[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
+
+## Pull Request Procedure
+
+If you're looking to contribute, search GitHub for issues labeled "low-hanging fruit".
+You can also hop into IRC and ask a developer who's online for their opinion.
+
+Small, self-describing fixes are perfectly fine to submit without discussion.
+However, please do not submit a massive Pull Request without prior communication.
+Large, unannounced changes usually lead to confusion and time wasted for everyone.
+If you were planning to write a large change, post an issue on GitHub first and discuss it.
+
+Pull Requests will be treated as "review requests", and we will give feedback we expect to see corrected on style and substance before merging.
+Changes contributed via Pull Request should focus on a single issue at a time.
+We will not accept pull-requests that try to "sneak" unrelated changes in.
+
+The average contribution flow is as follows:
+
+- Determine what to work on by creating an issue or finding an issue you want to solve.
+- Create a topic branch from where you want to base your work. This is usually `master`.
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format.
+- Push your changes to a topic branch in your fork of the repository.
+- Submit a pull request.
+- Your PR will be reviewed and merged by one of the maintainers.
+- You may be asked to make changes and [rebase] your commits.
+
+[rebase]: https://git-scm.com/book/en/v2/Git-Branching-Rebasing
+
+## Style
+
+Any new files should include the license header found at the top of every source file.
+
+### Go
+
+The project follows idiomatic [Go conventions] for style.
+If you're just starting out writing Go, you can check out this [meta-package] that documents idiomatic style decisions you will find in open source Go code.
+All files should have `gofmt` executed on them and code should strive to have full coverage of static analysis tools like [govet] and [golint].
+
+[Go conventions]: https://github.com/golang/go/wiki/CodeReviewComments
+[meta-package]: https://github.com/jzelinskie/conventions
+[govet]: https://golang.org/cmd/vet
+[golint]: https://github.com/golang/lint
+
+### Commit Messages
+
+We follow a rough convention for commit messages that is designed to answer two questions: what changed and why.
+The subject line should feature the what and the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>
+<footer>
+```
+
+The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters.
+This allows the message to be easier to read on GitHub as well as in various git tools.

LICENSE — 18 changes

@@ -1,21 +1,3 @@
-The MIT License (MIT)
-
-Copyright (c) 2015-2022 LBRY Inc
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
-following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-
 Chihaya is released under a BSD 2-Clause license, reproduced below.
 
 Copyright (c) 2015, The Chihaya Authors

README.md — 190 changes

@@ -1,146 +1,104 @@
-# LBRY Tracker
+# Chihaya
 
-The LBRY tracker is a server that helps peers find each other. It was forked from [Chihaya](https://github.com/chihaya/chihaya), an open-source [BitTorrent tracker](https://en.wikipedia.org/wiki/BitTorrent_tracker).
+[Build Status](https://github.com/chihaya/chihaya/actions)
+[Docker Repository on Quay](https://quay.io/repository/jzelinskie/chihaya)
+[Go Report Card](https://goreportcard.com/report/github.com/chihaya/chihaya)
+[GoDoc](https://godoc.org/github.com/chihaya/chihaya)
+[License](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
+[IRC](http://webchat.freenode.net/?channels=chihaya)
+
+**Note:** The master branch may be in an unstable or even broken state during development.
+Please use [releases] instead of the master branch in order to get stable binaries.
 
-## Installation and Usage
+Chihaya is an open source [BitTorrent tracker] written in [Go].
 
-### Building from HEAD
+Differentiating features include:
+
+- HTTP and UDP protocols
+- IPv4 and IPv6 support
+- Pre/Post middleware hooks
+- [YAML] configuration
+- Metrics via [Prometheus]
+- High Availability via [Redis]
+- Kubernetes deployment via [Helm]
+
+[releases]: https://github.com/chihaya/chihaya/releases
+[BitTorrent tracker]: https://en.wikipedia.org/wiki/BitTorrent_tracker
+[Go]: https://golang.org
+[YAML]: https://yaml.org
+[Prometheus]: https://prometheus.io
+[Redis]: https://redis.io
+[Helm]: https://helm.sh
+
+## Why Chihaya?
+
+Chihaya is built for developers looking to integrate BitTorrent into a preexisting production environment.
+Chihaya's pluggable architecture and middleware framework offers a simple and flexible integration point that abstracts the BitTorrent tracker protocols.
+The most common use case for Chihaya is enabling peer-to-peer cloud software deployments.
+
+## Development
+
+### Contributing
+
+Long-term discussion and bug reports are maintained via [GitHub Issues].
+Code review is done via [GitHub Pull Requests].
+Real-time discussion is done via [freenode IRC].
+
+For more information read [CONTRIBUTING.md].
+
+[GitHub Issues]: https://github.com/chihaya/chihaya/issues
+[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
+[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
+[CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md
+
+### Getting Started
+
+#### Building from HEAD
 
 In order to compile the project, the [latest stable version of Go] and knowledge of a [working Go environment] are required.
 
 ```sh
-git clone git@github.com:lbryio/tracker.git
-cd tracker
-go build ./cmd/chihaya
-./chihaya --help
+$ git clone git@github.com:chihaya/chihaya.git
+$ cd chihaya
+$ go build ./cmd/chihaya
+$ ./chihaya --help
 ```
 
 [latest stable version of Go]: https://golang.org/dl
 [working Go environment]: https://golang.org/doc/code.html
 
-### Testing
+#### Docker
+
+Docker containers are available for [HEAD] and [stable] releases.
+
+[HEAD]: https://quay.io/jzelinskie/chihaya-git
+[stable]: https://quay.io/jzelinskie/chihaya
+
+#### Testing
 
 The following will run all tests and benchmarks.
 Removing `-bench` will just run unit tests.
 
 ```sh
-go test -bench $(go list ./...)
+$ go test -bench $(go list ./...)
 ```
 
-The tracker executable contains a command to end-to-end test a BitTorrent tracker.
+The Chihaya executable contains a command to end-to-end test a BitTorrent tracker.
 See
 
 ```sh
-tracker --help
+$ chihaya --help
 ```
 
 ### Configuration
 
-Configuration of the tracker is done via one YAML configuration file.
+Configuration of Chihaya is done via one YAML configuration file.
 The `dist/` directory contains an example configuration file.
 Files and directories under `docs/` contain detailed information about configuring middleware, storage implementations, architecture etc.
 
-This is an example for a UDP server running on 9252 with metrics enabled. Remember to **change the private key** to some random string.
+## Related projects
 
-```
----
-chihaya:
-  announce_interval: "30m"
-  min_announce_interval: "15m"
-  metrics_addr: "0.0.0.0:6880"
-  udp:
-    addr: "0.0.0.0:9252"
-    max_clock_skew: "10s"
-    private_key: ">>>>CHANGE THIS TO SOME RANDOM THING<<<<"
-    enable_request_timing: false
-    allow_ip_spoofing: false
-    max_numwant: 100
-    default_numwant: 50
-    max_scrape_infohashes: 50
-  storage:
-    name: "memory"
-    config:
-      gc_interval: "3m"
-      peer_lifetime: "31m"
-      shard_count: 1024
-      prometheus_reporting_interval: "1s"
-```
-
-# Running from Docker
-
-This section assumes `docker` and `docker-compose` to be installed on a Linux distro. Please check official docs on how to install [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/).
-
-## Docker Compose from lbry/tracker
-In order to define a tracker service and let Docker Compose manage it, create a file named `docker-compose.yml` with:
-```
-version: "3"
-services:
-  tracker:
-    image: lbry/tracker
-    command: --config /config/conf.yml
-    volumes:
-      - .:/config
-    network_mode: host
-    restart: always
-```
-Unfortunately the tracker does not work without `network_mode: host` due to some bug with UDP on Docker. In this mode, firewall configuration needs to be done manually. If using `ufw`, try `ufw allow 9252`.
-
-Now, move the configuration to the same directory as `docker-compose.yml`, naming it `conf.yml`. If it is not ready, check the configuration section above.
-
-Start the tracker by running the following in the same directory as the compose file:
-`docker-compose up -d`
-Logs can be read with:
-`docker-compose logs`
-To stop:
-`docker-compose down`
-
-## Building the container
-A Dockerfile is provided within the repo. To build the container locally, run this command on the same directory the repo was cloned:
-`sudo docker build -f Dockerfile . -t some_name/tracker:latest`
-It will produce an image called `some_name/tracker`, which can be used in the Docker Compose section.
-
-# Running from source as a service
-
-For ease of maintenance, it is recommended to run the tracker as a service.
-
-This is an example for running it as the current user using `systemd`:
-```
-[Unit]
-Description=Chihaya BT tracker
-After=network.target
-[Service]
-Type=simple
-#User=chihaya
-#Group=chihaya
-WorkingDirectory=/home/user/github/tracker
-ExecStart=/home/user/github/tracker/chihaya --config dist/example_config.yaml
-Restart=on-failure
-[Install]
-WantedBy=multi-user.target
-```
-
-To try it, change `/home/user/github/tracker` to where the code was cloned and run:
-```bash
-mkdir -p ~/.config/systemd/user
-# PASTE FILE IN ~/.config/systemd/user/tracker.service
-systemctl --user enable tracker
-systemctl --user start tracker
-systemctl --user status tracker
-```
-
-## Contributing
-
-Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
-
-## License
-
-LBRY's code changes are MIT licensed, and the upstream Chihaya code is licensed under a BSD 2-Clause license. For the full license, see [LICENSE](LICENSE).
-
-## Security
-
-We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
-
-## Contact
-
-The primary contact for this project is [@shyba](mailto:vshyba@lbry.com).
+- [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs
+- [OpenTracker](http://erdgeist.org/arts/software/opentracker): a popular BitTorrent tracker written in C
+- [Ocelot](https://github.com/WhatCD/Ocelot): a private BitTorrent tracker written in C++

bittorrent/client_id_test.go

@@ -5,7 +5,7 @@ import (
 )
 
 func TestClientID(t *testing.T) {
-    clientTable := []struct{ peerID, clientID string }{
+    var clientTable = []struct{ peerID, clientID string }{
         {"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
         {"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
         {"-BS5820-oy4La2MWGEFj", "BS5820"},

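Both sides declare the same test table; the only difference is `:=` versus `var ... =`, which are equivalent inside a function body, and `main` standardizes on the short form. A minimal standalone sketch of the two forms (the data is copied from the hunk, the program itself is illustrative):

```go
package main

import "fmt"

func main() {
	// Short variable declaration — the form kept on main.
	shortForm := []struct{ peerID, clientID string }{
		{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
	}

	// var with an initializer — the equivalent form on the dependabot side;
	// inside a function both compile to exactly the same thing.
	var longForm = []struct{ peerID, clientID string }{
		{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
	}

	fmt.Println(shortForm[0].clientID, longForm[0].clientID)
}
```
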
bittorrent/event_test.go

@@ -8,7 +8,7 @@ import (
 )
 
 func TestNew(t *testing.T) {
-    table := []struct {
+    var table = []struct {
         data        string
         expected    Event
         expectedErr error

bittorrent/params.go

@@ -187,15 +187,15 @@ func (qp *QueryParams) String(key string) (string, bool) {
     return value, ok
 }
 
-// Uint returns a uint parsed from a query. After being called, it is safe to
+// Uint64 returns a uint parsed from a query. After being called, it is safe to
 // cast the uint64 to your desired length.
-func (qp *QueryParams) Uint(key string, bitSize int) (uint64, error) {
+func (qp *QueryParams) Uint64(key string) (uint64, error) {
     str, exists := qp.params[key]
     if !exists {
         return 0, ErrKeyNotFound
     }
 
-    val, err := strconv.ParseUint(str, 10, bitSize)
+    val, err := strconv.ParseUint(str, 10, 64)
     if err != nil {
         return 0, err
     }

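The `main` side's `Uint(key, bitSize)` pushes range validation into `strconv.ParseUint`, while the older `Uint64(key)` always parses at 64 bits and leaves narrowing to the caller. A hedged sketch of the practical difference, using `strconv` directly rather than chihaya's API:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// bitSize=16 rejects values that cannot fit a uint16 at parse time,
	// which is what Uint(key, 16) buys for something like a "port" parameter.
	if _, err := strconv.ParseUint("70000", 10, 16); err != nil {
		fmt.Println("rejected up front:", err)
	}

	// bitSize=64 accepts the same input; the caller must range-check
	// before narrowing, as with the older Uint64(key).
	v, _ := strconv.ParseUint("70000", 10, 64)
	fmt.Println("accepted; a blind cast truncates:", uint16(v)) // prints 4464
}
```
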
bittorrent/params_test.go

@@ -92,7 +92,7 @@ func TestParseInvalidURLData(t *testing.T) {
 
 func TestParseShouldNotPanicURLData(t *testing.T) {
     for _, parseStr := range shouldNotPanicQueries {
-        _, _ = ParseURLData(parseStr)
+        ParseURLData(parseStr)
     }
 }
 

bittorrent/sanitize.go

@@ -9,9 +9,15 @@ import (
 // ErrInvalidIP indicates an invalid IP for an Announce.
 var ErrInvalidIP = ClientError("invalid IP")
 
+// ErrInvalidPort indicates an invalid Port for an Announce.
+var ErrInvalidPort = ClientError("invalid port")
+
 // SanitizeAnnounce enforces a max and default NumWant and coerces the peer's
 // IP address into the proper format.
 func SanitizeAnnounce(r *AnnounceRequest, maxNumWant, defaultNumWant uint32) error {
+    if r.Port == 0 {
+        return ErrInvalidPort
+    }
+
     if !r.NumWantProvided {
         r.NumWant = defaultNumWant

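The right-hand side introduces `ErrInvalidPort` and rejects announces with port 0 before any other sanitization. A standalone sketch of that guard — the struct and the NumWant clamp are stand-ins based on the function's doc comment, not chihaya's actual `AnnounceRequest`:

```go
package main

import (
	"errors"
	"fmt"
)

// errInvalidPort mirrors ClientError("invalid port") from the hunk above.
var errInvalidPort = errors.New("invalid port")

// announce is a simplified stand-in for the real AnnounceRequest.
type announce struct {
	Port            uint16
	NumWant         uint32
	NumWantProvided bool
}

func sanitize(r *announce, maxNumWant, defaultNumWant uint32) error {
	// Port 0 is unroutable, so reject it before applying NumWant defaults.
	if r.Port == 0 {
		return errInvalidPort
	}
	if !r.NumWantProvided {
		r.NumWant = defaultNumWant
	} else if r.NumWant > maxNumWant {
		r.NumWant = maxNumWant
	}
	return nil
}

func main() {
	fmt.Println(sanitize(&announce{Port: 0}, 100, 50))    // invalid port
	fmt.Println(sanitize(&announce{Port: 6881}, 100, 50)) // <nil>
}
```
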
@@ -13,7 +13,6 @@ import (
 
     // Imports to register middleware drivers.
     _ "github.com/chihaya/chihaya/middleware/clientapproval"
-    _ "github.com/chihaya/chihaya/middleware/fixedpeer"
     _ "github.com/chihaya/chihaya/middleware/jwt"
     _ "github.com/chihaya/chihaya/middleware/torrentapproval"
     _ "github.com/chihaya/chihaya/middleware/varinterval"

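The underscore imports above exist purely for their side effects: each middleware package registers its driver at startup, so dropping the `fixedpeer` import removes that driver from the binary. A minimal sketch of the registry pattern such blank imports rely on (the names here are illustrative, not chihaya's real API):

```go
package main

import "fmt"

// drivers is a process-wide registry, analogous to the one the
// middleware packages register themselves into.
var drivers = map[string]func() string{}

// Register is what each driver package calls from its init function.
func Register(name string, factory func() string) {
	drivers[name] = factory
}

// In a real layout this init lives in the driver's own package, and a
// blank import such as `_ "example.com/middleware/clientapproval"`
// is enough to run it.
func init() {
	Register("client approval", func() string { return "clientapproval hook" })
}

func main() {
	fmt.Println(drivers["client approval"]()) // clientapproval hook
}
```
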
cmd/chihaya/main.go

@@ -1,8 +1,8 @@
 package main
 
 import (
-    "context"
     "errors"
+    "os"
     "os/signal"
     "runtime"
     "strings"
@@ -100,7 +100,7 @@ func (r *Run) Start(ps storage.PeerStore) error {
 }
 
 func combineErrors(prefix string, errs []error) error {
-    errStrs := make([]string, 0, len(errs))
+    var errStrs []string
     for _, err := range errs {
         errStrs = append(errStrs, err.Error())
     }
@@ -144,13 +144,15 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
         return err
     }
 
-    ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
-    reload, _ := signal.NotifyContext(context.Background(), ReloadSignals...)
+    quit := make(chan os.Signal)
+    signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+
+    reload := makeReloadChan()
 
     for {
         select {
-        case <-reload.Done():
-            log.Info("reloading; received reload signal")
+        case <-reload:
+            log.Info("reloading; received SIGUSR1")
             peerStore, err := r.Stop(true)
             if err != nil {
                 return err
@@ -159,8 +161,8 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
             if err := r.Start(peerStore); err != nil {
                 return err
             }
-        case <-ctx.Done():
-            log.Info("shutting down; received shutdown signal")
+        case <-quit:
+            log.Info("shutting down; received SIGINT/SIGTERM")
             if _, err := r.Stop(false); err != nil {
                 return err
             }
@@ -208,7 +210,7 @@ func RootPostRunCmdFunc(cmd *cobra.Command, args []string) error {
 }
 
 func main() {
-    rootCmd := &cobra.Command{
+    var rootCmd = &cobra.Command{
         Use:   "chihaya",
         Short: "BitTorrent Tracker",
         Long:  "A customizable, multi-protocol BitTorrent Tracker",
@@ -227,7 +229,7 @@ func main() {
 
     rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
 
-    e2eCmd := &cobra.Command{
+    var e2eCmd = &cobra.Command{
         Use:   "e2e",
         Short: "exec e2e tests",
         Long:  "Execute the Chihaya end-to-end test suite",

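On `main`, the hand-rolled signal channels (`makeReloadChan` below) are replaced by `signal.NotifyContext`, added in Go 1.16, which returns a context whose `Done` channel closes on the first matching signal. A runnable sketch of the newer pattern:

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// ctx.Done() closes when SIGINT or SIGTERM arrives, replacing the
	// make(chan os.Signal) + signal.Notify pair from the old side.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop() // restores default signal behavior

	select {
	case <-ctx.Done():
		fmt.Println("shutting down; received shutdown signal")
	case <-time.After(2 * time.Second):
		fmt.Println("no signal within 2s; exiting demo")
	}
}
```
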
@@ -1,15 +1,15 @@
-//go:build darwin || freebsd || linux || netbsd || openbsd || dragonfly || solaris
 // +build darwin freebsd linux netbsd openbsd dragonfly solaris
 
 package main
 
 import (
     "os"
+    "os/signal"
     "syscall"
 )
 
-// ReloadSignals are the signals that the current OS will send to the process
-// when a configuration reload is requested.
-var ReloadSignals = []os.Signal{
-    syscall.SIGUSR1,
+func makeReloadChan() <-chan os.Signal {
+    reload := make(chan os.Signal)
+    signal.Notify(reload, syscall.SIGUSR1)
+    return reload
 }

@@ -1,4 +1,3 @@
-//go:build windows
 // +build windows
 
 package main
@@ -9,6 +8,8 @@ import (
     "syscall"
 )
 
-var ReloadSignals = []os.Signal{
-    syscall.SIGHUP,
+func makeReloadChan() <-chan os.Signal {
+    reload := make(chan os.Signal)
+    signal.Notify(reload, syscall.SIGHUP)
+    return reload
 }

dist/example_config.yaml (vendored) — 81 changes

@@ -1,16 +1,15 @@
----
 chihaya:
   # The interval communicated with BitTorrent clients informing them how
   # frequently they should announce in between client events.
-  announce_interval: "30m"
+  announce_interval: 30m
 
   # The interval communicated with BitTorrent clients informing them of the
   # minimal duration between announces.
-  min_announce_interval: "15m"
+  min_announce_interval: 15m
 
   # The network interface that will bind to an HTTP endpoint that can be
   # scraped by programs collecting metrics.
   #
   # /metrics serves metrics in the Prometheus format
   # /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
   metrics_addr: "0.0.0.0:6880"
@@ -31,14 +30,14 @@ chihaya:
     tls_key_path: ""
 
     # The timeout durations for HTTP requests.
-    read_timeout: "5s"
-    write_timeout: "5s"
+    read_timeout: 5s
+    write_timeout: 5s
 
     # When true, persistent connections will be allowed. Generally this is not
     # useful for a public tracker, but helps performance in some cases (use of
     # a reverse proxy, or when there are few clients issuing many requests).
     enable_keepalive: false
-    idle_timeout: "30s"
+    idle_timeout: 30s
 
     # Whether to time requests.
     # Disabling this should increase performance/decrease load.
@@ -89,7 +88,7 @@ chihaya:
     addr: "0.0.0.0:6969"
 
     # The leeway for a timestamp on a connection ID.
-    max_clock_skew: "10s"
+    max_clock_skew: 10s
 
     # The key used to encrypt connection IDs.
     private_key: "paste a random string here that will be used to hmac connection IDs"
@@ -114,17 +113,17 @@ chihaya:
 
   # This block defines configuration used for the storage of peer data.
   storage:
-    name: "memory"
+    name: memory
     config:
       # The frequency which stale peers are removed.
       # This balances between
       # - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
       # - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
-      gc_interval: "3m"
+      gc_interval: 3m
 
       # The amount of time until a peer is considered stale.
       # To avoid churn, keep this slightly larger than `announce_interval`
-      peer_lifetime: "31m"
+      peer_lifetime: 31m
 
       # The number of partitions data will be divided into in order to provide a
       # higher degree of parallelism.
@@ -132,7 +131,7 @@ chihaya:
 
       # The interval at which metrics about the number of infohashes and peers
       # are collected and posted to Prometheus.
-      prometheus_reporting_interval: "1s"
+      prometheus_reporting_interval: 1s
 
   # This block defines configuration used for redis storage.
   # storage:
@@ -142,56 +141,56 @@ chihaya:
   #     # This balances between
   #     # - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
   #     # - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
-  #     gc_interval: "3m"
+  #     gc_interval: 3m
 
   #     # The interval at which metrics about the number of infohashes and peers
   #     # are collected and posted to Prometheus.
-  #     prometheus_reporting_interval: "1s"
+  #     prometheus_reporting_interval: 1s
 
   #     # The amount of time until a peer is considered stale.
   #     # To avoid churn, keep this slightly larger than `announce_interval`
-  #     peer_lifetime: "31m"
+  #     peer_lifetime: 31m
 
   #     # The address of redis storage.
   #     redis_broker: "redis://pwd@127.0.0.1:6379/0"
 
   #     # The timeout for reading a command reply from redis.
-  #     redis_read_timeout: "15s"
+  #     redis_read_timeout: 15s
 
   #     # The timeout for writing a command to redis.
-  #     redis_write_timeout: "15s"
+  #     redis_write_timeout: 15s
 
   #     # The timeout for connecting to redis server.
-  #     redis_connect_timeout: "15s"
+  #     redis_connect_timeout: 15s
 
   # This block defines configuration used for middleware executed before a
   # response has been returned to a BitTorrent client.
   prehooks:
-  # - name: "jwt"
+  #- name: jwt
   #   options:
   #     issuer: "https://issuer.com"
   #     audience: "https://chihaya.issuer.com"
   #     jwk_set_url: "https://issuer.com/keys"
-  #     jwk_set_update_interval: "5m"
+  #     jwk_set_update_interval: 5m
 
-  # - name: "client approval"
+  #- name: client approval
   #   options:
   #     whitelist:
   #       - "OP1011"
   #     blacklist:
   #       - "OP1012"
 
-  # - name: "interval variation"
+  #- name: interval variation
   #   options:
   #     modify_response_probability: 0.2
   #     max_increase_delta: 60
   #     modify_min_interval: true
 
   # This block defines configuration used for torrent approval, it requires to be given
   # hashes for whitelist or for blacklist. Hashes are hexadecimal-encoded.
-  # - name: "torrent approval"
+  #- name: torrent approval
   #   options:
   #     whitelist:
   #       - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
   #     blacklist:
   #       - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"

dist/prometheus/prometheus.yaml (vendored) — 11 changes

@@ -1,12 +1,11 @@
----
 global:
-  scrape_interval: "5s"
-  evaluation_interval: "5s"
+  scrape_interval: 5s
+  evaluation_interval: 5s
 
 # A scrape configuration containing exactly one endpoint to scrape:
 scrape_configs:
   # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: "local-chihaya" # you can name this however you want
-    scrape_interval: "5s" # optionally override the global scrape_interval
+  - job_name: 'local-chihaya' # you can name this however you want
+    scrape_interval: 5s # optionally override the global scrape_interval
     static_configs:
-      - targets: ["localhost:6881"] # provide the address of chihaya's prometheus endpoint
+      - targets: ['localhost:6881'] # provide the address of chihaya's prometheus endpoint

docs/architecture.md

@@ -1,6 +1,6 @@
 # Architecture
 
-## Overview
+### Overview
 
 BitTorrent clients send Announce and Scrape requests to a _Frontend_.
 Frontends parse requests and write responses for the particular protocol they implement.
@@ -11,6 +11,6 @@ After all PreHooks have executed, any missing response fields that are required
 PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
 Because they are unnecessary for generating a response, updates to the Storage for a particular request are done asynchronously in a PostHook.
 
-## Diagram
+### Diagram
 
 [figure: architecture diagram — image lost in capture]

@@ -44,11 +44,11 @@ The typical control flow of handling announces, in more detail, is:
 6. Send the response to the Client.
 7. Pass the request and response to the `TrackerLogic`'s `AfterAnnounce` or `AfterScrape` method.
 8. Finish, accept next request.
 9. For invalid requests or errors during processing: Send an error response to the client.
 This step may be skipped for suspected denial-of-service attacks.
 The error response may contain information about the cause of the error.
 Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
 Then finish, and accept the next request.
 
 #### Configuration
 
@@ -62,22 +62,20 @@ Frontends may provide runtime metrics, such as the number of requests or their d
 Metrics must be reported using [Prometheus].
 
 A frontend should provide at least the following metrics:
 
 - The number of valid and invalid requests handled
 - The average time it takes to handle a single request.
   This request timing should be made optional using a config entry.
 
 Requests should be separated by type, i.e. Scrapes, Announces, and other protocol-specific requests.
 If the frontend serves multiple transports or networks, metrics for them should be separable.
 
 It is recommended to publish one Prometheus `HistogramVec` with:
 
 - A name like `chihaya_PROTOCOL_response_duration_milliseconds`
 - A value holding the duration in milliseconds of the reported request
 - Labels for:
   - `action` (= `announce`, `scrape`, ...)
   - `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
   - `error` (= A textual representation of the error encountered during processing.)
 
 Because `error` is expected to hold the textual representation of any error that occurred during the request, great care must be taken to ensure all error messages are static.
 `error` must not contain any information directly taken from the request, e.g. the value of an invalid parameter.
 This would cause this dimension of prometheus to explode, which slows down prometheus clients and reporters.
@@ -108,4 +106,4 @@ This way, a PreHook can communicate with a PostHook by setting a context value.
 [BEP 3]: http://bittorrent.org/beps/bep_0003.html
 [BEP 15]: http://bittorrent.org/beps/bep_0015.html
 [Prometheus]: https://prometheus.io/
 [old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/

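The last hunk above ends on the note that a PreHook can communicate with a PostHook by setting a context value. A minimal sketch of that pattern with deliberately simplified hook signatures (not chihaya's actual interfaces):

```go
package main

import (
	"context"
	"fmt"
)

// ctxKey is unexported so other packages cannot collide with our keys.
type ctxKey string

const approvedKey ctxKey = "announce-approved"

// preHook runs before the response is generated and stashes a value.
func preHook(ctx context.Context) context.Context {
	return context.WithValue(ctx, approvedKey, true)
}

// postHook runs after the response has been delivered and reads it back.
func postHook(ctx context.Context) {
	if v, ok := ctx.Value(approvedKey).(bool); ok && v {
		fmt.Println("post-hook saw the value set by the pre-hook")
	}
}

func main() {
	postHook(preHook(context.Background()))
}
```
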
@@ -59,7 +59,7 @@ All the InfoHashes (swarms) are also stored in a redis hash, with IP family as t
 
 Here is an example:
 
-```yaml
+```
 - IPv4
   - IPv4_S_<infohash 1>: <modification time>
   - IPv4_L_<infohash 1>: <modification time>
@@ -73,14 +73,15 @@ Here is an example:
   - <peer 3 key>: <modification time>
 ```
 
+
 In this case, prometheus would record two swarms, three seeders, and one leecher.
 These three keys per address family are used to record the count of swarms, seeders, and leechers.
 
-```yaml
+```
 - IPv4_infohash_count: 2
 - IPv4_S_count: 3
 - IPv4_L_count: 1
 ```
 
-Note: `IPv4_infohash_count` has a different meaning compared to the `memory` storage:
+Note: IPv4_infohash_count has a different meaning compared to the `memory` storage:
 It represents the number of infohashes reported by seeders, meaning that infohashes without seeders are not counted.

@@ -46,8 +46,8 @@ func BenchmarkUnmarshalScalar(b *testing.B) {
     d2 := NewDecoder(&bufferLoop{"i42e"})
 
     for i := 0; i < b.N; i++ {
-        _, _ = d1.Decode()
-        _, _ = d2.Decode()
+        d1.Decode()
+        d2.Decode()
     }
 }
 
@@ -79,6 +79,6 @@ func BenchmarkUnmarshalLarge(b *testing.B) {
     dec := NewDecoder(&bufferLoop{string(buf)})
 
     for i := 0; i < b.N; i++ {
-        _, _ = dec.Decode()
+        dec.Decode()
     }
 }

@@ -66,7 +66,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
 		err = marshalInt(w, int64(v))
 
 	case int64:
-		err = marshalInt(w, v)
+		err = marshalInt(w, int64(v))
 
 	case uint:
 		err = marshalUint(w, uint64(v))

@@ -78,7 +78,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
 		err = marshalUint(w, uint64(v))
 
 	case uint64:
-		err = marshalUint(w, v)
+		err = marshalUint(w, uint64(v))
 
 	case time.Duration: // Assume seconds
 		err = marshalInt(w, int64(v/time.Second))

@@ -90,7 +90,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
 		err = marshalList(w, v)
 
 	case []Dict:
-		interfaceSlice := make([]interface{}, len(v))
+		var interfaceSlice = make([]interface{}, len(v))
 		for i, d := range v {
 			interfaceSlice[i] = d
 		}
@@ -50,8 +50,8 @@ func BenchmarkMarshalScalar(b *testing.B) {
 	encoder := NewEncoder(buf)
 
 	for i := 0; i < b.N; i++ {
-		_ = encoder.Encode("test")
-		_ = encoder.Encode(123)
+		encoder.Encode("test")
+		encoder.Encode(123)
 	}
 }

@@ -67,6 +67,6 @@ func BenchmarkMarshalLarge(b *testing.B) {
 	encoder := NewEncoder(buf)
 
 	for i := 0; i < b.N; i++ {
-		_ = encoder.Encode(data)
+		encoder.Encode(data)
 	}
 }
@@ -164,7 +164,6 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
 	if cfg.TLSCertPath != "" && cfg.TLSKeyPath != "" {
 		var err error
 		f.tlsCfg = &tls.Config{
-			MinVersion:   tls.VersionTLS12,
 			Certificates: make([]tls.Certificate, 1),
 		}
 		f.tlsCfg.Certificates[0], err = tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)

@@ -266,7 +265,7 @@ func (f *Frontend) serveHTTP(l net.Listener) error {
 	f.srv.SetKeepAlivesEnabled(f.EnableKeepAlive)
 
 	// Start the HTTP server.
-	if err := f.srv.Serve(l); !errors.Is(err, http.ErrServerClosed) {
+	if err := f.srv.Serve(l); err != http.ErrServerClosed {
 		return err
 	}
 	return nil

@@ -286,7 +285,7 @@ func (f *Frontend) serveHTTPS(l net.Listener) error {
 	f.tlsSrv.SetKeepAlivesEnabled(f.EnableKeepAlive)
 
 	// Start the HTTP server.
-	if err := f.tlsSrv.ServeTLS(l, "", ""); !errors.Is(err, http.ErrServerClosed) {
+	if err := f.tlsSrv.ServeTLS(l, "", ""); err != http.ErrServerClosed {
 		return err
 	}
 	return nil

@@ -318,7 +317,7 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
 
 	req, err := ParseAnnounce(r, f.ParseOptions)
 	if err != nil {
-		_ = WriteError(w, err)
+		WriteError(w, err)
 		return
 	}
 	af = new(bittorrent.AddressFamily)

@@ -327,14 +326,14 @@
 	ctx := injectRouteParamsToContext(context.Background(), ps)
 	ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
 	if err != nil {
-		_ = WriteError(w, err)
+		WriteError(w, err)
 		return
 	}
 
 	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 	err = WriteAnnounceResponse(w, resp)
 	if err != nil {
-		_ = WriteError(w, err)
+		WriteError(w, err)
 		return
 	}

@@ -359,14 +358,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
 
 	req, err := ParseScrape(r, f.ParseOptions)
 	if err != nil {
-		_ = WriteError(w, err)
+		WriteError(w, err)
 		return
 	}
 
 	host, _, err := net.SplitHostPort(r.RemoteAddr)
 	if err != nil {
 		log.Error("http: unable to determine remote address for scrape", log.Err(err))
-		_ = WriteError(w, err)
+		WriteError(w, err)
 		return
 	}

@@ -377,7 +376,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
 		req.AddressFamily = bittorrent.IPv6
 	} else {
 		log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
-		_ = WriteError(w, bittorrent.ErrInvalidIP)
+		WriteError(w, bittorrent.ErrInvalidIP)
 		return
 	}
 	af = new(bittorrent.AddressFamily)

@@ -386,14 +385,14 @@
 	ctx := injectRouteParamsToContext(context.Background(), ps)
 	ctx, resp, err := f.logic.HandleScrape(ctx, req)
 	if err != nil {
-		_ = WriteError(w, err)
+		WriteError(w, err)
 		return
 	}
 
 	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 	err = WriteScrapeResponse(w, resp)
 	if err != nil {
-		_ = WriteError(w, err)
+		WriteError(w, err)
 		return
 	}
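The `errors.Is` vs `!=` change above is behavioral, not cosmetic: once an error is wrapped, plain equality against a sentinel such as `http.ErrServerClosed` fails. A minimal sketch using only the standard library:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

func main() {
	// Wrapping is common when errors travel through layers.
	wrapped := fmt.Errorf("frontend shutting down: %w", http.ErrServerClosed)

	fmt.Println(wrapped == http.ErrServerClosed)          // false: equality only sees the outer value
	fmt.Println(errors.Is(wrapped, http.ErrServerClosed)) // true: Is unwraps the chain
}
```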
@@ -1,7 +1,6 @@
 package http
 
 import (
-	"errors"
 	"net"
 	"net/http"

@@ -74,26 +73,26 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
 	request.Peer.ID = bittorrent.PeerIDFromString(peerID)
 
 	// Determine the number of remaining bytes for the client.
-	request.Left, err = qp.Uint("left", 64)
+	request.Left, err = qp.Uint64("left")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: left")
 	}
 
 	// Determine the number of bytes downloaded by the client.
-	request.Downloaded, err = qp.Uint("downloaded", 64)
+	request.Downloaded, err = qp.Uint64("downloaded")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
 	}
 
 	// Determine the number of bytes shared by the client.
-	request.Uploaded, err = qp.Uint("uploaded", 64)
+	request.Uploaded, err = qp.Uint64("uploaded")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
 	}
 
 	// Determine the number of peers the client wants in the response.
-	numwant, err := qp.Uint("numwant", 32)
-	if err != nil && !errors.Is(err, bittorrent.ErrKeyNotFound) {
+	numwant, err := qp.Uint64("numwant")
+	if err != nil && err != bittorrent.ErrKeyNotFound {
 		return nil, bittorrent.ClientError("failed to parse parameter: numwant")
 	}
 	// If there were no errors, the user actually provided the numwant.

@@ -101,7 +100,7 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
 	request.NumWant = uint32(numwant)
 
 	// Parse the port where the client is listening.
-	port, err := qp.Uint("port", 16)
+	port, err := qp.Uint64("port")
 	if err != nil {
 		return nil, bittorrent.ClientError("failed to parse parameter: port")
 	}
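The `qp.Uint(key, bitSize)` form on the left lets the parser reject out-of-range values (e.g. a port above 65535) at parse time instead of silently truncating in a later cast. A standalone sketch of that idea using only `strconv`; the helper name is invented for illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseUint parses s as an unsigned integer that must fit in bitSize bits,
// mirroring the idea behind qp.Uint(key, bitSize).
func parseUint(s string, bitSize int) (uint64, error) {
	// ParseUint returns an error if the value exceeds bitSize bits.
	return strconv.ParseUint(s, 10, bitSize)
}

func main() {
	if _, err := parseUint("70000", 16); err != nil {
		fmt.Println("rejected:", err) // 70000 does not fit a uint16 port
	}
	v, _ := parseUint("6881", 16)
	fmt.Println("port:", uint16(v))
}
```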
@@ -1,7 +1,6 @@
 package http
 
 import (
-	"errors"
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"

@@ -27,9 +26,8 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
 	var errString string
 	if err != nil {
-		var clientErr bittorrent.ClientError
-		if errors.As(err, &clientErr) {
-			errString = clientErr.Error()
+		if _, ok := err.(bittorrent.ClientError); ok {
+			errString = err.Error()
 		} else {
 			errString = "internal error"
 		}
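`errors.As` plays the same role for typed errors that `errors.Is` plays for sentinels: it finds the first error in the chain matching the target type, even through wrapping, which a bare type assertion cannot do. A self-contained sketch with a stand-in type (the type name is illustrative, not the tracker's):

```go
package main

import (
	"errors"
	"fmt"
)

// clientError is a stand-in for a string-based error type such as
// bittorrent.ClientError.
type clientError string

func (e clientError) Error() string { return string(e) }

func main() {
	wrapped := fmt.Errorf("announce failed: %w", clientError("invalid info_hash"))

	// A type assertion on the outer error misses the wrapped value...
	if _, ok := wrapped.(clientError); !ok {
		fmt.Println("type assertion: not a clientError")
	}

	// ...while errors.As walks the chain and extracts it.
	var ce clientError
	if errors.As(wrapped, &ce) {
		fmt.Println("errors.As:", ce.Error())
	}
}
```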
@@ -1,7 +1,6 @@
 package http
 
 import (
-	"errors"
 	"net/http"
 
 	"github.com/chihaya/chihaya/bittorrent"

@@ -12,9 +11,8 @@ import (
 // WriteError communicates an error to a BitTorrent client over HTTP.
 func WriteError(w http.ResponseWriter, err error) error {
 	message := "internal server error"
-	var clientErr bittorrent.ClientError
-	if errors.As(err, &clientErr) {
-		message = clientErr.Error()
+	if _, clientErr := err.(bittorrent.ClientError); clientErr {
+		message = err.Error()
 	} else {
 		log.Error("http: internal error", log.Err(err))
 	}

@@ -59,7 +57,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
 	}
 
 	// Add the peers to the dictionary.
-	peers := make([]bencode.Dict, 0, len(resp.IPv4Peers)+len(resp.IPv6Peers))
+	var peers []bencode.Dict
 	for _, peer := range resp.IPv4Peers {
 		peers = append(peers, dict(peer))
 	}
@@ -11,7 +11,7 @@ import (
 )
 
 func TestWriteError(t *testing.T) {
-	table := []struct {
+	var table = []struct {
 		reason, expected string
 	}{
 		{"hello world", "d14:failure reason11:hello worlde"},

@@ -29,7 +29,7 @@ func TestWriteError(t *testing.T) {
 }
 
 func TestWriteStatus(t *testing.T) {
-	table := []struct {
+	var table = []struct {
 		reason, expected string
 	}{
 		{"something is missing", "d14:failure reason20:something is missinge"},
@@ -11,27 +11,24 @@ type BytePool struct {
 func New(length int) *BytePool {
 	var bp BytePool
 	bp.Pool.New = func() interface{} {
-		b := make([]byte, length)
-		return &b
+		return make([]byte, length, length)
 	}
 	return &bp
 }
 
 // Get returns a byte slice from the pool.
-func (bp *BytePool) Get() *[]byte {
-	return bp.Pool.Get().(*[]byte)
+func (bp *BytePool) Get() []byte {
+	return bp.Pool.Get().([]byte)
 }
 
 // Put returns a byte slice to the pool.
-func (bp *BytePool) Put(b *[]byte) {
-	*b = (*b)[:cap(*b)]
+func (bp *BytePool) Put(b []byte) {
+	b = b[:cap(b)]
 
 	// Zero out the bytes.
-	// This specific expression is optimized by the compiler:
-	// https://github.com/golang/go/issues/5373.
-	for i := range *b {
-		(*b)[i] = 0
+	// Apparently this specific expression is optimized by the compiler, see
+	// github.com/golang/go/issues/5373.
+	for i := range b {
+		b[i] = 0
 	}
 
 	bp.Pool.Put(b)
 }
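The move from `[]byte` to `*[]byte` matters for `sync.Pool`: storing a slice header directly in an `interface{}` allocates on every `Put` (staticcheck flags this as SA6002), while storing a pointer to the slice does not. A minimal, self-contained usage sketch of the pointer-based API, condensed from the left-hand side above:

```go
package main

import (
	"fmt"
	"sync"
)

// BytePool is a condensed sketch of the pointer-based pool above.
type BytePool struct{ sync.Pool }

func New(length int) *BytePool {
	var bp BytePool
	bp.New = func() interface{} {
		b := make([]byte, length)
		return &b // a pointer avoids an allocation on every Put (SA6002)
	}
	return &bp
}

func (bp *BytePool) Get() *[]byte  { return bp.Pool.Get().(*[]byte) }
func (bp *BytePool) Put(b *[]byte) { bp.Pool.Put(b) }

func main() {
	pool := New(32)
	buf := pool.Get()
	copy(*buf, "hello")
	fmt.Println(string((*buf)[:5]))
	pool.Put(buf)
}
```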
@@ -6,7 +6,6 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"math/rand"
 	"net"

@@ -124,7 +123,8 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
 		},
 	}
 
-	if err := f.listen(); err != nil {
+	err := f.listen()
+	if err != nil {
 		return nil, err
 	}

@@ -148,7 +148,7 @@ func (t *Frontend) Stop() stop.Result {
 	c := make(stop.Channel)
 	go func() {
 		close(t.closing)
-		_ = t.socket.SetReadDeadline(time.Now())
+		t.socket.SetReadDeadline(time.Now())
 		t.wg.Wait()
 		c.Done(t.socket.Close())
 	}()

@@ -185,11 +185,10 @@ func (t *Frontend) serve() error {
 
 		// Read a UDP packet into a reusable buffer.
 		buffer := pool.Get()
-		n, addr, err := t.socket.ReadFromUDP(*buffer)
+		n, addr, err := t.socket.ReadFromUDP(buffer)
 		if err != nil {
 			pool.Put(buffer)
-			var netErr net.Error
-			if errors.As(err, &netErr); netErr.Temporary() {
+			if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
 				// A temporary failure is not fatal; just pretend it never happened.
 				continue
 			}

@@ -218,7 +217,7 @@ func (t *Frontend) serve() error {
 		}
 		action, af, err := t.handleRequest(
 			// Make sure the IP is copied, not referenced.
-			Request{(*buffer)[:n], append([]byte{}, addr.IP...)},
+			Request{buffer[:n], append([]byte{}, addr.IP...)},
 			ResponseWriter{t.socket, addr},
 		)
 		if t.EnableRequestTiming {

@@ -245,7 +244,7 @@ type ResponseWriter struct {
 
 // Write implements the io.Writer interface for a ResponseWriter.
 func (w ResponseWriter) Write(b []byte) (int, error) {
-	_, _ = w.socket.WriteToUDP(b, w.addr)
+	w.socket.WriteToUDP(b, w.addr)
 	return len(b), nil
 }
@@ -24,8 +24,8 @@ const (
 // Option-Types as described in BEP 41 and BEP 45.
 const (
 	optionEndOfOptions byte = 0x0
-	optionNOP          byte = 0x1
-	optionURLData      byte = 0x2
+	optionNOP               = 0x1
+	optionURLData           = 0x2
 )
 
 var (

@@ -115,7 +115,7 @@ func ParseAnnounce(r Request, v6Action bool, opts ParseOptions) (*bittorrent.Ann
 	request := &bittorrent.AnnounceRequest{
 		Event:      eventIDs[eventID],
 		InfoHash:   bittorrent.InfoHashFromBytes(infohash),
-		NumWant:    numWant,
+		NumWant:    uint32(numWant),
 		Left:       left,
 		Downloaded: downloaded,
 		Uploaded:   uploaded,

@@ -161,7 +161,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
 		return bittorrent.ParseURLData("")
 	}
 
-	buf := newBuffer()
+	var buf = newBuffer()
 	defer buf.free()
 
 	for i := 0; i < len(packet); {
@@ -1,7 +1,6 @@
 package udp
 
 import (
-	"errors"
 	"fmt"
 	"testing"
 )

@@ -52,7 +51,7 @@ func TestHandleOptionalParameters(t *testing.T) {
 	for _, tt := range table {
 		t.Run(fmt.Sprintf("%#v as %#v", tt.data, tt.values), func(t *testing.T) {
 			params, err := handleOptionalParameters(tt.data)
-			if !errors.Is(err, tt.err) {
+			if err != tt.err {
 				if tt.err == nil {
 					t.Fatalf("expected no parsing error for %x but got %s", tt.data, err)
 				} else {
@@ -1,7 +1,6 @@
 package udp
 
 import (
-	"errors"
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"

@@ -27,9 +26,8 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
 	var errString string
 	if err != nil {
-		var clientErr bittorrent.ClientError
-		if errors.As(err, &clientErr) {
-			errString = clientErr.Error()
+		if _, ok := err.(bittorrent.ClientError); ok {
+			errString = err.Error()
 		} else {
 			errString = "internal error"
 		}
@@ -2,7 +2,6 @@ package udp
 
 import (
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 	"time"

@@ -13,16 +12,15 @@ import (
 // WriteError writes the failure reason as a null-terminated string.
 func WriteError(w io.Writer, txID []byte, err error) {
 	// If the client wasn't at fault, acknowledge it.
-	var clientErr bittorrent.ClientError
-	if !errors.As(err, &clientErr) {
-		err = fmt.Errorf("internal error occurred: %w", err)
+	if _, ok := err.(bittorrent.ClientError); !ok {
+		err = fmt.Errorf("internal error occurred: %s", err.Error())
 	}
 
 	buf := newBuffer()
 	writeHeader(buf, txID, errorActionID)
 	buf.WriteString(err.Error())
 	buf.WriteRune('\000')
-	_, _ = w.Write(buf.Bytes())
+	w.Write(buf.Bytes())
 	buf.free()
 }

@@ -39,9 +37,9 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
 	} else {
 		writeHeader(buf, txID, announceActionID)
 	}
-	_ = binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
-	_ = binary.Write(buf, binary.BigEndian, resp.Incomplete)
-	_ = binary.Write(buf, binary.BigEndian, resp.Complete)
+	binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
+	binary.Write(buf, binary.BigEndian, resp.Incomplete)
+	binary.Write(buf, binary.BigEndian, resp.Complete)
 
 	peers := resp.IPv4Peers
 	if v6Peers {

@@ -50,10 +48,10 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
 
 	for _, peer := range peers {
 		buf.Write(peer.IP.IP)
-		_ = binary.Write(buf, binary.BigEndian, peer.Port)
+		binary.Write(buf, binary.BigEndian, peer.Port)
 	}
 
-	_, _ = w.Write(buf.Bytes())
+	w.Write(buf.Bytes())
 	buf.free()
 }

@@ -64,12 +62,12 @@ func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
 	writeHeader(buf, txID, scrapeActionID)
 
 	for _, scrape := range resp.Files {
-		_ = binary.Write(buf, binary.BigEndian, scrape.Complete)
-		_ = binary.Write(buf, binary.BigEndian, scrape.Snatches)
-		_ = binary.Write(buf, binary.BigEndian, scrape.Incomplete)
+		binary.Write(buf, binary.BigEndian, scrape.Complete)
+		binary.Write(buf, binary.BigEndian, scrape.Snatches)
+		binary.Write(buf, binary.BigEndian, scrape.Incomplete)
 	}
 
-	_, _ = w.Write(buf.Bytes())
+	w.Write(buf.Bytes())
 	buf.free()
 }

@@ -80,13 +78,13 @@ func WriteConnectionID(w io.Writer, txID, connID []byte) {
 	writeHeader(buf, txID, connectActionID)
 	buf.Write(connID)
 
-	_, _ = w.Write(buf.Bytes())
+	w.Write(buf.Bytes())
 	buf.free()
 }
 
 // writeHeader writes the action and transaction ID to the provided response
 // buffer.
 func writeHeader(w io.Writer, txID []byte, action uint32) {
-	_ = binary.Write(w, binary.BigEndian, action)
-	_, _ = w.Write(txID)
+	binary.Write(w, binary.BigEndian, action)
+	w.Write(txID)
 }
go.mod

@@ -6,23 +6,17 @@ require (
 	github.com/SermoDigital/jose v0.9.2-0.20180104203859-803625baeddc
 	github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
 	github.com/alicebob/miniredis v2.5.0+incompatible
-	github.com/anacrolix/dht/v2 v2.15.1 // indirect
-	github.com/anacrolix/missinggo/v2 v2.5.3 // indirect
-	github.com/anacrolix/torrent v1.40.0
-	github.com/go-redsync/redsync/v4 v4.5.0
-	github.com/gomodule/redigo v1.8.8
+	github.com/anacrolix/torrent v1.25.0
+	github.com/go-redsync/redsync v1.4.2
+	github.com/gomodule/redigo v2.0.0+incompatible
 	github.com/julienschmidt/httprouter v1.3.0
-	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
 	github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103
-	github.com/minio/sha256-simd v1.0.0
+	github.com/minio/sha256-simd v0.1.1
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.11.0
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.3.0
+	github.com/prometheus/client_golang v1.9.0
+	github.com/sirupsen/logrus v1.8.0
+	github.com/spf13/cobra v1.1.3
 	github.com/stretchr/testify v1.7.0
 	github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
-	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
 	gopkg.in/yaml.v2 v2.4.0
 )
@@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
 	var cfg Config
 	err := yaml.Unmarshal(optionBytes, &cfg)
 	if err != nil {
-		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
+		return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
 	}
 
 	return NewHook(cfg)
@@ -1,84 +0,0 @@
-// Package fixedpeers implements a Hook that
-//appends a fixed peer to every Announce request
-package fixedpeers
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"strconv"
-	"strings"
-
-	yaml "gopkg.in/yaml.v2"
-
-	"github.com/chihaya/chihaya/bittorrent"
-	"github.com/chihaya/chihaya/middleware"
-)
-
-// Name is the name by which this middleware is registered with Chihaya.
-const Name = "fixed peers"
-
-func init() {
-	middleware.RegisterDriver(Name, driver{})
-}
-
-var _ middleware.Driver = driver{}
-
-type driver struct{}
-
-func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
-	var cfg Config
-	err := yaml.Unmarshal(optionBytes, &cfg)
-	if err != nil {
-		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
-	}
-
-	return NewHook(cfg)
-}
-
-type Config struct {
-	FixedPeers []string `yaml:"fixed_peers"`
-}
-
-type hook struct {
-	peers []bittorrent.Peer
-}
-
-// NewHook returns an instance of the torrent approval middleware.
-func NewHook(cfg Config) (middleware.Hook, error) {
-	var peers []bittorrent.Peer
-	for _, peerString := range cfg.FixedPeers {
-		parts := strings.Split(peerString, ":")
-		port, err := strconv.Atoi(parts[1])
-		if err != nil {
-			return nil, err
-		}
-		ip := net.ParseIP(parts[0]).To4()
-		if ip == nil {
-			panic("Invalid ip4 on fixed_peers")
-		}
-		peers = append(peers,
-			bittorrent.Peer{
-				ID:   bittorrent.PeerID{0},
-				Port: uint16(port),
-				IP:   bittorrent.IP{IP: ip},
-			})
-	}
-	h := &hook{
-		peers: peers,
-	}
-	return h, nil
-}
-
-func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
-	for _, peer := range h.peers {
-		resp.IPv4Peers = append(resp.IPv4Peers, peer)
-		resp.Complete += 1
-	}
-	return ctx, nil
-}
-
-func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
-	// Scrapes don't require any protection.
-	return ctx, nil
-}
@@ -1,47 +0,0 @@
-package fixedpeers
-
-import (
-	"context"
-	"encoding/hex"
-	"net"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/chihaya/chihaya/bittorrent"
-)
-
-func TestAppendFixedPeer(t *testing.T) {
-	conf := Config{
-		FixedPeers: []string{"8.8.8.8:4040", "1.1.1.1:111"},
-	}
-	h, err := NewHook(conf)
-	require.Nil(t, err)
-
-	ctx := context.Background()
-	req := &bittorrent.AnnounceRequest{}
-	resp := &bittorrent.AnnounceResponse{}
-
-	hashbytes, err := hex.DecodeString("3000000000000000000000000000000000000000")
-	require.Nil(t, err)
-
-	hashinfo := bittorrent.InfoHashFromBytes(hashbytes)
-
-	req.InfoHash = hashinfo
-
-	nctx, err := h.HandleAnnounce(ctx, req, resp)
-	require.Equal(t, ctx, nctx)
-	peers := []bittorrent.Peer{
-		bittorrent.Peer{
-			ID:   bittorrent.PeerID{0},
-			Port: 4040,
-			IP:   bittorrent.IP{net.ParseIP("8.8.8.8"), bittorrent.IPv4},
-		},
-		bittorrent.Peer{
-			ID:   bittorrent.PeerID{0},
-			Port: 111,
-			IP:   bittorrent.IP{net.ParseIP("1.1.1.1"), bittorrent.IPv4},
-		},
-	}
-	require.Equal(t, peers, resp.IPv4Peers)
-}
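A side note on the parsing in the removed hook: `strings.Split(peerString, ":")` breaks on IPv6 literals such as `[::1]:6881`. The standard library's `net.SplitHostPort` handles both address families; a minimal sketch:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"8.8.8.8:4040", "[::1]:6881"} {
		// SplitHostPort understands bracketed IPv6 literals,
		// unlike a naive strings.Split on ":".
		host, port, err := net.SplitHostPort(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("host=%s port=%s ip=%v\n", host, port, net.ParseIP(host))
	}
}
```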
@@ -2,7 +2,6 @@ package middleware
 
 import (
 	"context"
-	"errors"
 
 	"github.com/chihaya/chihaya/bittorrent"
 	"github.com/chihaya/chihaya/storage"
 )

@@ -35,16 +35,14 @@ func (h *swarmInteractionHook) HandleAnnounce(ctx context.Context, req *bittorre
 	}
 
 	switch {
-	case req.Port < 100:
-		return ctx, nil
 	case req.Event == bittorrent.Stopped:
 		err = h.store.DeleteSeeder(req.InfoHash, req.Peer)
-		if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
+		if err != nil && err != storage.ErrResourceDoesNotExist {
 			return ctx, err
 		}
 
 		err = h.store.DeleteLeecher(req.InfoHash, req.Peer)
-		if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
+		if err != nil && err != storage.ErrResourceDoesNotExist {
 			return ctx, err
 		}
 	case req.Event == bittorrent.Completed:

@@ -98,8 +96,8 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
 
 	// Add the Scrape data to the response.
 	s := h.store.ScrapeSwarm(req.InfoHash, req.IP.AddressFamily)
-	resp.Incomplete += s.Incomplete
-	resp.Complete += s.Complete
+	resp.Incomplete = s.Incomplete
+	resp.Complete = s.Complete
 
 	err = h.appendPeers(req, resp)
 	return ctx, err

@@ -108,7 +106,7 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
 func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error {
 	seeding := req.Left == 0
 	peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer)
-	if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
+	if err != nil && err != storage.ErrResourceDoesNotExist {
 		return err
 	}

@@ -125,9 +123,9 @@ func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittor
 	switch req.IP.AddressFamily {
 	case bittorrent.IPv4:
-		resp.IPv4Peers = append(resp.IPv4Peers, peers...)
+		resp.IPv4Peers = peers
 	case bittorrent.IPv6:
-		resp.IPv6Peers = append(resp.IPv6Peers, peers...)
+		resp.IPv6Peers = peers
 	default:
 		panic("attempted to append peer that is neither IPv4 nor IPv6")
 	}
@@ -44,7 +44,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
 	var cfg Config
 	err := yaml.Unmarshal(optionBytes, &cfg)
 	if err != nil {
-		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
+		return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
 	}
 
 	return NewHook(cfg)

@@ -93,7 +93,8 @@ func NewHook(cfg Config) (middleware.Hook, error) {
 	}
 
 	log.Debug("performing initial fetch of JWKs")
-	if err := h.updateKeys(); err != nil {
+	err := h.updateKeys()
+	if err != nil {
 		return nil, errors.New("failed to fetch initial JWK Set: " + err.Error())
 	}

@@ -104,7 +105,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
 				return
 			case <-time.After(cfg.JWKUpdateInterval):
 				log.Debug("performing fetch of JWKs")
-				_ = h.updateKeys()
+				h.updateKeys()
 			}
 		}
 	}()
@@ -11,7 +11,7 @@ import (
 //
 // Calling DeriveEntropyFromRequest multiple times yields the same values.
 func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) {
-	v0 := binary.BigEndian.Uint64(req.InfoHash[:8]) + binary.BigEndian.Uint64(req.InfoHash[8:16])
-	v1 := binary.BigEndian.Uint64(req.Peer.ID[:8]) + binary.BigEndian.Uint64(req.Peer.ID[8:16])
+	v0 := binary.BigEndian.Uint64([]byte(req.InfoHash[:8])) + binary.BigEndian.Uint64([]byte(req.InfoHash[8:16]))
+	v1 := binary.BigEndian.Uint64([]byte(req.Peer.ID[:8])) + binary.BigEndian.Uint64([]byte(req.Peer.ID[8:16]))
 	return v0, v1
 }
@@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
 	var cfg Config
 	err := yaml.Unmarshal(optionBytes, &cfg)
 	if err != nil {
-		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
+		return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
 	}
 
 	return NewHook(cfg)
@@ -29,7 +29,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
 	var cfg Config
 	err := yaml.Unmarshal(optionBytes, &cfg)
 	if err != nil {
-		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
+		return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
 	}
 
 	return NewHook(cfg)

@@ -77,7 +77,8 @@ type hook struct {
 // NewHook creates a middleware to randomly modify the announce interval from
 // the given config.
 func NewHook(cfg Config) (middleware.Hook, error) {
-	if err := checkConfig(cfg); err != nil {
+	err := checkConfig(cfg)
+	if err != nil {
 		return nil, err
 	}

@@ -95,12 +96,12 @@ func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceReque
 	if h.cfg.ModifyResponseProbability == 1 || p < h.cfg.ModifyResponseProbability {
 		// Generate the increase delta.
 		v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta)
-		deltaDuration := time.Duration(v+1) * time.Second
+		addSeconds := time.Duration(v+1) * time.Second
 
-		resp.Interval += deltaDuration
+		resp.Interval += addSeconds
 
 		if h.cfg.ModifyMinInterval {
-			resp.MinInterval += deltaDuration
+			resp.MinInterval += addSeconds
 		}
 
 		return ctx, nil
@@ -4,7 +4,6 @@ package metrics
 
 import (
 	"context"
-	"errors"
 	"net/http"
 	"net/http/pprof"

@@ -50,7 +49,7 @@ func NewServer(addr string) *Server {
 	}
 
 	go func() {
-		if err := s.srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
+		if err := s.srv.ListenAndServe(); err != http.ErrServerClosed {
 			log.Fatal("failed while serving prometheus", log.Err(err))
 		}
 	}()
@@ -4,7 +4,6 @@ package memory
 
 import (
 	"encoding/binary"
-	"math"
 	"net"
 	"runtime"
 	"sync"

@@ -80,7 +79,7 @@ func (cfg Config) LogFields() log.Fields {
 func (cfg Config) Validate() Config {
 	validcfg := cfg
 
-	if cfg.ShardCount <= 0 || cfg.ShardCount > (math.MaxInt/2) {
+	if cfg.ShardCount <= 0 {
 		validcfg.ShardCount = defaultShardCount
 		log.Warn("falling back to default configuration", log.Fields{
 			"name": Name + ".ShardCount",

@@ -143,7 +142,7 @@ func New(provided Config) (storage.PeerStore, error) {
 			case <-time.After(cfg.GarbageCollectionInterval):
 				before := time.Now().Add(-cfg.PeerLifetime)
 				log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
-				_ = ps.collectGarbage(before)
+				ps.collectGarbage(before)
 			}
 		}
 	}()

@@ -184,8 +183,7 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
 	peer := bittorrent.Peer{
 		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
 		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
-		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
-	}
+		IP:   bittorrent.IP{IP: net.IP(pk[22:])}}
 
 	if ip := peer.IP.To4(); ip != nil {
 		peer.IP.IP = ip
@@ -25,7 +25,6 @@ package redis
 
 import (
 	"encoding/binary"
-	"errors"
 	"net"
 	"strconv"
 	"sync"

@@ -246,8 +245,7 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
 	peer := bittorrent.Peer{
 		ID:   bittorrent.PeerIDFromString(string(pk[:20])),
 		Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
-		IP:   bittorrent.IP{IP: net.IP(pk[22:])},
-	}
+		IP:   bittorrent.IP{IP: net.IP(pk[22:])}}
 
 	if ip := peer.IP.To4(); ip != nil {
 		peer.IP.IP = ip

@@ -302,7 +300,7 @@ func (ps *peerStore) populateProm() {
 	defer conn.Close()
 
 	for _, group := range ps.groups() {
-		if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
+		if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && err != redis.ErrNil {
 			log.Error("storage: GET counter failure", log.Fields{
 				"key":   ps.infohashCountKey(group),
 				"error": err,

@@ -310,7 +308,7 @@ func (ps *peerStore) populateProm() {
 		} else {
 			numInfohashes += n
 		}
-		if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
+		if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && err != redis.ErrNil {
 			log.Error("storage: GET counter failure", log.Fields{
 				"key":   ps.seederCountKey(group),
 				"error": err,

@@ -318,7 +316,7 @@ func (ps *peerStore) populateProm() {
 		} else {
 			numSeeders += n
 		}
-		if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
+		if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && err != redis.ErrNil {
 			log.Error("storage: GET counter failure", log.Fields{
 				"key":   ps.leecherCountKey(group),
 				"error": err,

@@ -358,9 +356,9 @@ func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	_ = conn.Send("MULTI")
-	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
-	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
+	conn.Send("MULTI")
+	conn.Send("HSET", encodedSeederInfoHash, pk, ct)
+	conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err

@@ -439,9 +437,9 @@ func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	_ = conn.Send("MULTI")
-	_ = conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
-	_ = conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
+	conn.Send("MULTI")
+	conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
+	conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err

@@ -511,10 +509,10 @@ func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer)
 	conn := ps.rb.open()
 	defer conn.Close()
 
-	_ = conn.Send("MULTI")
-	_ = conn.Send("HDEL", encodedLeecherInfoHash, pk)
-	_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
-	_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
+	conn.Send("MULTI")
+	conn.Send("HDEL", encodedLeecherInfoHash, pk)
+	conn.Send("HSET", encodedSeederInfoHash, pk, ct)
+	conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
 	reply, err := redis.Int64s(conn.Do("EXEC"))
 	if err != nil {
 		return err

@@ -784,13 +782,13 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
 				// in other words, it's removed automatically after `HDEL` the last field.
 				//_, err := conn.Do("DEL", ihStr)
 
-				_ = conn.Send("MULTI")
-				_ = conn.Send("HDEL", group, ihStr)
+				conn.Send("MULTI")
+				conn.Send("HDEL", group, ihStr)
 				if isSeeder {
-					_ = conn.Send("DECR", ps.infohashCountKey(group))
+					conn.Send("DECR", ps.infohashCountKey(group))
 				}
 				_, err = redis.Values(conn.Do("EXEC"))
-				if err != nil && !errors.Is(err, redis.ErrNil) {
+				if err != nil && err != redis.ErrNil {
 					log.Error("storage: Redis EXEC failure", log.Fields{
 						"group":    group,
 						"infohash": ihStr,

@@ -798,7 +796,7 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
 					})
 				}
 			} else {
-				if _, err = conn.Do("UNWATCH"); err != nil && !errors.Is(err, redis.ErrNil) {
+				if _, err = conn.Do("UNWATCH"); err != nil && err != redis.ErrNil {
 					log.Error("storage: Redis UNWATCH failure", log.Fields{"error": err})
 				}
 			}
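The Send/Do pattern in these hunks is redigo's pipelined transaction API: `Send` queues commands client-side, and `Do("EXEC")` flushes the queue and returns every reply from the MULTI block at once. A self-contained sketch of the same pattern; the key names are placeholders, not the tracker's real keys:

```go
package main

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Queue a MULTI block; nothing is executed until EXEC.
	conn.Send("MULTI")
	conn.Send("HSET", "swarm:example", "peer1", 1700000000)
	conn.Send("INCR", "IPv4_infohash_count")

	// EXEC runs the queued commands atomically and returns their replies.
	replies, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		panic(err)
	}
	fmt.Println("replies:", replies)
}
```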
@@ -23,8 +23,7 @@ func createNew() s.PeerStore {
 		RedisBroker:         redisURL,
 		RedisReadTimeout:    10 * time.Second,
 		RedisWriteTimeout:   10 * time.Second,
-		RedisConnectTimeout: 10 * time.Second,
-	})
+		RedisConnectTimeout: 10 * time.Second})
 	if err != nil {
 		panic(err)
 	}
@@ -7,14 +7,13 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-redsync/redsync/v4"
-	"github.com/go-redsync/redsync/v4/redis/redigo"
-	redigolib "github.com/gomodule/redigo/redis"
+	"github.com/go-redsync/redsync"
+	"github.com/gomodule/redigo/redis"
 )
 
 // redisBackend represents a redis handler.
 type redisBackend struct {
-	pool    *redigolib.Pool
+	pool    *redis.Pool
 	redsync *redsync.Redsync
 }

@@ -28,7 +27,7 @@ func newRedisBackend(cfg *Config, u *redisURL, socketPath string) *redisBackend
 		ConnectTimeout: cfg.RedisConnectTimeout,
 	}
 	pool := rc.NewPool()
-	redsync := redsync.New(redigo.NewPool(pool))
+	redsync := redsync.New([]redsync.Pool{pool})
 	return &redisBackend{
 		pool:    pool,
 		redsync: redsync,

@@ -36,7 +35,7 @@ func newRedisBackend(cfg *Config, u *redisURL, socketPath string) *redisBackend
 }
 
 // open returns or creates instance of Redis connection.
-func (rb *redisBackend) open() redigolib.Conn {
+func (rb *redisBackend) open() redis.Conn {
 	return rb.pool.Get()
 }

@@ -49,11 +48,11 @@ type redisConnector struct {
 }
 
 // NewPool returns a new pool of Redis connections
-func (rc *redisConnector) NewPool() *redigolib.Pool {
-	return &redigolib.Pool{
+func (rc *redisConnector) NewPool() *redis.Pool {
+	return &redis.Pool{
 		MaxIdle:     3,
 		IdleTimeout: 240 * time.Second,
-		Dial: func() (redigolib.Conn, error) {
+		Dial: func() (redis.Conn, error) {
 			c, err := rc.open()
 			if err != nil {
 				return nil, err

@@ -69,8 +68,8 @@ func (rc *redisConnector) NewPool() *redigolib.Pool {
 			return c, err
 		},
 		// PINGs connections that have been idle more than 10 seconds
-		TestOnBorrow: func(c redigolib.Conn, t time.Time) error {
-			if time.Since(t) < 10*time.Second {
+		TestOnBorrow: func(c redis.Conn, t time.Time) error {
+			if time.Since(t) < time.Duration(10*time.Second) {
 				return nil
 			}
 			_, err := c.Do("PING")

@@ -80,23 +79,23 @@ func (rc *redisConnector) NewPool() *redigolib.Pool {
 }
 
 // Open a new Redis connection
-func (rc *redisConnector) open() (redigolib.Conn, error) {
-	opts := []redigolib.DialOption{
-		redigolib.DialDatabase(rc.URL.DB),
-		redigolib.DialReadTimeout(rc.ReadTimeout),
-		redigolib.DialWriteTimeout(rc.WriteTimeout),
-		redigolib.DialConnectTimeout(rc.ConnectTimeout),
+func (rc *redisConnector) open() (redis.Conn, error) {
+	var opts = []redis.DialOption{
+		redis.DialDatabase(rc.URL.DB),
+		redis.DialReadTimeout(rc.ReadTimeout),
+		redis.DialWriteTimeout(rc.WriteTimeout),
+		redis.DialConnectTimeout(rc.ConnectTimeout),
 	}
 
 	if rc.URL.Password != "" {
-		opts = append(opts, redigolib.DialPassword(rc.URL.Password))
+		opts = append(opts, redis.DialPassword(rc.URL.Password))
 	}
 
 	if rc.SocketPath != "" {
-		return redigolib.Dial("unix", rc.SocketPath, opts...)
+		return redis.Dial("unix", rc.SocketPath, opts...)
 	}
 
-	return redigolib.Dial("tcp", rc.URL.Host, opts...)
+	return redis.Dial("tcp", rc.URL.Host, opts...)
 }
 
 // A redisURL represents a parsed redisURL

@@ -120,7 +119,7 @@ func parseRedisURL(target string) (*redisURL, error) {
 		return nil, errors.New("no redis scheme found")
 	}
 
-	db := 0 // default redis db
+	db := 0 //default redis db
 	parts := strings.Split(u.Path, "/")
 	if len(parts) != 1 {
 		db, err = strconv.Atoi(parts[1])
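The import change above is the redsync v1 to v4 migration: v4 takes a pool wrapped by its `redis/redigo` adapter instead of a `[]redsync.Pool` slice. Once constructed, the handle is used the same way; a brief sketch of acquiring a distributed lock with the v4 API (the mutex name and connection details are assumptions):

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-redsync/redsync/v4"
	"github.com/go-redsync/redsync/v4/redis/redigo"
	redigolib "github.com/gomodule/redigo/redis"
)

func main() {
	pool := &redigolib.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redigolib.Conn, error) {
			return redigolib.Dial("tcp", "127.0.0.1:6379")
		},
	}

	// Wrap the redigo pool with redsync's adapter and create a mutex.
	rs := redsync.New(redigo.NewPool(pool))
	mutex := rs.NewMutex("chihaya-gc-lock")

	if err := mutex.Lock(); err != nil {
		panic(err) // another instance holds the lock
	}
	fmt.Println("lock acquired; running critical section")
	if _, err := mutex.Unlock(); err != nil {
		panic(err)
	}
}
```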
@ -53,10 +53,8 @@ func generatePeers() (a [1000]bittorrent.Peer) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
type (
|
type executionFunc func(int, PeerStore, *benchData) error
|
||||||
executionFunc func(int, PeerStore, *benchData) error
|
type setupFunc func(PeerStore, *benchData) error
|
||||||
setupFunc func(PeerStore, *benchData) error
|
|
||||||
)
|
|
||||||
|
|
||||||
func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
|
func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
|
||||||
bd := &benchData{generateInfohashes(), generatePeers()}
|
bd := &benchData{generateInfohashes(), generatePeers()}
|
||||||
|
@ -187,7 +185,6 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
|
||||||
runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
|
runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
|
||||||
err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
|
err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
|
return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
|
||||||
})
|
})
|
||||||
|
@@ -214,7 +211,7 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
 // DeleteNonexist can run in parallel.
 func DeleteNonexist(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
+		ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
 		return nil
 	})
 }
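This hunk, and every remaining one in the file, makes the same one-line change: the `-` side discards the returned error with a blank assignment, the `+` side drops it silently. (Since this compare runs from main toward the dependabot branch, the `+` side is apparently the older revision.) Both forms are legal Go with identical behavior; `_ =` merely makes the discard explicit, which keeps linters such as errcheck quiet. A tiny self-contained illustration:

```go
package main

import "errors"

// deleteSeeder stands in for PeerStore.DeleteSeeder, which returns an
// error when the peer is not in the swarm, as in these benchmarks.
func deleteSeeder() error { return errors.New("peer does not exist") }

func main() {
	deleteSeeder()     // legal: the error is silently dropped (errcheck flags this)
	_ = deleteSeeder() // identical at runtime; the discard is explicit
}
```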
@@ -225,7 +222,7 @@ func DeleteNonexist(b *testing.B, ps PeerStore) {
 // DeleteNonexist can run in parallel.
 func DeleteNonexist1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
+		ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
 		return nil
 	})
 }
@@ -236,7 +233,7 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) {
 // DeleteNonexist1kInfohash can run in parallel.
 func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
+		ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
 		return nil
 	})
 }
@@ -247,7 +244,7 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
 // DeleteNonexist1kInfohash1k can run in parallel.
 func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
+		ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
 		return nil
 	})
 }
@@ -258,7 +255,7 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 // GradNonexist can run in parallel.
 func GradNonexist(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
+		ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
 		return nil
 	})
 }
@@ -269,7 +266,7 @@ func GradNonexist(b *testing.B, ps PeerStore) {
 // GradNonexist1k can run in parallel.
 func GradNonexist1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
+		ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
 		return nil
 	})
 }
@@ -280,7 +277,7 @@ func GradNonexist1k(b *testing.B, ps PeerStore) {
 // GradNonexist1kInfohash can run in parallel.
 func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
+		ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
 		return nil
 	})
 }
@@ -292,7 +289,7 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
 // GradNonexist1kInfohash1k can run in parallel.
 func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
 	runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
-		_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
+		ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
 		return nil
 	})
 }
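For context on how these exported helpers get run: each PeerStore implementation wires them into its own `go test -bench` functions. A hypothetical driver — the package, constructor, and import path are assumptions for illustration, not taken from this diff:

```go
package memory

import (
	"testing"

	"github.com/chihaya/chihaya/storage"
)

// BenchmarkDeleteNonexist adapts the shared helper to this implementation.
func BenchmarkDeleteNonexist(b *testing.B) {
	ps := newTestPeerStore(b) // assumed: a test constructor for the store under test
	storage.DeleteNonexist(b, ps)
}
```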