Compare commits

..

1 commit

Author SHA1 Message Date
dependabot-preview[bot]
9cbeb49ec4
Upgrade to GitHub-native Dependabot 2021-04-28 16:46:39 +00:00
55 changed files with 708 additions and 1568 deletions

4
.github/FUNDING.yml vendored
View file

@ -1,3 +1 @@
---
github:
- "jzelinskie"
github: [ jzelinskie ]

View file

@ -1,23 +1,7 @@
---
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"
- package-ecosystem: gomod
directory: "/"
schedule:
interval: monthly
open-pull-requests-limit: 10

115
.github/workflows/CI.yaml vendored Normal file
View file

@ -0,0 +1,115 @@
name: CI
on:
# See the documentation for more intricate event dispatch here:
# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#on
push:
branches:
- "!dependabot/*"
- "*"
pull_request:
branches:
- "*"
jobs:
build:
name: Build & Lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup
uses: actions/setup-go@v2
with:
go-version: ^1.15
- name: Build
run: go build -v ./cmd/...
- name: Vet
run: go vet ./...
- name: Imports
uses: Jerome1337/goimports-action@v1.0.2
- name: Format
uses: Jerome1337/gofmt-action@v1.0.2
- name: Lint
uses: Jerome1337/golint-action@v1.0.2
unit:
name: Unit Tests
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup
uses: actions/setup-go@v2
with:
go-version: ^1.15
- name: Unit Tests
run: go test -v -race $(go list ./...)
e2e-mem:
name: E2E Tests (Memory Storage)
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup
uses: actions/setup-go@v2
with:
go-version: ^1.15
- name: End-to-End Test
run: |
go install ./cmd/chihaya
cat ./dist/example_config.yaml
chihaya --config=./dist/example_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
e2e-redis:
name: E2E Tests (Redis Storage)
runs-on: ubuntu-latest
services:
redis:
image: redis
ports: ["6379:6379"]
options: --entrypoint redis-server
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup
uses: actions/setup-go@v2
with:
go-version: ^1.15
- name: Configure redis storage
run: |
curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
chmod +x faq-linux-amd64
./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
cat ./dist/example_redis_config.yaml
- name: End-to-End Test
run: |
go install ./cmd/chihaya
chihaya --config=./dist/example_redis_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
dist:
name: Helm Template
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install Helm
uses: engineerd/configurator@v0.0.5
with:
name: helm
pathInArchive: linux-amd64/helm
fromGitHubReleases: true
repo: helm/helm
version: ^v3
urlTemplate: https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz
token: ${{ secrets.GITHUB_TOKEN }}
- name: Helm Template
working-directory: ./dist/helm/chihaya
run: helm template . --debug

View file

@ -1,112 +0,0 @@
---
name: "Build & Test"
on:
push:
branches:
- "!dependabot/*"
- "main"
pull_request:
branches: ["*"]
jobs:
build:
name: "Go Build"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Build"
run: "go build ./cmd/..."
unit:
name: "Run Unit Tests"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Run `go test`"
run: "go test -race ./..."
e2e-mem:
name: "E2E Memory Tests"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install and configure chihaya"
run: |
go install ./cmd/chihaya
cat ./dist/example_config.yaml
- name: "Run end-to-end tests"
run: |
chihaya --config=./dist/example_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
e2e-redis:
name: "E2E Redis Tests"
runs-on: "ubuntu-latest"
services:
redis:
image: "redis"
ports: ["6379:6379"]
options: "--entrypoint redis-server"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install and configure chihaya"
run: |
go install ./cmd/chihaya
curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
chmod +x faq-linux-amd64
./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
cat ./dist/example_redis_config.yaml
- name: "Run end-to-end tests"
run: |
chihaya --config=./dist/example_redis_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
image-build:
name: "Docker Build"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "docker/setup-qemu-action@v1"
- uses: "docker/setup-buildx-action@v1"
with:
driver-opts: "image=moby/buildkit:master"
- uses: "docker/build-push-action@v1"
with:
push: false
tags: "latest"
helm:
name: "Helm Template"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- name: "Install Helm"
uses: "engineerd/configurator@v0.0.5"
with:
name: "helm"
pathInArchive: "linux-amd64/helm"
fromGitHubReleases: true
repo: "helm/helm"
version: "^v3"
urlTemplate: "https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz"
token: "${{ secrets.GITHUB_TOKEN }}"
- name: "Run `helm template`"
working-directory: "./dist/helm/chihaya"
run: "helm template . --debug"

View file

@ -1,86 +0,0 @@
---
name: "Lint"
on:
push:
branches:
- "!dependabot/*"
- "main"
pull_request:
branches: ["*"]
jobs:
go-mod-tidy:
name: "Lint Go Modules"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Run `go mod tidy`"
run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'"
go-fmt:
name: "Format Go"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install gofumpt"
run: "go install mvdan.cc/gofumpt@latest"
- name: "Run `gofumpt`"
run: |
GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | xargs gofumpt -d)"
if [ -n "$GOFUMPT_OUTPUT" ]; then
echo "The following files are not correctly formatted:"
echo "${GOFUMPT_OUTPUT}"
exit 1
fi
go-lint:
name: "Lint Go"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- uses: "golangci/golangci-lint-action@v2"
with:
version: "v1.43"
skip-go-installation: true
skip-pkg-cache: true
skip-build-cache: false
extra-lint:
name: "Lint YAML & Markdown"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "bewuethr/yamllint-action@v1.1.1"
with:
config-file: ".yamllint"
- uses: "nosborn/github-action-markdown-cli@v2.0.0"
with:
files: "."
config_file: ".markdownlint.yaml"
codeql:
name: "Analyze with CodeQL"
runs-on: "ubuntu-latest"
permissions:
actions: "read"
contents: "read"
security-events: "write"
strategy:
fail-fast: false
matrix:
language: ["go"]
steps:
- uses: "actions/checkout@v2"
- uses: "github/codeql-action/init@v1"
with:
languages: "${{ matrix.language }}"
- uses: "github/codeql-action/autobuild@v1"
- uses: "github/codeql-action/analyze@v1"

View file

@ -1,50 +0,0 @@
---
run:
timeout: "5m"
output:
sort-results: true
linters-settings:
goimports:
local-prefixes: "github.com/chihaya/chihaya"
gosec:
excludes:
- "G404" # Allow the usage of math/rand
linters:
enable:
- "bidichk"
- "bodyclose"
- "deadcode"
- "errcheck"
- "errname"
- "errorlint"
- "gofumpt"
- "goimports"
- "goprintffuncname"
- "gosec"
- "gosimple"
- "govet"
- "ifshort"
- "importas"
- "ineffassign"
- "makezero"
- "prealloc"
- "predeclared"
- "revive"
- "rowserrcheck"
- "staticcheck"
- "structcheck"
- "stylecheck"
- "tenv"
- "typecheck"
- "unconvert"
- "unused"
- "varcheck"
- "wastedassign"
- "whitespace"
issues:
include:
- "EXC0012" # Exported should have comment
- "EXC0012" # Exported should have comment
- "EXC0013" # Package comment should be of form
- "EXC0014" # Comment on exported should be of form
- "EXC0015" # Should have a package comment

View file

@ -1,3 +0,0 @@
---
line-length: false
no-hard-tabs: false

View file

@ -1,11 +0,0 @@
# vim: ft=yaml
---
yaml-files:
- "*.yaml"
- "*.yml"
- ".yamllint"
ignore: "dist/helm/"
extends: "default"
rules:
quoted-strings: "enable"
line-length: "disable"

View file

@ -1,3 +1,78 @@
## Contributing to LBRY
## Discussion
https://lbry.tech/contribute
Long-term discussion and bug reports are maintained via [GitHub Issues].
Code review is done via [GitHub Pull Requests].
Real-time discussion is done via [freenode IRC].
[GitHub Issues]: https://github.com/chihaya/chihaya/issues
[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
## Pull Request Procedure
If you're looking to contribute, search the GitHub issues for ones labeled "low-hanging fruit".
You can also hop into IRC and ask a developer who's online for their opinion.
Small, self-describing fixes are perfectly fine to submit without discussion.
However, please do not submit a massive Pull Request without prior communication.
Large, unannounced changes usually lead to confusion and time wasted for everyone.
If you were planning to write a large change, post an issue on GitHub first and discuss it.
Pull Requests will be treated as "review requests", and we will give feedback on style and substance that we expect to see addressed before merging.
Changes contributed via Pull Request should focus on a single issue at a time.
We will not accept pull requests that try to "sneak" unrelated changes in.
The average contribution flow is as follows:
- Determine what to work on by creating an issue or finding an existing issue you want to solve.
- Create a topic branch from where you want to base your work. This is usually `master`.
- Make commits of logical units.
- Make sure your commit messages are in the proper format
- Push your changes to a topic branch in your fork of the repository.
- Submit a pull request.
- Your PR will be reviewed and merged by one of the maintainers.
- You may be asked to make changes and [rebase] your commits.
[rebase]: https://git-scm.com/book/en/v2/Git-Branching-Rebasing
## Style
Any new files should include the license header found at the top of every source file.
### Go
The project follows idiomatic [Go conventions] for style.
If you're just starting out writing Go, you can check out this [meta-package] that documents idiomatic style decisions you will find in open source Go code.
All files should have `gofmt` executed on them and code should strive to have full coverage of static analysis tools like [govet] and [golint].
[Go conventions]: https://github.com/golang/go/wiki/CodeReviewComments
[meta-package]: https://github.com/jzelinskie/conventions
[govet]: https://golang.org/cmd/vet
[golint]: https://github.com/golang/lint
### Commit Messages
We follow a rough convention for commit messages that is designed to answer two questions: what changed and why.
The subject line should feature the what and the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various git tools.

18
LICENSE
View file

@ -1,21 +1,3 @@
The MIT License (MIT)
Copyright (c) 2015-2022 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Chihaya is released under a BSD 2-Clause license, reproduced below.
Copyright (c) 2015, The Chihaya Authors

190
README.md
View file

@ -1,146 +1,104 @@
# LBRY Tracker
# Chihaya
The LBRY tracker is a server that helps peers find each other. It was forked from [Chihaya](https://github.com/chihaya/chihaya), an open-source [BitTorrent tracker](https://en.wikipedia.org/wiki/BitTorrent_tracker).
[![Build Status](https://github.com/chihaya/chihaya/workflows/CI/badge.svg)](https://github.com/chihaya/chihaya/actions)
[![Docker Repository on Quay.io](https://quay.io/repository/jzelinskie/chihaya/status "Docker Repository on Quay.io")](https://quay.io/repository/jzelinskie/chihaya)
[![Go Report Card](https://goreportcard.com/badge/github.com/chihaya/chihaya)](https://goreportcard.com/report/github.com/chihaya/chihaya)
[![GoDoc](https://godoc.org/github.com/chihaya/chihaya?status.svg)](https://godoc.org/github.com/chihaya/chihaya)
![Lines of Code](https://tokei.rs/b1/github/chihaya/chihaya)
[![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
[![IRC Channel](https://img.shields.io/badge/freenode-%23chihaya-blue.svg "IRC Channel")](http://webchat.freenode.net/?channels=chihaya)
**Note:** The master branch may be in an unstable or even broken state during development.
Please use [releases] instead of the master branch in order to get stable binaries.
## Installation and Usage
Chihaya is an open source [BitTorrent tracker] written in [Go].
### Building from HEAD
Differentiating features include:
- HTTP and UDP protocols
- IPv4 and IPv6 support
- Pre/Post middleware hooks
- [YAML] configuration
- Metrics via [Prometheus]
- High Availability via [Redis]
- Kubernetes deployment via [Helm]
[releases]: https://github.com/chihaya/chihaya/releases
[BitTorrent tracker]: https://en.wikipedia.org/wiki/BitTorrent_tracker
[Go]: https://golang.org
[YAML]: https://yaml.org
[Prometheus]: https://prometheus.io
[Redis]: https://redis.io
[Helm]: https://helm.sh
## Why Chihaya?
Chihaya is built for developers looking to integrate BitTorrent into a preexisting production environment.
Chihaya's pluggable architecture and middleware framework offers a simple and flexible integration point that abstracts the BitTorrent tracker protocols.
The most common use case for Chihaya is enabling peer-to-peer cloud software deployments.
## Development
### Contributing
Long-term discussion and bug reports are maintained via [GitHub Issues].
Code review is done via [GitHub Pull Requests].
Real-time discussion is done via [freenode IRC].
For more information read [CONTRIBUTING.md].
[GitHub Issues]: https://github.com/chihaya/chihaya/issues
[GitHub Pull Requests]: https://github.com/chihaya/chihaya/pulls
[freenode IRC]: http://webchat.freenode.net/?channels=chihaya
[CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md
### Getting Started
#### Building from HEAD
In order to compile the project, the [latest stable version of Go] and knowledge of a [working Go environment] are required.
```sh
git clone git@github.com:lbryio/tracker.git
cd tracker
go build ./cmd/chihaya
./chihaya --help
$ git clone git@github.com:chihaya/chihaya.git
$ cd chihaya
$ go build ./cmd/chihaya
$ ./chihaya --help
```
[latest stable version of Go]: https://golang.org/dl
[working Go environment]: https://golang.org/doc/code.html
### Testing
#### Docker
Docker containers are available for [HEAD] and [stable] releases.
[HEAD]: https://quay.io/jzelinskie/chihaya-git
[stable]: https://quay.io/jzelinskie/chihaya
#### Testing
The following will run all tests and benchmarks.
Removing `-bench` will just run unit tests.
```sh
go test -bench $(go list ./...)
$ go test -bench $(go list ./...)
```
The tracker executable contains a command to end-to-end test a BitTorrent tracker.
The Chihaya executable contains a command to end-to-end test a BitTorrent tracker.
See
```sh
tracker --help
```
$ chihaya --help
```
### Configuration
Configuration of the tracker is done via one YAML configuration file.
Configuration of Chihaya is done via one YAML configuration file.
The `dist/` directory contains an example configuration file.
Files and directories under `docs/` contain detailed information about configuring middleware, storage implementations, architecture, etc.
This is an example of a UDP server running on port 9252 with metrics enabled. Remember to **change the private key** to some random string.
## Related projects
```
---
chihaya:
announce_interval: "30m"
min_announce_interval: "15m"
metrics_addr: "0.0.0.0:6880"
udp:
addr: "0.0.0.0:9252"
max_clock_skew: "10s"
private_key: ">>>>CHANGE THIS TO SOME RANDOM THING<<<<"
enable_request_timing: false
allow_ip_spoofing: false
max_numwant: 100
default_numwant: 50
max_scrape_infohashes: 50
storage:
name: "memory"
config:
gc_interval: "3m"
peer_lifetime: "31m"
shard_count: 1024
prometheus_reporting_interval: "1s"
```
# Running from Docker
This section assumes `docker` and `docker-compose` are installed on a Linux distro. Please check the official docs on how to install [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/).
## Docker Compose from lbry/tracker
In order to define a tracker service and let Docker Compose manage it, create a file named `docker-compose.yml` with:
```
version: "3"
services:
tracker:
image: lbry/tracker
command: --config /config/conf.yml
volumes:
- .:/config
network_mode: host
restart: always
```
Unfortunately, the tracker does not work without `network_mode: host` due to a bug with UDP on Docker. In this mode, firewall configuration needs to be done manually. If using `ufw`, try `ufw allow 9252`.
Now, move the configuration to the same directory as `docker-compose.yml`, naming it `conf.yml`. If you have not written a configuration yet, see the configuration section above.
Start the tracker by running the following in the same directory as the compose file:
`docker-compose up -d`
Logs can be read with:
`docker-compose logs`
To stop:
`docker-compose down`
## Building the container
A Dockerfile is provided within the repo. To build the container locally, run this command in the directory where the repo was cloned:
`sudo docker build -f Dockerfile . -t some_name/tracker:latest`
It will produce an image called `some_name/tracker`, which can be used in the Docker Compose section.
# Running from source as a service
For ease of maintenance, it is recommended to run the tracker as a service.
This is an example of running it as the current user using `systemd`:
```
[Unit]
Description=Chihaya BT tracker
After=network.target
[Service]
Type=simple
#User=chihaya
#Group=chihaya
WorkingDirectory=/home/user/github/tracker
ExecStart=/home/user/github/tracker/chihaya --config dist/example_config.yaml
Restart=on-failure
[Install]
WantedBy=multi-user.target
```
To try it, change `/home/user/github/tracker` to where the code was cloned and run:
```bash
mkdir -p ~/.config/systemd/user
# PASTE FILE IN ~/.config/systemd/user/tracker.service
systemctl --user enable tracker
systemctl --user start tracker
systemctl --user status tracker
```
## Contributing
Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
## License
LBRY's code changes are MIT licensed, and the upstream Chihaya code is licensed under a BSD 2-Clause license. For the full license, see [LICENSE](LICENSE).
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
## Contact
The primary contact for this project is [@shyba](mailto:vshyba@lbry.com).
- [BitTorrent.org](https://github.com/bittorrent/bittorrent.org): a static website containing the BitTorrent spec and all BEPs
- [OpenTracker](http://erdgeist.org/arts/software/opentracker): a popular BitTorrent tracker written in C
- [Ocelot](https://github.com/WhatCD/Ocelot): a private BitTorrent tracker written in C++

View file

@ -5,7 +5,7 @@ import (
)
func TestClientID(t *testing.T) {
clientTable := []struct{ peerID, clientID string }{
var clientTable = []struct{ peerID, clientID string }{
{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
{"-BS5820-oy4La2MWGEFj", "BS5820"},

View file

@ -8,7 +8,7 @@ import (
)
func TestNew(t *testing.T) {
table := []struct {
var table = []struct {
data string
expected Event
expectedErr error

View file

@ -187,15 +187,15 @@ func (qp *QueryParams) String(key string) (string, bool) {
return value, ok
}
// Uint returns a uint parsed from a query. After being called, it is safe to
// Uint64 returns a uint parsed from a query. After being called, it is safe to
// cast the uint64 to your desired length.
func (qp *QueryParams) Uint(key string, bitSize int) (uint64, error) {
func (qp *QueryParams) Uint64(key string) (uint64, error) {
str, exists := qp.params[key]
if !exists {
return 0, ErrKeyNotFound
}
val, err := strconv.ParseUint(str, 10, bitSize)
val, err := strconv.ParseUint(str, 10, 64)
if err != nil {
return 0, err
}

View file

@ -92,7 +92,7 @@ func TestParseInvalidURLData(t *testing.T) {
func TestParseShouldNotPanicURLData(t *testing.T) {
for _, parseStr := range shouldNotPanicQueries {
_, _ = ParseURLData(parseStr)
ParseURLData(parseStr)
}
}

View file

@ -9,9 +9,15 @@ import (
// ErrInvalidIP indicates an invalid IP for an Announce.
var ErrInvalidIP = ClientError("invalid IP")
// ErrInvalidPort indicates an invalid Port for an Announce.
var ErrInvalidPort = ClientError("invalid port")
// SanitizeAnnounce enforces a max and default NumWant and coerces the peer's
// IP address into the proper format.
func SanitizeAnnounce(r *AnnounceRequest, maxNumWant, defaultNumWant uint32) error {
if r.Port == 0 {
return ErrInvalidPort
}
if !r.NumWantProvided {
r.NumWant = defaultNumWant

View file

@ -13,7 +13,6 @@ import (
// Imports to register middleware drivers.
_ "github.com/chihaya/chihaya/middleware/clientapproval"
_ "github.com/chihaya/chihaya/middleware/fixedpeer"
_ "github.com/chihaya/chihaya/middleware/jwt"
_ "github.com/chihaya/chihaya/middleware/torrentapproval"
_ "github.com/chihaya/chihaya/middleware/varinterval"

View file

@ -1,8 +1,8 @@
package main
import (
"context"
"errors"
"os"
"os/signal"
"runtime"
"strings"
@ -100,7 +100,7 @@ func (r *Run) Start(ps storage.PeerStore) error {
}
func combineErrors(prefix string, errs []error) error {
errStrs := make([]string, 0, len(errs))
var errStrs []string
for _, err := range errs {
errStrs = append(errStrs, err.Error())
}
@ -144,13 +144,15 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
return err
}
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
reload, _ := signal.NotifyContext(context.Background(), ReloadSignals...)
quit := make(chan os.Signal)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
reload := makeReloadChan()
for {
select {
case <-reload.Done():
log.Info("reloading; received reload signal")
case <-reload:
log.Info("reloading; received SIGUSR1")
peerStore, err := r.Stop(true)
if err != nil {
return err
@ -159,8 +161,8 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
if err := r.Start(peerStore); err != nil {
return err
}
case <-ctx.Done():
log.Info("shutting down; received shutdown signal")
case <-quit:
log.Info("shutting down; received SIGINT/SIGTERM")
if _, err := r.Stop(false); err != nil {
return err
}
@ -208,7 +210,7 @@ func RootPostRunCmdFunc(cmd *cobra.Command, args []string) error {
}
func main() {
rootCmd := &cobra.Command{
var rootCmd = &cobra.Command{
Use: "chihaya",
Short: "BitTorrent Tracker",
Long: "A customizable, multi-protocol BitTorrent Tracker",
@ -227,7 +229,7 @@ func main() {
rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
e2eCmd := &cobra.Command{
var e2eCmd = &cobra.Command{
Use: "e2e",
Short: "exec e2e tests",
Long: "Execute the Chihaya end-to-end test suite",

View file

@ -1,15 +1,15 @@
//go:build darwin || freebsd || linux || netbsd || openbsd || dragonfly || solaris
// +build darwin freebsd linux netbsd openbsd dragonfly solaris
package main
import (
"os"
"os/signal"
"syscall"
)
// ReloadSignals are the signals that the current OS will send to the process
// when a configuration reload is requested.
var ReloadSignals = []os.Signal{
syscall.SIGUSR1,
func makeReloadChan() <-chan os.Signal {
reload := make(chan os.Signal)
signal.Notify(reload, syscall.SIGUSR1)
return reload
}

View file

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package main
@ -9,6 +8,8 @@ import (
"syscall"
)
var ReloadSignals = []os.Signal{
syscall.SIGHUP,
func makeReloadChan() <-chan os.Signal {
reload := make(chan os.Signal)
signal.Notify(reload, syscall.SIGHUP)
return reload
}

View file

@ -1,16 +1,15 @@
---
chihaya:
# The interval communicated with BitTorrent clients informing them how
# frequently they should announce in between client events.
announce_interval: "30m"
announce_interval: 30m
# The interval communicated with BitTorrent clients informing them of the
# minimal duration between announces.
min_announce_interval: "15m"
min_announce_interval: 15m
# The network interface that will bind to an HTTP endpoint that can be
# scraped by programs collecting metrics.
#
#
# /metrics serves metrics in the Prometheus format
# /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
metrics_addr: "0.0.0.0:6880"
@ -31,14 +30,14 @@ chihaya:
tls_key_path: ""
# The timeout durations for HTTP requests.
read_timeout: "5s"
write_timeout: "5s"
read_timeout: 5s
write_timeout: 5s
# When true, persistent connections will be allowed. Generally this is not
# useful for a public tracker, but helps performance in some cases (use of
# a reverse proxy, or when there are few clients issuing many requests).
enable_keepalive: false
idle_timeout: "30s"
idle_timeout: 30s
# Whether to time requests.
# Disabling this should increase performance/decrease load.
@ -89,7 +88,7 @@ chihaya:
addr: "0.0.0.0:6969"
# The leeway for a timestamp on a connection ID.
max_clock_skew: "10s"
max_clock_skew: 10s
# The key used to encrypt connection IDs.
private_key: "paste a random string here that will be used to hmac connection IDs"
@ -114,17 +113,17 @@ chihaya:
# This block defines configuration used for the storage of peer data.
storage:
name: "memory"
name: memory
config:
# The frequency which stale peers are removed.
# This balances between
# - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
gc_interval: "3m"
gc_interval: 3m
# The amount of time until a peer is considered stale.
# To avoid churn, keep this slightly larger than `announce_interval`
peer_lifetime: "31m"
peer_lifetime: 31m
# The number of partitions data will be divided into in order to provide a
# higher degree of parallelism.
@ -132,7 +131,7 @@ chihaya:
# The interval at which metrics about the number of infohashes and peers
# are collected and posted to Prometheus.
prometheus_reporting_interval: "1s"
prometheus_reporting_interval: 1s
# This block defines configuration used for redis storage.
# storage:
@ -142,56 +141,56 @@ chihaya:
# # This balances between
# # - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# # - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
# gc_interval: "3m"
# gc_interval: 3m
# # The interval at which metrics about the number of infohashes and peers
# # are collected and posted to Prometheus.
# prometheus_reporting_interval: "1s"
# prometheus_reporting_interval: 1s
# # The amount of time until a peer is considered stale.
# # To avoid churn, keep this slightly larger than `announce_interval`
# peer_lifetime: "31m"
# peer_lifetime: 31m
# # The address of redis storage.
# redis_broker: "redis://pwd@127.0.0.1:6379/0"
# # The timeout for reading a command reply from redis.
# redis_read_timeout: "15s"
# redis_read_timeout: 15s
# # The timeout for writing a command to redis.
# redis_write_timeout: "15s"
# redis_write_timeout: 15s
# # The timeout for connecting to redis server.
# redis_connect_timeout: "15s"
# redis_connect_timeout: 15s
# This block defines configuration used for middleware executed before a
# response has been returned to a BitTorrent client.
prehooks:
# - name: "jwt"
# options:
# issuer: "https://issuer.com"
# audience: "https://chihaya.issuer.com"
# jwk_set_url: "https://issuer.com/keys"
# jwk_set_update_interval: "5m"
#- name: jwt
# options:
# issuer: "https://issuer.com"
# audience: "https://chihaya.issuer.com"
# jwk_set_url: "https://issuer.com/keys"
# jwk_set_update_interval: 5m
# - name: "client approval"
# options:
# whitelist:
# - "OP1011"
# blacklist:
# - "OP1012"
#- name: client approval
# options:
# whitelist:
# - "OP1011"
# blacklist:
# - "OP1012"
# - name: "interval variation"
# options:
# modify_response_probability: 0.2
# max_increase_delta: 60
# modify_min_interval: true
#- name: interval variation
# options:
# modify_response_probability: 0.2
# max_increase_delta: 60
# modify_min_interval: true
# This block defines configuration used for torrent approval. It requires hashes to be
# given for the whitelist or the blacklist. Hashes are hexadecimal-encoded.
# - name: "torrent approval"
# options:
# whitelist:
# - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
# blacklist:
# - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"
#- name: torrent approval
# options:
# whitelist:
# - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
# blacklist:
# - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"

View file

@ -1,12 +1,11 @@
---
global:
scrape_interval: "5s"
evaluation_interval: "5s"
scrape_interval: 5s
evaluation_interval: 5s
# A scrape configuration containing exactly one endpoint to scrape:
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "local-chihaya" # you can name this however you want
scrape_interval: "5s" # optionally override the global scrape_interval
- job_name: 'local-chihaya' # you can name this however you want
scrape_interval: 5s # optionally override the global scrape_interval
static_configs:
- targets: ["localhost:6881"] # provide the address of chihaya's prometheus endpoint
- targets: ['localhost:6881'] # provide the address of chihaya's prometheus endpoint

View file

@ -1,6 +1,6 @@
# Architecture
## Overview
### Overview
BitTorrent clients send Announce and Scrape requests to a _Frontend_.
Frontends parse requests and write responses for the particular protocol they implement.
@ -11,6 +11,6 @@ After all PreHooks have executed, any missing response fields that are required
PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
Because they are unnecessary for generating a response, updates to the Storage for a particular request are done asynchronously in a PostHook.
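As a concrete illustration, here is a minimal sketch of the hook abstraction described above, assuming the `HandleAnnounce`/`HandleScrape` method shapes used by the middleware packages elsewhere in this diff; the no-op implementation is purely illustrative.

```go
package middleware

import (
	"context"

	"github.com/chihaya/chihaya/bittorrent"
)

// Hook is the abstraction behind PreHooks and PostHooks: each hook receives
// the parsed request plus the response being built, and may update the
// context that is shared between PreHooks and PostHooks.
type Hook interface {
	HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error)
	HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error)
}

// noopHook passes every Announce and Scrape through unchanged.
type noopHook struct{}

func (noopHook) HandleAnnounce(ctx context.Context, _ *bittorrent.AnnounceRequest, _ *bittorrent.AnnounceResponse) (context.Context, error) {
	return ctx, nil
}

func (noopHook) HandleScrape(ctx context.Context, _ *bittorrent.ScrapeRequest, _ *bittorrent.ScrapeResponse) (context.Context, error) {
	return ctx, nil
}
```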
## Diagram
### Diagram
![architecture diagram](https://user-images.githubusercontent.com/343539/52676700-05c45c80-2ef9-11e9-9887-8366008b4e7e.png)
![](https://user-images.githubusercontent.com/343539/52676700-05c45c80-2ef9-11e9-9887-8366008b4e7e.png)

View file

@ -44,11 +44,11 @@ The typical control flow of handling announces, in more detail, is:
6. Send the response to the Client.
7. Pass the request and response to the `TrackerLogic`'s `AfterAnnounce` or `AfterScrape` method.
8. Finish, accept next request.
9. For invalid requests or errors during processing: Send an error response to the client.
This step may be skipped for suspected denial-of-service attacks.
The error response may contain information about the cause of the error.
Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
Then finish, and accept the next request.
9. For invalid requests or errors during processing: Send an error response to the client.
This step may be skipped for suspected denial-of-service attacks.
The error response may contain information about the cause of the error.
Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
Then finish, and accept the next request.
#### Configuration
@ -62,22 +62,20 @@ Frontends may provide runtime metrics, such as the number of requests or their d
Metrics must be reported using [Prometheus].
A frontend should provide at least the following metrics:
- The number of valid and invalid requests handled
- The average time it takes to handle a single request.
This request timing should be made optional using a config entry.
This request timing should be made optional using a config entry.
Requests should be separated by type, i.e. Scrapes, Announces, and other protocol-specific requests.
If the frontend serves multiple transports or networks, metrics for them should be separable.
It is recommended to publish one Prometheus `HistogramVec` with:
- A name like `chihaya_PROTOCOL_response_duration_milliseconds`
- A value holding the duration in milliseconds of the reported request
- Labels for:
- `action` (= `announce`, `scrape`, ...)
- `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
- `error` (= A textual representation of the error encountered during processing.)
- `action` (= `announce`, `scrape`, ...)
- `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
- `error` (= A textual representation of the error encountered during processing.)
Because `error` is expected to hold the textual representation of any error that occurred during the request, great care must be taken to ensure all error messages are static.
`error` must not contain any information directly taken from the request, e.g. the value of an invalid parameter.
This would cause the cardinality of this dimension to explode, which slows down Prometheus clients and reporters.
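To make the recommendation concrete, here is a minimal sketch using the standard Prometheus Go client; the package name, metric name, bucket layout, and `observeResponse` helper are illustrative assumptions, while the label set follows the list above.

```go
package frontend

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// responseDuration is one HistogramVec with the recommended labels. The
// "error" label must only ever receive static strings (e.g. "internal error"),
// never text derived from the request itself.
var responseDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		// Replace "http" with the protocol the frontend implements.
		Name: "chihaya_http_response_duration_milliseconds",
		Help: "Duration of handled requests in milliseconds.",
		// Bucket layout is an illustrative choice, not a project default.
		Buckets: prometheus.ExponentialBuckets(1, 2, 12),
	},
	[]string{"action", "address_family", "error"},
)

func init() {
	prometheus.MustRegister(responseDuration)
}

// observeResponse records a single handled request; errString must be static.
func observeResponse(action, addressFamily, errString string, d time.Duration) {
	responseDuration.
		WithLabelValues(action, addressFamily, errString).
		Observe(float64(d.Nanoseconds()) / float64(time.Millisecond))
}
```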
@ -108,4 +106,4 @@ This way, a PreHook can communicate with a PostHook by setting a context value.
[BEP 3]: http://bittorrent.org/beps/bep_0003.html
[BEP 15]: http://bittorrent.org/beps/bep_0015.html
[Prometheus]: https://prometheus.io/
[old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
[old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/

View file

@ -59,7 +59,7 @@ All the InfoHashes (swarms) are also stored in a redis hash, with IP family as t
Here is an example:
```yaml
```
- IPv4
- IPv4_S_<infohash 1>: <modification time>
- IPv4_L_<infohash 1>: <modification time>
@ -73,14 +73,15 @@ Here is an example:
- <peer 3 key>: <modification time>
```
In this case, prometheus would record two swarms, three seeders, and one leecher.
These three keys per address family are used to record the count of swarms, seeders, and leechers.
```yaml
```
- IPv4_infohash_count: 2
- IPv4_S_count: 3
- IPv4_L_count: 1
```
Note: `IPv4_infohash_count` has a different meaning compared to the `memory` storage:
Note: IPv4_infohash_count has a different meaning compared to the `memory` storage:
It represents the number of infohashes reported by seeders, meaning that infohashes without seeders are not counted.

View file

@ -46,8 +46,8 @@ func BenchmarkUnmarshalScalar(b *testing.B) {
d2 := NewDecoder(&bufferLoop{"i42e"})
for i := 0; i < b.N; i++ {
_, _ = d1.Decode()
_, _ = d2.Decode()
d1.Decode()
d2.Decode()
}
}
@ -79,6 +79,6 @@ func BenchmarkUnmarshalLarge(b *testing.B) {
dec := NewDecoder(&bufferLoop{string(buf)})
for i := 0; i < b.N; i++ {
_, _ = dec.Decode()
dec.Decode()
}
}

View file

@ -66,7 +66,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
err = marshalInt(w, int64(v))
case int64:
err = marshalInt(w, v)
err = marshalInt(w, int64(v))
case uint:
err = marshalUint(w, uint64(v))
@ -78,7 +78,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
err = marshalUint(w, uint64(v))
case uint64:
err = marshalUint(w, v)
err = marshalUint(w, uint64(v))
case time.Duration: // Assume seconds
err = marshalInt(w, int64(v/time.Second))
@ -90,7 +90,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
err = marshalList(w, v)
case []Dict:
interfaceSlice := make([]interface{}, len(v))
var interfaceSlice = make([]interface{}, len(v))
for i, d := range v {
interfaceSlice[i] = d
}

View file

@ -50,8 +50,8 @@ func BenchmarkMarshalScalar(b *testing.B) {
encoder := NewEncoder(buf)
for i := 0; i < b.N; i++ {
_ = encoder.Encode("test")
_ = encoder.Encode(123)
encoder.Encode("test")
encoder.Encode(123)
}
}
@ -67,6 +67,6 @@ func BenchmarkMarshalLarge(b *testing.B) {
encoder := NewEncoder(buf)
for i := 0; i < b.N; i++ {
_ = encoder.Encode(data)
encoder.Encode(data)
}
}

View file

@ -164,7 +164,6 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
if cfg.TLSCertPath != "" && cfg.TLSKeyPath != "" {
var err error
f.tlsCfg = &tls.Config{
MinVersion: tls.VersionTLS12,
Certificates: make([]tls.Certificate, 1),
}
f.tlsCfg.Certificates[0], err = tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)
@ -266,7 +265,7 @@ func (f *Frontend) serveHTTP(l net.Listener) error {
f.srv.SetKeepAlivesEnabled(f.EnableKeepAlive)
// Start the HTTP server.
if err := f.srv.Serve(l); !errors.Is(err, http.ErrServerClosed) {
if err := f.srv.Serve(l); err != http.ErrServerClosed {
return err
}
return nil
@ -286,7 +285,7 @@ func (f *Frontend) serveHTTPS(l net.Listener) error {
f.tlsSrv.SetKeepAlivesEnabled(f.EnableKeepAlive)
// Start the HTTP server.
if err := f.tlsSrv.ServeTLS(l, "", ""); !errors.Is(err, http.ErrServerClosed) {
if err := f.tlsSrv.ServeTLS(l, "", ""); err != http.ErrServerClosed {
return err
}
return nil
@ -318,7 +317,7 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
req, err := ParseAnnounce(r, f.ParseOptions)
if err != nil {
_ = WriteError(w, err)
WriteError(w, err)
return
}
af = new(bittorrent.AddressFamily)
@ -327,14 +326,14 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
ctx := injectRouteParamsToContext(context.Background(), ps)
ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
if err != nil {
_ = WriteError(w, err)
WriteError(w, err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
err = WriteAnnounceResponse(w, resp)
if err != nil {
_ = WriteError(w, err)
WriteError(w, err)
return
}
@ -359,14 +358,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
req, err := ParseScrape(r, f.ParseOptions)
if err != nil {
_ = WriteError(w, err)
WriteError(w, err)
return
}
host, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
log.Error("http: unable to determine remote address for scrape", log.Err(err))
_ = WriteError(w, err)
WriteError(w, err)
return
}
@ -377,7 +376,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
req.AddressFamily = bittorrent.IPv6
} else {
log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
_ = WriteError(w, bittorrent.ErrInvalidIP)
WriteError(w, bittorrent.ErrInvalidIP)
return
}
af = new(bittorrent.AddressFamily)
@ -386,14 +385,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
ctx := injectRouteParamsToContext(context.Background(), ps)
ctx, resp, err := f.logic.HandleScrape(ctx, req)
if err != nil {
_ = WriteError(w, err)
WriteError(w, err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
err = WriteScrapeResponse(w, resp)
if err != nil {
_ = WriteError(w, err)
WriteError(w, err)
return
}

View file

@ -1,7 +1,6 @@
package http
import (
"errors"
"net"
"net/http"
@ -74,26 +73,26 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
request.Peer.ID = bittorrent.PeerIDFromString(peerID)
// Determine the number of remaining bytes for the client.
request.Left, err = qp.Uint("left", 64)
request.Left, err = qp.Uint64("left")
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: left")
}
// Determine the number of bytes downloaded by the client.
request.Downloaded, err = qp.Uint("downloaded", 64)
request.Downloaded, err = qp.Uint64("downloaded")
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
}
// Determine the number of bytes shared by the client.
request.Uploaded, err = qp.Uint("uploaded", 64)
request.Uploaded, err = qp.Uint64("uploaded")
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
}
// Determine the number of peers the client wants in the response.
numwant, err := qp.Uint("numwant", 32)
if err != nil && !errors.Is(err, bittorrent.ErrKeyNotFound) {
numwant, err := qp.Uint64("numwant")
if err != nil && err != bittorrent.ErrKeyNotFound {
return nil, bittorrent.ClientError("failed to parse parameter: numwant")
}
// If there were no errors, the user actually provided the numwant.
@ -101,7 +100,7 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
request.NumWant = uint32(numwant)
// Parse the port where the client is listening.
port, err := qp.Uint("port", 16)
port, err := qp.Uint64("port")
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: port")
}

View file

@ -1,7 +1,6 @@
package http
import (
"errors"
"time"
"github.com/prometheus/client_golang/prometheus"
@ -27,9 +26,8 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
var errString string
if err != nil {
var clientErr bittorrent.ClientError
if errors.As(err, &clientErr) {
errString = clientErr.Error()
if _, ok := err.(bittorrent.ClientError); ok {
errString = err.Error()
} else {
errString = "internal error"
}

View file

@ -1,7 +1,6 @@
package http
import (
"errors"
"net/http"
"github.com/chihaya/chihaya/bittorrent"
@ -12,9 +11,8 @@ import (
// WriteError communicates an error to a BitTorrent client over HTTP.
func WriteError(w http.ResponseWriter, err error) error {
message := "internal server error"
var clientErr bittorrent.ClientError
if errors.As(err, &clientErr) {
message = clientErr.Error()
if _, clientErr := err.(bittorrent.ClientError); clientErr {
message = err.Error()
} else {
log.Error("http: internal error", log.Err(err))
}
@ -59,7 +57,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
}
// Add the peers to the dictionary.
peers := make([]bencode.Dict, 0, len(resp.IPv4Peers)+len(resp.IPv6Peers))
var peers []bencode.Dict
for _, peer := range resp.IPv4Peers {
peers = append(peers, dict(peer))
}

View file

@ -11,7 +11,7 @@ import (
)
func TestWriteError(t *testing.T) {
table := []struct {
var table = []struct {
reason, expected string
}{
{"hello world", "d14:failure reason11:hello worlde"},
@ -29,7 +29,7 @@ func TestWriteError(t *testing.T) {
}
func TestWriteStatus(t *testing.T) {
table := []struct {
var table = []struct {
reason, expected string
}{
{"something is missing", "d14:failure reason20:something is missinge"},

View file

@ -11,27 +11,24 @@ type BytePool struct {
func New(length int) *BytePool {
var bp BytePool
bp.Pool.New = func() interface{} {
b := make([]byte, length)
return &b
return make([]byte, length, length)
}
return &bp
}
// Get returns a byte slice from the pool.
func (bp *BytePool) Get() *[]byte {
return bp.Pool.Get().(*[]byte)
func (bp *BytePool) Get() []byte {
return bp.Pool.Get().([]byte)
}
// Put returns a byte slice to the pool.
func (bp *BytePool) Put(b *[]byte) {
*b = (*b)[:cap(*b)]
func (bp *BytePool) Put(b []byte) {
b = b[:cap(b)]
// Zero out the bytes.
// This specific expression is optimized by the compiler:
// https://github.com/golang/go/issues/5373.
for i := range *b {
(*b)[i] = 0
// Apparently this specific expression is optimized by the compiler, see
// github.com/golang/go/issues/5373.
for i := range b {
b[i] = 0
}
bp.Pool.Put(b)
}

View file

@ -6,7 +6,6 @@ import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"math/rand"
"net"
@ -124,7 +123,8 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
},
}
if err := f.listen(); err != nil {
err := f.listen()
if err != nil {
return nil, err
}
@ -148,7 +148,7 @@ func (t *Frontend) Stop() stop.Result {
c := make(stop.Channel)
go func() {
close(t.closing)
_ = t.socket.SetReadDeadline(time.Now())
t.socket.SetReadDeadline(time.Now())
t.wg.Wait()
c.Done(t.socket.Close())
}()
@ -185,11 +185,10 @@ func (t *Frontend) serve() error {
// Read a UDP packet into a reusable buffer.
buffer := pool.Get()
n, addr, err := t.socket.ReadFromUDP(*buffer)
n, addr, err := t.socket.ReadFromUDP(buffer)
if err != nil {
pool.Put(buffer)
var netErr net.Error
if errors.As(err, &netErr); netErr.Temporary() {
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
// A temporary failure is not fatal; just pretend it never happened.
continue
}
@ -218,7 +217,7 @@ func (t *Frontend) serve() error {
}
action, af, err := t.handleRequest(
// Make sure the IP is copied, not referenced.
Request{(*buffer)[:n], append([]byte{}, addr.IP...)},
Request{buffer[:n], append([]byte{}, addr.IP...)},
ResponseWriter{t.socket, addr},
)
if t.EnableRequestTiming {
@ -245,7 +244,7 @@ type ResponseWriter struct {
// Write implements the io.Writer interface for a ResponseWriter.
func (w ResponseWriter) Write(b []byte) (int, error) {
_, _ = w.socket.WriteToUDP(b, w.addr)
w.socket.WriteToUDP(b, w.addr)
return len(b), nil
}

View file

@ -24,8 +24,8 @@ const (
// Option-Types as described in BEP 41 and BEP 45.
const (
optionEndOfOptions byte = 0x0
optionNOP byte = 0x1
optionURLData byte = 0x2
optionNOP = 0x1
optionURLData = 0x2
)
var (
@ -115,7 +115,7 @@ func ParseAnnounce(r Request, v6Action bool, opts ParseOptions) (*bittorrent.Ann
request := &bittorrent.AnnounceRequest{
Event: eventIDs[eventID],
InfoHash: bittorrent.InfoHashFromBytes(infohash),
NumWant: numWant,
NumWant: uint32(numWant),
Left: left,
Downloaded: downloaded,
Uploaded: uploaded,
@ -161,7 +161,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
return bittorrent.ParseURLData("")
}
buf := newBuffer()
var buf = newBuffer()
defer buf.free()
for i := 0; i < len(packet); {

View file

@ -1,7 +1,6 @@
package udp
import (
"errors"
"fmt"
"testing"
)
@ -52,7 +51,7 @@ func TestHandleOptionalParameters(t *testing.T) {
for _, tt := range table {
t.Run(fmt.Sprintf("%#v as %#v", tt.data, tt.values), func(t *testing.T) {
params, err := handleOptionalParameters(tt.data)
if !errors.Is(err, tt.err) {
if err != tt.err {
if tt.err == nil {
t.Fatalf("expected no parsing error for %x but got %s", tt.data, err)
} else {

View file

@ -1,7 +1,6 @@
package udp
import (
"errors"
"time"
"github.com/prometheus/client_golang/prometheus"
@ -27,9 +26,8 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
var errString string
if err != nil {
var clientErr bittorrent.ClientError
if errors.As(err, &clientErr) {
errString = clientErr.Error()
if _, ok := err.(bittorrent.ClientError); ok {
errString = err.Error()
} else {
errString = "internal error"
}

View file

@ -2,7 +2,6 @@ package udp
import (
"encoding/binary"
"errors"
"fmt"
"io"
"time"
@ -13,16 +12,15 @@ import (
// WriteError writes the failure reason as a null-terminated string.
func WriteError(w io.Writer, txID []byte, err error) {
// If the client wasn't at fault, acknowledge it.
var clientErr bittorrent.ClientError
if !errors.As(err, &clientErr) {
err = fmt.Errorf("internal error occurred: %w", err)
if _, ok := err.(bittorrent.ClientError); !ok {
err = fmt.Errorf("internal error occurred: %s", err.Error())
}
buf := newBuffer()
writeHeader(buf, txID, errorActionID)
buf.WriteString(err.Error())
buf.WriteRune('\000')
_, _ = w.Write(buf.Bytes())
w.Write(buf.Bytes())
buf.free()
}
@ -39,9 +37,9 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
} else {
writeHeader(buf, txID, announceActionID)
}
_ = binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
_ = binary.Write(buf, binary.BigEndian, resp.Incomplete)
_ = binary.Write(buf, binary.BigEndian, resp.Complete)
binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
binary.Write(buf, binary.BigEndian, resp.Incomplete)
binary.Write(buf, binary.BigEndian, resp.Complete)
peers := resp.IPv4Peers
if v6Peers {
@ -50,10 +48,10 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
for _, peer := range peers {
buf.Write(peer.IP.IP)
_ = binary.Write(buf, binary.BigEndian, peer.Port)
binary.Write(buf, binary.BigEndian, peer.Port)
}
_, _ = w.Write(buf.Bytes())
w.Write(buf.Bytes())
buf.free()
}
@ -64,12 +62,12 @@ func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
writeHeader(buf, txID, scrapeActionID)
for _, scrape := range resp.Files {
_ = binary.Write(buf, binary.BigEndian, scrape.Complete)
_ = binary.Write(buf, binary.BigEndian, scrape.Snatches)
_ = binary.Write(buf, binary.BigEndian, scrape.Incomplete)
binary.Write(buf, binary.BigEndian, scrape.Complete)
binary.Write(buf, binary.BigEndian, scrape.Snatches)
binary.Write(buf, binary.BigEndian, scrape.Incomplete)
}
_, _ = w.Write(buf.Bytes())
w.Write(buf.Bytes())
buf.free()
}
@ -80,13 +78,13 @@ func WriteConnectionID(w io.Writer, txID, connID []byte) {
writeHeader(buf, txID, connectActionID)
buf.Write(connID)
_, _ = w.Write(buf.Bytes())
w.Write(buf.Bytes())
buf.free()
}
// writeHeader writes the action and transaction ID to the provided response
// buffer.
func writeHeader(w io.Writer, txID []byte, action uint32) {
_ = binary.Write(w, binary.BigEndian, action)
_, _ = w.Write(txID)
binary.Write(w, binary.BigEndian, action)
w.Write(txID)
}

20
go.mod
View file

@ -6,23 +6,17 @@ require (
github.com/SermoDigital/jose v0.9.2-0.20180104203859-803625baeddc
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/anacrolix/dht/v2 v2.15.1 // indirect
github.com/anacrolix/missinggo/v2 v2.5.3 // indirect
github.com/anacrolix/torrent v1.40.0
github.com/go-redsync/redsync/v4 v4.5.0
github.com/gomodule/redigo v1.8.8
github.com/anacrolix/torrent v1.25.0
github.com/go-redsync/redsync v1.4.2
github.com/gomodule/redigo v2.0.0+incompatible
github.com/julienschmidt/httprouter v1.3.0
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103
github.com/minio/sha256-simd v1.0.0
github.com/minio/sha256-simd v0.1.1
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.3.0
github.com/prometheus/client_golang v1.9.0
github.com/sirupsen/logrus v1.8.0
github.com/spf13/cobra v1.1.3
github.com/stretchr/testify v1.7.0
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
gopkg.in/yaml.v2 v2.4.0
)

929
go.sum

File diff suppressed because it is too large.

View file

@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
}
return NewHook(cfg)

View file

@ -1,84 +0,0 @@
// Package fixedpeers implements a Hook that
// appends a fixed peer to every Announce request
package fixedpeers
import (
"context"
"fmt"
"net"
"strconv"
"strings"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "fixed peers"
func init() {
middleware.RegisterDriver(Name, driver{})
}
var _ middleware.Driver = driver{}
type driver struct{}
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
}
return NewHook(cfg)
}
type Config struct {
FixedPeers []string `yaml:"fixed_peers"`
}
type hook struct {
peers []bittorrent.Peer
}
// NewHook returns an instance of the torrent approval middleware.
func NewHook(cfg Config) (middleware.Hook, error) {
var peers []bittorrent.Peer
for _, peerString := range cfg.FixedPeers {
parts := strings.Split(peerString, ":")
port, err := strconv.Atoi(parts[1])
if err != nil {
return nil, err
}
ip := net.ParseIP(parts[0]).To4()
if ip == nil {
panic("Invalid ip4 on fixed_peers")
}
peers = append(peers,
bittorrent.Peer{
ID: bittorrent.PeerID{0},
Port: uint16(port),
IP: bittorrent.IP{IP: ip},
})
}
h := &hook{
peers: peers,
}
return h, nil
}
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
for _, peer := range h.peers {
resp.IPv4Peers = append(resp.IPv4Peers, peer)
resp.Complete += 1
}
return ctx, nil
}
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
// Scrapes don't require any protection.
return ctx, nil
}

View file

@ -1,47 +0,0 @@
package fixedpeers
import (
"context"
"encoding/hex"
"net"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
func TestAppendFixedPeer(t *testing.T) {
conf := Config{
FixedPeers: []string{"8.8.8.8:4040", "1.1.1.1:111"},
}
h, err := NewHook(conf)
require.Nil(t, err)
ctx := context.Background()
req := &bittorrent.AnnounceRequest{}
resp := &bittorrent.AnnounceResponse{}
hashbytes, err := hex.DecodeString("3000000000000000000000000000000000000000")
require.Nil(t, err)
hashinfo := bittorrent.InfoHashFromBytes(hashbytes)
req.InfoHash = hashinfo
nctx, err := h.HandleAnnounce(ctx, req, resp)
require.Equal(t, ctx, nctx)
peers := []bittorrent.Peer{
bittorrent.Peer{
ID: bittorrent.PeerID{0},
Port: 4040,
IP: bittorrent.IP{net.ParseIP("8.8.8.8"), bittorrent.IPv4},
},
bittorrent.Peer{
ID: bittorrent.PeerID{0},
Port: 111,
IP: bittorrent.IP{net.ParseIP("1.1.1.1"), bittorrent.IPv4},
},
}
require.Equal(t, peers, resp.IPv4Peers)
}

View file

@ -2,7 +2,7 @@ package middleware
import (
"context"
"errors"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/storage"
)
@ -35,16 +35,14 @@ func (h *swarmInteractionHook) HandleAnnounce(ctx context.Context, req *bittorre
}
switch {
case req.Port < 100:
return ctx, nil
case req.Event == bittorrent.Stopped:
err = h.store.DeleteSeeder(req.InfoHash, req.Peer)
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
if err != nil && err != storage.ErrResourceDoesNotExist {
return ctx, err
}
err = h.store.DeleteLeecher(req.InfoHash, req.Peer)
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
if err != nil && err != storage.ErrResourceDoesNotExist {
return ctx, err
}
case req.Event == bittorrent.Completed:
@ -98,8 +96,8 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
// Add the Scrape data to the response.
s := h.store.ScrapeSwarm(req.InfoHash, req.IP.AddressFamily)
resp.Incomplete += s.Incomplete
resp.Complete += s.Complete
resp.Incomplete = s.Incomplete
resp.Complete = s.Complete
err = h.appendPeers(req, resp)
return ctx, err
@ -108,7 +106,7 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error {
seeding := req.Left == 0
peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer)
if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
if err != nil && err != storage.ErrResourceDoesNotExist {
return err
}
@ -125,9 +123,9 @@ func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittor
switch req.IP.AddressFamily {
case bittorrent.IPv4:
resp.IPv4Peers = append(resp.IPv4Peers, peers...)
resp.IPv4Peers = peers
case bittorrent.IPv6:
resp.IPv6Peers = append(resp.IPv6Peers, peers...)
resp.IPv6Peers = peers
default:
panic("attempted to append peer that is neither IPv4 nor IPv6")
}
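
Most of the churn in this hunk swaps errors.Is(err, storage.ErrResourceDoesNotExist) for a plain equality check. The two are only equivalent while the storage drivers return the sentinel unwrapped; the standalone sketch below (using a local sentinel, not chihaya's) shows where the plain comparison breaks down.

package main

import (
	"errors"
	"fmt"
)

// errNotExist stands in for a sentinel like storage.ErrResourceDoesNotExist.
var errNotExist = errors.New("resource does not exist")

func main() {
	wrapped := fmt.Errorf("delete seeder: %w", errNotExist)

	fmt.Println(wrapped == errNotExist)          // false: equality misses wrapped sentinels
	fmt.Println(errors.Is(wrapped, errNotExist)) // true: errors.Is unwraps before comparing
}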

View file

@ -44,7 +44,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
}
return NewHook(cfg)
@ -93,7 +93,8 @@ func NewHook(cfg Config) (middleware.Hook, error) {
}
log.Debug("performing initial fetch of JWKs")
if err := h.updateKeys(); err != nil {
err := h.updateKeys()
if err != nil {
return nil, errors.New("failed to fetch initial JWK Set: " + err.Error())
}
@ -104,7 +105,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
return
case <-time.After(cfg.JWKUpdateInterval):
log.Debug("performing fetch of JWKs")
_ = h.updateKeys()
h.updateKeys()
}
}
}()
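
The restructured block above is the usual "fetch once, then refresh on an interval until shut down" pattern. A generic sketch of that pattern, not chihaya's actual JWT hook, under assumed names:

package main

import (
	"log"
	"time"
)

// refresher is a stand-in for the JWK-fetching hook.
type refresher struct {
	closing chan struct{}
}

func (r *refresher) updateKeys() error { return nil } // placeholder fetch

func newRefresher(interval time.Duration) (*refresher, error) {
	r := &refresher{closing: make(chan struct{})}
	if err := r.updateKeys(); err != nil {
		return nil, err // fail fast if the initial fetch fails
	}
	go func() {
		for {
			select {
			case <-r.closing:
				return
			case <-time.After(interval):
				if err := r.updateKeys(); err != nil {
					log.Println("key refresh failed:", err) // log rather than silently drop
				}
			}
		}
	}()
	return r, nil
}

func main() {
	r, _ := newRefresher(time.Minute)
	close(r.closing)
}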

View file

@ -11,7 +11,7 @@ import (
//
// Calling DeriveEntropyFromRequest multiple times yields the same values.
func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) {
v0 := binary.BigEndian.Uint64(req.InfoHash[:8]) + binary.BigEndian.Uint64(req.InfoHash[8:16])
v1 := binary.BigEndian.Uint64(req.Peer.ID[:8]) + binary.BigEndian.Uint64(req.Peer.ID[8:16])
v0 := binary.BigEndian.Uint64([]byte(req.InfoHash[:8])) + binary.BigEndian.Uint64([]byte(req.InfoHash[8:16]))
v1 := binary.BigEndian.Uint64([]byte(req.Peer.ID[:8])) + binary.BigEndian.Uint64([]byte(req.Peer.ID[8:16]))
return v0, v1
}
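
The derivation above folds the infohash and peer ID into two 64-bit values, so the same announce always yields the same seeds. A self-contained sketch of the same arithmetic on raw byte slices (the inputs are placeholders):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Stand-ins for a 20-byte infohash and a 20-byte peer ID.
	infoHash := []byte("\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
	peerID := []byte("-GO0001-123456789012")

	v0 := binary.BigEndian.Uint64(infoHash[:8]) + binary.BigEndian.Uint64(infoHash[8:16])
	v1 := binary.BigEndian.Uint64(peerID[:8]) + binary.BigEndian.Uint64(peerID[8:16])
	fmt.Println(v0, v1) // identical inputs always print identical values
}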

View file

@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
}
return NewHook(cfg)

View file

@ -29,7 +29,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
}
return NewHook(cfg)
@ -77,7 +77,8 @@ type hook struct {
// NewHook creates a middleware to randomly modify the announce interval from
// the given config.
func NewHook(cfg Config) (middleware.Hook, error) {
if err := checkConfig(cfg); err != nil {
err := checkConfig(cfg)
if err != nil {
return nil, err
}
@ -95,12 +96,12 @@ func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceReque
if h.cfg.ModifyResponseProbability == 1 || p < h.cfg.ModifyResponseProbability {
// Generate the increase delta.
v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta)
deltaDuration := time.Duration(v+1) * time.Second
addSeconds := time.Duration(v+1) * time.Second
resp.Interval += deltaDuration
resp.Interval += addSeconds
if h.cfg.ModifyMinInterval {
resp.MinInterval += deltaDuration
resp.MinInterval += addSeconds
}
return ctx, nil
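
The rename above does not change behaviour: a delta of v+1 seconds is added to the announce interval (and optionally the minimum interval). A hedged sketch of just that arithmetic, using math/rand in place of chihaya's seeded random package:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	maxIncreaseDelta := 60 // assumed config value, in seconds
	interval := 30 * time.Minute

	v := rand.Intn(maxIncreaseDelta)               // chihaya derives this from request entropy instead
	addSeconds := time.Duration(v+1) * time.Second // always at least one second
	interval += addSeconds

	fmt.Println(interval)
}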

View file

@ -4,7 +4,6 @@ package metrics
import (
"context"
"errors"
"net/http"
"net/http/pprof"
@ -50,7 +49,7 @@ func NewServer(addr string) *Server {
}
go func() {
if err := s.srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
if err := s.srv.ListenAndServe(); err != http.ErrServerClosed {
log.Fatal("failed while serving prometheus", log.Err(err))
}
}()
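
The comparison being rewritten here guards the normal shutdown path: ListenAndServe returns http.ErrServerClosed after Shutdown or Close, and only other errors should be fatal. A generic sketch of that pattern (address and timings are illustrative):

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:0"}

	go func() {
		if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
			log.Fatal("serve failed: ", err)
		}
	}()

	time.Sleep(100 * time.Millisecond)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_ = srv.Shutdown(ctx) // triggers ErrServerClosed in the goroutine above
}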

View file

@ -4,7 +4,6 @@ package memory
import (
"encoding/binary"
"math"
"net"
"runtime"
"sync"
@ -80,7 +79,7 @@ func (cfg Config) LogFields() log.Fields {
func (cfg Config) Validate() Config {
validcfg := cfg
if cfg.ShardCount <= 0 || cfg.ShardCount > (math.MaxInt/2) {
if cfg.ShardCount <= 0 {
validcfg.ShardCount = defaultShardCount
log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".ShardCount",
@ -143,7 +142,7 @@ func New(provided Config) (storage.PeerStore, error) {
case <-time.After(cfg.GarbageCollectionInterval):
before := time.Now().Add(-cfg.PeerLifetime)
log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
_ = ps.collectGarbage(before)
ps.collectGarbage(before)
}
}
}()
@ -184,8 +183,7 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
peer := bittorrent.Peer{
ID: bittorrent.PeerIDFromString(string(pk[:20])),
Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
IP: bittorrent.IP{IP: net.IP(pk[22:])},
}
IP: bittorrent.IP{IP: net.IP(pk[22:])}}
if ip := peer.IP.To4(); ip != nil {
peer.IP.IP = ip
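
decodePeerKey above relies on a fixed key layout: 20 bytes of peer ID, 2 bytes of big-endian port, and the remaining bytes as the IP. A self-contained sketch of encoding and decoding that layout without chihaya's types (ID, port, and address are placeholders):

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	// Build a key the same way the store does: ID || port || IP.
	key := make([]byte, 0, 26)
	key = append(key, []byte("-GO0001-123456789012")...) // 20-byte peer ID
	portBytes := make([]byte, 2)
	binary.BigEndian.PutUint16(portBytes, 6881) // 2-byte big-endian port
	key = append(key, portBytes...)
	key = append(key, net.ParseIP("203.0.113.10").To4()...)

	id := string(key[:20])
	port := binary.BigEndian.Uint16(key[20:22])
	ip := net.IP(key[22:])
	fmt.Println(id, port, ip)
}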

View file

@ -25,7 +25,6 @@ package redis
import (
"encoding/binary"
"errors"
"net"
"strconv"
"sync"
@ -246,8 +245,7 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
peer := bittorrent.Peer{
ID: bittorrent.PeerIDFromString(string(pk[:20])),
Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
IP: bittorrent.IP{IP: net.IP(pk[22:])},
}
IP: bittorrent.IP{IP: net.IP(pk[22:])}}
if ip := peer.IP.To4(); ip != nil {
peer.IP.IP = ip
@ -302,7 +300,7 @@ func (ps *peerStore) populateProm() {
defer conn.Close()
for _, group := range ps.groups() {
if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && err != redis.ErrNil {
log.Error("storage: GET counter failure", log.Fields{
"key": ps.infohashCountKey(group),
"error": err,
@ -310,7 +308,7 @@ func (ps *peerStore) populateProm() {
} else {
numInfohashes += n
}
if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && err != redis.ErrNil {
log.Error("storage: GET counter failure", log.Fields{
"key": ps.seederCountKey(group),
"error": err,
@ -318,7 +316,7 @@ func (ps *peerStore) populateProm() {
} else {
numSeeders += n
}
if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && err != redis.ErrNil {
log.Error("storage: GET counter failure", log.Fields{
"key": ps.leecherCountKey(group),
"error": err,
@ -358,9 +356,9 @@ func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error
conn := ps.rb.open()
defer conn.Close()
_ = conn.Send("MULTI")
_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
conn.Send("MULTI")
conn.Send("HSET", encodedSeederInfoHash, pk, ct)
conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
reply, err := redis.Int64s(conn.Do("EXEC"))
if err != nil {
return err
@ -439,9 +437,9 @@ func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
conn := ps.rb.open()
defer conn.Close()
_ = conn.Send("MULTI")
_ = conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
_ = conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
conn.Send("MULTI")
conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
reply, err := redis.Int64s(conn.Do("EXEC"))
if err != nil {
return err
@ -511,10 +509,10 @@ func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer)
conn := ps.rb.open()
defer conn.Close()
_ = conn.Send("MULTI")
_ = conn.Send("HDEL", encodedLeecherInfoHash, pk)
_ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
_ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
conn.Send("MULTI")
conn.Send("HDEL", encodedLeecherInfoHash, pk)
conn.Send("HSET", encodedSeederInfoHash, pk, ct)
conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
reply, err := redis.Int64s(conn.Do("EXEC"))
if err != nil {
return err
@ -784,13 +782,13 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
// in other words, it's removed automatically after `HDEL` the last field.
//_, err := conn.Do("DEL", ihStr)
_ = conn.Send("MULTI")
_ = conn.Send("HDEL", group, ihStr)
conn.Send("MULTI")
conn.Send("HDEL", group, ihStr)
if isSeeder {
_ = conn.Send("DECR", ps.infohashCountKey(group))
conn.Send("DECR", ps.infohashCountKey(group))
}
_, err = redis.Values(conn.Do("EXEC"))
if err != nil && !errors.Is(err, redis.ErrNil) {
if err != nil && err != redis.ErrNil {
log.Error("storage: Redis EXEC failure", log.Fields{
"group": group,
"infohash": ihStr,
@ -798,7 +796,7 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
})
}
} else {
if _, err = conn.Do("UNWATCH"); err != nil && !errors.Is(err, redis.ErrNil) {
if _, err = conn.Do("UNWATCH"); err != nil && err != redis.ErrNil {
log.Error("storage: Redis UNWATCH failure", log.Fields{"error": err})
}
}
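
The repeated Send edits above all concern the redigo pipeline pattern: Send queues commands, and Do("EXEC") flushes the MULTI block and returns the replies. A hedged sketch of that pattern against an arbitrary connection; key and field names here are placeholders, and the Send errors that the diff discards with `_ =` are checked explicitly.

package main

import (
	"github.com/gomodule/redigo/redis"
)

// putPeer queues two HSETs inside MULTI/EXEC and checks every Send error.
func putPeer(conn redis.Conn, swarmKey, peerKey string, clock int64) error {
	if err := conn.Send("MULTI"); err != nil {
		return err
	}
	if err := conn.Send("HSET", swarmKey, peerKey, clock); err != nil {
		return err
	}
	if err := conn.Send("HSET", "IPv4", swarmKey, clock); err != nil {
		return err
	}
	_, err := redis.Int64s(conn.Do("EXEC")) // flushes the queued commands
	return err
}

func main() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379") // assumes a local redis
	if err != nil {
		return
	}
	defer conn.Close()
	_ = putPeer(conn, "seeders:example", "peer:example", 1)
}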

View file

@ -23,8 +23,7 @@ func createNew() s.PeerStore {
RedisBroker: redisURL,
RedisReadTimeout: 10 * time.Second,
RedisWriteTimeout: 10 * time.Second,
RedisConnectTimeout: 10 * time.Second,
})
RedisConnectTimeout: 10 * time.Second})
if err != nil {
panic(err)
}

View file

@ -7,14 +7,13 @@ import (
"strings"
"time"
"github.com/go-redsync/redsync/v4"
"github.com/go-redsync/redsync/v4/redis/redigo"
redigolib "github.com/gomodule/redigo/redis"
"github.com/go-redsync/redsync"
"github.com/gomodule/redigo/redis"
)
// redisBackend represents a redis handler.
type redisBackend struct {
pool *redigolib.Pool
pool *redis.Pool
redsync *redsync.Redsync
}
@ -28,7 +27,7 @@ func newRedisBackend(cfg *Config, u *redisURL, socketPath string) *redisBackend
ConnectTimeout: cfg.RedisConnectTimeout,
}
pool := rc.NewPool()
redsync := redsync.New(redigo.NewPool(pool))
redsync := redsync.New([]redsync.Pool{pool})
return &redisBackend{
pool: pool,
redsync: redsync,
@ -36,7 +35,7 @@ func newRedisBackend(cfg *Config, u *redisURL, socketPath string) *redisBackend
}
// open returns or creates instance of Redis connection.
func (rb *redisBackend) open() redigolib.Conn {
func (rb *redisBackend) open() redis.Conn {
return rb.pool.Get()
}
@ -49,11 +48,11 @@ type redisConnector struct {
}
// NewPool returns a new pool of Redis connections
func (rc *redisConnector) NewPool() *redigolib.Pool {
return &redigolib.Pool{
func (rc *redisConnector) NewPool() *redis.Pool {
return &redis.Pool{
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
Dial: func() (redigolib.Conn, error) {
Dial: func() (redis.Conn, error) {
c, err := rc.open()
if err != nil {
return nil, err
@ -69,8 +68,8 @@ func (rc *redisConnector) NewPool() *redigolib.Pool {
return c, err
},
// PINGs connections that have been idle more than 10 seconds
TestOnBorrow: func(c redigolib.Conn, t time.Time) error {
if time.Since(t) < 10*time.Second {
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if time.Since(t) < time.Duration(10*time.Second) {
return nil
}
_, err := c.Do("PING")
@ -80,23 +79,23 @@ func (rc *redisConnector) NewPool() *redigolib.Pool {
}
// Open a new Redis connection
func (rc *redisConnector) open() (redigolib.Conn, error) {
opts := []redigolib.DialOption{
redigolib.DialDatabase(rc.URL.DB),
redigolib.DialReadTimeout(rc.ReadTimeout),
redigolib.DialWriteTimeout(rc.WriteTimeout),
redigolib.DialConnectTimeout(rc.ConnectTimeout),
func (rc *redisConnector) open() (redis.Conn, error) {
var opts = []redis.DialOption{
redis.DialDatabase(rc.URL.DB),
redis.DialReadTimeout(rc.ReadTimeout),
redis.DialWriteTimeout(rc.WriteTimeout),
redis.DialConnectTimeout(rc.ConnectTimeout),
}
if rc.URL.Password != "" {
opts = append(opts, redigolib.DialPassword(rc.URL.Password))
opts = append(opts, redis.DialPassword(rc.URL.Password))
}
if rc.SocketPath != "" {
return redigolib.Dial("unix", rc.SocketPath, opts...)
return redis.Dial("unix", rc.SocketPath, opts...)
}
return redigolib.Dial("tcp", rc.URL.Host, opts...)
return redis.Dial("tcp", rc.URL.Host, opts...)
}
// A redisURL represents a parsed redisURL
@ -120,7 +119,7 @@ func parseRedisURL(target string) (*redisURL, error) {
return nil, errors.New("no redis scheme found")
}
db := 0 // default redis db
db := 0 //default redis db
parts := strings.Split(u.Path, "/")
if len(parts) != 1 {
db, err = strconv.Atoi(parts[1])
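
The import churn in this file is the move between redsync v1 (which took a []redsync.Pool) and redsync v4 (which takes pools wrapped by its redigo adapter). A hedged sketch of the v4 wiring, assuming a local redis and placeholder mutex name:

package main

import (
	"time"

	"github.com/go-redsync/redsync/v4"
	"github.com/go-redsync/redsync/v4/redis/redigo"
	redigolib "github.com/gomodule/redigo/redis"
)

func main() {
	pool := &redigolib.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial: func() (redigolib.Conn, error) {
			return redigolib.Dial("tcp", "127.0.0.1:6379") // assumes a local redis
		},
	}

	rs := redsync.New(redigo.NewPool(pool)) // v4 wraps the redigo pool in an adapter
	mutex := rs.NewMutex("chihaya-gc")      // mutex name is a placeholder

	if err := mutex.Lock(); err != nil {
		return
	}
	_, _ = mutex.Unlock()
}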

View file

@ -53,10 +53,8 @@ func generatePeers() (a [1000]bittorrent.Peer) {
return
}
type (
executionFunc func(int, PeerStore, *benchData) error
setupFunc func(PeerStore, *benchData) error
)
type executionFunc func(int, PeerStore, *benchData) error
type setupFunc func(PeerStore, *benchData) error
func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
bd := &benchData{generateInfohashes(), generatePeers()}
@ -187,7 +185,6 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
if err != nil {
return err
}
return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
})
@ -214,7 +211,7 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
// DeleteNonexist can run in parallel.
func DeleteNonexist(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
return nil
})
}
@ -225,7 +222,7 @@ func DeleteNonexist(b *testing.B, ps PeerStore) {
// DeleteNonexist can run in parallel.
func DeleteNonexist1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
return nil
})
}
@ -236,7 +233,7 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) {
// DeleteNonexist1kInfohash can run in parallel.
func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
return nil
})
}
@ -247,7 +244,7 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
// DeleteNonexist1kInfohash1k can run in parallel.
func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
return nil
})
}
@ -258,7 +255,7 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
// GradNonexist can run in parallel.
func GradNonexist(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
return nil
})
}
@ -269,7 +266,7 @@ func GradNonexist(b *testing.B, ps PeerStore) {
// GradNonexist1k can run in parallel.
func GradNonexist1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
return nil
})
}
@ -280,7 +277,7 @@ func GradNonexist1k(b *testing.B, ps PeerStore) {
// GradNonexist1kInfohash can run in parallel.
func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
return nil
})
}
@ -292,7 +289,7 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
// GradNonexist1kInfohash1k can run in parallel.
func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
_ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
return nil
})
}
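
These helpers are meant to be wrapped by each storage driver's own benchmarks. A hedged sketch of such a wrapper for the memory driver; the createNew helper mirrors the redis test setup above, and the Config fields are assumptions.

package memory

import (
	"testing"

	s "github.com/chihaya/chihaya/storage"
)

// createNew builds a memory peer store for benchmarking; ShardCount is illustrative.
func createNew() s.PeerStore {
	ps, err := New(Config{ShardCount: 1024})
	if err != nil {
		panic(err)
	}
	return ps
}

// BenchmarkDeleteNonexist delegates to the shared helper with this driver's store.
func BenchmarkDeleteNonexist(b *testing.B) {
	s.DeleteNonexist(b, createNew())
}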