Compare commits

..

3 commits

Author            SHA1         Message                             Date
Alex Grintsvayg   c4ac60941a   copystreams                         2019-07-30 18:01:06 -04:00
Alex Grintsvayg   8b9ec771d0   commit every 10 inserts             2019-07-12 21:49:07 -04:00
Alex Grintsvayg   eaf5f7478b   migration triggers and procedures   2019-07-11 13:46:19 -04:00

86 changed files with 1171 additions and 7368 deletions

View file

@ -1,37 +0,0 @@
name: Go
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.20.x
- name: Build linux
run: make linux
- name: Build macos
run: make macos
- name: Test
run: make test
- name: Lint
run: make lint
- name: retrieve all tags
run: git fetch --prune --unshallow --tags
- name: Print changes since last version
run: git log $(git describe --tags --abbrev=0)..HEAD --no-merges --oneline

View file

@ -1,62 +0,0 @@
name: release
on:
push:
tags:
- "*.*.*"
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.20.x
- name: Build linux
run: make linux
- name: Build macos
run: make macos
- name: Test
run: make test
- name: Lint
run: make lint
- name: Zip macos
run: zip -r reflector_darwin_amd64.zip ./dist/darwin_amd64
- name: Zip linux
run: zip -r reflector_linux_amd64.zip ./dist/linux_amd64
- name: retrieve all tags
run: git fetch --prune --unshallow --tags
- name: Generate Changelog
run: git log $(git describe --tags --abbrev=0 @^)..@ --no-merges --oneline > ${{ github.workspace }}-CHANGELOG.txt
- name: upload to github releases
uses: softprops/action-gh-release@v1
with:
files: |
./reflector_linux_amd64.zip
./reflector_darwin_amd64.zip
body_path: ${{ github.workspace }}-CHANGELOG.txt
# - name: Login to DockerHub
# uses: docker/login-action@v2
# with:
# username: ${{ secrets.DOCKERHUB_USERNAME }}
# password: ${{ secrets.DOCKERHUB_TOKEN }}
# - name: Generate docker image
# run: make image
# - name: Docker push
# run: make publish_image

.gitignore (vendored, 1 line changed)
View file

@ -1,4 +1,3 @@
/vendor
/config.json*
/dist
/bin

View file

@ -1,9 +1,12 @@
os: linux
dist: bionic
dist: trusty
language: go
env:
- GO111MODULE=on
go:
- 1.20.x
- 1.11.x
cache:
directories:
@ -14,22 +17,22 @@ notifications:
email: false
# Skip the install step. Don't `go get` dependencies. Only build with the code in vendor/
#install: true
install: true
# Anything in before_script that returns a nonzero exit code will
# flunk the build and immediately stop. It's sorta like having
# set -e enabled in bash.
before_script:
# All the .go files, excluding vendor/ and model (auto generated)
- GO_FILES=$(find . -iname '*.go' ! -iname '*_test.go' -type f | grep -v /vendor/ ) #i wish we were this crazy :p
- go install golang.org/x/tools/cmd/goimports # Used in build script for generated files
- GO_FILES=$(find . -iname '*.go' ! -iname '*_test.go' -type f | grep -v /vendor/ )
- go get golang.org/x/tools/cmd/goimports # Used in build script for generated files
# - go get github.com/golang/lint/golint # Linter
# - go get honnef.co/go/tools/cmd/megacheck # Badass static analyzer/linter
- go install github.com/fzipp/gocyclo/cmd/gocyclo@latest # Check against high complexity
- go install github.com/mdempsky/unconvert@latest # Identifies unnecessary type conversions
- go install github.com/kisielk/errcheck@latest # Checks for unhandled errors
- go install github.com/opennota/check/cmd/varcheck@latest # Checks for unused vars
- go install github.com/opennota/check/cmd/structcheck@latest # Checks for unused fields in structs
- go get github.com/jgautheron/gocyclo # Check against high complexity
- go get github.com/mdempsky/unconvert # Identifies unnecessary type conversions
- go get github.com/kisielk/errcheck # Checks for unhandled errors
- go get github.com/opennota/check/cmd/varcheck # Checks for unused vars
- go get github.com/opennota/check/cmd/structcheck # Checks for unused fields in structs
@ -37,7 +40,7 @@ before_script:
# in a modern Go project.
script:
# Fail if a .go file hasn't been formatted with gofmt
- for i in $GO_FILES; do test -z $(gofmt -s -l $i); done
- test -z $(gofmt -s -l $GO_FILES)
# Run unit tests
- make test
# Checks for unused vars and fields on structs
@ -56,11 +59,11 @@ script:
# one last linter - ignore autogen code
#- golint -set_exit_status $(go list ./... | grep -v /vendor/ )
# Finally, build the binary
- make linux
- make
deploy:
- provider: s3
local_dir: ./dist/linux_amd64
local_dir: ./bin
skip_cleanup: true
on:
repo: lbryio/reflector.go
@ -70,11 +73,11 @@ deploy:
secure: "AxYRTy/GnjeTJKQdeJ/AEeAd+yXs783bFDKdyKNswtsHlU8sWPQgNcvTLpVqnSQMpiwkGDGi/70rvR5C+AT3SIWNw13RYrgBRpduQU0J+B2JS+3dN2DIePu25uvs++Wo22OfS8I+UjZ1mWY1SSHI2spPXvDCq5tb+Ih8nlYflEyAtxU9Oq2R3Kus2tkIlRnL25sP/2fY7RvuJFYIV63z8ZIJRzB5WzOeERqnXq2zfwos+hycAqyo/VevJnWAYTEDsvBuSODOpZF+QfKtIQ2rYSoqy8Lq1M6UOZimnC3Ulea4euBVf2ssBCnI7csGNG5UzkTiwrPDi2xIP8nM01rHW1yHJ7tQsJaghnUsfw2t6ui4ZofvbbOFTN/YCloHITifEi8Tc1/17isi3y+kX5yQ/Nk5UNry0Wbt91CP+nkL/ZmA5grkBXDL7VJMmB60TnO3ap24CtwBQartN3LoWs7h+4ov+LqbCt6IqpJVWQWlwJeb2MFPFByALtBpsqAyL1SxXlGNpPa94CuXxfQ6Bv436PtefA5FlTzR8uMmqsjWciv06bVnSvVlFEVovN2Fkplrjt7AASJ/8KJs4THDg4k61nfd8roAHx6ewQzl4wCWKCikQ0MuFd2mVHwdrbnCH1mIHuPRyvWMMIAK0ooc1/rmKiJlpgumjxoFYNE10MXtt+I="
bucket: "lbry-reflector-binary"
# - provider: releases
# file: ./bin/prism-bin
# skip_cleanup: true
# on:
# repo: lbryio/reflector.go
# tags: true
# api_key:
# secure: epAlhp3SUr8hhISarJ22n6tRw2TEa4s4oNFIvJUb5HGECVp1SYN7ao0ln5NoNLmfJS60pi911i/kMhhi21/uhZ0kCYlEhhIE2pc1zsiAxK9L9ENCssJ205HfVbe5grhwskLGzgjhU9OznO8WtmyOPWXr0it8M8RCTjx6rEC0A33Id3WMYyhP938Sj9CxEYeH4KS8wFvBXkgBVtrgaYwRTCIROFddHFXOb9jyNhqQ1RbfKtllsVtQhVk5WMlomheBNSS4vr6WMS4X4+2okFqnLtiSn1wrn5I/94UQbnrI1juVnQj0K+j32EyQbAOt4T2cLW3GtG0jhaYKyNMT9ycDCdVACPSDELlHWjeyoes9bnhUFftm6kDbQxwA1UsTF1yG8tMKXxBSmYyoT7qDloi6pBifZMrFXL61uTs6yhVB9LS/2oqg4sc0Ne87bRcn4OxsBeVCe3kbBHDTR/NTyF2gNPtRvgMAWULxTVcUm9VYdO0IWvAig5g4Row0DnFzEquD6CzezbRWD9WyZyV/AFyYHeeQ2PO7jTw0/3M7aDX33Fuhh34lehzmrC03cfgD/wZW+spxozIcQCYdiJqVw+u+/NvbNr0kkFzE9zW26JEmUFTyDvKxvnza1Kwtww3EgH6zaOL8r4yVbb54rePRvLw7pl93zlfJnEB2MCPqJOY5ZpU=
- provider: releases
file: ./bin/prism-bin
skip_cleanup: true
on:
repo: lbryio/reflector.go
tags: true
api_key:
secure: epAlhp3SUr8hhISarJ22n6tRw2TEa4s4oNFIvJUb5HGECVp1SYN7ao0ln5NoNLmfJS60pi911i/kMhhi21/uhZ0kCYlEhhIE2pc1zsiAxK9L9ENCssJ205HfVbe5grhwskLGzgjhU9OznO8WtmyOPWXr0it8M8RCTjx6rEC0A33Id3WMYyhP938Sj9CxEYeH4KS8wFvBXkgBVtrgaYwRTCIROFddHFXOb9jyNhqQ1RbfKtllsVtQhVk5WMlomheBNSS4vr6WMS4X4+2okFqnLtiSn1wrn5I/94UQbnrI1juVnQj0K+j32EyQbAOt4T2cLW3GtG0jhaYKyNMT9ycDCdVACPSDELlHWjeyoes9bnhUFftm6kDbQxwA1UsTF1yG8tMKXxBSmYyoT7qDloi6pBifZMrFXL61uTs6yhVB9LS/2oqg4sc0Ne87bRcn4OxsBeVCe3kbBHDTR/NTyF2gNPtRvgMAWULxTVcUm9VYdO0IWvAig5g4Row0DnFzEquD6CzezbRWD9WyZyV/AFyYHeeQ2PO7jTw0/3M7aDX33Fuhh34lehzmrC03cfgD/wZW+spxozIcQCYdiJqVw+u+/NvbNr0kkFzE9zW26JEmUFTyDvKxvnza1Kwtww3EgH6zaOL8r4yVbb54rePRvLw7pl93zlfJnEB2MCPqJOY5ZpU=

View file

@ -1,9 +0,0 @@
FROM alpine
EXPOSE 8080
RUN mkdir /app
WORKDIR /app
COPY dist/linux_amd64/prism-bin ./prism
RUN chmod +x prism
ENTRYPOINT [ "/app/prism" ]

View file

@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2016-2020 LBRY Inc
Copyright (c) 2016-2018 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

View file

@ -1,33 +1,25 @@
version := $(shell git describe --dirty --always --long --abbrev=7)
commit := $(shell git rev-parse --short HEAD)
commit_long := $(shell git rev-parse HEAD)
branch := $(shell git rev-parse --abbrev-ref HEAD)
curTime := $(shell date +%s)
BINARY=prism-bin
IMPORT_PATH = github.com/lbryio/reflector.go
LDFLAGS="-X ${IMPORT_PATH}/meta.version=$(version) -X ${IMPORT_PATH}/meta.commit=$(commit) -X ${IMPORT_PATH}/meta.commitLong=$(commit_long) -X ${IMPORT_PATH}/meta.branch=$(branch) -X '${IMPORT_PATH}/meta.Time=$(curTime)'"
DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
BIN_DIR = $(DIR)/dist
BIN_DIR = ${DIR}/bin
IMPORT_PATH = github.com/lbryio/reflector.go
.DEFAULT_GOAL := linux
VERSION = $(shell git --git-dir=${DIR}/.git describe --dirty --always --long --abbrev=7)
LDFLAGS = -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION} -X ${IMPORT_PATH}/meta.Time=$(shell date +%s)"
.PHONY: build clean test lint
.DEFAULT_GOAL: build
build:
mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go
clean:
if [ -f ${BIN_DIR}/${BINARY} ]; then rm ${BIN_DIR}/${BINARY}; fi
.PHONY: test
test:
go test -cover -v ./...
go test ./... -v -cover
.PHONY: lint
lint:
./scripts/lint.sh
.PHONY: linux
linux:
GOARCH=amd64 GOOS=linux go build -ldflags ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/linux_amd64/${BINARY}
.PHONY: macos
macos:
GOARCH=amd64 GOOS=darwin go build -ldflags ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/darwin_amd64/${BINARY}
.PHONY: image
image:
docker buildx build -t lbry/reflector:$(version) -t lbry/reflector:latest --platform linux/amd64 .
go get github.com/alecthomas/gometalinter && gometalinter --install && gometalinter ./...
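
The newer Makefile above injects build metadata through the linker's -X flag, which overwrites package-level string variables at link time. Below is a minimal sketch of a meta package that LDFLAGS of this shape could target; the variable names are taken from the LDFLAGS line itself, and the helper's output format is only an illustration of what the meta.VersionString() used elsewhere in this diff might produce.

// Package meta sketches the version-metadata package targeted by the -X flags above.
package meta

import "fmt"

var (
	version    string // -X github.com/lbryio/reflector.go/meta.version=$(version)
	commit     string // -X .../meta.commit=$(commit)
	commitLong string // -X .../meta.commitLong=$(commit_long)
	branch     string // -X .../meta.branch=$(branch)
	Time       string // -X '.../meta.Time=$(curTime)'; -X can only set string variables
)

// VersionString formats the linker-injected values; the exact format is assumed.
func VersionString() string {
	return fmt.Sprintf("%s (commit %s, branch %s)", version, commit, branch)
}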

View file

@ -1,14 +1,14 @@
package cluster
import (
"io"
"io/ioutil"
baselog "log"
"sort"
"time"
"github.com/lbryio/lbry.go/v2/extras/crypto"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/extras/crypto"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/stop"
"github.com/hashicorp/serf/serf"
log "github.com/sirupsen/logrus"
@ -52,7 +52,7 @@ func (c *Cluster) Connect() error {
conf.MemberlistConfig.AdvertisePort = c.port
conf.NodeName = c.name
nullLogger := baselog.New(io.Discard, "", 0)
nullLogger := baselog.New(ioutil.Discard, "", 0)
conf.Logger = nullLogger
c.eventCh = make(chan serf.Event)

View file

@ -6,8 +6,7 @@ import (
"strconv"
"syscall"
"github.com/lbryio/lbry.go/v2/extras/crypto"
"github.com/lbryio/lbry.go/extras/crypto"
"github.com/lbryio/reflector.go/cluster"
log "github.com/sirupsen/logrus"

View file

@ -1,55 +0,0 @@
package cmd
import (
"encoding/hex"
"fmt"
"github.com/lbryio/lbry.go/v2/schema/stake"
"github.com/davecgh/go-spew/spew"
"github.com/gogo/protobuf/jsonpb"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
func init() {
var cmd = &cobra.Command{
Use: "decode VALUE",
Short: "Decode a claim value",
Args: cobra.ExactArgs(1),
Run: decodeCmd,
}
rootCmd.AddCommand(cmd)
}
func decodeCmd(cmd *cobra.Command, args []string) {
c, err := stake.DecodeClaimHex(args[0], "")
if err != nil {
log.Fatal(err)
}
m := jsonpb.Marshaler{Indent: " "}
if stream := c.Claim.GetStream(); stream != nil {
json, err := m.MarshalToString(stream)
if err != nil {
log.Fatal(err)
}
fmt.Println(json)
fmt.Printf("SD hash as hex: %s\n", hex.EncodeToString(stream.GetSource().GetSdHash()))
} else if channel := c.Claim.GetChannel(); channel != nil {
json, err := m.MarshalToString(channel)
if err != nil {
log.Fatal(err)
}
fmt.Println(json)
} else if repost := c.Claim.GetRepost(); repost != nil {
json, err := m.MarshalToString(repost)
if err != nil {
log.Fatal(err)
}
fmt.Println(json)
} else {
spew.Dump(c)
}
}

View file

@ -8,8 +8,8 @@ import (
"syscall"
"time"
"github.com/lbryio/lbry.go/v2/dht"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/dht"
"github.com/lbryio/lbry.go/dht/bits"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"

View file

@ -1,80 +0,0 @@
package cmd
import (
"encoding/hex"
"os"
"time"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
func init() {
var cmd = &cobra.Command{
Use: "getstream ADDRESS:PORT SDHASH",
Short: "Get a stream from a reflector server",
Args: cobra.ExactArgs(2),
Run: getStreamCmd,
}
rootCmd.AddCommand(cmd)
}
func getStreamCmd(cmd *cobra.Command, args []string) {
addr := args[0]
sdHash := args[1]
s := store.NewCachingStore(
"getstream",
peer.NewStore(peer.StoreOpts{Address: addr}),
store.NewDiskStore("/tmp/lbry_downloaded_blobs", 2),
)
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
var sd stream.SDBlob
sdb, _, err := s.Get(sdHash)
if err != nil {
log.Fatal(err)
}
err = sd.FromBlob(sdb)
if err != nil {
log.Fatal(err)
}
filename := sd.SuggestedFileName
if filename == "" {
filename = "stream_" + time.Now().Format("20060102_150405")
}
f, err := os.Create(wd + "/" + filename)
if err != nil {
log.Fatal(err)
}
for i := 0; i < len(sd.BlobInfos)-1; i++ {
b, _, err := s.Get(hex.EncodeToString(sd.BlobInfos[i].BlobHash))
if err != nil {
log.Fatal(err)
}
data, err := b.Plaintext(sd.Key, sd.BlobInfos[i].IV)
if err != nil {
log.Fatal(err)
}
_, err = f.Write(data)
if err != nil {
log.Fatal(err)
}
}
}

View file

@ -1,93 +0,0 @@
package cmd
import (
"crypto/sha512"
"encoding/hex"
"os"
"path"
"runtime"
"sync/atomic"
"time"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/store/speedwalk"
"github.com/lbryio/lbry.go/v2/extras/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var threads int
func init() {
var cmd = &cobra.Command{
Use: "check-integrity",
Short: "check blobs integrity for a given path",
Run: integrityCheckCmd,
}
cmd.Flags().StringVar(&diskStorePath, "store-path", "", "path of the store where all blobs are cached")
cmd.Flags().IntVar(&threads, "threads", runtime.NumCPU()-1, "number of concurrent threads to process blobs")
rootCmd.AddCommand(cmd)
}
func integrityCheckCmd(cmd *cobra.Command, args []string) {
log.Printf("reflector %s", meta.VersionString())
if diskStorePath == "" {
log.Fatal("store-path must be defined")
}
blobs, err := speedwalk.AllFiles(diskStorePath, true)
if err != nil {
log.Fatalf("error while reading blobs from disk %s", errors.FullTrace(err))
}
tasks := make(chan string, len(blobs))
done := make(chan bool)
processed := new(int32)
go produce(tasks, blobs)
cpus := runtime.NumCPU()
for i := 0; i < cpus-1; i++ {
go consume(i, tasks, done, len(blobs), processed)
}
<-done
}
func produce(tasks chan<- string, blobs []string) {
for _, b := range blobs {
tasks <- b
}
close(tasks)
}
func consume(worker int, tasks <-chan string, done chan<- bool, totalTasks int, processed *int32) {
start := time.Now()
for b := range tasks {
processedSoFar := atomic.AddInt32(processed, 1)
if worker == 0 {
remaining := int32(totalTasks) - processedSoFar
timePerBlob := time.Since(start).Microseconds() / int64(processedSoFar)
remainingTime := time.Duration(int64(remaining)*timePerBlob) * time.Microsecond
log.Infof("[T%d] %d/%d blobs processed so far. ETA: %s", worker, processedSoFar, totalTasks, remainingTime.String())
}
blobPath := path.Join(diskStorePath, b[:2], b)
blob, err := os.ReadFile(blobPath)
if err != nil {
if os.IsNotExist(err) {
continue
}
log.Errorf("[Worker %d] Error looking up blob %s: %s", worker, b, err.Error())
continue
}
hashBytes := sha512.Sum384(blob)
readHash := hex.EncodeToString(hashBytes[:])
if readHash != b {
log.Infof("[%s] found a broken blob while reading from disk. Actual hash: %s", b, readHash)
err := os.Remove(blobPath)
if err != nil {
log.Errorf("Error while deleting broken blob %s: %s", b, err.Error())
}
}
}
done <- true
}
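
The ETA that worker 0 logs above is a straight proportional estimate: the average time per blob so far, multiplied by the number of blobs remaining. A self-contained illustration of the same arithmetic with made-up numbers:

package main

import (
	"fmt"
	"time"
)

func main() {
	totalTasks := int64(100000)    // pretend 100k blobs on disk
	processedSoFar := int64(12000) // and 12k checked so far
	elapsed := 60 * time.Second

	timePerBlob := elapsed.Microseconds() / processedSoFar // 5000 µs per blob
	remaining := totalTasks - processedSoFar               // 88000 blobs left
	remainingTime := time.Duration(remaining*timePerBlob) * time.Microsecond
	fmt.Println(remainingTime) // 7m20s
}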

View file

@ -7,41 +7,30 @@ import (
"syscall"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/peer"
"github.com/lbryio/reflector.go/store"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var peerNoDB bool
func init() {
var cmd = &cobra.Command{
Use: "peer",
Short: "Run peer server",
Run: peerCmd,
}
cmd.Flags().BoolVar(&peerNoDB, "nodb", false, "Don't connect to a db and don't use a db-backed blob store")
rootCmd.AddCommand(cmd)
}
func peerCmd(cmd *cobra.Command, args []string) {
var err error
db := new(db.SQL)
err := db.Connect(globalConfig.DBConn)
checkErr(err)
s3 := store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
peerServer := peer.NewServer(s3)
if !peerNoDB {
db := &db.SQL{
LogQueries: log.GetLevel() == log.DebugLevel,
}
err = db.Connect(globalConfig.DBConn)
checkErr(err)
combo := store.NewDBBackedStore(s3, db, false)
peerServer = peer.NewServer(combo)
}
s3 := store.NewS3BlobStore(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
combo := store.NewDBBackedS3Store(s3, db)
peerServer := peer.NewServer(combo)
err = peerServer.Start(":" + strconv.Itoa(peer.DefaultPort))
if err != nil {

View file

@ -1,51 +0,0 @@
package cmd
import (
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/store/speedwalk"
"github.com/lbryio/lbry.go/v2/extras/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
diskStorePath string
)
func init() {
var cmd = &cobra.Command{
Use: "populate-db",
Short: "populate local database with blobs from a disk storage",
Run: populateDbCmd,
}
cmd.Flags().StringVar(&diskStorePath, "store-path", "",
"path of the store where all blobs are cached")
rootCmd.AddCommand(cmd)
}
func populateDbCmd(cmd *cobra.Command, args []string) {
log.Printf("reflector %s", meta.VersionString())
if diskStorePath == "" {
log.Fatal("store-path must be defined")
}
localDb := &db.SQL{
SoftDelete: true,
TrackAccess: db.TrackAccessBlobs,
LogQueries: log.GetLevel() == log.DebugLevel,
}
err := localDb.Connect("reflector:reflector@tcp(localhost:3306)/reflector")
if err != nil {
log.Fatal(err)
}
blobs, err := speedwalk.AllFiles(diskStorePath, true)
if err != nil {
log.Fatal(err)
}
err = localDb.AddBlobs(blobs)
if err != nil {
log.Errorf("error while storing to db: %s", errors.FullTrace(err))
}
}

View file

@ -1,65 +0,0 @@
package cmd
import (
"fmt"
"github.com/lbryio/reflector.go/publish"
"github.com/lbryio/lbry.go/v2/lbrycrd"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
func init() {
var cmd = &cobra.Command{
Use: "publish FILE",
Short: "Publish a file",
Args: cobra.ExactArgs(1),
Run: publishCmd,
}
cmd.Flags().String("name", "", "Claim name")
cmd.Flags().String("title", "", "Title of the content")
cmd.Flags().String("description", "", "Description of the content")
cmd.Flags().String("author", "", "Content author")
cmd.Flags().String("tags", "", "Comma-separated list of tags")
cmd.Flags().Int64("release-time", 0, "original public release of content, seconds since UNIX epoch")
rootCmd.AddCommand(cmd)
}
func publishCmd(cmd *cobra.Command, args []string) {
var err error
claimName := mustGetFlagString(cmd, "name")
if claimName == "" {
log.Errorln("--name required")
return
}
path := args[0]
client, err := lbrycrd.NewWithDefaultURL(nil)
checkErr(err)
tx, txid, err := publish.Publish(
client,
path,
claimName,
"bSzpgkTnAoiT2YAhUShPpfpajPESfNXVTu",
publish.Details{
Title: mustGetFlagString(cmd, "title"),
Description: mustGetFlagString(cmd, "description"),
Author: mustGetFlagString(cmd, "author"),
Tags: nil,
ReleaseTime: mustGetFlagInt64(cmd, "release-time"),
},
"reflector.lbry.com:5566",
)
checkErr(err)
decoded, err := publish.Decode(client, tx)
checkErr(err)
fmt.Printf("TX: %s\n\n", decoded)
fmt.Printf("TXID: %s\n", txid.String())
}

View file

@ -4,378 +4,81 @@ import (
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/server/http"
"github.com/lbryio/reflector.go/server/http3"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/c2h5oh/datasize"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
//port configuration
tcpPeerPort int
http3PeerPort int
httpPeerPort int
receiverPort int
metricsPort int
//flags configuration
disableUploads bool
disableBlocklist bool
useDB bool
//upstream configuration
upstreamReflector string
upstreamProtocol string
upstreamEdgeToken string
//downstream configuration
requestQueueSize int
//upstream edge configuration (to "cold" storage)
originEndpoint string
originEndpointFallback string
//cache configuration
diskCache string
secondaryDiskCache string
memCache int
)
var cacheManagers = []string{"localdb", "lfu", "arc", "lru", "simple"}
var cacheMangerToGcache = map[string]store.EvictionStrategy{
"lfu": store.LFU,
"arc": store.ARC,
"lru": store.LRU,
"simple": store.SIMPLE,
}
func init() {
var cmd = &cobra.Command{
Use: "reflector",
Short: "Run reflector server",
Run: reflectorCmd,
}
cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from for the TCP (LBRY) protocol")
cmd.Flags().IntVar(&http3PeerPort, "http3-peer-port", 5568, "The port reflector will distribute content from over HTTP3 protocol")
cmd.Flags().IntVar(&httpPeerPort, "http-peer-port", 5569, "The port reflector will distribute content from over HTTP protocol")
cmd.Flags().IntVar(&receiverPort, "receiver-port", 5566, "The port reflector will receive content from")
cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for prometheus metrics")
cmd.Flags().BoolVar(&disableUploads, "disable-uploads", false, "Disable uploads to this reflector server")
cmd.Flags().BoolVar(&disableBlocklist, "disable-blocklist", false, "Disable blocklist watching/updating")
cmd.Flags().BoolVar(&useDB, "use-db", true, "Whether to connect to the reflector db or not")
cmd.Flags().StringVar(&upstreamReflector, "upstream-reflector", "", "host:port of a reflector server where blobs are fetched from")
cmd.Flags().StringVar(&upstreamProtocol, "upstream-protocol", "http", "protocol used to fetch blobs from another upstream reflector server (tcp/http3/http)")
cmd.Flags().StringVar(&upstreamEdgeToken, "upstream-edge-token", "", "token used to retrieve/authenticate protected content")
cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests from downstream should be handled at once (the rest will wait)")
cmd.Flags().StringVar(&originEndpoint, "origin-endpoint", "", "HTTP edge endpoint for standard HTTP retrieval")
cmd.Flags().StringVar(&originEndpointFallback, "origin-endpoint-fallback", "", "HTTP edge endpoint for standard HTTP retrieval if first origin fails")
cmd.Flags().StringVar(&diskCache, "disk-cache", "100GB:/tmp/downloaded_blobs:localdb", "Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru)")
cmd.Flags().StringVar(&secondaryDiskCache, "optional-disk-cache", "", "Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru) (this would get hit before the one specified in disk-cache)")
cmd.Flags().IntVar(&memCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
rootCmd.AddCommand(cmd)
}
func reflectorCmd(cmd *cobra.Command, args []string) {
log.Printf("reflector %s", meta.VersionString())
log.Printf("reflector version %s, built %s", meta.Version, meta.BuildTime.Format(time.RFC3339))
// the blocklist logic requires the db backed store to be the outer-most store
underlyingStore := initStores()
underlyingStoreWithCaches, cleanerStopper := initCaches(underlyingStore)
// flip this flag to false when doing db maintenance. uploads will not work (as reflector server wont be running)
// but downloads will still work straight from s3
useDB := false
if !disableUploads {
reflectorServer := reflector.NewServer(underlyingStore, underlyingStoreWithCaches)
reflectorServer.Timeout = 3 * time.Minute
reflectorServer.EnableBlocklist = !disableBlocklist
s3 := store.NewS3BlobStore(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
err := reflectorServer.Start(":" + strconv.Itoa(receiverPort))
var err error
var blobStore store.BlobStore = s3
var reflectorServer *reflector.Server
if useDB {
db := new(db.SQL)
err = db.Connect(globalConfig.DBConn)
if err != nil {
log.Fatal(err)
}
blobStore = store.NewDBBackedS3Store(s3, db)
reflectorServer = reflector.NewServer(blobStore)
reflectorServer.Timeout = 3 * time.Minute
if globalConfig.SlackHookURL != "" {
reflectorServer.StatLogger = log.StandardLogger()
reflectorServer.StatReportFrequency = 1 * time.Hour
}
reflectorServer.EnableBlocklist = true
err = reflectorServer.Start(":" + strconv.Itoa(reflector.DefaultPort))
if err != nil {
log.Fatal(err)
}
defer reflectorServer.Shutdown()
}
peerServer := peer.NewServer(underlyingStoreWithCaches)
err := peerServer.Start(":" + strconv.Itoa(tcpPeerPort))
peerServer := peer.NewServer(blobStore)
if globalConfig.SlackHookURL != "" {
peerServer.StatLogger = log.StandardLogger()
peerServer.StatReportFrequency = 1 * time.Hour
}
err = peerServer.Start(":5567")
if err != nil {
log.Fatal(err)
}
defer peerServer.Shutdown()
http3PeerServer := http3.NewServer(underlyingStoreWithCaches, requestQueueSize)
err = http3PeerServer.Start(":" + strconv.Itoa(http3PeerPort))
if err != nil {
log.Fatal(err)
}
defer http3PeerServer.Shutdown()
httpServer := http.NewServer(store.WithSingleFlight("sf-http", underlyingStoreWithCaches), requestQueueSize, upstreamEdgeToken)
err = httpServer.Start(":" + strconv.Itoa(httpPeerPort))
if err != nil {
log.Fatal(err)
}
defer httpServer.Shutdown()
metricsServer := metrics.NewServer(":"+strconv.Itoa(metricsPort), "/metrics")
metricsServer.Start()
defer metricsServer.Shutdown()
defer underlyingStoreWithCaches.Shutdown()
defer underlyingStore.Shutdown() //do we actually need this? Oo
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
<-interruptChan
// deferred shutdowns happen now
cleanerStopper.StopAndWait()
}
func initUpstreamStore() store.BlobStore {
var s store.BlobStore
if upstreamReflector == "" {
return nil
}
switch upstreamProtocol {
case "tcp":
s = peer.NewStore(peer.StoreOpts{
Address: upstreamReflector,
Timeout: 30 * time.Second,
})
case "http3":
s = http3.NewStore(http3.StoreOpts{
Address: upstreamReflector,
Timeout: 30 * time.Second,
})
case "http":
s = store.NewHttpStore(upstreamReflector, upstreamEdgeToken)
default:
log.Fatalf("protocol is not recognized: %s", upstreamProtocol)
}
return s
}
func initEdgeStore() store.BlobStore {
var s3Store *store.S3Store
var s store.BlobStore
if conf != "none" {
s3Store = store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
}
if originEndpointFallback != "" && originEndpoint != "" {
ittt := store.NewITTTStore(store.NewCloudFrontROStore(originEndpoint), store.NewCloudFrontROStore(originEndpointFallback))
if s3Store != nil {
s = store.NewCloudFrontRWStore(ittt, s3Store)
} else {
s = ittt
}
} else if s3Store != nil {
s = s3Store
} else {
log.Fatalf("this configuration does not include a valid upstream source")
}
return s
}
func initDBStore(s store.BlobStore) store.BlobStore {
if useDB {
dbInst := &db.SQL{
TrackAccess: db.TrackAccessStreams,
LogQueries: log.GetLevel() == log.DebugLevel,
}
err := dbInst.Connect(globalConfig.DBConn)
if err != nil {
log.Fatal(err)
}
s = store.NewDBBackedStore(s, dbInst, false)
}
return s
}
func initStores() store.BlobStore {
s := initUpstreamStore()
if s == nil {
s = initEdgeStore()
}
s = initDBStore(s)
return s
}
// initCaches returns a store wrapped with caches and a stop group to execute a clean shutdown
func initCaches(s store.BlobStore) (store.BlobStore, *stop.Group) {
stopper := stop.New()
diskStore := initDiskStore(s, diskCache, stopper)
finalStore := initDiskStore(diskStore, secondaryDiskCache, stopper)
stop.New()
if memCache > 0 {
finalStore = store.NewCachingStore(
"reflector",
finalStore,
store.NewGcacheStore("mem", store.NewMemStore(), memCache, store.LRU),
)
}
return finalStore, stopper
}
func initDiskStore(upstreamStore store.BlobStore, diskParams string, stopper *stop.Group) store.BlobStore {
diskCacheMaxSize, diskCachePath, cacheManager := diskCacheParams(diskParams)
//we are tracking blobs in memory with a 1 byte long boolean, which means that for each 2MB (a blob) we need 1Byte
// so if the underlying cache holds 10MB, 10MB/2MB=5Bytes which is also the exact count of objects to restore on startup
realCacheSize := float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
if diskCacheMaxSize == 0 {
return upstreamStore
}
err := os.MkdirAll(diskCachePath, os.ModePerm)
if err != nil {
log.Fatal(err)
}
diskStore := store.NewDiskStore(diskCachePath, 2)
var unwrappedStore store.BlobStore
cleanerStopper := stop.New(stopper)
if cacheManager == "localdb" {
localDb := &db.SQL{
SoftDelete: true,
TrackAccess: db.TrackAccessBlobs,
LogQueries: log.GetLevel() == log.DebugLevel,
}
err = localDb.Connect("reflector:reflector@tcp(localhost:3306)/reflector")
if err != nil {
log.Fatal(err)
}
unwrappedStore = store.NewDBBackedStore(diskStore, localDb, true)
go cleanOldestBlobs(int(realCacheSize), localDb, unwrappedStore, cleanerStopper)
} else {
unwrappedStore = store.NewGcacheStore("nvme", store.NewDiskStore(diskCachePath, 2), int(realCacheSize), cacheMangerToGcache[cacheManager])
}
wrapped := store.NewCachingStore(
"reflector",
upstreamStore,
unwrappedStore,
)
return wrapped
}
func diskCacheParams(diskParams string) (int, string, string) {
if diskParams == "" {
return 0, "", ""
}
parts := strings.Split(diskParams, ":")
if len(parts) != 3 {
log.Fatalf("%s does is formatted incorrectly. Expected format: 'sizeGB:CACHE_PATH:cachemanager' for example: '100GB:/tmp/downloaded_blobs:localdb'", diskParams)
}
diskCacheSize := parts[0]
path := parts[1]
cacheManager := parts[2]
if len(path) == 0 || path[0] != '/' {
log.Fatalf("disk cache paths must start with '/'")
}
if !util.InSlice(cacheManager, cacheManagers) {
log.Fatalf("specified cache manager '%s' is not supported. Use one of the following: %v", cacheManager, cacheManagers)
}
var maxSize datasize.ByteSize
err := maxSize.UnmarshalText([]byte(diskCacheSize))
if err != nil {
log.Fatal(err)
}
if maxSize <= 0 {
log.Fatal("disk cache size must be more than 0")
}
return int(maxSize), path, cacheManager
}
func cleanOldestBlobs(maxItems int, db *db.SQL, store store.BlobStore, stopper *stop.Group) {
// this is so that it runs on startup without having to wait for 10 minutes
err := doClean(maxItems, db, store, stopper)
if err != nil {
log.Error(errors.FullTrace(err))
}
const cleanupInterval = 10 * time.Minute
for {
select {
case <-stopper.Ch():
log.Infoln("stopping self cleanup")
return
case <-time.After(cleanupInterval):
err := doClean(maxItems, db, store, stopper)
if err != nil {
log.Error(errors.FullTrace(err))
}
}
peerServer.Shutdown()
if reflectorServer != nil {
reflectorServer.Shutdown()
}
}
func doClean(maxItems int, db *db.SQL, store store.BlobStore, stopper *stop.Group) error {
blobsCount, err := db.Count()
if err != nil {
return err
}
if blobsCount >= maxItems {
itemsToDelete := blobsCount / 10
blobs, err := db.LeastRecentlyAccessedHashes(itemsToDelete)
if err != nil {
return err
}
blobsChan := make(chan string, len(blobs))
wg := &stop.Group{}
go func() {
for _, hash := range blobs {
select {
case <-stopper.Ch():
return
default:
}
blobsChan <- hash
}
close(blobsChan)
}()
for i := 0; i < 3; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for h := range blobsChan {
select {
case <-stopper.Ch():
return
default:
}
err = store.Delete(h)
if err != nil {
log.Errorf("error pruning %s: %s", h, errors.FullTrace(err))
continue
}
}
}()
}
wg.Wait()
}
return nil
}
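
The realCacheSize comment above turns a byte budget into a blob count: a blob is at most stream.MaxBlobSize (2 MiB), so a disk cache of N bytes can hold roughly N / 2 MiB blobs, and that count is what the localdb cleaner or the gcache layer is sized to. A self-contained sketch of the same conversion, with the 2 MiB constant assumed here instead of imported from lbry.go:

package main

import "fmt"

// maxBlobSize mirrors the assumed value of stream.MaxBlobSize: 2 MiB per blob.
const maxBlobSize = 2 * 1024 * 1024

func main() {
	var diskCacheMaxSize int64 = 100 << 30 // e.g. a 100 GiB --disk-cache budget
	realCacheSize := float64(diskCacheMaxSize) / float64(maxBlobSize)
	fmt.Printf("cache sized for about %.0f blobs\n", realCacheSize) // 51200
}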

View file

@ -1,50 +0,0 @@
package cmd
import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/lbryio/reflector.go/wallet"
"github.com/spf13/cobra"
)
func init() {
var cmd = &cobra.Command{
Use: "resolve ADDRESS:PORT URL",
Short: "Resolve a URL",
Args: cobra.ExactArgs(2),
Run: resolveCmd,
}
rootCmd.AddCommand(cmd)
}
func resolveCmd(cmd *cobra.Command, args []string) {
addr := args[0]
url := args[1]
node := wallet.NewNode()
defer node.Shutdown()
err := node.Connect([]string{addr}, nil)
checkErr(err)
output, err := node.Resolve(url)
checkErr(err)
claim, err := node.GetClaimInTx(hex.EncodeToString(rev(output.GetTxHash())), int(output.GetNout()))
checkErr(err)
jsonClaim, err := json.MarshalIndent(claim, "", " ")
checkErr(err)
fmt.Println(string(jsonClaim))
}
func rev(b []byte) []byte {
r := make([]byte, len(b))
for left, right := 0, len(b)-1; left < right; left, right = left+1, right-1 {
r[left], r[right] = b[right], b[left]
}
return r
}
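
The rev helper above suggests the resolve output carries the transaction hash in internal (little-endian) byte order, while GetClaimInTx expects the conventional hex txid, which in Bitcoin-derived chains is the byte-reversed form. A minimal illustration with a made-up 4-byte value (real tx hashes are 32 bytes):

package main

import (
	"encoding/hex"
	"fmt"
)

// rev mirrors the helper above: it returns a byte-reversed copy of its input
// (written for even-length input such as 32-byte tx hashes).
func rev(b []byte) []byte {
	r := make([]byte, len(b))
	for left, right := 0, len(b)-1; left < right; left, right = left+1, right-1 {
		r[left], r[right] = b[right], b[left]
	}
	return r
}

func main() {
	internal, _ := hex.DecodeString("aabbccdd")    // hash bytes as received
	fmt.Println(hex.EncodeToString(rev(internal))) // "ddccbbaa", the display/txid order
}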

View file

@ -2,15 +2,14 @@ package cmd
import (
"encoding/json"
"io/ioutil"
"os"
"strings"
"github.com/lbryio/lbry.go/dht"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/util"
"github.com/lbryio/reflector.go/updater"
"github.com/lbryio/lbry.go/v2/dht"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/johntdyer/slackrus"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -24,7 +23,6 @@ type Config struct {
BucketName string `json:"bucket_name"`
DBConn string `json:"db_conn"`
SlackHookURL string `json:"slack_hook_url"`
SlackChannel string `json:"slack_channel"`
UpdateBinURL string `json:"update_bin_url"`
UpdateCmd string `json:"update_cmd"`
}
@ -51,7 +49,7 @@ var rootCmd = &cobra.Command{
func init() {
rootCmd.PersistentFlags().StringSliceVarP(&verbose, "verbose", "v", []string{}, "Verbose logging for specific components")
rootCmd.PersistentFlags().StringVar(&conf, "conf", "config.json", "Path to config. Use 'none' to disable")
rootCmd.PersistentFlags().StringVar(&conf, "conf", "config.json", "Path to config")
}
// Execute adds all child commands to the root command and sets flags appropriately.
@ -70,11 +68,8 @@ func preRun(cmd *cobra.Command, args []string) {
debugLogger.SetOutput(os.Stderr)
if util.InSlice(verboseAll, verbose) {
logrus.Info("global verbose logging enabled")
logrus.SetLevel(logrus.DebugLevel)
verbose = []string{verboseDHT, verboseNodeFinder}
} else if len(verbose) > 0 {
logrus.Infof("verbose logging enabled for: %s", strings.Join(verbose, ", "))
}
for _, debugType := range verbose {
@ -102,7 +97,7 @@ func preRun(cmd *cobra.Command, args []string) {
hook := &slackrus.SlackrusHook{
HookURL: globalConfig.SlackHookURL,
AcceptedLevels: slackrus.LevelThreshold(logrus.InfoLevel),
Channel: globalConfig.SlackChannel,
Channel: "#reflector-logs",
//IconEmoji: ":ghost:",
//Username: "reflector.go",
}
@ -141,32 +136,14 @@ func argFuncs(funcs ...cobra.PositionalArgs) cobra.PositionalArgs {
func loadConfig(path string) (Config, error) {
var c Config
raw, err := os.ReadFile(path)
raw, err := ioutil.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return c, errors.Err("config file not found")
}
return c, errors.Err(err)
return c, err
}
err = json.Unmarshal(raw, &c)
return c, errors.Err(err)
return c, err
}
func mustGetFlagString(cmd *cobra.Command, name string) string {
v, err := cmd.Flags().GetString(name)
checkErr(err)
return v
}
func mustGetFlagInt64(cmd *cobra.Command, name string) int64 {
v, err := cmd.Flags().GetInt64(name)
checkErr(err)
return v
}
//func mustGetFlagBool(cmd *cobra.Command, name string) bool {
// v, err := cmd.Flags().GetBool(name)
// checkErr(err)
// return v
//}

View file

@ -1,158 +0,0 @@
package cmd
import (
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"os/signal"
"path"
"syscall"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/cobra"
)
func init() {
var cmd = &cobra.Command{
Use: "send ADDRESS:PORT PATH",
Short: "Send a file to a reflector",
Args: cobra.ExactArgs(2),
Run: sendCmd,
}
cmd.PersistentFlags().String("sd-cache", "", "path to dir where sd blobs will be cached")
rootCmd.AddCommand(cmd)
}
// todo: if retrying a large file is slow, we can add the ability to seek ahead in the file so we're not
// re-uploading blobs that already exist
var hackyReflector reflector.Client
func sendCmd(cmd *cobra.Command, args []string) {
reflectorAddress := args[0]
err := hackyReflector.Connect(reflectorAddress)
checkErr(err)
defer func() { _ = hackyReflector.Close() }()
filePath := args[1]
file, err := os.Open(filePath)
checkErr(err)
defer func() { _ = file.Close() }()
sdCachePath := ""
sdCacheDir := mustGetFlagString(cmd, "sd-cache")
if sdCacheDir != "" {
if _, err := os.Stat(sdCacheDir); os.IsNotExist(err) {
err = os.MkdirAll(sdCacheDir, 0777)
checkErr(err)
}
sdCachePath = path.Join(sdCacheDir, filePath+".sdblob")
}
var enc *stream.Encoder
if sdCachePath != "" {
if _, err := os.Stat(sdCachePath); !os.IsNotExist(err) {
sdBlob, err := os.ReadFile(sdCachePath)
checkErr(err)
cachedSDBlob := &stream.SDBlob{}
err = cachedSDBlob.FromBlob(sdBlob)
checkErr(err)
enc = stream.NewEncoderFromSD(file, cachedSDBlob)
}
}
if enc == nil {
enc = stream.NewEncoder(file)
}
exitCode := 0
var killed bool
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
go func() {
sig := <-interruptChan
fmt.Printf("caught %s, exiting...\n", sig.String())
killed = true
exitCode = 1
}()
for {
if killed {
break
}
b, err := enc.Next()
if errors.Is(err, io.EOF) {
break
}
if err != nil {
fmt.Printf("error reading next blob: %v\n", err)
exitCode = 1
break
}
err = hackyReflect(b, false)
if err != nil {
fmt.Printf("error reflecting blob %s: %v\n", b.HashHex()[:8], err)
exitCode = 1
break
}
}
sd := enc.SDBlob()
//sd.StreamName = filepath.Base(filePath)
//sd.SuggestedFileName = filepath.Base(filePath)
err = os.WriteFile(sdCachePath, sd.ToBlob(), 0666)
if err != nil {
fmt.Printf("error saving sd blob: %v\n", err)
fmt.Println(sd.ToJson())
exitCode = 1
}
if killed {
os.Exit(exitCode)
}
if reflectorAddress != "" {
err = hackyReflect(sd.ToBlob(), true)
if err != nil {
fmt.Printf("error reflecting sd blob %s: %v\n", sd.HashHex()[:8], err)
exitCode = 1
}
}
ret := struct {
SDHash string `json:"sd_hash"`
SourceHash string `json:"source_hash"`
}{
SDHash: sd.HashHex(),
SourceHash: hex.EncodeToString(enc.SourceHash()),
}
j, err := json.MarshalIndent(ret, "", " ")
checkErr(err)
fmt.Println(string(j))
os.Exit(exitCode)
}
func hackyReflect(b stream.Blob, sd bool) error {
var err error
if sd {
err = hackyReflector.SendSDBlob(b)
} else {
err = hackyReflector.SendBlob(b)
}
if errors.Is(err, reflector.ErrBlobExists) {
//fmt.Printf("%s already reflected\n", b.HashHex()[:8])
return nil
}
return err
}

View file

@ -2,11 +2,10 @@ package cmd
import (
"crypto/rand"
"os"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/lbry.go/stream"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -14,9 +13,9 @@ import (
func init() {
var cmd = &cobra.Command{
Use: "sendblob ADDRESS:PORT [PATH]",
Use: "sendblob ADDRESS:PORT",
Short: "Send a random blob to a reflector server",
Args: cobra.RangeArgs(1, 2),
Args: cobra.ExactArgs(1),
Run: sendBlobCmd,
}
rootCmd.AddCommand(cmd)
@ -24,10 +23,6 @@ func init() {
func sendBlobCmd(cmd *cobra.Command, args []string) {
addr := args[0]
var path string
if len(args) >= 2 {
path = args[1]
}
c := reflector.Client{}
err := c.Connect(addr)
@ -35,36 +30,14 @@ func sendBlobCmd(cmd *cobra.Command, args []string) {
log.Fatal("error connecting client to server: ", err)
}
if path == "" {
blob := make(stream.Blob, 1024)
_, err = rand.Read(blob)
if err != nil {
log.Fatal("failed to make random blob: ", err)
}
err = c.SendBlob(blob)
if err != nil {
log.Error(err)
}
return
blob := make(stream.Blob, 1024)
_, err = rand.Read(blob)
if err != nil {
log.Fatal("failed to make random blob: ", err)
}
file, err := os.Open(path)
checkErr(err)
defer func() { _ = file.Close() }()
s, err := stream.New(file)
checkErr(err)
sdBlob := &stream.SDBlob{}
err = sdBlob.FromBlob(s[0])
checkErr(err)
for i, b := range s {
if i == 0 {
err = c.SendSDBlob(b)
} else {
err = c.SendBlob(b)
}
checkErr(err)
err = c.SendBlob(blob)
if err != nil {
log.Error(err)
}
}

View file

@ -7,16 +7,15 @@ import (
"strings"
"syscall"
"github.com/lbryio/lbry.go/dht"
"github.com/lbryio/lbry.go/dht/bits"
"github.com/lbryio/reflector.go/cluster"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/peer"
"github.com/lbryio/reflector.go/prism"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/dht"
"github.com/lbryio/lbry.go/v2/dht/bits"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -53,13 +52,11 @@ func init() {
}
func startCmd(cmd *cobra.Command, args []string) {
db := &db.SQL{
LogQueries: log.GetLevel() == log.DebugLevel,
}
db := new(db.SQL)
err := db.Connect(globalConfig.DBConn)
checkErr(err)
s3 := store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
comboStore := store.NewDBBackedStore(s3, db, false)
s3 := store.NewS3BlobStore(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
comboStore := store.NewDBBackedS3Store(s3, db)
conf := prism.DefaultConf()

View file

@ -9,8 +9,8 @@ import (
"time"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/peer"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
log "github.com/sirupsen/logrus"
@ -27,11 +27,11 @@ func init() {
}
func testCmd(cmd *cobra.Command, args []string) {
log.Printf("reflector %s", meta.VersionString())
log.Printf("reflector version %s", meta.Version)
memStore := store.NewMemStore()
memStore := &store.MemoryBlobStore{}
reflectorServer := reflector.NewServer(memStore, memStore)
reflectorServer := reflector.NewServer(memStore)
reflectorServer.Timeout = 3 * time.Minute
err := reflectorServer.Start(":" + strconv.Itoa(reflector.DefaultPort))

View file

@ -9,13 +9,11 @@ import (
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/store"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var uploadWorkers int
var uploadSkipExistsCheck bool
var uploadDeleteBlobsAfterUpload bool
func init() {
var cmd = &cobra.Command{
@ -26,22 +24,19 @@ func init() {
}
cmd.PersistentFlags().IntVar(&uploadWorkers, "workers", 1, "How many worker threads to run at once")
cmd.PersistentFlags().BoolVar(&uploadSkipExistsCheck, "skipExistsCheck", false, "Don't check if blobs exist before uploading")
cmd.PersistentFlags().BoolVar(&uploadDeleteBlobsAfterUpload, "deleteBlobsAfterUpload", false, "Delete blobs after uploading them")
rootCmd.AddCommand(cmd)
}
func uploadCmd(cmd *cobra.Command, args []string) {
db := &db.SQL{
LogQueries: log.GetLevel() == log.DebugLevel,
}
db := new(db.SQL)
err := db.Connect(globalConfig.DBConn)
checkErr(err)
st := store.NewDBBackedStore(
store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName),
db, false)
st := store.NewDBBackedS3Store(
store.NewS3BlobStore(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName),
db)
uploader := reflector.NewUploader(db, st, uploadWorkers, uploadSkipExistsCheck, uploadDeleteBlobsAfterUpload)
uploader := reflector.NewUploader(db, st, uploadWorkers, uploadSkipExistsCheck)
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)

View file

@ -2,9 +2,9 @@ package cmd
import (
"fmt"
"time"
"github.com/lbryio/reflector.go/meta"
"github.com/spf13/cobra"
)
@ -18,5 +18,5 @@ func init() {
}
func versionCmd(cmd *cobra.Command, args []string) {
fmt.Println(meta.FullName())
fmt.Printf("version %s (built %s)\n", meta.Version, meta.BuildTime.Format(time.RFC3339))
}

db/db.go (445 lines changed)
View file

@ -3,22 +3,15 @@ package db
import (
"context"
"database/sql"
"fmt"
"runtime"
"strings"
"time"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
qt "github.com/lbryio/lbry.go/v2/extras/query"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/lbry.go/dht/bits"
"github.com/lbryio/lbry.go/extras/errors"
qt "github.com/lbryio/lbry.go/extras/query"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql" // blank import for db driver ensures its imported even if its not used
log "github.com/sirupsen/logrus"
"github.com/volatiletech/null/v8"
"go.uber.org/atomic"
)
// SdBlob is a special blob that contains information on the rest of the blobs in the stream
@ -36,38 +29,17 @@ type SdBlob struct {
StreamHash string `json:"stream_hash"`
}
type trackAccess int
const (
TrackAccessNone trackAccess = iota // Don't track accesses
TrackAccessStreams // Track accesses at the stream level
TrackAccessBlobs // Track accesses at the blob level
)
// SQL implements the DB interface
type SQL struct {
conn *sql.DB
// Track the approx last time a blob or stream was accessed
TrackAccess trackAccess
// Instead of deleting a blob, marked it as not stored in the db
SoftDelete bool
// Log executed queries. qt.InterpolateParams is cpu-heavy. This avoids that call if not needed.
LogQueries bool
}
func (s SQL) logQuery(query string, args ...interface{}) {
if !s.LogQueries {
return
}
qStr, err := qt.InterpolateParams(query, args...)
func logQuery(query string, args ...interface{}) {
s, err := qt.InterpolateParams(query, args...)
if err != nil {
log.Errorln(err)
} else {
log.Debugln(qStr)
log.Debugln(s)
}
}
@ -98,97 +70,15 @@ func (s *SQL) AddBlob(hash string, length int, isStored bool) error {
return err
}
//AddBlobs adds blobs to the database.
func (s *SQL) AddBlobs(hash []string) error {
if s.conn == nil {
return errors.Err("not connected")
}
batch := 10000
totalBlobs := int64(len(hash))
work := make(chan []string, 1000)
stopper := stop.New()
var totalInserted atomic.Int64
start := time.Now()
go func() {
for i := 0; i < len(hash); i += batch {
j := i + batch
if j > len(hash) {
j = len(hash)
}
work <- hash[i:j]
}
log.Infof("done loading %d hashes in the work queue", len(hash))
close(work)
}()
for i := 0; i < runtime.NumCPU(); i++ {
stopper.Add(1)
go func(worker int) {
log.Infof("starting worker %d", worker)
defer stopper.Done()
for hashes := range work {
inserted := totalInserted.Load()
remaining := totalBlobs - inserted
if inserted > 0 {
timePerBlob := time.Since(start).Microseconds() / inserted
remainingTime := time.Duration(remaining*timePerBlob) * time.Microsecond
log.Infof("[T%d] processing batch of %d items. ETA: %s", worker, len(hashes), remainingTime.String())
}
err := s.insertBlobs(hashes) // Process the batch.
if err != nil {
log.Errorf("error while inserting batch: %s", errors.FullTrace(err))
}
totalInserted.Add(int64(len(hashes)))
}
}(i)
}
stopper.Wait()
return nil
}
func (s *SQL) insertBlobs(hashes []string) error {
var (
q string
//args []interface{}
)
dayAgo := time.Now().AddDate(0, 0, -1).Format("2006-01-02 15:04:05")
q = "insert into blob_ (hash, is_stored, length, last_accessed_at) values "
for _, hash := range hashes {
// prepared statements slow everything down by a lot due to reflection
// for this specific instance we'll go ahead and hardcode the query to make it go faster
q += fmt.Sprintf("('%s',1,%d,'%s'),", hash, stream.MaxBlobSize, dayAgo)
//args = append(args, hash, true, stream.MaxBlobSize, dayAgo)
}
q = strings.TrimSuffix(q, ",")
_, err := s.exec(q)
if err != nil {
return err
}
return nil
}
func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error) {
if length <= 0 {
return 0, errors.Err("length must be positive")
}
var (
q string
args []interface{}
blobID, err := s.exec(
"INSERT INTO blob_ (hash, is_stored, length) VALUES (?,?,?) ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored))",
hash, isStored, length,
)
if s.TrackAccess == TrackAccessBlobs {
args = []interface{}{hash, isStored, length, time.Now()}
q = "INSERT INTO blob_ (hash, is_stored, length, last_accessed_at) VALUES (" + qt.Qs(len(args)) + ") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored)), last_accessed_at = VALUES(last_accessed_at)"
} else {
args = []interface{}{hash, isStored, length}
q = "INSERT INTO blob_ (hash, is_stored, length) VALUES (" + qt.Qs(len(args)) + ") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored))"
}
blobID, err := s.exec(q, args...)
if err != nil {
return 0, err
}
@ -201,33 +91,16 @@ func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error)
if blobID == 0 {
return 0, errors.Err("blob ID is 0 even after INSERTing and SELECTing")
}
if s.TrackAccess == TrackAccessBlobs {
err := s.touchBlobs([]uint64{uint64(blobID)})
if err != nil {
return 0, errors.Err(err)
}
}
}
return blobID, nil
}
func (s *SQL) insertStream(hash string, sdBlobID int64) (int64, error) {
var (
q string
args []interface{}
func (s *SQL) insertStream(hash, sdHash string, sdBlobID int64) (int64, error) {
streamID, err := s.exec(
"INSERT IGNORE INTO stream (hash, sd_hash, sd_blob_id) VALUES (?,?, ?)",
hash, sdHash, sdBlobID,
)
if s.TrackAccess == TrackAccessStreams {
args = []interface{}{hash, sdBlobID, time.Now()}
q = "INSERT IGNORE INTO stream (hash, sd_blob_id, last_accessed_at) VALUES (" + qt.Qs(len(args)) + ")"
} else {
args = []interface{}{hash, sdBlobID}
q = "INSERT IGNORE INTO stream (hash, sd_blob_id) VALUES (" + qt.Qs(len(args)) + ")"
}
streamID, err := s.exec(q, args...)
if err != nil {
return 0, errors.Err(err)
}
@ -240,20 +113,13 @@ func (s *SQL) insertStream(hash string, sdBlobID int64) (int64, error) {
if streamID == 0 {
return 0, errors.Err("stream ID is 0 even after INSERTing and SELECTing")
}
if s.TrackAccess == TrackAccessStreams {
err := s.touchStreams([]uint64{uint64(streamID)})
if err != nil {
return 0, errors.Err(err)
}
}
}
return streamID, nil
}
// HasBlob checks if the database contains the blob information.
func (s *SQL) HasBlob(hash string, touch bool) (bool, error) {
exists, err := s.HasBlobs([]string{hash}, touch)
func (s *SQL) HasBlob(hash string) (bool, error) {
exists, err := s.HasBlobs([]string{hash})
if err != nil {
return false, err
}
@ -261,72 +127,13 @@ func (s *SQL) HasBlob(hash string, touch bool) (bool, error) {
}
// HasBlobs checks if the database contains the set of blobs and returns a bool map.
func (s *SQL) HasBlobs(hashes []string, touch bool) (map[string]bool, error) {
exists, idsNeedingTouch, err := s.hasBlobs(hashes)
if touch {
if s.TrackAccess == TrackAccessBlobs {
_ = s.touchBlobs(idsNeedingTouch)
} else if s.TrackAccess == TrackAccessStreams {
_ = s.touchStreams(idsNeedingTouch)
}
}
return exists, err
}
func (s *SQL) touchBlobs(blobIDs []uint64) error {
if len(blobIDs) == 0 {
return nil
}
query := "UPDATE blob_ SET last_accessed_at = ? WHERE id IN (" + qt.Qs(len(blobIDs)) + ")"
args := make([]interface{}, len(blobIDs)+1)
args[0] = time.Now()
for i := range blobIDs {
args[i+1] = blobIDs[i]
}
startTime := time.Now()
_, err := s.exec(query, args...)
log.Debugf("touched %d blobs and took %s", len(blobIDs), time.Since(startTime))
return errors.Err(err)
}
func (s *SQL) touchStreams(streamIDs []uint64) error {
if len(streamIDs) == 0 {
return nil
}
query := "UPDATE stream SET last_accessed_at = ? WHERE id IN (" + qt.Qs(len(streamIDs)) + ")"
args := make([]interface{}, len(streamIDs)+1)
args[0] = time.Now()
for i := range streamIDs {
args[i+1] = streamIDs[i]
}
startTime := time.Now()
_, err := s.exec(query, args...)
log.Debugf("touched %d streams and took %s", len(streamIDs), time.Since(startTime))
return errors.Err(err)
}
func (s *SQL) hasBlobs(hashes []string) (map[string]bool, []uint64, error) {
func (s *SQL) HasBlobs(hashes []string) (map[string]bool, error) {
if s.conn == nil {
return nil, nil, errors.Err("not connected")
return nil, errors.Err("not connected")
}
var (
hash string
blobID uint64
streamID null.Uint64
lastAccessedAt null.Time
)
var needsTouch []uint64
var hash string
exists := make(map[string]bool)
touchDeadline := time.Now().Add(-6 * time.Hour) // touch blob if last accessed before this time
maxBatchSize := 10000
doneIndex := 0
@ -338,29 +145,14 @@ func (s *SQL) hasBlobs(hashes []string) (map[string]bool, []uint64, error) {
log.Debugf("getting hashes[%d:%d] of %d", doneIndex, sliceEnd, len(hashes))
batch := hashes[doneIndex:sliceEnd]
var query string
if s.TrackAccess == TrackAccessBlobs {
query = `SELECT b.hash, b.id, NULL, b.last_accessed_at
FROM blob_ b
WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
} else if s.TrackAccess == TrackAccessStreams {
query = `SELECT b.hash, b.id, s.id, s.last_accessed_at
FROM blob_ b
LEFT JOIN stream_blob sb ON b.id = sb.blob_id
INNER JOIN stream s on (sb.stream_id = s.id or s.sd_blob_id = b.id)
WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
} else {
query = `SELECT b.hash, b.id, NULL, NULL
FROM blob_ b
WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
}
args := make([]interface{}, len(batch))
query := "SELECT hash FROM blob_ WHERE is_stored = ? && hash IN (" + qt.Qs(len(batch)) + ")"
args := make([]interface{}, len(batch)+1)
args[0] = true
for i := range batch {
args[i] = batch[i]
args[i+1] = batch[i]
}
s.logQuery(query, args...)
logQuery(query, args...)
err := func() error {
startTime := time.Now()
@ -372,18 +164,11 @@ WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
defer closeRows(rows)
for rows.Next() {
err := rows.Scan(&hash, &blobID, &streamID, &lastAccessedAt)
err := rows.Scan(&hash)
if err != nil {
return errors.Err(err)
}
exists[hash] = true
if !lastAccessedAt.Valid || lastAccessedAt.Time.Before(touchDeadline) {
if s.TrackAccess == TrackAccessBlobs {
needsTouch = append(needsTouch, blobID)
} else if s.TrackAccess == TrackAccessStreams && !streamID.IsZero() {
needsTouch = append(needsTouch, streamID.Uint64)
}
}
}
err = rows.Err()
@ -395,22 +180,16 @@ WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
return nil
}()
if err != nil {
return nil, nil, err
return nil, err
}
}
return exists, needsTouch, nil
return exists, nil
}
// Delete will remove (or soft-delete) the blob from the db
// NOTE: If SoftDelete is enabled, streams will never be deleted
// Delete will remove the blob from the db
func (s *SQL) Delete(hash string) error {
if s.SoftDelete {
_, err := s.exec("UPDATE blob_ SET is_stored = 0 WHERE hash = ?", hash)
return errors.Err(err)
}
_, err := s.exec("DELETE FROM stream WHERE sd_blob_id = (SELECT id FROM blob_ WHERE hash = ?)", hash)
_, err := s.exec("DELETE FROM stream WHERE sd_hash = ?", hash)
if err != nil {
return errors.Err(err)
}
@ -419,59 +198,11 @@ func (s *SQL) Delete(hash string) error {
return errors.Err(err)
}
//LeastRecentlyAccessedHashes gets the least recently accessed blobs
func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
if s.conn == nil {
return nil, errors.Err("not connected")
}
if s.TrackAccess != TrackAccessBlobs {
return nil, errors.Err("blob access tracking is disabled")
}
query := "SELECT hash from blob_ where is_stored = 1 order by last_accessed_at limit ?"
s.logQuery(query, maxBlobs)
rows, err := s.conn.Query(query, maxBlobs)
if err != nil {
return nil, errors.Err(err)
}
defer closeRows(rows)
blobs := make([]string, 0, maxBlobs)
for rows.Next() {
var hash string
err := rows.Scan(&hash)
if err != nil {
return nil, errors.Err(err)
}
blobs = append(blobs, hash)
}
return blobs, nil
}
func (s *SQL) Count() (int, error) {
if s.conn == nil {
return 0, errors.Err("not connected")
}
query := "SELECT count(id) from blob_"
if s.SoftDelete {
query += " where is_stored = 1"
}
s.logQuery(query)
var count int
err := s.conn.QueryRow(query).Scan(&count)
return count, errors.Err(err)
}
// Block will mark a blob as blocked
func (s *SQL) Block(hash string) error {
query := "INSERT IGNORE INTO blocked SET hash = ?"
args := []interface{}{hash}
s.logQuery(query, args...)
logQuery(query, args...)
_, err := s.conn.Exec(query, args...)
return errors.Err(err)
}
@ -479,7 +210,7 @@ func (s *SQL) Block(hash string) error {
// GetBlocked will return a list of blocked hashes
func (s *SQL) GetBlocked() (map[string]bool, error) {
query := "SELECT hash FROM blocked"
s.logQuery(query)
logQuery(query)
rows, err := s.conn.Query(query)
if err != nil {
return nil, errors.Err(err)
@ -515,14 +246,13 @@ func (s *SQL) MissingBlobsForKnownStream(sdHash string) ([]string, error) {
query := `
SELECT b.hash FROM blob_ b
INNER JOIN stream_blob sb ON b.id = sb.blob_id
INNER JOIN stream s ON s.id = sb.stream_id
INNER JOIN blob_ sdb ON sdb.id = s.sd_blob_id AND sdb.hash = ?
INNER JOIN stream_blob sb ON b.hash = sb.blob_hash
INNER JOIN stream s ON s.hash = sb.stream_hash AND s.sd_hash = ?
WHERE b.is_stored = 0
`
args := []interface{}{sdHash}
s.logQuery(query, args...)
logQuery(query, args...)
rows, err := s.conn.Query(query, args...)
if err != nil {
@ -561,7 +291,7 @@ func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int, sdBlob SdBlob) error {
return err
}
streamID, err := s.insertStream(sdBlob.StreamHash, sdBlobID)
streamID, err := s.insertStream(sdBlob.StreamHash, sdHash, sdBlobID)
if err != nil {
return err
}
@ -578,10 +308,9 @@ func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int, sdBlob SdBlob) error {
return err
}
args := []interface{}{streamID, blobID, contentBlob.BlobNum}
_, err = s.exec(
"INSERT IGNORE INTO stream_blob (stream_id, blob_id, num) VALUES ("+qt.Qs(len(args))+")",
args...,
"INSERT IGNORE INTO stream_blob (stream_id, stream_hash, blob_id, blob_hash, num) VALUES (?,?,?,?,?)",
streamID, sdBlob.StreamHash, blobID, contentBlob.BlobHash, contentBlob.BlobNum,
)
if err != nil {
return errors.Err(err)
@ -601,7 +330,7 @@ func (s *SQL) GetHashRange() (string, string, error) {
query := "SELECT MIN(hash), MAX(hash) from blob_"
s.logQuery(query)
logQuery(query)
err := s.conn.QueryRow(query).Scan(&min, &max)
return min, max, err
@ -625,7 +354,7 @@ func (s *SQL) GetStoredHashesInRange(ctx context.Context, start, end bits.Bitmap
query := "SELECT hash FROM blob_ WHERE hash >= ? AND hash <= ? AND is_stored = 1"
args := []interface{}{start.Hex(), end.Hex()}
s.logQuery(query, args...)
logQuery(query, args...)
rows, err := s.conn.Query(query, args...)
defer closeRows(rows)
@ -706,7 +435,7 @@ func closeRows(rows *sql.Rows) {
}
func (s *SQL) exec(query string, args ...interface{}) (int64, error) {
s.logQuery(query, args...)
logQuery(query, args...)
attempt, maxAttempts := 0, 3
Retry:
attempt++
@ -744,22 +473,17 @@ CREATE TABLE blob_ (
hash char(96) NOT NULL,
is_stored TINYINT(1) NOT NULL DEFAULT 0,
length bigint(20) unsigned DEFAULT NULL,
last_accessed_at TIMESTAMP NULL DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY blob_hash_idx (hash),
KEY `blob_last_accessed_idx` (`last_accessed_at`),
KEY `is_stored_idx` (`is_stored`)
UNIQUE KEY blob_hash_idx (hash)
);
CREATE TABLE stream (
id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE,
hash char(96) NOT NULL,
sd_blob_id BIGINT UNSIGNED NOT NULL,
last_accessed_at TIMESTAMP NULL DEFAULT NULL,
PRIMARY KEY (id),
UNIQUE KEY stream_hash_idx (hash),
KEY stream_sd_blob_id_idx (sd_blob_id),
KEY last_accessed_at_idx (last_accessed_at),
FOREIGN KEY (sd_blob_id) REFERENCES blob_ (id) ON DELETE RESTRICT ON UPDATE CASCADE
);
@ -778,4 +502,93 @@ CREATE TABLE blocked (
PRIMARY KEY (hash)
);
FOR THE MIGRATION, USE THESE TRIGGERS
CREATE TRIGGER tgr_stream_insert AFTER INSERT ON stream FOR EACH ROW INSERT INTO stream_new SET id = NEW.id, hash = NEW.hash, sd_blob_id = (SELECT id from blob_ where hash = NEW.sd_hash);
CREATE TRIGGER tgr_stream_delete AFTER DELETE ON stream FOR EACH ROW DELETE FROM stream_new WHERE id = OLD.id;
CREATE TRIGGER tgr_blob_insert AFTER INSERT ON blob_ FOR EACH ROW INSERT INTO blob_new SET id = NEW.id, hash = NEW.hash, is_stored = NEW.is_stored, length = NEW.length;
CREATE TRIGGER tgr_blob_update AFTER UPDATE ON blob_ FOR EACH ROW UPDATE blob_new SET hash = NEW.hash, is_stored = NEW.is_stored, length = NEW.length WHERE id = NEW.id;
CREATE TRIGGER tgr_blob_delete AFTER DELETE ON blob_ FOR EACH ROW DELETE FROM blob_new WHERE id = OLD.id;
CREATE TRIGGER tgr_stream_blob_insert AFTER INSERT ON stream_blob FOR EACH ROW INSERT INTO stream_blob_new SET stream_id = NEW.stream_id, blob_id = NEW.blob_id, num = NEW.num;
CREATE TRIGGER tgr_stream_blob_delete AFTER DELETE ON stream_blob FOR EACH ROW DELETE FROM stream_blob_new WHERE stream_id = OLD.stream_id AND blob_id = OLD.blob_id;
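The triggers above keep blob_new, stream_new, and stream_blob_new in sync with live writes, but those shadow tables are never created in this diff. A minimal sketch of what they would look like, assuming they simply mirror the ID-based schema shown above (the column names come from the triggers themselves; the exact types and keys here are an assumption):

CREATE TABLE blob_new (
  id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE,
  hash char(96) NOT NULL,
  is_stored TINYINT(1) NOT NULL DEFAULT 0,
  length bigint(20) unsigned DEFAULT NULL,
  PRIMARY KEY (id),
  UNIQUE KEY blob_hash_idx (hash)
);

CREATE TABLE stream_new (
  id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT UNIQUE,
  hash char(96) NOT NULL,
  sd_blob_id BIGINT UNSIGNED NOT NULL,
  PRIMARY KEY (id),
  UNIQUE KEY stream_hash_idx (hash)
);

CREATE TABLE stream_blob_new (
  stream_id BIGINT UNSIGNED NOT NULL,
  blob_id BIGINT UNSIGNED NOT NULL,
  num int NOT NULL,
  PRIMARY KEY (stream_id, blob_id)
);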
DROP PROCEDURE IF EXISTS copyblobs;
DELIMITER $$
CREATE PROCEDURE copyblobs()
BEGIN
DECLARE first_trigger_blob_id INT DEFAULT 124802284; # ID of first blob that was copied using the triggers. don't copy anything after that.
DECLARE i INT DEFAULT 0;
DECLARE minid BIGINT UNSIGNED DEFAULT 0;
SELECT min(id) INTO minid FROM blob_ WHERE id < first_trigger_blob_id AND id > (SELECT coalesce(max(id),0) from blob_new where id < first_trigger_blob_id);
wloop: WHILE minid is not null DO
#IF (i >= 100) THEN
# LEAVE wloop;
#END IF;
SET i = i + 1;
IF (i % 5000 = 0) THEN
SELECT concat('loop ', i, ', id ', minid) as progress;
END IF;
IF (i % 10 = 1) THEN # we start our loops on 1, like normal people
START TRANSACTION;
END IF;
INSERT INTO blob_new (id, hash, is_stored, length) SELECT id, hash, is_stored, length from blob_ where id = minid;
IF (i % 10 = 0) THEN
COMMIT;
END IF;
SELECT min(id) INTO minid FROM blob_ WHERE id < first_trigger_blob_id AND id > minid;
END WHILE wloop;
COMMIT;
END$$
DELIMITER ;
DROP PROCEDURE IF EXISTS copystreams;
DELIMITER $$
CREATE PROCEDURE copystreams()
BEGIN
DECLARE first_trigger_stream_id INT DEFAULT 1465749; # ID of first stream that was copied using the triggers. don't copy anything after that.
DECLARE i INT DEFAULT 0;
DECLARE minid BIGINT UNSIGNED DEFAULT 0;
DECLARE streamhash char(96);
SELECT min(id) INTO minid FROM stream WHERE id < first_trigger_stream_id AND id > (SELECT coalesce(max(id),0) from stream_new where id < first_trigger_stream_id);
wloop: WHILE minid is not null DO
#IF (i >= 10) THEN
# LEAVE wloop;
#END IF;
SET i = i + 1;
IF (i % 5000 = 0) THEN
SELECT concat('loop ', i, ', id ', minid) as progress;
END IF;
IF (i % 10 = 1) THEN # we start our loops on 1, like normal people
START TRANSACTION;
END IF;
SELECT hash INTO streamhash FROM stream WHERE id = minid;
INSERT INTO stream_new (id, hash, sd_blob_id) SELECT s.id, s.hash, b.id FROM stream s INNER JOIN blob_ b ON s.sd_hash = b.hash WHERE s.id = minid;
INSERT INTO stream_blob_new SELECT minid, b.id, sb.num FROM stream_blob sb INNER JOIN blob_ b ON sb.blob_hash = b.hash WHERE sb.stream_hash = streamhash;
IF (i % 10 = 0) THEN
COMMIT;
END IF;
SELECT min(id) INTO minid FROM stream WHERE id < first_trigger_stream_id AND id > minid;
END WHILE wloop;
COMMIT;
END$$
DELIMITER ;
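With the triggers installed and both procedures defined, the backfill itself is just two calls. Everything after the CALLs below is a hedged sketch of the remaining cut-over steps, not something this changeset performs:

CALL copyblobs();   -- backfills blob_new up to first_trigger_blob_id, committing every 10 inserts
CALL copystreams(); -- backfills stream_new and stream_blob_new the same way

-- once the backfills reach the trigger watermark IDs, spot-check the copies, e.g.:
-- SELECT (SELECT count(*) FROM blob_ WHERE id < 124802284) - (SELECT count(*) FROM blob_new WHERE id < 124802284);
-- then swap the tables during a brief write pause (the rename targets are an assumption):
-- RENAME TABLE blob_ TO blob_old, blob_new TO blob_;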
*/
179
go.mod
View file
@ -1,124 +1,67 @@
module github.com/lbryio/reflector.go
go 1.20
replace github.com/btcsuite/btcd => github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19
require (
github.com/aws/aws-sdk-go v1.45.24
github.com/bluele/gcache v0.0.2
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
cloud.google.com/go v0.34.0 // indirect
github.com/armon/go-metrics v0.0.0-20180713145231-3c58d8115a78
github.com/aws/aws-sdk-go v1.16.11
github.com/btcsuite/btcd v0.0.0-20190109040709-5bda5314ca95 // indirect
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a
github.com/btcsuite/goleveldb v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db
github.com/gin-gonic/gin v1.9.1
github.com/go-sql-driver/mysql v1.7.1
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.3
github.com/google/gops v0.3.28
github.com/gorilla/mux v1.8.0
github.com/hashicorp/serf v0.10.1
github.com/go-errors/errors v1.0.1
github.com/go-ini/ini v1.41.0
github.com/go-sql-driver/mysql v0.0.0-20180719071942-99ff426eb706
github.com/golang/mock v1.2.0 // indirect
github.com/golang/protobuf v1.3.1
github.com/gorilla/context v1.1.1
github.com/gorilla/mux v1.6.2
github.com/gorilla/rpc v1.1.0
github.com/gorilla/websocket v1.4.0
github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa
github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c
github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0
github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47
github.com/hashicorp/memberlist v0.1.0
github.com/hashicorp/serf v0.0.0-20180530155958-984a73625de3
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc
github.com/karrick/godirwalk v1.17.0
github.com/lbryio/chainquery v1.9.1-0.20230515181855-2fcba3115cfe
github.com/lbryio/lbry.go/v2 v2.7.2-0.20230307181431-a01aa6dc0629
github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
github.com/prometheus/client_golang v1.16.0
github.com/quic-go/quic-go v0.39.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cast v1.5.1
github.com/spf13/cobra v1.7.0
github.com/stretchr/testify v1.8.4
github.com/volatiletech/null/v8 v8.1.2
go.uber.org/atomic v1.11.0
golang.org/x/sync v0.4.0
)
require (
github.com/armon/go-metrics v0.4.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/friendsofgo/errors v0.9.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gofrs/uuid v4.2.0+incompatible // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
github.com/gorilla/rpc v1.2.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v0.5.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/memberlist v0.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.41 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-20 v0.3.4 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/slack-go/slack v0.12.1 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.15.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/volatiletech/inflect v0.0.1 // indirect
github.com/volatiletech/randomize v0.0.1 // indirect
github.com/volatiletech/strmangle v0.0.4 // indirect
go.uber.org/mock v0.3.0 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.9.1 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
github.com/inconshreveable/mousetrap v1.0.0
github.com/jessevdk/go-flags v1.4.0 // indirect
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
github.com/johntdyer/slack-go v0.0.0-20180213144715-95fac1160b22
github.com/johntdyer/slackrus v0.0.0-20180518184837-f7aae3243a07
github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec // indirect
github.com/lbryio/errors.go v0.0.0-20180223142025-ad03d3cc6a5c
github.com/lbryio/lbry.go v0.0.0-20190109223729-30c312501602
github.com/lbryio/types v0.0.0-20190422033210-321fb2abda9c
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5
github.com/miekg/dns v1.0.8
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/nlopes/slack v0.4.0
github.com/phayes/freeport v0.0.0-20171002185219-e27662a4a9d6
github.com/pkg/errors v0.8.1 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529
github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561
github.com/sergi/go-diff v1.0.0
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
github.com/sirupsen/logrus v1.3.0
github.com/spf13/cast v1.3.0
github.com/spf13/cobra v0.0.0-20180722215644-7c4570c3ebeb
github.com/spf13/pflag v1.0.1
github.com/uber-go/atomic v1.3.2
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 // indirect
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 // indirect
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
golang.org/x/sys v0.0.0-20190109145017-48ac38b7c8cb
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
golang.org/x/tools v0.0.0-20190109165630-d30e00c24034 // indirect
google.golang.org/appengine v1.4.0
google.golang.org/genproto v0.0.0-20190108161440-ae2f86662275 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79
gopkg.in/yaml.v2 v2.2.2 // indirect
honnef.co/go/tools v0.0.0-20190109154334-5bcec433c8ea // indirect
)
929
go.sum
View file
@ -1,843 +1,220 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.45.24 h1:TZx/CizkmCQn8Rtsb11iLYutEQVGK5PK9wAhwouELBo=
github.com/aws/aws-sdk-go v1.45.24/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7 h1:7gNKWnX6OF+ERiXVw4I9RsHhZ52aumXdFE07nEx5v20=
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7/go.mod h1:M/KA3XJG5PJaApPiv4gWNsgcSJquOQTqumZNLyYE0KM=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/armon/go-metrics v0.0.0-20180713145231-3c58d8115a78 h1:mdRSArcFLfW0VoL34LZAKSz6LkkK4jFxVx2xYavACMg=
github.com/armon/go-metrics v0.0.0-20180713145231-3c58d8115a78/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v0.0.0-20180806205706-c0447dbaaf19 h1:2di6C9H5QJ+D5LPazymJz8s2kRd8YkbN7knV17yH1Yw=
github.com/aws/aws-sdk-go v0.0.0-20180806205706-c0447dbaaf19/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.16.11 h1:g/c7gJeVyHoXCxM2fddS85bPGVkBF8s2q8t3fyElegc=
github.com/aws/aws-sdk-go v1.16.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/btcsuite/btcd v0.0.0-20180531025944-86fed781132a/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
github.com/btcsuite/btcd v0.0.0-20190109040709-5bda5314ca95/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/btcutil v0.0.0-20180524032703-d4cc87b86016/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE=
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db h1:oZ4U9IqO8NS+61OmGTBi8vopzqTRxwQeogyBHdrhjbc=
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db/go.mod h1:Pk7/9x6tyChFTkahDvLBQMlvdsWvfC+yU8HTT5VD314=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/friendsofgo/errors v0.9.2 h1:X6NYxef4efCBdwI7BgS820zFaN7Cphrmb+Pljdzjtgk=
github.com/friendsofgo/errors v0.9.2/go.mod h1:yCvFW5AkDIL9qn7suHVLiI/gH228n7PC4Pn44IGoTOI=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.38.1 h1:hbtfM8emWUVo9GnXSloXYyFbXxZ+tG6sbepSStoe1FY=
github.com/go-ini/ini v1.38.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.38.2/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.41.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
github.com/go-sql-driver/mysql v0.0.0-20180719071942-99ff426eb706 h1:P3NPKb7qq581SeMCB+dU1SuCX1kQh8VoQ/4HmT2ftQY=
github.com/go-sql-driver/mysql v0.0.0-20180719071942-99ff426eb706/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk=
github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/rpc v1.1.0 h1:marKfvVP0Gpd/jHlVBKCQ8RAoUPdX7K1Nuh6l1BNh7A=
github.com/gorilla/rpc v1.1.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ=
github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164=
github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa h1:0nA8i+6Rwqaq9xlpmVxxTwk6rxiEhX+E6Wh4vPNHiS8=
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw=
github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c h1:BTAbnbegUIMB6xmQCwWE8yRzbA4XSpnZY5hvRJC188I=
github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 h1:j30noezaCfvNLcdMYSvHLv81DxYRSt1grlpseG67vhU=
github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 h1:7YOlAIO2YWnJZkQp7B5eFykaIY7C9JndqAFQyVV5BhM=
github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/memberlist v0.1.0 h1:qSsCiC0WYD39lbSitKNt40e30uorm2Ss/d4JGU1hzH8=
github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE=
github.com/hashicorp/serf v0.0.0-20180530155958-984a73625de3 h1:NUr1hG6WO9sI1x8ofSimmpqfJ+rEHiHP/PLEA33rcfQ=
github.com/hashicorp/serf v0.0.0-20180530155958-984a73625de3/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8=
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e h1:5tRmeUw/tXT/DvaoloWTWwlyrEZrKA7pnrz/X+g9s34=
github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e/go.mod h1:u0Jo4f2dNlTJeeOywkM6bLwxq6gC3pZ9rEFHn3AhTdk=
github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc h1:enUIjGI+ljPLV2X3Mu3noR0P3m2NaIFGRsp96J8RBio=
github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc/go.mod h1:EM3NFHkhmCX05s6UvxWSJ8h/3mluH4tF6bYr9FXF1Cg=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/johntdyer/slack-go v0.0.0-20180213144715-95fac1160b22 h1:jKUP9TQ0c7X3w6+IPyMit07RE42MtTWNd77sN2cHngQ=
github.com/johntdyer/slack-go v0.0.0-20180213144715-95fac1160b22/go.mod h1:u0Jo4f2dNlTJeeOywkM6bLwxq6gC3pZ9rEFHn3AhTdk=
github.com/johntdyer/slackrus v0.0.0-20180518184837-f7aae3243a07 h1:+kBG/8rjCa6vxJZbUjAiE4MQmBEBYc8nLEb51frnvBY=
github.com/johntdyer/slackrus v0.0.0-20180518184837-f7aae3243a07/go.mod h1:j1kV/8f3jowErEq4XyeypkCdvg5EeHkf0YCKCcq5Ybo=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/lbryio/chainquery v1.9.1-0.20230515181855-2fcba3115cfe h1:xZ3ma924JghoG6mo0xSNTDujy9hR0mczqIm+GGeeux0=
github.com/lbryio/chainquery v1.9.1-0.20230515181855-2fcba3115cfe/go.mod h1:GfIqGzrg0GA0+Wb0dWRgAtATtUGsYRHIpTQEIoapkKU=
github.com/lbryio/lbry.go/v2 v2.7.2-0.20230307181431-a01aa6dc0629 h1:klpHPQ5iERUhczdITuKUpYuUZrWDGWb3zlAv3qYgc+o=
github.com/lbryio/lbry.go/v2 v2.7.2-0.20230307181431-a01aa6dc0629/go.mod h1:JTkXBAVK8iHNcYmffbLzQ7IFKd/+/oBQGIwiG53bbqw=
github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19 h1:/zWD8dVIl7bV1TdJWqPqy9tpqixzX2Qxgit48h3hQcY=
github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6 h1:IhL9D2QfDWhLNDQpZ3Uiiw0gZEUYeLBS6uDqOd59G5o=
github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6/go.mod h1:CG3wsDv5BiVYQd5i1Jp7wGsaVyjZTJshqXeWMVKsISE=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lbryio/errors.go v0.0.0-20180223142025-ad03d3cc6a5c h1:BhdcWGsuKif/XoSZnqVGNqJ1iEmH0czWR5upj+AuR8M=
github.com/lbryio/errors.go v0.0.0-20180223142025-ad03d3cc6a5c/go.mod h1:muH7wpUqE8hRA3OrYYosw9+Sl681BF9cwcjzE+OCNK8=
github.com/lbryio/lbry.go v0.0.0-20180803110248-e2c96944fc48 h1:ojUrZuL8vqHxyhivNdpjDaIBkjzWktS9FOTTFkVG8yU=
github.com/lbryio/lbry.go v0.0.0-20180803110248-e2c96944fc48/go.mod h1:jyMyYNmA1t7GTkgYs6z2kMnDFTFzlGBr+IkG9LXHk9M=
github.com/lbryio/lbry.go v0.0.0-20190109223729-30c312501602 h1:HSv40ELfMPW2GIpyWwX2JaSAaYz5XW1GNweYc89F2kc=
github.com/lbryio/lbry.go v0.0.0-20190109223729-30c312501602/go.mod h1:YEuFJD/oHNra6BFy+NfuvS84Wg6RMWJFGtiCCCc6MmQ=
github.com/lbryio/ozzo-validation v0.0.0-20170323141101-d1008ad1fd04/go.mod h1:fbG/dzobG8r95KzMwckXiLMHfFjZaBRQqC9hPs2XAQ4=
github.com/lbryio/types v0.0.0-20171215152337-0a913ba650dd h1:5wQgwcaLqLMaFIPju2QHcCseSHlArxpQZ1szEGzrp6Y=
github.com/lbryio/types v0.0.0-20171215152337-0a913ba650dd/go.mod h1:CG3wsDv5BiVYQd5i1Jp7wGsaVyjZTJshqXeWMVKsISE=
github.com/lbryio/types v0.0.0-20181001180206-594241d24e00 h1:1qRpd8lcyVigX+kYkwQL13gpOURyytgvxZtuIQfPPX8=
github.com/lbryio/types v0.0.0-20181001180206-594241d24e00/go.mod h1:CG3wsDv5BiVYQd5i1Jp7wGsaVyjZTJshqXeWMVKsISE=
github.com/lbryio/types v0.0.0-20190422033210-321fb2abda9c h1:m3O7561xBQ00lfUVayW4c6SnpVbUDQtPUwGcGYSUYQA=
github.com/lbryio/types v0.0.0-20190422033210-321fb2abda9c/go.mod h1:CG3wsDv5BiVYQd5i1Jp7wGsaVyjZTJshqXeWMVKsISE=
github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5/go.mod h1:c2mYKRyMb1BPkO5St0c/ps62L4S0W2NAkaTXj9qEI+0=
github.com/lusis/slack-test v0.0.0-20180109053238-3c758769bfa6/go.mod h1:sFlOUpQL1YcjhFVXhg1CG8ZASEs/Mf1oVb6H75JL/zg=
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 h1:mG83tLXWSRdcXMWfkoumVwhcCbf3jHF9QKv/m37BkM0=
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5/go.mod h1:H0aPCWffGOaDcjkw1iB7W9DVLp6GXmfcJY/7YZCWPA4=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/miekg/dns v1.0.8 h1:Zi8HNpze3NeRWH1PQV6O71YcvJRQ6j0lORO6DAEmAAI=
github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/mapstructure v0.0.0-20180511142126-bb74f1db0675/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/nlopes/slack v0.2.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM=
github.com/nlopes/slack v0.3.0 h1:jCxvaS8wC4Bb1jnbqZMjCDkOOgy4spvQWcrw/TF0L0E=
github.com/nlopes/slack v0.3.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM=
github.com/nlopes/slack v0.4.0 h1:OVnHm7lv5gGT5gkcHsZAyw++oHVFihbjWbL3UceUpiA=
github.com/nlopes/slack v0.4.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/phayes/freeport v0.0.0-20171002185219-e27662a4a9d6 h1:2bae6N0SZjgzk+Zg8mzTsfmpwHXY9VBNp9UdjhaElA0=
github.com/phayes/freeport v0.0.0-20171002185219-e27662a4a9d6/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg=
github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
github.com/quic-go/quic-go v0.39.0 h1:AgP40iThFMY0bj8jGxROhw3S0FMGa8ryqsmi9tBH3So=
github.com/quic-go/quic-go v0.39.0/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sebdah/goldie v1.0.0 h1:9GNhIat69MSlz/ndaBg48vl9dF5fI+NBB6kfOxgfkMc=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/slack-go/slack v0.12.1 h1:X97b9g2hnITDtNsNe5GkGx6O2/Sz/uC20ejRZN6QxOw=
github.com/slack-go/slack v0.12.1/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561 h1:IY+sDBJR/wRtsxq+626xJnt4Tw7/ROA9cDIR8MMhWyg=
github.com/sebdah/goldie v0.0.0-20180424091453-8784dd1ab561/go.mod h1:lvjGftC8oe7XPtyrOidaMi0rp5B9+XY/ZRUynGnuaxQ=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shopspring/decimal v0.0.0-20180607144847-19e3cb6c2930/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/sirupsen/logrus v0.0.0-20180523074243-ea8897e79973/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v0.0.0-20180731161355-d329d24db431 h1:tJ2phHULXperiIq6Cl3t4MLypicinjmlM3Y+lNEipuo=
github.com/sirupsen/logrus v0.0.0-20180731161355-d329d24db431/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME=
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/spf13/cast v1.2.0 h1:HHl1DSRbEQN2i8tJmtS6ViPyHx35+p51amrdsiTCrkg=
github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.0-20180722215644-7c4570c3ebeb h1:9EsYJzSlhhaP+nYmMOcptMF2VEUH52jxPzt/TX14KWM=
github.com/spf13/cobra v0.0.0-20180722215644-7c4570c3ebeb/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/volatiletech/inflect v0.0.1 h1:2a6FcMQyhmPZcLa+uet3VJ8gLn/9svWhJxJYwvE8KsU=
github.com/volatiletech/inflect v0.0.1/go.mod h1:IBti31tG6phkHitLlr5j7shC5SOo//x0AjDzaJU1PLA=
github.com/volatiletech/null/v8 v8.1.2 h1:kiTiX1PpwvuugKwfvUNX/SU/5A2KGZMXfGD0DUHdKEI=
github.com/volatiletech/null/v8 v8.1.2/go.mod h1:98DbwNoKEpRrYtGjWFctievIfm4n4MxG0A6EBUcoS5g=
github.com/volatiletech/randomize v0.0.1 h1:eE5yajattWqTB2/eN8df4dw+8jwAzBtbdo5sbWC4nMk=
github.com/volatiletech/randomize v0.0.1/go.mod h1:GN3U0QYqfZ9FOJ67bzax1cqZ5q2xuj2mXrXBjWaRTlY=
github.com/volatiletech/strmangle v0.0.1/go.mod h1:F6RA6IkB5vq0yTG4GQ0UsbbRcl3ni9P76i+JrTBKFFg=
github.com/volatiletech/strmangle v0.0.4 h1:CxrEPhobZL/PCZOTDSH1aq7s4Kv76hQpRoTVVlUOim4=
github.com/volatiletech/strmangle v0.0.4/go.mod h1:ycDvbDkjDvhC0NUU8w3fWwl5JEMTV56vTKXzR3GeR+0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo=
go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
github.com/uber-go/atomic v0.0.0-20180806045314-ca680462431f h1:vKpCeeburRE8ZXnuj9ptRqjm7WLD0O7ug28tRJuWM54=
github.com/uber-go/atomic v0.0.0-20180806045314-ca680462431f/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo=
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/ybbus/jsonrpc v0.0.0-20180411222309-2a548b7d822d/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180807104621-f027049dab0a h1:PulT0Y50PcfTWomfsD39bSQyVrjjWdIuJKfyR4nOCJw=
golang.org/x/crypto v0.0.0-20180807104621-f027049dab0a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o=
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc h1:F5tKCVGp+MUAHhKp5MZtGqAlGX3+oCsiL1Q629FL90M=
golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180807145015-19491d39cadb h1:H5qWxL6CeEdaEGPqB5nnxJja1ULIcpuu91vIqAuGoJ0=
golang.org/x/net v0.0.0-20180807145015-19491d39cadb/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180807141123-0718ef2ef256 h1:tKa4dTsBBeG8RnHO9sDPtJYNJNOc4ilC49ePrvii4To=
golang.org/x/sys v0.0.0-20180807141123-0718ef2ef256/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/sys v0.0.0-20190109145017-48ac38b7c8cb h1:1w588/yEchbPNpa9sEvOcMZYbWHedwJjg4VOAdDHWHk=
golang.org/x/sys v0.0.0-20190109145017-48ac38b7c8cb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190109165630-d30e00c24034/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190108161440-ae2f86662275/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 h1:FpCr9V8wuOei4BAen+93HtVJ+XSi+KPbaPKm0Vj5R64=
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79/go.mod h1:gWkaRU7CoXpezCBWfWjm3999QqS+1pYPXGbqQCTMzo8=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190109154334-5bcec433c8ea/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View file

@@ -1,339 +0,0 @@
package metrics

import (
	"context"
	"encoding/json"
	"errors"
	"io"
	"net/http"
	"strings"
	"syscall"
	"time"

	ee "github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/extras/stop"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	log "github.com/sirupsen/logrus"
)

type Server struct {
	srv  *http.Server
	stop *stop.Stopper
}

func NewServer(address string, path string) *Server {
	h := http.NewServeMux()
	h.Handle(path, promhttp.Handler())
	return &Server{
		srv: &http.Server{
			Addr:    address,
			Handler: h,
			//https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/
			//https://blog.cloudflare.com/exposing-go-on-the-internet/
			ReadTimeout:  5 * time.Second,
			WriteTimeout: 10 * time.Second,
			IdleTimeout:  120 * time.Second,
		},
		stop: stop.New(),
	}
}

func (s *Server) Start() {
	s.stop.Add(1)
	go func() {
		defer s.stop.Done()
		err := s.srv.ListenAndServe()
		if err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Error(err)
		}
	}()
}

func (s *Server) Shutdown() {
	_ = s.srv.Shutdown(context.Background())
	s.stop.StopAndWait()
}
const (
ns = "reflector"
subsystemCache = "cache"
subsystemITTT = "ittt"
labelDirection = "direction"
labelErrorType = "error_type"
DirectionUpload = "upload" // to reflector
DirectionDownload = "download" // from reflector
LabelCacheType = "cache_type"
LabelComponent = "component"
LabelSource = "source"
errConnReset = "conn_reset"
errReadConnReset = "read_conn_reset"
errWriteConnReset = "write_conn_reset"
errReadConnTimedOut = "read_conn_timed_out"
errNoNetworkActivity = "no_network_activity"
errWriteConnTimedOut = "write_conn_timed_out"
errWriteBrokenPipe = "write_broken_pipe"
errEPipe = "e_pipe"
errETimedout = "e_timedout"
errIOTimeout = "io_timeout"
errUnexpectedEOF = "unexpected_eof"
errUnexpectedEOFStr = "unexpected_eof_str"
errJSONSyntax = "json_syntax"
errBlobTooBig = "blob_too_big"
errInvalidPeerJSON = "invalid_peer_json"
errInvalidPeerData = "invalid_peer_data"
errRequestTooLarge = "request_too_large"
errDeadlineExceeded = "deadline_exceeded"
errHashMismatch = "hash_mismatch"
errProtectedBlob = "protected_blob"
errInvalidBlobHash = "invalid_blob_hash"
errZeroByteBlob = "zero_byte_blob"
errInvalidCharacter = "invalid_character"
errBlobNotFound = "blob_not_found"
errNoErr = "no_error"
errQuicProto = "quic_protocol_violation"
errOther = "other"
)
var (
ErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Name: "error_total",
Help: "Total number of errors",
}, []string{labelDirection, labelErrorType})
BlobDownloadCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "blob_download_total",
Help: "Total number of blobs downloaded from reflector",
})
PeerDownloadCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "peer_download_total",
Help: "Total number of blobs downloaded from reflector through tcp protocol",
})
Http3DownloadCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "http3_blob_download_total",
Help: "Total number of blobs downloaded from reflector through QUIC protocol",
})
HttpDownloadCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "http_blob_download_total",
Help: "Total number of blobs downloaded from reflector through HTTP protocol",
})
CacheHitCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemCache,
Name: "hit_total",
Help: "Total number of blobs retrieved from the cache storage",
}, []string{LabelCacheType, LabelComponent})
ThisHitCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemITTT,
Name: "this_hit_total",
Help: "Total number of blobs retrieved from the this storage",
})
ThatHitCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemITTT,
Name: "that_hit_total",
Help: "Total number of blobs retrieved from the that storage",
})
CacheMissCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemCache,
Name: "miss_total",
Help: "Total number of blobs retrieved from origin rather than cache storage",
}, []string{LabelCacheType, LabelComponent})
CacheOriginRequestsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: subsystemCache,
Name: "origin_requests_total",
Help: "How many Get requests are in flight from the cache to the origin",
}, []string{LabelCacheType, LabelComponent})
//during thundering-herd situations, the metric below should be a lot smaller than the metric above
CacheWaitingRequestsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: subsystemCache,
Name: "waiting_requests_total",
Help: "How many cache requests are waiting for an in-flight origin request",
}, []string{LabelCacheType, LabelComponent})
CacheLRUEvictCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemCache,
Name: "evict_total",
Help: "Count of blobs evicted from cache",
}, []string{LabelCacheType, LabelComponent})
CacheRetrievalSpeed = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "speed_mbps",
Help: "Speed of blob retrieval from cache or from origin",
}, []string{LabelCacheType, LabelComponent, LabelSource})
BlobUploadCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "blob_upload_total",
Help: "Total number of blobs uploaded to reflector",
})
SDBlobUploadCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "sdblob_upload_total",
Help: "Total number of SD blobs (and therefore streams) uploaded to reflector",
})
MtrInBytesTcp = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "tcp_in_bytes",
Help: "Total number of bytes downloaded through TCP",
})
MtrOutBytesTcp = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "tcp_out_bytes",
Help: "Total number of bytes streamed out through TCP",
})
MtrInBytesUdp = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "udp_in_bytes",
Help: "Total number of bytes downloaded through UDP",
})
MtrInBytesHttp = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "http_in_bytes",
Help: "Total number of bytes downloaded through HTTP",
})
MtrOutBytesUdp = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "udp_out_bytes",
Help: "Total number of bytes streamed out through UDP",
})
MtrOutBytesHttp = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "http_out_bytes",
Help: "Total number of bytes streamed out through UDP",
})
MtrInBytesReflector = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "reflector_in_bytes",
Help: "Total number of incoming bytes (from users)",
})
MtrOutBytesReflector = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "s3_out_bytes",
Help: "Total number of outgoing bytes (to S3)",
})
MtrInBytesS3 = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "s3_in_bytes",
Help: "Total number of incoming bytes (from S3-CF)",
})
Http3BlobReqQueue = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "http3_blob_request_queue_size",
Help: "Blob requests of https queue size",
})
HttpBlobReqQueue = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "http_blob_request_queue_size",
Help: "Blob requests queue size of the HTTP protocol",
})
RoutinesQueue = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Name: "routines",
Help: "routines running by type",
}, []string{"package", "kind"})
)
func CacheLabels(name, component string) prometheus.Labels {
return prometheus.Labels{
LabelCacheType: name,
LabelComponent: component,
}
}
func TrackError(direction string, e error) (shouldLog bool) { // shouldLog is a hack, but whatever
if e == nil {
return
}
err := ee.Wrap(e, 0)
errType := errOther
if strings.Contains(err.Error(), "i/o timeout") {
errType = errIOTimeout
} else if errors.Is(e, syscall.ECONNRESET) {
// Looks like we're getting this when direction == "download", but read_conn_reset and
// write_conn_reset when it's "upload"
errType = errConnReset
} else if errors.Is(e, context.DeadlineExceeded) {
errType = errDeadlineExceeded
} else if strings.Contains(err.Error(), "read: connection reset by peer") { // the other side closed the connection using TCP reset
errType = errReadConnReset
} else if strings.Contains(err.Error(), "write: connection reset by peer") { // the other side closed the connection using TCP reset
errType = errWriteConnReset
} else if errors.Is(e, syscall.ETIMEDOUT) {
errType = errETimedout
} else if strings.Contains(err.Error(), "read: connection timed out") { // the other side closed the connection using TCP reset
//log.Warnln("read conn timed out is not the same as ETIMEDOUT")
errType = errReadConnTimedOut
} else if strings.Contains(err.Error(), "NO_ERROR: No recent network activity") { // the other side closed the QUIC connection
//log.Warnln("read conn timed out is not the same as ETIMEDOUT")
errType = errNoNetworkActivity
} else if strings.Contains(err.Error(), "write: connection timed out") {
errType = errWriteConnTimedOut
} else if errors.Is(e, io.ErrUnexpectedEOF) {
errType = errUnexpectedEOF
} else if strings.Contains(err.Error(), "unexpected EOF") { // tried to read from closed pipe or socket
errType = errUnexpectedEOFStr
} else if errors.Is(e, syscall.EPIPE) {
errType = errEPipe
} else if strings.Contains(err.Error(), "write: broken pipe") { // tried to write to a pipe or socket that was closed by the peer
// I believe this is the same as EPipe when direction == "download", but not for upload
errType = errWriteBrokenPipe
//} else if errors.Is(e, reflector.ErrBlobTooBig) { # this creates a circular import
// errType = errBlobTooBig
} else if strings.Contains(err.Error(), "blob must be at most") {
//log.Warnln("blob must be at most X bytes is not the same as ErrBlobTooBig")
errType = errBlobTooBig
} else if strings.Contains(err.Error(), "invalid json request") {
errType = errInvalidPeerJSON
} else if strings.Contains(err.Error(), "Invalid data") {
errType = errInvalidPeerData
} else if strings.Contains(err.Error(), "request is too large") {
errType = errRequestTooLarge
} else if strings.Contains(err.Error(), "Invalid blob hash length") {
errType = errInvalidBlobHash
} else if strings.Contains(err.Error(), "hash of received blob data does not match hash from send request") {
errType = errHashMismatch
} else if strings.Contains(err.Error(), "blob not found") {
errType = errBlobNotFound
} else if strings.Contains(err.Error(), "requested blob is protected") {
errType = errProtectedBlob
} else if strings.Contains(err.Error(), "0-byte blob received") {
errType = errZeroByteBlob
} else if strings.Contains(err.Error(), "PROTOCOL_VIOLATION: tried to retire connection") {
errType = errQuicProto
} else if strings.Contains(err.Error(), "invalid character") {
errType = errInvalidCharacter
} else if _, ok := e.(*json.SyntaxError); ok {
errType = errJSONSyntax
} else if strings.Contains(err.Error(), "NO_ERROR") {
errType = errNoErr
} else {
log.Warnf("error '%s' for direction '%s' is not being tracked", err.TypeName(), direction)
shouldLog = true
}
ErrorCount.With(map[string]string{
labelDirection: direction,
labelErrorType: errType,
}).Inc()
return
}
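For orientation, here is a minimal, hypothetical sketch (not part of this diff) of how the metrics server and `TrackError` above might be wired together. The `:2112` port mirrors the default `--metrics-port` mentioned in the readme further down, and the import path assumes the package lives at `internal/metrics` as the peer server imports suggest.

```go
package main

import (
	"errors"

	"github.com/lbryio/reflector.go/internal/metrics"

	log "github.com/sirupsen/logrus"
)

func main() {
	// serve Prometheus metrics on /metrics, using the default port from the readme
	srv := metrics.NewServer(":2112", "/metrics")
	srv.Start()
	defer srv.Shutdown()

	// classify an error and bump reflector_error_total; TrackError asks the caller
	// to log only the errors it does not recognize
	err := errors.New("read: connection reset by peer")
	if shouldLog := metrics.TrackError(metrics.DirectionDownload, err); shouldLog {
		log.Errorln(err)
	}

	// ... a real process would block here serving requests ...
}
```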

View file

@@ -1,348 +0,0 @@
package lite_db
import (
"database/sql"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
qt "github.com/lbryio/lbry.go/v2/extras/query"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql" // blank import for db driver ensures its imported even if its not used
log "github.com/sirupsen/logrus"
"github.com/volatiletech/null/v8"
)
// SdBlob is a special blob that contains information on the rest of the blobs in the stream
type SdBlob struct {
StreamName string `json:"stream_name"`
Blobs []struct {
Length int `json:"length"`
BlobNum int `json:"blob_num"`
BlobHash string `json:"blob_hash,omitempty"`
IV string `json:"iv"`
} `json:"blobs"`
StreamType string `json:"stream_type"`
Key string `json:"key"`
SuggestedFileName string `json:"suggested_file_name"`
StreamHash string `json:"stream_hash"`
}
// SQL implements the DB interface
type SQL struct {
conn *sql.DB
TrackAccessTime bool
}
func logQuery(query string, args ...interface{}) {
s, err := qt.InterpolateParams(query, args...)
if err != nil {
log.Errorln(err)
} else {
log.Debugln(s)
}
}
// Connect will create a connection to the database
func (s *SQL) Connect(dsn string) error {
var err error
// interpolateParams is necessary. otherwise uploading a stream with thousands of blobs
// will hit MySQL's max_prepared_stmt_count limit because the prepared statements are all
// opened inside a transaction. closing them manually doesn't seem to help
dsn += "?parseTime=1&collation=utf8mb4_unicode_ci&interpolateParams=1"
s.conn, err = sql.Open("mysql", dsn)
if err != nil {
return errors.Err(err)
}
s.conn.SetMaxIdleConns(12)
return errors.Err(s.conn.Ping())
}
// AddBlob adds a blob to the database.
func (s *SQL) AddBlob(hash string, length int) error {
if s.conn == nil {
return errors.Err("not connected")
}
_, err := s.insertBlob(hash, length)
return err
}
func (s *SQL) insertBlob(hash string, length int) (int64, error) {
if length <= 0 {
return 0, errors.Err("length must be positive")
}
const isStored = true
now := time.Now()
args := []interface{}{hash, isStored, length, now}
blobID, err := s.exec(
"INSERT INTO blob_ (hash, is_stored, length, last_accessed_at) VALUES ("+qt.Qs(len(args))+") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored)), last_accessed_at=VALUES(last_accessed_at)",
args...,
)
if err != nil {
return 0, err
}
if blobID == 0 {
err = s.conn.QueryRow("SELECT id FROM blob_ WHERE hash = ?", hash).Scan(&blobID)
if err != nil {
return 0, errors.Err(err)
}
if blobID == 0 {
return 0, errors.Err("blob ID is 0 even after INSERTing and SELECTing")
}
}
return blobID, nil
}
// HasBlob checks if the database contains the blob information.
func (s *SQL) HasBlob(hash string) (bool, error) {
exists, err := s.HasBlobs([]string{hash})
if err != nil {
return false, err
}
return exists[hash], nil
}
// HasBlobs checks if the database contains the set of blobs and returns a bool map.
func (s *SQL) HasBlobs(hashes []string) (map[string]bool, error) {
exists, streamsNeedingTouch, err := s.hasBlobs(hashes)
_ = s.touch(streamsNeedingTouch)
return exists, err
}
func (s *SQL) touch(blobIDs []uint64) error {
if len(blobIDs) == 0 {
return nil
}
query := "UPDATE blob_ SET last_accessed_at = ? WHERE id IN (" + qt.Qs(len(blobIDs)) + ")"
args := make([]interface{}, len(blobIDs)+1)
args[0] = time.Now()
for i := range blobIDs {
args[i+1] = blobIDs[i]
}
startTime := time.Now()
_, err := s.exec(query, args...)
log.Debugf("blobs access query touched %d blobs and took %s", len(blobIDs), time.Since(startTime))
return errors.Err(err)
}
func (s *SQL) hasBlobs(hashes []string) (map[string]bool, []uint64, error) {
if s.conn == nil {
return nil, nil, errors.Err("not connected")
}
var (
hash string
blobID uint64
lastAccessedAt null.Time
)
var needsTouch []uint64
exists := make(map[string]bool)
touchDeadline := time.Now().AddDate(0, 0, -1) // touch blob if last accessed before this time
maxBatchSize := 10000
doneIndex := 0
for len(hashes) > doneIndex {
sliceEnd := doneIndex + maxBatchSize
if sliceEnd > len(hashes) {
sliceEnd = len(hashes)
}
log.Debugf("getting hashes[%d:%d] of %d", doneIndex, sliceEnd, len(hashes))
batch := hashes[doneIndex:sliceEnd]
// TODO: this query doesn't work for SD blobs, which are not in the stream_blob table
query := `SELECT hash, id, last_accessed_at
FROM blob_
WHERE is_stored = ? and hash IN (` + qt.Qs(len(batch)) + `)`
args := make([]interface{}, len(batch)+1)
args[0] = true
for i := range batch {
args[i+1] = batch[i]
}
logQuery(query, args...)
err := func() error {
startTime := time.Now()
rows, err := s.conn.Query(query, args...)
log.Debugf("hashes query took %s", time.Since(startTime))
if err != nil {
return errors.Err(err)
}
defer closeRows(rows)
for rows.Next() {
err := rows.Scan(&hash, &blobID, &lastAccessedAt)
if err != nil {
return errors.Err(err)
}
exists[hash] = true
if s.TrackAccessTime && (!lastAccessedAt.Valid || lastAccessedAt.Time.Before(touchDeadline)) {
needsTouch = append(needsTouch, blobID)
}
}
err = rows.Err()
if err != nil {
return errors.Err(err)
}
doneIndex += len(batch)
return nil
}()
if err != nil {
return nil, nil, err
}
}
return exists, needsTouch, nil
}
// Delete will remove the blob from the db
func (s *SQL) Delete(hash string) error {
_, err := s.exec("UPDATE blob_ set is_stored = ? WHERE hash = ?", 0, hash)
return errors.Err(err)
}
// AddSDBlob inserts the SD blob and all the content blobs. The content blobs are marked as "not stored",
// but they are tracked so reflector knows what it is missing.
func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int) error {
if s.conn == nil {
return errors.Err("not connected")
}
_, err := s.insertBlob(sdHash, sdBlobLength)
return err
}
// GetLRUBlobs returns the least recently accessed blobs that are currently stored, up to maxBlobs
func (s *SQL) GetLRUBlobs(maxBlobs int) ([]string, error) {
if s.conn == nil {
return nil, errors.Err("not connected")
}
query := "SELECT hash from blob_ where is_stored = ? order by last_accessed_at limit ?"
const isStored = true
logQuery(query, isStored, maxBlobs)
rows, err := s.conn.Query(query, isStored, maxBlobs)
if err != nil {
return nil, errors.Err(err)
}
defer closeRows(rows)
blobs := make([]string, 0, maxBlobs)
for rows.Next() {
var hash string
err := rows.Scan(&hash)
if err != nil {
return nil, errors.Err(err)
}
blobs = append(blobs, hash)
}
return blobs, nil
}
func (s *SQL) AllBlobs() ([]string, error) {
if s.conn == nil {
return nil, errors.Err("not connected")
}
query := "SELECT hash from blob_ where is_stored = ?" //TODO: maybe sorting them makes more sense?
const isStored = true
logQuery(query, isStored)
rows, err := s.conn.Query(query, isStored)
if err != nil {
return nil, errors.Err(err)
}
defer closeRows(rows)
totalBlobs, err := s.BlobsCount()
if err != nil {
return nil, err
}
blobs := make([]string, 0, totalBlobs)
for rows.Next() {
var hash string
err := rows.Scan(&hash)
if err != nil {
return nil, errors.Err(err)
}
blobs = append(blobs, hash)
}
return blobs, nil
}
func (s *SQL) BlobsCount() (int, error) {
if s.conn == nil {
return 0, errors.Err("not connected")
}
query := "SELECT count(id) from blob_ where is_stored = ?" //TODO: maybe sorting them makes more sense?
const isStored = true
logQuery(query, isStored)
var count int
err := s.conn.QueryRow(query, isStored).Scan(&count)
return count, errors.Err(err)
}
func closeRows(rows *sql.Rows) {
if rows != nil {
err := rows.Close()
if err != nil {
log.Error("error closing rows: ", err)
}
}
}
func (s *SQL) exec(query string, args ...interface{}) (int64, error) {
logQuery(query, args...)
attempt, maxAttempts := 0, 3
Retry:
attempt++
result, err := s.conn.Exec(query, args...)
if isLockTimeoutError(err) {
if attempt <= maxAttempts {
//Error 1205: Lock wait timeout exceeded; try restarting transaction
goto Retry
}
err = errors.Prefix("Lock timeout for query "+query, err)
}
if err != nil {
return 0, errors.Err(err)
}
lastID, err := result.LastInsertId()
return lastID, errors.Err(err)
}
func isLockTimeoutError(err error) bool {
e, ok := err.(*mysql.MySQLError)
return ok && e != nil && e.Number == 1205
}
/* SQL schema
in prod make sure you use latin1 or utf8 charset, NOT utf8mb4. that's a waste of space.
CREATE TABLE `blob_` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT,
`hash` char(96) NOT NULL,
`is_stored` tinyint(1) NOT NULL DEFAULT '0',
`length` bigint unsigned DEFAULT NULL,
`last_accessed_at` datetime DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `id` (`id`),
UNIQUE KEY `blob_hash_idx` (`hash`),
KEY `blob_last_accessed_idx` (`last_accessed_at`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
*/
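A short, hypothetical usage sketch of the store above (the import path is an assumption and the hash is a placeholder); it follows the readme's local `reflector`/`reflector` MySQL setup and the schema comment just above.

```go
package main

import (
	"log"

	// assumed import path; point this at wherever the lite_db package lives
	lite_db "github.com/lbryio/reflector.go/db/lite_db"
)

func main() {
	db := &lite_db.SQL{TrackAccessTime: true}

	// Connect appends parseTime/collation/interpolateParams to the DSN itself
	if err := db.Connect("reflector:reflector@tcp(localhost:3306)/reflector"); err != nil {
		log.Fatal(err)
	}

	sdHash := "replace-with-real-96-char-blob-hash"
	if err := db.AddSDBlob(sdHash, 1234); err != nil {
		log.Fatal(err)
	}

	// HasBlobs also refreshes last_accessed_at for stale rows when TrackAccessTime is set
	stored, err := db.HasBlobs([]string{sdHash})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(stored[sdHash])
}
```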

View file

@@ -5,15 +5,9 @@ import (
"time"
"github.com/lbryio/reflector.go/cmd"
"github.com/google/gops/agent"
log "github.com/sirupsen/logrus"
)
func main() {
if err := agent.Listen(agent.Options{}); err != nil {
log.Fatal(err)
}
rand.Seed(time.Now().UnixNano())
cmd.Execute()
}

View file

@@ -1,58 +1,20 @@
package meta
import (
"fmt"
"strconv"
"time"
)
var (
name = "prism-bin"
version = "unknown"
commit = "unknown"
commitLong = "unknown"
branch = "unknown"
Time = "unknown"
BuildTime time.Time
)
// Name returns main application name
func Name() string {
return name
}
// Version returns current application version
func Version() string {
return version
}
// FullName returns current app version, commit and build time
func FullName() string {
return fmt.Sprintf(
`Name: %v
Version: %v
branch: %v
commit: %v
commit long: %v
build date: %v`, Name(), Version(), branch, commit, commitLong, BuildTime.String())
}
var Version = ""
var Time = ""
var BuildTime time.Time
func init() {
if Time != "" {
t, err := strconv.Atoi(Time)
if err == nil {
BuildTime = time.Unix(int64(t), 0).UTC()
if err != nil {
return
}
BuildTime = time.Unix(int64(t), 0).UTC()
}
}
func VersionString() string {
var buildTime string
if BuildTime.IsZero() {
buildTime = "<now>"
} else {
buildTime = BuildTime.Format(time.RFC3339)
}
return fmt.Sprintf("version %s, built %s", version, buildTime)
}
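As a quick illustration (not from this diff), the helpers above might be consumed like this. The import path is assumed, and the `version`/`commit` values are presumably injected at build time (e.g. via `-ldflags "-X ..."` in the Makefile), which is an assumption about this repo's build setup.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/meta" // assumed import path
)

func main() {
	// prints "prism-bin unknown" unless the build injected real values
	fmt.Println(meta.Name(), meta.Version())

	// multi-line summary with branch, commit, and build date
	fmt.Println(meta.FullName())
}
```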

View file

@@ -2,22 +2,17 @@ package peer
import (
"bufio"
"encoding/hex"
"encoding/json"
ee "errors"
"io"
"net"
"strings"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/stop"
log "github.com/sirupsen/logrus"
)
@@ -31,10 +26,14 @@ const (
// Server is an instance of a peer server that houses the listener and store.
type Server struct {
StatLogger *log.Logger // logger to log stats
StatReportFrequency time.Duration // how often to log stats
store store.BlobStore
closed bool
grp *stop.Group
grp *stop.Group
stats *reflector.Stats
}
// NewServer returns an initialized Server pointer.
@@ -47,7 +46,8 @@ func NewServer(store store.BlobStore) *Server {
// Shutdown gracefully shuts down the peer server.
func (s *Server) Shutdown() {
log.Debug("shutting down peer server")
log.Debug("shutting down peer server...")
s.stats.Shutdown()
s.grp.StopAndWait()
log.Debug("peer server stopped")
}
@@ -67,6 +67,11 @@ func (s *Server) Start(address string) error {
s.grp.Done()
}()
s.stats = reflector.NewStatLogger("DOWNLOAD", s.StatLogger, s.StatReportFrequency, s.grp.Child())
if s.StatLogger != nil && s.StatReportFrequency > 0 {
s.stats.Start()
}
return nil
}
@@ -89,9 +94,7 @@ func (s *Server) listenAndServe(listener net.Listener) {
log.Error(errors.Prefix("accepting conn", err))
} else {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("peer", "server-handleconn").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("peer", "server-handleconn").Dec()
s.handleConnection(conn)
s.grp.Done()
}()
@@ -107,7 +110,6 @@ func (s *Server) handleConnection(conn net.Conn) {
}()
timeoutDuration := 1 * time.Minute
buf := bufio.NewReader(conn)
for {
var request []byte
@@ -118,7 +120,7 @@ func (s *Server) handleConnection(conn net.Conn) {
log.Error(errors.FullTrace(err))
}
request, err = readNextMessage(buf)
request, err = readNextRequest(conn)
if err != nil {
if err != io.EOF {
s.logError(err)
@@ -133,7 +135,7 @@ func (s *Server) handleConnection(conn net.Conn) {
response, err = s.handleCompositeRequest(request)
if err != nil {
log.Error(errors.FullTrace(err))
log.Error(err)
return
}
@@ -164,14 +166,14 @@ func (s *Server) handleAvailabilityRequest(data []byte) ([]byte, error) {
var request availabilityRequest
err := json.Unmarshal(data, &request)
if err != nil {
return nil, errors.Err(err)
return []byte{}, err
}
availableBlobs := []string{}
for _, blobHash := range request.RequestedBlobs {
exists, err := s.store.Has(blobHash)
if err != nil {
return nil, err
return []byte{}, err
}
if exists {
availableBlobs = append(availableBlobs, blobHash)
@@ -181,115 +183,99 @@ func (s *Server) handleAvailabilityRequest(data []byte) ([]byte, error) {
return json.Marshal(availabilityResponse{LbrycrdAddress: LbrycrdAddress, AvailableBlobs: availableBlobs})
}
//func (s *Server) handlePaymentRateNegotiation(data []byte) ([]byte, error) {
// var request paymentRateRequest
// err := json.Unmarshal(data, &request)
// if err != nil {
// return nil, err
// }
//
// offerReply := paymentRateAccepted
// if request.BlobDataPaymentRate < 0 {
// offerReply = paymentRateTooLow
// }
//
// return json.Marshal(paymentRateResponse{BlobDataPaymentRate: offerReply})
//}
//
//func (s *Server) handleBlobRequest(data []byte) ([]byte, error) {
// var request blobRequest
// err := json.Unmarshal(data, &request)
// if err != nil {
// return nil, err
// }
//
// log.Debugln("Sending blob " + request.RequestedBlob[:8])
//
// blob, err := s.store.Get(request.RequestedBlob)
// if err != nil {
// return nil, err
// }
//
// response, err := json.Marshal(blobResponse{IncomingBlob: incomingBlob{
// BlobHash: reflector.BlobHash(blob),
// Length: len(blob),
// }})
// if err != nil {
// return nil, err
// }
//
// return append(response, blob...), nil
//}
func (s *Server) handlePaymentRateNegotiation(data []byte) ([]byte, error) {
var request paymentRateRequest
err := json.Unmarshal(data, &request)
if err != nil {
return []byte{}, err
}
offerReply := paymentRateAccepted
if request.BlobDataPaymentRate < 0 {
offerReply = paymentRateTooLow
}
return json.Marshal(paymentRateResponse{BlobDataPaymentRate: offerReply})
}
func (s *Server) handleBlobRequest(data []byte) ([]byte, error) {
var request blobRequest
err := json.Unmarshal(data, &request)
if err != nil {
return []byte{}, err
}
log.Debugln("Sending blob " + request.RequestedBlob[:8])
blob, err := s.store.Get(request.RequestedBlob)
if err != nil {
return []byte{}, err
}
response, err := json.Marshal(blobResponse{IncomingBlob: incomingBlob{
BlobHash: reflector.BlobHash(blob),
Length: len(blob),
}})
if err != nil {
return []byte{}, err
}
return append(response, blob...), nil
}
func (s *Server) handleCompositeRequest(data []byte) ([]byte, error) {
var request compositeRequest
err := json.Unmarshal(data, &request)
if err != nil {
var je *json.SyntaxError
if ee.As(err, &je) {
return nil, errors.Err("invalid json request: offset %d in data %s", je.Offset, hex.EncodeToString(data))
}
return nil, errors.Err(err)
return []byte{}, err
}
response := compositeResponse{
LbrycrdAddress: LbrycrdAddress,
AvailableBlobs: []string{},
}
if len(request.RequestedBlobs) > 0 {
var availableBlobs []string
for _, blobHash := range request.RequestedBlobs {
if reflector.IsProtected(blobHash) {
return nil, errors.Err("requested blob is protected")
}
exists, err := s.store.Has(blobHash)
if err != nil {
return nil, err
return []byte{}, err
}
if exists {
response.AvailableBlobs = append(response.AvailableBlobs, blobHash)
availableBlobs = append(availableBlobs, blobHash)
}
}
response.AvailableBlobs = availableBlobs
}
if request.BlobDataPaymentRate != nil {
response.BlobDataPaymentRate = paymentRateAccepted
if *request.BlobDataPaymentRate < 0 {
response.BlobDataPaymentRate = paymentRateTooLow
}
response.BlobDataPaymentRate = paymentRateAccepted
if request.BlobDataPaymentRate < 0 {
response.BlobDataPaymentRate = paymentRateTooLow
}
var blob []byte
var trace shared.BlobTrace
if request.RequestedBlob != "" {
if len(request.RequestedBlob) != stream.BlobHashHexLength {
return nil, errors.Err("Invalid blob hash length")
}
log.Debugln("Sending blob " + request.RequestedBlob[:8])
blob, trace, err = s.store.Get(request.RequestedBlob)
log.Debug(trace.String())
blob, err = s.store.Get(request.RequestedBlob)
if errors.Is(err, store.ErrBlobNotFound) {
response.IncomingBlob = &incomingBlob{
response.IncomingBlob = incomingBlob{
Error: err.Error(),
}
} else if err != nil {
return nil, err
return []byte{}, err
} else {
response.IncomingBlob = &incomingBlob{
BlobHash: request.RequestedBlob,
response.IncomingBlob = incomingBlob{
BlobHash: reflector.BlobHash(blob),
Length: len(blob),
}
metrics.MtrOutBytesTcp.Add(float64(len(blob)))
metrics.BlobDownloadCount.Inc()
metrics.PeerDownloadCount.Inc()
s.stats.AddBlob()
}
}
respData, err := json.Marshal(response)
if err != nil {
return nil, err
return []byte{}, err
}
return append(respData, blob...), nil
@@ -299,30 +285,40 @@ func (s *Server) logError(e error) {
if e == nil {
return
}
shouldLog := metrics.TrackError(metrics.DirectionDownload, e)
shouldLog := s.stats.AddError(e)
if shouldLog {
log.Errorln(errors.FullTrace(e))
}
return
// old stuff below. it's here for posterity, because we're gonna have to deal with these errors someday for real
//err := errors.Wrap(e, 0)
// these happen because the peer protocol does not have a way to cancel blob downloads
// so the client will just close the connection if it's in the middle of downloading a blob
// but receives the blob from a different peer first or simply goes offline (timeout)
//if strings.Contains(err.Error(), "connection reset by peer") ||
// strings.Contains(err.Error(), "i/o timeout") ||
// strings.Contains(err.Error(), "broken pipe") {
// return
//}
//
//log.Error(errors.FullTrace(e))
}
func readNextMessage(buf *bufio.Reader) ([]byte, error) {
first_byte, err := buf.ReadByte()
if err != nil {
return nil, err
}
if first_byte != '{' {
// every request starts with '{'. Checking here disconnects earlier, so we don't wait until timeout
return nil, errInvalidData
}
msg := []byte("{")
func readNextRequest(conn net.Conn) ([]byte, error) {
request := make([]byte, 0)
eof := false
buf := bufio.NewReader(conn)
for {
chunk, err := buf.ReadBytes('}')
if err != nil {
if err != io.EOF {
//log.Errorln("readBytes error:", err) // logged by caller
return msg, err
return request, err
}
eof = true
}
@@ -331,16 +327,14 @@ func readNextMessage(buf *bufio.Reader) ([]byte, error) {
//spew.Dump(chunk)
if len(chunk) > 0 {
msg = append(msg, chunk...)
request = append(request, chunk...)
if len(msg) > maxRequestSize {
return msg, errRequestTooLarge
} else if len(msg) > 0 && msg[0] != '{' {
return msg, errInvalidData
if len(request) > maxRequestSize {
return request, errRequestTooLarge
}
// yes, this is how the peer protocol knows when the request finishes
if reflector.IsValidJSON(msg) {
if reflector.IsValidJSON(request) {
break
}
}
@@ -355,11 +349,11 @@ func readNextMessage(buf *bufio.Reader) ([]byte, error) {
// spew.Dump(request)
//}
if len(msg) == 0 && eof {
return nil, io.EOF
if len(request) == 0 && eof {
return []byte{}, io.EOF
}
return msg, nil
return request, nil
}
const (
@@ -371,7 +365,6 @@ const (
)
var errRequestTooLarge = errors.Base("request is too large")
var errInvalidData = errors.Base("Invalid data")
type availabilityRequest struct {
LbrycrdAddress bool `json:"lbrycrd_address"`
@@ -402,19 +395,18 @@ type incomingBlob struct {
}
type blobResponse struct {
IncomingBlob incomingBlob `json:"incoming_blob"`
RequestTrace *shared.BlobTrace
}
type compositeRequest struct {
LbrycrdAddress bool `json:"lbrycrd_address"`
RequestedBlobs []string `json:"requested_blobs"`
BlobDataPaymentRate *float64 `json:"blob_data_payment_rate"`
BlobDataPaymentRate float64 `json:"blob_data_payment_rate"`
RequestedBlob string `json:"requested_blob"`
}
type compositeResponse struct {
LbrycrdAddress string `json:"lbrycrd_address,omitempty"`
AvailableBlobs []string `json:"available_blobs"`
BlobDataPaymentRate string `json:"blob_data_payment_rate,omitempty"`
IncomingBlob *incomingBlob `json:"incoming_blob,omitempty"`
LbrycrdAddress string `json:"lbrycrd_address,omitempty"`
AvailableBlobs []string `json:"available_blobs,omitempty"`
BlobDataPaymentRate string `json:"blob_data_payment_rate,omitempty"`
IncomingBlob incomingBlob `json:"incoming_blob,omitempty"`
}
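To make the wire format above concrete, here is a rough, hypothetical client sketch: it sends a JSON `compositeRequest` over TCP and reads back the JSON response (followed by raw blob bytes when `requested_blob` is set). The host is a placeholder, 5567 is the default `--tcp-peer-port` from the readme, and the local struct mirrors only the request fields shown above (the payment-rate field differs between the two versions in this diff, so it is left out).

```go
package main

import (
	"encoding/json"
	"log"
	"net"
)

// trimmed-down mirror of compositeRequest, using the same JSON tags as above
type compositeRequest struct {
	LbrycrdAddress bool     `json:"lbrycrd_address"`
	RequestedBlobs []string `json:"requested_blobs"`
	RequestedBlob  string   `json:"requested_blob"`
}

func main() {
	conn, err := net.Dial("tcp", "reflector.lbry.com:5567") // placeholder host, default port
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	req, _ := json.Marshal(compositeRequest{
		LbrycrdAddress: true,
		RequestedBlobs: []string{"replace-with-real-96-char-blob-hash"},
	})
	if _, err := conn.Write(req); err != nil {
		log.Fatal(err)
	}

	// the server replies with a compositeResponse as JSON, immediately followed by
	// the raw blob bytes if one was requested; here we only peek at the first chunk
	buf := make([]byte, 8192)
	n, err := conn.Read(buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", buf[:n])
}
```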

View file

@@ -2,10 +2,7 @@ package peer
import (
"bytes"
"io"
"net"
"testing"
"time"
"github.com/lbryio/reflector.go/store"
)
@@ -37,7 +34,7 @@
}
func getServer(t *testing.T, withBlobs bool) *Server {
st := store.NewMemStore()
st := store.MemoryBlobStore{}
if withBlobs {
for k, v := range blobs {
err := st.Put(k, v)
@@ -46,7 +43,7 @@ func getServer(t *testing.T, withBlobs bool) *Server {
}
}
}
return NewServer(st)
return NewServer(&st)
}
func TestAvailabilityRequest_NoBlobs(t *testing.T) {
@@ -78,62 +75,3 @@ func TestAvailabilityRequest_WithBlobs(t *testing.T) {
}
}
}
func TestRequestFromConnection(t *testing.T) {
s := getServer(t, true)
err := s.Start("127.0.0.1:50505")
defer s.Shutdown()
if err != nil {
t.Error("error starting server", err)
}
for _, p := range availabilityRequests {
conn, err := net.Dial("tcp", "127.0.0.1:50505")
if err != nil {
t.Error("error opening connection", err)
}
defer func() { _ = conn.Close() }()
response := make([]byte, 8192)
_, err = conn.Write(p.request)
if err != nil {
t.Error("error writing", err)
}
_, err = conn.Read(response)
if err != nil {
t.Error("error reading", err)
}
if !bytes.Equal(response[:len(p.response)], p.response) {
t.Errorf("Response did not match expected response.\nExpected: %s\nGot: %s", string(p.response), string(response))
}
}
}
func TestInvalidData(t *testing.T) {
s := getServer(t, true)
err := s.Start("127.0.0.1:50503")
defer s.Shutdown()
if err != nil {
t.Error("error starting server", err)
}
conn, err := net.Dial("tcp", "127.0.0.1:50503")
if err != nil {
t.Error("error opening connection", err)
}
defer func() { _ = conn.Close() }()
response := make([]byte, 8192)
_, err = conn.Write([]byte("hello dear server, I would like blobs. Please"))
if err != nil {
t.Error("error writing", err)
}
err = conn.SetReadDeadline(time.Now().Add(5 * time.Second))
if err != nil {
t.Error("error setting read deadline", err)
}
_, err = conn.Read(response)
if err != io.EOF {
t.Error("error reading", err)
}
println(response)
}

View file

@@ -5,16 +5,16 @@ import (
"strconv"
"sync"
"github.com/lbryio/lbry.go/dht"
"github.com/lbryio/lbry.go/dht/bits"
"github.com/lbryio/reflector.go/cluster"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/peer"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/dht"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/stop"
log "github.com/sirupsen/logrus"
)
@@ -79,7 +79,7 @@ func New(conf *Config) *Prism {
dht: d,
cluster: c,
peer: peer.NewServer(conf.Blobs),
reflector: reflector.NewServer(conf.Blobs, conf.Blobs),
reflector: reflector.NewServer(conf.Blobs),
grp: stop.New(),
}

View file

@@ -4,9 +4,8 @@ import (
"math/big"
"testing"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/davecgh/go-spew/spew"
"github.com/lbryio/lbry.go/dht/bits"
)
func TestAnnounceRange(t *testing.T) {

View file

@@ -1,174 +0,0 @@
package publish
import "strings"
func guessMimeType(ext string) (string, string) {
if ext == "" {
return "application/octet-stream", "binary"
}
ext = strings.ToLower(strings.TrimLeft(strings.TrimSpace(ext), "."))
types := map[string]struct{ mime, t string }{
"a": {"application/octet-stream", "binary"},
"ai": {"application/postscript", "image"},
"aif": {"audio/x-aiff", "audio"},
"aifc": {"audio/x-aiff", "audio"},
"aiff": {"audio/x-aiff", "audio"},
"au": {"audio/basic", "audio"},
"avi": {"video/x-msvideo", "video"},
"bat": {"text/plain", "document"},
"bcpio": {"application/x-bcpio", "binary"},
"bin": {"application/octet-stream", "binary"},
"bmp": {"image/bmp", "image"},
"c": {"text/plain", "document"},
"cdf": {"application/x-netcdf", "binary"},
"cpio": {"application/x-cpio", "binary"},
"csh": {"application/x-csh", "binary"},
"css": {"text/css", "document"},
"csv": {"text/csv", "document"},
"dll": {"application/octet-stream", "binary"},
"doc": {"application/msword", "document"},
"dot": {"application/msword", "document"},
"dvi": {"application/x-dvi", "binary"},
"eml": {"message/rfc822", "document"},
"eps": {"application/postscript", "document"},
"epub": {"application/epub+zip", "document"},
"etx": {"text/x-setext", "document"},
"exe": {"application/octet-stream", "binary"},
"gif": {"image/gif", "image"},
"gtar": {"application/x-gtar", "binary"},
"h": {"text/plain", "document"},
"hdf": {"application/x-hdf", "binary"},
"htm": {"text/html", "document"},
"html": {"text/html", "document"},
"ico": {"image/vnd.microsoft.icon", "image"},
"ief": {"image/ief", "image"},
"iges": {"model/iges", "model"},
"jpe": {"image/jpeg", "image"},
"jpeg": {"image/jpeg", "image"},
"jpg": {"image/jpeg", "image"},
"js": {"application/javascript", "document"},
"json": {"application/json", "document"},
"ksh": {"text/plain", "document"},
"latex": {"application/x-latex", "binary"},
"m1v": {"video/mpeg", "video"},
"m3u": {"application/vnd.apple.mpegurl", "audio"},
"m3u8": {"application/vnd.apple.mpegurl", "audio"},
"man": {"application/x-troff-man", "document"},
"markdown": {"text/markdown", "document"},
"md": {"text/markdown", "document"},
"me": {"application/x-troff-me", "binary"},
"mht": {"message/rfc822", "document"},
"mhtml": {"message/rfc822", "document"},
"mif": {"application/x-mif", "binary"},
"mov": {"video/quicktime", "video"},
"movie": {"video/x-sgi-movie", "video"},
"mp2": {"audio/mpeg", "audio"},
"mp3": {"audio/mpeg", "audio"},
"mp4": {"video/mp4", "video"},
"mpa": {"video/mpeg", "video"},
"mpe": {"video/mpeg", "video"},
"mpeg": {"video/mpeg", "video"},
"mpg": {"video/mpeg", "video"},
"ms": {"application/x-troff-ms", "binary"},
"nc": {"application/x-netcdf", "binary"},
"nws": {"message/rfc822", "document"},
"o": {"application/octet-stream", "binary"},
"obj": {"application/octet-stream", "model"},
"oda": {"application/oda", "binary"},
"p12": {"application/x-pkcs12", "binary"},
"p7c": {"application/pkcs7-mime", "binary"},
"pbm": {"image/x-portable-bitmap", "image"},
"pdf": {"application/pdf", "document"},
"pfx": {"application/x-pkcs12", "binary"},
"pgm": {"image/x-portable-graymap", "image"},
"pl": {"text/plain", "document"},
"png": {"image/png", "image"},
"pnm": {"image/x-portable-anymap", "image"},
"pot": {"application/vnd.ms-powerpoint", "document"},
"ppa": {"application/vnd.ms-powerpoint", "document"},
"ppm": {"image/x-portable-pixmap", "image"},
"pps": {"application/vnd.ms-powerpoint", "document"},
"ppt": {"application/vnd.ms-powerpoint", "document"},
"ps": {"application/postscript", "document"},
"pwz": {"application/vnd.ms-powerpoint", "document"},
"py": {"text/x-python", "document"},
"pyc": {"application/x-python-code", "binary"},
"pyo": {"application/x-python-code", "binary"},
"qt": {"video/quicktime", "video"},
"ra": {"audio/x-pn-realaudio", "audio"},
"ram": {"application/x-pn-realaudio", "audio"},
"ras": {"image/x-cmu-raster", "image"},
"rdf": {"application/xml", "binary"},
"rgb": {"image/x-rgb", "image"},
"roff": {"application/x-troff", "binary"},
"rtx": {"text/richtext", "document"},
"sgm": {"text/x-sgml", "document"},
"sgml": {"text/x-sgml", "document"},
"sh": {"application/x-sh", "document"},
"shar": {"application/x-shar", "binary"},
"snd": {"audio/basic", "audio"},
"so": {"application/octet-stream", "binary"},
"src": {"application/x-wais-source", "binary"},
"stl": {"model/stl", "model"},
"sv4cpio": {"application/x-sv4cpio", "binary"},
"sv4crc": {"application/x-sv4crc", "binary"},
"svg": {"image/svg+xml", "image"},
"swf": {"application/x-shockwave-flash", "binary"},
"t": {"application/x-troff", "binary"},
"tar": {"application/x-tar", "binary"},
"tcl": {"application/x-tcl", "binary"},
"tex": {"application/x-tex", "binary"},
"texi": {"application/x-texinfo", "binary"},
"texinfo": {"application/x-texinfo", "binary"},
"tif": {"image/tiff", "image"},
"tiff": {"image/tiff", "image"},
"tr": {"application/x-troff", "binary"},
"tsv": {"text/tab-separated-values", "document"},
"txt": {"text/plain", "document"},
"ustar": {"application/x-ustar", "binary"},
"vcf": {"text/x-vcard", "document"},
"wav": {"audio/x-wav", "audio"},
"webm": {"video/webm", "video"},
"wiz": {"application/msword", "document"},
"wsdl": {"application/xml", "document"},
"xbm": {"image/x-xbitmap", "image"},
"xlb": {"application/vnd.ms-excel", "document"},
"xls": {"application/vnd.ms-excel", "document"},
"xml": {"text/xml", "document"},
"xpdl": {"application/xml", "document"},
"xpm": {"image/x-xpixmap", "image"},
"xsl": {"application/xml", "document"},
"xwd": {"image/x-xwindowdump", "image"},
"zip": {"application/zip", "binary"},
// These are non-standard types, commonly found in the wild.
"cbr": {"application/vnd.comicbook-rar", "document"},
"cbz": {"application/vnd.comicbook+zip", "document"},
"flac": {"audio/flac", "audio"},
"lbry": {"application/x-ext-lbry", "document"},
"m4v": {"video/m4v", "video"},
"mid": {"audio/midi", "audio"},
"midi": {"audio/midi", "audio"},
"mkv": {"video/x-matroska", "video"},
"mobi": {"application/x-mobipocket-ebook", "document"},
"oga": {"audio/ogg", "audio"},
"ogv": {"video/ogg", "video"},
"pct": {"image/pict", "image"},
"pic": {"image/pict", "image"},
"pict": {"image/pict", "image"},
"prc": {"application/x-mobipocket-ebook", "document"},
"rtf": {"application/rtf", "document"},
"xul": {"text/xul", "document"},
// microsoft is special and has its own "standard"
// https://docs.microsoft.com/en-us/windows/desktop/wmp/file-name-extensions
"wmv": {"video/x-ms-wmv", "video"},
}
if data, ok := types[ext]; ok {
return data.mime, data.t
}
return "application/x-ext-" + ext, "binary"
}
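Since `guessMimeType` is unexported, a usage sketch has to live inside the `publish` package itself; this hypothetical test mirrors the call made in `makeStream` further down (the file name and expected values are just illustrative).

```go
package publish

import (
	"path/filepath"
	"testing"
)

// hypothetical test, not part of this diff
func TestGuessMimeType(t *testing.T) {
	mime, category := guessMimeType(filepath.Ext("video.mp4"))
	if mime != "video/mp4" || category != "video" {
		t.Errorf("unexpected result: %s / %s", mime, category)
	}
}
```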

View file

@@ -1,291 +0,0 @@
package publish
import (
"bytes"
"encoding/json"
"os"
"path/filepath"
"sort"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/lbrycrd"
"github.com/lbryio/lbry.go/v2/stream"
pb "github.com/lbryio/types/v2/go"
"github.com/btcsuite/btcd/btcjson"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/golang/protobuf/proto"
)
/* TODO:
import cert from wallet
get all utxos from chainquery
create transaction
sign it with the channel
track state of utxos across publishes from this channel so that we can just do one query to get utxos
prioritize only confirmed utxos
Handling all the issues we handle currently with lbrynet:
"Couldn't find private key for id",
"You already have a stream claim published under the name",
"Cannot publish using channel",
"txn-mempool-conflict",
"too-long-mempool-chain",
"Missing inputs",
"Not enough funds to cover this transaction",
*/
type Details struct {
Title string
Description string
Author string
Tags []string
ReleaseTime int64
}
func Publish(client *lbrycrd.Client, path, name, address string, details Details, reflectorAddress string) (*wire.MsgTx, *chainhash.Hash, error) {
if name == "" {
return nil, nil, errors.Err("name required")
}
//TODO: sign claim if publishing into channel
addr, err := btcutil.DecodeAddress(address, &lbrycrd.MainNetParams)
if errors.Is(err, btcutil.ErrUnknownAddressType) {
return nil, nil, errors.Err(`unknown address type. here's what you need to make this work:
- deprecatedrpc=validateaddress" and "deprecatedrpc=signrawtransaction" in your lbrycrd.conf
- github.com/btcsuite/btcd pinned to hash 306aecffea32
- github.com/btcsuite/btcutil pinned to 4c204d697803
- github.com/lbryio/lbry.go/v2 (make sure you have v2 at the end)`)
}
if err != nil {
return nil, nil, err
}
amount := 0.01
changeAddr := addr // TODO: fix this? or maybe it's fine?
tx, err := baseTx(client, amount, changeAddr)
if err != nil {
return nil, nil, err
}
st, stPB, err := makeStream(path)
if err != nil {
return nil, nil, err
}
stPB.Author = details.Author
stPB.ReleaseTime = details.ReleaseTime
claim := &pb.Claim{
Title: details.Title,
Description: details.Description,
Type: &pb.Claim_Stream{Stream: stPB},
}
err = addClaimToTx(tx, claim, name, amount, addr)
if err != nil {
return nil, nil, err
}
// sign and send
signedTx, allInputsSigned, err := client.SignRawTransaction(tx)
if err != nil {
return nil, nil, err
}
if !allInputsSigned {
return nil, nil, errors.Err("not all inputs for the tx could be signed")
}
err = reflect(st, reflectorAddress)
if err != nil {
return nil, nil, err
}
txid, err := client.SendRawTransaction(signedTx, false)
if err != nil {
return nil, nil, err
}
return signedTx, txid, nil
}
// TODO: lots of assumptions. hardcoded values need to be passed in or calculated
func baseTx(client *lbrycrd.Client, amount float64, changeAddress btcutil.Address) (*wire.MsgTx, error) {
txFee := 0.0002 // TODO: estimate this better?
inputs, total, err := coinChooser(client, amount+txFee)
if err != nil {
return nil, err
}
change := total - amount - txFee
// create base raw tx
addresses := make(map[btcutil.Address]btcutil.Amount)
//changeAddr, err := client.GetNewAddress("")
changeAmount, err := btcutil.NewAmount(change)
if err != nil {
return nil, err
}
addresses[changeAddress] = changeAmount
lockTime := int64(0)
return client.CreateRawTransaction(inputs, addresses, &lockTime)
}
func coinChooser(client *lbrycrd.Client, amount float64) ([]btcjson.TransactionInput, float64, error) {
utxos, err := client.ListUnspentMin(1)
if err != nil {
return nil, 0, err
}
sort.Slice(utxos, func(i, j int) bool { return utxos[i].Amount < utxos[j].Amount })
var utxo btcjson.ListUnspentResult
for _, u := range utxos {
if u.Spendable && u.Amount >= amount {
utxo = u
break
}
}
if utxo.TxID == "" {
return nil, 0, errors.Err("not enough utxos to create tx")
}
return []btcjson.TransactionInput{{Txid: utxo.TxID, Vout: utxo.Vout}}, utxo.Amount, nil
}
func addClaimToTx(tx *wire.MsgTx, claim *pb.Claim, name string, amount float64, claimAddress btcutil.Address) error {
claimBytes, err := proto.Marshal(claim)
if err != nil {
return err
}
claimBytes = append([]byte{0}, claimBytes...) // version 0 = no channel sig
amt, err := btcutil.NewAmount(amount)
if err != nil {
return err
}
script, err := getClaimPayoutScript(name, claimBytes, claimAddress)
if err != nil {
return err
}
tx.AddTxOut(wire.NewTxOut(int64(amt), script))
return nil
}
func Decode(client *lbrycrd.Client, tx *wire.MsgTx) (string, error) {
buf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize()))
if err := tx.Serialize(buf); err != nil {
return "", errors.Err(err)
}
//txHex := hex.EncodeToString(buf.Bytes())
//spew.Dump(txHex)
decoded, err := client.DecodeRawTransaction(buf.Bytes())
if err != nil {
return "", err
}
data, err := json.MarshalIndent(decoded, "", " ")
return string(data), err
}
func reflect(st stream.Stream, reflectorAddress string) error {
// upload blobs to reflector
c := reflector.Client{}
err := c.Connect(reflectorAddress)
if err != nil {
return errors.Err(err)
}
for i, b := range st {
if i == 0 {
err = c.SendSDBlob(b)
} else {
err = c.SendBlob(b)
}
if err != nil {
return errors.Err(err)
}
}
return nil
}
func makeStream(path string) (stream.Stream, *pb.Stream, error) {
file, err := os.Open(path)
if err != nil {
return nil, nil, errors.Err(err)
}
defer func() { _ = file.Close() }()
enc := stream.NewEncoder(file)
s, err := enc.Stream()
if err != nil {
return nil, nil, errors.Err(err)
}
streamProto := &pb.Stream{
Source: &pb.Source{
SdHash: enc.SDBlob().Hash(),
Name: filepath.Base(file.Name()),
Size: uint64(enc.SourceLen()),
Hash: enc.SourceHash(),
},
}
mimeType, category := guessMimeType(filepath.Ext(file.Name()))
streamProto.Source.MediaType = mimeType
switch category {
case "video":
//t, err := streamVideoMetadata(path)
//if err != nil {
// return nil, nil, err
//}
streamProto.Type = &pb.Stream_Video{}
case "audio":
streamProto.Type = &pb.Stream_Audio{}
case "image":
streamProto.Type = &pb.Stream_Image{}
}
return s, streamProto, nil
}
func getClaimPayoutScript(name string, value []byte, address btcutil.Address) ([]byte, error) {
//OP_CLAIM_NAME <name> <value> OP_2DROP OP_DROP OP_DUP OP_HASH160 <address> OP_EQUALVERIFY OP_CHECKSIG
pkscript, err := txscript.PayToAddrScript(address)
if err != nil {
return nil, errors.Err(err)
}
return txscript.NewScriptBuilder().
AddOp(txscript.OP_NOP6). //OP_CLAIM_NAME
AddData([]byte(name)). //<name>
AddData(value). //<value>
AddOp(txscript.OP_2DROP). //OP_2DROP
AddOp(txscript.OP_DROP). //OP_DROP
AddOps(pkscript). //OP_DUP OP_HASH160 <address> OP_EQUALVERIFY OP_CHECKSIG
Script()
}
//func streamVideoMetadata(path string) (*pb.Stream_Video, error) {
// mi, err := mediainfo.GetMediaInfo(path)
// if err != nil {
// return nil, err
// }
// return &pb.Stream_Video{
// Video: &pb.Video{
// Duration: uint32(mi.General.Duration / 1000),
// Height: uint32(mi.Video.Height),
// Width: uint32(mi.Video.Width),
// },
// }, nil
//}
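For context, a hedged sketch of how `Publish` might be called. It assumes you already have a connected `*lbrycrd.Client` (constructing one is outside the scope of this sketch), the `publish` import path is assumed, and the file path, claim name, address, and reflector host are placeholders; 5566 is the default `--receiver-port` from the readme.

```go
package example

import (
	"log"
	"time"

	"github.com/lbryio/reflector.go/publish" // assumed import path

	"github.com/lbryio/lbry.go/v2/lbrycrd"
)

func publishExample(client *lbrycrd.Client) error {
	tx, txid, err := publish.Publish(
		client,
		"/path/to/video.mp4",      // file to publish (placeholder)
		"my-claim-name",           // claim name
		"bPublishAddressGoesHere", // claim/payout address (placeholder)
		publish.Details{
			Title:       "My video",
			Description: "Published straight from Go",
			Author:      "me",
			ReleaseTime: time.Now().Unix(),
		},
		"reflector.lbry.com:5566", // blobs are reflected here before the tx is broadcast
	)
	if err != nil {
		return err
	}
	log.Printf("published in tx %s (%d outputs)", txid, len(tx.TxOut))
	return nil
}
```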

View file

@@ -1,59 +0,0 @@
package publish
import (
"encoding/json"
"io"
)
func LoadWallet(r io.Reader) (WalletFile, error) {
var w WalletFile
err := json.NewDecoder(r).Decode(&w)
return w, err
}
type WalletFile struct {
Name string `json:"name"`
Version int `json:"version"`
Preferences WalletPrefs `json:"preferences"`
Accounts []Account `json:"accounts"`
}
type Account struct {
AddressGenerator AddressGenerator `json:"address_generator"`
Certificates map[string]string `json:"certificates"`
Encrypted bool `json:"encrypted"`
Ledger string `json:"ledger"`
ModifiedOn float64 `json:"modified_on"`
Name string `json:"name"`
PrivateKey string `json:"private_key"`
PublicKey string `json:"public_key"`
Seed string `json:"seed"`
}
type AddressGenerator struct {
Name string `json:"name"`
Change AddressGenParams `json:"change"` // should "change" and "receiving" be replaced with a map[string]AddressGenParams?
Receiving AddressGenParams `json:"receiving"`
}
type AddressGenParams struct {
Gap int `json:"gap"`
MaximumUsesPerAddress int `json:"maximum_uses_per_address"`
}
type WalletPrefs struct {
Shared struct {
Ts float64 `json:"ts"`
Value struct {
Type string `json:"type"`
Value struct {
AppWelcomeVersion int `json:"app_welcome_version"`
Blocked []interface{} `json:"blocked"`
Sharing3P bool `json:"sharing_3P"`
Subscriptions []string `json:"subscriptions"`
Tags []string `json:"tags"`
} `json:"value"`
Version string `json:"version"`
} `json:"value"`
} `json:"shared"`
}
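A small, hypothetical sketch of loading one of these wallet files (both the path and the import path are assumptions):

```go
package main

import (
	"log"
	"os"

	"github.com/lbryio/reflector.go/publish" // assumed import path
)

func main() {
	f, err := os.Open("/path/to/wallets/default_wallet") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = f.Close() }()

	w, err := publish.LoadWallet(f)
	if err != nil {
		log.Fatal(err)
	}

	for _, acct := range w.Accounts {
		log.Printf("account %q has %d certificates", acct.Name, len(acct.Certificates))
	}
}
```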

106  readme.md
View file

@@ -1,110 +1,25 @@
# Reflector
Reflector is a central piece of software that provides LBRY with the following features:
- Blob reflection: when something is published, we capture the data and store it on our servers for quicker retrieval
- Blob distribution: when a piece of content is requested and the LBRY network doesn't have it, reflector will retrieve it from its storage and distribute it
- Blob caching: reflectors can be chained together in multiple regions or servers to form a chain of cached content. We call those "blobcaches". They are layered so that content distribution is favorable in all the regions we deploy to
There are a few other features embedded in reflector.go, including publishing streams from Go, downloading or uploading blobs, resolving content, and more unfinished tools.
This code includes a Go implementations of the LBRY peer protocol, reflector protocol, and DHT.
A reflector cluster to accept LBRY content for hosting en masse, rehost the content, and make money on data fees (TODO).
This code includes Go implementations of the LBRY peer protocol, reflector protocol, and DHT.
## Installation
- Install MySQL 8 (5.7 might work too)
- add a `reflector` user and database with password `reflector`, with localhost access only
- Create the tables as described [here](https://github.com/lbryio/reflector.go/blob/master/db/db.go#L735) (the link might not stay in sync with the code, so just look for the schema in that file)
#### We do not support running reflector.go as a blob receiver; however, if you want to run it as a private blobcache, you may compile it yourself and run it as follows:
```bash
./prism-bin reflector \
--conf="none" \
--disable-uploads=true \
--use-db=false \
--upstream-reflector="reflector.lbry.com" \
--upstream-protocol="http" \
--request-queue-size=200 \
--disk-cache="2GB:/path/to/your/storage/:localdb" \
```
Create a systemd script if you want to run it automatically on startup or as a service.
coming soon
## Usage
Usage as reflector/blobcache:
```bash
Run reflector server
coming soon
Usage:
prism reflector [flags]
Flags:
--disable-blocklist Disable blocklist watching/updating
--disable-uploads Disable uploads to this reflector server
--disk-cache string Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (default "100GB:/tmp/downloaded_blobs:localdb")
-h, --help help for reflector
--http-peer-port int The port reflector will distribute content from over HTTP protocol (default 5569)
--http3-peer-port int The port reflector will distribute content from over HTTP3 protocol (default 5568)
--mem-cache int enable in-memory cache with a max size of this many blobs
--metrics-port int The port reflector will use for prometheus metrics (default 2112)
--optional-disk-cache string Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (this would get hit before the one specified in disk-cache)
--origin-endpoint string HTTP edge endpoint for standard HTTP retrieval
--origin-endpoint-fallback string HTTP edge endpoint for standard HTTP retrieval if first origin fails
--receiver-port int The port reflector will receive content from (default 5566)
--request-queue-size int How many concurrent requests from downstream should be handled at once (the rest will wait) (default 200)
--tcp-peer-port int The port reflector will distribute content from for the TCP (LBRY) protocol (default 5567)
--upstream-protocol string protocol used to fetch blobs from another upstream reflector server (tcp/http3/http) (default "http")
--upstream-reflector string host:port of a reflector server where blobs are fetched from
--use-db Whether to connect to the reflector db or not (default true)
Global Flags:
--conf string Path to config. Use 'none' to disable (default "config.json")
-v, --verbose strings Verbose logging for specific components
```
Other uses:
```bash
Prism is a single entry point application with multiple sub modules which can be leveraged individually or together
Usage:
prism [command]
Available Commands:
check-integrity check blobs integrity for a given path
cluster Start(join) to or Start a new cluster
decode Decode a claim value
dht Run dht node
getstream Get a stream from a reflector server
help Help about any command
peer Run peer server
populate-db populate local database with blobs from a disk storage
publish Publish a file
reflector Run reflector server
resolve Resolve a URL
send Send a file to a reflector
sendblob Send a random blob to a reflector server
start Runs full prism application with cluster, dht, peer server, and reflector server.
test Test things
upload Upload blobs to S3
version Print the version
Flags:
--conf string Path to config. Use 'none' to disable (default "config.json")
-h, --help help for prism
-v, --verbose strings Verbose logging for specific components
```
## Running from Source
This project requires [Go v1.20](https://golang.org/doc/install).
On Ubuntu you can install it with `sudo snap install go --classic`
This project requires [Go v1.10](https://golang.org/doc/install) or higher.
```
git clone git@github.com:lbryio/reflector.go.git
cd reflector.go
go get -u github.com/lbryio/reflector.go
cd "$(go env GOPATH)/src/github.com/lbryio/reflector.go"
make
./dist/linux_amd64/prism-bin
./bin/prism-bin
```
## Contributing
@@ -118,7 +33,8 @@ This project is MIT licensed.
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues.
Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
Our PGP key is [here](https://keybase.io/lbry/key.asc) if you need it.
## Contact
The primary contact for this project is [@Nikooo777](https://github.com/Nikooo777) (niko-at-lbry.com)
The primary contact for this project is [@lyoshenka](https://github.com/lyoshenka) (grin@lbry.com)

View file

@@ -8,49 +8,42 @@ import (
"strings"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/reflector.go/wallet"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/extras/errors"
types1 "github.com/lbryio/types/v1/go"
types2 "github.com/lbryio/types/v2/go"
"github.com/golang/protobuf/proto"
log "github.com/sirupsen/logrus"
)
const blocklistURL = "https://api.lbry.com/file/list_blocked"
const blocklistURL = "https://api.lbry.io/file/list_blocked"
func (s *Server) enableBlocklist(b store.Blocklister) {
walletServers := []string{
"spv25.lbry.com:50001",
"spv26.lbry.com:50001",
"spv19.lbry.com:50001",
"spv14.lbry.com:50001",
}
updateBlocklist(b, walletServers, s.grp.Ch())
// TODO: updateBlocklist should be killed when server is shutting down
updateBlocklist(b)
t := time.NewTicker(12 * time.Hour)
for {
select {
case <-s.grp.Ch():
return
case <-t.C:
updateBlocklist(b, walletServers, s.grp.Ch())
updateBlocklist(b)
}
}
}
func updateBlocklist(b store.Blocklister, walletServers []string, stopper stop.Chan) {
log.Debugf("blocklist update starting")
values, err := blockedSdHashes(walletServers, stopper)
func updateBlocklist(b store.Blocklister) {
values, err := blockedSdHashes()
if err != nil {
log.Error(err)
return
}
for name, v := range values {
for _, v := range values {
if v.Err != nil {
log.Error(errors.FullTrace(errors.Err("blocklist: %s: %s", name, v.Err)))
continue
}
@ -59,19 +52,17 @@ func updateBlocklist(b store.Blocklister, walletServers []string, stopper stop.C
log.Error(err)
}
}
log.Debugf("blocklist update done")
}
func blockedSdHashes(walletServers []string, stopper stop.Chan) (map[string]valOrErr, error) {
client := http.Client{Timeout: 1 * time.Second}
resp, err := client.Get(blocklistURL)
func blockedSdHashes() (map[string]valOrErr, error) {
resp, err := http.Get(blocklistURL)
if err != nil {
return nil, errors.Err(err)
}
defer func() {
err := resp.Body.Close()
if err != nil {
log.Errorln(errors.Err(err))
log.Errorln(err)
}
}()
@ -91,7 +82,7 @@ func blockedSdHashes(walletServers []string, stopper stop.Chan) (map[string]valO
return nil, errors.Prefix("list_blocked API call", r.Error)
}
return sdHashesForOutpoints(walletServers, r.Data.Outpoints, stopper)
return sdHashesForOutpoints(r.Data.Outpoints)
}
type valOrErr struct {
@ -100,34 +91,20 @@ type valOrErr struct {
}
// sdHashesForOutpoints queries the wallet server for the sd hashes of the given outpoints
func sdHashesForOutpoints(walletServers, outpoints []string, stopper stop.Chan) (map[string]valOrErr, error) {
func sdHashesForOutpoints(outpoints []string) (map[string]valOrErr, error) {
values := make(map[string]valOrErr)
node := wallet.NewNode()
err := node.Connect(walletServers, nil)
defer node.Shutdown()
err := node.Connect([]string{
"lbryumx1.lbry.io:50001",
"lbryumx2.lbry.io:50001",
}, nil)
if err != nil {
return nil, errors.Err(err)
return nil, err
}
done := make(chan bool)
metrics.RoutinesQueue.WithLabelValues("reflector", "sdhashesforoutput").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "sdhashesforoutput").Dec()
select {
case <-done:
case <-stopper:
}
node.Shutdown()
}()
OutpointLoop:
for _, outpoint := range outpoints {
select {
case <-stopper:
break OutpointLoop
default:
}
parts := strings.Split(outpoint, ":")
if len(parts) != 2 {
values[outpoint] = valOrErr{Err: errors.Err("invalid outpoint format")}
@ -140,20 +117,57 @@ OutpointLoop:
continue
}
claim, err := node.GetClaimInTx(parts[0], nout)
resp, err := node.GetClaimsInTx(parts[0])
if err != nil {
values[outpoint] = valOrErr{Err: err}
continue
}
hash := hex.EncodeToString(claim.GetStream().GetSource().GetSdHash())
values[outpoint] = valOrErr{Value: hash, Err: nil}
}
var value []byte
for _, tx := range resp.Result {
if tx.Nout != nout {
continue
}
select {
case done <- true:
default: // in case of race where stopper got stopped right after loop finished
value, err = hex.DecodeString(tx.Value)
break
}
if err != nil {
values[outpoint] = valOrErr{Err: err}
continue
} else if value == nil {
values[outpoint] = valOrErr{Err: errors.Err("outpoint not found")}
continue
}
hash, err := hashFromClaim(value)
values[outpoint] = valOrErr{Value: hash, Err: err}
}
return values, nil
}
func hashFromClaim(value []byte) (string, error) {
claim := &types1.Claim{}
err := proto.Unmarshal(value, claim)
if err != nil {
return "", err
}
if claim.GetStream().GetSource().GetSourceType() == types1.Source_lbry_sd_hash && claim.GetStream().GetSource().GetSource() != nil {
return hex.EncodeToString(claim.GetStream().GetSource().GetSource()), nil
}
claim2 := &types2.Claim{}
err = proto.Unmarshal(value, claim2)
if err != nil {
return "", err
}
stream, ok := claim2.GetType().(*types2.Claim_Stream)
if !ok || stream == nil {
return "", errors.Err("not a stream claim")
}
return hex.EncodeToString(claim2.GetStream().GetSource().GetSdHash()), nil
}
View file
@ -2,11 +2,12 @@ package reflector
import (
"encoding/json"
"log"
"net"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/stream"
log "github.com/sirupsen/logrus"
)
// ErrBlobExists is a default error for when a blob already exists on the reflector server.
@ -35,18 +36,8 @@ func (c *Client) Close() error {
return c.conn.Close()
}
// SendBlob sends a blob to the server.
// SendBlob sends a send blob request to the client.
func (c *Client) SendBlob(blob stream.Blob) error {
return c.sendBlob(blob, false)
}
// SendSDBlob sends an SD blob request to the server.
func (c *Client) SendSDBlob(blob stream.Blob) error {
return c.sendBlob(blob, true)
}
// sendBlob does the actual blob sending
func (c *Client) sendBlob(blob stream.Blob, isSDBlob bool) error {
if !c.connected {
return errors.Err("not connected")
}
@ -56,15 +47,10 @@ func (c *Client) sendBlob(blob stream.Blob, isSDBlob bool) error {
}
blobHash := blob.HashHex()
var req sendBlobRequest
if isSDBlob {
req.SdBlobSize = blob.Size()
req.SdBlobHash = blobHash
} else {
req.BlobSize = blob.Size()
req.BlobHash = blobHash
}
sendRequest, err := json.Marshal(req)
sendRequest, err := json.Marshal(sendBlobRequest{
BlobSize: blob.Size(),
BlobHash: blobHash,
})
if err != nil {
return err
}
@ -76,51 +62,30 @@ func (c *Client) sendBlob(blob stream.Blob, isSDBlob bool) error {
dec := json.NewDecoder(c.conn)
if isSDBlob {
var sendResp sendSdBlobResponse
err = dec.Decode(&sendResp)
if err != nil {
return err
}
if !sendResp.SendSdBlob {
return errors.Prefix(blobHash[:8], ErrBlobExists)
}
log.Println("Sending SD blob " + blobHash[:8])
} else {
var sendResp sendBlobResponse
err = dec.Decode(&sendResp)
if err != nil {
return err
}
if !sendResp.SendBlob {
return errors.Prefix(blobHash[:8], ErrBlobExists)
}
log.Println("Sending blob " + blobHash[:8])
var sendResp sendBlobResponse
err = dec.Decode(&sendResp)
if err != nil {
return err
}
if !sendResp.SendBlob {
return errors.Prefix(blobHash[:8], ErrBlobExists)
}
log.Println("Sending blob " + blobHash[:8])
_, err = c.conn.Write(blob)
if err != nil {
return err
}
var transferResp blobTransferResponse
err = dec.Decode(&transferResp)
if err != nil {
return err
}
if isSDBlob {
var transferResp sdBlobTransferResponse
err = dec.Decode(&transferResp)
if err != nil {
return err
}
if !transferResp.ReceivedSdBlob {
return errors.Err("server did not received SD blob")
}
} else {
var transferResp blobTransferResponse
err = dec.Decode(&transferResp)
if err != nil {
return err
}
if !transferResp.ReceivedBlob {
return errors.Err("server did not received blob")
}
if !transferResp.ReceivedBlob {
return errors.Err("server did not received blob")
}
return nil
View file
@ -1,81 +0,0 @@
package reflector
import (
"encoding/json"
"net/http"
"time"
"github.com/bluele/gcache"
"github.com/lbryio/lbry.go/v2/extras/errors"
"golang.org/x/sync/singleflight"
)
const protectedListURL = "https://api.odysee.com/file/list_protected"
type ProtectedContent struct {
SDHash string `json:"sd_hash"`
ClaimID string `json:"claim_id"`
}
var protectedCache = gcache.New(10).Expiration(2 * time.Minute).Build()
func GetProtectedContent() (interface{}, error) {
cachedVal, err := protectedCache.Get("protected")
if err == nil && cachedVal != nil {
return cachedVal.(map[string]bool), nil
}
method := "GET"
var r struct {
Success bool `json:"success"`
Error string `json:"error"`
Data []ProtectedContent `json:"data"`
}
client := &http.Client{}
req, err := http.NewRequest(method, protectedListURL, nil)
if err != nil {
return nil, errors.Err(err)
}
res, err := client.Do(req)
if err != nil {
return nil, errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode != http.StatusOK {
return nil, errors.Err("unexpected status code %d", res.StatusCode)
}
if err = json.NewDecoder(res.Body).Decode(&r); err != nil {
return nil, errors.Err(err)
}
if !r.Success {
return nil, errors.Prefix("file/list_protected API call", r.Error)
}
protectedMap := make(map[string]bool, len(r.Data))
for _, pc := range r.Data {
protectedMap[pc.SDHash] = true
}
err = protectedCache.Set("protected", protectedMap)
if err != nil {
return protectedMap, errors.Err(err)
}
return protectedMap, nil
}
var sf = singleflight.Group{}
func IsProtected(sdHash string) bool {
val, err, _ := sf.Do("protected", GetProtectedContent)
if err != nil {
return false
}
cachedMap, ok := val.(map[string]bool)
if !ok {
return false
}
return cachedMap[sdHash]
}
View file
@ -6,15 +6,16 @@ import (
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
"net"
"strconv"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/stop"
"github.com/lbryio/lbry.go/stream"
log "github.com/sirupsen/logrus"
)
@ -31,47 +32,47 @@ const (
maxBlobSize = stream.MaxBlobSize
)
var ErrBlobTooBig = errors.Base("blob must be at most %d bytes", maxBlobSize)
// Server is an instance of the reflector server. It houses the blob store and listener.
type Server struct {
Timeout time.Duration // timeout to read or write next message
StatLogger *log.Logger // logger to log stats
StatReportFrequency time.Duration // how often to log stats
EnableBlocklist bool // if true, blocklist checking and blob deletion will be enabled
underlyingStore store.BlobStore
outerStore store.BlobStore
grp *stop.Group
store store.BlobStore
grp *stop.Group
stats *Stats
}
// NewServer returns an initialized reflector server pointer.
func NewServer(underlying store.BlobStore, outer store.BlobStore) *Server {
func NewServer(store store.BlobStore) *Server {
return &Server{
Timeout: DefaultTimeout,
underlyingStore: underlying,
outerStore: outer,
grp: stop.New(),
Timeout: DefaultTimeout,
store: store,
grp: stop.New(),
}
}
// Shutdown shuts down the reflector server gracefully.
func (s *Server) Shutdown() {
log.Println("shutting down reflector server...")
s.stats.Shutdown()
s.grp.StopAndWait()
log.Println("reflector server stopped")
}
// Start starts the server to handle connections.
//Start starts the server to handle connections.
func (s *Server) Start(address string) error {
l, err := net.Listen(network, address)
if err != nil {
return errors.Err(err)
}
log.Println("reflector listening on " + address)
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Dec()
<-s.grp.Ch()
err := l.Close()
if err != nil {
@ -81,19 +82,20 @@ func (s *Server) Start(address string) error {
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "start").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "start").Dec()
s.listenAndServe(l)
s.grp.Done()
}()
s.stats = NewStatLogger("UPLOAD", s.StatLogger, s.StatReportFrequency, s.grp.Child())
if s.StatLogger != nil && s.StatReportFrequency > 0 {
s.stats.Start()
}
if s.EnableBlocklist {
if b, ok := s.underlyingStore.(store.Blocklister); ok {
if b, ok := s.store.(store.Blocklister); ok {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Dec()
s.enableBlocklist(b)
s.grp.Done()
}()
@ -116,9 +118,7 @@ func (s *Server) listenAndServe(listener net.Listener) {
log.Error(err)
} else {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
s.handleConn(conn)
s.grp.Done()
}()
@ -133,9 +133,7 @@ func (s *Server) handleConn(conn net.Conn) {
close(connNeedsClosing)
}()
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-handleconn").Dec()
defer s.grp.Done()
select {
case <-connNeedsClosing:
@ -175,10 +173,7 @@ func (s *Server) handleConn(conn net.Conn) {
}
func (s *Server) doError(conn net.Conn, err error) error {
if err == nil {
return nil
}
shouldLog := metrics.TrackError(metrics.DirectionUpload, err)
shouldLog := s.stats.AddError(err)
if shouldLog {
log.Errorln(errors.FullTrace(err))
}
@ -200,13 +195,13 @@ func (s *Server) receiveBlob(conn net.Conn) error {
}
var wantsBlob bool
if bl, ok := s.underlyingStore.(store.Blocklister); ok {
if bl, ok := s.store.(store.Blocklister); ok {
wantsBlob, err = bl.Wants(blobHash)
if err != nil {
return err
}
} else {
blobExists, err := s.underlyingStore.Has(blobHash)
blobExists, err := s.store.Has(blobHash)
if err != nil {
return err
}
@ -216,7 +211,7 @@ func (s *Server) receiveBlob(conn net.Conn) error {
var neededBlobs []string
if isSdBlob && !wantsBlob {
if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
if nbc, ok := s.store.(neededBlobChecker); ok {
neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
if err != nil {
return err
@ -259,17 +254,17 @@ func (s *Server) receiveBlob(conn net.Conn) error {
log.Debugln("Got blob " + blobHash[:8])
if isSdBlob {
err = s.outerStore.PutSD(blobHash, blob)
err = s.store.PutSD(blobHash, blob)
} else {
err = s.outerStore.Put(blobHash, blob)
err = s.store.Put(blobHash, blob)
}
if err != nil {
return err
}
metrics.MtrInBytesReflector.Add(float64(len(blob)))
metrics.BlobUploadCount.Inc()
s.stats.AddBlob()
if isSdBlob {
metrics.SDBlobUploadCount.Inc()
s.stats.AddStream()
}
return s.sendTransferResponse(conn, true, isSdBlob)
}
@ -316,7 +311,7 @@ func (s *Server) readBlobRequest(conn net.Conn) (int, string, bool, error) {
return blobSize, blobHash, isSdBlob, errors.Err("blob hash is empty")
}
if blobSize > maxBlobSize {
return blobSize, blobHash, isSdBlob, errors.Err(ErrBlobTooBig)
return blobSize, blobHash, isSdBlob, errors.Err("blob must be at most " + strconv.Itoa(maxBlobSize) + " bytes")
}
if blobSize == 0 {
return blobSize, blobHash, isSdBlob, errors.Err("0-byte blob received")
@ -366,7 +361,7 @@ func (s *Server) read(conn net.Conn, v interface{}) error {
dec := json.NewDecoder(conn)
err = dec.Decode(v)
if err != nil {
data, _ := io.ReadAll(dec.Buffered())
data, _ := ioutil.ReadAll(dec.Buffered())
if len(data) > 0 {
return errors.Err("%s. Data: %s", err.Error(), hex.EncodeToString(data))
}
View file
@ -9,10 +9,9 @@ import (
"testing"
"time"
"github.com/lbryio/lbry.go/dht/bits"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/davecgh/go-spew/spew"
"github.com/phayes/freeport"
)
@ -23,7 +22,7 @@ func startServerOnRandomPort(t *testing.T) (*Server, int) {
t.Fatal(err)
}
srv := NewServer(store.NewMemStore(), store.NewMemStore())
srv := NewServer(&store.MemoryBlobStore{})
err = srv.Start("127.0.0.1:" + strconv.Itoa(port))
if err != nil {
t.Fatal(err)
@ -120,7 +119,7 @@ func TestServer_Timeout(t *testing.T) {
t.Fatal(err)
}
srv := NewServer(store.NewMemStore(), store.NewMemStore())
srv := NewServer(&store.MemoryBlobStore{})
srv.Timeout = testTimeout
err = srv.Start("127.0.0.1:" + strconv.Itoa(port))
if err != nil {
@ -162,7 +161,7 @@ func TestServer_Timeout(t *testing.T) {
//}
type mockPartialStore struct {
*store.MemStore
store.MemoryBlobStore
missing []string
}
@ -182,7 +181,7 @@ func TestServer_PartialUpload(t *testing.T) {
missing[i] = bits.Rand().String()
}
st := store.BlobStore(&mockPartialStore{MemStore: store.NewMemStore(), missing: missing})
st := store.BlobStore(&mockPartialStore{missing: missing})
if _, ok := st.(neededBlobChecker); !ok {
t.Fatal("mock does not implement the relevant interface")
}
@ -191,7 +190,7 @@ func TestServer_PartialUpload(t *testing.T) {
t.Fatal(err)
}
srv := NewServer(st, st)
srv := NewServer(st)
err = srv.Start("127.0.0.1:" + strconv.Itoa(port))
if err != nil {
t.Fatal(err)

reflector/stats.go Normal file
View file
@ -0,0 +1,122 @@
package reflector
import (
"fmt"
"strings"
"sync"
"time"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/stop"
log "github.com/sirupsen/logrus"
)
// TODO: store daily stats too. and maybe other intervals
type Stats struct {
mu *sync.Mutex
blobs int
streams int
errors map[string]int
started bool
name string
logger *log.Logger
logFreq time.Duration
grp *stop.Group
}
func NewStatLogger(name string, logger *log.Logger, logFreq time.Duration, parentGrp *stop.Group) *Stats {
return &Stats{
mu: &sync.Mutex{},
grp: stop.New(parentGrp),
logger: logger,
logFreq: logFreq,
errors: make(map[string]int),
name: name,
}
}
func (s *Stats) Start() {
s.started = true
s.grp.Add(1)
go func() {
defer s.grp.Done()
s.runSlackLogger()
}()
}
func (s *Stats) Shutdown() {
if !s.started {
return
}
s.log()
s.grp.StopAndWait()
s.started = false
}
func (s *Stats) AddBlob() {
s.mu.Lock()
defer s.mu.Unlock()
s.blobs++
}
func (s *Stats) AddStream() {
s.mu.Lock()
defer s.mu.Unlock()
s.streams++
}
func (s *Stats) AddError(e error) (shouldLog bool) { // shouldLog is a hack, but whatever
if e == nil {
return
}
err := errors.Wrap(e, 0)
name := err.TypeName()
if strings.Contains(err.Error(), "i/o timeout") { // hit a read or write deadline
name = "i/o timeout"
} else if strings.Contains(err.Error(), "read: connection reset by peer") { // the other side closed the connection using TCP reset
name = "read conn reset"
} else if strings.Contains(err.Error(), "unexpected EOF") { // tried to read from closed pipe or socket
name = "unexpected EOF"
} else if strings.Contains(err.Error(), "write: broken pipe") { // tried to write to a pipe or socket that was closed by the peer
name = "write broken pipe"
} else {
shouldLog = true
}
s.mu.Lock()
defer s.mu.Unlock()
s.errors[name]++
return
}
func (s *Stats) runSlackLogger() {
t := time.NewTicker(s.logFreq)
for {
select {
case <-s.grp.Ch():
return
case <-t.C:
s.log()
}
}
}
func (s *Stats) log() {
s.mu.Lock()
blobs, streams := s.blobs, s.streams
s.blobs, s.streams = 0, 0
errStr := ""
for name, count := range s.errors {
errStr += fmt.Sprintf("%d %s, ", count, name)
delete(s.errors, name)
}
s.mu.Unlock()
if len(errStr) > 2 {
errStr = errStr[:len(errStr)-2] // trim last comma and space
}
s.logger.Printf("%s stats: %d blobs, %d streams, errors: %s", s.name, blobs, streams, errStr)
}
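Since `reflector/stats.go` is added whole in this changeset, here is a minimal, illustrative sketch of how a caller might wire up the Stats logger. It uses only the constructor and methods shown above; the logger name, report frequency, and the example error are arbitrary choices, not something taken from this diff.

```go
package main

import (
	"errors"
	"time"

	"github.com/lbryio/reflector.go/reflector"

	"github.com/lbryio/lbry.go/extras/stop"
	log "github.com/sirupsen/logrus"
)

func main() {
	grp := stop.New()

	// Report an upload summary every 5 minutes (name and frequency are arbitrary here).
	stats := reflector.NewStatLogger("UPLOAD", log.StandardLogger(), 5*time.Minute, grp)
	stats.Start()

	// During operation, count received blobs/streams and classify errors.
	stats.AddBlob()
	stats.AddStream()
	if shouldLog := stats.AddError(errors.New("example failure")); shouldLog {
		log.Errorln("unexpected error") // AddError returns false for the known noisy error classes
	}

	stats.Shutdown() // logs a final summary and stops the ticker goroutine
	grp.StopAndWait()
}
```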
View file
@ -1,17 +1,17 @@
package reflector
import (
"io/ioutil"
"os"
"path"
"sync"
"time"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/stop"
log "github.com/sirupsen/logrus"
)
@ -24,31 +24,27 @@ const (
errInc
)
type Summary struct {
Total, AlreadyStored, Sd, Blob, Err int
}
type Uploader struct {
db *db.SQL
store *store.DBBackedStore // could just be store.BlobStore interface
workers int
skipExistsCheck bool
deleteBlobsAfterUpload bool
stopper *stop.Group
countChan chan increment
db *db.SQL
store *store.DBBackedS3Store // could just be store.BlobStore interface
workers int
skipExistsCheck bool
stopper *stop.Group
countChan chan increment
count Summary
count struct {
total, alreadyStored, sd, blob, err int
}
}
func NewUploader(db *db.SQL, store *store.DBBackedStore, workers int, skipExistsCheck, deleteBlobsAfterUpload bool) *Uploader {
func NewUploader(db *db.SQL, store *store.DBBackedS3Store, workers int, skipExistsCheck bool) *Uploader {
return &Uploader{
db: db,
store: store,
workers: workers,
skipExistsCheck: skipExistsCheck,
deleteBlobsAfterUpload: deleteBlobsAfterUpload,
stopper: stop.New(),
countChan: make(chan increment),
db: db,
store: store,
workers: workers,
skipExistsCheck: skipExistsCheck,
stopper: stop.New(),
countChan: make(chan increment),
}
}
@ -63,34 +59,32 @@ func (u *Uploader) Upload(dirOrFilePath string) error {
return err
}
u.count.Total = len(paths)
u.count.total = len(paths)
hashes := make([]string, len(paths))
for i, p := range paths {
hashes[i] = path.Base(p)
}
log.Debug("checking for existing blobs")
log.Infoln("checking for existing blobs")
var exists map[string]bool
if !u.skipExistsCheck {
exists, err = u.db.HasBlobs(hashes, false)
exists, err = u.db.HasBlobs(hashes)
if err != nil {
return err
}
u.count.AlreadyStored = len(exists)
u.count.alreadyStored = len(exists)
}
log.Debugf("%d new blobs to upload", u.count.Total-u.count.AlreadyStored)
log.Infof("%d new blobs to upload", u.count.total-u.count.alreadyStored)
workerWG := sync.WaitGroup{}
pathChan := make(chan string)
for i := 0; i < u.workers; i++ {
workerWG.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "upload").Inc()
go func(i int) {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "upload").Dec()
defer workerWG.Done()
defer func(i int) { log.Debugf("worker %d quitting", i) }(i)
u.worker(pathChan)
@ -99,9 +93,7 @@ func (u *Uploader) Upload(dirOrFilePath string) error {
countWG := sync.WaitGroup{}
countWG.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "uploader").Inc()
go func() {
defer metrics.RoutinesQueue.WithLabelValues("reflector", "uploader").Dec()
defer countWG.Done()
u.counter()
}()
@ -125,14 +117,17 @@ Upload:
countWG.Wait()
u.stopper.Stop()
log.Debugf(
"upload stats: %d blobs total, %d already stored, %d SD blobs uploaded, %d content blobs uploaded, %d errors",
u.count.Total, u.count.AlreadyStored, u.count.Sd, u.count.Blob, u.count.Err,
)
log.Infoln("SUMMARY")
log.Infof("%d blobs total", u.count.total)
log.Infof("%d blobs already stored", u.count.alreadyStored)
log.Infof("%d SD blobs uploaded", u.count.sd)
log.Infof("%d content blobs uploaded", u.count.blob)
log.Infof("%d errors encountered", u.count.err)
return nil
}
// worker reads paths from a channel, uploads them, and optionally deletes them
// worker reads paths from a channel and uploads them
func (u *Uploader) worker(pathChan chan string) {
for {
select {
@ -146,11 +141,6 @@ func (u *Uploader) worker(pathChan chan string) {
err := u.uploadBlob(filepath)
if err != nil {
log.Errorln(err)
} else if u.deleteBlobsAfterUpload {
err = os.Remove(filepath)
if err != nil {
log.Errorln(errors.Prefix("deleting blob", err))
}
}
}
}
@ -164,9 +154,9 @@ func (u *Uploader) uploadBlob(filepath string) (err error) {
}
}()
blob, err := os.ReadFile(filepath)
blob, err := ioutil.ReadFile(filepath)
if err != nil {
return errors.Err(err)
return err
}
hash := BlobHash(blob)
@ -175,17 +165,17 @@ func (u *Uploader) uploadBlob(filepath string) (err error) {
}
if IsValidJSON(blob) {
log.Debugf("uploading SD blob %s", hash)
log.Debugf("Uploading SD blob %s", hash)
err := u.store.PutSD(hash, blob)
if err != nil {
return errors.Prefix("uploading SD blob "+hash, err)
return errors.Prefix("Uploading SD blob "+hash, err)
}
u.inc(sdInc)
} else {
log.Debugf("uploading blob %s", hash)
log.Debugf("Uploading blob %s", hash)
err = u.store.Put(hash, blob)
if err != nil {
return errors.Prefix("uploading blob "+hash, err)
return errors.Prefix("Uploading blob "+hash, err)
}
u.inc(blobInc)
}
@ -208,23 +198,19 @@ func (u *Uploader) counter() {
}
switch incrementType {
case sdInc:
u.count.Sd++
u.count.sd++
case blobInc:
u.count.Blob++
u.count.blob++
case errInc:
u.count.Err++
u.count.err++
}
}
if (u.count.Sd+u.count.Blob)%50 == 0 {
log.Debugf("%d of %d done (%s elapsed, %.3fs per blob)", u.count.Sd+u.count.Blob, u.count.Total-u.count.AlreadyStored, time.Since(start).String(), time.Since(start).Seconds()/float64(u.count.Sd+u.count.Blob))
if (u.count.sd+u.count.blob)%50 == 0 {
log.Infof("%d of %d done (%s elapsed, %.3fs per blob)", u.count.sd+u.count.blob, u.count.total-u.count.alreadyStored, time.Since(start).String(), time.Since(start).Seconds()/float64(u.count.sd+u.count.blob))
}
}
}
func (u *Uploader) GetSummary() Summary {
return u.count
}
func (u *Uploader) inc(t increment) {
select {
case u.countChan <- t:
View file
@ -1,26 +0,0 @@
#!/usr/bin/env bash
err=0
trap 'err=1' ERR
# All the .go files, excluding auto generated folders
GO_FILES=$(find . -iname '*.go' -type f)
(
go install golang.org/x/tools/cmd/goimports@latest # Used in build script for generated files
# go install golang.org/x/lint/golint@latest # Linter
go install github.com/jgautheron/gocyclo@latest # Check against high complexity
go install github.com/mdempsky/unconvert@latest # Identifies unnecessary type conversions
go install github.com/kisielk/errcheck@latest # Checks for unhandled errors
go install github.com/opennota/check/cmd/varcheck@latest # Checks for unused vars
go install github.com/opennota/check/cmd/structcheck@latest # Checks for unused fields in structs
)
echo "Running varcheck..." && varcheck $(go list ./...)
echo "Running structcheck..." && structcheck $(go list ./...)
# go vet is the official Go static analyzer
echo "Running go vet..." && go vet $(go list ./...)
# checks for unhandled errors
echo "Running errcheck..." && errcheck $(go list ./...)
# check for unnecessary conversions - ignore autogen code
echo "Running unconvert..." && unconvert -v $(go list ./...)
echo "Running gocyclo..." && gocyclo -ignore "_test" -avg -over 28 $GO_FILES
#echo "Running golint..." && golint -set_exit_status $(go list ./...)
test $err = 0 # Return non-zero if any command failed
View file
@ -1,105 +0,0 @@
package http
import (
"net/http"
"sync"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
)
func (s *Server) getBlob(c *gin.Context) {
waiter := &sync.WaitGroup{}
waiter.Add(1)
enqueue(&blobRequest{c: c, finished: waiter})
waiter.Wait()
}
func (s *Server) HandleGetBlob(c *gin.Context) {
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %v", r)
}
}()
start := time.Now()
hash := c.Query("hash")
edgeToken := c.Query("edge_token")
if reflector.IsProtected(hash) && edgeToken != s.edgeToken {
_ = c.Error(errors.Err("requested blob is protected"))
c.String(http.StatusForbidden, "requested blob is protected")
return
}
if s.missesCache.Has(hash) {
serialized, err := shared.NewBlobTrace(time.Since(start), "http").Serialize()
c.Header("Via", serialized)
if err != nil {
_ = c.Error(errors.Err(err))
c.String(http.StatusInternalServerError, err.Error())
return
}
c.AbortWithStatus(http.StatusNotFound)
return
}
blob, trace, err := s.store.Get(hash)
if err != nil {
serialized, serializeErr := trace.Serialize()
if serializeErr != nil {
_ = c.Error(errors.Prefix(serializeErr.Error(), err))
c.String(http.StatusInternalServerError, errors.Prefix(serializeErr.Error(), err).Error())
return
}
c.Header("Via", serialized)
if errors.Is(err, store.ErrBlobNotFound) {
_ = s.missesCache.Set(hash, true)
c.AbortWithStatus(http.StatusNotFound)
return
}
_ = c.Error(err)
c.String(http.StatusInternalServerError, err.Error())
return
}
serialized, err := trace.Serialize()
if err != nil {
_ = c.Error(err)
c.String(http.StatusInternalServerError, err.Error())
return
}
metrics.MtrOutBytesHttp.Add(float64(len(blob)))
metrics.BlobDownloadCount.Inc()
metrics.HttpDownloadCount.Inc()
c.Header("Via", serialized)
c.Header("Content-Disposition", "filename="+hash)
c.Data(http.StatusOK, "application/octet-stream", blob)
}
func (s *Server) hasBlob(c *gin.Context) {
hash := c.Query("hash")
has, err := s.store.Has(hash)
if err != nil {
_ = c.Error(err)
c.String(http.StatusInternalServerError, err.Error())
return
}
if has {
c.Status(http.StatusNoContent)
return
}
c.Status(http.StatusNotFound)
}
func (s *Server) recoveryHandler(c *gin.Context, err interface{}) {
c.JSON(500, gin.H{
"title": "Error",
"err": err,
})
}
View file
@ -1,82 +0,0 @@
package http
import (
"context"
"net/http"
"time"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/bluele/gcache"
nice "github.com/ekyoung/gin-nice-recovery"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
)
// Server is an instance of a peer server that houses the listener and store.
type Server struct {
store store.BlobStore
grp *stop.Group
concurrentRequests int
missesCache gcache.Cache
edgeToken string
}
// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore, requestQueueSize int, edgeToken string) *Server {
return &Server{
store: store,
grp: stop.New(),
concurrentRequests: requestQueueSize,
missesCache: gcache.New(2000).Expiration(5 * time.Minute).ARC().Build(),
edgeToken: edgeToken,
}
}
// Shutdown gracefully shuts down the peer server.
func (s *Server) Shutdown() {
log.Debug("shutting down HTTP server")
s.grp.StopAndWait()
log.Debug("HTTP server stopped")
}
// Start starts the server listener to handle connections.
func (s *Server) Start(address string) error {
gin.SetMode(gin.ReleaseMode)
router := gin.New()
router.Use(gin.Logger())
// Install nice.Recovery, passing the handler to call after recovery
router.Use(nice.Recovery(s.recoveryHandler))
router.GET("/blob", s.getBlob)
router.HEAD("/blob", s.hasBlob)
srv := &http.Server{
Addr: address,
Handler: router,
}
go s.listenForShutdown(srv)
go InitWorkers(s, s.concurrentRequests)
// Initializing the server in a goroutine so that
// it won't block the graceful shutdown handling below
s.grp.Add(1)
go func() {
defer s.grp.Done()
log.Println("HTTP server listening on " + address)
if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.Fatalf("listen: %s\n", err)
}
}()
return nil
}
func (s *Server) listenForShutdown(listener *http.Server) {
<-s.grp.Ch()
// The context is used to inform the server it has 5 seconds to finish
// the request it is currently handling
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := listener.Shutdown(ctx); err != nil {
log.Fatal("Server forced to shutdown:", err)
}
}
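As an aside (not part of this changeset): a minimal sketch of starting the HTTP blob server above over an in-memory store. The `github.com/lbryio/reflector.go/server/http` import path and the listen address are assumptions, since neither appears in the diff; the constructor and methods are the ones shown, and the queue size of 200 mirrors the default `--request-queue-size` from the README.

```go
package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/lbryio/reflector.go/store"

	// assumed import path for the "package http" shown above
	blobhttp "github.com/lbryio/reflector.go/server/http"
)

func main() {
	// Serves GET /blob?hash=<hash> and HEAD /blob?hash=<hash> from an in-memory store.
	// 200 mirrors the default --request-queue-size; the edge token is left empty.
	srv := blobhttp.NewServer(store.NewMemStore(), 200, "")
	if err := srv.Start("0.0.0.0:5569"); err != nil { // listen address is arbitrary for this sketch
		panic(err)
	}

	// Block until interrupted, then shut down gracefully.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
	<-sig
	srv.Shutdown()
}
```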
View file
@ -1,46 +0,0 @@
package http
import (
"sync"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/gin-gonic/gin"
)
type blobRequest struct {
c *gin.Context
finished *sync.WaitGroup
}
var getReqCh = make(chan *blobRequest, 20000)
func InitWorkers(server *Server, workers int) {
stopper := stop.New(server.grp)
for i := 0; i < workers; i++ {
metrics.RoutinesQueue.WithLabelValues("http", "worker").Inc()
go func(worker int) {
defer metrics.RoutinesQueue.WithLabelValues("http", "worker").Dec()
for {
select {
case <-stopper.Ch():
case r := <-getReqCh:
process(server, r)
metrics.HttpBlobReqQueue.Dec()
}
}
}(i)
}
}
func enqueue(b *blobRequest) {
metrics.HttpBlobReqQueue.Inc()
getReqCh <- b
}
func process(server *Server, r *blobRequest) {
server.HandleGetBlob(r.c)
r.finished.Done()
}
View file
@ -1,141 +0,0 @@
package http3
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"net/http"
"sync"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/quic-go/quic-go/http3"
log "github.com/sirupsen/logrus"
)
// Client is an instance of a client connected to a server.
type Client struct {
Timeout time.Duration
conn *http.Client
roundTripper *http3.RoundTripper
ServerAddr string
}
// Close closes the connection with the client.
func (c *Client) Close() error {
c.conn.CloseIdleConnections()
return c.roundTripper.Close()
}
// GetStream gets a stream
func (c *Client) GetStream(sdHash string, blobCache store.BlobStore) (stream.Stream, error) {
var sd stream.SDBlob
b, _, err := c.GetBlob(sdHash)
if err != nil {
return nil, err
}
err = sd.FromBlob(b)
if err != nil {
return nil, err
}
s := make(stream.Stream, len(sd.BlobInfos)+1-1) // +1 for sd blob, -1 for last null blob
s[0] = b
for i := 0; i < len(sd.BlobInfos)-1; i++ {
var trace shared.BlobTrace
s[i+1], trace, err = c.GetBlob(hex.EncodeToString(sd.BlobInfos[i].BlobHash))
if err != nil {
return nil, err
}
log.Debug(trace.String())
}
return s, nil
}
// HasBlob checks if the blob is available
func (c *Client) HasBlob(hash string) (bool, error) {
resp, err := c.conn.Get(fmt.Sprintf("https://%s/has/%s", c.ServerAddr, hash))
if err != nil {
return false, errors.Err(err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode == http.StatusOK {
return true, nil
}
if resp.StatusCode == http.StatusNotFound {
return false, nil
}
return false, errors.Err("non 200 status code returned: %d", resp.StatusCode)
}
// GetBlob gets a blob
func (c *Client) GetBlob(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
resp, err := c.conn.Get(fmt.Sprintf("https://%s/get/%s?trace=true", c.ServerAddr, hash))
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err(err)
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode == http.StatusNotFound {
fmt.Printf("%s blob not found %d\n", hash, resp.StatusCode)
return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err(store.ErrBlobNotFound)
} else if resp.StatusCode != http.StatusOK {
return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err("non 200 status code returned: %d", resp.StatusCode)
}
tmp := getBuffer()
defer putBuffer(tmp)
serialized := resp.Header.Get("Via")
trace := shared.NewBlobTrace(time.Since(start), "http3")
if serialized != "" {
parsedTrace, err := shared.Deserialize(serialized)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "http3"), err
}
trace = *parsedTrace
}
written, err := io.Copy(tmp, resp.Body)
if err != nil {
return nil, trace.Stack(time.Since(start), "http3"), errors.Err(err)
}
blob := make([]byte, written)
copy(blob, tmp.Bytes())
metrics.MtrInBytesUdp.Add(float64(len(blob)))
return blob, trace.Stack(time.Since(start), "http3"), nil
}
// buffer pool to reduce GC
// https://www.captaincodeman.com/2017/06/02/golang-buffer-pool-gotcha
var buffers = sync.Pool{
// New is called when a new instance is needed
New: func() interface{} {
buf := make([]byte, 0, stream.MaxBlobSize)
return bytes.NewBuffer(buf)
},
}
// getBuffer fetches a buffer from the pool
func getBuffer() *bytes.Buffer {
return buffers.Get().(*bytes.Buffer)
}
// putBuffer returns a buffer to the pool
func putBuffer(buf *bytes.Buffer) {
buf.Reset()
buffers.Put(buf)
}
View file
@ -1,216 +0,0 @@
package http3
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"math/big"
"net/http"
"strconv"
"sync"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/gorilla/mux"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3"
log "github.com/sirupsen/logrus"
)
// Server is an instance of a peer server that houses the listener and store.
type Server struct {
store store.BlobStore
grp *stop.Group
concurrentRequests int
}
// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore, requestQueueSize int) *Server {
return &Server{
store: store,
grp: stop.New(),
concurrentRequests: requestQueueSize,
}
}
// Shutdown gracefully shuts down the peer server.
func (s *Server) Shutdown() {
log.Debug("shutting down http3 peer server")
s.grp.StopAndWait()
log.Debug("http3 peer server stopped")
}
func (s *Server) logError(e error) {
if e == nil {
return
}
shouldLog := metrics.TrackError(metrics.DirectionDownload, e)
if shouldLog {
log.Errorln(errors.FullTrace(e))
}
}
type availabilityResponse struct {
LbrycrdAddress string `json:"lbrycrd_address"`
IsAvailable bool `json:"is_available"`
}
// Start starts the server listener to handle connections.
func (s *Server) Start(address string) error {
log.Println("HTTP3 peer listening on " + address)
window500M := 500 * 1 << 20
quicConf := &quic.Config{
MaxStreamReceiveWindow: uint64(window500M),
MaxConnectionReceiveWindow: uint64(window500M),
EnableDatagrams: true,
HandshakeIdleTimeout: 4 * time.Second,
MaxIdleTimeout: 20 * time.Second,
}
r := mux.NewRouter()
r.HandleFunc("/get/{hash}", func(w http.ResponseWriter, r *http.Request) {
waiter := &sync.WaitGroup{}
waiter.Add(1)
enqueue(&blobRequest{request: r, reply: w, finished: waiter})
waiter.Wait()
})
r.HandleFunc("/has/{hash}", func(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
requestedBlob := vars["hash"]
blobExists, err := s.store.Has(requestedBlob)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
s.logError(err)
return
}
if !blobExists {
w.WriteHeader(http.StatusNotFound)
}
// LbrycrdAddress to be used when paying for data. Not implemented yet.
const LbrycrdAddress = "bJxKvpD96kaJLriqVajZ7SaQTsWWyrGQct"
resp, err := json.Marshal(availabilityResponse{
LbrycrdAddress: LbrycrdAddress,
IsAvailable: blobExists,
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
s.logError(err)
return
}
_, err = w.Write(resp)
if err != nil {
s.logError(err)
}
})
server := http3.Server{
Addr: address,
Handler: r,
TLSConfig: generateTLSConfig(),
QuicConfig: quicConf,
}
go InitWorkers(s, s.concurrentRequests)
go s.listenForShutdown(&server)
s.grp.Add(1)
go func() {
s.listenAndServe(&server)
s.grp.Done()
}()
return nil
}
// Setup a bare-bones TLS config for the server
func generateTLSConfig() *tls.Config {
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
panic(err)
}
template := x509.Certificate{SerialNumber: big.NewInt(1)}
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
if err != nil {
panic(err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
panic(err)
}
return &tls.Config{
Certificates: []tls.Certificate{tlsCert},
NextProtos: []string{"http3-reflector-server"},
}
}
func (s *Server) listenAndServe(server *http3.Server) {
err := server.ListenAndServe()
if err != nil && err != quic.ErrServerClosed {
log.Errorln(errors.FullTrace(err))
}
}
func (s *Server) listenForShutdown(listener *http3.Server) {
<-s.grp.Ch()
err := listener.Close()
if err != nil {
log.Error("error closing listener for peer server - ", err)
}
}
func (s *Server) HandleGetBlob(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
requestedBlob := vars["hash"]
traceParam := r.URL.Query().Get("trace")
var err error
wantsTrace := false
if traceParam != "" {
wantsTrace, err = strconv.ParseBool(traceParam)
if err != nil {
wantsTrace = false
}
}
if reflector.IsProtected(requestedBlob) {
http.Error(w, "requested blob is protected", http.StatusForbidden)
return
}
blob, trace, err := s.store.Get(requestedBlob)
if wantsTrace {
serialized, err := trace.Serialize()
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
w.Header().Add("Via", serialized)
log.Debug(trace.String())
}
if err != nil {
if errors.Is(err, store.ErrBlobNotFound) {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
fmt.Printf("%s: %s", requestedBlob, errors.FullTrace(err))
s.logError(err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
_, err = w.Write(blob)
if err != nil {
s.logError(err)
}
metrics.MtrOutBytesUdp.Add(float64(len(blob)))
metrics.BlobDownloadCount.Inc()
metrics.Http3DownloadCount.Inc()
}
View file
@ -1,117 +0,0 @@
package http3
import (
"crypto/tls"
"crypto/x509"
"net/http"
"strings"
"sync"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3"
)
// Store is a blob store that gets blobs from a peer.
// It satisfies the store.BlobStore interface but cannot put or delete blobs.
type Store struct {
opts StoreOpts
NotFoundCache *sync.Map
}
// StoreOpts allows to set options for a new Store.
type StoreOpts struct {
Address string
Timeout time.Duration
}
// NewStore makes a new peer store.
func NewStore(opts StoreOpts) *Store {
return &Store{opts: opts, NotFoundCache: &sync.Map{}}
}
func (p *Store) getClient() (*Client, error) {
var qconf quic.Config
window500M := 500 * 1 << 20
qconf.MaxStreamReceiveWindow = uint64(window500M)
qconf.MaxConnectionReceiveWindow = uint64(window500M)
qconf.EnableDatagrams = true
qconf.HandshakeIdleTimeout = 4 * time.Second
qconf.MaxIdleTimeout = 20 * time.Second
pool, err := x509.SystemCertPool()
if err != nil {
return nil, err
}
roundTripper := &http3.RoundTripper{
TLSClientConfig: &tls.Config{
RootCAs: pool,
InsecureSkipVerify: true,
},
QuicConfig: &qconf,
}
connection := &http.Client{
Transport: roundTripper,
}
c := &Client{
conn: connection,
roundTripper: roundTripper,
ServerAddr: p.opts.Address,
}
return c, errors.Prefix("connection error", err)
}
func (p *Store) Name() string { return "http3" }
// Has asks the peer if they have a hash
func (p *Store) Has(hash string) (bool, error) {
c, err := p.getClient()
if err != nil {
return false, err
}
defer func() { _ = c.Close() }()
return c.HasBlob(hash)
}
// Get downloads the blob from the peer
func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
if lastChecked, ok := p.NotFoundCache.Load(hash); ok {
if lastChecked.(time.Time).After(time.Now().Add(-5 * time.Minute)) {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()+"-notfoundcache"), store.ErrBlobNotFound
}
}
c, err := p.getClient()
if err != nil && strings.Contains(err.Error(), "blob not found") {
p.NotFoundCache.Store(hash, time.Now())
}
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
}
defer func() { _ = c.Close() }()
return c.GetBlob(hash)
}
// Put is not supported
func (p *Store) Put(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// PutSD is not supported
func (p *Store) PutSD(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// Delete is not supported
func (p *Store) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown is not supported
func (p *Store) Shutdown() {
}
View file
@ -1,46 +0,0 @@
package http3
import (
"net/http"
"sync"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/lbry.go/v2/extras/stop"
)
type blobRequest struct {
request *http.Request
reply http.ResponseWriter
finished *sync.WaitGroup
}
var getReqCh = make(chan *blobRequest, 20000)
func InitWorkers(server *Server, workers int) {
stopper := stop.New(server.grp)
for i := 0; i < workers; i++ {
metrics.RoutinesQueue.WithLabelValues("http3", "worker").Inc()
go func(worker int) {
defer metrics.RoutinesQueue.WithLabelValues("http3", "worker").Dec()
for {
select {
case <-stopper.Ch():
case r := <-getReqCh:
metrics.Http3BlobReqQueue.Dec()
process(server, r)
}
}
}(i)
}
}
func enqueue(b *blobRequest) {
metrics.Http3BlobReqQueue.Inc()
getReqCh <- b
}
func process(server *Server, r *blobRequest) {
server.HandleGetBlob(r.reply, r.request)
r.finished.Done()
}
View file
@ -1,207 +0,0 @@
package peer
import (
"bufio"
"encoding/hex"
"encoding/json"
"io"
"net"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
)
// Client is an instance of a client connected to a server.
type Client struct {
Timeout time.Duration
conn net.Conn
buf *bufio.Reader
connected bool
}
// Connect connects to a specific peer and errors if it cannot be contacted.
func (c *Client) Connect(address string) error {
var err error
if c.Timeout == 0 {
c.Timeout = 5 * time.Second
}
c.conn, err = net.Dial("tcp4", address)
if err != nil {
return err
}
c.connected = true
c.buf = bufio.NewReader(c.conn)
return nil
}
// Close closes the connection with the client.
func (c *Client) Close() error {
c.connected = false
return c.conn.Close()
}
// GetStream gets a stream
func (c *Client) GetStream(sdHash string, blobCache store.BlobStore) (stream.Stream, error) {
if !c.connected {
return nil, errors.Err("not connected")
}
var sd stream.SDBlob
b, trace, err := c.GetBlob(sdHash)
if err != nil {
return nil, err
}
log.Debug(trace.String())
err = sd.FromBlob(b)
if err != nil {
return nil, err
}
s := make(stream.Stream, len(sd.BlobInfos)+1-1) // +1 for sd blob, -1 for last null blob
s[0] = b
for i := 0; i < len(sd.BlobInfos)-1; i++ {
s[i+1], trace, err = c.GetBlob(hex.EncodeToString(sd.BlobInfos[i].BlobHash))
if err != nil {
return nil, err
}
log.Debug(trace.String())
}
return s, nil
}
// HasBlob checks if the blob is available
func (c *Client) HasBlob(hash string) (bool, error) {
if !c.connected {
return false, errors.Err("not connected")
}
sendRequest, err := json.Marshal(availabilityRequest{
RequestedBlobs: []string{hash},
})
if err != nil {
return false, err
}
err = c.write(sendRequest)
if err != nil {
return false, err
}
var resp availabilityResponse
err = c.read(&resp)
if err != nil {
return false, err
}
for _, h := range resp.AvailableBlobs {
if h == hash {
return true, nil
}
}
return false, nil
}
// GetBlob gets a blob
func (c *Client) GetBlob(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
if !c.connected {
return nil, shared.NewBlobTrace(time.Since(start), "tcp"), errors.Err("not connected")
}
sendRequest, err := json.Marshal(blobRequest{
RequestedBlob: hash,
})
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "tcp"), err
}
err = c.write(sendRequest)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "tcp"), err
}
var resp blobResponse
err = c.read(&resp)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "tcp"), err
}
trace := shared.NewBlobTrace(time.Since(start), "tcp")
if resp.RequestTrace != nil {
trace = *resp.RequestTrace
}
if resp.IncomingBlob.Error != "" {
return nil, trace, errors.Prefix(hash[:8], resp.IncomingBlob.Error)
}
if resp.IncomingBlob.BlobHash != hash {
return nil, trace.Stack(time.Since(start), "tcp"), errors.Prefix(hash[:8], "blob hash in response does not match requested hash")
}
if resp.IncomingBlob.Length <= 0 {
return nil, trace, errors.Prefix(hash[:8], "length reported as <= 0")
}
log.Debugf("receiving blob %s from %s", hash[:8], c.conn.RemoteAddr())
blob, err := c.readRawBlob(resp.IncomingBlob.Length)
if err != nil {
return nil, (*resp.RequestTrace).Stack(time.Since(start), "tcp"), err
}
metrics.MtrInBytesTcp.Add(float64(len(blob)))
return blob, trace.Stack(time.Since(start), "tcp"), nil
}
func (c *Client) read(v interface{}) error {
err := c.conn.SetReadDeadline(time.Now().Add(c.Timeout))
if err != nil {
return errors.Err(err)
}
m, err := readNextMessage(c.buf)
if err != nil {
return err
}
log.Debugf("read %d bytes from %s", len(m), c.conn.RemoteAddr())
err = json.Unmarshal(m, v)
return errors.Err(err)
}
func (c *Client) readRawBlob(blobSize int) ([]byte, error) {
err := c.conn.SetReadDeadline(time.Now().Add(c.Timeout))
if err != nil {
return nil, errors.Err(err)
}
blob := make([]byte, blobSize)
n, err := io.ReadFull(c.buf, blob)
log.Debugf("read %d bytes from %s", n, c.conn.RemoteAddr())
return blob, errors.Err(err)
}
func (c *Client) write(b []byte) error {
err := c.conn.SetWriteDeadline(time.Now().Add(c.Timeout))
if err != nil {
return errors.Err(err)
}
log.Debugf("writing %d bytes to %s", len(b), c.conn.RemoteAddr())
n, err := c.conn.Write(b)
if err == nil && n != len(b) {
err = io.ErrShortWrite
}
return errors.Err(err)
}
View file
@ -1,82 +0,0 @@
package peer
import (
"strings"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
// Store is a blob store that gets blobs from a peer.
// It satisfies the store.BlobStore interface but cannot put or delete blobs.
type Store struct {
opts StoreOpts
}
// StoreOpts allows to set options for a new Store.
type StoreOpts struct {
Address string
Timeout time.Duration
}
// NewStore makes a new peer store.
func NewStore(opts StoreOpts) *Store {
return &Store{opts: opts}
}
func (p *Store) getClient() (*Client, error) {
c := &Client{Timeout: p.opts.Timeout}
err := c.Connect(p.opts.Address)
return c, errors.Prefix("connection error", err)
}
func (p *Store) Name() string { return "peer" }
// Has asks the peer if they have a hash
func (p *Store) Has(hash string) (bool, error) {
c, err := p.getClient()
if err != nil {
return false, err
}
defer func() { _ = c.Close() }()
return c.HasBlob(hash)
}
// Get downloads the blob from the peer
func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
c, err := p.getClient()
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
}
defer func() { _ = c.Close() }()
blob, trace, err := c.GetBlob(hash)
if err != nil && strings.Contains(err.Error(), "blob not found") {
return nil, trace, store.ErrBlobNotFound
}
return blob, trace, err
}
// Put is not supported
func (p *Store) Put(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// PutSD is not supported
func (p *Store) PutSD(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// Delete is not supported
func (p *Store) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown is not supported
func (p *Store) Shutdown() {
}
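For context (not part of this changeset): a minimal sketch of fetching a blob through this read-only peer store. The import path is an assumption, since the diff shows only `package peer`; the host and blob hash are placeholders, and 5567 matches the default `--tcp-peer-port` from the README.

```go
package main

import (
	"fmt"
	"time"

	// assumed import path; the diff shows only "package peer"
	"github.com/lbryio/reflector.go/peer"
)

func main() {
	// A read-only BlobStore backed by a remote reflector's TCP peer protocol.
	p := peer.NewStore(peer.StoreOpts{
		Address: "reflector.example.org:5567", // placeholder host; 5567 is the default --tcp-peer-port
		Timeout: 30 * time.Second,
	})

	hash := "0123456789abcdef" // placeholder blob hash
	blob, trace, err := p.Get(hash)
	if err != nil {
		panic(err) // store.ErrBlobNotFound is returned when the peer does not have the blob
	}
	fmt.Printf("fetched %d bytes\n%s", len(blob), trace.String())
}
```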
View file
@ -1,6 +0,0 @@
package shared
import "github.com/lbryio/lbry.go/v2/extras/errors"
// ErrNotImplemented is the standard error returned when a store does not implement a method of the store interface
var ErrNotImplemented = errors.Base("this store does not implement this method")
View file
@ -1,82 +0,0 @@
package shared
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
)
type BlobStack struct {
Timing time.Duration `json:"timing"`
OriginName string `json:"origin_name"`
HostName string `json:"host_name"`
}
type BlobTrace struct {
Stacks []BlobStack `json:"stacks"`
}
var hostName *string
func getHostName() string {
if hostName == nil {
hn, err := os.Hostname()
if err != nil {
hn = "unknown"
}
hostName = &hn
}
return *hostName
}
func (b *BlobTrace) Stack(timing time.Duration, originName string) BlobTrace {
b.Stacks = append(b.Stacks, BlobStack{
Timing: timing,
OriginName: originName,
HostName: getHostName(),
})
return *b
}
func (b *BlobTrace) Merge(otherTrace BlobTrace) BlobTrace {
b.Stacks = append(b.Stacks, otherTrace.Stacks...)
return *b
}
func NewBlobTrace(timing time.Duration, originName string) BlobTrace {
b := BlobTrace{}
b.Stacks = append(b.Stacks, BlobStack{
Timing: timing,
OriginName: originName,
HostName: getHostName(),
})
return b
}
func (b BlobTrace) String() string {
var fullTrace string
for i, stack := range b.Stacks {
delta := time.Duration(0)
if i > 0 {
delta = stack.Timing - b.Stacks[i-1].Timing
}
fullTrace += fmt.Sprintf("[%d](%s) origin: %s - timing: %s - delta: %s\n", i, stack.HostName, stack.OriginName, stack.Timing.String(), delta.String())
}
return fullTrace
}
func (b BlobTrace) Serialize() (string, error) {
t, err := json.Marshal(b)
if err != nil {
return "", errors.Err(err)
}
return string(t), nil
}
func Deserialize(serializedData string) (*BlobTrace, error) {
var trace BlobTrace
err := json.Unmarshal([]byte(serializedData), &trace)
if err != nil {
return nil, errors.Err(err)
}
return &trace, nil
}
View file
@ -1,36 +0,0 @@
package shared
import (
"testing"
"time"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/stretchr/testify/assert"
)
func TestBlobTrace_Serialize(t *testing.T) {
hostName = util.PtrToString("test_machine")
stack := NewBlobTrace(10*time.Second, "test")
stack.Stack(20*time.Second, "test2")
stack.Stack(30*time.Second, "test3")
serialized, err := stack.Serialize()
assert.NoError(t, err)
t.Log(serialized)
expected := "{\"stacks\":[{\"timing\":10000000000,\"origin_name\":\"test\",\"host_name\":\"test_machine\"},{\"timing\":20000000000,\"origin_name\":\"test2\",\"host_name\":\"test_machine\"},{\"timing\":30000000000,\"origin_name\":\"test3\",\"host_name\":\"test_machine\"}]}"
assert.Equal(t, expected, serialized)
}
func TestBlobTrace_Deserialize(t *testing.T) {
hostName = util.PtrToString("test_machine")
serialized := "{\"stacks\":[{\"timing\":10000000000,\"origin_name\":\"test\"},{\"timing\":20000000000,\"origin_name\":\"test2\"},{\"timing\":30000000000,\"origin_name\":\"test3\"}]}"
stack, err := Deserialize(serialized)
assert.NoError(t, err)
assert.Len(t, stack.Stacks, 3)
assert.Equal(t, stack.Stacks[0].Timing, 10*time.Second)
assert.Equal(t, stack.Stacks[1].Timing, 20*time.Second)
assert.Equal(t, stack.Stacks[2].Timing, 30*time.Second)
assert.Equal(t, stack.Stacks[0].OriginName, "test")
assert.Equal(t, stack.Stacks[1].OriginName, "test2")
assert.Equal(t, stack.Stacks[2].OriginName, "test3")
}
View file
@ -1,18 +0,0 @@
//go:build linux
// +build linux
package store
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(ts.Sec, ts.Nsec)
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
}
View file
@ -1,12 +0,0 @@
// +build !linux
package store
import (
"os"
"time"
)
func atime(fi os.FileInfo) time.Time {
return fi.ModTime()
}
View file
@ -1,107 +0,0 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
)
// CachingStore combines two stores, typically a local and a remote store, to improve performance.
// Accessed blobs are stored in and retrieved from the cache. If they are not in the cache, they
// are retrieved from the origin and cached. Puts are cached and also forwarded to the origin.
type CachingStore struct {
origin, cache BlobStore
component string
}
// NewCachingStore makes a new caching disk store and returns a pointer to it.
func NewCachingStore(component string, origin, cache BlobStore) *CachingStore {
return &CachingStore{
component: component,
origin: WithSingleFlight(component, origin),
cache: WithSingleFlight(component, cache),
}
}
const nameCaching = "caching"
// Name is the cache type name
func (c *CachingStore) Name() string { return nameCaching }
// Has checks the cache and then the origin for a hash. It returns true if either store has it.
func (c *CachingStore) Has(hash string) (bool, error) {
has, err := c.cache.Has(hash)
if has || err != nil {
return has, err
}
return c.origin.Has(hash)
}
// Get tries to get the blob from the cache first, falling back to the origin. If the blob comes
// from the origin, it is also stored in the cache.
func (c *CachingStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
blob, trace, err := c.cache.Get(hash)
if err == nil || !errors.Is(err, ErrBlobNotFound) {
metrics.CacheHitCount.With(metrics.CacheLabels(c.cache.Name(), c.component)).Inc()
rate := float64(len(blob)) / 1024 / 1024 / time.Since(start).Seconds()
metrics.CacheRetrievalSpeed.With(map[string]string{
metrics.LabelCacheType: c.cache.Name(),
metrics.LabelComponent: c.component,
metrics.LabelSource: "cache",
}).Set(rate)
return blob, trace.Stack(time.Since(start), c.Name()), err
}
metrics.CacheMissCount.With(metrics.CacheLabels(c.cache.Name(), c.component)).Inc()
blob, trace, err = c.origin.Get(hash)
if err != nil {
return nil, trace.Stack(time.Since(start), c.Name()), err
}
// do not do this async unless you're prepared to deal with mayhem
err = c.cache.Put(hash, blob)
if err != nil {
log.Errorf("error saving blob to underlying cache: %s", errors.FullTrace(err))
}
return blob, trace.Stack(time.Since(start), c.Name()), nil
}
// Put stores the blob in the origin and the cache
func (c *CachingStore) Put(hash string, blob stream.Blob) error {
err := c.origin.Put(hash, blob)
if err != nil {
return err
}
return c.cache.Put(hash, blob)
}
// PutSD stores the sd blob in the origin and the cache
func (c *CachingStore) PutSD(hash string, blob stream.Blob) error {
err := c.origin.PutSD(hash, blob)
if err != nil {
return err
}
return c.cache.PutSD(hash, blob)
}
// Delete deletes the blob from the origin and the cache
func (c *CachingStore) Delete(hash string) error {
err := c.origin.Delete(hash)
if err != nil {
return err
}
return c.cache.Delete(hash)
}
// Shutdown shuts down the store gracefully
func (c *CachingStore) Shutdown() {
c.origin.Shutdown()
c.cache.Shutdown()
}
View file
@ -1,178 +0,0 @@
package store
import (
"bytes"
"sync"
"testing"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
)
func TestCachingStore_Put(t *testing.T) {
origin := NewMemStore()
cache := NewMemStore()
s := NewCachingStore("test", origin, cache)
b := []byte("this is a blob of stuff")
hash := "hash"
err := s.Put(hash, b)
if err != nil {
t.Fatal(err)
}
has, err := origin.Has(hash)
if err != nil {
t.Fatal(err)
}
if !has {
t.Errorf("failed to store blob in origin")
}
has, err = cache.Has(hash)
if err != nil {
t.Fatal(err)
}
if !has {
t.Errorf("failed to store blob in cache")
}
}
func TestCachingStore_CacheMiss(t *testing.T) {
origin := NewMemStore()
cache := NewMemStore()
s := NewCachingStore("test", origin, cache)
b := []byte("this is a blob of stuff")
hash := "hash"
err := origin.Put(hash, b)
if err != nil {
t.Fatal(err)
}
res, stack, err := s.Get(hash)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(b, res) {
t.Errorf("expected Get() to return %s, got %s", string(b), string(res))
}
time.Sleep(10 * time.Millisecond) //storing to cache is done async so let's give it some time
has, err := cache.Has(hash)
if err != nil {
t.Fatal(err)
}
if !has {
t.Errorf("Get() did not copy blob to cache")
}
t.Logf("stack: %s", stack.String())
res, stack, err = cache.Get(hash)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(b, res) {
t.Errorf("expected cached Get() to return %s, got %s", string(b), string(res))
}
t.Logf("stack: %s", stack.String())
}
func TestCachingStore_ThunderingHerd(t *testing.T) {
storeDelay := 100 * time.Millisecond
origin := NewSlowBlobStore(storeDelay)
cache := NewMemStore()
s := NewCachingStore("test", origin, cache)
b := []byte("this is a blob of stuff")
hash := "hash"
err := origin.Put(hash, b)
if err != nil {
t.Fatal(err)
}
wg := &sync.WaitGroup{}
getNoErr := func() {
res, _, err := s.Get(hash)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(b, res) {
t.Errorf("expected Get() to return %s, got %s", string(b), string(res))
}
wg.Done()
}
start := time.Now()
wg.Add(4)
go func() {
go getNoErr()
time.Sleep(10 * time.Millisecond)
go getNoErr()
time.Sleep(10 * time.Millisecond)
go getNoErr()
time.Sleep(10 * time.Millisecond)
go getNoErr()
}()
wg.Wait()
duration := time.Since(start)
// only the first getNoErr() should hit the origin. the rest should wait for the first request to return
// once the first returns, the others should return immediately
// therefore, if the delay is much longer than 100ms, it means subsequent requests also went to the origin
expectedMaxDelay := storeDelay + 5*time.Millisecond // a bit of extra time to let requests finish
if duration > expectedMaxDelay {
t.Errorf("Expected delay of at most %s, got %s", expectedMaxDelay, duration)
}
}
// SlowBlobStore adds a delay to each request
type SlowBlobStore struct {
mem *MemStore
delay time.Duration
}
func NewSlowBlobStore(delay time.Duration) *SlowBlobStore {
return &SlowBlobStore{
mem: NewMemStore(),
delay: delay,
}
}
func (s *SlowBlobStore) Name() string {
return "slow"
}
func (s *SlowBlobStore) Has(hash string) (bool, error) {
time.Sleep(s.delay)
return s.mem.Has(hash)
}
func (s *SlowBlobStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
time.Sleep(s.delay)
return s.mem.Get(hash)
}
func (s *SlowBlobStore) Put(hash string, blob stream.Blob) error {
time.Sleep(s.delay)
return s.mem.Put(hash, blob)
}
func (s *SlowBlobStore) PutSD(hash string, blob stream.Blob) error {
time.Sleep(s.delay)
return s.mem.PutSD(hash, blob)
}
func (s *SlowBlobStore) Delete(hash string) error {
time.Sleep(s.delay)
return s.mem.Delete(hash)
}
func (s *SlowBlobStore) Shutdown() {
return
}

View file

@ -1,108 +0,0 @@
package store
import (
"io"
"net/http"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
)
// CloudFrontROStore reads from CloudFront. All write operations return shared.ErrNotImplemented.
type CloudFrontROStore struct {
endpoint string // cloudfront endpoint
}
// NewCloudFrontROStore returns an initialized CloudFrontROStore store pointer.
func NewCloudFrontROStore(endpoint string) *CloudFrontROStore {
return &CloudFrontROStore{endpoint: endpoint}
}
const nameCloudFrontRO = "cloudfront_ro"
// Name is the cache type name
func (c *CloudFrontROStore) Name() string { return nameCloudFrontRO }
// Has checks if the hash is in the store.
func (c *CloudFrontROStore) Has(hash string) (bool, error) {
status, body, err := c.cfRequest(http.MethodHead, hash)
if err != nil {
return false, err
}
defer func() { _ = body.Close() }()
switch status {
case http.StatusNotFound, http.StatusForbidden:
return false, nil
case http.StatusOK:
return true, nil
default:
return false, errors.Err("unexpected status %d", status)
}
}
// Get gets the blob from Cloudfront.
func (c *CloudFrontROStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
log.Debugf("Getting %s from S3", hash[:8])
start := time.Now()
defer func(t time.Time) {
log.Debugf("Getting %s from S3 took %s", hash[:8], time.Since(t).String())
}(start)
status, body, err := c.cfRequest(http.MethodGet, hash)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), err
}
defer func() { _ = body.Close() }()
switch status {
case http.StatusNotFound, http.StatusForbidden:
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(ErrBlobNotFound)
case http.StatusOK:
b, err := io.ReadAll(body)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(err)
}
metrics.MtrInBytesS3.Add(float64(len(b)))
return b, shared.NewBlobTrace(time.Since(start), c.Name()), nil
default:
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err("unexpected status %d", status)
}
}
func (c *CloudFrontROStore) cfRequest(method, hash string) (int, io.ReadCloser, error) {
url := c.endpoint + hash
req, err := http.NewRequest(method, url, nil)
if err != nil {
return 0, nil, errors.Err(err)
}
req.Header.Add("User-Agent", "reflector.go/"+meta.Version())
res, err := http.DefaultClient.Do(req)
if err != nil {
return 0, nil, errors.Err(err)
}
return res.StatusCode, res.Body, nil
}
func (c *CloudFrontROStore) Put(_ string, _ stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
func (c *CloudFrontROStore) PutSD(_ string, _ stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
func (c *CloudFrontROStore) Delete(_ string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown shuts down the store gracefully
func (c *CloudFrontROStore) Shutdown() {
}
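
A short sketch of how the read-only CloudFront store might be used (the distribution URL and blob hash below are placeholders; the import path is assumed): blobs are fetched via plain HTTP requests to endpoint + hash.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store" // assumed import path
)

func main() {
	// Hypothetical CloudFront distribution; requests go to endpoint + hash.
	cf := store.NewCloudFrontROStore("https://d1example123.cloudfront.net/")

	has, err := cf.Has("someblobhash")
	if err != nil {
		panic(err)
	}
	fmt.Println("blob available via CloudFront:", has)
}
```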

View file

@ -1,62 +0,0 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
)
// CloudFrontRWStore combines a Cloudfront and an S3 store. Reads go to Cloudfront/Wasabi, writes go to S3.
type CloudFrontRWStore struct {
cf *ITTTStore
s3 *S3Store
}
// NewCloudFrontRWStore returns an initialized CloudFrontRWStore store pointer.
// NOTE: It panics if either argument is nil.
func NewCloudFrontRWStore(cf *ITTTStore, s3 *S3Store) *CloudFrontRWStore {
if cf == nil || s3 == nil {
panic("both stores must be set")
}
return &CloudFrontRWStore{cf: cf, s3: s3}
}
const nameCloudFrontRW = "cloudfront_rw"
// Name is the cache type name
func (c *CloudFrontRWStore) Name() string { return nameCloudFrontRW }
// Has checks if the hash is in the store.
func (c *CloudFrontRWStore) Has(hash string) (bool, error) {
return c.cf.Has(hash)
}
// Get gets the blob from Cloudfront.
func (c *CloudFrontRWStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
blob, trace, err := c.cf.Get(hash)
return blob, trace.Stack(time.Since(start), c.Name()), err
}
// Put stores the blob on S3
func (c *CloudFrontRWStore) Put(hash string, blob stream.Blob) error {
return c.s3.Put(hash, blob)
}
// PutSD stores the sd blob on S3
func (c *CloudFrontRWStore) PutSD(hash string, blob stream.Blob) error {
return c.s3.PutSD(hash, blob)
}
// Delete deletes the blob from S3
func (c *CloudFrontRWStore) Delete(hash string) error {
return c.s3.Delete(hash)
}
// Shutdown shuts down the store gracefully
func (c *CloudFrontRWStore) Shutdown() {
c.s3.Shutdown()
c.cf.Shutdown()
}
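
A possible composition of the pieces above (a sketch; credentials, bucket, and endpoint are placeholders and the import path is assumed): reads go CDN-first through an ITTTStore, writes go straight to S3.

```go
package main

import (
	"github.com/lbryio/reflector.go/store" // assumed import path; all credentials below are placeholders
)

func main() {
	s3 := store.NewS3Store("AWS_KEY_ID", "AWS_SECRET_KEY", "us-east-1", "blob-bucket")
	reads := store.NewITTTStore(
		store.NewCloudFrontROStore("https://d1example123.cloudfront.net/"), // "this": try the CDN first
		s3, // "that": fall back to S3 directly
	)
	rw := store.NewCloudFrontRWStore(reads, s3)

	_ = rw.Put("somehash", []byte("blob bytes")) // writes go straight to S3
	_, _, _ = rw.Get("somehash")                 // reads go through the CDN-first path
}
```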

View file

@ -3,66 +3,39 @@ package store
import (
"encoding/json"
"sync"
"time"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/lbry.go/extras/errors"
log "github.com/sirupsen/logrus"
)
// DBBackedStore is a store that's backed by a DB. The DB contains data about what's in the store.
type DBBackedStore struct {
blobs BlobStore
db *db.SQL
blockedMu sync.RWMutex
blocked map[string]bool
deleteOnMiss bool
// DBBackedS3Store is an S3 store that is backed by a DB which tracks what is stored.
type DBBackedS3Store struct {
s3 *S3BlobStore
db *db.SQL
blockedMu sync.RWMutex
blocked map[string]bool
}
// NewDBBackedStore returns an initialized store pointer.
func NewDBBackedStore(blobs BlobStore, db *db.SQL, deleteOnMiss bool) *DBBackedStore {
return &DBBackedStore{blobs: blobs, db: db, deleteOnMiss: deleteOnMiss}
// NewDBBackedS3Store returns an initialized store pointer.
func NewDBBackedS3Store(s3 *S3BlobStore, db *db.SQL) *DBBackedS3Store {
return &DBBackedS3Store{s3: s3, db: db}
}
const nameDBBacked = "db-backed"
// Name is the cache type name
func (d *DBBackedStore) Name() string { return nameDBBacked }
// Has returns true if the blob is in the store
func (d *DBBackedStore) Has(hash string) (bool, error) {
return d.db.HasBlob(hash, false)
func (d *DBBackedS3Store) Has(hash string) (bool, error) {
return d.db.HasBlob(hash)
}
// Get gets the blob
func (d *DBBackedStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
has, err := d.db.HasBlob(hash, true)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), err
}
if !has {
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), ErrBlobNotFound
}
b, stack, err := d.blobs.Get(hash)
if d.deleteOnMiss && errors.Is(err, ErrBlobNotFound) {
e2 := d.Delete(hash)
if e2 != nil {
log.Errorf("error while deleting blob from db: %s", errors.FullTrace(err))
}
}
return b, stack.Stack(time.Since(start), d.Name()), err
func (d *DBBackedS3Store) Get(hash string) ([]byte, error) {
return d.s3.Get(hash)
}
// Put stores the blob in the S3 store and stores the blob information in the DB.
func (d *DBBackedStore) Put(hash string, blob stream.Blob) error {
err := d.blobs.Put(hash, blob)
func (d *DBBackedS3Store) Put(hash string, blob []byte) error {
err := d.s3.Put(hash, blob)
if err != nil {
return err
}
@ -72,17 +45,17 @@ func (d *DBBackedStore) Put(hash string, blob stream.Blob) error {
// PutSD stores the SDBlob in the S3 store. It will return an error if the sd blob is missing the stream hash or if
// there is an error storing the blob information in the DB.
func (d *DBBackedStore) PutSD(hash string, blob stream.Blob) error {
func (d *DBBackedS3Store) PutSD(hash string, blob []byte) error {
var blobContents db.SdBlob
err := json.Unmarshal(blob, &blobContents)
if err != nil {
return errors.Err(err)
return err
}
if blobContents.StreamHash == "" {
return errors.Err("sd blob is missing stream hash")
}
err = d.blobs.PutSD(hash, blob)
err = d.s3.PutSD(hash, blob)
if err != nil {
return err
}
@ -90,8 +63,8 @@ func (d *DBBackedStore) PutSD(hash string, blob stream.Blob) error {
return d.db.AddSDBlob(hash, len(blob), blobContents)
}
func (d *DBBackedStore) Delete(hash string) error {
err := d.blobs.Delete(hash)
func (d *DBBackedS3Store) Delete(hash string) error {
err := d.s3.Delete(hash)
if err != nil {
return err
}
@ -100,7 +73,7 @@ func (d *DBBackedStore) Delete(hash string) error {
}
// Block deletes the blob and prevents it from being uploaded in the future
func (d *DBBackedStore) Block(hash string) error {
func (d *DBBackedS3Store) Block(hash string) error {
if blocked, err := d.isBlocked(hash); blocked || err != nil {
return err
}
@ -112,28 +85,28 @@ func (d *DBBackedStore) Block(hash string) error {
return err
}
//has, err := d.db.HasBlob(hash, false)
//if err != nil {
// return err
//}
//
//if has {
// err = d.blobs.Delete(hash)
// if err != nil {
// return err
// }
//
// err = d.db.Delete(hash)
// if err != nil {
// return err
// }
//}
has, err := d.db.HasBlob(hash)
if err != nil {
return err
}
if has {
err = d.s3.Delete(hash)
if err != nil {
return err
}
err = d.db.Delete(hash)
if err != nil {
return err
}
}
return d.markBlocked(hash)
}
// Wants returns false if the hash exists or is blocked, true otherwise
func (d *DBBackedStore) Wants(hash string) (bool, error) {
func (d *DBBackedS3Store) Wants(hash string) (bool, error) {
blocked, err := d.isBlocked(hash)
if blocked || err != nil {
return false, err
@ -146,11 +119,11 @@ func (d *DBBackedStore) Wants(hash string) (bool, error) {
// MissingBlobsForKnownStream returns missing blobs for an existing stream
// WARNING: if the stream does NOT exist, no blob hashes will be returned, which looks
// like no blobs are missing
func (d *DBBackedStore) MissingBlobsForKnownStream(sdHash string) ([]string, error) {
func (d *DBBackedS3Store) MissingBlobsForKnownStream(sdHash string) ([]string, error) {
return d.db.MissingBlobsForKnownStream(sdHash)
}
func (d *DBBackedStore) markBlocked(hash string) error {
func (d *DBBackedS3Store) markBlocked(hash string) error {
err := d.initBlocked()
if err != nil {
return err
@ -163,7 +136,7 @@ func (d *DBBackedStore) markBlocked(hash string) error {
return nil
}
func (d *DBBackedStore) isBlocked(hash string) (bool, error) {
func (d *DBBackedS3Store) isBlocked(hash string) (bool, error) {
err := d.initBlocked()
if err != nil {
return false, err
@ -175,7 +148,7 @@ func (d *DBBackedStore) isBlocked(hash string) (bool, error) {
return d.blocked[hash], nil
}
func (d *DBBackedStore) initBlocked() error {
func (d *DBBackedS3Store) initBlocked() error {
// first check without blocking since this is the most likely scenario
if d.blocked != nil {
return nil
@ -194,8 +167,3 @@ func (d *DBBackedStore) initBlocked() error {
return err
}
// Shutdown shuts down the store gracefully
func (d *DBBackedStore) Shutdown() {
d.blobs.Shutdown()
}
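
A wiring sketch for the DB-backed store (assuming the store and db packages are importable as shown; the disk path is a placeholder and dbConn must already be connected): deleteOnMiss=true prunes the DB row for any blob the underlying store no longer has.

```go
package example

import (
	"github.com/lbryio/reflector.go/db"
	"github.com/lbryio/reflector.go/store" // assumed import path
)

// buildStore wires a disk store to the blob-tracking DB.
// dbConn is assumed to be an already-connected *db.SQL.
func buildStore(dbConn *db.SQL) *store.DBBackedStore {
	disk := store.NewDiskStore("/var/lib/blobs", 2)
	return store.NewDBBackedStore(disk, dbConn, true)
}
```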

View file

@ -1,146 +0,0 @@
package store
import (
"os"
"path"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store/speedwalk"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
// DiskStore stores blobs on a local disk
type DiskStore struct {
// the location of blobs on disk
blobDir string
// store files in subdirectories based on the first N chars in the filename. 0 = don't create subdirectories.
prefixLength int
// true if initOnce ran, false otherwise
initialized bool
}
// NewDiskStore returns an initialized file disk store pointer.
func NewDiskStore(dir string, prefixLength int) *DiskStore {
return &DiskStore{
blobDir: dir,
prefixLength: prefixLength,
}
}
const nameDisk = "disk"
// Name is the cache type name
func (d *DiskStore) Name() string { return nameDisk }
// Has returns whether the blob is already stored. It will error with any disk IO error.
func (d *DiskStore) Has(hash string) (bool, error) {
err := d.initOnce()
if err != nil {
return false, err
}
_, err = os.Stat(d.path(hash))
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, errors.Err(err)
}
return true, nil
}
// Get returns the blob or an error if the blob doesn't exist.
func (d *DiskStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
err := d.initOnce()
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), err
}
blob, err := os.ReadFile(d.path(hash))
if err != nil {
if os.IsNotExist(err) {
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(ErrBlobNotFound)
}
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(err)
}
return blob, shared.NewBlobTrace(time.Since(start), d.Name()), nil
}
// PutSD stores the sd blob on the disk
func (d *DiskStore) PutSD(hash string, blob stream.Blob) error {
return d.Put(hash, blob)
}
// Delete deletes the blob from the store
func (d *DiskStore) Delete(hash string) error {
err := d.initOnce()
if err != nil {
return err
}
has, err := d.Has(hash)
if err != nil {
return err
}
if !has {
return nil
}
err = os.Remove(d.path(hash))
return errors.Err(err)
}
// list returns the hashes of blobs that already exist in the blobDir
func (d *DiskStore) list() ([]string, error) {
err := d.initOnce()
if err != nil {
return nil, err
}
return speedwalk.AllFiles(d.blobDir, true)
}
func (d *DiskStore) dir(hash string) string {
if d.prefixLength <= 0 || len(hash) < d.prefixLength {
return d.blobDir
}
return path.Join(d.blobDir, hash[:d.prefixLength])
}
func (d *DiskStore) tmpDir(hash string) string {
return path.Join(d.blobDir, "tmp")
}
func (d *DiskStore) path(hash string) string {
return path.Join(d.dir(hash), hash)
}
func (d *DiskStore) tmpPath(hash string) string {
return path.Join(d.tmpDir(hash), hash)
}
func (d *DiskStore) ensureDirExists(dir string) error {
return errors.Err(os.MkdirAll(dir, 0755))
}
func (d *DiskStore) initOnce() error {
if d.initialized {
return nil
}
err := d.ensureDirExists(d.blobDir)
if err != nil {
return err
}
err = d.ensureDirExists(path.Join(d.blobDir, "tmp"))
if err != nil {
return err
}
d.initialized = true
return nil
}
// Shutdown shuts down the store gracefully
func (d *DiskStore) Shutdown() {
}
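
A minimal sketch of the sharded disk layout described above (import path assumed, directory and hash are placeholders): with prefixLength 2, a blob lands in a subdirectory named after the first two characters of its hash.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store" // assumed import path
)

func main() {
	// prefixLength 2 shards blobs into subdirectories, e.g. /var/lib/blobs/ab/abcdef...
	d := store.NewDiskStore("/var/lib/blobs", 2)

	hash := "abcdef0123456789"
	if err := d.Put(hash, []byte("blob bytes")); err != nil {
		panic(err)
	}
	blob, _, err := d.Get(hash)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(blob))
}
```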

View file

@ -1,44 +0,0 @@
package store
import (
"os"
"path"
"path/filepath"
"testing"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDiskStore_Get(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "reflector_test_*")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(tmpDir) }()
d := NewDiskStore(tmpDir, 2)
hash := "f428b8265d65dad7f8ffa52922bba836404cbd62f3ecfe10adba6b444f8f658938e54f5981ac4de39644d5b93d89a94b"
data := []byte("oyuntyausntoyaunpdoyruoyduanrstjwfjyuwf")
expectedPath := path.Join(tmpDir, hash[:2], hash)
err = os.MkdirAll(filepath.Dir(expectedPath), os.ModePerm)
require.NoError(t, err)
err = os.WriteFile(expectedPath, data, os.ModePerm)
require.NoError(t, err)
blob, _, err := d.Get(hash)
assert.NoError(t, err)
assert.EqualValues(t, data, blob)
}
func TestDiskStore_GetNonexistentBlob(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "reflector_test_*")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(tmpDir) }()
d := NewDiskStore(tmpDir, 2)
blob, _, err := d.Get("nonexistent")
assert.Nil(t, blob)
assert.True(t, errors.Is(err, ErrBlobNotFound))
}

View file

@ -1,42 +0,0 @@
//go:build darwin
// +build darwin
package store
import (
"bytes"
"io"
"os"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
var openFileFlags = os.O_WRONLY | os.O_CREATE
// Put stores the blob on disk
func (d *DiskStore) Put(hash string, blob stream.Blob) error {
err := d.initOnce()
if err != nil {
return err
}
err = d.ensureDirExists(d.dir(hash))
if err != nil {
return err
}
// Open the temp file for writing (darwin has no O_DIRECT flag)
f, err := os.OpenFile(d.tmpPath(hash), openFileFlags, 0644)
if err != nil {
return errors.Err(err)
}
defer f.Close()
_, err = io.Copy(f, bytes.NewReader(blob))
if err != nil {
return errors.Err(err)
}
err = os.Rename(d.tmpPath(hash), d.path(hash))
return errors.Err(err)
}

View file

@ -1,49 +0,0 @@
//go:build linux
// +build linux
package store
import (
"bytes"
"io"
"os"
"syscall"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/brk0v/directio"
)
var openFileFlags = os.O_WRONLY | os.O_CREATE | syscall.O_DIRECT
// Put stores the blob on disk
func (d *DiskStore) Put(hash string, blob stream.Blob) error {
err := d.initOnce()
if err != nil {
return err
}
err = d.ensureDirExists(d.dir(hash))
if err != nil {
return err
}
// Open file with O_DIRECT
f, err := os.OpenFile(d.tmpPath(hash), openFileFlags, 0644)
if err != nil {
return errors.Err(err)
}
defer func() { _ = f.Close() }()
dio, err := directio.New(f)
if err != nil {
return errors.Err(err)
}
defer func() { _ = dio.Flush() }()
_, err = io.Copy(dio, bytes.NewReader(blob))
if err != nil {
return errors.Err(err)
}
err = os.Rename(d.tmpPath(hash), d.path(hash))
return errors.Err(err)
}

115
store/file.go Normal file
View file

@ -0,0 +1,115 @@
package store
import (
"io/ioutil"
"os"
"path"
"github.com/lbryio/lbry.go/extras/errors"
)
// FileBlobStore is a local disk store.
type FileBlobStore struct {
dir string
initialized bool
}
// NewFileBlobStore returns an initialized file disk store pointer.
func NewFileBlobStore(dir string) *FileBlobStore {
return &FileBlobStore{dir: dir}
}
func (f *FileBlobStore) path(hash string) string {
return path.Join(f.dir, hash)
}
func (f *FileBlobStore) initOnce() error {
if f.initialized {
return nil
}
if stat, err := os.Stat(f.dir); err != nil {
if os.IsNotExist(err) {
err2 := os.Mkdir(f.dir, 0755)
if err2 != nil {
return err2
}
} else {
return err
}
} else if !stat.IsDir() {
return errors.Err("blob dir exists but is not a dir")
}
f.initialized = true
return nil
}
// Has returns whether the blob is already stored. It will error with any disk IO error.
func (f *FileBlobStore) Has(hash string) (bool, error) {
err := f.initOnce()
if err != nil {
return false, err
}
_, err = os.Stat(f.path(hash))
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
// Get returns the stored blob as a byte slice, or errors if the blob doesn't exist.
func (f *FileBlobStore) Get(hash string) ([]byte, error) {
err := f.initOnce()
if err != nil {
return []byte{}, err
}
file, err := os.Open(f.path(hash))
if err != nil {
if os.IsNotExist(err) {
return []byte{}, errors.Err(ErrBlobNotFound)
}
return []byte{}, err
}
return ioutil.ReadAll(file)
}
// Put stores the blob on disk
func (f *FileBlobStore) Put(hash string, blob []byte) error {
err := f.initOnce()
if err != nil {
return err
}
return ioutil.WriteFile(f.path(hash), blob, 0644)
}
// PutSD stores the sd blob on the disk
func (f *FileBlobStore) PutSD(hash string, blob []byte) error {
return f.Put(hash, blob)
}
// Delete deletes the blob from the store
func (f *FileBlobStore) Delete(hash string) error {
err := f.initOnce()
if err != nil {
return err
}
has, err := f.Has(hash)
if err != nil {
return err
}
if !has {
return nil
}
return os.Remove(f.path(hash))
}

View file

@ -1,163 +0,0 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/bluele/gcache"
"github.com/sirupsen/logrus"
)
// GcacheStore adds a max cache size and a configurable eviction strategy (LFU, ARC, LRU, or SIMPLE) to a BlobStore
type GcacheStore struct {
// underlying store
store BlobStore
// cache implementation
cache gcache.Cache
}
type EvictionStrategy int
const (
//LFU Discards the least frequently used items first.
LFU EvictionStrategy = iota
//ARC Constantly balances between LRU and LFU, to improve the combined result.
ARC
//LRU Discards the least recently used items first.
LRU
//SIMPLE has no clear eviction priority; it depends on key-value map order.
SIMPLE
)
// NewGcacheStore initializes a new GcacheStore with the given max size and eviction strategy
func NewGcacheStore(component string, store BlobStore, maxSize int, strategy EvictionStrategy) *GcacheStore {
cacheBuilder := gcache.New(maxSize)
var cache gcache.Cache
evictFunc := func(key interface{}, value interface{}) {
logrus.Infof("evicting %s", key)
metrics.CacheLRUEvictCount.With(metrics.CacheLabels(store.Name(), component)).Inc()
_ = store.Delete(key.(string)) // TODO: log this error. may happen if underlying entry is gone but cache entry still there
}
switch strategy {
case LFU:
cache = cacheBuilder.LFU().EvictedFunc(evictFunc).Build()
case ARC:
cache = cacheBuilder.ARC().EvictedFunc(evictFunc).Build()
case LRU:
cache = cacheBuilder.LRU().EvictedFunc(evictFunc).Build()
case SIMPLE:
cache = cacheBuilder.Simple().EvictedFunc(evictFunc).Build()
}
l := &GcacheStore{
store: store,
cache: cache,
}
go func() {
if lstr, ok := store.(lister); ok {
err := l.loadExisting(lstr, maxSize)
if err != nil {
panic(err) // TODO: what should happen here? panic? return nil? just keep going?
}
}
}()
return l
}
const nameGcache = "gcache"
// Name is the cache type name
func (l *GcacheStore) Name() string { return nameGcache }
// Has returns whether the blob is in the store, without updating the recent-ness.
func (l *GcacheStore) Has(hash string) (bool, error) {
return l.cache.Has(hash), nil
}
// Get returns the blob or an error if the blob doesn't exist.
func (l *GcacheStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
_, err := l.cache.Get(hash)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), l.Name()), errors.Err(ErrBlobNotFound)
}
blob, stack, err := l.store.Get(hash)
if errors.Is(err, ErrBlobNotFound) {
// Blob disappeared from underlying store
l.cache.Remove(hash)
}
return blob, stack.Stack(time.Since(start), l.Name()), err
}
// Put stores the blob. Due to the eviction policy, a Set is not guaranteed to actually store the value!!!
func (l *GcacheStore) Put(hash string, blob stream.Blob) error {
_ = l.cache.Set(hash, true)
has, _ := l.Has(hash)
if has {
err := l.store.Put(hash, blob)
if err != nil {
return err
}
}
return nil
}
// PutSD stores the sd blob. Due to the eviction policy, a Set is not guaranteed to actually store the value!!!
func (l *GcacheStore) PutSD(hash string, blob stream.Blob) error {
_ = l.cache.Set(hash, true)
has, _ := l.Has(hash)
if has {
err := l.store.PutSD(hash, blob)
if err != nil {
return err
}
}
return nil
}
// Delete deletes the blob from the store
func (l *GcacheStore) Delete(hash string) error {
err := l.store.Delete(hash)
if err != nil {
return err
}
// This must come after store.Delete()
// Remove triggers onEvict function, which also tries to delete blob from store
// We need to delete it manually first so any errors can be propagated up
l.cache.Remove(hash)
return nil
}
// loadExisting imports existing blobs from the underlying store into the LRU cache
func (l *GcacheStore) loadExisting(store lister, maxItems int) error {
logrus.Infof("loading at most %d items", maxItems)
existing, err := store.list()
if err != nil {
return err
}
logrus.Infof("read %d files from underlying store", len(existing))
added := 0
for i, h := range existing {
_ = l.cache.Set(h, true)
added++
if maxItems > 0 && added >= maxItems { // the underlying store holds more blobs than the cache allows
err := l.Delete(h)
logrus.Infof("deleted overflowing blob: %s (%d/%d)", h, i, len(existing))
if err != nil {
logrus.Warnf("error while deleting a blob that's overflowing the cache: %s", err.Error())
}
}
}
return nil
}
// Shutdown shuts down the store gracefully
func (l *GcacheStore) Shutdown() {
}
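
A usage sketch for the size-capped cache above (import path, directory, and hash are placeholders/assumptions): when the limit is reached, the chosen strategy evicts a blob and the eviction hook deletes it from the underlying store.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store" // assumed import path
)

func main() {
	// Cap the disk cache at 10000 blobs, evicting least-frequently-used blobs first.
	disk := store.NewDiskStore("/var/lib/blob_cache", 2)
	cache := store.NewGcacheStore("example", disk, 10000, store.LFU)

	if err := cache.Put("somehash", []byte("blob bytes")); err != nil {
		panic(err)
	}
	has, _ := cache.Has("somehash")
	fmt.Println("cached:", has)
}
```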

View file

@ -1,110 +0,0 @@
package store
import (
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const cacheMaxSize = 3
func getTestGcacheStore() (*GcacheStore, *MemStore) {
m := NewMemStore()
return NewGcacheStore("test", m, cacheMaxSize, LFU), m
}
func TestGcacheStore_Eviction(t *testing.T) {
lfu, mem := getTestGcacheStore()
b := []byte("x")
for i := 0; i < 3; i++ {
err := lfu.Put(fmt.Sprintf("%d", i), b)
require.NoError(t, err)
for j := 0; j < 3-i; j++ {
_, _, err = lfu.Get(fmt.Sprintf("%d", i))
require.NoError(t, err)
}
}
for k, v := range map[string]bool{
"0": true,
"1": true,
"2": true,
} {
has, err := lfu.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
err := lfu.Put("3", b)
require.NoError(t, err)
for k, v := range map[string]bool{
"0": true,
"1": true,
"2": false,
"3": true,
} {
has, err := lfu.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
assert.Equal(t, cacheMaxSize, len(mem.Debug()))
err = lfu.Delete("0")
assert.NoError(t, err)
err = lfu.Delete("1")
assert.NoError(t, err)
err = lfu.Delete("3")
assert.NoError(t, err)
assert.Equal(t, 0, len(mem.Debug()))
}
func TestGcacheStore_UnderlyingBlobMissing(t *testing.T) {
lfu, mem := getTestGcacheStore()
hash := "hash"
b := []byte("this is a blob of stuff")
err := lfu.Put(hash, b)
require.NoError(t, err)
err = mem.Delete(hash)
require.NoError(t, err)
// hash still exists in lru
assert.True(t, lfu.cache.Has(hash))
blob, _, err := lfu.Get(hash)
assert.Nil(t, blob)
assert.True(t, errors.Is(err, ErrBlobNotFound), "expected (%s) %s, got (%s) %s",
reflect.TypeOf(ErrBlobNotFound).String(), ErrBlobNotFound.Error(),
reflect.TypeOf(err).String(), err.Error())
// lru.Get() removes hash if underlying store doesn't have it
assert.False(t, lfu.cache.Has(hash))
}
func TestGcacheStore_loadExisting(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "reflector_test_*")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(tmpDir) }()
d := NewDiskStore(tmpDir, 2)
hash := "hash"
b := []byte("this is a blob of stuff")
err = d.Put(hash, b)
require.NoError(t, err)
existing, err := d.list()
require.NoError(t, err)
require.Equal(t, 1, len(existing), "blob should exist in cache")
assert.Equal(t, hash, existing[0])
lfu := NewGcacheStore("test", d, 3, LFU) // lru should load existing blobs when it's created
time.Sleep(100 * time.Millisecond) // async load so let's wait...
has, err := lfu.Has(hash)
require.NoError(t, err)
assert.True(t, has, "hash should be loaded from disk store but it's not")
}

View file

@ -1,170 +0,0 @@
package store
import (
"bytes"
"context"
"io"
"net"
"net/http"
"sync"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
// HttpStore is a store that works on top of the HTTP protocol
type HttpStore struct {
upstream string
httpClient *http.Client
edgeToken string
}
func NewHttpStore(upstream, edgeToken string) *HttpStore {
return &HttpStore{
upstream: "http://" + upstream,
httpClient: getClient(),
edgeToken: edgeToken,
}
}
const nameHttp = "http"
func (n *HttpStore) Name() string { return nameHttp }
func (n *HttpStore) Has(hash string) (bool, error) {
url := n.upstream + "/blob?hash=" + hash
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return false, errors.Err(err)
}
res, err := n.httpClient.Do(req)
if err != nil {
return false, errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode == http.StatusNotFound {
return false, nil
}
if res.StatusCode == http.StatusNoContent {
return true, nil
}
var body []byte
if res.Body != nil {
body, _ = io.ReadAll(res.Body)
}
return false, errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
url := n.upstream + "/blob?hash=" + hash
if n.edgeToken != "" {
url += "&edge_token=" + n.edgeToken
}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
}
res, err := n.httpClient.Do(req)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
tmp := getBuffer()
defer putBuffer(tmp)
serialized := res.Header.Get("Via")
trace := shared.NewBlobTrace(time.Since(start), n.Name())
if serialized != "" {
parsedTrace, err := shared.Deserialize(serialized)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), err
}
trace = *parsedTrace
}
if res.StatusCode == http.StatusNotFound {
return nil, trace.Stack(time.Since(start), n.Name()), ErrBlobNotFound
}
if res.StatusCode == http.StatusOK {
written, err := io.Copy(tmp, res.Body)
if err != nil {
return nil, trace.Stack(time.Since(start), n.Name()), errors.Err(err)
}
blob := make([]byte, written)
copy(blob, tmp.Bytes())
metrics.MtrInBytesHttp.Add(float64(len(blob)))
return blob, trace.Stack(time.Since(start), n.Name()), nil
}
var body []byte
if res.Body != nil {
body, _ = io.ReadAll(res.Body)
}
return nil, trace.Stack(time.Since(start), n.Name()), errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
func (n *HttpStore) Put(string, stream.Blob) error {
return shared.ErrNotImplemented
}
func (n *HttpStore) PutSD(string, stream.Blob) error {
return shared.ErrNotImplemented
}
func (n *HttpStore) Delete(string) error {
return shared.ErrNotImplemented
}
func (n *HttpStore) Shutdown() {}
// buffer pool to reduce GC
// https://www.captaincodeman.com/2017/06/02/golang-buffer-pool-gotcha
var buffers = sync.Pool{
// New is called when a new instance is needed
New: func() interface{} {
buf := make([]byte, 0, stream.MaxBlobSize)
return bytes.NewBuffer(buf)
},
}
// getBuffer fetches a buffer from the pool
func getBuffer() *bytes.Buffer {
return buffers.Get().(*bytes.Buffer)
}
// putBuffer returns a buffer to the pool
func putBuffer(buf *bytes.Buffer) {
buf.Reset()
buffers.Put(buf)
}
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}
return dialer.DialContext(ctx, network, address)
}
// getClient gets an http client that's customized to be more performant when dealing with blobs of 2MB in size (most of our blobs)
func getClient() *http.Client {
// Customize the Transport to have larger connection pool
defaultTransport := &http.Transport{
DialContext: dialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableCompression: true,
MaxIdleConnsPerHost: 100,
ReadBufferSize: stream.MaxBlobSize + 1024*10, //add an extra few KBs to make sure it fits the extra information
}
return &http.Client{Transport: defaultTransport}
}
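
A small sketch of using the HTTP-backed store above (the upstream address and port are placeholders, the import path is assumed): the upstream is given as host:port and the store prepends "http://".

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store" // assumed import path; the address below is a placeholder
)

func main() {
	// Empty edge token; pass a real token if the upstream requires one.
	up := store.NewHttpStore("blobcache.example.com:5569", "")

	blob, trace, err := up.Get("someblobhash")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(blob), trace.String())
}
```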

View file

@ -1,73 +0,0 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
// ITTTStore performs an operation on the "this" store; if that fails, it attempts the same operation on the "that" store
type ITTTStore struct {
this, that BlobStore
}
// NewITTTStore returns a new instance of the IF THIS THEN THAT store
func NewITTTStore(this, that BlobStore) *ITTTStore {
return &ITTTStore{
this: this,
that: that,
}
}
const nameIttt = "ittt"
// Name is the cache type name
func (c *ITTTStore) Name() string { return nameIttt }
// Has checks "this" for a hash; if that fails, it checks "that". It returns true if either store has it.
func (c *ITTTStore) Has(hash string) (bool, error) {
has, err := c.this.Has(hash)
if err != nil || !has {
has, err = c.that.Has(hash)
}
return has, err
}
// Get tries to get the blob from this first, falling back to that.
func (c *ITTTStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
blob, trace, err := c.this.Get(hash)
if err == nil {
metrics.ThisHitCount.Inc()
return blob, trace.Stack(time.Since(start), c.Name()), err
}
blob, trace, err = c.that.Get(hash)
if err != nil {
return nil, trace.Stack(time.Since(start), c.Name()), err
}
metrics.ThatHitCount.Inc()
return blob, trace.Stack(time.Since(start), c.Name()), nil
}
// Put not implemented
func (c *ITTTStore) Put(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// PutSD not implemented
func (c *ITTTStore) PutSD(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// Delete not implemented
func (c *ITTTStore) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown shuts down the store gracefully
func (c *ITTTStore) Shutdown() {}
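
A fallback sketch using the ITTT store directly (paths and address are placeholders, import path assumed): serve from local disk when possible, otherwise fetch from a remote HTTP blob cache.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store" // assumed import path; paths and addresses are placeholders
)

func main() {
	s := store.NewITTTStore(
		store.NewDiskStore("/var/lib/blobs", 2),               // "this": local disk
		store.NewHttpStore("blobcache.example.com:5569", ""), // "that": remote HTTP cache
	)

	blob, _, err := s.Get("someblobhash")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(blob))
}
```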

View file

@ -1,80 +1,54 @@
package store
import (
"sync"
"time"
import "github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
// MemStore is an in memory only blob store with no persistence.
type MemStore struct {
blobs map[string]stream.Blob
mu *sync.RWMutex
// MemoryBlobStore is an in memory only blob store with no persistence.
type MemoryBlobStore struct {
blobs map[string][]byte
}
func NewMemStore() *MemStore {
return &MemStore{
blobs: make(map[string]stream.Blob),
mu: &sync.RWMutex{},
}
}
const nameMem = "mem"
// Name is the cache type name
func (m *MemStore) Name() string { return nameMem }
// Has returns T/F if the blob is currently stored. It will never error.
func (m *MemStore) Has(hash string) (bool, error) {
m.mu.RLock()
defer m.mu.RUnlock()
func (m *MemoryBlobStore) Has(hash string) (bool, error) {
if m.blobs == nil {
m.blobs = make(map[string][]byte)
}
_, ok := m.blobs[hash]
return ok, nil
}
// Get returns the blob byte slice if present and errors if the blob is not found.
func (m *MemStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
m.mu.RLock()
defer m.mu.RUnlock()
func (m *MemoryBlobStore) Get(hash string) ([]byte, error) {
if m.blobs == nil {
m.blobs = make(map[string][]byte)
}
blob, ok := m.blobs[hash]
if !ok {
return nil, shared.NewBlobTrace(time.Since(start), m.Name()), errors.Err(ErrBlobNotFound)
return []byte{}, errors.Err(ErrBlobNotFound)
}
return blob, shared.NewBlobTrace(time.Since(start), m.Name()), nil
return blob, nil
}
// Put stores the blob in memory
func (m *MemStore) Put(hash string, blob stream.Blob) error {
m.mu.Lock()
defer m.mu.Unlock()
func (m *MemoryBlobStore) Put(hash string, blob []byte) error {
if m.blobs == nil {
m.blobs = make(map[string][]byte)
}
m.blobs[hash] = blob
return nil
}
// PutSD stores the sd blob in memory
func (m *MemStore) PutSD(hash string, blob stream.Blob) error {
func (m *MemoryBlobStore) PutSD(hash string, blob []byte) error {
return m.Put(hash, blob)
}
// Delete deletes the blob from the store
func (m *MemStore) Delete(hash string) error {
m.mu.Lock()
defer m.mu.Unlock()
func (m *MemoryBlobStore) Delete(hash string) error {
delete(m.blobs, hash)
return nil
}
// Debug returns the blobs in memory. It's useful for testing and debugging.
func (m *MemStore) Debug() map[string]stream.Blob {
m.mu.RLock()
defer m.mu.RUnlock()
func (m *MemoryBlobStore) Debug() map[string][]byte {
return m.blobs
}
// Shutdown shuts down the store gracefully
func (m *MemStore) Shutdown() {}

View file

@ -4,11 +4,11 @@ import (
"bytes"
"testing"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/extras/errors"
)
func TestMemStore_Put(t *testing.T) {
s := NewMemStore()
func TestMemoryBlobStore_Put(t *testing.T) {
s := MemoryBlobStore{}
blob := []byte("abcdefg")
err := s.Put("abc", blob)
if err != nil {
@ -16,8 +16,8 @@ func TestMemStore_Put(t *testing.T) {
}
}
func TestMemStore_Get(t *testing.T) {
s := NewMemStore()
func TestMemoryBlobStore_Get(t *testing.T) {
s := MemoryBlobStore{}
hash := "abc"
blob := []byte("abcdefg")
err := s.Put(hash, blob)
@ -25,7 +25,7 @@ func TestMemStore_Get(t *testing.T) {
t.Error("error getting memory blob - ", err)
}
gotBlob, _, err := s.Get(hash)
gotBlob, err := s.Get(hash)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
@ -33,7 +33,7 @@ func TestMemStore_Get(t *testing.T) {
t.Error("Got blob that is different from expected blob")
}
missingBlob, _, err := s.Get("nonexistent hash")
missingBlob, err := s.Get("nonexistent hash")
if err == nil {
t.Errorf("Expected ErrBlobNotFound, got nil")
}

View file

@ -1,24 +0,0 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
)
// NoopStore is a store that does nothing
type NoopStore struct{}
const nameNoop = "noop"
func (n *NoopStore) Name() string { return nameNoop }
func (n *NoopStore) Has(_ string) (bool, error) { return false, nil }
func (n *NoopStore) Get(_ string) (stream.Blob, shared.BlobTrace, error) {
return nil, shared.NewBlobTrace(time.Since(time.Now()), n.Name()), nil
}
func (n *NoopStore) Put(_ string, _ stream.Blob) error { return nil }
func (n *NoopStore) PutSD(_ string, _ stream.Blob) error { return nil }
func (n *NoopStore) Delete(_ string) error { return nil }
func (n *NoopStore) Shutdown() { return }

View file

@ -5,11 +5,7 @@ import (
"net/http"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@ -20,8 +16,8 @@ import (
log "github.com/sirupsen/logrus"
)
// S3Store is an S3 store
type S3Store struct {
// S3BlobStore is an S3 store
type S3BlobStore struct {
awsID string
awsSecret string
region string
@ -30,9 +26,9 @@ type S3Store struct {
session *session.Session
}
// NewS3Store returns an initialized S3 store pointer.
func NewS3Store(awsID, awsSecret, region, bucket string) *S3Store {
return &S3Store{
// NewS3BlobStore returns an initialized S3 store pointer.
func NewS3BlobStore(awsID, awsSecret, region, bucket string) *S3BlobStore {
return &S3BlobStore{
awsID: awsID,
awsSecret: awsSecret,
region: region,
@ -40,13 +36,25 @@ func NewS3Store(awsID, awsSecret, region, bucket string) *S3Store {
}
}
const nameS3 = "s3"
func (s *S3BlobStore) initOnce() error {
if s.session != nil {
return nil
}
// Name is the cache type name
func (s *S3Store) Name() string { return nameS3 }
sess, err := session.NewSession(&aws.Config{
Credentials: credentials.NewStaticCredentials(s.awsID, s.awsSecret, ""),
Region: aws.String(s.region),
})
if err != nil {
return err
}
s.session = sess
return nil
}
// Has returns whether the store contains the blob, or an error from S3.
func (s *S3Store) Has(hash string) (bool, error) {
func (s *S3BlobStore) Has(hash string) (bool, error) {
err := s.initOnce()
if err != nil {
return false, err
@ -67,18 +75,17 @@ func (s *S3Store) Has(hash string) (bool, error) {
}
// Get returns the blob slice if present or errors on S3.
func (s *S3Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
func (s *S3BlobStore) Get(hash string) ([]byte, error) {
// TODO: handle the blob-not-found error for consistency.
err := s.initOnce()
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), s.Name()), err
return []byte{}, err
}
log.Debugf("Getting %s from S3", hash[:8])
defer func(t time.Time) {
log.Debugf("Getting %s from S3 took %s", hash[:8], time.Since(t).String())
}(start)
}(time.Now())
buf := &aws.WriteAtBuffer{}
_, err = s3manager.NewDownloader(s.session).Download(buf, &s3.GetObjectInput{
@ -89,19 +96,19 @@ func (s *S3Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchBucket:
return nil, shared.NewBlobTrace(time.Since(start), s.Name()), errors.Err("bucket %s does not exist", s.bucket)
return []byte{}, errors.Err("bucket %s does not exist", s.bucket)
case s3.ErrCodeNoSuchKey:
return nil, shared.NewBlobTrace(time.Since(start), s.Name()), errors.Err(ErrBlobNotFound)
return []byte{}, errors.Err(ErrBlobNotFound)
}
}
return buf.Bytes(), shared.NewBlobTrace(time.Since(start), s.Name()), err
return buf.Bytes(), err
}
return buf.Bytes(), shared.NewBlobTrace(time.Since(start), s.Name()), nil
return buf.Bytes(), nil
}
// Put stores the blob on S3, or errors if the S3 connection fails.
func (s *S3Store) Put(hash string, blob stream.Blob) error {
func (s *S3BlobStore) Put(hash string, blob []byte) error {
err := s.initOnce()
if err != nil {
return err
@ -113,24 +120,22 @@ func (s *S3Store) Put(hash string, blob stream.Blob) error {
}(time.Now())
_, err = s3manager.NewUploader(s.session).Upload(&s3manager.UploadInput{
Bucket: aws.String(s.bucket),
Key: aws.String(hash),
Body: bytes.NewBuffer(blob),
ACL: aws.String("public-read"),
//StorageClass: aws.String(s3.StorageClassIntelligentTiering),
Bucket: aws.String(s.bucket),
Key: aws.String(hash),
Body: bytes.NewBuffer(blob),
StorageClass: aws.String(s3.StorageClassIntelligentTiering),
})
metrics.MtrOutBytesReflector.Add(float64(blob.Size()))
return err
}
// PutSD stores the sd blob on S3, or errors if the S3 connection fails.
func (s *S3Store) PutSD(hash string, blob stream.Blob) error {
func (s *S3BlobStore) PutSD(hash string, blob []byte) error {
//Todo - handle missing stream for consistency
return s.Put(hash, blob)
}
func (s *S3Store) Delete(hash string) error {
func (s *S3BlobStore) Delete(hash string) error {
err := s.initOnce()
if err != nil {
return err
@ -145,26 +150,3 @@ func (s *S3Store) Delete(hash string) error {
return err
}
func (s *S3Store) initOnce() error {
if s.session != nil {
return nil
}
sess, err := session.NewSession(&aws.Config{
Credentials: credentials.NewStaticCredentials(s.awsID, s.awsSecret, ""),
Region: aws.String(s.region),
Endpoint: aws.String("https://s3.wasabisys.com"),
})
if err != nil {
return err
}
s.session = sess
return nil
}
// Shutdown shuts down the store gracefully
func (s *S3Store) Shutdown() {
return
}
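
A minimal sketch of the S3-backed store (credentials, region, and bucket are placeholders and the import path is assumed); the session is created lazily on first use.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store" // assumed import path; credentials below are placeholders
)

func main() {
	s3 := store.NewS3Store("AWS_KEY_ID", "AWS_SECRET_KEY", "us-east-1", "blob-bucket")

	if err := s3.Put("somehash", []byte("blob bytes")); err != nil {
		panic(err)
	}
	has, err := s3.Has("somehash")
	if err != nil {
		panic(err)
	}
	fmt.Println("stored on S3:", has)
}
```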

View file

@ -1,128 +0,0 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"golang.org/x/sync/singleflight"
)
func WithSingleFlight(component string, origin BlobStore) BlobStore {
return &singleflightStore{
BlobStore: origin,
component: component,
sf: new(singleflight.Group),
}
}
type singleflightStore struct {
BlobStore
component string
sf *singleflight.Group
}
func (s *singleflightStore) Name() string {
return "sf_" + s.BlobStore.Name()
}
type getterResponse struct {
blob stream.Blob
stack shared.BlobTrace
}
// Get ensures that only one request per hash is sent to the origin at a time,
// thereby protecting against https://en.wikipedia.org/wiki/Thundering_herd_problem
func (s *singleflightStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
metrics.CacheWaitingRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Inc()
defer metrics.CacheWaitingRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Dec()
gr, err, _ := s.sf.Do(hash, s.getter(hash))
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), s.Name()), err
}
if gr == nil {
return nil, shared.NewBlobTrace(time.Since(start), s.Name()), errors.Err("getter response is nil")
}
rsp := gr.(getterResponse)
return rsp.blob, rsp.stack, nil
}
// getter returns a function that gets a blob from the origin
// only one getter per hash will be executing at a time
func (s *singleflightStore) getter(hash string) func() (interface{}, error) {
return func() (interface{}, error) {
metrics.CacheOriginRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Inc()
defer metrics.CacheOriginRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Dec()
start := time.Now()
blob, stack, err := s.BlobStore.Get(hash)
if err != nil {
return getterResponse{
blob: nil,
stack: stack.Stack(time.Since(start), s.Name()),
}, err
}
rate := float64(len(blob)) / 1024 / 1024 / time.Since(start).Seconds()
metrics.CacheRetrievalSpeed.With(map[string]string{
metrics.LabelCacheType: s.Name(),
metrics.LabelComponent: s.component,
metrics.LabelSource: "origin",
}).Set(rate)
return getterResponse{
blob: blob,
stack: stack.Stack(time.Since(start), s.Name()),
}, nil
}
}
// Put ensures that only one request per hash is sent to the origin at a time,
// thereby protecting against https://en.wikipedia.org/wiki/Thundering_herd_problem
func (s *singleflightStore) Put(hash string, blob stream.Blob) error {
metrics.CacheWaitingRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Inc()
defer metrics.CacheWaitingRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Dec()
_, err, _ := s.sf.Do(hash, s.putter(hash, blob))
if err != nil {
return err
}
return nil
}
// putter returns a function that puts a blob into the origin
// only one putter per hash will be executing at a time
func (s *singleflightStore) putter(hash string, blob stream.Blob) func() (interface{}, error) {
return func() (interface{}, error) {
metrics.CacheOriginRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Inc()
defer metrics.CacheOriginRequestsCount.With(metrics.CacheLabels(s.Name(), s.component)).Dec()
start := time.Now()
err := s.BlobStore.Put(hash, blob)
if err != nil {
return nil, err
}
rate := float64(len(blob)) / 1024 / 1024 / time.Since(start).Seconds()
metrics.CacheRetrievalSpeed.With(map[string]string{
metrics.LabelCacheType: s.Name(),
metrics.LabelComponent: s.component,
metrics.LabelSource: "origin",
}).Set(rate)
return nil, nil
}
}
// Shutdown shuts down the store gracefully
func (s *singleflightStore) Shutdown() {
s.BlobStore.Shutdown()
return
}
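
A sketch of the single-flight wrapper in use (import path and upstream address are assumptions): concurrent Gets for the same hash collapse into one upstream request, and the other callers wait for and share that result.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/lbryio/reflector.go/store" // assumed import path; the upstream address is a placeholder
)

func main() {
	origin := store.WithSingleFlight("example", store.NewHttpStore("blobcache.example.com:5569", ""))

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Only one of these four calls should reach the upstream.
			_, _, err := origin.Get("someblobhash")
			fmt.Println("request finished, err:", err)
		}()
	}
	wg.Wait()
}
```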

View file

@ -1,103 +0,0 @@
package speedwalk
import (
"io/fs"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/karrick/godirwalk"
"github.com/sirupsen/logrus"
)
// AllFiles recursively lists every file in every subdirectory of a given directory
// If basename is true, return the basename of each file. Otherwise return the full path starting at startDir.
func AllFiles(startDir string, basename bool) ([]string, error) {
entries, err := os.ReadDir(startDir)
if err != nil {
return nil, err
}
items := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
info, err := entry.Info()
if err != nil {
return nil, err
}
items = append(items, info)
}
if err != nil {
return nil, err
}
pathChan := make(chan string)
paths := make([]string, 0, 1000)
pathWG := &sync.WaitGroup{}
pathWG.Add(1)
metrics.RoutinesQueue.WithLabelValues("speedwalk", "worker").Inc()
go func() {
defer pathWG.Done()
for {
path, ok := <-pathChan
if !ok {
return
}
paths = append(paths, path)
}
}()
maxThreads := runtime.NumCPU() - 1
goroutineLimiter := make(chan struct{}, maxThreads)
for i := 0; i < maxThreads; i++ {
goroutineLimiter <- struct{}{}
}
walkerWG := &sync.WaitGroup{}
for _, item := range items {
if !item.IsDir() {
if basename {
pathChan <- item.Name()
} else {
pathChan <- filepath.Join(startDir, item.Name())
}
continue
}
<-goroutineLimiter
walkerWG.Add(1)
go func(dir string) {
defer func() {
walkerWG.Done()
goroutineLimiter <- struct{}{}
}()
err = godirwalk.Walk(filepath.Join(startDir, dir), &godirwalk.Options{
Unsorted: true, // faster this way
Callback: func(osPathname string, de *godirwalk.Dirent) error {
if de.IsRegular() {
if basename {
pathChan <- de.Name()
} else {
pathChan <- osPathname
}
}
return nil
},
})
if err != nil {
logrus.Errorf(errors.FullTrace(err))
}
}(item.Name())
}
walkerWG.Wait()
close(pathChan)
pathWG.Wait()
return paths, nil
}
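
A quick usage sketch for the parallel directory walker (the blob directory is a placeholder): with basename=true the returned slice contains just the file names, which for a blob directory are the blob hashes.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store/speedwalk"
)

func main() {
	hashes, err := speedwalk.AllFiles("/var/lib/blobs", true)
	if err != nil {
		panic(err)
	}
	fmt.Println("found", len(hashes), "blob files")
}
```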

View file

@ -1,43 +1,27 @@
package store
import (
"github.com/lbryio/reflector.go/shared"
import "github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
// BlobStore is an interface for handling blob storage.
// BlobStore is an interface with methods for consistently handling blob storage.
type BlobStore interface {
// Name of blob store (useful for metrics)
Name() string
// Has reports whether the blob exists in the store.
// Does blob exist in the store
Has(hash string) (bool, error)
// Get the blob from the store. Must return ErrBlobNotFound if blob is not in store.
Get(hash string) (stream.Blob, shared.BlobTrace, error)
// Put the blob into the store.
Put(hash string, blob stream.Blob) error
// PutSD an SD blob into the store.
PutSD(hash string, blob stream.Blob) error
// Delete the blob from the store.
// Get the blob from the store
Get(hash string) ([]byte, error)
// Put the blob into the store
Put(hash string, blob []byte) error
// Put an SD blob into the store
PutSD(hash string, blob []byte) error
// Delete the blob from the store
Delete(hash string) error
// Shutdown the store gracefully
Shutdown()
}
// Blocklister is a store that supports blocking blobs to prevent their inclusion in the store.
type Blocklister interface {
// Block deletes the blob and prevents it from being uploaded in the future
Block(hash string) error
// Wants returns false if the hash exists in store or is blocked, true otherwise
// Wants returns false if the hash exists or is blocked, true otherwise
Wants(hash string) (bool, error)
}
// lister is a store that can list cached blobs. This is helpful when an overlay
// cache needs to track blob existence.
type lister interface {
list() ([]string, error)
}
//ErrBlobNotFound is a standard error when a blob is not found in the store.
var ErrBlobNotFound = errors.Base("blob not found")

View file

@ -1,24 +1,5 @@
package wallet
import (
"encoding/base64"
"encoding/hex"
"github.com/lbryio/chainquery/lbrycrd"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/schema/stake"
types "github.com/lbryio/types/v2/go"
"github.com/btcsuite/btcutil"
"github.com/golang/protobuf/proto"
"github.com/spf13/cast"
)
// Raw makes a raw wallet server request
func (n *Node) Raw(method string, params []string, v interface{}) error {
return n.request(method, params, v)
}
// ServerVersion returns the server's version.
// https://electrumx.readthedocs.io/en/latest/protocol-methods.html#server-version
func (n *Node) ServerVersion() (string, error) {
@ -35,38 +16,6 @@ func (n *Node) ServerVersion() (string, error) {
return v, err
}
func (n *Node) Resolve(url string) (*types.Output, error) {
outputs := &types.Outputs{}
resp := &struct {
Result string `json:"result"`
}{}
err := n.request("blockchain.claimtrie.resolve", []string{url}, resp)
if err != nil {
return nil, err
}
b, err := base64.StdEncoding.DecodeString(resp.Result)
if err != nil {
return nil, errors.Err(err)
}
err = proto.Unmarshal(b, outputs)
if err != nil {
return nil, errors.Err(err)
}
if len(outputs.GetTxos()) != 1 {
return nil, errors.Err("expected 1 output, got " + cast.ToString(len(outputs.GetTxos())))
}
if e := outputs.GetTxos()[0].GetError(); e != nil {
return nil, errors.Err("%s: %s", e.GetCode(), e.GetText())
}
return outputs.GetTxos()[0], nil
}
type GetClaimsInTxResp struct {
Jsonrpc string `json:"jsonrpc"`
ID int `json:"id"`
@ -92,58 +41,3 @@ func (n *Node) GetClaimsInTx(txid string) (*GetClaimsInTxResp, error) {
err := n.request("blockchain.claimtrie.getclaimsintx", []string{txid}, &resp)
return &resp, err
}
func (n *Node) GetTx(txid string) (string, error) {
resp := &struct {
Result string `json:"result"`
}{}
err := n.request("blockchain.transaction.get", []string{txid}, resp)
if err != nil {
return "", err
}
return resp.Result, nil
}
func (n *Node) GetClaimInTx(txid string, nout int) (*types.Claim, error) {
hexTx, err := n.GetTx(txid)
if err != nil {
return nil, errors.Err(err)
}
rawTx, err := hex.DecodeString(hexTx)
if err != nil {
return nil, errors.Err(err)
}
tx, err := btcutil.NewTxFromBytes(rawTx)
if err != nil {
return nil, errors.Err(err)
}
if len(tx.MsgTx().TxOut) <= nout {
return nil, errors.Err("nout not found")
}
script := tx.MsgTx().TxOut[nout].PkScript
var value []byte
if lbrycrd.IsClaimNameScript(script) {
_, value, _, err = lbrycrd.ParseClaimNameScript(script)
} else if lbrycrd.IsClaimUpdateScript(script) {
_, _, value, _, err = lbrycrd.ParseClaimUpdateScript(script)
} else {
err = errors.Err("no claim found in output")
}
if err != nil {
return nil, errors.Err(err)
}
ch, err := stake.DecodeClaimBytes(value, "")
if err != nil {
return nil, errors.Err(err)
}
return ch.Claim, nil
}
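
A connection sketch for the wallet node shown above (the import path and server address are assumptions, and a nil *tls.Config is assumed to mean a plaintext connection): connect, make a request, then shut down.

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/wallet" // assumed import path; the server address is a placeholder
)

func main() {
	n := wallet.NewNode()
	if err := n.Connect([]string{"spv.example.com:50001"}, nil); err != nil {
		panic(err)
	}
	defer n.Shutdown()

	v, err := n.ServerVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println("wallet server version:", v)
}
```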

View file

@ -10,11 +10,11 @@ import (
"sync"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/extras/errors"
"github.com/lbryio/lbry.go/extras/stop"
log "github.com/sirupsen/logrus"
"go.uber.org/atomic"
"github.com/uber-go/atomic"
)
const (
@ -63,7 +63,7 @@ func NewNode() *Node {
// Connect creates a new connection to the specified address.
func (n *Node) Connect(addrs []string, config *tls.Config) error {
if n.transport != nil {
return errors.Err(ErrNodeConnected)
return ErrNodeConnected
}
// shuffle addresses for load balancing
@ -76,18 +76,15 @@ func (n *Node) Connect(addrs []string, config *tls.Config) error {
if err == nil {
break
}
if errors.Is(err, ErrTimeout) {
continue
}
if e, ok := err.(*net.OpError); ok && e.Err.Error() == "no such host" {
// net.errNoSuchHost is not exported, so we have to string-match
continue
}
return errors.Err(err)
return err
}
if n.transport == nil {
return errors.Err(ErrConnectFailed)
return ErrConnectFailed
}
log.Debugf("wallet connected to %s", n.transport.conn.RemoteAddr())
@ -115,13 +112,7 @@ func (n *Node) Connect(addrs []string, config *tls.Config) error {
}
func (n *Node) Shutdown() {
var addr net.Addr
if n.transport != nil {
addr = n.transport.conn.RemoteAddr()
}
log.Debugf("shutting down wallet %s", addr)
n.grp.StopAndWait()
log.Debugf("wallet stopped")
}
func (n *Node) handleErrors() {
@ -130,7 +121,7 @@ func (n *Node) handleErrors() {
case <-n.grp.Ch():
return
case err := <-n.transport.Errors():
n.err(errors.Err(err))
n.err(err)
}
}
}
@ -138,18 +129,12 @@ func (n *Node) handleErrors() {
// err handles errors produced by the foreign node.
func (n *Node) err(err error) {
// TODO: Better error handling.
log.Error(errors.FullTrace(err))
log.Error(err)
}
// listen processes messages from the server.
func (n *Node) listen() {
for {
select {
case <-n.grp.Ch():
return
default:
}
select {
case <-n.grp.Ch():
return
@ -162,36 +147,14 @@ func (n *Node) listen() {
Message string `json:"message"`
} `json:"error"`
}{}
msg2 := &struct {
Id uint32 `json:"id"`
Method string `json:"method"`
Error struct {
Code int `json:"code"`
Message struct {
Code int `json:"code"`
Message string `json:"message"`
} `json:"message"`
} `json:"error"`
}{}
r := response{}
err := json.Unmarshal(bytes, msg)
if err != nil {
// try msg2, a hack around the weird error-in-error response we sometimes get from the wallet server
// maybe that happens because the wallet server passes a lbrycrd error through to us?
if err2 := json.Unmarshal(bytes, msg2); err2 == nil {
err = nil
msg.Id = msg2.Id
msg.Method = msg2.Method
msg.Error = msg2.Error.Message
}
if err := json.Unmarshal(bytes, msg); err != nil {
n.err(err)
continue
}
if err != nil {
r.err = errors.Err(err)
n.err(r.err)
} else if len(msg.Error.Message) > 0 {
r.err = errors.Err("%d: %s", msg.Error.Code, msg.Error.Message)
r := response{}
if len(msg.Error.Message) > 0 {
r.err = errors.Base("%d: %s", msg.Error.Code, msg.Error.Message)
} else {
r.data = bytes
}
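
The msg/msg2 pair above corresponds to two error shapes the wallet server can send back. For illustration only (the codes and messages here are invented, not taken from real server output), the payloads look roughly like this:

    // flat JSON-RPC error shape, matched by msg:
    const flatErr = `{"id":1,"error":{"code":-32601,"message":"unknown method"}}`

    // error-in-error shape that the msg2 hack unwraps, e.g. a lbrycrd error passed through:
    const nestedErr = `{"id":1,"error":{"code":1,"message":{"code":-26,"message":"claim rejected by lbrycrd"}}}`
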
@ -243,7 +206,7 @@ func (n *Node) request(method string, params []string, v interface{}) error {
bytes, err := json.Marshal(msg)
if err != nil {
return errors.Err(err)
return err
}
bytes = append(bytes, delimiter)
@ -255,13 +218,11 @@ func (n *Node) request(method string, params []string, v interface{}) error {
err = n.transport.Send(bytes)
if err != nil {
return errors.Err(err)
return err
}
var r response
select {
case <-n.grp.Ch():
return nil
case r = <-c:
case <-time.After(n.timeout):
r = response{err: errors.Err(ErrTimeout)}
@ -272,8 +233,8 @@ func (n *Node) request(method string, params []string, v interface{}) error {
n.handlersMu.Unlock()
if r.err != nil {
return errors.Err(r.err)
return r.err
}
return errors.Err(json.Unmarshal(r.data, v))
return json.Unmarshal(r.data, v)
}
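
Stepping back, request boils down to newline-delimited JSON-RPC over the TCP transport: marshal the call, append the delimiter, send it, then wait on the handler channel or the timeout. A rough sketch of just the framing follows; the "params" field name is an assumption, since only "id" and "method" are visible in the server.version payload elsewhere in this diff.

    // buildRequest frames a call the way request does above: JSON body plus the trailing '\n' delimiter.
    // The "params" field name is assumed; only "id" and "method" appear in this diff.
    func buildRequest(id uint32, method string, params []string) ([]byte, error) {
        body, err := json.Marshal(struct {
            Id     uint32   `json:"id"`
            Method string   `json:"method"`
            Params []string `json:"params"`
        }{id, method, params})
        if err != nil {
            return nil, err
        }
        return append(body, '\n'), nil
    }
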

View file

@ -5,14 +5,10 @@ package wallet
import (
"bufio"
"crypto/tls"
"encoding/json"
"fmt"
"net"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/extras/stop"
log "github.com/sirupsen/logrus"
)
@ -53,31 +49,15 @@ func NewTransport(addr string, config *tls.Config) (*TCPTransport, error) {
t.grp.Add(1)
go func() {
defer t.grp.Done()
t.grp.Done()
t.listen()
}()
err = t.test()
if err != nil {
t.grp.StopAndWait()
return nil, errors.Prefix(addr, err)
}
return t, nil
}
const delimiter = byte('\n')
func (t *TCPTransport) Send(body []byte) error {
log.Debugf("%s <- %s", t.conn.RemoteAddr(), body)
_, err := t.conn.Write(body)
return err
}
func (t *TCPTransport) Responses() <-chan []byte { return t.responses }
func (t *TCPTransport) Errors() <-chan error { return t.errors }
func (t *TCPTransport) Shutdown() { t.grp.StopAndWait() }
func (t *TCPTransport) listen() {
reader := bufio.NewReader(t.conn)
for {
@ -93,6 +73,12 @@ func (t *TCPTransport) listen() {
}
}
func (t *TCPTransport) Send(body []byte) error {
log.Debugf("%s <- %s", t.conn.RemoteAddr(), body)
_, err := t.conn.Write(body)
return err
}
func (t *TCPTransport) error(err error) {
select {
case t.errors <- err:
@ -100,33 +86,11 @@ func (t *TCPTransport) error(err error) {
}
}
func (t *TCPTransport) test() error {
err := t.Send([]byte(`{"id":1,"method":"server.version"}` + "\n"))
if err != nil {
return errors.Err(err)
}
func (t *TCPTransport) Responses() <-chan []byte { return t.responses }
func (t *TCPTransport) Errors() <-chan error { return t.errors }
var data []byte
select {
case data = <-t.Responses():
case <-time.Tick(1 * time.Second):
return errors.Err(ErrTimeout)
}
var response struct {
Error struct {
Message string `json:"message"`
} `json:"error"`
}
err = json.Unmarshal(data, &response)
if err != nil {
return errors.Err(err)
}
if response.Error.Message != "" {
return fmt.Errorf(response.Error.Message)
}
return nil
func (t *TCPTransport) Shutdown() {
t.grp.StopAndWait()
}
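
Finally, a hedged sketch of driving the TCP transport directly, without going through Node. The address is a placeholder, the TLS config is assumed (the diff does not show how a nil config is handled), and the snippet reuses the imports from the first sketch plus "time".

    t, err := wallet.NewTransport("spv.example.com:50002", &tls.Config{})
    if err != nil {
        log.Fatal(err)
    }
    defer t.Shutdown()

    // the same newline-delimited handshake the old test() performed
    if err := t.Send([]byte(`{"id":1,"method":"server.version"}` + "\n")); err != nil {
        log.Fatal(err)
    }
    select {
    case resp := <-t.Responses():
        fmt.Printf("%s\n", resp) // raw newline-delimited JSON from the server
    case err := <-t.Errors():
        log.Fatal(err)
    case <-time.After(5 * time.Second):
        log.Fatal("no response from server")
    }
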
func (t *TCPTransport) close() {