Compare commits


52 commits

Author SHA1 Message Date
Alex Grintsvayg
2a1557845d
wrap blob insertion in tx. fixes lbryio/lbry-sdk#3296
The problem is that inserting an sd blob with ~5k
blobs takes longer than 30 seconds. So the client
times out and retries the request. At that point,
reflector is not done inserting so it replies with
a smaller number of blobs than it should. The client
uploads that many blobs and marks the stream as
reflected. The remaining blobs never get uploaded.

Doing the insert inside a transaction should be
faster than doing 10k (2 per blob) inserts
independently.
2021-05-24 10:40:31 -04:00
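The fix boils down to the following pattern. Here is a minimal database/sql sketch of it (not the repo's actual code; the real change threads a db.Executor through insertBlob, as the db/db.go diff below shows, and the table/column names here are taken from that diff):

package sketch

import "database/sql"

// insertStreamBlobs writes every content blob of an sd blob inside one
// transaction. A single commit replaces ~10k independently auto-committed
// INSERTs (2 per blob for ~5k blobs), which is what pushed the operation
// past the client's 30-second timeout.
func insertStreamBlobs(conn *sql.DB, streamID int64, hashes []string, lengths []int) (err error) {
	tx, err := conn.Begin()
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			_ = tx.Rollback() // undo everything if any insert failed
			return
		}
		err = tx.Commit() // one commit for the whole stream
	}()

	for i, hash := range hashes {
		// upsert the blob row, keeping is_stored if it was already set
		res, err := tx.Exec(
			"INSERT INTO blob_ (hash, is_stored, length) VALUES (?, ?, ?)"+
				" ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored))",
			hash, false, lengths[i])
		if err != nil {
			return err
		}
		blobID, err := res.LastInsertId()
		if err != nil {
			return err
		}
		// link the blob to its stream
		_, err = tx.Exec(
			"INSERT IGNORE INTO stream_blob (stream_id, blob_id, num) VALUES (?, ?, ?)",
			streamID, blobID, i)
		if err != nil {
			return err
		}
	}
	return nil
}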
Niko Storni
9670bc14f8 fix unsafe dereference 2021-05-21 21:06:59 +02:00
Niko Storni
df881e16b5 add metrics 2021-05-21 19:09:02 +02:00
Niko Storni
bc889001bb update lbry.go dep 2021-05-21 18:05:37 +02:00
Niko Storni
1ec2184833 upgrade singleflight
http store fix
2021-05-21 17:58:33 +02:00
Niko Storni
5cc1e84adb remove locks causing deadlocks 2021-05-21 05:53:13 +02:00
Niko Storni
a7086a00f3 add http server/client 2021-05-21 05:49:02 +02:00
Mark Beamer Jr
76ece1e117
Add queue to prevent writing too many files at once. 2021-05-20 20:43:01 -04:00
Mark Beamer Jr
c1caf1938c
Add queue to prevent writing too many files at once. 2021-05-20 20:41:47 -04:00
Mark Beamer Jr
213d21b021
Add locks to disk store. 2021-05-20 19:59:50 -04:00
Niko Storni
006b04f6e9 add a lot of extra heavy debugging 2021-05-21 01:48:46 +02:00
Mark Beamer Jr
45130499cd
Add single flight for cache not just origin 2021-05-20 19:05:48 -04:00
Mark Beamer Jr
4ecce75e23
add metric calls for other packages 2021-05-20 18:12:30 -04:00
Mark Beamer Jr
0152300d8d
add guage metrics for go routines in reflector package 2021-05-20 18:01:13 -04:00
Mark Beamer Jr
eafc62f2a6
add gops to reflector server 2021-05-20 17:21:35 -04:00
Niko Storni
50c077a9cb request queue size param 2021-05-20 23:17:18 +02:00
Niko Storni
070938e12a increase window size 2021-05-06 22:53:18 +02:00
Niko Storni
c4084eeb68 improve disk cleanup
add index to is_stored
fix test
replace LRU cache
2021-04-29 03:41:18 +02:00
Niko Storni
4392c97242 fix mess with lbry.go 2021-04-13 00:52:56 +02:00
Niko Storni
ec3aae33ba add if this than that store
switch to wasabi for uploads
2021-04-12 23:05:50 +02:00
Niko Storni
dc95351cf3 add integrity check cmd
throttle live integrity checks
bug fixes
2021-04-07 04:46:18 +02:00
Mark Beamer Jr
25a7fac4f0
use wait group not stopper 2021-04-06 14:28:29 -04:00
Mark Beamer Jr
b97595311f
Wait for request to be handled before returning 2021-04-06 14:21:05 -04:00
Mark Beamer Jr
bd13836897
Add request queue for blob cache 2021-04-06 14:00:36 -04:00
Niko Storni
38b44218f2 check blobs when reading them 2021-04-05 23:34:45 +02:00
Niko Storni
90c36fbe24 upgrade quic-go
add cache for blobs not found
2021-03-31 04:53:27 +02:00
Niko Storni
3a441aed3a fix issues caused by beamer's renaming 2021-03-29 19:56:18 +02:00
Niko Storni
ebb62d0a24 run go mod tidy 2021-03-29 19:44:27 +02:00
Niko Storni
8cb7389619 make it simpler 2021-02-23 15:23:46 +01:00
Niko Storni
7b49dd115b remove panics 2021-02-23 15:08:32 +01:00
Niko Storni
6291e33ee1 add tracing to blobs 2021-01-14 20:38:04 +01:00
Niko Storni
3e475e537b optimize batch insertions
reduce touch time to every 6 hours
2021-01-07 01:28:34 +01:00
Alex Grintsvayg
c4504631bc
avoid heavy interpolateparams call 2021-01-06 10:43:35 -05:00
Alex Grintsvayg
cc504e6c44
fix long query 2021-01-05 12:16:44 -05:00
Alex Grintsvayg
49714c02a6
only touch blobs when you get them 2021-01-05 11:36:33 -05:00
Niko Storni
b33651ae26 save uploaded blobs and work around the blocklist issue 2021-01-05 05:09:55 +01:00
Niko Storni
0d5004a83b add cmd to populate db
fix store init
try fixing unreasonable db bottleneck
2020-12-30 04:24:11 +01:00
Niko Storni
04f6859c74 Merge branch 'grin' into litedb 2020-12-24 23:13:31 +01:00
Alex Grintsvayg
3a1d9d3304
something like this 2020-12-23 17:08:13 -05:00
Niko Storni
03304312e8 add PoC for litedb to avoid all the overhead 2020-12-23 06:04:42 +01:00
Niko Storni
869030fc58 address some review comments 2020-12-22 21:19:48 +01:00
Niko Storni
def551cc89 add option to run with RO-CF only as upstream
increase idle timeout to avoid errors downstream
add option to delete blobs from DB if storage doesn't have it (for future local tracking)
2020-12-22 20:53:48 +01:00
Niko Storni
74b76a11e4 upgrade quic 2020-12-17 23:49:37 +01:00
Niko Storni
2c0df2ca8a
update lfuda library 2020-11-27 16:20:50 -05:00
Niko Storni
9fc96ac01b
only store the blobs in the underlying storage if LFUDA accepted them 2020-11-27 16:20:50 -05:00
Niko Storni
ff9b61b034
fix cache size mess 2020-11-27 16:20:49 -05:00
Niko Storni
7b80b2d4d2
fix buffer cache running out of space 2020-11-27 16:20:49 -05:00
Niko Storni
d45abdbdb0
use LFUDA store
swap size to bytes
2020-11-27 16:19:46 -05:00
Niko Storni
bc54601dde
add LFUDA store
update quic
fix tests
2020-11-27 16:19:46 -05:00
Alex Grintsvayg
bb41a84bb7
rename cahces 2020-11-27 16:19:45 -05:00
Niko Storni
a574fecf4e
add buffer cache for nvme drive 2020-11-27 16:19:45 -05:00
Niko Storni
9146c8b084
update quic
don't wait for a blob to be written to disk before sending it downstream
don't wait for the disk store to be walked before starting everything up
2020-11-27 16:19:45 -05:00
73 changed files with 1591 additions and 2352 deletions


@@ -1,37 +0,0 @@
-name: Go
-on:
-  push:
-    branches: [ master ]
-  pull_request:
-    branches: [ master ]
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: 1.20.x
-      - name: Build linux
-        run: make linux
-      - name: Build macos
-        run: make macos
-      - name: Test
-        run: make test
-      - name: Lint
-        run: make lint
-      - name: retrieve all tags
-        run: git fetch --prune --unshallow --tags
-      - name: Print changes since last version
-        run: git log $(git describe --tags --abbrev=0)..HEAD --no-merges --oneline


@@ -1,62 +0,0 @@
-name: release
-on:
-  push:
-    tags:
-      - "*.*.*"
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: 1.20.x
-      - name: Build linux
-        run: make linux
-      - name: Build macos
-        run: make macos
-      - name: Test
-        run: make test
-      - name: Lint
-        run: make lint
-      - name: Zip macos
-        run: zip -r reflector_darwin_amd64.zip ./dist/darwin_amd64
-      - name: Zip linux
-        run: zip -r reflector_linux_amd64.zip ./dist/linux_amd64
-      - name: retrieve all tags
-        run: git fetch --prune --unshallow --tags
-      - name: Generate Changelog
-        run: git log $(git describe --tags --abbrev=0 @^)..@ --no-merges --oneline > ${{ github.workspace }}-CHANGELOG.txt
-      - name: upload to github releases
-        uses: softprops/action-gh-release@v1
-        with:
-          files: |
-            ./reflector_linux_amd64.zip
-            ./reflector_darwin_amd64.zip
-          body_path: ${{ github.workspace }}-CHANGELOG.txt
-#      - name: Login to DockerHub
-#        uses: docker/login-action@v2
-#        with:
-#          username: ${{ secrets.DOCKERHUB_USERNAME }}
-#          password: ${{ secrets.DOCKERHUB_TOKEN }}
-#      - name: Generate docker image
-#        run: make image
-#      - name: Docker push
-#        run: make publish_image

.gitignore (vendored, 1 changed line)

@@ -1,4 +1,3 @@
 /vendor
 /config.json*
-/dist
 /bin


@@ -1,9 +1,12 @@
 os: linux
-dist: bionic
+dist: trusty
 language: go
+env:
+  - GO111MODULE=on
 go:
-  - 1.20.x
+  - 1.15.x
 cache:
   directories:
@@ -14,7 +17,7 @@ notifications:
   email: false
 
 # Skip the install step. Don't `go get` dependencies. Only build with the code in vendor/
-#install: true
+install: true
 
 # Anything in before_script that returns a nonzero exit code will
 # flunk the build and immediately stop. It's sorta like having
@@ -22,14 +25,14 @@ notifications:
 before_script:
   # All the .go files, excluding vendor/ and model (auto generated)
   - GO_FILES=$(find . -iname '*.go' ! -iname '*_test.go' -type f | grep -v /vendor/ ) #i wish we were this crazy :p
-  - go install golang.org/x/tools/cmd/goimports # Used in build script for generated files
+  - go get golang.org/x/tools/cmd/goimports # Used in build script for generated files
   # - go get github.com/golang/lint/golint # Linter
   # - go get honnef.co/go/tools/cmd/megacheck # Badass static analyzer/linter
-  - go install github.com/fzipp/gocyclo/cmd/gocyclo@latest # Check against high complexity
-  - go install github.com/mdempsky/unconvert@latest # Identifies unnecessary type conversions
-  - go install github.com/kisielk/errcheck@latest # Checks for unhandled errors
-  - go install github.com/opennota/check/cmd/varcheck@latest # Checks for unused vars
-  - go install github.com/opennota/check/cmd/structcheck@latest # Checks for unused fields in structs
+  - go get github.com/jgautheron/gocyclo # Check against high complexity
+  - go get github.com/mdempsky/unconvert # Identifies unnecessary type conversions
+  - go get github.com/kisielk/errcheck # Checks for unhandled errors
+  - go get github.com/opennota/check/cmd/varcheck # Checks for unused vars
+  - go get github.com/opennota/check/cmd/structcheck # Checks for unused fields in structs
@@ -37,7 +40,7 @@ before_script:
 # in a modern Go project.
 script:
   # Fail if a .go file hasn't been formatted with gofmt
-  - for i in $GO_FILES; do test -z $(gofmt -s -l $i); done
+  - test -z $(gofmt -s -l $GO_FILES)
   # Run unit tests
   - make test
   # Checks for unused vars and fields on structs
@@ -56,11 +59,11 @@ script:
   # one last linter - ignore autogen code
   #- golint -set_exit_status $(go list ./... | grep -v /vendor/ )
   # Finally, build the binary
-  - make linux
+  - make
 
 deploy:
   - provider: s3
-    local_dir: ./dist/linux_amd64
+    local_dir: ./bin
     skip_cleanup: true
     on:
       repo: lbryio/reflector.go


@@ -3,7 +3,7 @@ EXPOSE 8080
 
 RUN mkdir /app
 WORKDIR /app
-COPY dist/linux_amd64/prism-bin ./prism
+COPY bin/prism-bin ./prism
 RUN chmod +x prism
 
 ENTRYPOINT [ "/app/prism" ]


@@ -1,33 +1,25 @@
-version := $(shell git describe --dirty --always --long --abbrev=7)
-commit := $(shell git rev-parse --short HEAD)
-commit_long := $(shell git rev-parse HEAD)
-branch := $(shell git rev-parse --abbrev-ref HEAD)
-curTime := $(shell date +%s)
-
 BINARY=prism-bin
 
-IMPORT_PATH = github.com/lbryio/reflector.go
-LDFLAGS="-X ${IMPORT_PATH}/meta.version=$(version) -X ${IMPORT_PATH}/meta.commit=$(commit) -X ${IMPORT_PATH}/meta.commitLong=$(commit_long) -X ${IMPORT_PATH}/meta.branch=$(branch) -X '${IMPORT_PATH}/meta.Time=$(curTime)'"
-
 DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
-BIN_DIR = $(DIR)/dist
+BIN_DIR = ${DIR}/bin
+IMPORT_PATH = github.com/lbryio/reflector.go
 
-.DEFAULT_GOAL := linux
+VERSION = $(shell git --git-dir=${DIR}/.git describe --dirty --always --long --abbrev=7)
+LDFLAGS = -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION} -X ${IMPORT_PATH}/meta.Time=$(shell date +%s)"
 
-.PHONY: test
+.PHONY: build clean test lint
+.DEFAULT_GOAL: build
+
+build:
+	mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go
+
+clean:
+	if [ -f ${BIN_DIR}/${BINARY} ]; then rm ${BIN_DIR}/${BINARY}; fi
+
 test:
-	go test -cover -v ./...
+	go test ./... -v -cover
 
-.PHONY: lint
 lint:
-	./scripts/lint.sh
-
-.PHONY: linux
-linux:
-	GOARCH=amd64 GOOS=linux go build -ldflags ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/linux_amd64/${BINARY}
-
-.PHONY: macos
-macos:
-	GOARCH=amd64 GOOS=darwin go build -ldflags ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/darwin_amd64/${BINARY}
-
-.PHONY: image
-image:
-	docker buildx build -t lbry/reflector:$(version) -t lbry/reflector:latest --platform linux/amd64 .
+	go get github.com/alecthomas/gometalinter && gometalinter --install && gometalinter ./...


@@ -1,7 +1,7 @@
 package cluster
 
 import (
-	"io"
+	"io/ioutil"
 	baselog "log"
 	"sort"
 	"time"
@@ -52,7 +52,7 @@ func (c *Cluster) Connect() error {
 	conf.MemberlistConfig.AdvertisePort = c.port
 	conf.NodeName = c.name
 
-	nullLogger := baselog.New(io.Discard, "", 0)
+	nullLogger := baselog.New(ioutil.Discard, "", 0)
 	conf.Logger = nullLogger
 	c.eventCh = make(chan serf.Event)


@@ -7,7 +7,6 @@ import (
 	"syscall"
 
 	"github.com/lbryio/lbry.go/v2/extras/crypto"
 	"github.com/lbryio/reflector.go/cluster"
-
 	log "github.com/sirupsen/logrus"


@@ -7,7 +7,7 @@ import (
 	"github.com/lbryio/lbry.go/v2/schema/stake"
 
 	"github.com/davecgh/go-spew/spew"
-	"github.com/gogo/protobuf/jsonpb"
+	"github.com/golang/protobuf/jsonpb"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )


@@ -5,7 +5,7 @@ import (
 	"os"
 	"time"
 
-	"github.com/lbryio/reflector.go/server/peer"
+	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/store"
 
 	"github.com/lbryio/lbry.go/v2/stream"


@@ -3,17 +3,17 @@ package cmd
 import (
 	"crypto/sha512"
 	"encoding/hex"
+	"io/ioutil"
 	"os"
 	"path"
 	"runtime"
 	"sync/atomic"
 	"time"
 
-	"github.com/lbryio/lbry.go/v2/extras/errors"
-
 	"github.com/lbryio/reflector.go/meta"
 	"github.com/lbryio/reflector.go/store/speedwalk"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
@@ -39,7 +39,7 @@ func integrityCheckCmd(cmd *cobra.Command, args []string) {
 	blobs, err := speedwalk.AllFiles(diskStorePath, true)
 	if err != nil {
-		log.Fatalf("error while reading blobs from disk %s", errors.FullTrace(err))
+		log.Errorf("error while reading blobs from disk %s", errors.FullTrace(err))
 	}
 	tasks := make(chan string, len(blobs))
 	done := make(chan bool)
@@ -63,15 +63,15 @@ func consume(worker int, tasks <-chan string, done chan<- bool, totalTasks int,
 	start := time.Now()
 
 	for b := range tasks {
-		processedSoFar := atomic.AddInt32(processed, 1)
+		checked := atomic.AddInt32(processed, 1)
 		if worker == 0 {
-			remaining := int32(totalTasks) - processedSoFar
-			timePerBlob := time.Since(start).Microseconds() / int64(processedSoFar)
+			remaining := int32(totalTasks) - checked
+			timePerBlob := time.Since(start).Microseconds() / int64(checked)
 			remainingTime := time.Duration(int64(remaining)*timePerBlob) * time.Microsecond
-			log.Infof("[T%d] %d/%d blobs processed so far. ETA: %s", worker, processedSoFar, totalTasks, remainingTime.String())
+			log.Infof("[T%d] %d/%d blobs checked. ETA: %s", worker, checked, totalTasks, remainingTime.String())
 		}
 
 		blobPath := path.Join(diskStorePath, b[:2], b)
-		blob, err := os.ReadFile(blobPath)
+		blob, err := ioutil.ReadFile(blobPath)
 		if err != nil {
 			if os.IsNotExist(err) {
 				continue


@@ -7,7 +7,7 @@ import (
 	"syscall"
 
 	"github.com/lbryio/reflector.go/db"
-	"github.com/lbryio/reflector.go/server/peer"
+	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"


@@ -1,12 +1,11 @@
 package cmd
 
 import (
-	"github.com/lbryio/lbry.go/v2/extras/errors"
-
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/meta"
 	"github.com/lbryio/reflector.go/store/speedwalk"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
@@ -41,9 +40,6 @@ func populateDbCmd(cmd *cobra.Command, args []string) {
 		log.Fatal(err)
 	}
 	blobs, err := speedwalk.AllFiles(diskStorePath, true)
-	if err != nil {
-		log.Fatal(err)
-	}
 	err = localDb.AddBlobs(blobs)
 	if err != nil {
 		log.Errorf("error while storing to db: %s", errors.FullTrace(err))


@@ -8,18 +8,17 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/lbryio/lbry.go/v2/extras/util"
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/extras/stop"
 
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/meta"
+	"github.com/lbryio/reflector.go/peer"
+	"github.com/lbryio/reflector.go/peer/http3"
 	"github.com/lbryio/reflector.go/reflector"
 	"github.com/lbryio/reflector.go/server/http"
-	"github.com/lbryio/reflector.go/server/http3"
-	"github.com/lbryio/reflector.go/server/peer"
 	"github.com/lbryio/reflector.go/store"
 
-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/extras/stop"
 	"github.com/lbryio/lbry.go/v2/stream"
 
 	"github.com/c2h5oh/datasize"
@@ -28,43 +27,24 @@ import (
 )
 
 var (
-	//port configuration
-	tcpPeerPort   int
-	http3PeerPort int
-	httpPeerPort  int
-	receiverPort  int
-	metricsPort   int
-
-	//flags configuration
-	disableUploads   bool
-	disableBlocklist bool
-	useDB            bool
-
-	//upstream configuration
-	upstreamReflector string
-	upstreamProtocol  string
-	upstreamEdgeToken string
-
-	//downstream configuration
-	requestQueueSize int
-
-	//upstream edge configuration (to "cold" storage)
-	originEndpoint         string
-	originEndpointFallback string
-
-	//cache configuration
-	diskCache          string
-	secondaryDiskCache string
-	memCache           int
+	tcpPeerPort                 int
+	http3PeerPort               int
+	httpPort                    int
+	receiverPort                int
+	metricsPort                 int
+	disableUploads              bool
+	disableBlocklist            bool
+	proxyAddress                string
+	proxyPort                   string
+	proxyProtocol               string
+	useDB                       bool
+	cloudFrontEndpoint          string
+	WasabiEndpoint              string
+	reflectorCmdDiskCache       string
+	bufferReflectorCmdDiskCache string
+	reflectorCmdMemCache        int
+	requestQueueSize            int
 )
 
-var cacheManagers = []string{"localdb", "lfu", "arc", "lru", "simple"}
-
-var cacheMangerToGcache = map[string]store.EvictionStrategy{
-	"lfu":    store.LFU,
-	"arc":    store.ARC,
-	"lru":    store.LRU,
-	"simple": store.SIMPLE,
-}
-
 func init() {
 	var cmd = &cobra.Command{
@@ -72,42 +52,38 @@ func init() {
 		Short: "Run reflector server",
 		Run:   reflectorCmd,
 	}
 
-	cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from for the TCP (LBRY) protocol")
+	cmd.Flags().StringVar(&proxyAddress, "proxy-address", "", "address of another reflector server where blobs are fetched from")
+	cmd.Flags().StringVar(&proxyPort, "proxy-port", "5567", "port of another reflector server where blobs are fetched from")
+	cmd.Flags().StringVar(&proxyProtocol, "proxy-protocol", "http3", "protocol used to fetch blobs from another reflector server (tcp/http3)")
+	cmd.Flags().StringVar(&cloudFrontEndpoint, "cloudfront-endpoint", "", "CloudFront edge endpoint for standard HTTP retrieval")
+	cmd.Flags().StringVar(&WasabiEndpoint, "wasabi-endpoint", "", "Wasabi edge endpoint for standard HTTP retrieval")
+	cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from")
 	cmd.Flags().IntVar(&http3PeerPort, "http3-peer-port", 5568, "The port reflector will distribute content from over HTTP3 protocol")
-	cmd.Flags().IntVar(&httpPeerPort, "http-peer-port", 5569, "The port reflector will distribute content from over HTTP protocol")
+	cmd.Flags().IntVar(&httpPort, "http-port", 5569, "The port reflector will distribute content from over HTTP protocol")
 	cmd.Flags().IntVar(&receiverPort, "receiver-port", 5566, "The port reflector will receive content from")
-	cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for prometheus metrics")
+	cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for metrics")
+	cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests should be submitted to upstream")
 	cmd.Flags().BoolVar(&disableUploads, "disable-uploads", false, "Disable uploads to this reflector server")
 	cmd.Flags().BoolVar(&disableBlocklist, "disable-blocklist", false, "Disable blocklist watching/updating")
-	cmd.Flags().BoolVar(&useDB, "use-db", true, "Whether to connect to the reflector db or not")
-
-	cmd.Flags().StringVar(&upstreamReflector, "upstream-reflector", "", "host:port of a reflector server where blobs are fetched from")
-	cmd.Flags().StringVar(&upstreamProtocol, "upstream-protocol", "http", "protocol used to fetch blobs from another upstream reflector server (tcp/http3/http)")
-	cmd.Flags().StringVar(&upstreamEdgeToken, "upstream-edge-token", "", "token used to retrieve/authenticate protected content")
-
-	cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests from downstream should be handled at once (the rest will wait)")
-
-	cmd.Flags().StringVar(&originEndpoint, "origin-endpoint", "", "HTTP edge endpoint for standard HTTP retrieval")
-	cmd.Flags().StringVar(&originEndpointFallback, "origin-endpoint-fallback", "", "HTTP edge endpoint for standard HTTP retrieval if first origin fails")
-
-	cmd.Flags().StringVar(&diskCache, "disk-cache", "100GB:/tmp/downloaded_blobs:localdb", "Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru)")
-	cmd.Flags().StringVar(&secondaryDiskCache, "optional-disk-cache", "", "Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru) (this would get hit before the one specified in disk-cache)")
-	cmd.Flags().IntVar(&memCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
+	cmd.Flags().BoolVar(&useDB, "use-db", true, "whether to connect to the reflector db or not")
+	cmd.Flags().StringVar(&reflectorCmdDiskCache, "disk-cache", "",
+		"enable disk cache, setting max size and path where to store blobs. format is 'sizeGB:CACHE_PATH'")
+	cmd.Flags().StringVar(&bufferReflectorCmdDiskCache, "buffer-disk-cache", "",
+		"enable buffer disk cache, setting max size and path where to store blobs. format is 'sizeGB:CACHE_PATH'")
+	cmd.Flags().IntVar(&reflectorCmdMemCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
 	rootCmd.AddCommand(cmd)
 }
 
 func reflectorCmd(cmd *cobra.Command, args []string) {
 	log.Printf("reflector %s", meta.VersionString())
 
+	cleanerStopper := stop.New()
+
 	// the blocklist logic requires the db backed store to be the outer-most store
-	underlyingStore := initStores()
-	underlyingStoreWithCaches, cleanerStopper := initCaches(underlyingStore)
+	underlyingStore := setupStore()
+	outerStore := wrapWithCache(underlyingStore, cleanerStopper)
 
 	if !disableUploads {
-		reflectorServer := reflector.NewServer(underlyingStore, underlyingStoreWithCaches)
+		reflectorServer := reflector.NewServer(underlyingStore, outerStore)
 		reflectorServer.Timeout = 3 * time.Minute
 		reflectorServer.EnableBlocklist = !disableBlocklist
@@ -118,22 +94,22 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 		defer reflectorServer.Shutdown()
 	}
 
-	peerServer := peer.NewServer(underlyingStoreWithCaches)
+	peerServer := peer.NewServer(outerStore)
 	err := peerServer.Start(":" + strconv.Itoa(tcpPeerPort))
 	if err != nil {
 		log.Fatal(err)
 	}
 	defer peerServer.Shutdown()
 
-	http3PeerServer := http3.NewServer(underlyingStoreWithCaches, requestQueueSize)
+	http3PeerServer := http3.NewServer(outerStore, requestQueueSize)
 	err = http3PeerServer.Start(":" + strconv.Itoa(http3PeerPort))
 	if err != nil {
 		log.Fatal(err)
 	}
 	defer http3PeerServer.Shutdown()
 
-	httpServer := http.NewServer(store.WithSingleFlight("sf-http", underlyingStoreWithCaches), requestQueueSize, upstreamEdgeToken)
-	err = httpServer.Start(":" + strconv.Itoa(httpPeerPort))
+	httpServer := http.NewServer(outerStore)
+	err = httpServer.Start(":" + strconv.Itoa(httpPort))
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -142,8 +118,8 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 	metricsServer := metrics.NewServer(":"+strconv.Itoa(metricsPort), "/metrics")
 	metricsServer.Start()
 	defer metricsServer.Shutdown()
-	defer underlyingStoreWithCaches.Shutdown()
-	defer underlyingStore.Shutdown() //do we actually need this? Oo
+	defer outerStore.Shutdown()
+	defer underlyingStore.Shutdown()
 
 	interruptChan := make(chan os.Signal, 1)
 	signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
@@ -152,52 +128,45 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 	cleanerStopper.StopAndWait()
 }
 
-func initUpstreamStore() store.BlobStore {
-	var s store.BlobStore
-	if upstreamReflector == "" {
-		return nil
-	}
-	switch upstreamProtocol {
-	case "tcp":
-		s = peer.NewStore(peer.StoreOpts{
-			Address: upstreamReflector,
-			Timeout: 30 * time.Second,
-		})
-	case "http3":
-		s = http3.NewStore(http3.StoreOpts{
-			Address: upstreamReflector,
-			Timeout: 30 * time.Second,
-		})
-	case "http":
-		s = store.NewHttpStore(upstreamReflector, upstreamEdgeToken)
-	default:
-		log.Fatalf("protocol is not recognized: %s", upstreamProtocol)
-	}
-	return s
-}
-
-func initEdgeStore() store.BlobStore {
-	var s3Store *store.S3Store
+func setupStore() store.BlobStore {
 	var s store.BlobStore
 
-	if conf != "none" {
-		s3Store = store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
-	}
-	if originEndpointFallback != "" && originEndpoint != "" {
-		ittt := store.NewITTTStore(store.NewCloudFrontROStore(originEndpoint), store.NewCloudFrontROStore(originEndpointFallback))
-		if s3Store != nil {
-			s = store.NewCloudFrontRWStore(ittt, s3Store)
-		} else {
-			s = ittt
+	if proxyAddress != "" {
+		switch proxyProtocol {
+		case "tcp":
+			s = peer.NewStore(peer.StoreOpts{
+				Address: proxyAddress + ":" + proxyPort,
+				Timeout: 30 * time.Second,
+			})
+		case "http3":
+			s = http3.NewStore(http3.StoreOpts{
+				Address: proxyAddress + ":" + proxyPort,
+				Timeout: 30 * time.Second,
+			})
+		case "http":
+			s = store.NewHttpStore(proxyAddress + ":" + proxyPort)
+		default:
+			log.Fatalf("protocol is not recognized: %s", proxyProtocol)
 		}
-	} else if s3Store != nil {
-		s = s3Store
 	} else {
-		log.Fatalf("this configuration does not include a valid upstream source")
+		var s3Store *store.S3Store
+		if conf != "none" {
+			s3Store = store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
+		}
+		if cloudFrontEndpoint != "" && WasabiEndpoint != "" {
+			ittt := store.NewITTTStore(store.NewCloudFrontROStore(WasabiEndpoint), store.NewCloudFrontROStore(cloudFrontEndpoint))
+			if s3Store != nil {
+				s = store.NewCloudFrontRWStore(ittt, s3Store)
+			} else {
+				s = ittt
+			}
+		} else if s3Store != nil {
+			s = s3Store
+		} else {
+			log.Fatalf("this configuration does not include a valid upstream source")
+		}
 	}
-	return s
-}
 
-func initDBStore(s store.BlobStore) store.BlobStore {
 	if useDB {
 		dbInst := &db.SQL{
 			TrackAccess: db.TrackAccessStreams,
@@ -207,54 +176,26 @@ func initDBStore(s store.BlobStore) store.BlobStore {
 		if err != nil {
 			log.Fatal(err)
 		}
 		s = store.NewDBBackedStore(s, dbInst, false)
 	}
 
 	return s
 }
 
-func initStores() store.BlobStore {
-	s := initUpstreamStore()
-	if s == nil {
-		s = initEdgeStore()
-	}
-	s = initDBStore(s)
-	return s
-}
-
-// initCaches returns a store wrapped with caches and a stop group to execute a clean shutdown
-func initCaches(s store.BlobStore) (store.BlobStore, *stop.Group) {
-	stopper := stop.New()
-	diskStore := initDiskStore(s, diskCache, stopper)
-	finalStore := initDiskStore(diskStore, secondaryDiskCache, stopper)
-	stop.New()
-	if memCache > 0 {
-		finalStore = store.NewCachingStore(
-			"reflector",
-			finalStore,
-			store.NewGcacheStore("mem", store.NewMemStore(), memCache, store.LRU),
-		)
-	}
-	return finalStore, stopper
-}
-
-func initDiskStore(upstreamStore store.BlobStore, diskParams string, stopper *stop.Group) store.BlobStore {
-	diskCacheMaxSize, diskCachePath, cacheManager := diskCacheParams(diskParams)
+func wrapWithCache(s store.BlobStore, cleanerStopper *stop.Group) store.BlobStore {
+	wrapped := s
+
+	diskCacheMaxSize, diskCachePath := diskCacheParams(reflectorCmdDiskCache)
 	//we are tracking blobs in memory with a 1 byte long boolean, which means that for each 2MB (a blob) we need 1Byte
 	// so if the underlying cache holds 10MB, 10MB/2MB=5Bytes which is also the exact count of objects to restore on startup
 	realCacheSize := float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
-	if diskCacheMaxSize == 0 {
-		return upstreamStore
-	}
-	err := os.MkdirAll(diskCachePath, os.ModePerm)
-	if err != nil {
-		log.Fatal(err)
-	}
-	diskStore := store.NewDiskStore(diskCachePath, 2)
-	var unwrappedStore store.BlobStore
-	cleanerStopper := stop.New(stopper)
-
-	if cacheManager == "localdb" {
+	if diskCacheMaxSize > 0 {
+		err := os.MkdirAll(diskCachePath, os.ModePerm)
+		if err != nil {
+			log.Fatal(err)
+		}
 		localDb := &db.SQL{
 			SoftDelete:  true,
 			TrackAccess: db.TrackAccessBlobs,
@@ -264,40 +205,55 @@ func initDiskStore(upstreamStore store.BlobStore, diskParams string, stopper *st
 		if err != nil {
 			log.Fatal(err)
 		}
-		unwrappedStore = store.NewDBBackedStore(diskStore, localDb, true)
-		go cleanOldestBlobs(int(realCacheSize), localDb, unwrappedStore, cleanerStopper)
-	} else {
-		unwrappedStore = store.NewGcacheStore("nvme", store.NewDiskStore(diskCachePath, 2), int(realCacheSize), cacheMangerToGcache[cacheManager])
+		dbBackedDiskStore := store.NewDBBackedStore(store.NewDiskStore(diskCachePath, 2), localDb, true)
+		wrapped = store.NewCachingStore(
+			"reflector",
+			wrapped,
+			dbBackedDiskStore,
+		)
+
+		go cleanOldestBlobs(int(realCacheSize), localDb, dbBackedDiskStore, cleanerStopper)
+	}
+
+	diskCacheMaxSize, diskCachePath = diskCacheParams(bufferReflectorCmdDiskCache)
+	realCacheSize = float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
+	if diskCacheMaxSize > 0 {
+		err := os.MkdirAll(diskCachePath, os.ModePerm)
+		if err != nil {
+			log.Fatal(err)
+		}
+		wrapped = store.NewCachingStore(
+			"reflector",
+			wrapped,
+			store.NewLFUDAStore("nvme", store.NewDiskStore(diskCachePath, 2), realCacheSize),
+		)
+	}
+
+	if reflectorCmdMemCache > 0 {
+		wrapped = store.NewCachingStore(
+			"reflector",
+			wrapped,
+			store.NewLRUStore("mem", store.NewMemStore(), reflectorCmdMemCache),
+		)
 	}
 
-	wrapped := store.NewCachingStore(
-		"reflector",
-		upstreamStore,
-		unwrappedStore,
-	)
 	return wrapped
 }
 
-func diskCacheParams(diskParams string) (int, string, string) {
+func diskCacheParams(diskParams string) (int, string) {
 	if diskParams == "" {
-		return 0, "", ""
+		return 0, ""
 	}
 
 	parts := strings.Split(diskParams, ":")
-	if len(parts) != 3 {
-		log.Fatalf("%s does is formatted incorrectly. Expected format: 'sizeGB:CACHE_PATH:cachemanager' for example: '100GB:/tmp/downloaded_blobs:localdb'", diskParams)
+	if len(parts) != 2 {
+		log.Fatalf("--disk-cache must be a number, followed by ':', followed by a string")
 	}
 
 	diskCacheSize := parts[0]
 	path := parts[1]
-	cacheManager := parts[2]
 
 	if len(path) == 0 || path[0] != '/' {
-		log.Fatalf("disk cache paths must start with '/'")
+		log.Fatalf("--disk-cache path must start with '/'")
 	}
 
-	if !util.InSlice(cacheManager, cacheManagers) {
-		log.Fatalf("specified cache manager '%s' is not supported. Use one of the following: %v", cacheManager, cacheManagers)
-	}
-
 	var maxSize datasize.ByteSize
@@ -306,9 +262,9 @@ func diskCacheParams(diskParams string) (int, string) {
 		log.Fatal(err)
 	}
 	if maxSize <= 0 {
-		log.Fatal("disk cache size must be more than 0")
+		log.Fatal("--disk-cache size must be more than 0")
 	}
 
-	return int(maxSize), path, cacheManager
+	return int(maxSize), path
 }
 
 func cleanOldestBlobs(maxItems int, db *db.SQL, store store.BlobStore, stopper *stop.Group) {
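The cache sizing in wrapWithCache above is simple arithmetic: the store keeps roughly one byte of in-memory bookkeeping per blob, and a blob is at most stream.MaxBlobSize (2MB), so a byte budget divides down to a blob count. A self-contained sketch of the branch's 'sizeGB:CACHE_PATH' format and that division (parseDiskCache is a hypothetical stand-in; only the c2h5oh/datasize API already used above is assumed):

package main

import (
	"fmt"
	"strings"

	"github.com/c2h5oh/datasize"
)

// maxBlobSize mirrors stream.MaxBlobSize from lbry.go (2MB), hardcoded so the
// sketch stands alone.
const maxBlobSize = 2 * 1024 * 1024

// parseDiskCache turns "100GB:/tmp/downloaded_blobs" into a byte budget, a
// path, and the number of blobs that budget can hold.
func parseDiskCache(diskParams string) (budgetBytes int, path string, maxBlobs int, err error) {
	parts := strings.Split(diskParams, ":")
	if len(parts) != 2 {
		return 0, "", 0, fmt.Errorf("expected 'sizeGB:CACHE_PATH', got %q", diskParams)
	}
	var maxSize datasize.ByteSize
	if err := maxSize.UnmarshalText([]byte(parts[0])); err != nil {
		return 0, "", 0, err
	}
	return int(maxSize), parts[1], int(maxSize) / maxBlobSize, nil
}

func main() {
	budget, path, blobs, err := parseDiskCache("100GB:/tmp/downloaded_blobs")
	if err != nil {
		panic(err)
	}
	// 100GB / 2MB = 51200 blobs to track (and to restore on startup)
	fmt.Println(budget, path, blobs)
}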


@@ -2,14 +2,14 @@ package cmd
 
 import (
 	"encoding/json"
+	"io/ioutil"
 	"os"
 	"strings"
 
-	"github.com/lbryio/reflector.go/updater"
-
 	"github.com/lbryio/lbry.go/v2/dht"
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/extras/util"
+	"github.com/lbryio/reflector.go/updater"
 
 	"github.com/johntdyer/slackrus"
 	"github.com/sirupsen/logrus"
@@ -24,7 +24,6 @@ type Config struct {
 	BucketName   string `json:"bucket_name"`
 	DBConn       string `json:"db_conn"`
 	SlackHookURL string `json:"slack_hook_url"`
-	SlackChannel string `json:"slack_channel"`
 	UpdateBinURL string `json:"update_bin_url"`
 	UpdateCmd    string `json:"update_cmd"`
 }
@@ -102,7 +101,7 @@ func preRun(cmd *cobra.Command, args []string) {
 		hook := &slackrus.SlackrusHook{
 			HookURL:        globalConfig.SlackHookURL,
 			AcceptedLevels: slackrus.LevelThreshold(logrus.InfoLevel),
-			Channel:        globalConfig.SlackChannel,
+			Channel:        "#reflector-logs",
 			//IconEmoji:   ":ghost:",
 			//Username:    "reflector.go",
 		}
@@ -141,7 +140,7 @@ func argFuncs(funcs ...cobra.PositionalArgs) cobra.PositionalArgs {
 func loadConfig(path string) (Config, error) {
 	var c Config
 
-	raw, err := os.ReadFile(path)
+	raw, err := ioutil.ReadFile(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return c, errors.Err("config file not found")
@@ -165,8 +164,8 @@ func mustGetFlagInt64(cmd *cobra.Command, name string) int64 {
 	return v
 }
 
-//func mustGetFlagBool(cmd *cobra.Command, name string) bool {
-//	v, err := cmd.Flags().GetBool(name)
-//	checkErr(err)
-//	return v
-//}
+func mustGetFlagBool(cmd *cobra.Command, name string) bool {
+	v, err := cmd.Flags().GetBool(name)
+	checkErr(err)
+	return v
+}


@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"os"
 	"os/signal"
 	"path"
@@ -38,12 +39,13 @@ func sendCmd(cmd *cobra.Command, args []string) {
 	reflectorAddress := args[0]
 	err := hackyReflector.Connect(reflectorAddress)
 	checkErr(err)
-	defer func() { _ = hackyReflector.Close() }()
+	defer hackyReflector.Close()
 
 	filePath := args[1]
 	file, err := os.Open(filePath)
 	checkErr(err)
-	defer func() { _ = file.Close() }()
+	defer file.Close()
 
 	sdCachePath := ""
 	sdCacheDir := mustGetFlagString(cmd, "sd-cache")
 	if sdCacheDir != "" {
@@ -58,7 +60,7 @@ func sendCmd(cmd *cobra.Command, args []string) {
 	if sdCachePath != "" {
 		if _, err := os.Stat(sdCachePath); !os.IsNotExist(err) {
-			sdBlob, err := os.ReadFile(sdCachePath)
+			sdBlob, err := ioutil.ReadFile(sdCachePath)
 			checkErr(err)
 			cachedSDBlob := &stream.SDBlob{}
 			err = cachedSDBlob.FromBlob(sdBlob)
@@ -108,7 +110,7 @@ func sendCmd(cmd *cobra.Command, args []string) {
 		sd := enc.SDBlob()
 		//sd.StreamName = filepath.Base(filePath)
 		//sd.SuggestedFileName = filepath.Base(filePath)
-		err = os.WriteFile(sdCachePath, sd.ToBlob(), 0666)
+		err = ioutil.WriteFile(sdCachePath, sd.ToBlob(), 0666)
 		if err != nil {
 			fmt.Printf("error saving sd blob: %v\n", err)
 			fmt.Println(sd.ToJson())


@@ -51,7 +51,7 @@ func sendBlobCmd(cmd *cobra.Command, args []string) {
 	file, err := os.Open(path)
 	checkErr(err)
-	defer func() { _ = file.Close() }()
+	defer file.Close()
 
 	s, err := stream.New(file)
 	checkErr(err)


@@ -7,15 +7,14 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/lbryio/reflector.go/cluster"
-	"github.com/lbryio/reflector.go/db"
-	"github.com/lbryio/reflector.go/prism"
-	"github.com/lbryio/reflector.go/reflector"
-	"github.com/lbryio/reflector.go/server/peer"
-	"github.com/lbryio/reflector.go/store"
-
 	"github.com/lbryio/lbry.go/v2/dht"
 	"github.com/lbryio/lbry.go/v2/dht/bits"
+	"github.com/lbryio/reflector.go/cluster"
+	"github.com/lbryio/reflector.go/db"
+	"github.com/lbryio/reflector.go/peer"
+	"github.com/lbryio/reflector.go/prism"
+	"github.com/lbryio/reflector.go/reflector"
+	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"


@@ -9,8 +9,8 @@ import (
 	"time"
 
 	"github.com/lbryio/reflector.go/meta"
+	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/reflector"
-	"github.com/lbryio/reflector.go/server/peer"
 	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"


@@ -8,8 +8,8 @@ import (
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/reflector"
 	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )


@@ -4,7 +4,6 @@ import (
 	"fmt"
 
 	"github.com/lbryio/reflector.go/meta"
-
 	"github.com/spf13/cobra"
 )
@@ -18,5 +17,5 @@ func init() {
 }
 
 func versionCmd(cmd *cobra.Command, args []string) {
-	fmt.Println(meta.FullName())
+	fmt.Println(meta.VersionString())
 }
} }

db/db.go (221 changed lines)

@@ -17,7 +17,7 @@ import (
 	"github.com/go-sql-driver/mysql"
 	_ "github.com/go-sql-driver/mysql" // blank import for db driver ensures its imported even if its not used
 	log "github.com/sirupsen/logrus"
-	"github.com/volatiletech/null/v8"
+	"github.com/volatiletech/null"
 	"go.uber.org/atomic"
 )
@@ -39,9 +39,12 @@ type SdBlob struct {
 type trackAccess int
 
 const (
-	TrackAccessNone    trackAccess = iota // Don't track accesses
-	TrackAccessStreams                    // Track accesses at the stream level
-	TrackAccessBlobs                      // Track accesses at the blob level
+	// Don't track accesses
+	TrackAccessNone trackAccess = iota
+	// Track accesses at the stream level
+	TrackAccessStreams
+	// Track accesses at the blob level
+	TrackAccessBlobs
 )
 
 // SQL implements the DB interface
@@ -94,23 +97,22 @@ func (s *SQL) AddBlob(hash string, length int, isStored bool) error {
 		return errors.Err("not connected")
 	}
 
-	_, err := s.insertBlob(hash, length, isStored)
+	_, err := s.insertBlob(s.conn, hash, length, isStored)
 	return err
 }
 
-//AddBlobs adds blobs to the database.
+// AddBlob adds a blob to the database.
 func (s *SQL) AddBlobs(hash []string) error {
 	if s.conn == nil {
 		return errors.Err("not connected")
 	}
+	// Split the slice into batches of 20 items.
 	batch := 10000
 	totalBlobs := int64(len(hash))
 	work := make(chan []string, 1000)
 	stopper := stop.New()
 	var totalInserted atomic.Int64
 	start := time.Now()
-
 	go func() {
 		for i := 0; i < len(hash); i += batch {
 			j := i + batch
@@ -122,7 +124,6 @@ func (s *SQL) AddBlobs(hash []string) error {
 		log.Infof("done loading %d hashes in the work queue", len(hash))
 		close(work)
 	}()
-
 	for i := 0; i < runtime.NumCPU(); i++ {
 		stopper.Add(1)
 		go func(worker int) {
@@ -144,7 +145,6 @@ func (s *SQL) AddBlobs(hash []string) error {
 			}
 		}(i)
 	}
-
 	stopper.Wait()
 	return nil
 }
@@ -163,7 +163,7 @@ func (s *SQL) insertBlobs(hashes []string) error {
 		//args = append(args, hash, true, stream.MaxBlobSize, dayAgo)
 	}
 	q = strings.TrimSuffix(q, ",")
-	_, err := s.exec(q)
+	_, err := s.exec(s.conn, q)
 	if err != nil {
 		return err
 	}
@@ -171,7 +171,7 @@ func (s *SQL) insertBlobs(hashes []string) error {
 	return nil
 }
 
-func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error) {
+func (s *SQL) insertBlob(ex Executor, hash string, length int, isStored bool) (int64, error) {
 	if length <= 0 {
 		return 0, errors.Err("length must be positive")
 	}
@@ -188,13 +188,13 @@ func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error)
 		q = "INSERT INTO blob_ (hash, is_stored, length) VALUES (" + qt.Qs(len(args)) + ") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored))"
 	}
 
-	blobID, err := s.exec(q, args...)
+	blobID, err := s.exec(ex, q, args...)
 	if err != nil {
 		return 0, err
 	}
 
 	if blobID == 0 {
-		err = s.conn.QueryRow("SELECT id FROM blob_ WHERE hash = ?", hash).Scan(&blobID)
+		err = ex.QueryRow("SELECT id FROM blob_ WHERE hash = ?", hash).Scan(&blobID)
 		if err != nil {
 			return 0, errors.Err(err)
 		}
@@ -203,7 +203,7 @@ func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error)
 	}
 
 	if s.TrackAccess == TrackAccessBlobs {
-		err := s.touchBlobs([]uint64{uint64(blobID)})
+		err := s.touchBlobs(ex, []uint64{uint64(blobID)})
 		if err != nil {
 			return 0, errors.Err(err)
 		}
@@ -227,7 +227,7 @@ func (s *SQL) insertStream(hash string, sdBlobID int64) (int64, error) {
 		q = "INSERT IGNORE INTO stream (hash, sd_blob_id) VALUES (" + qt.Qs(len(args)) + ")"
 	}
 
-	streamID, err := s.exec(q, args...)
+	streamID, err := s.exec(s.conn, q, args...)
 	if err != nil {
 		return 0, errors.Err(err)
 	}
@@ -266,7 +266,7 @@ func (s *SQL) HasBlobs(hashes []string, touch bool) (map[string]bool, error) {
 	if touch {
 		if s.TrackAccess == TrackAccessBlobs {
-			_ = s.touchBlobs(idsNeedingTouch)
+			_ = s.touchBlobs(s.conn, idsNeedingTouch)
 		} else if s.TrackAccess == TrackAccessStreams {
 			_ = s.touchStreams(idsNeedingTouch)
 		}
@@ -275,7 +275,7 @@ func (s *SQL) HasBlobs(hashes []string, touch bool) (map[string]bool, error) {
 	return exists, err
 }
 
-func (s *SQL) touchBlobs(blobIDs []uint64) error {
+func (s *SQL) touchBlobs(ex Executor, blobIDs []uint64) error {
 	if len(blobIDs) == 0 {
 		return nil
 	}
@@ -288,7 +288,7 @@ func (s *SQL) touchBlobs(ex Executor, blobIDs []uint64) error {
 	}
 
 	startTime := time.Now()
-	_, err := s.exec(query, args...)
+	_, err := s.exec(ex, query, args...)
 	log.Debugf("touched %d blobs and took %s", len(blobIDs), time.Since(startTime))
 	return errors.Err(err)
 }
@@ -306,7 +306,7 @@ func (s *SQL) touchStreams(streamIDs []uint64) error {
 	}
 
 	startTime := time.Now()
-	_, err := s.exec(query, args...)
+	_, err := s.exec(s.conn, query, args...)
 	log.Debugf("touched %d streams and took %s", len(streamIDs), time.Since(startTime))
 	return errors.Err(err)
 }
@@ -406,20 +406,20 @@ WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
 // NOTE: If SoftDelete is enabled, streams will never be deleted
 func (s *SQL) Delete(hash string) error {
 	if s.SoftDelete {
-		_, err := s.exec("UPDATE blob_ SET is_stored = 0 WHERE hash = ?", hash)
+		_, err := s.exec(s.conn, "UPDATE blob_ SET is_stored = 0 WHERE hash = ?", hash)
 		return errors.Err(err)
 	}
 
-	_, err := s.exec("DELETE FROM stream WHERE sd_blob_id = (SELECT id FROM blob_ WHERE hash = ?)", hash)
+	_, err := s.exec(s.conn, "DELETE FROM stream WHERE sd_blob_id = (SELECT id FROM blob_ WHERE hash = ?)", hash)
 	if err != nil {
 		return errors.Err(err)
 	}
 
-	_, err = s.exec("DELETE FROM blob_ WHERE hash = ?", hash)
+	_, err = s.exec(s.conn, "DELETE FROM blob_ WHERE hash = ?", hash)
 	return errors.Err(err)
 }
 
-//LeastRecentlyAccessedHashes gets the least recently accessed blobs
+// GetHashRange gets the smallest and biggest hashes in the db
 func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
 	if s.conn == nil {
 		return nil, errors.Err("not connected")
@@ -451,6 +451,40 @@ func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
 	return blobs, nil
 }
 
+// AllHashes writes all hashes from the db into the channel.
+// It does not close the channel when it finishes.
+//func (s *SQL) AllHashes(ch chan<- string) error {
+//	if s.conn == nil {
+//		return errors.Err("not connected")
+//	}
+//
+//	query := "SELECT hash from blob_"
+//	if s.SoftDelete {
+//		query += " where is_stored = 1"
+//	}
+//	s.logQuery(query)
+//
+//	rows, err := s.conn.Query(query)
+//	if err != nil {
+//		return errors.Err(err)
+//	}
+//	defer closeRows(rows)
+//
+//	for rows.Next() {
+//		var hash string
+//		err := rows.Scan(&hash)
+//		if err != nil {
+//			return errors.Err(err)
+//		}
+//		ch <- hash
+//		// TODO: this needs testing
+//		// TODO: need a way to cancel this early (e.g. in case of shutdown)
+//	}
+//
+//	close(ch)
+//	return nil
+//}
+
 func (s *SQL) Count() (int, error) {
 	if s.conn == nil {
 		return 0, errors.Err("not connected")
@@ -556,7 +590,7 @@ func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int, sdBlob SdBlob) error {
 		return errors.Err("not connected")
 	}
 
-	sdBlobID, err := s.insertBlob(sdHash, sdBlobLength, true)
+	sdBlobID, err := s.insertBlob(s.conn, sdHash, sdBlobLength, true)
 	if err != nil {
 		return err
 	}
@@ -566,28 +600,30 @@ func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int, sdBlob SdBlob) error {
 		return err
 	}
 
-	// insert content blobs and connect them to stream
-	for _, contentBlob := range sdBlob.Blobs {
-		if contentBlob.BlobHash == "" {
-			// null terminator blob
-			continue
-		}
+	return withTx(s.conn, func(tx Transactor) error {
+		// insert content blobs and connect them to stream
+		for _, contentBlob := range sdBlob.Blobs {
+			if contentBlob.BlobHash == "" {
+				// null terminator blob
+				continue
+			}
 
-		blobID, err := s.insertBlob(contentBlob.BlobHash, contentBlob.Length, false)
-		if err != nil {
-			return err
-		}
+			blobID, err := s.insertBlob(tx, contentBlob.BlobHash, contentBlob.Length, false)
+			if err != nil {
+				return err
+			}
 
-		args := []interface{}{streamID, blobID, contentBlob.BlobNum}
-		_, err = s.exec(
-			"INSERT IGNORE INTO stream_blob (stream_id, blob_id, num) VALUES ("+qt.Qs(len(args))+")",
-			args...,
-		)
-		if err != nil {
-			return errors.Err(err)
-		}
-	}
-	return nil
+			args := []interface{}{streamID, blobID, contentBlob.BlobNum}
+			_, err = s.exec(tx,
+				"INSERT IGNORE INTO stream_blob (stream_id, blob_id, num) VALUES ("+qt.Qs(len(args))+")",
+				args...,
+			)
+			if err != nil {
+				return errors.Err(err)
+			}
+		}
+		return nil
+	})
 }
 
 // GetHashRange gets the smallest and biggest hashes in the db
@@ -660,39 +696,38 @@ func (s *SQL) GetStoredHashesInRange(ctx context.Context, start, end bits.Bitmap
 }
 
 // txFunc is a function that can be wrapped in a transaction
-type txFunc func(tx *sql.Tx) error
+type txFunc func(tx Transactor) error
 
-// withTx wraps a function in an sql transaction. the transaction is committed if there's no error, or rolled back if there is one.
-// if dbOrTx is an sql.DB, a new transaction is started
+// withTx wraps a function in an sql transaction. the transaction is committed if there's
+// no error, or rolled back if there is one. if dbOrTx is not a Transactor (e.g. if it's
+// an *sql.DB), withTx attempts to start a new transaction to use.
 func withTx(dbOrTx interface{}, f txFunc) (err error) {
-	var tx *sql.Tx
+	var tx Transactor
+	var ok bool
 
-	switch t := dbOrTx.(type) {
-	case *sql.Tx:
-		tx = t
-	case *sql.DB:
-		tx, err = t.Begin()
+	tx, ok = dbOrTx.(Transactor)
+	if !ok {
+		tx, err = Begin(dbOrTx)
 		if err != nil {
 			return err
 		}
-		defer func() {
-			if p := recover(); p != nil {
-				if rollBackError := tx.Rollback(); rollBackError != nil {
-					log.Error("failed to rollback tx on panic - ", rollBackError)
-				}
-				panic(p)
-			} else if err != nil {
-				if rollBackError := tx.Rollback(); rollBackError != nil {
-					log.Error("failed to rollback tx on panic - ", rollBackError)
-				}
-			} else {
-				err = errors.Err(tx.Commit())
-			}
-		}()
-	default:
-		return errors.Err("db or tx required")
 	}
 
+	defer func() {
+		if p := recover(); p != nil {
+			if rollBackError := tx.Rollback(); rollBackError != nil {
+				log.Error("failed to rollback tx on panic: ", rollBackError)
+			}
+			err = errors.Prefix("panic", p)
+		} else if err != nil {
+			if rollBackError := tx.Rollback(); rollBackError != nil {
+				log.Error("failed to rollback tx: ", rollBackError)
+			}
+		} else {
+			err = errors.Err(tx.Commit())
+		}
+	}()
+
 	return f(tx)
 }
@@ -705,12 +740,12 @@ func closeRows(rows *sql.Rows) {
 	}
 }
 
-func (s *SQL) exec(query string, args ...interface{}) (int64, error) {
+func (s *SQL) exec(ex Executor, query string, args ...interface{}) (int64, error) {
 	s.logQuery(query, args...)
 
 	attempt, maxAttempts := 0, 3
 Retry:
 	attempt++
-	result, err := s.conn.Exec(query, args...)
+	result, err := ex.Exec(query, args...)
 	if isLockTimeoutError(err) {
 		if attempt <= maxAttempts {
 			//Error 1205: Lock wait timeout exceeded; try restarting transaction
@@ -779,3 +814,47 @@ CREATE TABLE blocked (
 );
 */
 
+//func (d *LiteDBBackedStore) selfClean() {
+//	d.stopper.Add(1)
+//	defer d.stopper.Done()
+//	lastCleanup := time.Now()
+//	const cleanupInterval = 10 * time.Second
+//	for {
+//		select {
+//		case <-d.stopper.Ch():
+//			log.Infoln("stopping self cleanup")
+//			return
+//		default:
+//			time.Sleep(1 * time.Second)
+//		}
+//		if time.Since(lastCleanup) < cleanupInterval {
+//			continue
+//		}
+//
+//		blobsCount, err := d.db.BlobsCount()
+//		if err != nil {
+//			log.Errorf(errors.FullTrace(err))
+//		}
+//		if blobsCount >= d.maxItems {
+//			itemsToDelete := blobsCount / 100 * 10
+//			blobs, err := d.db.GetLRUBlobs(itemsToDelete)
+//			if err != nil {
+//				log.Errorf(errors.FullTrace(err))
+//			}
+//			for _, hash := range blobs {
+//				select {
+//				case <-d.stopper.Ch():
+//					return
+//				default:
+//				}
+//				err = d.Delete(hash)
+//				if err != nil {
+//					log.Errorf(errors.FullTrace(err))
+//				}
+//				metrics.CacheLRUEvictCount.With(metrics.CacheLabels(d.Name(), d.component)).Inc()
+//			}
+//		}
+//		lastCleanup = time.Now()
+//	}
+//}
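The AddBlobs rework above follows a standard fan-out shape: slice the hash list into fixed-size batches, feed the batches through a channel, and run one inserter per CPU. A minimal sketch of just that shape (insertBatch stands in for the real INSERT statement; error handling simplified):

package sketch

import (
	"log"
	"runtime"
	"sync"
)

// insertInBatches fans batches of hashes out to one worker per CPU.
func insertInBatches(hashes []string, batchSize int, insertBatch func([]string) error) {
	work := make(chan []string, 1000)

	// producer: slice the input into batches
	go func() {
		for i := 0; i < len(hashes); i += batchSize {
			j := i + batchSize
			if j > len(hashes) {
				j = len(hashes)
			}
			work <- hashes[i:j]
		}
		close(work)
	}()

	// consumers: one worker per CPU drains the channel
	var wg sync.WaitGroup
	for w := 0; w < runtime.NumCPU(); w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for batch := range work {
				if err := insertBatch(batch); err != nil {
					log.Printf("batch failed: %v", err) // the real code logs and keeps going
				}
			}
		}()
	}
	wg.Wait()
}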

db/interfaces.go (new file, 45 lines)

@ -0,0 +1,45 @@
package db
import (
"database/sql"
"github.com/lbryio/lbry.go/v2/extras/errors"
)
// Executor can perform SQL queries.
type Executor interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
}
// Transactor can commit and rollback, on top of being able to execute queries.
type Transactor interface {
Commit() error
Rollback() error
Executor
}
// Begin begins a transaction
func Begin(db interface{}) (Transactor, error) {
type beginner interface {
Begin() (Transactor, error)
}
creator, ok := db.(beginner)
if ok {
return creator.Begin()
}
type sqlBeginner interface {
Begin() (*sql.Tx, error)
}
creator2, ok := db.(sqlBeginner)
if ok {
return creator2.Begin()
}
return nil, errors.Err("database does not support transactions")
}
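A design note on the interfaces above: both *sql.DB and *sql.Tx satisfy Executor, so a query helper written against the interface runs unchanged on a bare connection or as one step of a transaction. A minimal sketch (markStored and its query are illustrative, not part of this diff):

	// markStored flips the is_stored flag for one blob. Because it only
	// needs an Executor, the same helper works inside or outside a tx.
	func markStored(ex Executor, hash string) error {
		_, err := ex.Exec("UPDATE blob_ SET is_stored = 1 WHERE hash = ?", hash)
		return err
	}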

go.mod

@ -1,124 +1,49 @@
 module github.com/lbryio/reflector.go

-go 1.20

 replace github.com/btcsuite/btcd => github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19

+//replace github.com/lbryio/lbry.go/v2 => ../lbry.go

-require (
-	github.com/aws/aws-sdk-go v1.45.24
-	github.com/bluele/gcache v0.0.2
-	github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7
-	github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3
-	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
-	github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
-	github.com/davecgh/go-spew v1.1.1
-	github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db
-	github.com/gin-gonic/gin v1.9.1
-	github.com/go-sql-driver/mysql v1.7.1
-	github.com/gogo/protobuf v1.3.2
-	github.com/golang/protobuf v1.5.3
-	github.com/google/gops v0.3.28
-	github.com/gorilla/mux v1.8.0
-	github.com/hashicorp/serf v0.10.1
-	github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
-	github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc
-	github.com/karrick/godirwalk v1.17.0
-	github.com/lbryio/chainquery v1.9.1-0.20230515181855-2fcba3115cfe
-	github.com/lbryio/lbry.go/v2 v2.7.2-0.20230307181431-a01aa6dc0629
-	github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6
-	github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
-	github.com/prometheus/client_golang v1.16.0
-	github.com/quic-go/quic-go v0.39.0
-	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/cast v1.5.1
-	github.com/spf13/cobra v1.7.0
-	github.com/stretchr/testify v1.8.4
-	github.com/volatiletech/null/v8 v8.1.2
-	go.uber.org/atomic v1.11.0
-	golang.org/x/sync v0.4.0
-)

 require (
-	github.com/armon/go-metrics v0.4.0 // indirect
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
-	github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
-	github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
-	github.com/bytedance/sonic v1.9.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
-	github.com/friendsofgo/errors v0.9.2 // indirect
-	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
-	github.com/gin-contrib/sse v0.1.0 // indirect
-	github.com/go-errors/errors v1.4.2 // indirect
-	github.com/go-ini/ini v1.67.0 // indirect
-	github.com/go-playground/locales v0.14.1 // indirect
-	github.com/go-playground/universal-translator v0.18.1 // indirect
-	github.com/go-playground/validator/v10 v10.14.0 // indirect
-	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
-	github.com/goccy/go-json v0.10.2 // indirect
-	github.com/gofrs/uuid v4.2.0+incompatible // indirect
-	github.com/google/btree v1.0.0 // indirect
-	github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
-	github.com/gorilla/rpc v1.2.0 // indirect
-	github.com/gorilla/websocket v1.4.2 // indirect
-	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
-	github.com/hashicorp/go-msgpack v0.5.3 // indirect
-	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-sockaddr v1.0.0 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/hashicorp/memberlist v0.5.0 // indirect
-	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
-	github.com/leodido/go-urn v1.2.4 // indirect
-	github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect
-	github.com/magiconair/properties v1.8.7 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/miekg/dns v1.1.41 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/onsi/ginkgo/v2 v2.9.5 // indirect
-	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.10.1 // indirect
-	github.com/quic-go/qpack v0.4.0 // indirect
-	github.com/quic-go/qtls-go1-20 v0.3.4 // indirect
-	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
-	github.com/shopspring/decimal v1.3.1 // indirect
-	github.com/slack-go/slack v0.12.1 // indirect
-	github.com/spf13/afero v1.9.3 // indirect
-	github.com/spf13/jwalterweatherman v1.1.0 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.15.0 // indirect
-	github.com/subosito/gotenv v1.4.2 // indirect
-	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-	github.com/ugorji/go/codec v1.2.11 // indirect
-	github.com/volatiletech/inflect v0.0.1 // indirect
-	github.com/volatiletech/randomize v0.0.1 // indirect
-	github.com/volatiletech/strmangle v0.0.4 // indirect
-	go.uber.org/mock v0.3.0 // indirect
-	golang.org/x/arch v0.3.0 // indirect
-	golang.org/x/crypto v0.9.0 // indirect
-	golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
-	golang.org/x/mod v0.11.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.9.1 // indirect
-	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
-	google.golang.org/protobuf v1.30.0 // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
-	gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
+	github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
+	github.com/aws/aws-sdk-go v1.16.11
+	github.com/bluele/gcache v0.0.2
+	github.com/bparli/lfuda-go v0.3.1
+	github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3
+	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
+	github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2
+	github.com/davecgh/go-spew v1.1.1
+	github.com/gin-gonic/gin v1.7.1
+	github.com/go-sql-driver/mysql v1.4.1
+	github.com/golang/protobuf v1.4.2
+	github.com/google/gops v0.3.18
+	github.com/gorilla/mux v1.7.4
+	github.com/hashicorp/go-msgpack v0.5.5 // indirect
 	github.com/hashicorp/golang-lru v0.5.4 // indirect
+	github.com/hashicorp/memberlist v0.1.4 // indirect
+	github.com/hashicorp/serf v0.8.2
+	github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
+	github.com/johntdyer/slackrus v0.0.0-20180518184837-f7aae3243a07
+	github.com/karrick/godirwalk v1.16.1
+	github.com/lbryio/chainquery v1.9.0
+	github.com/lbryio/lbry.go v1.1.2 // indirect
+	github.com/lbryio/lbry.go/v2 v2.7.2-0.20210416195322-6516df1418e3
+	github.com/lbryio/types v0.0.0-20201019032447-f0b4476ef386
+	github.com/lucas-clemente/quic-go v0.20.1
+	github.com/phayes/freeport v0.0.0-20171002185219-e27662a4a9d6
+	github.com/prometheus/client_golang v0.9.3
+	github.com/sirupsen/logrus v1.4.2
+	github.com/spf13/afero v1.4.1 // indirect
+	github.com/spf13/cast v1.3.0
+	github.com/spf13/cobra v0.0.3
+	github.com/spf13/viper v1.7.1 // indirect
+	github.com/stretchr/testify v1.7.0
+	github.com/volatiletech/null v8.0.0+incompatible
+	go.uber.org/atomic v1.5.1
+	golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
+	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+	golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 // indirect
+	google.golang.org/appengine v1.6.2 // indirect
 )

+go 1.15

go.sum

File diff suppressed because it is too large

@ -86,13 +86,8 @@ const (
errUnexpectedEOFStr = "unexpected_eof_str" errUnexpectedEOFStr = "unexpected_eof_str"
errJSONSyntax = "json_syntax" errJSONSyntax = "json_syntax"
errBlobTooBig = "blob_too_big" errBlobTooBig = "blob_too_big"
errInvalidPeerJSON = "invalid_peer_json"
errInvalidPeerData = "invalid_peer_data"
errRequestTooLarge = "request_too_large"
errDeadlineExceeded = "deadline_exceeded" errDeadlineExceeded = "deadline_exceeded"
errHashMismatch = "hash_mismatch" errHashMismatch = "hash_mismatch"
errProtectedBlob = "protected_blob"
errInvalidBlobHash = "invalid_blob_hash"
errZeroByteBlob = "zero_byte_blob" errZeroByteBlob = "zero_byte_blob"
errInvalidCharacter = "invalid_character" errInvalidCharacter = "invalid_character"
errBlobNotFound = "blob_not_found" errBlobNotFound = "blob_not_found"
@ -159,7 +154,7 @@ var (
Name: "origin_requests_total", Name: "origin_requests_total",
Help: "How many Get requests are in flight from the cache to the origin", Help: "How many Get requests are in flight from the cache to the origin",
}, []string{LabelCacheType, LabelComponent}) }, []string{LabelCacheType, LabelComponent})
-	//during thundering-herd situations, the metric below should be a lot smaller than the metric above
+	// during thundering-herd situations, the metric below should be a lot smaller than the metric above
CacheWaitingRequestsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{ CacheWaitingRequestsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Namespace: ns,
Subsystem: subsystemCache, Subsystem: subsystemCache,
@ -239,11 +234,6 @@ var (
Name: "http3_blob_request_queue_size", Name: "http3_blob_request_queue_size",
Help: "Blob requests of https queue size", Help: "Blob requests of https queue size",
}) })
HttpBlobReqQueue = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: ns,
Name: "http_blob_request_queue_size",
Help: "Blob requests queue size of the HTTP protocol",
})
RoutinesQueue = promauto.NewGaugeVec(prometheus.GaugeOpts{ RoutinesQueue = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns, Namespace: ns,
Name: "routines", Name: "routines",
@ -301,20 +291,10 @@ func TrackError(direction string, e error) (shouldLog bool) { // shouldLog is a
} else if strings.Contains(err.Error(), "blob must be at most") { } else if strings.Contains(err.Error(), "blob must be at most") {
//log.Warnln("blob must be at most X bytes is not the same as ErrBlobTooBig") //log.Warnln("blob must be at most X bytes is not the same as ErrBlobTooBig")
errType = errBlobTooBig errType = errBlobTooBig
} else if strings.Contains(err.Error(), "invalid json request") {
errType = errInvalidPeerJSON
} else if strings.Contains(err.Error(), "Invalid data") {
errType = errInvalidPeerData
} else if strings.Contains(err.Error(), "request is too large") {
errType = errRequestTooLarge
} else if strings.Contains(err.Error(), "Invalid blob hash length") {
errType = errInvalidBlobHash
} else if strings.Contains(err.Error(), "hash of received blob data does not match hash from send request") { } else if strings.Contains(err.Error(), "hash of received blob data does not match hash from send request") {
errType = errHashMismatch errType = errHashMismatch
} else if strings.Contains(err.Error(), "blob not found") { } else if strings.Contains(err.Error(), "blob not found") {
errType = errBlobNotFound errType = errBlobNotFound
} else if strings.Contains(err.Error(), "requested blob is protected") {
errType = errProtectedBlob
} else if strings.Contains(err.Error(), "0-byte blob received") { } else if strings.Contains(err.Error(), "0-byte blob received") {
errType = errZeroByteBlob errType = errZeroByteBlob
} else if strings.Contains(err.Error(), "PROTOCOL_VIOLATION: tried to retire connection") { } else if strings.Contains(err.Error(), "PROTOCOL_VIOLATION: tried to retire connection") {


@ -1,348 +0,0 @@
package lite_db
import (
"database/sql"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
qt "github.com/lbryio/lbry.go/v2/extras/query"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql" // blank import for db driver ensures its imported even if its not used
log "github.com/sirupsen/logrus"
"github.com/volatiletech/null/v8"
)
// SdBlob is a special blob that contains information on the rest of the blobs in the stream
type SdBlob struct {
StreamName string `json:"stream_name"`
Blobs []struct {
Length int `json:"length"`
BlobNum int `json:"blob_num"`
BlobHash string `json:"blob_hash,omitempty"`
IV string `json:"iv"`
} `json:"blobs"`
StreamType string `json:"stream_type"`
Key string `json:"key"`
SuggestedFileName string `json:"suggested_file_name"`
StreamHash string `json:"stream_hash"`
}
// SQL implements the DB interface
type SQL struct {
conn *sql.DB
TrackAccessTime bool
}
func logQuery(query string, args ...interface{}) {
s, err := qt.InterpolateParams(query, args...)
if err != nil {
log.Errorln(err)
} else {
log.Debugln(s)
}
}
// Connect will create a connection to the database
func (s *SQL) Connect(dsn string) error {
var err error
// interpolateParams is necessary. otherwise uploading a stream with thousands of blobs
// will hit MySQL's max_prepared_stmt_count limit because the prepared statements are all
// opened inside a transaction. closing them manually doesn't seem to help
dsn += "?parseTime=1&collation=utf8mb4_unicode_ci&interpolateParams=1"
s.conn, err = sql.Open("mysql", dsn)
if err != nil {
return errors.Err(err)
}
s.conn.SetMaxIdleConns(12)
return errors.Err(s.conn.Ping())
}
// AddBlob adds a blob to the database.
func (s *SQL) AddBlob(hash string, length int) error {
if s.conn == nil {
return errors.Err("not connected")
}
_, err := s.insertBlob(hash, length)
return err
}
func (s *SQL) insertBlob(hash string, length int) (int64, error) {
if length <= 0 {
return 0, errors.Err("length must be positive")
}
const isStored = true
now := time.Now()
args := []interface{}{hash, isStored, length, now}
blobID, err := s.exec(
"INSERT INTO blob_ (hash, is_stored, length, last_accessed_at) VALUES ("+qt.Qs(len(args))+") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored)), last_accessed_at=VALUES(last_accessed_at)",
args...,
)
if err != nil {
return 0, err
}
if blobID == 0 {
err = s.conn.QueryRow("SELECT id FROM blob_ WHERE hash = ?", hash).Scan(&blobID)
if err != nil {
return 0, errors.Err(err)
}
if blobID == 0 {
return 0, errors.Err("blob ID is 0 even after INSERTing and SELECTing")
}
}
return blobID, nil
}
// HasBlob checks if the database contains the blob information.
func (s *SQL) HasBlob(hash string) (bool, error) {
exists, err := s.HasBlobs([]string{hash})
if err != nil {
return false, err
}
return exists[hash], nil
}
// HasBlobs checks if the database contains the set of blobs and returns a bool map.
func (s *SQL) HasBlobs(hashes []string) (map[string]bool, error) {
exists, streamsNeedingTouch, err := s.hasBlobs(hashes)
_ = s.touch(streamsNeedingTouch)
return exists, err
}
func (s *SQL) touch(blobIDs []uint64) error {
if len(blobIDs) == 0 {
return nil
}
query := "UPDATE blob_ SET last_accessed_at = ? WHERE id IN (" + qt.Qs(len(blobIDs)) + ")"
args := make([]interface{}, len(blobIDs)+1)
args[0] = time.Now()
for i := range blobIDs {
args[i+1] = blobIDs[i]
}
startTime := time.Now()
_, err := s.exec(query, args...)
log.Debugf("blobs access query touched %d blobs and took %s", len(blobIDs), time.Since(startTime))
return errors.Err(err)
}
func (s *SQL) hasBlobs(hashes []string) (map[string]bool, []uint64, error) {
if s.conn == nil {
return nil, nil, errors.Err("not connected")
}
var (
hash string
blobID uint64
lastAccessedAt null.Time
)
var needsTouch []uint64
exists := make(map[string]bool)
touchDeadline := time.Now().AddDate(0, 0, -1) // touch blob if last accessed before this time
maxBatchSize := 10000
doneIndex := 0
for len(hashes) > doneIndex {
sliceEnd := doneIndex + maxBatchSize
if sliceEnd > len(hashes) {
sliceEnd = len(hashes)
}
log.Debugf("getting hashes[%d:%d] of %d", doneIndex, sliceEnd, len(hashes))
batch := hashes[doneIndex:sliceEnd]
// TODO: this query doesn't work for SD blobs, which are not in the stream_blob table
query := `SELECT hash, id, last_accessed_at
FROM blob_
WHERE is_stored = ? and hash IN (` + qt.Qs(len(batch)) + `)`
args := make([]interface{}, len(batch)+1)
args[0] = true
for i := range batch {
args[i+1] = batch[i]
}
logQuery(query, args...)
err := func() error {
startTime := time.Now()
rows, err := s.conn.Query(query, args...)
log.Debugf("hashes query took %s", time.Since(startTime))
if err != nil {
return errors.Err(err)
}
defer closeRows(rows)
for rows.Next() {
err := rows.Scan(&hash, &blobID, &lastAccessedAt)
if err != nil {
return errors.Err(err)
}
exists[hash] = true
if s.TrackAccessTime && (!lastAccessedAt.Valid || lastAccessedAt.Time.Before(touchDeadline)) {
needsTouch = append(needsTouch, blobID)
}
}
err = rows.Err()
if err != nil {
return errors.Err(err)
}
doneIndex += len(batch)
return nil
}()
if err != nil {
return nil, nil, err
}
}
return exists, needsTouch, nil
}
// Delete will remove the blob from the db
func (s *SQL) Delete(hash string) error {
_, err := s.exec("UPDATE blob_ set is_stored = ? WHERE hash = ?", 0, hash)
return errors.Err(err)
}
// AddSDBlob inserts the SD blob. Unlike the full db, this lite version does not insert or track
// the content blobs listed inside the SD blob.
func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int) error {
if s.conn == nil {
return errors.Err("not connected")
}
_, err := s.insertBlob(sdHash, sdBlobLength)
return err
}
// GetLRUBlobs returns the least recently accessed stored blobs, up to maxBlobs
func (s *SQL) GetLRUBlobs(maxBlobs int) ([]string, error) {
if s.conn == nil {
return nil, errors.Err("not connected")
}
query := "SELECT hash from blob_ where is_stored = ? order by last_accessed_at limit ?"
const isStored = true
logQuery(query, isStored, maxBlobs)
rows, err := s.conn.Query(query, isStored, maxBlobs)
if err != nil {
return nil, errors.Err(err)
}
defer closeRows(rows)
blobs := make([]string, 0, maxBlobs)
for rows.Next() {
var hash string
err := rows.Scan(&hash)
if err != nil {
return nil, errors.Err(err)
}
blobs = append(blobs, hash)
}
return blobs, nil
}
func (s *SQL) AllBlobs() ([]string, error) {
if s.conn == nil {
return nil, errors.Err("not connected")
}
query := "SELECT hash from blob_ where is_stored = ?" //TODO: maybe sorting them makes more sense?
const isStored = true
logQuery(query, isStored)
rows, err := s.conn.Query(query, isStored)
if err != nil {
return nil, errors.Err(err)
}
defer closeRows(rows)
totalBlobs, err := s.BlobsCount()
if err != nil {
return nil, err
}
blobs := make([]string, 0, totalBlobs)
for rows.Next() {
var hash string
err := rows.Scan(&hash)
if err != nil {
return nil, errors.Err(err)
}
blobs = append(blobs, hash)
}
return blobs, nil
}
func (s *SQL) BlobsCount() (int, error) {
if s.conn == nil {
return 0, errors.Err("not connected")
}
query := "SELECT count(id) from blob_ where is_stored = ?" //TODO: maybe sorting them makes more sense?
const isStored = true
logQuery(query, isStored)
var count int
err := s.conn.QueryRow(query, isStored).Scan(&count)
return count, errors.Err(err)
}
func closeRows(rows *sql.Rows) {
if rows != nil {
err := rows.Close()
if err != nil {
log.Error("error closing rows: ", err)
}
}
}
func (s *SQL) exec(query string, args ...interface{}) (int64, error) {
logQuery(query, args...)
attempt, maxAttempts := 0, 3
Retry:
attempt++
result, err := s.conn.Exec(query, args...)
if isLockTimeoutError(err) {
if attempt <= maxAttempts {
//Error 1205: Lock wait timeout exceeded; try restarting transaction
goto Retry
}
err = errors.Prefix("Lock timeout for query "+query, err)
}
if err != nil {
return 0, errors.Err(err)
}
lastID, err := result.LastInsertId()
return lastID, errors.Err(err)
}
func isLockTimeoutError(err error) bool {
e, ok := err.(*mysql.MySQLError)
return ok && e != nil && e.Number == 1205
}
/* SQL schema
in prod make sure you use latin1 or utf8 charset, NOT utf8mb4. that's a waste of space.
CREATE TABLE `blob_` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT,
`hash` char(96) NOT NULL,
`is_stored` tinyint(1) NOT NULL DEFAULT '0',
`length` bigint unsigned DEFAULT NULL,
`last_accessed_at` datetime DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `id` (`id`),
UNIQUE KEY `blob_hash_idx` (`hash`),
KEY `blob_last_accessed_idx` (`last_accessed_at`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
*/
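An aside on the retry in exec above: the goto can be written as an equivalent bounded loop. This sketch shows the same idea, retrying MySQL error 1205 (lock wait timeout) up to three attempts; it reuses isLockTimeoutError from this file, while the helper name itself is illustrative:

	// execWithRetry runs one statement, retrying while MySQL reports
	// error 1205 (lock wait timeout exceeded), up to maxAttempts tries.
	func execWithRetry(conn *sql.DB, query string, args ...interface{}) (sql.Result, error) {
		const maxAttempts = 3
		var result sql.Result
		var err error
		for attempt := 1; attempt <= maxAttempts; attempt++ {
			result, err = conn.Exec(query, args...)
			if !isLockTimeoutError(err) {
				break
			}
		}
		return result, err
	}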


@ -6,36 +6,9 @@ import (
"time" "time"
) )
-var (
-	name       = "prism-bin"
-	version    = "unknown"
-	commit     = "unknown"
-	commitLong = "unknown"
-	branch     = "unknown"
-	Time       = "unknown"
-	BuildTime  time.Time
-)
+var Version = ""
+var Time = ""
+var BuildTime time.Time
// Name returns main application name
func Name() string {
return name
}
// Version returns current application version
func Version() string {
return version
}
// FullName returns current app version, commit and build time
func FullName() string {
return fmt.Sprintf(
`Name: %v
Version: %v
branch: %v
commit: %v
commit long: %v
build date: %v`, Name(), Version(), branch, commit, commitLong, BuildTime.String())
}
func init() { func init() {
if Time != "" { if Time != "" {
@ -47,6 +20,11 @@ func init() {
} }
func VersionString() string { func VersionString() string {
version := Version
if version == "" {
version = "<unset>"
}
var buildTime string var buildTime string
if BuildTime.IsZero() { if BuildTime.IsZero() {
buildTime = "<now>" buildTime = "<now>"


@ -18,6 +18,9 @@ import (
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// ErrBlobExists is a default error for when a blob already exists on the reflector server.
var ErrBlobExists = errors.Base("blob exists on server")
// Client is an instance of a client connected to a server. // Client is an instance of a client connected to a server.
type Client struct { type Client struct {
Timeout time.Duration Timeout time.Duration


@ -15,8 +15,7 @@ import (
"github.com/lbryio/lbry.go/v2/extras/errors" "github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream" "github.com/lbryio/lbry.go/v2/stream"
"github.com/lucas-clemente/quic-go/http3"
"github.com/quic-go/quic-go/http3"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
@ -69,7 +68,7 @@ func (c *Client) HasBlob(hash string) (bool, error) {
if err != nil { if err != nil {
return false, errors.Err(err) return false, errors.Err(err)
} }
-	defer func() { _ = resp.Body.Close() }()
+	defer resp.Body.Close()
if resp.StatusCode == http.StatusOK { if resp.StatusCode == http.StatusOK {
return true, nil return true, nil
} }
@ -86,7 +85,7 @@ func (c *Client) GetBlob(hash string) (stream.Blob, shared.BlobTrace, error) {
if err != nil { if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err(err) return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err(err)
} }
-	defer func() { _ = resp.Body.Close() }()
+	defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
fmt.Printf("%s blob not found %d\n", hash, resp.StatusCode) fmt.Printf("%s blob not found %d\n", hash, resp.StatusCode)


@ -15,31 +15,28 @@ import (
"time" "time"
"github.com/lbryio/reflector.go/internal/metrics" "github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/store" "github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors" "github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop" "github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/quic-go/quic-go" "github.com/lucas-clemente/quic-go"
"github.com/quic-go/quic-go/http3" "github.com/lucas-clemente/quic-go/http3"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// Server is an instance of a peer server that houses the listener and store. // Server is an instance of a peer server that houses the listener and store.
type Server struct { type Server struct {
store store.BlobStore store store.BlobStore
grp *stop.Group grp *stop.Group
concurrentRequests int
} }
// NewServer returns an initialized Server pointer. // NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore, requestQueueSize int) *Server { func NewServer(store store.BlobStore, requestQueueSize int) *Server {
return &Server{ return &Server{
store: store, store: store,
grp: stop.New(), grp: stop.New(),
concurrentRequests: requestQueueSize,
} }
} }
@ -113,12 +110,14 @@ func (s *Server) Start(address string) error {
} }
}) })
 	server := http3.Server{
-		Addr:       address,
-		Handler:    r,
-		TLSConfig:  generateTLSConfig(),
+		Server: &http.Server{
+			Handler:   r,
+			Addr:      address,
+			TLSConfig: generateTLSConfig(),
+		},
 		QuicConfig: quicConf,
 	}
-	go InitWorkers(s, s.concurrentRequests)
+	go InitWorkers(s, 200)
go s.listenForShutdown(&server) go s.listenForShutdown(&server)
s.grp.Add(1) s.grp.Add(1)
go func() { go func() {
@ -155,7 +154,7 @@ func generateTLSConfig() *tls.Config {
func (s *Server) listenAndServe(server *http3.Server) { func (s *Server) listenAndServe(server *http3.Server) {
err := server.ListenAndServe() err := server.ListenAndServe()
-	if err != nil && err != quic.ErrServerClosed {
+	if err != nil && err.Error() != "server closed" {
log.Errorln(errors.FullTrace(err)) log.Errorln(errors.FullTrace(err))
} }
} }
@ -180,10 +179,7 @@ func (s *Server) HandleGetBlob(w http.ResponseWriter, r *http.Request) {
wantsTrace = false wantsTrace = false
} }
} }
if reflector.IsProtected(requestedBlob) {
http.Error(w, "requested blob is protected", http.StatusForbidden)
return
}
blob, trace, err := s.store.Get(requestedBlob) blob, trace, err := s.store.Get(requestedBlob)
if wantsTrace { if wantsTrace {


@ -8,14 +8,12 @@ import (
"sync" "sync"
"time" "time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors" "github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream" "github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/reflector.go/shared"
"github.com/quic-go/quic-go" "github.com/lbryio/reflector.go/store"
"github.com/quic-go/quic-go/http3" "github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/http3"
) )
// Store is a blob store that gets blobs from a peer. // Store is a blob store that gets blobs from a peer.
@ -74,7 +72,7 @@ func (p *Store) Has(hash string) (bool, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
-	defer func() { _ = c.Close() }()
+	defer c.Close()
return c.HasBlob(hash) return c.HasBlob(hash)
} }
@ -93,7 +91,7 @@ func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
if err != nil { if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
} }
-	defer func() { _ = c.Close() }()
+	defer c.Close()
return c.GetBlob(hash) return c.GetBlob(hash)
} }
@ -112,6 +110,7 @@ func (p *Store) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented) return errors.Err(shared.ErrNotImplemented)
} }
-// Shutdown is not supported
+// Delete is not supported
 func (p *Store) Shutdown() {
+	return
 }


@ -33,6 +33,7 @@ func InitWorkers(server *Server, workers int) {
} }
}(i) }(i)
} }
return
} }
func enqueue(b *blobRequest) { func enqueue(b *blobRequest) {


@ -227,36 +227,32 @@ func (s *Server) handleCompositeRequest(data []byte) ([]byte, error) {
if err != nil { if err != nil {
var je *json.SyntaxError var je *json.SyntaxError
if ee.As(err, &je) { if ee.As(err, &je) {
return nil, errors.Err("invalid json request: offset %d in data %s", je.Offset, hex.EncodeToString(data)) return nil, errors.Err("invalid json at offset %d in data %s", je.Offset, hex.EncodeToString(data))
} }
return nil, errors.Err(err) return nil, errors.Err(err)
} }
 	response := compositeResponse{
 		LbrycrdAddress: LbrycrdAddress,
-		AvailableBlobs: []string{},
 	}

 	if len(request.RequestedBlobs) > 0 {
+		var availableBlobs []string
 		for _, blobHash := range request.RequestedBlobs {
-			if reflector.IsProtected(blobHash) {
-				return nil, errors.Err("requested blob is protected")
-			}
 			exists, err := s.store.Has(blobHash)
 			if err != nil {
 				return nil, err
 			}
 			if exists {
-				response.AvailableBlobs = append(response.AvailableBlobs, blobHash)
+				availableBlobs = append(availableBlobs, blobHash)
 			}
 		}
+		response.AvailableBlobs = availableBlobs
 	}

-	if request.BlobDataPaymentRate != nil {
-		response.BlobDataPaymentRate = paymentRateAccepted
-		if *request.BlobDataPaymentRate < 0 {
-			response.BlobDataPaymentRate = paymentRateTooLow
-		}
-	}
+	response.BlobDataPaymentRate = paymentRateAccepted
+	if request.BlobDataPaymentRate < 0 {
+		response.BlobDataPaymentRate = paymentRateTooLow
+	}
var blob []byte var blob []byte
@ -271,14 +267,14 @@ func (s *Server) handleCompositeRequest(data []byte) ([]byte, error) {
blob, trace, err = s.store.Get(request.RequestedBlob) blob, trace, err = s.store.Get(request.RequestedBlob)
log.Debug(trace.String()) log.Debug(trace.String())
 		if errors.Is(err, store.ErrBlobNotFound) {
-			response.IncomingBlob = &incomingBlob{
+			response.IncomingBlob = incomingBlob{
 				Error: err.Error(),
 			}
 		} else if err != nil {
 			return nil, err
 		} else {
-			response.IncomingBlob = &incomingBlob{
-				BlobHash: request.RequestedBlob,
+			response.IncomingBlob = incomingBlob{
+				BlobHash: reflector.BlobHash(blob),
 				Length:   len(blob),
 			}
metrics.MtrOutBytesTcp.Add(float64(len(blob))) metrics.MtrOutBytesTcp.Add(float64(len(blob)))
@ -306,15 +302,7 @@ func (s *Server) logError(e error) {
} }
 func readNextMessage(buf *bufio.Reader) ([]byte, error) {
-	first_byte, err := buf.ReadByte()
-	if err != nil {
-		return nil, err
-	}
-	if first_byte != '{' {
-		// every request starts with '{'. Checking here disconnects earlier, so we don't wait until timeout
-		return nil, errInvalidData
-	}
-	msg := []byte("{")
+	msg := make([]byte, 0)
 	eof := false

 	for {
@ -335,8 +323,6 @@ func readNextMessage(buf *bufio.Reader) ([]byte, error) {
if len(msg) > maxRequestSize { if len(msg) > maxRequestSize {
return msg, errRequestTooLarge return msg, errRequestTooLarge
} else if len(msg) > 0 && msg[0] != '{' {
return msg, errInvalidData
} }
// yes, this is how the peer protocol knows when the request finishes // yes, this is how the peer protocol knows when the request finishes
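For readers skimming this hunk: the peer protocol has no length prefix, so the loop keeps appending bytes and, as the comment says, treats a request as finished once the buffer parses as complete JSON. A standalone sketch of that framing check (the helper name is illustrative, not part of this diff):

	// requestComplete reports whether the accumulated bytes already form
	// one complete JSON document, the only end-of-request signal available.
	func requestComplete(msg []byte) bool {
		return len(msg) > 0 && json.Valid(msg) // json is encoding/json
	}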
@ -371,7 +357,6 @@ const (
) )
var errRequestTooLarge = errors.Base("request is too large") var errRequestTooLarge = errors.Base("request is too large")
var errInvalidData = errors.Base("Invalid data")
type availabilityRequest struct { type availabilityRequest struct {
LbrycrdAddress bool `json:"lbrycrd_address"` LbrycrdAddress bool `json:"lbrycrd_address"`
@ -408,13 +393,13 @@ type blobResponse struct {
 type compositeRequest struct {
 	LbrycrdAddress      bool     `json:"lbrycrd_address"`
 	RequestedBlobs      []string `json:"requested_blobs"`
-	BlobDataPaymentRate *float64 `json:"blob_data_payment_rate"`
+	BlobDataPaymentRate float64  `json:"blob_data_payment_rate"`
 	RequestedBlob       string   `json:"requested_blob"`
 }

 type compositeResponse struct {
 	LbrycrdAddress      string        `json:"lbrycrd_address,omitempty"`
-	AvailableBlobs      []string      `json:"available_blobs"`
+	AvailableBlobs      []string      `json:"available_blobs,omitempty"`
 	BlobDataPaymentRate string        `json:"blob_data_payment_rate,omitempty"`
-	IncomingBlob        *incomingBlob `json:"incoming_blob,omitempty"`
+	IncomingBlob        incomingBlob  `json:"incoming_blob,omitempty"`
 }


@ -2,10 +2,7 @@ package peer
import ( import (
"bytes" "bytes"
"io"
"net"
"testing" "testing"
"time"
"github.com/lbryio/reflector.go/store" "github.com/lbryio/reflector.go/store"
) )
@ -78,62 +75,3 @@ func TestAvailabilityRequest_WithBlobs(t *testing.T) {
} }
} }
} }
func TestRequestFromConnection(t *testing.T) {
s := getServer(t, true)
err := s.Start("127.0.0.1:50505")
defer s.Shutdown()
if err != nil {
t.Error("error starting server", err)
}
for _, p := range availabilityRequests {
conn, err := net.Dial("tcp", "127.0.0.1:50505")
if err != nil {
t.Error("error opening connection", err)
}
defer func() { _ = conn.Close() }()
response := make([]byte, 8192)
_, err = conn.Write(p.request)
if err != nil {
t.Error("error writing", err)
}
_, err = conn.Read(response)
if err != nil {
t.Error("error reading", err)
}
if !bytes.Equal(response[:len(p.response)], p.response) {
t.Errorf("Response did not match expected response.\nExpected: %s\nGot: %s", string(p.response), string(response))
}
}
}
func TestInvalidData(t *testing.T) {
s := getServer(t, true)
err := s.Start("127.0.0.1:50503")
defer s.Shutdown()
if err != nil {
t.Error("error starting server", err)
}
conn, err := net.Dial("tcp", "127.0.0.1:50503")
if err != nil {
t.Error("error opening connection", err)
}
defer func() { _ = conn.Close() }()
response := make([]byte, 8192)
_, err = conn.Write([]byte("hello dear server, I would like blobs. Please"))
if err != nil {
t.Error("error writing", err)
}
err = conn.SetReadDeadline(time.Now().Add(5 * time.Second))
if err != nil {
t.Error("error setting read deadline", err)
}
_, err = conn.Read(response)
if err != io.EOF {
t.Error("error reading", err)
}
println(response)
}


@ -4,11 +4,10 @@ import (
"strings" "strings"
"time" "time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors" "github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream" "github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
) )
// Store is a blob store that gets blobs from a peer. // Store is a blob store that gets blobs from a peer.
@ -42,7 +41,7 @@ func (p *Store) Has(hash string) (bool, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
-	defer func() { _ = c.Close() }()
+	defer c.Close()
return c.HasBlob(hash) return c.HasBlob(hash)
} }
@ -53,7 +52,7 @@ func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
if err != nil { if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
} }
-	defer func() { _ = c.Close() }()
+	defer c.Close()
blob, trace, err := c.GetBlob(hash) blob, trace, err := c.GetBlob(hash)
if err != nil && strings.Contains(err.Error(), "blob not found") { if err != nil && strings.Contains(err.Error(), "blob not found") {
return nil, trace, store.ErrBlobNotFound return nil, trace, store.ErrBlobNotFound
@ -77,6 +76,7 @@ func (p *Store) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented) return errors.Err(shared.ErrNotImplemented)
} }
-// Shutdown is not supported
+// Delete is not supported
 func (p *Store) Shutdown() {
+	return
 }


@ -5,14 +5,14 @@ import (
"strconv" "strconv"
"sync" "sync"
"github.com/lbryio/reflector.go/cluster"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/dht" "github.com/lbryio/lbry.go/v2/dht"
"github.com/lbryio/lbry.go/v2/dht/bits" "github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/reflector.go/cluster"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/peer"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors" "github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop" "github.com/lbryio/lbry.go/v2/extras/stop"


@ -4,9 +4,8 @@ import (
"math/big" "math/big"
"testing" "testing"
"github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/lbryio/lbry.go/v2/dht/bits"
) )
func TestAnnounceRange(t *testing.T) { func TestAnnounceRange(t *testing.T) {


@ -22,23 +22,24 @@ import (
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
) )
-/* TODO:
+var TODO = `
import cert from wallet import cert from wallet
get all utxos from chainquery get all utxos from chainquery
create transaction create transaction
sign it with the channel sign it with the channel
track state of utxos across publishes from this channel so that we can just do one query to get utxos track state of utxos across publishes from this channel so that we can just do one query to get utxos
prioritize only confirmed utxos prioritize only confirmed utxos
Handling all the issues we handle currently with lbrynet: Handling all the issues we handle currently with lbrynet:
"Couldn't find private key for id", "Couldn't find private key for id",
"You already have a stream claim published under the name", "You already have a stream claim published under the name",
"Cannot publish using channel", "Cannot publish using channel",
"txn-mempool-conflict", "txn-mempool-conflict",
"too-long-mempool-chain", "too-long-mempool-chain",
"Missing inputs", "Missing inputs",
"Not enough funds to cover this transaction", "Not enough funds to cover this transaction",
-*/
+}
+`
type Details struct { type Details struct {
Title string Title string
@ -115,7 +116,7 @@ func Publish(client *lbrycrd.Client, path, name, address string, details Details
return signedTx, txid, nil return signedTx, txid, nil
} }
-// TODO: lots of assumptions. hardcoded values need to be passed in or calculated
+//TODO: lots of assumptions. hardcoded values need to be passed in or calculated
func baseTx(client *lbrycrd.Client, amount float64, changeAddress btcutil.Address) (*wire.MsgTx, error) { func baseTx(client *lbrycrd.Client, amount float64, changeAddress btcutil.Address) (*wire.MsgTx, error) {
txFee := 0.0002 // TODO: estimate this better? txFee := 0.0002 // TODO: estimate this better?
@ -222,7 +223,8 @@ func makeStream(path string) (stream.Stream, *pb.Stream, error) {
if err != nil { if err != nil {
return nil, nil, errors.Err(err) return nil, nil, errors.Err(err)
} }
-	defer func() { _ = file.Close() }()
+	defer file.Close()
enc := stream.NewEncoder(file) enc := stream.NewEncoder(file)
s, err := enc.Stream() s, err := enc.Stream()

readme.md

@ -1,110 +1,25 @@
# Reflector # Reflector
-Reflector is a central piece of software that provides LBRY with the following features:
+A reflector cluster to accept LBRY content for hosting en masse, rehost the content, and make money on data fees (TODO).
- Blobs reflection: when something is published, we capture the data and store it on our servers for quicker retrieval This code includes Go implementations of the LBRY peer protocol, reflector protocol, and DHT.
- Blobs distribution: when a piece of content is requested and the LBRY network doesn't have it, reflector will retrieve it from its storage and distribute it
- Blobs caching: reflectors can be chained together in multiple regions or servers to form a chain of cached content. We call those "blobcaches". They are layered so that content distribution is favorable in all the regions we deploy it to
There are a few other features embedded in reflector.go, including publishing streams from Go, downloading or uploading blobs, resolving content, and more unfinished tools.
This code includes Go implementations of the LBRY peer protocol, reflector protocol, and DHT.
## Installation ## Installation
-- Install mysql 8 (5.7 might work too)
+coming soon
- add a reflector user and database with password `reflector` with localhost access only
- Create the tables as described [here](https://github.com/lbryio/reflector.go/blob/master/db/db.go#L735) (the link might not update as the code does so just look for the schema in that file)
#### We do not support running reflector.go as a blob receiver; however, if you want to run it as a private blobcache, you may compile it yourself and run it as follows:
```bash
./prism-bin reflector \
--conf="none" \
--disable-uploads=true \
--use-db=false \
--upstream-reflector="reflector.lbry.com" \
--upstream-protocol="http" \
--request-queue-size=200 \
--disk-cache="2GB:/path/to/your/storage/:localdb" \
```
Create a systemd script if you want to run it automatically on startup or as a service.
## Usage ## Usage
-Usage as reflector/blobcache:
+coming soon
```bash
Run reflector server
Usage:
prism reflector [flags]
Flags:
--disable-blocklist Disable blocklist watching/updating
--disable-uploads Disable uploads to this reflector server
--disk-cache string Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (default "100GB:/tmp/downloaded_blobs:localdb")
-h, --help help for reflector
--http-peer-port int The port reflector will distribute content from over HTTP protocol (default 5569)
--http3-peer-port int The port reflector will distribute content from over HTTP3 protocol (default 5568)
--mem-cache int enable in-memory cache with a max size of this many blobs
--metrics-port int The port reflector will use for prometheus metrics (default 2112)
--optional-disk-cache string Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (this would get hit before the one specified in disk-cache)
--origin-endpoint string HTTP edge endpoint for standard HTTP retrieval
--origin-endpoint-fallback string HTTP edge endpoint for standard HTTP retrieval if first origin fails
--receiver-port int The port reflector will receive content from (default 5566)
--request-queue-size int How many concurrent requests from downstream should be handled at once (the rest will wait) (default 200)
--tcp-peer-port int The port reflector will distribute content from for the TCP (LBRY) protocol (default 5567)
--upstream-protocol string protocol used to fetch blobs from another upstream reflector server (tcp/http3/http) (default "http")
--upstream-reflector string host:port of a reflector server where blobs are fetched from
--use-db Whether to connect to the reflector db or not (default true)
Global Flags:
--conf string Path to config. Use 'none' to disable (default "config.json")
-v, --verbose strings Verbose logging for specific components
```
Other uses:
```bash
Prism is a single entry point application with multiple sub modules which can be leveraged individually or together
Usage:
prism [command]
Available Commands:
check-integrity check blobs integrity for a given path
cluster Start(join) to or Start a new cluster
decode Decode a claim value
dht Run dht node
getstream Get a stream from a reflector server
help Help about any command
peer Run peer server
populate-db populate local database with blobs from a disk storage
publish Publish a file
reflector Run reflector server
resolve Resolve a URL
send Send a file to a reflector
sendblob Send a random blob to a reflector server
start Runs full prism application with cluster, dht, peer server, and reflector server.
test Test things
upload Upload blobs to S3
version Print the version
Flags:
--conf string Path to config. Use 'none' to disable (default "config.json")
-h, --help help for prism
-v, --verbose strings Verbose logging for specific components
```
## Running from Source ## Running from Source
-This project requires [Go v1.20](https://golang.org/doc/install).
+This project requires [Go v1.11](https://golang.org/doc/install) or higher because it uses Go modules.
On Ubuntu you can install it with `sudo snap install go --classic`
``` ```
git clone git@github.com:lbryio/reflector.go.git git clone git@github.com:lbryio/reflector.go.git
cd reflector.go cd reflector.go
make make
-./dist/linux_amd64/prism-bin
+./bin/prism-bin
``` ```
## Contributing ## Contributing
@ -118,7 +33,8 @@ This project is MIT licensed.
## Security ## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. We take security seriously. Please contact security@lbry.com regarding any security issues.
-Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
+Our PGP key is [here](https://keybase.io/lbry/key.asc) if you need it.
## Contact ## Contact
-The primary contact for this project is [@Nikooo777](https://github.com/Nikooo777) (niko-at-lbry.com)
+The primary contact for this project is [@lyoshenka](https://github.com/lyoshenka) (grin@lbry.com)


@ -9,6 +9,7 @@ import (
"time" "time"
"github.com/lbryio/reflector.go/internal/metrics" "github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/store" "github.com/lbryio/reflector.go/store"
"github.com/lbryio/reflector.go/wallet" "github.com/lbryio/reflector.go/wallet"


@ -1,81 +0,0 @@
package reflector
import (
"encoding/json"
"net/http"
"time"
"github.com/bluele/gcache"
"github.com/lbryio/lbry.go/v2/extras/errors"
"golang.org/x/sync/singleflight"
)
const protectedListURL = "https://api.odysee.com/file/list_protected"
type ProtectedContent struct {
SDHash string `json:"sd_hash"`
ClaimID string `json:"claim_id"`
}
var protectedCache = gcache.New(10).Expiration(2 * time.Minute).Build()
func GetProtectedContent() (interface{}, error) {
cachedVal, err := protectedCache.Get("protected")
if err == nil && cachedVal != nil {
return cachedVal.(map[string]bool), nil
}
method := "GET"
var r struct {
Success bool `json:"success"`
Error string `json:"error"`
Data []ProtectedContent `json:"data"`
}
client := &http.Client{}
req, err := http.NewRequest(method, protectedListURL, nil)
if err != nil {
return nil, errors.Err(err)
}
res, err := client.Do(req)
if err != nil {
return nil, errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode != http.StatusOK {
return nil, errors.Err("unexpected status code %d", res.StatusCode)
}
if err = json.NewDecoder(res.Body).Decode(&r); err != nil {
return nil, errors.Err(err)
}
if !r.Success {
return nil, errors.Prefix("file/list_protected API call", r.Error)
}
protectedMap := make(map[string]bool, len(r.Data))
for _, pc := range r.Data {
protectedMap[pc.SDHash] = true
}
err = protectedCache.Set("protected", protectedMap)
if err != nil {
return protectedMap, errors.Err(err)
}
return protectedMap, nil
}
var sf = singleflight.Group{}
func IsProtected(sdHash string) bool {
val, err, _ := sf.Do("protected", GetProtectedContent)
if err != nil {
return false
}
cachedMap, ok := val.(map[string]bool)
if !ok {
return false
}
return cachedMap[sdHash]
}
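The singleflight.Group above is what prevents a thundering herd: concurrent IsProtected calls share one in-flight GetProtectedContent fetch instead of each hitting the API. A self-contained sketch of that effect (the counter and stub fetch are illustrative, not part of this diff):

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"

		"golang.org/x/sync/singleflight"
	)

	func main() {
		var fetches int32
		var g singleflight.Group
		var wg sync.WaitGroup
		for i := 0; i < 10; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// All ten goroutines ask for the same key; overlapping
				// callers share a single run of the function below.
				_, _, _ = g.Do("protected", func() (interface{}, error) {
					atomic.AddInt32(&fetches, 1) // stands in for the HTTP call
					return map[string]bool{}, nil
				})
			}()
		}
		wg.Wait()
		fmt.Println("fetches:", atomic.LoadInt32(&fetches)) // 1 when calls overlap
	}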


@ -6,9 +6,11 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"io" "io"
"io/ioutil"
"net" "net"
"time" "time"
"github.com/google/gops/agent"
"github.com/lbryio/reflector.go/internal/metrics" "github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/store" "github.com/lbryio/reflector.go/store"
@ -61,13 +63,16 @@ func (s *Server) Shutdown() {
log.Println("reflector server stopped") log.Println("reflector server stopped")
} }
-// Start starts the server to handle connections.
+//Start starts the server to handle connections.
func (s *Server) Start(address string) error { func (s *Server) Start(address string) error {
l, err := net.Listen(network, address) l, err := net.Listen(network, address)
if err != nil { if err != nil {
return errors.Err(err) return errors.Err(err)
} }
log.Println("reflector listening on " + address) log.Println("reflector listening on " + address)
if err := agent.Listen(agent.Options{}); err != nil {
log.Fatal(err)
}
s.grp.Add(1) s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc() metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
go func() { go func() {
@ -118,7 +123,7 @@ func (s *Server) listenAndServe(listener net.Listener) {
s.grp.Add(1) s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc() metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
go func() { go func() {
-			defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
+			defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
s.handleConn(conn) s.handleConn(conn)
s.grp.Done() s.grp.Done()
}() }()
@ -366,7 +371,7 @@ func (s *Server) read(conn net.Conn, v interface{}) error {
dec := json.NewDecoder(conn) dec := json.NewDecoder(conn)
err = dec.Decode(v) err = dec.Decode(v)
if err != nil { if err != nil {
-		data, _ := io.ReadAll(dec.Buffered())
+		data, _ := ioutil.ReadAll(dec.Buffered())
if len(data) > 0 { if len(data) > 0 {
return errors.Err("%s. Data: %s", err.Error(), hex.EncodeToString(data)) return errors.Err("%s. Data: %s", err.Error(), hex.EncodeToString(data))
} }


@ -9,9 +9,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/dht/bits" "github.com/lbryio/lbry.go/v2/dht/bits"
"github.com/lbryio/reflector.go/store"
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/phayes/freeport" "github.com/phayes/freeport"


@ -1,13 +1,15 @@
package reflector package reflector
import ( import (
"io/ioutil"
"os" "os"
"path" "path"
"sync" "sync"
"time" "time"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/internal/metrics" "github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/store" "github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors" "github.com/lbryio/lbry.go/v2/extras/errors"
@ -164,7 +166,7 @@ func (u *Uploader) uploadBlob(filepath string) (err error) {
} }
}() }()
-	blob, err := os.ReadFile(filepath)
+	blob, err := ioutil.ReadFile(filepath)
if err != nil { if err != nil {
return errors.Err(err) return errors.Err(err)
} }


@ -1,26 +0,0 @@
#!/usr/bin/env bash
err=0
trap 'err=1' ERR
# All the .go files, excluding auto generated folders
GO_FILES=$(find . -iname '*.go' -type f)
(
go install golang.org/x/tools/cmd/goimports@latest # Used in build script for generated files
# go install golang.org/x/lint/golint@latest # Linter
go install github.com/jgautheron/gocyclo@latest # Check against high complexity
go install github.com/mdempsky/unconvert@latest # Identifies unnecessary type conversions
go install github.com/kisielk/errcheck@latest # Checks for unhandled errors
go install github.com/opennota/check/cmd/varcheck@latest # Checks for unused vars
go install github.com/opennota/check/cmd/structcheck@latest # Checks for unused fields in structs
)
echo "Running varcheck..." && varcheck $(go list ./...)
echo "Running structcheck..." && structcheck $(go list ./...)
# go vet is the official Go static analyzer
echo "Running go vet..." && go vet $(go list ./...)
# checks for unhandled errors
echo "Running errcheck..." && errcheck $(go list ./...)
# check for unnecessary conversions - ignore autogen code
echo "Running unconvert..." && unconvert -v $(go list ./...)
echo "Running gocyclo..." && gocyclo -ignore "_test" -avg -over 28 $GO_FILES
#echo "Running golint..." && golint -set_exit_status $(go list ./...)
test $err = 0 # Return non-zero if any command failed


@ -2,76 +2,38 @@ package http
import ( import (
"net/http" "net/http"
"sync"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/store"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
func (s *Server) getBlob(c *gin.Context) { func (s *Server) getBlob(c *gin.Context) {
waiter := &sync.WaitGroup{}
waiter.Add(1)
enqueue(&blobRequest{c: c, finished: waiter})
waiter.Wait()
}
func (s *Server) HandleGetBlob(c *gin.Context) {
defer func() {
if r := recover(); r != nil {
log.Errorf("Recovered from panic: %v", r)
}
}()
start := time.Now()
hash := c.Query("hash") hash := c.Query("hash")
edgeToken := c.Query("edge_token")
if reflector.IsProtected(hash) && edgeToken != s.edgeToken {
_ = c.Error(errors.Err("requested blob is protected"))
c.String(http.StatusForbidden, "requested blob is protected")
return
}
if s.missesCache.Has(hash) {
serialized, err := shared.NewBlobTrace(time.Since(start), "http").Serialize()
c.Header("Via", serialized)
if err != nil {
_ = c.Error(errors.Err(err))
c.String(http.StatusInternalServerError, err.Error())
return
}
c.AbortWithStatus(http.StatusNotFound)
return
}
blob, trace, err := s.store.Get(hash) blob, trace, err := s.store.Get(hash)
if err != nil { if err != nil {
serialized, serializeErr := trace.Serialize() serialized, serializeErr := trace.Serialize()
if serializeErr != nil { if serializeErr != nil {
-			_ = c.Error(errors.Prefix(serializeErr.Error(), err))
-			c.String(http.StatusInternalServerError, errors.Prefix(serializeErr.Error(), err).Error())
+			_ = c.AbortWithError(http.StatusInternalServerError, errors.Prefix(serializeErr.Error(), err))
return return
} }
c.Header("Via", serialized) c.Header("Via", serialized)
if errors.Is(err, store.ErrBlobNotFound) { if errors.Is(err, store.ErrBlobNotFound) {
-			_ = s.missesCache.Set(hash, true)
+			log.Errorf("wtf: %s", err.Error())
c.AbortWithStatus(http.StatusNotFound) c.AbortWithStatus(http.StatusNotFound)
return return
} }
-		_ = c.Error(err)
-		c.String(http.StatusInternalServerError, err.Error())
+		_ = c.AbortWithError(http.StatusInternalServerError, err)
return return
} }
serialized, err := trace.Serialize() serialized, err := trace.Serialize()
if err != nil { if err != nil {
-		_ = c.Error(err)
-		c.String(http.StatusInternalServerError, err.Error())
+		_ = c.AbortWithError(http.StatusInternalServerError, err)
return return
} }
metrics.MtrOutBytesHttp.Add(float64(len(blob))) metrics.MtrOutBytesHttp.Add(float64(len(blob)))
@ -86,8 +48,7 @@ func (s *Server) hasBlob(c *gin.Context) {
hash := c.Query("hash") hash := c.Query("hash")
has, err := s.store.Has(hash) has, err := s.store.Has(hash)
if err != nil { if err != nil {
-		_ = c.Error(err)
-		c.String(http.StatusInternalServerError, err.Error())
+		_ = c.AbortWithError(http.StatusInternalServerError, err)
return return
} }
if has { if has {
@ -96,10 +57,3 @@ func (s *Server) hasBlob(c *gin.Context) {
} }
c.Status(http.StatusNotFound) c.Status(http.StatusNotFound)
} }
func (s *Server) recoveryHandler(c *gin.Context, err interface{}) {
c.JSON(500, gin.H{
"title": "Error",
"err": err,
})
}
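One behavior worth noting in the left-hand getBlob: misses are cached (missesCache.Set on ErrBlobNotFound, checked before hitting the store), so repeat requests for a missing blob get a 404 without touching the backing store again. A minimal standalone sketch of that negative-caching pattern with the same gcache library; the helper, error value, and fetch callback are illustrative, not part of this diff:

	var errNotFound = errors.New("blob not found") // stdlib errors here

	// misses remembers recently-missing hashes for a few minutes so repeat
	// requests can be answered without querying the backing store again.
	var misses = gcache.New(2000).Expiration(5 * time.Minute).ARC().Build()

	func getWithMissCache(hash string, fetch func(string) ([]byte, error)) ([]byte, error) {
		if misses.Has(hash) {
			return nil, errNotFound // cached miss: short-circuit
		}
		blob, err := fetch(hash)
		if errors.Is(err, errNotFound) {
			_ = misses.Set(hash, true) // remember the miss
		}
		return blob, err
	}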


@ -5,33 +5,23 @@ import (
"net/http" "net/http"
"time" "time"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/bluele/gcache"
nice "github.com/ekyoung/gin-nice-recovery"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/reflector.go/store"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
) )
// Server is an instance of a peer server that houses the listener and store. // Server is an instance of a peer server that houses the listener and store.
type Server struct { type Server struct {
store store.BlobStore store store.BlobStore
grp *stop.Group grp *stop.Group
concurrentRequests int
missesCache gcache.Cache
edgeToken string
} }
// NewServer returns an initialized Server pointer. // NewServer returns an initialized Server pointer.
-func NewServer(store store.BlobStore, requestQueueSize int, edgeToken string) *Server {
+func NewServer(store store.BlobStore) *Server {
return &Server{ return &Server{
store: store, store: store,
grp: stop.New(), grp: stop.New(),
concurrentRequests: requestQueueSize,
missesCache: gcache.New(2000).Expiration(5 * time.Minute).ARC().Build(),
edgeToken: edgeToken,
} }
} }
@ -45,10 +35,7 @@ func (s *Server) Shutdown() {
// Start starts the server listener to handle connections. // Start starts the server listener to handle connections.
func (s *Server) Start(address string) error { func (s *Server) Start(address string) error {
gin.SetMode(gin.ReleaseMode) gin.SetMode(gin.ReleaseMode)
router := gin.New() router := gin.Default()
router.Use(gin.Logger())
// Install nice.Recovery, passing the handler to call after recovery
router.Use(nice.Recovery(s.recoveryHandler))
router.GET("/blob", s.getBlob) router.GET("/blob", s.getBlob)
router.HEAD("/blob", s.hasBlob) router.HEAD("/blob", s.hasBlob)
srv := &http.Server{ srv := &http.Server{
@ -56,7 +43,6 @@ func (s *Server) Start(address string) error {
Handler: router, Handler: router,
} }
go s.listenForShutdown(srv) go s.listenForShutdown(srv)
go InitWorkers(s, s.concurrentRequests)
// Initializing the server in a goroutine so that // Initializing the server in a goroutine so that
// it won't block the graceful shutdown handling below // it won't block the graceful shutdown handling below
s.grp.Add(1) s.grp.Add(1)


@@ -1,46 +0,0 @@
package http
import (
"sync"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/gin-gonic/gin"
)
type blobRequest struct {
c *gin.Context
finished *sync.WaitGroup
}
var getReqCh = make(chan *blobRequest, 20000)
func InitWorkers(server *Server, workers int) {
stopper := stop.New(server.grp)
for i := 0; i < workers; i++ {
metrics.RoutinesQueue.WithLabelValues("http", "worker").Inc()
go func(worker int) {
defer metrics.RoutinesQueue.WithLabelValues("http", "worker").Dec()
for {
select {
case <-stopper.Ch():
case r := <-getReqCh:
process(server, r)
metrics.HttpBlobReqQueue.Dec()
}
}
}(i)
}
}
func enqueue(b *blobRequest) {
metrics.HttpBlobReqQueue.Inc()
getReqCh <- b
}
func process(server *Server, r *blobRequest) {
server.HandleGetBlob(r.c)
r.finished.Done()
}
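The removed worker.go bounds how many blob requests are serviced at once: handlers enqueue onto a buffered channel and a fixed pool of goroutines drains it. A sketch of the calling side, inferred from the blobRequest/enqueue shapes above (this getBlob is hypothetical, not the verbatim handler):

// Sketch: a gin handler hands its context to the pool and blocks until done.
func (s *Server) getBlob(c *gin.Context) {
	wg := &sync.WaitGroup{}
	wg.Add(1)
	enqueue(&blobRequest{c: c, finished: wg})
	wg.Wait() // a worker runs HandleGetBlob and calls finished.Done()
}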


@@ -1,31 +1,39 @@
package shared
import (
+ "os"
"testing"
"time"
- "github.com/lbryio/lbry.go/v2/extras/util"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestBlobTrace_Serialize(t *testing.T) {
- hostName = util.PtrToString("test_machine")
+ hostName, err := os.Hostname()
+ require.NoError(t, err)
stack := NewBlobTrace(10*time.Second, "test")
stack.Stack(20*time.Second, "test2")
stack.Stack(30*time.Second, "test3")
serialized, err := stack.Serialize()
- assert.NoError(t, err)
+ require.NoError(t, err)
- t.Log(serialized)
- expected := "{\"stacks\":[{\"timing\":10000000000,\"origin_name\":\"test\",\"host_name\":\"test_machine\"},{\"timing\":20000000000,\"origin_name\":\"test2\",\"host_name\":\"test_machine\"},{\"timing\":30000000000,\"origin_name\":\"test3\",\"host_name\":\"test_machine\"}]}"
+ expected := `{"stacks":[{"timing":10000000000,"origin_name":"test","host_name":"` +
+ hostName +
+ `"},{"timing":20000000000,"origin_name":"test2","host_name":"` +
+ hostName +
+ `"},{"timing":30000000000,"origin_name":"test3","host_name":"` +
+ hostName +
+ `"}]}`
assert.Equal(t, expected, serialized)
}
func TestBlobTrace_Deserialize(t *testing.T) {
- hostName = util.PtrToString("test_machine")
- serialized := "{\"stacks\":[{\"timing\":10000000000,\"origin_name\":\"test\"},{\"timing\":20000000000,\"origin_name\":\"test2\"},{\"timing\":30000000000,\"origin_name\":\"test3\"}]}"
+ serialized := `{"stacks":[{"timing":10000000000,"origin_name":"test"},{"timing":20000000000,"origin_name":"test2"},{"timing":30000000000,"origin_name":"test3"}]}`
stack, err := Deserialize(serialized)
- assert.NoError(t, err)
+ require.NoError(t, err)
assert.Len(t, stack.Stacks, 3)
assert.Equal(t, stack.Stacks[0].Timing, 10*time.Second)
assert.Equal(t, stack.Stacks[1].Timing, 20*time.Second)


@@ -1,4 +1,3 @@
- //go:build linux
// +build linux
package store
@@ -10,7 +9,7 @@ import (
)
func timespecToTime(ts syscall.Timespec) time.Time {
- return time.Unix(ts.Sec, ts.Nsec)
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {


@@ -3,13 +3,12 @@ package store
import (
"time"
- "github.com/lbryio/reflector.go/internal/metrics"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/shared"
log "github.com/sirupsen/logrus"
+ "github.com/lbryio/reflector.go/internal/metrics"
)
// CachingStore combines two stores, typically a local and a remote store, to improve performance.
@@ -65,11 +64,16 @@ func (c *CachingStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
if err != nil {
return nil, trace.Stack(time.Since(start), c.Name()), err
}
- // do not do this async unless you're prepared to deal with mayhem
- err = c.cache.Put(hash, blob)
- if err != nil {
- log.Errorf("error saving blob to underlying cache: %s", errors.FullTrace(err))
- }
+ // there is no need to wait for the blob to be stored before we return it
+ // TODO: however this should be refactored to limit the amount of routines that the process can spawn to avoid a possible DoS
+ metrics.RoutinesQueue.WithLabelValues("store", "cache-put").Inc()
+ go func() {
+ defer metrics.RoutinesQueue.WithLabelValues("store", "cache-put").Dec()
+ err = c.cache.Put(hash, blob)
+ if err != nil {
+ log.Errorf("error saving blob to underlying cache: %s", errors.FullTrace(err))
+ }
+ }()
return blob, trace.Stack(time.Since(start), c.Name()), nil
}
@@ -104,4 +108,5 @@ func (c *CachingStore) Delete(hash string) error {
func (c *CachingStore) Shutdown() {
c.origin.Shutdown()
c.cache.Shutdown()
+ return
}
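The right-hand Get spawns one goroutine per cache fill, and its TODO flags the unbounded-spawn risk. One way to bound it is a buffered channel used as a semaphore; a sketch under that assumption (cachePutSem, maxCachePuts, and cachePutAsync are illustrative names, not part of the codebase):

const maxCachePuts = 100

// cachePutSem holds one token per in-flight async cache write.
var cachePutSem = make(chan struct{}, maxCachePuts)

func (c *CachingStore) cachePutAsync(hash string, blob stream.Blob) {
	select {
	case cachePutSem <- struct{}{}: // acquire a slot
		go func() {
			defer func() { <-cachePutSem }() // release the slot
			if err := c.cache.Put(hash, blob); err != nil {
				log.Errorf("error saving blob to underlying cache: %s", errors.FullTrace(err))
			}
		}()
	default:
		// every slot is busy: skip caching this blob rather than spawn
		// an unbounded number of goroutines
	}
}

The blob is still returned to the caller either way; only the cache fill is throttled.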


@@ -6,9 +6,8 @@ import (
"testing"
"time"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/shared"
)
func TestCachingStore_Put(t *testing.T) {


@@ -2,16 +2,16 @@ package store
import (
"io"
+ "io/ioutil"
"net/http"
"time"
+ "github.com/lbryio/lbry.go/v2/extras/errors"
+ "github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/shared"
- "github.com/lbryio/lbry.go/v2/extras/errors"
- "github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
)
@@ -36,7 +36,8 @@ func (c *CloudFrontROStore) Has(hash string) (bool, error) {
if err != nil {
return false, err
}
- defer func() { _ = body.Close() }()
+ defer body.Close()
switch status {
case http.StatusNotFound, http.StatusForbidden:
return false, nil
@@ -59,12 +60,12 @@ func (c *CloudFrontROStore) Get(hash string) (stream.Blob, shared.BlobTrace, err
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), err
}
- defer func() { _ = body.Close() }()
+ defer body.Close()
switch status {
case http.StatusNotFound, http.StatusForbidden:
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(ErrBlobNotFound)
case http.StatusOK:
- b, err := io.ReadAll(body)
+ b, err := ioutil.ReadAll(body)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(err)
}
@@ -81,7 +82,7 @@ func (c *CloudFrontROStore) cfRequest(method, hash string) (int, io.ReadCloser,
if err != nil {
return 0, nil, errors.Err(err)
}
- req.Header.Add("User-Agent", "reflector.go/"+meta.Version())
+ req.Header.Add("User-Agent", "reflector.go/"+meta.Version)
res, err := http.DefaultClient.Do(req)
if err != nil {
@@ -105,4 +106,5 @@ func (c *CloudFrontROStore) Delete(_ string) error {
// Shutdown shuts down the store gracefully
func (c *CloudFrontROStore) Shutdown() {
+ return
}


@@ -3,9 +3,8 @@ package store
import (
"time"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/shared"
)
// CloudFrontRWStore combines a Cloudfront and an S3 store. Reads go to Cloudfront/Wasabi, writes go to S3.
@@ -59,4 +58,5 @@ func (c *CloudFrontRWStore) Delete(hash string) error {
func (c *CloudFrontRWStore) Shutdown() {
c.s3.Shutdown()
c.cf.Shutdown()
+ return
}


@@ -5,11 +5,10 @@ import (
"sync"
"time"
- "github.com/lbryio/reflector.go/db"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/db"
+ "github.com/lbryio/reflector.go/shared"
log "github.com/sirupsen/logrus"
)
@@ -112,22 +111,22 @@ func (d *DBBackedStore) Block(hash string) error {
return err
}
- //has, err := d.db.HasBlob(hash, false)
- //if err != nil {
- // return err
- //}
- //
- //if has {
- // err = d.blobs.Delete(hash)
- // if err != nil {
- // return err
- // }
- //
- // err = d.db.Delete(hash)
- // if err != nil {
- // return err
- // }
- //}
+ has, err := d.db.HasBlob(hash, false)
+ if err != nil {
+ return err
+ }
+ if has {
+ err = d.blobs.Delete(hash)
+ if err != nil {
+ return err
+ }
+ err = d.db.Delete(hash)
+ if err != nil {
+ return err
+ }
+ }
return d.markBlocked(hash)
}
@@ -198,4 +197,5 @@ func (d *DBBackedStore) initBlocked() error {
// Shutdown shuts down the store gracefully
func (d *DBBackedStore) Shutdown() {
d.blobs.Shutdown()
+ return
}


@@ -1,17 +1,42 @@
package store
import (
+ "crypto/sha512"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
"os"
"path"
+ "runtime"
"time"
- "github.com/lbryio/reflector.go/shared"
- "github.com/lbryio/reflector.go/store/speedwalk"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/shared"
+ "github.com/lbryio/reflector.go/store/speedwalk"
+ log "github.com/sirupsen/logrus"
+ "go.uber.org/atomic"
)
+ func init() {
+ writeCh = make(chan writeRequest)
+ for i := 0; i < runtime.NumCPU(); i++ {
+ go func() {
+ for {
+ select {
+ case r := <-writeCh:
+ err := ioutil.WriteFile(r.filename, r.data, r.perm)
+ if err != nil {
+ log.Errorf("could not write file %s to disk, failed with error: %s", r.filename, err.Error())
+ }
+ }
+ }
+ }()
+ }
+ }
+ var writeCh chan writeRequest
// DiskStore stores blobs on a local disk
type DiskStore struct {
// the location of blobs on disk
@@ -21,8 +46,12 @@ type DiskStore struct {
// true if initOnce ran, false otherwise
initialized bool
+ concurrentChecks atomic.Int32
}
+ const maxConcurrentChecks = 3
// NewDiskStore returns an initialized file disk store pointer.
func NewDiskStore(dir string, prefixLength int) *DiskStore {
return &DiskStore{
@@ -61,16 +90,50 @@ func (d *DiskStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), err
}
- blob, err := os.ReadFile(d.path(hash))
+ blob, err := ioutil.ReadFile(d.path(hash))
if err != nil {
if os.IsNotExist(err) {
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(ErrBlobNotFound)
}
return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(err)
}
+ // this is a rather poor yet effective way of throttling how many blobs can be checked concurrently
+ // poor because there is a possible race condition between the check and the actual +1
+ if d.concurrentChecks.Load() < maxConcurrentChecks {
+ d.concurrentChecks.Add(1)
+ defer d.concurrentChecks.Sub(1)
+ hashBytes := sha512.Sum384(blob)
+ readHash := hex.EncodeToString(hashBytes[:])
+ if hash != readHash {
+ message := fmt.Sprintf("[%s] found a broken blob while reading from disk. Actual hash: %s", hash, readHash)
+ log.Errorf("%s", message)
+ err := d.Delete(hash)
+ if err != nil {
+ return nil, shared.NewBlobTrace(time.Since(start), d.Name()), err
+ }
+ return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(message)
+ }
+ }
return blob, shared.NewBlobTrace(time.Since(start), d.Name()), nil
}
+ // Put stores the blob on disk
+ func (d *DiskStore) Put(hash string, blob stream.Blob) error {
+ err := d.initOnce()
+ if err != nil {
+ return err
+ }
+ err = d.ensureDirExists(d.dir(hash))
+ if err != nil {
+ return err
+ }
+ writeFile(d.path(hash), blob, 0644)
+ return errors.Err(err)
+ }
// PutSD stores the sd blob on the disk
func (d *DiskStore) PutSD(hash string, blob stream.Blob) error {
return d.Put(hash, blob)
@@ -111,15 +174,11 @@ func (d *DiskStore) dir(hash string) string {
}
return path.Join(d.blobDir, hash[:d.prefixLength])
}
- func (d *DiskStore) tmpDir(hash string) string {
- return path.Join(d.blobDir, "tmp")
- }
func (d *DiskStore) path(hash string) string {
return path.Join(d.dir(hash), hash)
}
- func (d *DiskStore) tmpPath(hash string) string {
- return path.Join(d.tmpDir(hash), hash)
- }
func (d *DiskStore) ensureDirExists(dir string) error {
return errors.Err(os.MkdirAll(dir, 0755))
}
@@ -133,14 +192,26 @@ func (d *DiskStore) initOnce() error {
if err != nil {
return err
}
- err = d.ensureDirExists(path.Join(d.blobDir, "tmp"))
- if err != nil {
- return err
- }
d.initialized = true
return nil
}
+ type writeRequest struct {
+ filename string
+ data []byte
+ perm os.FileMode
+ }
// Shutdown shuts down the store gracefully
func (d *DiskStore) Shutdown() {
+ return
+ }
+ func writeFile(filename string, data []byte, perm os.FileMode) {
+ writeCh <- writeRequest{
+ filename: filename,
+ data: data,
+ perm: perm,
+ }
}
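The throttle comment above concedes a race between Load and Add: two readers can both observe 2 and push the counter to 4. Incrementing first and testing the result closes that window. A sketch using the same go.uber.org/atomic counter (tryAcquireCheckSlot is an illustrative helper, not part of the codebase):

// tryAcquireCheckSlot reserves an integrity-check slot, or reports failure.
// Inc-then-test is race-free: the counter never settles above the limit.
func (d *DiskStore) tryAcquireCheckSlot() bool {
	if d.concurrentChecks.Inc() > maxConcurrentChecks {
		d.concurrentChecks.Dec() // over the limit: give the slot back
		return false
	}
	return true
}

Get would then run the hash check only when tryAcquireCheckSlot returns true, and defer d.concurrentChecks.Dec() in that branch.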


@@ -1,6 +1,7 @@
package store
import (
+ "io/ioutil"
"os"
"path"
"path/filepath"
@@ -13,9 +14,9 @@ import (
)
func TestDiskStore_Get(t *testing.T) {
- tmpDir, err := os.MkdirTemp("", "reflector_test_*")
+ tmpDir, err := ioutil.TempDir("", "reflector_test_*")
require.NoError(t, err)
- defer func() { _ = os.RemoveAll(tmpDir) }()
+ defer os.RemoveAll(tmpDir)
d := NewDiskStore(tmpDir, 2)
hash := "f428b8265d65dad7f8ffa52922bba836404cbd62f3ecfe10adba6b444f8f658938e54f5981ac4de39644d5b93d89a94b"
@@ -24,7 +25,7 @@ func TestDiskStore_Get(t *testing.T) {
expectedPath := path.Join(tmpDir, hash[:2], hash)
err = os.MkdirAll(filepath.Dir(expectedPath), os.ModePerm)
require.NoError(t, err)
- err = os.WriteFile(expectedPath, data, os.ModePerm)
+ err = ioutil.WriteFile(expectedPath, data, os.ModePerm)
require.NoError(t, err)
blob, _, err := d.Get(hash)
@@ -33,9 +34,9 @@ func TestDiskStore_Get(t *testing.T) {
}
func TestDiskStore_GetNonexistentBlob(t *testing.T) {
- tmpDir, err := os.MkdirTemp("", "reflector_test_*")
+ tmpDir, err := ioutil.TempDir("", "reflector_test_*")
require.NoError(t, err)
- defer func() { _ = os.RemoveAll(tmpDir) }()
+ defer os.RemoveAll(tmpDir)
d := NewDiskStore(tmpDir, 2)
blob, _, err := d.Get("nonexistent")


@@ -1,42 +0,0 @@
//go:build darwin
// +build darwin
package store
import (
"bytes"
"io"
"os"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
var openFileFlags = os.O_WRONLY | os.O_CREATE
// Put stores the blob on disk
func (d *DiskStore) Put(hash string, blob stream.Blob) error {
err := d.initOnce()
if err != nil {
return err
}
err = d.ensureDirExists(d.dir(hash))
if err != nil {
return err
}
// Open file with O_DIRECT
f, err := os.OpenFile(d.tmpPath(hash), openFileFlags, 0644)
if err != nil {
return errors.Err(err)
}
defer f.Close()
_, err = io.Copy(f, bytes.NewReader(blob))
if err != nil {
return errors.Err(err)
}
err = os.Rename(d.tmpPath(hash), d.path(hash))
return errors.Err(err)
}


@@ -1,49 +0,0 @@
//go:build linux
// +build linux
package store
import (
"bytes"
"io"
"os"
"syscall"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/brk0v/directio"
)
var openFileFlags = os.O_WRONLY | os.O_CREATE | syscall.O_DIRECT
// Put stores the blob on disk
func (d *DiskStore) Put(hash string, blob stream.Blob) error {
err := d.initOnce()
if err != nil {
return err
}
err = d.ensureDirExists(d.dir(hash))
if err != nil {
return err
}
// Open file with O_DIRECT
f, err := os.OpenFile(d.tmpPath(hash), openFileFlags, 0644)
if err != nil {
return errors.Err(err)
}
defer func() { _ = f.Close() }()
dio, err := directio.New(f)
if err != nil {
return errors.Err(err)
}
defer func() { _ = dio.Flush() }()
_, err = io.Copy(dio, bytes.NewReader(blob))
if err != nil {
return errors.Err(err)
}
err = os.Rename(d.tmpPath(hash), d.path(hash))
return errors.Err(err)
}
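Both removed Put variants share one idea: write the blob to a tmp path first, then rename it into place. Because rename within a single filesystem is atomic, a concurrent Get sees either no file or a complete one, never a half-written blob. The pattern in isolation (a sketch, with os.WriteFile standing in for the O_DIRECT plumbing):

// atomicWrite is a sketch of the temp-file-then-rename pattern above.
func atomicWrite(tmpPath, finalPath string, data []byte) error {
	if err := os.WriteFile(tmpPath, data, 0644); err != nil {
		return err
	}
	// the rename is only atomic when tmpPath and finalPath are on the same
	// filesystem, which is why the tmp dir lives inside blobDir
	return os.Rename(tmpPath, finalPath)
}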


@@ -1,110 +0,0 @@
package store
import (
"fmt"
"os"
"reflect"
"testing"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const cacheMaxSize = 3
func getTestGcacheStore() (*GcacheStore, *MemStore) {
m := NewMemStore()
return NewGcacheStore("test", m, cacheMaxSize, LFU), m
}
func TestGcacheStore_Eviction(t *testing.T) {
lfu, mem := getTestGcacheStore()
b := []byte("x")
for i := 0; i < 3; i++ {
err := lfu.Put(fmt.Sprintf("%d", i), b)
require.NoError(t, err)
for j := 0; j < 3-i; j++ {
_, _, err = lfu.Get(fmt.Sprintf("%d", i))
require.NoError(t, err)
}
}
for k, v := range map[string]bool{
"0": true,
"1": true,
"2": true,
} {
has, err := lfu.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
err := lfu.Put("3", b)
require.NoError(t, err)
for k, v := range map[string]bool{
"0": true,
"1": true,
"2": false,
"3": true,
} {
has, err := lfu.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
assert.Equal(t, cacheMaxSize, len(mem.Debug()))
err = lfu.Delete("0")
assert.NoError(t, err)
err = lfu.Delete("1")
assert.NoError(t, err)
err = lfu.Delete("3")
assert.NoError(t, err)
assert.Equal(t, 0, len(mem.Debug()))
}
func TestGcacheStore_UnderlyingBlobMissing(t *testing.T) {
lfu, mem := getTestGcacheStore()
hash := "hash"
b := []byte("this is a blob of stuff")
err := lfu.Put(hash, b)
require.NoError(t, err)
err = mem.Delete(hash)
require.NoError(t, err)
// hash still exists in lru
assert.True(t, lfu.cache.Has(hash))
blob, _, err := lfu.Get(hash)
assert.Nil(t, blob)
assert.True(t, errors.Is(err, ErrBlobNotFound), "expected (%s) %s, got (%s) %s",
reflect.TypeOf(ErrBlobNotFound).String(), ErrBlobNotFound.Error(),
reflect.TypeOf(err).String(), err.Error())
// lru.Get() removes hash if underlying store doesn't have it
assert.False(t, lfu.cache.Has(hash))
}
func TestGcacheStore_loadExisting(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "reflector_test_*")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(tmpDir) }()
d := NewDiskStore(tmpDir, 2)
hash := "hash"
b := []byte("this is a blob of stuff")
err = d.Put(hash, b)
require.NoError(t, err)
existing, err := d.list()
require.NoError(t, err)
require.Equal(t, 1, len(existing), "blob should exist in cache")
assert.Equal(t, hash, existing[0])
lfu := NewGcacheStore("test", d, 3, LFU) // lru should load existing blobs when it's created
time.Sleep(100 * time.Millisecond) // async load so let's wait...
has, err := lfu.Has(hash)
require.NoError(t, err)
assert.True(t, has, "hash should be loaded from disk store but it's not")
}


@@ -2,8 +2,8 @@ package store
import (
"bytes"
- "context"
"io"
+ "io/ioutil"
"net"
"net/http"
"sync"
@@ -16,24 +16,22 @@ import (
"github.com/lbryio/lbry.go/v2/stream"
)
- // HttpStore is a store that works on top of the HTTP protocol
+ // NoopStore is a store that does nothing
type HttpStore struct {
upstream string
httpClient *http.Client
- edgeToken string
}
- func NewHttpStore(upstream, edgeToken string) *HttpStore {
+ func NewHttpStore(upstream string) *HttpStore {
return &HttpStore{
upstream: "http://" + upstream,
httpClient: getClient(),
- edgeToken: edgeToken,
}
}
const nameHttp = "http"
- func (n *HttpStore) Name() string { return nameHttp }
+ func (n *HttpStore) Name() string { return nameNoop }
func (n *HttpStore) Has(hash string) (bool, error) {
url := n.upstream + "/blob?hash=" + hash
@@ -46,7 +44,7 @@ func (n *HttpStore) Has(hash string) (bool, error) {
if err != nil {
return false, errors.Err(err)
}
- defer func() { _ = res.Body.Close() }()
+ defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return false, nil
}
@@ -55,7 +53,7 @@ func (n *HttpStore) Has(hash string) (bool, error) {
}
var body []byte
if res.Body != nil {
- body, _ = io.ReadAll(res.Body)
+ body, _ = ioutil.ReadAll(res.Body)
}
return false, errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
@@ -63,9 +61,6 @@ func (n *HttpStore) Has(hash string) (bool, error) {
func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
url := n.upstream + "/blob?hash=" + hash
- if n.edgeToken != "" {
- url += "&edge_token=" + n.edgeToken
- }
req, err := http.NewRequest("GET", url, nil)
if err != nil {
@@ -76,7 +71,7 @@ func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
}
- defer func() { _ = res.Body.Close() }()
+ defer res.Body.Close()
tmp := getBuffer()
defer putBuffer(tmp)
serialized := res.Header.Get("Via")
@@ -105,7 +100,7 @@ func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
}
var body []byte
if res.Body != nil {
- body, _ = io.ReadAll(res.Body)
+ body, _ = ioutil.ReadAll(res.Body)
}
return nil, trace.Stack(time.Since(start), n.Name()), errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
@@ -120,7 +115,7 @@ func (n *HttpStore) PutSD(string, stream.Blob) error {
func (n *HttpStore) Delete(string) error {
return shared.ErrNotImplemented
}
- func (n *HttpStore) Shutdown() {}
+ func (n *HttpStore) Shutdown() { return }
// buffer pool to reduce GC
// https://www.captaincodeman.com/2017/06/02/golang-buffer-pool-gotcha
@@ -143,19 +138,14 @@ func putBuffer(buf *bytes.Buffer) {
buffers.Put(buf)
}
- func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
- dialer := &net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }
- return dialer.DialContext(ctx, network, address)
- }
// getClient gets an http client that's customized to be more performant when dealing with blobs of 2MB in size (most of our blobs)
func getClient() *http.Client {
// Customize the Transport to have larger connection pool
defaultTransport := &http.Transport{
- DialContext: dialContext,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
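Get copies the response into a pooled buffer via getBuffer/putBuffer. The pool definitions themselves are elided from this diff; a sketch matching the calls shown and the linked buffer-pool-gotcha post (store pointers in the pool, reset on return):

var buffers = sync.Pool{
	New: func() interface{} {
		return &bytes.Buffer{}
	},
}

func getBuffer() *bytes.Buffer {
	return buffers.Get().(*bytes.Buffer)
}

func putBuffer(buf *bytes.Buffer) {
	buf.Reset() // drop contents, keep capacity for the next ~2MB blob
	buffers.Put(buf)
}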


@@ -3,19 +3,19 @@ package store
import (
"time"
- "github.com/lbryio/reflector.go/internal/metrics"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/internal/metrics"
+ "github.com/lbryio/reflector.go/shared"
)
- // ITTTStore performs an operation on this storage, if this fails, it attempts to run it on that
+ // ITTT store performs an operation on this storage, if this fails, it attempts to run it on that
type ITTTStore struct {
this, that BlobStore
}
- // NewITTTStore returns a new instance of the IF THIS THAN THAT store
+ // NewCachingStore makes a new caching disk store and returns a pointer to it.
func NewITTTStore(this, that BlobStore) *ITTTStore {
return &ITTTStore{
this: this,
@@ -28,7 +28,7 @@ const nameIttt = "ittt"
// Name is the cache type name
func (c *ITTTStore) Name() string { return nameIttt }
- // Has checks in this for a hash, if it fails it checks in that. It returns true if either store has it.
+ // Has checks the cache and then the origin for a hash. It returns true if either store has it.
func (c *ITTTStore) Has(hash string) (bool, error) {
has, err := c.this.Has(hash)
if err != nil || !has {
@@ -70,4 +70,6 @@ func (c *ITTTStore) Delete(hash string) error {
}
// Shutdown shuts down the store gracefully
- func (c *ITTTStore) Shutdown() {}
+ func (c *ITTTStore) Shutdown() {
+ return
+ }
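The Get method of ITTTStore is elided from this hunk; the read path implied by the struct and the Has logic above is "try this, fall back to that". A sketch of that shape (trace bookkeeping omitted; getSketch is illustrative, not the verbatim implementation):

func (c *ITTTStore) getSketch(hash string) (stream.Blob, error) {
	blob, _, err := c.this.Get(hash)
	if err == nil {
		return blob, nil // "this" had it: fastest path
	}
	if !errors.Is(err, ErrBlobNotFound) {
		return nil, err // real failure, don't mask it with a fallback
	}
	blob, _, err = c.that.Get(hash) // "that" is the slower/remote fallback
	return blob, err
}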


@@ -3,63 +3,35 @@ package store
import (
"time"
- "github.com/lbryio/reflector.go/internal/metrics"
- "github.com/lbryio/reflector.go/shared"
+ "github.com/bparli/lfuda-go"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
- "github.com/bluele/gcache"
+ "github.com/lbryio/reflector.go/internal/metrics"
+ "github.com/lbryio/reflector.go/shared"
"github.com/sirupsen/logrus"
)
- // GcacheStore adds a max cache size and Greedy-Dual-Size-Frequency cache eviction strategy to a BlobStore
- type GcacheStore struct {
+ // LRUStore adds a max cache size and LRU eviction to a BlobStore
+ type LFUDAStore struct {
// underlying store
store BlobStore
- // cache implementation
- cache gcache.Cache
+ // lfuda implementation
+ lfuda *lfuda.Cache
}
- type EvictionStrategy int
- const (
- //LFU Discards the least frequently used items first.
- LFU EvictionStrategy = iota
- //ARC Constantly balances between LRU and LFU, to improve the combined result.
- ARC
- //LRU Discards the least recently used items first.
- LRU
- //SIMPLE has no clear priority for evict cache. It depends on key-value map order.
- SIMPLE
- )
- // NewGcacheStore initialize a new LRUStore
- func NewGcacheStore(component string, store BlobStore, maxSize int, strategy EvictionStrategy) *GcacheStore {
- cacheBuilder := gcache.New(maxSize)
- var cache gcache.Cache
- evictFunc := func(key interface{}, value interface{}) {
- logrus.Infof("evicting %s", key)
- metrics.CacheLRUEvictCount.With(metrics.CacheLabels(store.Name(), component)).Inc()
- _ = store.Delete(key.(string)) // TODO: log this error. may happen if underlying entry is gone but cache entry still there
- }
- switch strategy {
- case LFU:
- cache = cacheBuilder.LFU().EvictedFunc(evictFunc).Build()
- case ARC:
- cache = cacheBuilder.ARC().EvictedFunc(evictFunc).Build()
- case LRU:
- cache = cacheBuilder.LRU().EvictedFunc(evictFunc).Build()
- case SIMPLE:
- cache = cacheBuilder.Simple().EvictedFunc(evictFunc).Build()
- }
- l := &GcacheStore{
- store: store,
- cache: cache,
- }
+ // NewLRUStore initialize a new LRUStore
+ func NewLFUDAStore(component string, store BlobStore, maxSize float64) *LFUDAStore {
+ lfuda := lfuda.NewGDSFWithEvict(maxSize, func(key interface{}, value interface{}) {
+ metrics.CacheLRUEvictCount.With(metrics.CacheLabels(store.Name(), component)).Inc()
+ _ = store.Delete(key.(string)) // TODO: log this error. may happen if underlying entry is gone but cache entry still there
+ })
+ l := &LFUDAStore{
+ store: store,
+ lfuda: lfuda,
+ }
go func() {
if lstr, ok := store.(lister); ok {
- err := l.loadExisting(lstr, maxSize)
+ err := l.loadExisting(lstr, int(maxSize))
if err != nil {
panic(err) // TODO: what should happen here? panic? return nil? just keep going?
}
@@ -69,34 +41,34 @@ func NewGcacheStore(component string, store BlobStore, maxSize int, strategy Evi
return l
}
- const nameGcache = "gcache"
+ const nameLFUDA = "lfuda"
// Name is the cache type name
- func (l *GcacheStore) Name() string { return nameGcache }
+ func (l *LFUDAStore) Name() string { return nameLFUDA }
// Has returns whether the blob is in the store, without updating the recent-ness.
- func (l *GcacheStore) Has(hash string) (bool, error) {
- return l.cache.Has(hash), nil
+ func (l *LFUDAStore) Has(hash string) (bool, error) {
+ return l.lfuda.Contains(hash), nil
}
// Get returns the blob or an error if the blob doesn't exist.
- func (l *GcacheStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
+ func (l *LFUDAStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
- _, err := l.cache.Get(hash)
- if err != nil {
+ _, has := l.lfuda.Get(hash)
+ if !has {
return nil, shared.NewBlobTrace(time.Since(start), l.Name()), errors.Err(ErrBlobNotFound)
}
blob, stack, err := l.store.Get(hash)
if errors.Is(err, ErrBlobNotFound) {
// Blob disappeared from underlying store
- l.cache.Remove(hash)
+ l.lfuda.Remove(hash)
}
return blob, stack.Stack(time.Since(start), l.Name()), err
}
// Put stores the blob. Following LFUDA rules it's not guaranteed that a SET will store the value!!!
- func (l *GcacheStore) Put(hash string, blob stream.Blob) error {
- _ = l.cache.Set(hash, true)
+ func (l *LFUDAStore) Put(hash string, blob stream.Blob) error {
+ l.lfuda.Set(hash, true)
has, _ := l.Has(hash)
if has {
err := l.store.Put(hash, blob)
@@ -108,8 +80,8 @@ func (l *GcacheStore) Put(hash string, blob stream.Blob) error {
}
// PutSD stores the sd blob. Following LFUDA rules it's not guaranteed that a SET will store the value!!!
- func (l *GcacheStore) PutSD(hash string, blob stream.Blob) error {
- _ = l.cache.Set(hash, true)
+ func (l *LFUDAStore) PutSD(hash string, blob stream.Blob) error {
+ l.lfuda.Set(hash, true)
has, _ := l.Has(hash)
if has {
err := l.store.PutSD(hash, blob)
@@ -121,7 +93,7 @@ func (l *GcacheStore) PutSD(hash string, blob stream.Blob) error {
}
// Delete deletes the blob from the store
- func (l *GcacheStore) Delete(hash string) error {
+ func (l *LFUDAStore) Delete(hash string) error {
err := l.store.Delete(hash)
if err != nil {
return err
@@ -130,12 +102,12 @@ func (l *GcacheStore) Delete(hash string) error {
// This must come after store.Delete()
// Remove triggers onEvict function, which also tries to delete blob from store
// We need to delete it manually first so any errors can be propagated up
- l.cache.Remove(hash)
+ l.lfuda.Remove(hash)
return nil
}
// loadExisting imports existing blobs from the underlying store into the LRU cache
- func (l *GcacheStore) loadExisting(store lister, maxItems int) error {
+ func (l *LFUDAStore) loadExisting(store lister, maxItems int) error {
logrus.Infof("loading at most %d items", maxItems)
existing, err := store.list()
if err != nil {
@@ -144,20 +116,17 @@ func (l *GcacheStore) loadExisting(store lister, maxItems int) error {
logrus.Infof("read %d files from underlying store", len(existing))
added := 0
- for i, h := range existing {
- _ = l.cache.Set(h, true)
+ for _, h := range existing {
+ l.lfuda.Set(h, true)
added++
if maxItems > 0 && added >= maxItems { // underlying cache is bigger than the cache
- err := l.Delete(h)
- logrus.Infof("deleted overflowing blob: %s (%d/%d)", h, i, len(existing))
- if err != nil {
- logrus.Warnf("error while deleting a blob that's overflowing the cache: %s", err.Error())
- }
+ break
}
}
return nil
}
// Shutdown shuts down the store gracefully
- func (l *GcacheStore) Shutdown() {
+ func (l *LFUDAStore) Shutdown() {
+ return
}
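Wiring either cache wrapper up looks the same; with the left-hand constructor the eviction strategy is picked at build time from the EvictionStrategy constants above. A usage sketch (path and size are illustrative):

// newEdgeCache wraps a disk store in an ARC-evicting cache of 100k blobs.
func newEdgeCache() *GcacheStore {
	disk := NewDiskStore("/mnt/blobs", 2) // shard blobs by 2-char hash prefix
	return NewGcacheStore("edge", disk, 100000, ARC)
}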

store/lfuda_test.go (new file, 136 lines)

@@ -0,0 +1,136 @@
package store
import (
"io/ioutil"
"os"
"reflect"
"testing"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const cacheMaxSize = 3
func getTestLFUDAStore() (*LFUDAStore, *MemStore) {
m := NewMemStore()
return NewLFUDAStore("test", m, cacheMaxSize), m
}
func TestFUDAStore_Eviction(t *testing.T) {
lfuda, mem := getTestLFUDAStore()
b := []byte("x")
err := lfuda.Put("one", b)
require.NoError(t, err)
err = lfuda.Put("two", b)
require.NoError(t, err)
err = lfuda.Put("three", b)
require.NoError(t, err)
err = lfuda.Put("four", b)
require.NoError(t, err)
err = lfuda.Put("five", b)
require.NoError(t, err)
err = lfuda.Put("five", b)
require.NoError(t, err)
err = lfuda.Put("four", b)
require.NoError(t, err)
err = lfuda.Put("two", b)
require.NoError(t, err)
_, _, err = lfuda.Get("five")
require.NoError(t, err)
_, _, err = lfuda.Get("four")
require.NoError(t, err)
_, _, err = lfuda.Get("two")
require.NoError(t, err)
assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))
for k, v := range map[string]bool{
"one": false,
"two": true,
"three": false,
"four": true,
"five": true,
"six": false,
} {
has, err := lfuda.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
lfuda.Get("two") // touch so it stays in cache
lfuda.Get("five") // touch so it stays in cache
lfuda.Put("six", b)
assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))
for k, v := range map[string]bool{
"one": false,
"two": true,
"three": false,
"four": false,
"five": true,
"six": true,
} {
has, err := lfuda.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
err = lfuda.Delete("six")
assert.NoError(t, err)
err = lfuda.Delete("five")
assert.NoError(t, err)
err = lfuda.Delete("two")
assert.NoError(t, err)
assert.Equal(t, 0, len(mem.Debug()))
}
func TestFUDAStore_UnderlyingBlobMissing(t *testing.T) {
lfuda, mem := getTestLFUDAStore()
hash := "hash"
b := []byte("this is a blob of stuff")
err := lfuda.Put(hash, b)
require.NoError(t, err)
err = mem.Delete(hash)
require.NoError(t, err)
// hash still exists in lru
assert.True(t, lfuda.lfuda.Contains(hash))
blob, _, err := lfuda.Get(hash)
assert.Nil(t, blob)
assert.True(t, errors.Is(err, ErrBlobNotFound), "expected (%s) %s, got (%s) %s",
reflect.TypeOf(ErrBlobNotFound).String(), ErrBlobNotFound.Error(),
reflect.TypeOf(err).String(), err.Error())
// lru.Get() removes hash if underlying store doesn't have it
assert.False(t, lfuda.lfuda.Contains(hash))
}
func TestFUDAStore_loadExisting(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "reflector_test_*")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
d := NewDiskStore(tmpDir, 2)
hash := "hash"
b := []byte("this is a blob of stuff")
err = d.Put(hash, b)
require.NoError(t, err)
existing, err := d.list()
require.NoError(t, err)
require.Equal(t, 1, len(existing), "blob should exist in cache")
assert.Equal(t, hash, existing[0])
lfuda := NewLFUDAStore("test", d, 3) // lru should load existing blobs when it's created
time.Sleep(100 * time.Millisecond) // async load so let's wait...
has, err := lfuda.Has(hash)
require.NoError(t, err)
assert.True(t, has, "hash should be loaded from disk store but it's not")
}

store/lru.go (new file, 129 lines)

@@ -0,0 +1,129 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/bluele/gcache"
"github.com/sirupsen/logrus"
)
// LRUStore adds a max cache size and LRU eviction to a BlobStore
type LRUStore struct {
// underlying store
store BlobStore
// lru implementation
lru gcache.Cache
}
// NewLRUStore initialize a new LRUStore
func NewLRUStore(component string, store BlobStore, maxItems int) *LRUStore {
l := &LRUStore{
store: store,
}
l.lru = gcache.New(maxItems).ARC().EvictedFunc(func(key, value interface{}) {
metrics.CacheLRUEvictCount.With(metrics.CacheLabels(l.Name(), component)).Inc()
_ = store.Delete(key.(string))
}).Build()
go func() {
if lstr, ok := store.(lister); ok {
err := l.loadExisting(lstr, maxItems)
if err != nil {
panic(err) // TODO: what should happen here? panic? return nil? just keep going?
}
}
}()
return l
}
// Name is the cache type name
func (l *LRUStore) Name() string {
return "lru_" + l.store.Name()
}
// Has returns whether the blob is in the store, without updating the recent-ness.
func (l *LRUStore) Has(hash string) (bool, error) {
return l.lru.Has(hash), nil
}
// Get returns the blob or an error if the blob doesn't exist.
func (l *LRUStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
_, err := l.lru.Get(hash)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), l.Name()), errors.Err(ErrBlobNotFound)
}
blob, stack, err := l.store.Get(hash)
if errors.Is(err, ErrBlobNotFound) {
// Blob disappeared from underlying store
l.lru.Remove(hash)
}
return blob, stack.Stack(time.Since(start), l.Name()), err
}
// Put stores the blob
func (l *LRUStore) Put(hash string, blob stream.Blob) error {
err := l.store.Put(hash, blob)
if err != nil {
return err
}
l.lru.Set(hash, true)
return nil
}
// PutSD stores the sd blob
func (l *LRUStore) PutSD(hash string, blob stream.Blob) error {
err := l.store.PutSD(hash, blob)
if err != nil {
return err
}
_ = l.lru.Set(hash, true)
return nil
}
// Delete deletes the blob from the store
func (l *LRUStore) Delete(hash string) error {
err := l.store.Delete(hash)
if err != nil {
return err
}
// This must come after store.Delete()
// Remove triggers onEvict function, which also tries to delete blob from store
// We need to delete it manually first so any errors can be propagated up
l.lru.Remove(hash)
return nil
}
// loadExisting imports existing blobs from the underlying store into the LRU cache
func (l *LRUStore) loadExisting(store lister, maxItems int) error {
logrus.Infof("loading at most %d items", maxItems)
existing, err := store.list()
if err != nil {
return err
}
logrus.Infof("read %d files from disk", len(existing))
added := 0
for _, h := range existing {
l.lru.Set(h, true)
added++
if maxItems > 0 && added >= maxItems { // underlying cache is bigger than LRU cache
break
}
}
return nil
}
// Shutdown shuts down the store gracefully
func (l *LRUStore) Shutdown() {
return
}

store/lru_test.go (new file, 123 lines)

@@ -0,0 +1,123 @@
package store
import (
"io/ioutil"
"os"
"reflect"
"testing"
"time"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const cacheMaxBlobs = 3
func getTestLRUStore() (*LRUStore, *MemStore) {
m := NewMemStore()
return NewLRUStore("test", m, 3), m
}
func TestLRUStore_Eviction(t *testing.T) {
lru, mem := getTestLRUStore()
b := []byte("x")
err := lru.Put("one", b)
require.NoError(t, err)
err = lru.Put("two", b)
require.NoError(t, err)
err = lru.Put("three", b)
require.NoError(t, err)
err = lru.Put("four", b)
require.NoError(t, err)
err = lru.Put("five", b)
require.NoError(t, err)
assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))
for k, v := range map[string]bool{
"one": false,
"two": false,
"three": true,
"four": true,
"five": true,
"six": false,
} {
has, err := lru.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
lru.Get("three") // touch so it stays in cache
lru.Put("six", b)
assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))
for k, v := range map[string]bool{
"one": false,
"two": false,
"three": true,
"four": false,
"five": true,
"six": true,
} {
has, err := lru.Has(k)
assert.NoError(t, err)
assert.Equal(t, v, has)
}
err = lru.Delete("three")
assert.NoError(t, err)
err = lru.Delete("five")
assert.NoError(t, err)
err = lru.Delete("six")
assert.NoError(t, err)
assert.Equal(t, 0, len(mem.Debug()))
}
func TestLRUStore_UnderlyingBlobMissing(t *testing.T) {
lru, mem := getTestLRUStore()
hash := "hash"
b := []byte("this is a blob of stuff")
err := lru.Put(hash, b)
require.NoError(t, err)
err = mem.Delete(hash)
require.NoError(t, err)
// hash still exists in lru
assert.True(t, lru.lru.Has(hash))
blob, _, err := lru.Get(hash)
assert.Nil(t, blob)
assert.True(t, errors.Is(err, ErrBlobNotFound), "expected (%s) %s, got (%s) %s",
reflect.TypeOf(ErrBlobNotFound).String(), ErrBlobNotFound.Error(),
reflect.TypeOf(err).String(), err.Error())
// lru.Get() removes hash if underlying store doesn't have it
assert.False(t, lru.lru.Has(hash))
}
func TestLRUStore_loadExisting(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "reflector_test_*")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
d := NewDiskStore(tmpDir, 2)
hash := "hash"
b := []byte("this is a blob of stuff")
err = d.Put(hash, b)
require.NoError(t, err)
existing, err := d.list()
require.NoError(t, err)
require.Equal(t, 1, len(existing), "blob should exist in cache")
assert.Equal(t, hash, existing[0])
lru := NewLRUStore("test", d, 3) // lru should load existing blobs when it's created
time.Sleep(100 * time.Millisecond) // async load so let's wait...
has, err := lru.Has(hash)
require.NoError(t, err)
assert.True(t, has, "hash should be loaded from disk store but it's not")
}


@@ -4,10 +4,9 @@ import (
"sync"
"time"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/shared"
)
// MemStore is an in memory only blob store with no persistence.
@@ -77,4 +76,6 @@ func (m *MemStore) Debug() map[string]stream.Blob {
}
// Shutdown shuts down the store gracefully
- func (m *MemStore) Shutdown() {}
+ func (m *MemStore) Shutdown() {
+ return
+ }


@@ -3,9 +3,8 @@ package store
import (
"time"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/shared"
)
// NoopStore is a store that does nothing


@@ -5,11 +5,10 @@ import (
"net/http"
"time"
- "github.com/lbryio/reflector.go/internal/metrics"
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/internal/metrics"
+ "github.com/lbryio/reflector.go/shared"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"


@@ -3,10 +3,10 @@ package store
import (
"time"
+ "github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
- "github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"golang.org/x/sync/singleflight"


@@ -1,8 +1,7 @@
package speedwalk
import (
- "io/fs"
- "os"
+ "io/ioutil"
"path/filepath"
"runtime"
"sync"
@@ -18,19 +17,7 @@ import (
// AllFiles recursively lists every file in every subdirectory of a given directory
// If basename is true, return the basename of each file. Otherwise return the full path starting at startDir.
func AllFiles(startDir string, basename bool) ([]string, error) {
- entries, err := os.ReadDir(startDir)
- if err != nil {
- return nil, err
- }
- items := make([]fs.FileInfo, 0, len(entries))
- for _, entry := range entries {
- info, err := entry.Info()
- if err != nil {
- return nil, err
- }
- items = append(items, info)
- }
+ items, err := ioutil.ReadDir(startDir)
if err != nil {
return nil, err
}


@@ -1,23 +1,22 @@
package store
import (
- "github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
+ "github.com/lbryio/reflector.go/shared"
)
// BlobStore is an interface for handling blob storage.
type BlobStore interface {
// Name of blob store (useful for metrics)
Name() string
- // Has Does blob exist in the store.
+ // Does blob exist in the store.
Has(hash string) (bool, error)
// Get the blob from the store. Must return ErrBlobNotFound if blob is not in store.
Get(hash string) (stream.Blob, shared.BlobTrace, error)
// Put the blob into the store.
Put(hash string, blob stream.Blob) error
- // PutSD an SD blob into the store.
+ // Put an SD blob into the store.
PutSD(hash string, blob stream.Blob) error
// Delete the blob from the store.
Delete(hash string) error
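Every store in this diff (disk, memory, S3, CloudFront, the cache wrappers) satisfies this interface, which is what lets them stack. A sketch of a hypothetical decorator to show how little a new BlobStore needs (countingStore is illustrative, not part of the codebase):

// countingStore embeds any BlobStore and overrides Get to count reads;
// every other method is inherited from the embedded store.
type countingStore struct {
	BlobStore
	gets atomic.Int64 // go.uber.org/atomic, as used elsewhere in this diff
}

func (c *countingStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	c.gets.Inc()
	return c.BlobStore.Get(hash)
}

// usage: counted := &countingStore{BlobStore: NewMemStore()}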