Compare commits
52 commits
master...insert_in_
The author and date columns were empty in this capture; only the commit SHA1s survive.

| SHA1 |
| --- |
| 2a1557845d |
| 9670bc14f8 |
| df881e16b5 |
| bc889001bb |
| 1ec2184833 |
| 5cc1e84adb |
| a7086a00f3 |
| 76ece1e117 |
| c1caf1938c |
| 213d21b021 |
| 006b04f6e9 |
| 45130499cd |
| 4ecce75e23 |
| 0152300d8d |
| eafc62f2a6 |
| 50c077a9cb |
| 070938e12a |
| c4084eeb68 |
| 4392c97242 |
| ec3aae33ba |
| dc95351cf3 |
| 25a7fac4f0 |
| b97595311f |
| bd13836897 |
| 38b44218f2 |
| 90c36fbe24 |
| 3a441aed3a |
| ebb62d0a24 |
| 8cb7389619 |
| 7b49dd115b |
| 6291e33ee1 |
| 3e475e537b |
| c4504631bc |
| cc504e6c44 |
| 49714c02a6 |
| b33651ae26 |
| 0d5004a83b |
| 04f6859c74 |
| 3a1d9d3304 |
| 03304312e8 |
| 869030fc58 |
| def551cc89 |
| 74b76a11e4 |
| 2c0df2ca8a |
| 9fc96ac01b |
| ff9b61b034 |
| 7b80b2d4d2 |
| d45abdbdb0 |
| bc54601dde |
| bb41a84bb7 |
| a574fecf4e |
| 9146c8b084 |
73 changed files with 1591 additions and 2352 deletions
.github/workflows/go.yml (37 changed lines, vendored)

@@ -1,37 +0,0 @@
-name: Go
-
-on:
-  push:
-    branches: [ master ]
-  pull_request:
-    branches: [ master ]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: 1.20.x
-
-      - name: Build linux
-        run: make linux
-
-      - name: Build macos
-        run: make macos
-
-      - name: Test
-        run: make test
-
-      - name: Lint
-        run: make lint
-
-      - name: retrieve all tags
-        run: git fetch --prune --unshallow --tags
-
-      - name: Print changes since last version
-        run: git log $(git describe --tags --abbrev=0)..HEAD --no-merges --oneline
.github/workflows/release.yml (62 changed lines, vendored)

@@ -1,62 +0,0 @@
-name: release
-
-on:
-  push:
-    tags:
-      - "*.*.*"
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: 1.20.x
-
-      - name: Build linux
-        run: make linux
-
-      - name: Build macos
-        run: make macos
-
-      - name: Test
-        run: make test
-
-      - name: Lint
-        run: make lint
-
-      - name: Zip macos
-        run: zip -r reflector_darwin_amd64.zip ./dist/darwin_amd64
-
-      - name: Zip linux
-        run: zip -r reflector_linux_amd64.zip ./dist/linux_amd64
-
-      - name: retrieve all tags
-        run: git fetch --prune --unshallow --tags
-
-      - name: Generate Changelog
-        run: git log $(git describe --tags --abbrev=0 @^)..@ --no-merges --oneline > ${{ github.workspace }}-CHANGELOG.txt
-
-      - name: upload to github releases
-        uses: softprops/action-gh-release@v1
-        with:
-          files: |
-            ./reflector_linux_amd64.zip
-            ./reflector_darwin_amd64.zip
-          body_path: ${{ github.workspace }}-CHANGELOG.txt
-
-#      - name: Login to DockerHub
-#        uses: docker/login-action@v2
-#        with:
-#          username: ${{ secrets.DOCKERHUB_USERNAME }}
-#          password: ${{ secrets.DOCKERHUB_TOKEN }}
-
-#      - name: Generate docker image
-#        run: make image
-
-#      - name: Docker push
-#        run: make publish_image
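The deleted release workflow derives its changelog from the commit range between the previous tag and HEAD. For readers who want to reproduce that step outside Actions, here is a minimal Go sketch of the same two git invocations; it assumes git is on PATH and is an illustration, not project code.

```go
// Sketch of the deleted "Generate Changelog" step as a Go program:
// list non-merge commits between the previous tag and HEAD.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// git describe --tags --abbrev=0 @^  -> most recent tag before HEAD
	tagOut, err := exec.Command("git", "describe", "--tags", "--abbrev=0", "@^").Output()
	if err != nil {
		panic(err)
	}
	tag := strings.TrimSpace(string(tagOut))

	// git log <tag>..@ --no-merges --oneline  -> the changelog body
	logOut, err := exec.Command("git", "log", tag+"..@", "--no-merges", "--oneline").Output()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(logOut))
}
```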
.gitignore (1 changed line, vendored)

@@ -1,4 +1,3 @@
 /vendor
 /config.json*
-/dist
 /bin
.travis.yml (27 changed lines)

@@ -1,9 +1,12 @@
 os: linux
-dist: bionic
+dist: trusty
 language: go
 
+env:
+  - GO111MODULE=on
+
 go:
-  - 1.20.x
+  - 1.15.x
 
 cache:
   directories:
@@ -14,7 +17,7 @@ notifications:
   email: false
 
 # Skip the install step. Don't `go get` dependencies. Only build with the code in vendor/
-#install: true
+install: true
 
 # Anything in before_script that returns a nonzero exit code will
 # flunk the build and immediately stop. It's sorta like having
@@ -22,14 +25,14 @@ notifications:
 before_script:
   # All the .go files, excluding vendor/ and model (auto generated)
   - GO_FILES=$(find . -iname '*.go' ! -iname '*_test.go' -type f | grep -v /vendor/ ) #i wish we were this crazy :p
-  - go install golang.org/x/tools/cmd/goimports # Used in build script for generated files
+  - go get golang.org/x/tools/cmd/goimports # Used in build script for generated files
   # - go get github.com/golang/lint/golint # Linter
   # - go get honnef.co/go/tools/cmd/megacheck # Badass static analyzer/linter
-  - go install github.com/fzipp/gocyclo/cmd/gocyclo@latest # Check against high complexity
-  - go install github.com/mdempsky/unconvert@latest # Identifies unnecessary type conversions
-  - go install github.com/kisielk/errcheck@latest # Checks for unhandled errors
-  - go install github.com/opennota/check/cmd/varcheck@latest # Checks for unused vars
-  - go install github.com/opennota/check/cmd/structcheck@latest # Checks for unused fields in structs
+  - go get github.com/jgautheron/gocyclo # Check against high complexity
+  - go get github.com/mdempsky/unconvert # Identifies unnecessary type conversions
+  - go get github.com/kisielk/errcheck # Checks for unhandled errors
+  - go get github.com/opennota/check/cmd/varcheck # Checks for unused vars
+  - go get github.com/opennota/check/cmd/structcheck # Checks for unused fields in structs
 
 
 
@@ -37,7 +40,7 @@ before_script:
 # in a modern Go project.
 script:
   # Fail if a .go file hasn't been formatted with gofmt
-  - for i in $GO_FILES; do test -z $(gofmt -s -l $i); done
+  - test -z $(gofmt -s -l $GO_FILES)
   # Run unit tests
   - make test
   # Checks for unused vars and fields on structs
@@ -56,11 +59,11 @@ script:
   # one last linter - ignore autogen code
   #- golint -set_exit_status $(go list ./... | grep -v /vendor/ )
   # Finally, build the binary
-  - make linux
+  - make
 
 deploy:
   - provider: s3
-    local_dir: ./dist/linux_amd64
+    local_dir: ./bin
     skip_cleanup: true
     on:
       repo: lbryio/reflector.go
Dockerfile (name inferred from the hunk contents; file header not captured)

@@ -3,7 +3,7 @@ EXPOSE 8080
 
 RUN mkdir /app
 WORKDIR /app
-COPY dist/linux_amd64/prism-bin ./prism
+COPY bin/prism-bin ./prism
 RUN chmod +x prism
 
 ENTRYPOINT [ "/app/prism" ]
Makefile (44 changed lines)

@@ -1,33 +1,25 @@
-version := $(shell git describe --dirty --always --long --abbrev=7)
-commit := $(shell git rev-parse --short HEAD)
-commit_long := $(shell git rev-parse HEAD)
-branch := $(shell git rev-parse --abbrev-ref HEAD)
-curTime := $(shell date +%s)
-
 BINARY=prism-bin
-IMPORT_PATH = github.com/lbryio/reflector.go
-LDFLAGS="-X ${IMPORT_PATH}/meta.version=$(version) -X ${IMPORT_PATH}/meta.commit=$(commit) -X ${IMPORT_PATH}/meta.commitLong=$(commit_long) -X ${IMPORT_PATH}/meta.branch=$(branch) -X '${IMPORT_PATH}/meta.Time=$(curTime)'"
 
 DIR = $(shell cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)
-BIN_DIR = $(DIR)/dist
+BIN_DIR = ${DIR}/bin
+IMPORT_PATH = github.com/lbryio/reflector.go
 
-.DEFAULT_GOAL := linux
+VERSION = $(shell git --git-dir=${DIR}/.git describe --dirty --always --long --abbrev=7)
+LDFLAGS = -ldflags "-X ${IMPORT_PATH}/meta.Version=${VERSION} -X ${IMPORT_PATH}/meta.Time=$(shell date +%s)"
 
-.PHONY: test
+
+.PHONY: build clean test lint
+.DEFAULT_GOAL: build
+
+
+build:
+	mkdir -p ${BIN_DIR} && CGO_ENABLED=0 go build ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/${BINARY} main.go
+
+clean:
+	if [ -f ${BIN_DIR}/${BINARY} ]; then rm ${BIN_DIR}/${BINARY}; fi
+
 test:
-	go test -cover -v ./...
+	go test ./... -v -cover
 
-.PHONY: lint
 lint:
-	./scripts/lint.sh
-
-.PHONY: linux
-linux:
-	GOARCH=amd64 GOOS=linux go build -ldflags ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/linux_amd64/${BINARY}
-
-.PHONY: macos
-macos:
-	GOARCH=amd64 GOOS=darwin go build -ldflags ${LDFLAGS} -asmflags -trimpath=${DIR} -o ${BIN_DIR}/darwin_amd64/${BINARY}
-
-.PHONY: image
-image:
-	docker buildx build -t lbry/reflector:$(version) -t lbry/reflector:latest --platform linux/amd64 .
+	go get github.com/alecthomas/gometalinter && gometalinter --install && gometalinter ./...
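Both versions of the Makefile inject build metadata through `-ldflags "-X importpath.name=value"`, which sets a package-level string variable at link time. Below is a minimal, hypothetical sketch of a meta package such flags could populate; the repo's real meta package is not part of this compare, so the names here are assumptions taken from the LDFLAGS lines above (meta.Version and meta.Time on the right-hand side).

```go
// Hypothetical sketch of the variables the Makefile's -X flags target.
// Build with, e.g.:
//   go build -ldflags "-X main.Version=1.2.3 -X main.Time=1700000000"
package main

import (
	"fmt"
	"strconv"
	"time"
)

var (
	Version string // set at link time via -X
	Time    string // unix seconds as a string, set via -X
)

// VersionString formats the injected values for display.
func VersionString() string {
	built := "unknown"
	if sec, err := strconv.ParseInt(Time, 10, 64); err == nil {
		built = time.Unix(sec, 0).UTC().Format(time.RFC3339)
	}
	return fmt.Sprintf("version %s, built %s", Version, built)
}

func main() {
	fmt.Println(VersionString())
}
```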
cluster/cluster.go (name inferred; file header not captured)

@@ -1,7 +1,7 @@
 package cluster
 
 import (
-	"io"
+	"io/ioutil"
 	baselog "log"
 	"sort"
 	"time"
@@ -52,7 +52,7 @@ func (c *Cluster) Connect() error {
 	conf.MemberlistConfig.AdvertisePort = c.port
 	conf.NodeName = c.name
 
-	nullLogger := baselog.New(io.Discard, "", 0)
+	nullLogger := baselog.New(ioutil.Discard, "", 0)
 	conf.Logger = nullLogger
 
 	c.eventCh = make(chan serf.Event)
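The io.Discard/ioutil.Discard swap above is one instance of a pattern that recurs throughout this compare: the left side uses the Go 1.16+ replacements for the deprecated io/ioutil package, the right side the older names. The two are interchangeable, as this small sketch shows.

```go
// io.Discard (Go >= 1.16) and ioutil.Discard are the same writer, and
// os.ReadFile/os.WriteFile replace ioutil.ReadFile/ioutil.WriteFile.
package main

import (
	"io"
	"io/ioutil" // deprecated since Go 1.16; kept by the older branch
	baselog "log"
	"os"
)

func main() {
	newStyle := baselog.New(io.Discard, "", 0)     // left-hand side of the diff
	oldStyle := baselog.New(ioutil.Discard, "", 0) // right-hand side
	newStyle.Println("dropped")
	oldStyle.Println("dropped")

	// The same migration explains the ReadFile/WriteFile changes below.
	_ = os.WriteFile("/tmp/example", []byte("data"), 0666)
	b, _ := os.ReadFile("/tmp/example")
	_ = b
}
```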
cmd/cluster.go (name inferred; file header not captured)

@@ -7,7 +7,6 @@ import (
 	"syscall"
 
 	"github.com/lbryio/lbry.go/v2/extras/crypto"
-
 	"github.com/lbryio/reflector.go/cluster"
 
 	log "github.com/sirupsen/logrus"
cmd/decode.go (name inferred; file header not captured)

@@ -7,7 +7,7 @@ import (
 	"github.com/lbryio/lbry.go/v2/schema/stake"
 
 	"github.com/davecgh/go-spew/spew"
-	"github.com/gogo/protobuf/jsonpb"
+	"github.com/golang/protobuf/jsonpb"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
cmd/getstream.go (name inferred; file header not captured)

@@ -5,7 +5,7 @@ import (
 	"os"
 	"time"
 
-	"github.com/lbryio/reflector.go/server/peer"
+	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/store"
 
 	"github.com/lbryio/lbry.go/v2/stream"
cmd/integrity.go (name inferred; file header not captured)

@@ -3,17 +3,17 @@ package cmd
 import (
 	"crypto/sha512"
 	"encoding/hex"
+	"io/ioutil"
 	"os"
 	"path"
 	"runtime"
 	"sync/atomic"
 	"time"
 
-	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/reflector.go/meta"
 	"github.com/lbryio/reflector.go/store/speedwalk"
 
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
@@ -39,7 +39,7 @@ func integrityCheckCmd(cmd *cobra.Command, args []string) {
 
 	blobs, err := speedwalk.AllFiles(diskStorePath, true)
 	if err != nil {
-		log.Fatalf("error while reading blobs from disk %s", errors.FullTrace(err))
+		log.Errorf("error while reading blobs from disk %s", errors.FullTrace(err))
 	}
 	tasks := make(chan string, len(blobs))
 	done := make(chan bool)
@@ -63,15 +63,15 @@ func consume(worker int, tasks <-chan string, done chan<- bool, totalTasks int,
 	start := time.Now()
 
 	for b := range tasks {
-		processedSoFar := atomic.AddInt32(processed, 1)
+		checked := atomic.AddInt32(processed, 1)
 		if worker == 0 {
-			remaining := int32(totalTasks) - processedSoFar
-			timePerBlob := time.Since(start).Microseconds() / int64(processedSoFar)
+			remaining := int32(totalTasks) - checked
+			timePerBlob := time.Since(start).Microseconds() / int64(checked)
 			remainingTime := time.Duration(int64(remaining)*timePerBlob) * time.Microsecond
-			log.Infof("[T%d] %d/%d blobs processed so far. ETA: %s", worker, processedSoFar, totalTasks, remainingTime.String())
+			log.Infof("[T%d] %d/%d blobs checked. ETA: %s", worker, checked, totalTasks, remainingTime.String())
 		}
 		blobPath := path.Join(diskStorePath, b[:2], b)
-		blob, err := os.ReadFile(blobPath)
+		blob, err := ioutil.ReadFile(blobPath)
 		if err != nil {
 			if os.IsNotExist(err) {
 				continue
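The worker's ETA line is simple proportional estimation: elapsed microseconds divided by blobs processed gives the time per blob, which is then multiplied by the remaining count. A standalone sketch of that arithmetic, with simplified names, not the repo's code:

```go
// ETA estimate: (elapsed / processed) * remaining.
package main

import (
	"fmt"
	"time"
)

func eta(start time.Time, processed, total int32) time.Duration {
	if processed == 0 {
		return 0 // avoid division by zero before any work is done
	}
	timePerBlob := time.Since(start).Microseconds() / int64(processed)
	remaining := int64(total - processed)
	return time.Duration(remaining*timePerBlob) * time.Microsecond
}

func main() {
	start := time.Now().Add(-10 * time.Second) // pretend 10s have elapsed
	// 500 of 2000 done in 10s -> 20ms/blob -> ~30s remaining.
	fmt.Println(eta(start, 500, 2000))
}
```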
cmd/peer.go (name inferred; file header not captured)

@@ -7,7 +7,7 @@ import (
 	"syscall"
 
 	"github.com/lbryio/reflector.go/db"
-	"github.com/lbryio/reflector.go/server/peer"
+	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"
cmd/populatedb.go (name inferred; file header not captured)

@@ -1,12 +1,11 @@
 package cmd
 
 import (
-	"github.com/lbryio/lbry.go/v2/extras/errors"
-
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/meta"
 	"github.com/lbryio/reflector.go/store/speedwalk"
 
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
@@ -41,9 +40,6 @@ func populateDbCmd(cmd *cobra.Command, args []string) {
 		log.Fatal(err)
 	}
-	blobs, err := speedwalk.AllFiles(diskStorePath, true)
-	if err != nil {
-		log.Fatal(err)
-	}
 	err = localDb.AddBlobs(blobs)
 	if err != nil {
 		log.Errorf("error while storing to db: %s", errors.FullTrace(err))
cmd/reflector.go (296 changed lines)

@@ -8,18 +8,17 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/lbryio/lbry.go/v2/extras/util"
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/extras/stop"
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/meta"
+	"github.com/lbryio/reflector.go/peer"
+	"github.com/lbryio/reflector.go/peer/http3"
 	"github.com/lbryio/reflector.go/reflector"
-	"github.com/lbryio/reflector.go/server/http"
-	"github.com/lbryio/reflector.go/server/http3"
-	"github.com/lbryio/reflector.go/server/peer"
 	"github.com/lbryio/reflector.go/store"
 
-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/extras/stop"
 	"github.com/lbryio/lbry.go/v2/stream"
 
 	"github.com/c2h5oh/datasize"
@@ -28,43 +27,24 @@ import (
 )
 
 var (
-	//port configuration
-	tcpPeerPort   int
-	http3PeerPort int
-	httpPeerPort  int
-	receiverPort  int
-	metricsPort   int
-
-	//flags configuration
-	disableUploads   bool
-	disableBlocklist bool
-	useDB            bool
-
-	//upstream configuration
-	upstreamReflector string
-	upstreamProtocol  string
-	upstreamEdgeToken string
-
-	//downstream configuration
-	requestQueueSize int
-
-	//upstream edge configuration (to "cold" storage)
-	originEndpoint         string
-	originEndpointFallback string
-
-	//cache configuration
-	diskCache          string
-	secondaryDiskCache string
-	memCache           int
+	tcpPeerPort                 int
+	http3PeerPort               int
+	httpPort                    int
+	receiverPort                int
+	metricsPort                 int
+	disableUploads              bool
+	disableBlocklist            bool
+	proxyAddress                string
+	proxyPort                   string
+	proxyProtocol               string
+	useDB                       bool
+	cloudFrontEndpoint          string
+	WasabiEndpoint              string
+	reflectorCmdDiskCache       string
+	bufferReflectorCmdDiskCache string
+	reflectorCmdMemCache        int
+	requestQueueSize            int
 )
-var cacheManagers = []string{"localdb", "lfu", "arc", "lru", "simple"}
 
-var cacheMangerToGcache = map[string]store.EvictionStrategy{
-	"lfu":    store.LFU,
-	"arc":    store.ARC,
-	"lru":    store.LRU,
-	"simple": store.SIMPLE,
-}
 
 func init() {
 	var cmd = &cobra.Command{
@@ -72,42 +52,38 @@ func init() {
 		Short: "Run reflector server",
 		Run:   reflectorCmd,
 	}
 
-	cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from for the TCP (LBRY) protocol")
+	cmd.Flags().StringVar(&proxyAddress, "proxy-address", "", "address of another reflector server where blobs are fetched from")
+	cmd.Flags().StringVar(&proxyPort, "proxy-port", "5567", "port of another reflector server where blobs are fetched from")
+	cmd.Flags().StringVar(&proxyProtocol, "proxy-protocol", "http3", "protocol used to fetch blobs from another reflector server (tcp/http3)")
+	cmd.Flags().StringVar(&cloudFrontEndpoint, "cloudfront-endpoint", "", "CloudFront edge endpoint for standard HTTP retrieval")
+	cmd.Flags().StringVar(&WasabiEndpoint, "wasabi-endpoint", "", "Wasabi edge endpoint for standard HTTP retrieval")
+	cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from")
 	cmd.Flags().IntVar(&http3PeerPort, "http3-peer-port", 5568, "The port reflector will distribute content from over HTTP3 protocol")
-	cmd.Flags().IntVar(&httpPeerPort, "http-peer-port", 5569, "The port reflector will distribute content from over HTTP protocol")
+	cmd.Flags().IntVar(&httpPort, "http-port", 5569, "The port reflector will distribute content from over HTTP protocol")
 	cmd.Flags().IntVar(&receiverPort, "receiver-port", 5566, "The port reflector will receive content from")
-	cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for prometheus metrics")
-
+	cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for metrics")
+	cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests should be submitted to upstream")
 	cmd.Flags().BoolVar(&disableUploads, "disable-uploads", false, "Disable uploads to this reflector server")
 	cmd.Flags().BoolVar(&disableBlocklist, "disable-blocklist", false, "Disable blocklist watching/updating")
-	cmd.Flags().BoolVar(&useDB, "use-db", true, "Whether to connect to the reflector db or not")
-
-	cmd.Flags().StringVar(&upstreamReflector, "upstream-reflector", "", "host:port of a reflector server where blobs are fetched from")
-	cmd.Flags().StringVar(&upstreamProtocol, "upstream-protocol", "http", "protocol used to fetch blobs from another upstream reflector server (tcp/http3/http)")
-	cmd.Flags().StringVar(&upstreamEdgeToken, "upstream-edge-token", "", "token used to retrieve/authenticate protected content")
-
-	cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests from downstream should be handled at once (the rest will wait)")
-
-	cmd.Flags().StringVar(&originEndpoint, "origin-endpoint", "", "HTTP edge endpoint for standard HTTP retrieval")
-	cmd.Flags().StringVar(&originEndpointFallback, "origin-endpoint-fallback", "", "HTTP edge endpoint for standard HTTP retrieval if first origin fails")
-
-	cmd.Flags().StringVar(&diskCache, "disk-cache", "100GB:/tmp/downloaded_blobs:localdb", "Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru)")
-	cmd.Flags().StringVar(&secondaryDiskCache, "optional-disk-cache", "", "Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru) (this would get hit before the one specified in disk-cache)")
-	cmd.Flags().IntVar(&memCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
+	cmd.Flags().BoolVar(&useDB, "use-db", true, "whether to connect to the reflector db or not")
+	cmd.Flags().StringVar(&reflectorCmdDiskCache, "disk-cache", "",
+		"enable disk cache, setting max size and path where to store blobs. format is 'sizeGB:CACHE_PATH'")
+	cmd.Flags().StringVar(&bufferReflectorCmdDiskCache, "buffer-disk-cache", "",
+		"enable buffer disk cache, setting max size and path where to store blobs. format is 'sizeGB:CACHE_PATH'")
+	cmd.Flags().IntVar(&reflectorCmdMemCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
 	rootCmd.AddCommand(cmd)
 }
 
 func reflectorCmd(cmd *cobra.Command, args []string) {
 	log.Printf("reflector %s", meta.VersionString())
+	cleanerStopper := stop.New()
 
 	// the blocklist logic requires the db backed store to be the outer-most store
-	underlyingStore := initStores()
-	underlyingStoreWithCaches, cleanerStopper := initCaches(underlyingStore)
+	underlyingStore := setupStore()
+	outerStore := wrapWithCache(underlyingStore, cleanerStopper)
 
 	if !disableUploads {
-		reflectorServer := reflector.NewServer(underlyingStore, underlyingStoreWithCaches)
+		reflectorServer := reflector.NewServer(underlyingStore, outerStore)
 		reflectorServer.Timeout = 3 * time.Minute
 		reflectorServer.EnableBlocklist = !disableBlocklist
@@ -118,22 +94,22 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 		defer reflectorServer.Shutdown()
 	}
 
-	peerServer := peer.NewServer(underlyingStoreWithCaches)
+	peerServer := peer.NewServer(outerStore)
 	err := peerServer.Start(":" + strconv.Itoa(tcpPeerPort))
 	if err != nil {
 		log.Fatal(err)
 	}
 	defer peerServer.Shutdown()
 
-	http3PeerServer := http3.NewServer(underlyingStoreWithCaches, requestQueueSize)
+	http3PeerServer := http3.NewServer(outerStore, requestQueueSize)
 	err = http3PeerServer.Start(":" + strconv.Itoa(http3PeerPort))
 	if err != nil {
 		log.Fatal(err)
 	}
 	defer http3PeerServer.Shutdown()
 
-	httpServer := http.NewServer(store.WithSingleFlight("sf-http", underlyingStoreWithCaches), requestQueueSize, upstreamEdgeToken)
-	err = httpServer.Start(":" + strconv.Itoa(httpPeerPort))
+	httpServer := http.NewServer(outerStore)
+	err = httpServer.Start(":" + strconv.Itoa(httpPort))
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -142,8 +118,8 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 	metricsServer := metrics.NewServer(":"+strconv.Itoa(metricsPort), "/metrics")
 	metricsServer.Start()
 	defer metricsServer.Shutdown()
-	defer underlyingStoreWithCaches.Shutdown()
-	defer underlyingStore.Shutdown() //do we actually need this? Oo
+	defer outerStore.Shutdown()
+	defer underlyingStore.Shutdown()
 
 	interruptChan := make(chan os.Signal, 1)
 	signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
@@ -152,52 +128,45 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 	cleanerStopper.StopAndWait()
 }
 
-func initUpstreamStore() store.BlobStore {
-	var s store.BlobStore
-	if upstreamReflector == "" {
-		return nil
-	}
-	switch upstreamProtocol {
-	case "tcp":
-		s = peer.NewStore(peer.StoreOpts{
-			Address: upstreamReflector,
-			Timeout: 30 * time.Second,
-		})
-	case "http3":
-		s = http3.NewStore(http3.StoreOpts{
-			Address: upstreamReflector,
-			Timeout: 30 * time.Second,
-		})
-	case "http":
-		s = store.NewHttpStore(upstreamReflector, upstreamEdgeToken)
-	default:
-		log.Fatalf("protocol is not recognized: %s", upstreamProtocol)
-	}
-	return s
-}
-func initEdgeStore() store.BlobStore {
-	var s3Store *store.S3Store
+func setupStore() store.BlobStore {
 	var s store.BlobStore
 
-	if conf != "none" {
-		s3Store = store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
-	}
-	if originEndpointFallback != "" && originEndpoint != "" {
-		ittt := store.NewITTTStore(store.NewCloudFrontROStore(originEndpoint), store.NewCloudFrontROStore(originEndpointFallback))
-		if s3Store != nil {
-			s = store.NewCloudFrontRWStore(ittt, s3Store)
-		} else {
-			s = ittt
+	if proxyAddress != "" {
+		switch proxyProtocol {
+		case "tcp":
+			s = peer.NewStore(peer.StoreOpts{
+				Address: proxyAddress + ":" + proxyPort,
+				Timeout: 30 * time.Second,
+			})
+		case "http3":
+			s = http3.NewStore(http3.StoreOpts{
+				Address: proxyAddress + ":" + proxyPort,
+				Timeout: 30 * time.Second,
+			})
+		case "http":
+			s = store.NewHttpStore(proxyAddress + ":" + proxyPort)
+		default:
+			log.Fatalf("protocol is not recognized: %s", proxyProtocol)
 		}
-	} else if s3Store != nil {
-		s = s3Store
-	} else {
-		log.Fatalf("this configuration does not include a valid upstream source")
+	} else {
+		var s3Store *store.S3Store
+		if conf != "none" {
+			s3Store = store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
+		}
+		if cloudFrontEndpoint != "" && WasabiEndpoint != "" {
+			ittt := store.NewITTTStore(store.NewCloudFrontROStore(WasabiEndpoint), store.NewCloudFrontROStore(cloudFrontEndpoint))
+			if s3Store != nil {
+				s = store.NewCloudFrontRWStore(ittt, s3Store)
+			} else {
+				s = ittt
+			}
+		} else if s3Store != nil {
+			s = s3Store
+		} else {
+			log.Fatalf("this configuration does not include a valid upstream source")
+		}
 	}
-	return s
-}
 
-func initDBStore(s store.BlobStore) store.BlobStore {
 	if useDB {
 		dbInst := &db.SQL{
 			TrackAccess: db.TrackAccessStreams,
@@ -207,54 +176,26 @@ func initDBStore(s store.BlobStore) store.BlobStore {
 		if err != nil {
 			log.Fatal(err)
 		}
 
 		s = store.NewDBBackedStore(s, dbInst, false)
 	}
 
 	return s
 }
 
-func initStores() store.BlobStore {
-	s := initUpstreamStore()
-	if s == nil {
-		s = initEdgeStore()
-	}
-	s = initDBStore(s)
-	return s
-}
+func wrapWithCache(s store.BlobStore, cleanerStopper *stop.Group) store.BlobStore {
+	wrapped := s
 
-// initCaches returns a store wrapped with caches and a stop group to execute a clean shutdown
-func initCaches(s store.BlobStore) (store.BlobStore, *stop.Group) {
-	stopper := stop.New()
-	diskStore := initDiskStore(s, diskCache, stopper)
-	finalStore := initDiskStore(diskStore, secondaryDiskCache, stopper)
-	if memCache > 0 {
-		finalStore = store.NewCachingStore(
-			"reflector",
-			finalStore,
-			store.NewGcacheStore("mem", store.NewMemStore(), memCache, store.LRU),
-		)
-	}
-	return finalStore, stopper
-}
-
-func initDiskStore(upstreamStore store.BlobStore, diskParams string, stopper *stop.Group) store.BlobStore {
-	diskCacheMaxSize, diskCachePath, cacheManager := diskCacheParams(diskParams)
+	diskCacheMaxSize, diskCachePath := diskCacheParams(reflectorCmdDiskCache)
 	//we are tracking blobs in memory with a 1 byte long boolean, which means that for each 2MB (a blob) we need 1Byte
 	// so if the underlying cache holds 10MB, 10MB/2MB=5Bytes which is also the exact count of objects to restore on startup
 	realCacheSize := float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
-	if diskCacheMaxSize == 0 {
-		return upstreamStore
-	}
-	err := os.MkdirAll(diskCachePath, os.ModePerm)
-	if err != nil {
-		log.Fatal(err)
-	}
+	if diskCacheMaxSize > 0 {
+		err := os.MkdirAll(diskCachePath, os.ModePerm)
+		if err != nil {
+			log.Fatal(err)
+		}
 
-	diskStore := store.NewDiskStore(diskCachePath, 2)
-	var unwrappedStore store.BlobStore
-	cleanerStopper := stop.New(stopper)
-
-	if cacheManager == "localdb" {
 		localDb := &db.SQL{
 			SoftDelete:  true,
 			TrackAccess: db.TrackAccessBlobs,
@@ -264,40 +205,55 @@ func initDiskStore(upstreamStore store.BlobStore, diskParams string, stopper *st
 		if err != nil {
 			log.Fatal(err)
 		}
-		unwrappedStore = store.NewDBBackedStore(diskStore, localDb, true)
-		go cleanOldestBlobs(int(realCacheSize), localDb, unwrappedStore, cleanerStopper)
-	} else {
-		unwrappedStore = store.NewGcacheStore("nvme", store.NewDiskStore(diskCachePath, 2), int(realCacheSize), cacheMangerToGcache[cacheManager])
+		dbBackedDiskStore := store.NewDBBackedStore(store.NewDiskStore(diskCachePath, 2), localDb, true)
+		wrapped = store.NewCachingStore(
+			"reflector",
+			wrapped,
+			dbBackedDiskStore,
+		)
+
+		go cleanOldestBlobs(int(realCacheSize), localDb, dbBackedDiskStore, cleanerStopper)
 	}
 
+	diskCacheMaxSize, diskCachePath = diskCacheParams(bufferReflectorCmdDiskCache)
+	realCacheSize = float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
+	if diskCacheMaxSize > 0 {
+		err := os.MkdirAll(diskCachePath, os.ModePerm)
+		if err != nil {
+			log.Fatal(err)
+		}
+		wrapped = store.NewCachingStore(
+			"reflector",
+			wrapped,
+			store.NewLFUDAStore("nvme", store.NewDiskStore(diskCachePath, 2), realCacheSize),
+		)
+	}
+
+	if reflectorCmdMemCache > 0 {
+		wrapped = store.NewCachingStore(
+			"reflector",
+			wrapped,
+			store.NewLRUStore("mem", store.NewMemStore(), reflectorCmdMemCache),
+		)
+	}
+
-	wrapped := store.NewCachingStore(
-		"reflector",
-		upstreamStore,
-		unwrappedStore,
-	)
 	return wrapped
 }
 
-func diskCacheParams(diskParams string) (int, string, string) {
+func diskCacheParams(diskParams string) (int, string) {
 	if diskParams == "" {
-		return 0, "", ""
+		return 0, ""
 	}
 
 	parts := strings.Split(diskParams, ":")
-	if len(parts) != 3 {
-		log.Fatalf("%s does is formatted incorrectly. Expected format: 'sizeGB:CACHE_PATH:cachemanager' for example: '100GB:/tmp/downloaded_blobs:localdb'", diskParams)
+	if len(parts) != 2 {
+		log.Fatalf("--disk-cache must be a number, followed by ':', followed by a string")
 	}
 
 	diskCacheSize := parts[0]
 	path := parts[1]
-	cacheManager := parts[2]
 
 	if len(path) == 0 || path[0] != '/' {
-		log.Fatalf("disk cache paths must start with '/'")
-	}
-
-	if !util.InSlice(cacheManager, cacheManagers) {
-		log.Fatalf("specified cache manager '%s' is not supported. Use one of the following: %v", cacheManager, cacheManagers)
+		log.Fatalf("--disk-cache path must start with '/'")
 	}
 
 	var maxSize datasize.ByteSize
@@ -306,9 +262,9 @@ func diskCacheParams(diskParams string) (int, string, string) {
 		log.Fatal(err)
 	}
 	if maxSize <= 0 {
-		log.Fatal("disk cache size must be more than 0")
+		log.Fatal("--disk-cache size must be more than 0")
 	}
-	return int(maxSize), path, cacheManager
+	return int(maxSize), path
 }
 
 func cleanOldestBlobs(maxItems int, db *db.SQL, store store.BlobStore, stopper *stop.Group) {
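Both sides size the disk cache in blob slots by dividing the configured byte budget by stream.MaxBlobSize (2 MB), as the in-code comment above explains. Below is a hypothetical standalone sketch of the master-side parameter handling, which parses the three-part 'sizeGB:CACHE_PATH:cachemanager' format; the real logic lives in cmd/reflector.go and aborts with log.Fatalf instead of returning errors.

```go
// Parse 'sizeGB:CACHE_PATH:cachemanager' and convert the byte budget
// into a blob-slot count (one slot per 2 MB blob).
package main

import (
	"fmt"
	"strings"

	"github.com/c2h5oh/datasize"
)

const maxBlobSize = 2 * 1024 * 1024 // mirrors stream.MaxBlobSize in lbry.go

func diskCacheParams(diskParams string) (int, string, string, error) {
	if diskParams == "" {
		return 0, "", "", nil
	}
	parts := strings.Split(diskParams, ":")
	if len(parts) != 3 {
		return 0, "", "", fmt.Errorf("expected 'sizeGB:CACHE_PATH:cachemanager', got %q", diskParams)
	}
	var maxSize datasize.ByteSize
	if err := maxSize.UnmarshalText([]byte(parts[0])); err != nil {
		return 0, "", "", err
	}
	return int(maxSize), parts[1], parts[2], nil
}

func main() {
	size, path, manager, _ := diskCacheParams("100GB:/tmp/downloaded_blobs:localdb")
	slots := size / maxBlobSize // datasize treats 100GB as 100*2^30, so 51200 slots
	fmt.Println(path, manager, slots)
}
```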
cmd/root.go (19 changed lines)

@@ -2,14 +2,14 @@ package cmd
 
 import (
 	"encoding/json"
+	"io/ioutil"
 	"os"
 	"strings"
 
-	"github.com/lbryio/reflector.go/updater"
-
 	"github.com/lbryio/lbry.go/v2/dht"
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/extras/util"
+	"github.com/lbryio/reflector.go/updater"
 
 	"github.com/johntdyer/slackrus"
 	"github.com/sirupsen/logrus"
@@ -24,7 +24,6 @@ type Config struct {
 	BucketName   string `json:"bucket_name"`
 	DBConn       string `json:"db_conn"`
 	SlackHookURL string `json:"slack_hook_url"`
-	SlackChannel string `json:"slack_channel"`
 	UpdateBinURL string `json:"update_bin_url"`
 	UpdateCmd    string `json:"update_cmd"`
 }
@@ -102,7 +101,7 @@ func preRun(cmd *cobra.Command, args []string) {
 		hook := &slackrus.SlackrusHook{
 			HookURL:        globalConfig.SlackHookURL,
 			AcceptedLevels: slackrus.LevelThreshold(logrus.InfoLevel),
-			Channel:        globalConfig.SlackChannel,
+			Channel:        "#reflector-logs",
 			//IconEmoji:      ":ghost:",
 			//Username:       "reflector.go",
 		}
@@ -141,7 +140,7 @@ func argFuncs(funcs ...cobra.PositionalArgs) cobra.PositionalArgs {
 func loadConfig(path string) (Config, error) {
 	var c Config
 
-	raw, err := os.ReadFile(path)
+	raw, err := ioutil.ReadFile(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return c, errors.Err("config file not found")
@@ -165,8 +164,8 @@ func mustGetFlagInt64(cmd *cobra.Command, name string) int64 {
 	return v
 }
 
-//func mustGetFlagBool(cmd *cobra.Command, name string) bool {
-//	v, err := cmd.Flags().GetBool(name)
-//	checkErr(err)
-//	return v
-//}
+func mustGetFlagBool(cmd *cobra.Command, name string) bool {
+	v, err := cmd.Flags().GetBool(name)
+	checkErr(err)
+	return v
+}
cmd/send.go (10 changed lines)

@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"os"
 	"os/signal"
 	"path"
@@ -38,12 +39,13 @@ func sendCmd(cmd *cobra.Command, args []string) {
 	reflectorAddress := args[0]
 	err := hackyReflector.Connect(reflectorAddress)
 	checkErr(err)
-	defer func() { _ = hackyReflector.Close() }()
+	defer hackyReflector.Close()
 
 	filePath := args[1]
 	file, err := os.Open(filePath)
 	checkErr(err)
-	defer func() { _ = file.Close() }()
+	defer file.Close()
 
 	sdCachePath := ""
 	sdCacheDir := mustGetFlagString(cmd, "sd-cache")
 	if sdCacheDir != "" {
@@ -58,7 +60,7 @@ func sendCmd(cmd *cobra.Command, args []string) {
 
 	if sdCachePath != "" {
 		if _, err := os.Stat(sdCachePath); !os.IsNotExist(err) {
-			sdBlob, err := os.ReadFile(sdCachePath)
+			sdBlob, err := ioutil.ReadFile(sdCachePath)
 			checkErr(err)
 			cachedSDBlob := &stream.SDBlob{}
 			err = cachedSDBlob.FromBlob(sdBlob)
@@ -108,7 +110,7 @@ func sendCmd(cmd *cobra.Command, args []string) {
 	sd := enc.SDBlob()
 	//sd.StreamName = filepath.Base(filePath)
 	//sd.SuggestedFileName = filepath.Base(filePath)
-	err = os.WriteFile(sdCachePath, sd.ToBlob(), 0666)
+	err = ioutil.WriteFile(sdCachePath, sd.ToBlob(), 0666)
 	if err != nil {
 		fmt.Printf("error saving sd blob: %v\n", err)
 		fmt.Println(sd.ToJson())
cmd/sendblob.go (name inferred; file header not captured)

@@ -51,7 +51,7 @@ func sendBlobCmd(cmd *cobra.Command, args []string) {
 
 	file, err := os.Open(path)
 	checkErr(err)
-	defer func() { _ = file.Close() }()
+	defer file.Close()
 	s, err := stream.New(file)
 	checkErr(err)
 
cmd/start.go (13 changed lines)

@@ -7,15 +7,14 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/lbryio/reflector.go/cluster"
-	"github.com/lbryio/reflector.go/db"
-	"github.com/lbryio/reflector.go/prism"
-	"github.com/lbryio/reflector.go/reflector"
-	"github.com/lbryio/reflector.go/server/peer"
-	"github.com/lbryio/reflector.go/store"
-
 	"github.com/lbryio/lbry.go/v2/dht"
 	"github.com/lbryio/lbry.go/v2/dht/bits"
+	"github.com/lbryio/reflector.go/cluster"
+	"github.com/lbryio/reflector.go/db"
+	"github.com/lbryio/reflector.go/peer"
+	"github.com/lbryio/reflector.go/prism"
+	"github.com/lbryio/reflector.go/reflector"
+	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
cmd/test.go (name inferred; file header not captured)

@@ -9,8 +9,8 @@ import (
 	"time"
 
 	"github.com/lbryio/reflector.go/meta"
+	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/reflector"
-	"github.com/lbryio/reflector.go/server/peer"
 	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"
cmd/upload.go (name inferred; file header not captured)

@@ -8,8 +8,8 @@ import (
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/reflector"
 	"github.com/lbryio/reflector.go/store"
 
 	log "github.com/sirupsen/logrus"
+
 	"github.com/spf13/cobra"
 )
 
cmd/version.go (name inferred; file header not captured)

@@ -4,7 +4,6 @@ import (
 	"fmt"
 
 	"github.com/lbryio/reflector.go/meta"
-
 	"github.com/spf13/cobra"
 )
 
@@ -18,5 +17,5 @@ func init() {
 }
 
 func versionCmd(cmd *cobra.Command, args []string) {
-	fmt.Println(meta.FullName())
+	fmt.Println(meta.VersionString())
 }
db/db.go (221 changed lines)

@@ -17,7 +17,7 @@ import (
-	"github.com/go-sql-driver/mysql"
+	_ "github.com/go-sql-driver/mysql" // blank import for db driver ensures its imported even if its not used
 	log "github.com/sirupsen/logrus"
-	"github.com/volatiletech/null/v8"
+	"github.com/volatiletech/null"
 	"go.uber.org/atomic"
 )
 
@@ -39,9 +39,12 @@ type SdBlob struct {
 type trackAccess int
 
 const (
-	TrackAccessNone    trackAccess = iota // Don't track accesses
-	TrackAccessStreams                    // Track accesses at the stream level
-	TrackAccessBlobs                      // Track accesses at the blob level
+	// Don't track accesses
+	TrackAccessNone trackAccess = iota
+	// Track accesses at the stream level
+	TrackAccessStreams
+	// Track accesses at the blob level
+	TrackAccessBlobs
 )
 
 // SQL implements the DB interface
@@ -94,23 +97,22 @@ func (s *SQL) AddBlob(hash string, length int, isStored bool) error {
 		return errors.Err("not connected")
 	}
 
-	_, err := s.insertBlob(hash, length, isStored)
+	_, err := s.insertBlob(s.conn, hash, length, isStored)
 	return err
 }
 
-//AddBlobs adds blobs to the database.
+// AddBlob adds a blob to the database.
 func (s *SQL) AddBlobs(hash []string) error {
 	if s.conn == nil {
 		return errors.Err("not connected")
 	}
 
 	// Split the slice into batches of 20 items.
 	batch := 10000
 	totalBlobs := int64(len(hash))
 	work := make(chan []string, 1000)
 	stopper := stop.New()
 	var totalInserted atomic.Int64
 	start := time.Now()
 
 	go func() {
 		for i := 0; i < len(hash); i += batch {
 			j := i + batch
@@ -122,7 +124,6 @@ func (s *SQL) AddBlobs(hash []string) error {
 		log.Infof("done loading %d hashes in the work queue", len(hash))
 		close(work)
 	}()
-
 	for i := 0; i < runtime.NumCPU(); i++ {
 		stopper.Add(1)
 		go func(worker int) {
@@ -144,7 +145,6 @@ func (s *SQL) AddBlobs(hash []string) error {
 			}
 		}(i)
 	}
-
 	stopper.Wait()
 	return nil
 }
@@ -163,7 +163,7 @@ func (s *SQL) insertBlobs(hashes []string) error {
 		//args = append(args, hash, true, stream.MaxBlobSize, dayAgo)
 	}
 	q = strings.TrimSuffix(q, ",")
-	_, err := s.exec(q)
+	_, err := s.exec(s.conn, q)
 	if err != nil {
 		return err
 	}
@@ -171,7 +171,7 @@ func (s *SQL) insertBlobs(hashes []string) error {
 	return nil
 }
 
-func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error) {
+func (s *SQL) insertBlob(ex Executor, hash string, length int, isStored bool) (int64, error) {
 	if length <= 0 {
 		return 0, errors.Err("length must be positive")
 	}
@@ -188,13 +188,13 @@ func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error)
 		q = "INSERT INTO blob_ (hash, is_stored, length) VALUES (" + qt.Qs(len(args)) + ") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored))"
 	}
 
-	blobID, err := s.exec(q, args...)
+	blobID, err := s.exec(ex, q, args...)
 	if err != nil {
 		return 0, err
 	}
 
 	if blobID == 0 {
-		err = s.conn.QueryRow("SELECT id FROM blob_ WHERE hash = ?", hash).Scan(&blobID)
+		err = ex.QueryRow("SELECT id FROM blob_ WHERE hash = ?", hash).Scan(&blobID)
 		if err != nil {
 			return 0, errors.Err(err)
 		}
@@ -203,7 +203,7 @@ func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error)
 	}
 
 	if s.TrackAccess == TrackAccessBlobs {
-		err := s.touchBlobs([]uint64{uint64(blobID)})
+		err := s.touchBlobs(ex, []uint64{uint64(blobID)})
 		if err != nil {
 			return 0, errors.Err(err)
 		}
@@ -227,7 +227,7 @@ func (s *SQL) insertStream(hash string, sdBlobID int64) (int64, error) {
 		q = "INSERT IGNORE INTO stream (hash, sd_blob_id) VALUES (" + qt.Qs(len(args)) + ")"
 	}
 
-	streamID, err := s.exec(q, args...)
+	streamID, err := s.exec(s.conn, q, args...)
 	if err != nil {
 		return 0, errors.Err(err)
 	}
@@ -266,7 +266,7 @@ func (s *SQL) HasBlobs(hashes []string, touch bool) (map[string]bool, error) {
 
 	if touch {
 		if s.TrackAccess == TrackAccessBlobs {
-			_ = s.touchBlobs(idsNeedingTouch)
+			_ = s.touchBlobs(s.conn, idsNeedingTouch)
 		} else if s.TrackAccess == TrackAccessStreams {
 			_ = s.touchStreams(idsNeedingTouch)
 		}
@@ -275,7 +275,7 @@ func (s *SQL) HasBlobs(hashes []string, touch bool) (map[string]bool, error) {
 	return exists, err
 }
 
-func (s *SQL) touchBlobs(blobIDs []uint64) error {
+func (s *SQL) touchBlobs(ex Executor, blobIDs []uint64) error {
 	if len(blobIDs) == 0 {
 		return nil
 	}
@@ -288,7 +288,7 @@ func (s *SQL) touchBlobs(blobIDs []uint64) error {
 	}
 
 	startTime := time.Now()
-	_, err := s.exec(query, args...)
+	_, err := s.exec(ex, query, args...)
 	log.Debugf("touched %d blobs and took %s", len(blobIDs), time.Since(startTime))
 	return errors.Err(err)
 }
@@ -306,7 +306,7 @@ func (s *SQL) touchStreams(streamIDs []uint64) error {
 	}
 
 	startTime := time.Now()
-	_, err := s.exec(query, args...)
+	_, err := s.exec(s.conn, query, args...)
 	log.Debugf("touched %d streams and took %s", len(streamIDs), time.Since(startTime))
 	return errors.Err(err)
 }
@@ -406,20 +406,20 @@ WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
 // NOTE: If SoftDelete is enabled, streams will never be deleted
 func (s *SQL) Delete(hash string) error {
 	if s.SoftDelete {
-		_, err := s.exec("UPDATE blob_ SET is_stored = 0 WHERE hash = ?", hash)
+		_, err := s.exec(s.conn, "UPDATE blob_ SET is_stored = 0 WHERE hash = ?", hash)
 		return errors.Err(err)
 	}
 
-	_, err := s.exec("DELETE FROM stream WHERE sd_blob_id = (SELECT id FROM blob_ WHERE hash = ?)", hash)
+	_, err := s.exec(s.conn, "DELETE FROM stream WHERE sd_blob_id = (SELECT id FROM blob_ WHERE hash = ?)", hash)
 	if err != nil {
 		return errors.Err(err)
 	}
 
-	_, err = s.exec("DELETE FROM blob_ WHERE hash = ?", hash)
+	_, err = s.exec(s.conn, "DELETE FROM blob_ WHERE hash = ?", hash)
 	return errors.Err(err)
 }
 
-//LeastRecentlyAccessedHashes gets the least recently accessed blobs
+// GetHashRange gets the smallest and biggest hashes in the db
 func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
 	if s.conn == nil {
 		return nil, errors.Err("not connected")
@@ -451,6 +451,40 @@ func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
 	return blobs, nil
 }
 
+// AllHashes writes all hashes from the db into the channel.
+// It does not close the channel when it finishes.
+//func (s *SQL) AllHashes(ch chan<- string) error {
+//	if s.conn == nil {
+//		return errors.Err("not connected")
+//	}
+//
+//	query := "SELECT hash from blob_"
+//	if s.SoftDelete {
+//		query += " where is_stored = 1"
+//	}
+//	s.logQuery(query)
+//
+//	rows, err := s.conn.Query(query)
+//	if err != nil {
+//		return errors.Err(err)
+//	}
+//	defer closeRows(rows)
+//
+//	for rows.Next() {
+//		var hash string
+//		err := rows.Scan(&hash)
+//		if err != nil {
+//			return errors.Err(err)
+//		}
+//		ch <- hash
+//		// TODO: this needs testing
+//		// TODO: need a way to cancel this early (e.g. in case of shutdown)
+//	}
+//
+//	close(ch)
+//	return nil
+//}
+
 func (s *SQL) Count() (int, error) {
 	if s.conn == nil {
 		return 0, errors.Err("not connected")
@@ -556,7 +590,7 @@ func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int, sdBlob SdBlob) error {
 		return errors.Err("not connected")
 	}
 
-	sdBlobID, err := s.insertBlob(sdHash, sdBlobLength, true)
+	sdBlobID, err := s.insertBlob(s.conn, sdHash, sdBlobLength, true)
 	if err != nil {
 		return err
 	}
@@ -566,28 +600,30 @@ func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int, sdBlob SdBlob) error {
 		return err
 	}
 
-	// insert content blobs and connect them to stream
-	for _, contentBlob := range sdBlob.Blobs {
-		if contentBlob.BlobHash == "" {
-			// null terminator blob
-			continue
-		}
+	return withTx(s.conn, func(tx Transactor) error {
+		// insert content blobs and connect them to stream
+		for _, contentBlob := range sdBlob.Blobs {
+			if contentBlob.BlobHash == "" {
+				// null terminator blob
+				continue
+			}
 
-		blobID, err := s.insertBlob(contentBlob.BlobHash, contentBlob.Length, false)
-		if err != nil {
-			return err
-		}
+			blobID, err := s.insertBlob(tx, contentBlob.BlobHash, contentBlob.Length, false)
+			if err != nil {
+				return err
+			}
 
-		args := []interface{}{streamID, blobID, contentBlob.BlobNum}
-		_, err = s.exec(
-			"INSERT IGNORE INTO stream_blob (stream_id, blob_id, num) VALUES ("+qt.Qs(len(args))+")",
-			args...,
-		)
-		if err != nil {
-			return errors.Err(err)
+			args := []interface{}{streamID, blobID, contentBlob.BlobNum}
+			_, err = s.exec(tx,
+				"INSERT IGNORE INTO stream_blob (stream_id, blob_id, num) VALUES ("+qt.Qs(len(args))+")",
+				args...,
+			)
+			if err != nil {
+				return errors.Err(err)
+			}
 		}
-	}
-	return nil
+		return nil
+	})
 }
 
 // GetHashRange gets the smallest and biggest hashes in the db
@@ -660,39 +696,38 @@ func (s *SQL) GetStoredHashesInRange(ctx context.Context, start, end bits.Bitmap
 }
 
 // txFunc is a function that can be wrapped in a transaction
-type txFunc func(tx *sql.Tx) error
+type txFunc func(tx Transactor) error
 
-// withTx wraps a function in an sql transaction. the transaction is committed if there's no error, or rolled back if there is one.
-// if dbOrTx is an sql.DB, a new transaction is started
+// withTx wraps a function in an sql transaction. the transaction is committed if there's
+// no error, or rolled back if there is one. if dbOrTx is not a Transactor (e.g. if it's
+// an *sql.DB), withTx attempts to start a new transaction to use.
 func withTx(dbOrTx interface{}, f txFunc) (err error) {
-	var tx *sql.Tx
+	var tx Transactor
+	var ok bool
 
-	switch t := dbOrTx.(type) {
-	case *sql.Tx:
-		tx = t
-	case *sql.DB:
-		tx, err = t.Begin()
+	tx, ok = dbOrTx.(Transactor)
+	if !ok {
+		tx, err = Begin(dbOrTx)
 		if err != nil {
 			return err
 		}
-		defer func() {
-			if p := recover(); p != nil {
-				if rollBackError := tx.Rollback(); rollBackError != nil {
-					log.Error("failed to rollback tx on panic - ", rollBackError)
-				}
-				panic(p)
-			} else if err != nil {
-				if rollBackError := tx.Rollback(); rollBackError != nil {
-					log.Error("failed to rollback tx on panic - ", rollBackError)
-				}
-			} else {
-				err = errors.Err(tx.Commit())
-			}
-		}()
-	default:
-		return errors.Err("db or tx required")
 	}
 
+	defer func() {
+		if p := recover(); p != nil {
+			if rollBackError := tx.Rollback(); rollBackError != nil {
+				log.Error("failed to rollback tx on panic: ", rollBackError)
+			}
+			err = errors.Prefix("panic", p)
+		} else if err != nil {
+			if rollBackError := tx.Rollback(); rollBackError != nil {
+				log.Error("failed to rollback tx: ", rollBackError)
+			}
+		} else {
+			err = errors.Err(tx.Commit())
+		}
+	}()
+
 	return f(tx)
 }
@@ -705,12 +740,12 @@ func closeRows(rows *sql.Rows) {
 	}
 }
 
-func (s *SQL) exec(query string, args ...interface{}) (int64, error) {
+func (s *SQL) exec(ex Executor, query string, args ...interface{}) (int64, error) {
 	s.logQuery(query, args...)
 	attempt, maxAttempts := 0, 3
 Retry:
 	attempt++
-	result, err := s.conn.Exec(query, args...)
+	result, err := ex.Exec(query, args...)
 	if isLockTimeoutError(err) {
 		if attempt <= maxAttempts {
 			//Error 1205: Lock wait timeout exceeded; try restarting transaction
@@ -779,3 +814,47 @@ CREATE TABLE blocked (
 );
 
 */
+
+//func (d *LiteDBBackedStore) selfClean() {
+//	d.stopper.Add(1)
+//	defer d.stopper.Done()
+//	lastCleanup := time.Now()
+//	const cleanupInterval = 10 * time.Second
+//	for {
+//		select {
+//		case <-d.stopper.Ch():
+//			log.Infoln("stopping self cleanup")
+//			return
+//		default:
+//			time.Sleep(1 * time.Second)
+//		}
+//		if time.Since(lastCleanup) < cleanupInterval {
+//			continue
+//
+//		blobsCount, err := d.db.BlobsCount()
+//		if err != nil {
+//			log.Errorf(errors.FullTrace(err))
+//		}
+//		if blobsCount >= d.maxItems {
+//			itemsToDelete := blobsCount / 100 * 10
+//			blobs, err := d.db.GetLRUBlobs(itemsToDelete)
+//			if err != nil {
+//				log.Errorf(errors.FullTrace(err))
+//			}
+//			for _, hash := range blobs {
+//				select {
+//				case <-d.stopper.Ch():
+//					return
+//				default:
+//
+//				}
+//				err = d.Delete(hash)
+//				if err != nil {
+//					log.Errorf(errors.FullTrace(err))
+//				}
+//				metrics.CacheLRUEvictCount.With(metrics.CacheLabels(d.Name(), d.component)).Inc()
+//			}
+//		}
+//		lastCleanup = time.Now()
+//	}
+//}
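The right-hand side routes AddSDBlob's per-blob inserts through withTx, so a stream's rows commit or roll back as a unit. Below is a minimal sketch of that pattern using only database/sql and a plain *sql.DB; the repo's version generalizes it with its Executor/Transactor interfaces, shown in db/interfaces.go next.

```go
// Minimal withTx sketch: the callback runs inside one transaction; an
// error (or panic) rolls everything back, otherwise the deferred
// handler commits. Hypothetical standalone example, not the repo's code.
package dbtx

import (
	"database/sql"
	"fmt"
)

func withTx(db *sql.DB, f func(tx *sql.Tx) error) (err error) {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer func() {
		if p := recover(); p != nil {
			_ = tx.Rollback()
			err = fmt.Errorf("panic: %v", p)
		} else if err != nil {
			_ = tx.Rollback() // any error from f discards the whole batch
		} else {
			err = tx.Commit()
		}
	}()
	return f(tx)
}

// addStreamBlobs inserts all of a stream's blob rows atomically.
func addStreamBlobs(db *sql.DB, streamID int64, hashes []string) error {
	return withTx(db, func(tx *sql.Tx) error {
		for num, hash := range hashes {
			if _, err := tx.Exec(
				"INSERT IGNORE INTO stream_blob (stream_id, blob_id, num) VALUES (?, (SELECT id FROM blob_ WHERE hash = ?), ?)",
				streamID, hash, num,
			); err != nil {
				return err // triggers rollback of every prior insert
			}
		}
		return nil
	})
}
```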
db/interfaces.go (45 lines, new file)

@@ -0,0 +1,45 @@
+package db
+
+import (
+	"database/sql"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+)
+
+// Executor can perform SQL queries.
+type Executor interface {
+	Exec(query string, args ...interface{}) (sql.Result, error)
+	Query(query string, args ...interface{}) (*sql.Rows, error)
+	QueryRow(query string, args ...interface{}) *sql.Row
+}
+
+// Transactor can commit and rollback, on top of being able to execute queries.
+type Transactor interface {
+	Commit() error
+	Rollback() error
+
+	Executor
+}
+
+// Begin begins a transaction
+func Begin(db interface{}) (Transactor, error) {
+	type beginner interface {
+		Begin() (Transactor, error)
+	}
+
+	creator, ok := db.(beginner)
+	if ok {
+		return creator.Begin()
+	}
+
+	type sqlBeginner interface {
+		Begin() (*sql.Tx, error)
+	}
+
+	creator2, ok := db.(sqlBeginner)
+	if ok {
+		return creator2.Begin()
+	}
+
+	return nil, errors.Err("database does not support transactions")
+}
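The value of these interfaces is that *sql.DB and *sql.Tx already satisfy Executor, and *sql.Tx satisfies Transactor, so helpers like insertBlob and exec can run the same queries inside or outside a transaction. A compile-time sketch of that claim, mirroring the declarations above (not repo code):

```go
// Compile-time checks that the standard library types fit the interfaces.
package dbcheck

import "database/sql"

type Executor interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}

type Transactor interface {
	Commit() error
	Rollback() error
	Executor
}

var (
	_ Executor   = (*sql.DB)(nil) // queries outside a transaction
	_ Executor   = (*sql.Tx)(nil) // the same helpers work inside one
	_ Transactor = (*sql.Tx)(nil) // only Tx can Commit/Rollback
)
```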
go.mod (157 changed lines)

@@ -1,124 +1,49 @@
 module github.com/lbryio/reflector.go
 
-go 1.20
-
 replace github.com/btcsuite/btcd => github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19
 
-require (
-	github.com/aws/aws-sdk-go v1.45.24
-	github.com/bluele/gcache v0.0.2
-	github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7
-	github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3
-	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
-	github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
-	github.com/davecgh/go-spew v1.1.1
-	github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db
-	github.com/gin-gonic/gin v1.9.1
-	github.com/go-sql-driver/mysql v1.7.1
-	github.com/gogo/protobuf v1.3.2
-	github.com/golang/protobuf v1.5.3
-	github.com/google/gops v0.3.28
-	github.com/gorilla/mux v1.8.0
-	github.com/hashicorp/serf v0.10.1
-	github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
-	github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc
-	github.com/karrick/godirwalk v1.17.0
-	github.com/lbryio/chainquery v1.9.1-0.20230515181855-2fcba3115cfe
-	github.com/lbryio/lbry.go/v2 v2.7.2-0.20230307181431-a01aa6dc0629
-	github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6
-	github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
-	github.com/prometheus/client_golang v1.16.0
-	github.com/quic-go/quic-go v0.39.0
-	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/cast v1.5.1
-	github.com/spf13/cobra v1.7.0
-	github.com/stretchr/testify v1.8.4
-	github.com/volatiletech/null/v8 v8.1.2
-	go.uber.org/atomic v1.11.0
-	golang.org/x/sync v0.4.0
-)
 //replace github.com/lbryio/lbry.go/v2 => ../lbry.go
 
 require (
-	github.com/armon/go-metrics v0.4.0 // indirect
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
-	github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
-	github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
-	github.com/bytedance/sonic v1.9.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
-	github.com/friendsofgo/errors v0.9.2 // indirect
-	github.com/fsnotify/fsnotify v1.6.0 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
-	github.com/gin-contrib/sse v0.1.0 // indirect
-	github.com/go-errors/errors v1.4.2 // indirect
-	github.com/go-ini/ini v1.67.0 // indirect
-	github.com/go-playground/locales v0.14.1 // indirect
-	github.com/go-playground/universal-translator v0.18.1 // indirect
-	github.com/go-playground/validator/v10 v10.14.0 // indirect
-	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
-	github.com/goccy/go-json v0.10.2 // indirect
-	github.com/gofrs/uuid v4.2.0+incompatible // indirect
-	github.com/google/btree v1.0.0 // indirect
-	github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
-	github.com/gorilla/rpc v1.2.0 // indirect
-	github.com/gorilla/websocket v1.4.2 // indirect
-	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
-	github.com/hashicorp/go-msgpack v0.5.3 // indirect
-	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-sockaddr v1.0.0 // indirect
+	github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
+	github.com/aws/aws-sdk-go v1.16.11
+	github.com/bluele/gcache v0.0.2
+	github.com/bparli/lfuda-go v0.3.1
+	github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3
+	github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
+	github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2
+	github.com/davecgh/go-spew v1.1.1
+	github.com/gin-gonic/gin v1.7.1
+	github.com/go-sql-driver/mysql v1.4.1
+	github.com/golang/protobuf v1.4.2
+	github.com/google/gops v0.3.18
+	github.com/gorilla/mux v1.7.4
+	github.com/hashicorp/go-msgpack v0.5.5 // indirect
 	github.com/hashicorp/golang-lru v0.5.4 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/hashicorp/memberlist v0.5.0 // indirect
-	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
-	github.com/leodido/go-urn v1.2.4 // indirect
-	github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect
-	github.com/magiconair/properties v1.8.7 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/miekg/dns v1.1.41 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/onsi/ginkgo/v2 v2.9.5 // indirect
-	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.10.1 // indirect
-	github.com/quic-go/qpack v0.4.0 // indirect
-	github.com/quic-go/qtls-go1-20 v0.3.4 // indirect
-	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
-	github.com/shopspring/decimal v1.3.1 // indirect
-	github.com/slack-go/slack v0.12.1 // indirect
-	github.com/spf13/afero v1.9.3 // indirect
-	github.com/spf13/jwalterweatherman v1.1.0 // indirect
-	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.15.0 // indirect
-	github.com/subosito/gotenv v1.4.2 // indirect
-	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-	github.com/ugorji/go/codec v1.2.11 // indirect
-	github.com/volatiletech/inflect v0.0.1 // indirect
-	github.com/volatiletech/randomize v0.0.1 // indirect
-	github.com/volatiletech/strmangle v0.0.4 // indirect
-	go.uber.org/mock v0.3.0 // indirect
-	golang.org/x/arch v0.3.0 // indirect
-	golang.org/x/crypto v0.9.0 // indirect
-	golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
-	golang.org/x/mod v0.11.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	golang.org/x/time v0.3.0 // indirect
-	golang.org/x/tools v0.9.1 // indirect
-	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
-	google.golang.org/protobuf v1.30.0 // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
-	gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
+	github.com/hashicorp/memberlist v0.1.4 // indirect
+	github.com/hashicorp/serf v0.8.2
+	github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
+	github.com/johntdyer/slackrus v0.0.0-20180518184837-f7aae3243a07
+	github.com/karrick/godirwalk v1.16.1
+	github.com/lbryio/chainquery v1.9.0
+	github.com/lbryio/lbry.go v1.1.2 // indirect
|
||||
github.com/lbryio/lbry.go/v2 v2.7.2-0.20210416195322-6516df1418e3
|
||||
github.com/lbryio/types v0.0.0-20201019032447-f0b4476ef386
|
||||
github.com/lucas-clemente/quic-go v0.20.1
|
||||
github.com/phayes/freeport v0.0.0-20171002185219-e27662a4a9d6
|
||||
github.com/prometheus/client_golang v0.9.3
|
||||
github.com/sirupsen/logrus v1.4.2
|
||||
github.com/spf13/afero v1.4.1 // indirect
|
||||
github.com/spf13/cast v1.3.0
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/viper v1.7.1 // indirect
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/volatiletech/null v8.0.0+incompatible
|
||||
go.uber.org/atomic v1.5.1
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 // indirect
|
||||
google.golang.org/appengine v1.6.2 // indirect
|
||||
)
|
||||
|
||||
go 1.15
|
||||
|
|
|
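The commented-out `replace` directive is the standard Go-modules hook for developing against a local checkout of lbry.go instead of the published module. Uncommented, the relevant part of the go.mod would look like the sketch below; the `../lbry.go` path is the diff's own example and assumes the checkout sits next to reflector.go:

```
module github.com/lbryio/reflector.go

go 1.20

replace github.com/lbryio/lbry.go/v2 => ../lbry.go
```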
@@ -86,13 +86,8 @@ const (
	errUnexpectedEOFStr = "unexpected_eof_str"
	errJSONSyntax       = "json_syntax"
	errBlobTooBig       = "blob_too_big"
	errInvalidPeerJSON  = "invalid_peer_json"
	errInvalidPeerData  = "invalid_peer_data"
	errRequestTooLarge  = "request_too_large"
	errDeadlineExceeded = "deadline_exceeded"
	errHashMismatch     = "hash_mismatch"
	errProtectedBlob    = "protected_blob"
	errInvalidBlobHash  = "invalid_blob_hash"
	errZeroByteBlob     = "zero_byte_blob"
	errInvalidCharacter = "invalid_character"
	errBlobNotFound     = "blob_not_found"

@@ -159,7 +154,7 @@ var (
		Name: "origin_requests_total",
		Help: "How many Get requests are in flight from the cache to the origin",
	}, []string{LabelCacheType, LabelComponent})
	//during thundering-herd situations, the metric below should be a lot smaller than the metric above
	// during thundering-herd situations, the metric below should be a lot smaller than the metric above
	CacheWaitingRequestsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: ns,
		Subsystem: subsystemCache,
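The two cache gauges track complementary sides of a thundering herd: one counts Gets actually in flight from the cache to the origin, the other counts requests blocked waiting on a fetch that some other request already started. A minimal sketch of how such gauges are typically driven; the names and wiring here are hypothetical, not the project's actual cache code:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Simplified stand-ins for the two gauges above.
var (
	originRequests = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "origin_requests_total",
		Help: "Get requests in flight from the cache to the origin",
	})
	waitingRequests = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "waiting_requests_total",
		Help: "Get requests waiting on a fetch another request already started",
	})
)

// fetchFromOrigin wraps an origin call so the gauge reflects in-flight work.
func fetchFromOrigin(fetch func() ([]byte, error)) ([]byte, error) {
	originRequests.Inc()
	defer originRequests.Dec()
	return fetch()
}

// waitForResult is what a deduplicated caller runs instead: it only touches
// the waiting gauge while blocked on someone else's fetch.
func waitForResult(done <-chan []byte) []byte {
	waitingRequests.Inc()
	defer waitingRequests.Dec()
	return <-done
}

func main() {
	done := make(chan []byte, 1)
	go func() {
		b, _ := fetchFromOrigin(func() ([]byte, error) { return []byte("blob"), nil })
		done <- b
	}()
	_ = waitForResult(done)
}
```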
@@ -239,11 +234,6 @@ var (
		Name: "http3_blob_request_queue_size",
		Help: "Blob requests of https queue size",
	})
	HttpBlobReqQueue = promauto.NewGauge(prometheus.GaugeOpts{
		Namespace: ns,
		Name:      "http_blob_request_queue_size",
		Help:      "Blob requests queue size of the HTTP protocol",
	})
	RoutinesQueue = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: ns,
		Name:      "routines",

@@ -301,20 +291,10 @@ func TrackError(direction string, e error) (shouldLog bool) { // shouldLog is a
	} else if strings.Contains(err.Error(), "blob must be at most") {
		//log.Warnln("blob must be at most X bytes is not the same as ErrBlobTooBig")
		errType = errBlobTooBig
	} else if strings.Contains(err.Error(), "invalid json request") {
		errType = errInvalidPeerJSON
	} else if strings.Contains(err.Error(), "Invalid data") {
		errType = errInvalidPeerData
	} else if strings.Contains(err.Error(), "request is too large") {
		errType = errRequestTooLarge
	} else if strings.Contains(err.Error(), "Invalid blob hash length") {
		errType = errInvalidBlobHash
	} else if strings.Contains(err.Error(), "hash of received blob data does not match hash from send request") {
		errType = errHashMismatch
	} else if strings.Contains(err.Error(), "blob not found") {
		errType = errBlobNotFound
	} else if strings.Contains(err.Error(), "requested blob is protected") {
		errType = errProtectedBlob
	} else if strings.Contains(err.Error(), "0-byte blob received") {
		errType = errZeroByteBlob
	} else if strings.Contains(err.Error(), "PROTOCOL_VIOLATION: tried to retire connection") {

348 lite_db/db.go
@@ -1,348 +0,0 @@
package lite_db

import (
	"database/sql"
	"time"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	qt "github.com/lbryio/lbry.go/v2/extras/query"

	"github.com/go-sql-driver/mysql"
	_ "github.com/go-sql-driver/mysql" // blank import for db driver ensures its imported even if its not used
	log "github.com/sirupsen/logrus"
	"github.com/volatiletech/null/v8"
)

// SdBlob is a special blob that contains information on the rest of the blobs in the stream
type SdBlob struct {
	StreamName string `json:"stream_name"`
	Blobs      []struct {
		Length   int    `json:"length"`
		BlobNum  int    `json:"blob_num"`
		BlobHash string `json:"blob_hash,omitempty"`
		IV       string `json:"iv"`
	} `json:"blobs"`
	StreamType        string `json:"stream_type"`
	Key               string `json:"key"`
	SuggestedFileName string `json:"suggested_file_name"`
	StreamHash        string `json:"stream_hash"`
}

// SQL implements the DB interface
type SQL struct {
	conn *sql.DB

	TrackAccessTime bool
}

func logQuery(query string, args ...interface{}) {
	s, err := qt.InterpolateParams(query, args...)
	if err != nil {
		log.Errorln(err)
	} else {
		log.Debugln(s)
	}
}

// Connect will create a connection to the database
func (s *SQL) Connect(dsn string) error {
	var err error
	// interpolateParams is necessary. otherwise uploading a stream with thousands of blobs
	// will hit MySQL's max_prepared_stmt_count limit because the prepared statements are all
	// opened inside a transaction. closing them manually doesn't seem to help
	dsn += "?parseTime=1&collation=utf8mb4_unicode_ci&interpolateParams=1"
	s.conn, err = sql.Open("mysql", dsn)
	if err != nil {
		return errors.Err(err)
	}

	s.conn.SetMaxIdleConns(12)

	return errors.Err(s.conn.Ping())
}

// AddBlob adds a blob to the database.
func (s *SQL) AddBlob(hash string, length int) error {
	if s.conn == nil {
		return errors.Err("not connected")
	}

	_, err := s.insertBlob(hash, length)
	return err
}

func (s *SQL) insertBlob(hash string, length int) (int64, error) {
	if length <= 0 {
		return 0, errors.Err("length must be positive")
	}
	const isStored = true
	now := time.Now()
	args := []interface{}{hash, isStored, length, now}
	blobID, err := s.exec(
		"INSERT INTO blob_ (hash, is_stored, length, last_accessed_at) VALUES ("+qt.Qs(len(args))+") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored)), last_accessed_at=VALUES(last_accessed_at)",
		args...,
	)
	if err != nil {
		return 0, err
	}

	if blobID == 0 {
		err = s.conn.QueryRow("SELECT id FROM blob_ WHERE hash = ?", hash).Scan(&blobID)
		if err != nil {
			return 0, errors.Err(err)
		}
		if blobID == 0 {
			return 0, errors.Err("blob ID is 0 even after INSERTing and SELECTing")
		}
	}

	return blobID, nil
}

// HasBlob checks if the database contains the blob information.
func (s *SQL) HasBlob(hash string) (bool, error) {
	exists, err := s.HasBlobs([]string{hash})
	if err != nil {
		return false, err
	}
	return exists[hash], nil
}

// HasBlobs checks if the database contains the set of blobs and returns a bool map.
func (s *SQL) HasBlobs(hashes []string) (map[string]bool, error) {
	exists, streamsNeedingTouch, err := s.hasBlobs(hashes)
	_ = s.touch(streamsNeedingTouch)
	return exists, err
}

func (s *SQL) touch(blobIDs []uint64) error {
	if len(blobIDs) == 0 {
		return nil
	}

	query := "UPDATE blob_ SET last_accessed_at = ? WHERE id IN (" + qt.Qs(len(blobIDs)) + ")"
	args := make([]interface{}, len(blobIDs)+1)
	args[0] = time.Now()
	for i := range blobIDs {
		args[i+1] = blobIDs[i]
	}

	startTime := time.Now()
	_, err := s.exec(query, args...)
	log.Debugf("blobs access query touched %d blobs and took %s", len(blobIDs), time.Since(startTime))
	return errors.Err(err)
}

func (s *SQL) hasBlobs(hashes []string) (map[string]bool, []uint64, error) {
	if s.conn == nil {
		return nil, nil, errors.Err("not connected")
	}

	var (
		hash           string
		blobID         uint64
		lastAccessedAt null.Time
	)

	var needsTouch []uint64
	exists := make(map[string]bool)

	touchDeadline := time.Now().AddDate(0, 0, -1) // touch blob if last accessed before this time
	maxBatchSize := 10000
	doneIndex := 0

	for len(hashes) > doneIndex {
		sliceEnd := doneIndex + maxBatchSize
		if sliceEnd > len(hashes) {
			sliceEnd = len(hashes)
		}
		log.Debugf("getting hashes[%d:%d] of %d", doneIndex, sliceEnd, len(hashes))
		batch := hashes[doneIndex:sliceEnd]

		// TODO: this query doesn't work for SD blobs, which are not in the stream_blob table

		query := `SELECT hash, id, last_accessed_at
FROM blob_
WHERE is_stored = ? and hash IN (` + qt.Qs(len(batch)) + `)`
		args := make([]interface{}, len(batch)+1)
		args[0] = true
		for i := range batch {
			args[i+1] = batch[i]
		}

		logQuery(query, args...)

		err := func() error {
			startTime := time.Now()
			rows, err := s.conn.Query(query, args...)
			log.Debugf("hashes query took %s", time.Since(startTime))
			if err != nil {
				return errors.Err(err)
			}
			defer closeRows(rows)

			for rows.Next() {
				err := rows.Scan(&hash, &blobID, &lastAccessedAt)
				if err != nil {
					return errors.Err(err)
				}
				exists[hash] = true
				if s.TrackAccessTime && (!lastAccessedAt.Valid || lastAccessedAt.Time.Before(touchDeadline)) {
					needsTouch = append(needsTouch, blobID)
				}
			}

			err = rows.Err()
			if err != nil {
				return errors.Err(err)
			}

			doneIndex += len(batch)
			return nil
		}()
		if err != nil {
			return nil, nil, err
		}
	}

	return exists, needsTouch, nil
}

// Delete will remove the blob from the db
func (s *SQL) Delete(hash string) error {
	_, err := s.exec("UPDATE blob_ set is_stored = ? WHERE hash = ?", 0, hash)
	return errors.Err(err)
}

// AddSDBlob insert the SD blob and all the content blobs. The content blobs are marked as "not stored",
// but they are tracked so reflector knows what it is missing.
func (s *SQL) AddSDBlob(sdHash string, sdBlobLength int) error {
	if s.conn == nil {
		return errors.Err("not connected")
	}

	_, err := s.insertBlob(sdHash, sdBlobLength)
	return err
}

// GetHashRange gets the smallest and biggest hashes in the db
func (s *SQL) GetLRUBlobs(maxBlobs int) ([]string, error) {
	if s.conn == nil {
		return nil, errors.Err("not connected")
	}

	query := "SELECT hash from blob_ where is_stored = ? order by last_accessed_at limit ?"
	const isStored = true
	logQuery(query, isStored, maxBlobs)
	rows, err := s.conn.Query(query, isStored, maxBlobs)
	if err != nil {
		return nil, errors.Err(err)
	}
	defer closeRows(rows)
	blobs := make([]string, 0, maxBlobs)
	for rows.Next() {
		var hash string
		err := rows.Scan(&hash)
		if err != nil {
			return nil, errors.Err(err)
		}
		blobs = append(blobs, hash)
	}
	return blobs, nil
}

func (s *SQL) AllBlobs() ([]string, error) {
	if s.conn == nil {
		return nil, errors.Err("not connected")
	}

	query := "SELECT hash from blob_ where is_stored = ?" //TODO: maybe sorting them makes more sense?
	const isStored = true
	logQuery(query, isStored)
	rows, err := s.conn.Query(query, isStored)
	if err != nil {
		return nil, errors.Err(err)
	}
	defer closeRows(rows)
	totalBlobs, err := s.BlobsCount()
	if err != nil {
		return nil, err
	}
	blobs := make([]string, 0, totalBlobs)
	for rows.Next() {
		var hash string
		err := rows.Scan(&hash)
		if err != nil {
			return nil, errors.Err(err)
		}
		blobs = append(blobs, hash)
	}
	return blobs, nil
}

func (s *SQL) BlobsCount() (int, error) {
	if s.conn == nil {
		return 0, errors.Err("not connected")
	}

	query := "SELECT count(id) from blob_ where is_stored = ?" //TODO: maybe sorting them makes more sense?
	const isStored = true
	logQuery(query, isStored)
	var count int
	err := s.conn.QueryRow(query, isStored).Scan(&count)
	return count, errors.Err(err)
}

func closeRows(rows *sql.Rows) {
	if rows != nil {
		err := rows.Close()
		if err != nil {
			log.Error("error closing rows: ", err)
		}
	}
}

func (s *SQL) exec(query string, args ...interface{}) (int64, error) {
	logQuery(query, args...)
	attempt, maxAttempts := 0, 3
Retry:
	attempt++
	result, err := s.conn.Exec(query, args...)
	if isLockTimeoutError(err) {
		if attempt <= maxAttempts {
			//Error 1205: Lock wait timeout exceeded; try restarting transaction
			goto Retry
		}
		err = errors.Prefix("Lock timeout for query "+query, err)
	}

	if err != nil {
		return 0, errors.Err(err)
	}

	lastID, err := result.LastInsertId()
	return lastID, errors.Err(err)
}

func isLockTimeoutError(err error) bool {
	e, ok := err.(*mysql.MySQLError)
	return ok && e != nil && e.Number == 1205
}

/* SQL schema

in prod make sure you use latin1 or utf8 charset, NOT utf8mb4. that's a waste of space.

CREATE TABLE `blob_` (
  `id` bigint unsigned NOT NULL AUTO_INCREMENT,
  `hash` char(96) NOT NULL,
  `is_stored` tinyint(1) NOT NULL DEFAULT '0',
  `length` bigint unsigned DEFAULT NULL,
  `last_accessed_at` datetime DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  UNIQUE KEY `id` (`id`),
  UNIQUE KEY `blob_hash_idx` (`hash`),
  KEY `blob_last_accessed_idx` (`last_accessed_at`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1

*/
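This branch deletes `lite_db` entirely, but the store it implemented is easy to exercise. A minimal usage sketch, assuming the package is imported from its in-repo path and a local MySQL instance with the `reflector` user from the readme; `Connect` appends the `parseTime`/`interpolateParams` options itself, so the DSN stays bare:

```go
package main

import (
	"log"

	"github.com/lbryio/reflector.go/lite_db"
)

func main() {
	db := &lite_db.SQL{TrackAccessTime: true}
	if err := db.Connect("reflector:reflector@tcp(localhost:3306)/reflector"); err != nil {
		log.Fatal(err)
	}

	hash := "aabbcc" // hypothetical blob hash; real hashes are 96 hex chars
	if err := db.AddBlob(hash, 2097152); err != nil {
		log.Fatal(err)
	}
	has, err := db.HasBlob(hash)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("stored:", has) // true: AddBlob upserts is_stored
}
```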
38 meta/meta.go
@@ -6,36 +6,9 @@ import (
	"time"
)

var (
	name       = "prism-bin"
	version    = "unknown"
	commit     = "unknown"
	commitLong = "unknown"
	branch     = "unknown"
	Time       = "unknown"
	BuildTime  time.Time
)

// Name returns main application name
func Name() string {
	return name
}

// Version returns current application version
func Version() string {
	return version
}

// FullName returns current app version, commit and build time
func FullName() string {
	return fmt.Sprintf(
		`Name: %v
Version: %v
branch: %v
commit: %v
commit long: %v
build date: %v`, Name(), Version(), branch, commit, commitLong, BuildTime.String())
}
var Version = ""
var Time = ""
var BuildTime time.Time

func init() {
	if Time != "" {

@@ -47,6 +20,11 @@ func init() {
}

func VersionString() string {
	version := Version
	if version == "" {
		version = "<unset>"
	}

	var buildTime string
	if BuildTime.IsZero() {
		buildTime = "<now>"
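The body of `init` is elided from this hunk. Given that `Time` is a plain string and `VersionString` falls back to `"<now>"` when `BuildTime` is zero, a plausible reconstruction, assuming `Time` is injected at build time as unix seconds (for example via `-ldflags "-X ...meta.Time=..."`), would be:

```go
package meta

import (
	"strconv"
	"time"
)

var Version = ""
var Time = ""
var BuildTime time.Time

// Hypothetical sketch: parse the build-time-injected unix timestamp.
// The real init body is not shown in this hunk.
func init() {
	if Time != "" {
		t, err := strconv.ParseInt(Time, 10, 64)
		if err == nil {
			BuildTime = time.Unix(t, 0).UTC()
		}
	}
}
```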
@@ -18,6 +18,9 @@ import (
	log "github.com/sirupsen/logrus"
)

// ErrBlobExists is a default error for when a blob already exists on the reflector server.
var ErrBlobExists = errors.Base("blob exists on server")

// Client is an instance of a client connected to a server.
type Client struct {
	Timeout time.Duration

@@ -15,8 +15,7 @@ import (

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"github.com/quic-go/quic-go/http3"
	"github.com/lucas-clemente/quic-go/http3"
	log "github.com/sirupsen/logrus"
)

@@ -69,7 +68,7 @@ func (c *Client) HasBlob(hash string) (bool, error) {
	if err != nil {
		return false, errors.Err(err)
	}
	defer func() { _ = resp.Body.Close() }()
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return true, nil
	}

@@ -86,7 +85,7 @@ func (c *Client) GetBlob(hash string) (stream.Blob, shared.BlobTrace, error) {
	if err != nil {
		return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err(err)
	}
	defer func() { _ = resp.Body.Close() }()
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotFound {
		fmt.Printf("%s blob not found %d\n", hash, resp.StatusCode)
@@ -15,31 +15,28 @@ import (
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/reflector"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/extras/stop"

	"github.com/gorilla/mux"
	"github.com/quic-go/quic-go"
	"github.com/quic-go/quic-go/http3"
	"github.com/lucas-clemente/quic-go"
	"github.com/lucas-clemente/quic-go/http3"
	log "github.com/sirupsen/logrus"
)

// Server is an instance of a peer server that houses the listener and store.
type Server struct {
	store              store.BlobStore
	grp                *stop.Group
	concurrentRequests int
	store store.BlobStore
	grp   *stop.Group
}

// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore, requestQueueSize int) *Server {
	return &Server{
		store:              store,
		grp:                stop.New(),
		concurrentRequests: requestQueueSize,
		store: store,
		grp:   stop.New(),
	}
}

@@ -113,12 +110,14 @@ func (s *Server) Start(address string) error {
		}
	})
	server := http3.Server{
		Addr:      address,
		Handler:   r,
		TLSConfig: generateTLSConfig(),
		Server: &http.Server{
			Handler:   r,
			Addr:      address,
			TLSConfig: generateTLSConfig(),
		},
		QuicConfig: quicConf,
	}
	go InitWorkers(s, s.concurrentRequests)
	go InitWorkers(s, 200)
	go s.listenForShutdown(&server)
	s.grp.Add(1)
	go func() {
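The struct-literal change above reflects a quic-go API break: older releases embedded a `*http.Server` inside `http3.Server`, while the v0.39 line flattens `Addr`, `Handler`, and `TLSConfig` onto the struct itself. A minimal standalone sketch of the new shape; the empty TLS config is a placeholder, since a real server needs at least one certificate, which `generateTLSConfig()` supplies in the diff:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"time"

	"github.com/quic-go/quic-go"
	"github.com/quic-go/quic-go/http3"
)

func main() {
	// Placeholder: stands in for generateTLSConfig(); serving will fail
	// at runtime without a certificate.
	tlsConf := &tls.Config{}

	server := &http3.Server{
		Addr:       ":5568",
		Handler:    http.DefaultServeMux,
		TLSConfig:  tlsConf,
		QuicConfig: &quic.Config{MaxIdleTimeout: 20 * time.Second},
	}
	// quic.ErrServerClosed replaces the old string match on "server closed"
	// when detecting a clean shutdown.
	if err := server.ListenAndServe(); err != nil && err != quic.ErrServerClosed {
		log.Fatal(err)
	}
}
```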
@@ -155,7 +154,7 @@ func generateTLSConfig() *tls.Config {

func (s *Server) listenAndServe(server *http3.Server) {
	err := server.ListenAndServe()
	if err != nil && err != quic.ErrServerClosed {
	if err != nil && err.Error() != "server closed" {
		log.Errorln(errors.FullTrace(err))
	}
}

@@ -180,10 +179,7 @@ func (s *Server) HandleGetBlob(w http.ResponseWriter, r *http.Request) {
			wantsTrace = false
		}
	}
	if reflector.IsProtected(requestedBlob) {
		http.Error(w, "requested blob is protected", http.StatusForbidden)
		return
	}

	blob, trace, err := s.store.Get(requestedBlob)

	if wantsTrace {
@@ -8,14 +8,12 @@ import (
	"sync"
	"time"

	"github.com/lbryio/reflector.go/shared"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"github.com/quic-go/quic-go"
	"github.com/quic-go/quic-go/http3"
	"github.com/lbryio/reflector.go/shared"
	"github.com/lbryio/reflector.go/store"
	"github.com/lucas-clemente/quic-go"
	"github.com/lucas-clemente/quic-go/http3"
)

// Store is a blob store that gets blobs from a peer.

@@ -74,7 +72,7 @@ func (p *Store) Has(hash string) (bool, error) {
	if err != nil {
		return false, err
	}
	defer func() { _ = c.Close() }()
	defer c.Close()
	return c.HasBlob(hash)
}

@@ -93,7 +91,7 @@ func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	if err != nil {
		return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
	}
	defer func() { _ = c.Close() }()
	defer c.Close()
	return c.GetBlob(hash)
}

@@ -112,6 +110,7 @@ func (p *Store) Delete(hash string) error {
	return errors.Err(shared.ErrNotImplemented)
}

// Shutdown is not supported
// Delete is not supported
func (p *Store) Shutdown() {
	return
}

@@ -33,6 +33,7 @@ func InitWorkers(server *Server, workers int) {
			}
		}(i)
	}
	return
}

func enqueue(b *blobRequest) {
@@ -227,36 +227,32 @@ func (s *Server) handleCompositeRequest(data []byte) ([]byte, error) {
	if err != nil {
		var je *json.SyntaxError
		if ee.As(err, &je) {
			return nil, errors.Err("invalid json request: offset %d in data %s", je.Offset, hex.EncodeToString(data))
			return nil, errors.Err("invalid json at offset %d in data %s", je.Offset, hex.EncodeToString(data))
		}
		return nil, errors.Err(err)
	}

	response := compositeResponse{
		LbrycrdAddress: LbrycrdAddress,
		AvailableBlobs: []string{},
	}

	if len(request.RequestedBlobs) > 0 {
		var availableBlobs []string
		for _, blobHash := range request.RequestedBlobs {
			if reflector.IsProtected(blobHash) {
				return nil, errors.Err("requested blob is protected")
			}
			exists, err := s.store.Has(blobHash)
			if err != nil {
				return nil, err
			}
			if exists {
				response.AvailableBlobs = append(response.AvailableBlobs, blobHash)
				availableBlobs = append(availableBlobs, blobHash)
			}
		}
		response.AvailableBlobs = availableBlobs
	}

	if request.BlobDataPaymentRate != nil {
		response.BlobDataPaymentRate = paymentRateAccepted
		if *request.BlobDataPaymentRate < 0 {
			response.BlobDataPaymentRate = paymentRateTooLow
		}
	response.BlobDataPaymentRate = paymentRateAccepted
	if request.BlobDataPaymentRate < 0 {
		response.BlobDataPaymentRate = paymentRateTooLow
	}

	var blob []byte

@@ -271,14 +267,14 @@ func (s *Server) handleCompositeRequest(data []byte) ([]byte, error) {
		blob, trace, err = s.store.Get(request.RequestedBlob)
		log.Debug(trace.String())
		if errors.Is(err, store.ErrBlobNotFound) {
			response.IncomingBlob = &incomingBlob{
			response.IncomingBlob = incomingBlob{
				Error: err.Error(),
			}
		} else if err != nil {
			return nil, err
		} else {
			response.IncomingBlob = &incomingBlob{
				BlobHash: request.RequestedBlob,
			response.IncomingBlob = incomingBlob{
				BlobHash: reflector.BlobHash(blob),
				Length:   len(blob),
			}
			metrics.MtrOutBytesTcp.Add(float64(len(blob)))
@@ -306,15 +302,7 @@ func (s *Server) logError(e error) {
}

func readNextMessage(buf *bufio.Reader) ([]byte, error) {
	first_byte, err := buf.ReadByte()
	if err != nil {
		return nil, err
	}
	if first_byte != '{' {
		// every request starts with '{'. Checking here disconnects earlier, so we don't wait until timeout
		return nil, errInvalidData
	}
	msg := []byte("{")
	msg := make([]byte, 0)
	eof := false

	for {

@@ -335,8 +323,6 @@ func readNextMessage(buf *bufio.Reader) ([]byte, error) {

	if len(msg) > maxRequestSize {
		return msg, errRequestTooLarge
	} else if len(msg) > 0 && msg[0] != '{' {
		return msg, errInvalidData
	}

	// yes, this is how the peer protocol knows when the request finishes
@@ -371,7 +357,6 @@ const (
)

var errRequestTooLarge = errors.Base("request is too large")
var errInvalidData = errors.Base("Invalid data")

type availabilityRequest struct {
	LbrycrdAddress bool `json:"lbrycrd_address"`

@@ -408,13 +393,13 @@ type blobResponse struct {
type compositeRequest struct {
	LbrycrdAddress      bool     `json:"lbrycrd_address"`
	RequestedBlobs      []string `json:"requested_blobs"`
	BlobDataPaymentRate *float64 `json:"blob_data_payment_rate"`
	BlobDataPaymentRate float64  `json:"blob_data_payment_rate"`
	RequestedBlob       string   `json:"requested_blob"`
}

type compositeResponse struct {
	LbrycrdAddress      string        `json:"lbrycrd_address,omitempty"`
	AvailableBlobs      []string      `json:"available_blobs"`
	BlobDataPaymentRate string        `json:"blob_data_payment_rate,omitempty"`
	IncomingBlob        *incomingBlob `json:"incoming_blob,omitempty"`
	LbrycrdAddress      string       `json:"lbrycrd_address,omitempty"`
	AvailableBlobs      []string     `json:"available_blobs,omitempty"`
	BlobDataPaymentRate string       `json:"blob_data_payment_rate,omitempty"`
	IncomingBlob        incomingBlob `json:"incoming_blob,omitempty"`
}
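The switch from `float64` to `*float64` for `BlobDataPaymentRate` is what makes the `request.BlobDataPaymentRate != nil` check in `handleCompositeRequest` possible: with a pointer, a client that omitted the field is distinguishable from one that explicitly sent 0. A small self-contained illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type compositeRequest struct {
	BlobDataPaymentRate *float64 `json:"blob_data_payment_rate"`
}

func main() {
	var omitted, zero compositeRequest
	_ = json.Unmarshal([]byte(`{}`), &omitted)
	_ = json.Unmarshal([]byte(`{"blob_data_payment_rate": 0}`), &zero)

	// With a plain float64 both would decode to 0 and be indistinguishable.
	fmt.Println(omitted.BlobDataPaymentRate == nil) // true: field absent
	fmt.Println(*zero.BlobDataPaymentRate)          // 0: field present
}
```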
@@ -2,10 +2,7 @@ package peer

import (
	"bytes"
	"io"
	"net"
	"testing"
	"time"

	"github.com/lbryio/reflector.go/store"
)

@@ -78,62 +75,3 @@ func TestAvailabilityRequest_WithBlobs(t *testing.T) {
		}
	}
}

func TestRequestFromConnection(t *testing.T) {
	s := getServer(t, true)
	err := s.Start("127.0.0.1:50505")
	defer s.Shutdown()
	if err != nil {
		t.Error("error starting server", err)
	}

	for _, p := range availabilityRequests {
		conn, err := net.Dial("tcp", "127.0.0.1:50505")
		if err != nil {
			t.Error("error opening connection", err)
		}
		defer func() { _ = conn.Close() }()

		response := make([]byte, 8192)
		_, err = conn.Write(p.request)
		if err != nil {
			t.Error("error writing", err)
		}
		_, err = conn.Read(response)
		if err != nil {
			t.Error("error reading", err)
		}
		if !bytes.Equal(response[:len(p.response)], p.response) {
			t.Errorf("Response did not match expected response.\nExpected: %s\nGot: %s", string(p.response), string(response))
		}
	}
}

func TestInvalidData(t *testing.T) {
	s := getServer(t, true)
	err := s.Start("127.0.0.1:50503")
	defer s.Shutdown()
	if err != nil {
		t.Error("error starting server", err)
	}
	conn, err := net.Dial("tcp", "127.0.0.1:50503")
	if err != nil {
		t.Error("error opening connection", err)
	}
	defer func() { _ = conn.Close() }()

	response := make([]byte, 8192)
	_, err = conn.Write([]byte("hello dear server, I would like blobs. Please"))
	if err != nil {
		t.Error("error writing", err)
	}
	err = conn.SetReadDeadline(time.Now().Add(5 * time.Second))
	if err != nil {
		t.Error("error setting read deadline", err)
	}
	_, err = conn.Read(response)
	if err != io.EOF {
		t.Error("error reading", err)
	}
	println(response)
}
@@ -4,11 +4,10 @@ import (
	"strings"
	"time"

	"github.com/lbryio/reflector.go/shared"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/shared"
	"github.com/lbryio/reflector.go/store"
)

// Store is a blob store that gets blobs from a peer.

@@ -42,7 +41,7 @@ func (p *Store) Has(hash string) (bool, error) {
	if err != nil {
		return false, err
	}
	defer func() { _ = c.Close() }()
	defer c.Close()
	return c.HasBlob(hash)
}

@@ -53,7 +52,7 @@ func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	if err != nil {
		return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
	}
	defer func() { _ = c.Close() }()
	defer c.Close()
	blob, trace, err := c.GetBlob(hash)
	if err != nil && strings.Contains(err.Error(), "blob not found") {
		return nil, trace, store.ErrBlobNotFound

@@ -77,6 +76,7 @@ func (p *Store) Delete(hash string) error {
	return errors.Err(shared.ErrNotImplemented)
}

// Shutdown is not supported
// Delete is not supported
func (p *Store) Shutdown() {
	return
}
@@ -5,14 +5,14 @@ import (
	"strconv"
	"sync"

	"github.com/lbryio/reflector.go/cluster"
	"github.com/lbryio/reflector.go/db"
	"github.com/lbryio/reflector.go/reflector"
	"github.com/lbryio/reflector.go/server/peer"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/dht"
	"github.com/lbryio/lbry.go/v2/dht/bits"
	"github.com/lbryio/reflector.go/cluster"
	"github.com/lbryio/reflector.go/db"
	"github.com/lbryio/reflector.go/peer"
	"github.com/lbryio/reflector.go/reflector"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/extras/stop"
@@ -4,9 +4,8 @@ import (
	"math/big"
	"testing"

	"github.com/lbryio/lbry.go/v2/dht/bits"

	"github.com/davecgh/go-spew/spew"
	"github.com/lbryio/lbry.go/v2/dht/bits"
)

func TestAnnounceRange(t *testing.T) {
@@ -22,23 +22,24 @@ import (
	"github.com/golang/protobuf/proto"
)

/* TODO:
import cert from wallet
get all utxos from chainquery
create transaction
sign it with the channel
track state of utxos across publishes from this channel so that we can just do one query to get utxos
prioritize only confirmed utxos
var TODO = `
import cert from wallet
get all utxos from chainquery
create transaction
sign it with the channel
track state of utxos across publishes from this channel so that we can just do one query to get utxos
prioritize only confirmed utxos

Handling all the issues we handle currently with lbrynet:
"Couldn't find private key for id",
"You already have a stream claim published under the name",
"Cannot publish using channel",
"txn-mempool-conflict",
"too-long-mempool-chain",
"Missing inputs",
"Not enough funds to cover this transaction",
*/
Handling all the issues we handle currently with lbrynet:
"Couldn't find private key for id",
"You already have a stream claim published under the name",
"Cannot publish using channel",
"txn-mempool-conflict",
"too-long-mempool-chain",
"Missing inputs",
"Not enough funds to cover this transaction",
}
`

type Details struct {
	Title string

@@ -115,7 +116,7 @@ func Publish(client *lbrycrd.Client, path, name, address string, details Details
	return signedTx, txid, nil
}

// TODO: lots of assumptions. hardcoded values need to be passed in or calculated
//TODO: lots of assumptions. hardcoded values need to be passed in or calculated
func baseTx(client *lbrycrd.Client, amount float64, changeAddress btcutil.Address) (*wire.MsgTx, error) {
	txFee := 0.0002 // TODO: estimate this better?

@@ -222,7 +223,8 @@ func makeStream(path string) (stream.Stream, *pb.Stream, error) {
	if err != nil {
		return nil, nil, errors.Err(err)
	}
	defer func() { _ = file.Close() }()
	defer file.Close()

	enc := stream.NewEncoder(file)

	s, err := enc.Stream()
102 readme.md
@@ -1,110 +1,25 @@
# Reflector

Reflector is a central piece of software that provides LBRY with the following features:
- Blob reflection: when something is published, we capture the data and store it on our servers for quicker retrieval
- Blob distribution: when a piece of content is requested and the LBRY network doesn't have it, reflector will retrieve it from its storage and distribute it
- Blob caching: reflectors can be chained together in multiple regions or servers to form a chain of cached content. We call those "blobcaches". They are layered so that content distribution is favorable in all the regions we deploy it to

There are a few other features embedded in reflector.go, including publishing streams from Go, downloading or uploading blobs, resolving content, and more unfinished tools.

This code includes Go implementations of the LBRY peer protocol, reflector protocol, and DHT.
A reflector cluster to accept LBRY content for hosting en masse, rehost the content, and make money on data fees (TODO).
This code includes Go implementations of the LBRY peer protocol, reflector protocol, and DHT.

## Installation

- Install MySQL 8 (5.7 might work too)
- Add a reflector user and database with password `reflector` with localhost access only
- Create the tables as described [here](https://github.com/lbryio/reflector.go/blob/master/db/db.go#L735) (the link might not update as the code does, so just look for the schema in that file)

#### We do not support running reflector.go as a blob receiver; however, if you want to run it as a private blobcache, you may compile it yourself and run it as follows:
```bash
./prism-bin reflector \
  --conf="none" \
  --disable-uploads=true \
  --use-db=false \
  --upstream-reflector="reflector.lbry.com" \
  --upstream-protocol="http" \
  --request-queue-size=200 \
  --disk-cache="2GB:/path/to/your/storage/:localdb" \
```

Create a systemd script if you want to run it automatically on startup or as a service.
coming soon
## Usage

Usage as reflector/blobcache:
```bash
Run reflector server
coming soon

Usage:
  prism reflector [flags]

Flags:
      --disable-blocklist                 Disable blocklist watching/updating
      --disable-uploads                   Disable uploads to this reflector server
      --disk-cache string                 Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (default "100GB:/tmp/downloaded_blobs:localdb")
  -h, --help                              help for reflector
      --http-peer-port int                The port reflector will distribute content from over HTTP protocol (default 5569)
      --http3-peer-port int               The port reflector will distribute content from over HTTP3 protocol (default 5568)
      --mem-cache int                     enable in-memory cache with a max size of this many blobs
      --metrics-port int                  The port reflector will use for prometheus metrics (default 2112)
      --optional-disk-cache string        Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (this would get hit before the one specified in disk-cache)
      --origin-endpoint string            HTTP edge endpoint for standard HTTP retrieval
      --origin-endpoint-fallback string   HTTP edge endpoint for standard HTTP retrieval if first origin fails
      --receiver-port int                 The port reflector will receive content from (default 5566)
      --request-queue-size int            How many concurrent requests from downstream should be handled at once (the rest will wait) (default 200)
      --tcp-peer-port int                 The port reflector will distribute content from for the TCP (LBRY) protocol (default 5567)
      --upstream-protocol string          protocol used to fetch blobs from another upstream reflector server (tcp/http3/http) (default "http")
      --upstream-reflector string         host:port of a reflector server where blobs are fetched from
      --use-db                            Whether to connect to the reflector db or not (default true)

Global Flags:
      --conf string       Path to config. Use 'none' to disable (default "config.json")
  -v, --verbose strings   Verbose logging for specific components
```

Other uses:

```bash
Prism is a single entry point application with multiple sub modules which can be leveraged individually or together

Usage:
  prism [command]

Available Commands:
  check-integrity  check blobs integrity for a given path
  cluster          Start(join) to or Start a new cluster
  decode           Decode a claim value
  dht              Run dht node
  getstream        Get a stream from a reflector server
  help             Help about any command
  peer             Run peer server
  populate-db      populate local database with blobs from a disk storage
  publish          Publish a file
  reflector        Run reflector server
  resolve          Resolve a URL
  send             Send a file to a reflector
  sendblob         Send a random blob to a reflector server
  start            Runs full prism application with cluster, dht, peer server, and reflector server.
  test             Test things
  upload           Upload blobs to S3
  version          Print the version

Flags:
      --conf string       Path to config. Use 'none' to disable (default "config.json")
  -h, --help              help for prism
  -v, --verbose strings   Verbose logging for specific components
```

## Running from Source

This project requires [Go v1.20](https://golang.org/doc/install).

On Ubuntu you can install it with `sudo snap install go --classic`
This project requires [Go v1.11](https://golang.org/doc/install) or higher because it uses Go modules.

```
git clone git@github.com:lbryio/reflector.go.git
cd reflector.go
make
./dist/linux_amd64/prism-bin
./bin/prism-bin
```

## Contributing

@@ -118,7 +33,8 @@ This project is MIT licensed.
## Security

We take security seriously. Please contact security@lbry.com regarding any security issues.
Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
Our PGP key is [here](https://keybase.io/lbry/key.asc) if you need it.

## Contact

The primary contact for this project is [@Nikooo777](https://github.com/Nikooo777) (niko-at-lbry.com)

The primary contact for this project is [@lyoshenka](https://github.com/lyoshenka) (grin@lbry.com)
@@ -9,6 +9,7 @@ import (
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"

	"github.com/lbryio/reflector.go/store"
	"github.com/lbryio/reflector.go/wallet"
@@ -1,81 +0,0 @@
package reflector

import (
	"encoding/json"
	"net/http"
	"time"

	"github.com/bluele/gcache"
	"github.com/lbryio/lbry.go/v2/extras/errors"
	"golang.org/x/sync/singleflight"
)

const protectedListURL = "https://api.odysee.com/file/list_protected"

type ProtectedContent struct {
	SDHash  string `json:"sd_hash"`
	ClaimID string `json:"claim_id"`
}

var protectedCache = gcache.New(10).Expiration(2 * time.Minute).Build()

func GetProtectedContent() (interface{}, error) {
	cachedVal, err := protectedCache.Get("protected")
	if err == nil && cachedVal != nil {
		return cachedVal.(map[string]bool), nil
	}

	method := "GET"
	var r struct {
		Success bool               `json:"success"`
		Error   string             `json:"error"`
		Data    []ProtectedContent `json:"data"`
	}

	client := &http.Client{}
	req, err := http.NewRequest(method, protectedListURL, nil)

	if err != nil {
		return nil, errors.Err(err)
	}
	res, err := client.Do(req)
	if err != nil {
		return nil, errors.Err(err)
	}
	defer func() { _ = res.Body.Close() }()
	if res.StatusCode != http.StatusOK {
		return nil, errors.Err("unexpected status code %d", res.StatusCode)
	}
	if err = json.NewDecoder(res.Body).Decode(&r); err != nil {
		return nil, errors.Err(err)
	}

	if !r.Success {
		return nil, errors.Prefix("file/list_protected API call", r.Error)
	}

	protectedMap := make(map[string]bool, len(r.Data))
	for _, pc := range r.Data {
		protectedMap[pc.SDHash] = true
	}
	err = protectedCache.Set("protected", protectedMap)
	if err != nil {
		return protectedMap, errors.Err(err)
	}
	return protectedMap, nil
}

var sf = singleflight.Group{}

func IsProtected(sdHash string) bool {
	val, err, _ := sf.Do("protected", GetProtectedContent)
	if err != nil {
		return false
	}
	cachedMap, ok := val.(map[string]bool)
	if !ok {
		return false
	}

	return cachedMap[sdHash]
}
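`IsProtected` funnels every lookup through a `singleflight.Group`, so when the cached list expires, any number of concurrent callers trigger at most one `list_protected` API call instead of a stampede. A stripped-down illustration of the primitive; the fetch function here is a stand-in, not the real API call:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var sf singleflight.Group

// refresh stands in for the list_protected API call; singleflight ensures
// that concurrent callers share a single execution of it.
func refresh() (interface{}, error) {
	fmt.Println("fetching protected list")
	return map[string]bool{"somesdhash": true}, nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err, _ := sf.Do("protected", refresh)
			if err == nil {
				_ = v.(map[string]bool)
			}
		}()
	}
	wg.Wait() // "fetching protected list" prints once for each overlapping batch
}
```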
@@ -6,9 +6,11 @@ import (
	"encoding/hex"
	"encoding/json"
	"io"
	"io/ioutil"
	"net"
	"time"

	"github.com/google/gops/agent"
	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/store"

@@ -61,13 +63,16 @@ func (s *Server) Shutdown() {
	log.Println("reflector server stopped")
}

// Start starts the server to handle connections.
//Start starts the server to handle connections.
func (s *Server) Start(address string) error {
	l, err := net.Listen(network, address)
	if err != nil {
		return errors.Err(err)
	}
	log.Println("reflector listening on " + address)
	if err := agent.Listen(agent.Options{}); err != nil {
		log.Fatal(err)
	}
	s.grp.Add(1)
	metrics.RoutinesQueue.WithLabelValues("reflector", "listener").Inc()
	go func() {

@@ -118,7 +123,7 @@ func (s *Server) listenAndServe(listener net.Listener) {
	s.grp.Add(1)
	metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
	go func() {
		defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Dec()
		defer metrics.RoutinesQueue.WithLabelValues("reflector", "server-listenandserve").Inc()
		s.handleConn(conn)
		s.grp.Done()
	}()

@@ -366,7 +371,7 @@ func (s *Server) read(conn net.Conn, v interface{}) error {
	dec := json.NewDecoder(conn)
	err = dec.Decode(v)
	if err != nil {
		data, _ := io.ReadAll(dec.Buffered())
		data, _ := ioutil.ReadAll(dec.Buffered())
		if len(data) > 0 {
			return errors.Err("%s. Data: %s", err.Error(), hex.EncodeToString(data))
		}
@@ -9,9 +9,8 @@ import (
	"testing"
	"time"

	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/dht/bits"
	"github.com/lbryio/reflector.go/store"

	"github.com/davecgh/go-spew/spew"
	"github.com/phayes/freeport"
@@ -1,13 +1,15 @@
package reflector

import (
	"io/ioutil"
	"os"
	"path"
	"sync"
	"time"

	"github.com/lbryio/reflector.go/db"
	"github.com/lbryio/reflector.go/internal/metrics"

	"github.com/lbryio/reflector.go/db"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/errors"

@@ -164,7 +166,7 @@ func (u *Uploader) uploadBlob(filepath string) (err error) {
		}
	}()

	blob, err := os.ReadFile(filepath)
	blob, err := ioutil.ReadFile(filepath)
	if err != nil {
		return errors.Err(err)
	}
@@ -1,26 +0,0 @@
#!/usr/bin/env bash

err=0
trap 'err=1' ERR
# All the .go files, excluding auto generated folders
GO_FILES=$(find . -iname '*.go' -type f)
(
  go install golang.org/x/tools/cmd/goimports@latest # Used in build script for generated files
  # go install golang.org/x/lint/golint@latest # Linter
  go install github.com/jgautheron/gocyclo@latest # Check against high complexity
  go install github.com/mdempsky/unconvert@latest # Identifies unnecessary type conversions
  go install github.com/kisielk/errcheck@latest # Checks for unhandled errors
  go install github.com/opennota/check/cmd/varcheck@latest # Checks for unused vars
  go install github.com/opennota/check/cmd/structcheck@latest # Checks for unused fields in structs
)
echo "Running varcheck..." && varcheck $(go list ./...)
echo "Running structcheck..." && structcheck $(go list ./...)
# go vet is the official Go static analyzer
echo "Running go vet..." && go vet $(go list ./...)
# checks for unhandled errors
echo "Running errcheck..." && errcheck $(go list ./...)
# check for unnecessary conversions - ignore autogen code
echo "Running unconvert..." && unconvert -v $(go list ./...)
echo "Running gocyclo..." && gocyclo -ignore "_test" -avg -over 28 $GO_FILES
#echo "Running golint..." && golint -set_exit_status $(go list ./...)
test $err = 0 # Return non-zero if any command failed
@@ -2,76 +2,38 @@ package http

import (
	"net/http"
	"sync"
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/reflector"
	"github.com/lbryio/reflector.go/shared"
	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/errors"

	"github.com/gin-gonic/gin"
	"github.com/lbryio/lbry.go/v2/extras/errors"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/store"

	log "github.com/sirupsen/logrus"
)

func (s *Server) getBlob(c *gin.Context) {
	waiter := &sync.WaitGroup{}
	waiter.Add(1)
	enqueue(&blobRequest{c: c, finished: waiter})
	waiter.Wait()
}

func (s *Server) HandleGetBlob(c *gin.Context) {
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("Recovered from panic: %v", r)
		}
	}()
	start := time.Now()
	hash := c.Query("hash")
	edgeToken := c.Query("edge_token")

	if reflector.IsProtected(hash) && edgeToken != s.edgeToken {
		_ = c.Error(errors.Err("requested blob is protected"))
		c.String(http.StatusForbidden, "requested blob is protected")
		return
	}
	if s.missesCache.Has(hash) {
		serialized, err := shared.NewBlobTrace(time.Since(start), "http").Serialize()
		c.Header("Via", serialized)
		if err != nil {
			_ = c.Error(errors.Err(err))
			c.String(http.StatusInternalServerError, err.Error())
			return
		}
		c.AbortWithStatus(http.StatusNotFound)
		return
	}
	blob, trace, err := s.store.Get(hash)
	if err != nil {
		serialized, serializeErr := trace.Serialize()
		if serializeErr != nil {
			_ = c.Error(errors.Prefix(serializeErr.Error(), err))
			c.String(http.StatusInternalServerError, errors.Prefix(serializeErr.Error(), err).Error())
			_ = c.AbortWithError(http.StatusInternalServerError, errors.Prefix(serializeErr.Error(), err))
			return
		}
		c.Header("Via", serialized)

		if errors.Is(err, store.ErrBlobNotFound) {
			_ = s.missesCache.Set(hash, true)
			log.Errorf("wtf: %s", err.Error())
			c.AbortWithStatus(http.StatusNotFound)
			return
		}
		_ = c.Error(err)
		c.String(http.StatusInternalServerError, err.Error())
		_ = c.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	serialized, err := trace.Serialize()
	if err != nil {
		_ = c.Error(err)
		c.String(http.StatusInternalServerError, err.Error())
		_ = c.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	metrics.MtrOutBytesHttp.Add(float64(len(blob)))

@@ -86,8 +48,7 @@ func (s *Server) hasBlob(c *gin.Context) {
	hash := c.Query("hash")
	has, err := s.store.Has(hash)
	if err != nil {
		_ = c.Error(err)
		c.String(http.StatusInternalServerError, err.Error())
		_ = c.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	if has {

@@ -96,10 +57,3 @@ func (s *Server) hasBlob(c *gin.Context) {
	}
	c.Status(http.StatusNotFound)
}

func (s *Server) recoveryHandler(c *gin.Context, err interface{}) {
	c.JSON(500, gin.H{
		"title": "Error",
		"err":   err,
	})
}
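`missesCache` is a negative cache: a hash that was recently confirmed missing is answered with 404 straight away, sparing the store (and any upstream origin) from repeated lookups for content that does not exist. A reduced sketch of the idea, reusing the same gcache configuration as `NewServer` below; the error handling is deliberately simplified here, treating any store error as a miss, which the real handler does not do:

```go
package main

import (
	"fmt"
	"time"

	"github.com/bluele/gcache"
)

// Remember recent misses so repeated requests for a missing blob
// skip the store entirely until the entry expires.
var missesCache = gcache.New(2000).Expiration(5 * time.Minute).ARC().Build()

func getBlob(hash string, fromStore func(string) ([]byte, error)) ([]byte, error) {
	if missesCache.Has(hash) {
		return nil, fmt.Errorf("blob not found (cached miss)")
	}
	blob, err := fromStore(hash)
	if err != nil {
		_ = missesCache.Set(hash, true) // simplification: assume err means not found
		return nil, err
	}
	return blob, nil
}

func main() {
	lookups := 0
	fetch := func(string) ([]byte, error) { lookups++; return nil, fmt.Errorf("blob not found") }
	_, _ = getBlob("somehash", fetch)
	_, _ = getBlob("somehash", fetch)
	fmt.Println(lookups) // 1: the second request hit the negative cache
}
```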
@@ -5,33 +5,23 @@ import (
	"net/http"
	"time"

	"github.com/lbryio/reflector.go/store"

	"github.com/lbryio/lbry.go/v2/extras/stop"

	"github.com/bluele/gcache"
	nice "github.com/ekyoung/gin-nice-recovery"
	"github.com/gin-gonic/gin"
	"github.com/lbryio/lbry.go/v2/extras/stop"
	"github.com/lbryio/reflector.go/store"
	log "github.com/sirupsen/logrus"
)

// Server is an instance of a peer server that houses the listener and store.
type Server struct {
	store              store.BlobStore
	grp                *stop.Group
	concurrentRequests int
	missesCache        gcache.Cache
	edgeToken          string
	store store.BlobStore
	grp   *stop.Group
}

// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore, requestQueueSize int, edgeToken string) *Server {
func NewServer(store store.BlobStore) *Server {
	return &Server{
		store:              store,
		grp:                stop.New(),
		concurrentRequests: requestQueueSize,
		missesCache:        gcache.New(2000).Expiration(5 * time.Minute).ARC().Build(),
		edgeToken:          edgeToken,
		store: store,
		grp:   stop.New(),
	}
}

@@ -45,10 +35,7 @@ func (s *Server) Shutdown() {
// Start starts the server listener to handle connections.
func (s *Server) Start(address string) error {
	gin.SetMode(gin.ReleaseMode)
	router := gin.New()
	router.Use(gin.Logger())
	// Install nice.Recovery, passing the handler to call after recovery
	router.Use(nice.Recovery(s.recoveryHandler))
	router := gin.Default()
	router.GET("/blob", s.getBlob)
	router.HEAD("/blob", s.hasBlob)
	srv := &http.Server{

@@ -56,7 +43,6 @@ func (s *Server) Start(address string) error {
		Handler: router,
	}
	go s.listenForShutdown(srv)
	go InitWorkers(s, s.concurrentRequests)
	// Initializing the server in a goroutine so that
	// it won't block the graceful shutdown handling below
	s.grp.Add(1)
@@ -1,46 +0,0 @@
package http

import (
	"sync"

	"github.com/lbryio/reflector.go/internal/metrics"

	"github.com/lbryio/lbry.go/v2/extras/stop"

	"github.com/gin-gonic/gin"
)

type blobRequest struct {
	c        *gin.Context
	finished *sync.WaitGroup
}

var getReqCh = make(chan *blobRequest, 20000)

func InitWorkers(server *Server, workers int) {
	stopper := stop.New(server.grp)
	for i := 0; i < workers; i++ {
		metrics.RoutinesQueue.WithLabelValues("http", "worker").Inc()
		go func(worker int) {
			defer metrics.RoutinesQueue.WithLabelValues("http", "worker").Dec()
			for {
				select {
				case <-stopper.Ch():
				case r := <-getReqCh:
					process(server, r)
					metrics.HttpBlobReqQueue.Dec()
				}
			}
		}(i)
	}
}

func enqueue(b *blobRequest) {
	metrics.HttpBlobReqQueue.Inc()
	getReqCh <- b
}

func process(server *Server, r *blobRequest) {
	server.HandleGetBlob(r.c)
	r.finished.Done()
}
@@ -1,31 +1,39 @@
package shared

import (
	"os"
	"testing"
	"time"

	"github.com/lbryio/lbry.go/v2/extras/util"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestBlobTrace_Serialize(t *testing.T) {
	hostName = util.PtrToString("test_machine")
	hostName, err := os.Hostname()
	require.NoError(t, err)

	stack := NewBlobTrace(10*time.Second, "test")
	stack.Stack(20*time.Second, "test2")
	stack.Stack(30*time.Second, "test3")
	serialized, err := stack.Serialize()
	assert.NoError(t, err)
	t.Log(serialized)
	expected := "{\"stacks\":[{\"timing\":10000000000,\"origin_name\":\"test\",\"host_name\":\"test_machine\"},{\"timing\":20000000000,\"origin_name\":\"test2\",\"host_name\":\"test_machine\"},{\"timing\":30000000000,\"origin_name\":\"test3\",\"host_name\":\"test_machine\"}]}"
	require.NoError(t, err)

	expected := `{"stacks":[{"timing":10000000000,"origin_name":"test","host_name":"` +
		hostName +
		`"},{"timing":20000000000,"origin_name":"test2","host_name":"` +
		hostName +
		`"},{"timing":30000000000,"origin_name":"test3","host_name":"` +
		hostName +
		`"}]}`
	assert.Equal(t, expected, serialized)
}

func TestBlobTrace_Deserialize(t *testing.T) {
	hostName = util.PtrToString("test_machine")
	serialized := "{\"stacks\":[{\"timing\":10000000000,\"origin_name\":\"test\"},{\"timing\":20000000000,\"origin_name\":\"test2\"},{\"timing\":30000000000,\"origin_name\":\"test3\"}]}"
	serialized := `{"stacks":[{"timing":10000000000,"origin_name":"test"},{"timing":20000000000,"origin_name":"test2"},{"timing":30000000000,"origin_name":"test3"}]}`
	stack, err := Deserialize(serialized)
	assert.NoError(t, err)
	require.NoError(t, err)

	assert.Len(t, stack.Stacks, 3)
	assert.Equal(t, stack.Stacks[0].Timing, 10*time.Second)
	assert.Equal(t, stack.Stacks[1].Timing, 20*time.Second)
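The tests above double as a usage reference for the trace API. A short sketch, assuming only the calls visible in this diff (NewBlobTrace, Stack, Serialize, and the "Via" header that HttpStore.Get reads further down); the fmt/log/time imports and function name are illustrative:

	// Sketch: attach this hop's timing to a trace and serialize it to
	// JSON, e.g. for a Via response header a downstream node can parse.
	func traceExample() {
		trace := shared.NewBlobTrace(10*time.Millisecond, "disk")
		trace.Stack(25*time.Millisecond, "http")
		via, err := trace.Serialize()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(via)
	}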
@@ -1,4 +1,3 @@
//go:build linux
// +build linux

package store

@@ -10,7 +9,7 @@ import (
)

func timespecToTime(ts syscall.Timespec) time.Time {
	return time.Unix(ts.Sec, ts.Nsec)
	return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}

func atime(fi os.FileInfo) time.Time {
@@ -3,13 +3,12 @@ package store
import (
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"github.com/lbryio/reflector.go/shared"
	log "github.com/sirupsen/logrus"

	"github.com/lbryio/reflector.go/internal/metrics"
)

// CachingStore combines two stores, typically a local and a remote store, to improve performance.

@@ -65,11 +64,16 @@ func (c *CachingStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	if err != nil {
		return nil, trace.Stack(time.Since(start), c.Name()), err
	}
	// do not do this async unless you're prepared to deal with mayhem
	err = c.cache.Put(hash, blob)
	if err != nil {
		log.Errorf("error saving blob to underlying cache: %s", errors.FullTrace(err))
	}
	// there is no need to wait for the blob to be stored before we return it
	// TODO: however this should be refactored to limit the amount of routines that the process can spawn to avoid a possible DoS
	metrics.RoutinesQueue.WithLabelValues("store", "cache-put").Inc()
	go func() {
		defer metrics.RoutinesQueue.WithLabelValues("store", "cache-put").Dec()
		err = c.cache.Put(hash, blob)
		if err != nil {
			log.Errorf("error saving blob to underlying cache: %s", errors.FullTrace(err))
		}
	}()
	return blob, trace.Stack(time.Since(start), c.Name()), nil
}

@@ -104,4 +108,5 @@ func (c *CachingStore) Delete(hash string) error {
func (c *CachingStore) Shutdown() {
	c.origin.Shutdown()
	c.cache.Shutdown()
	return
}
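The Inc/Dec pair wrapped around the goroutine above is a reusable pattern: a gauge tracks how many background routines are live at any moment, which is exactly what the TODO about bounding spawned routines needs visibility into. A self-contained sketch of the same idea; the metric and task names here are illustrative, not from reflector.go:

	package main

	import (
		"fmt"
		"sync"

		"github.com/prometheus/client_golang/prometheus"
	)

	var routines = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "routines_running", Help: "live background routines per task"},
		[]string{"task"},
	)

	// spawn runs fn in a goroutine while keeping the gauge accurate:
	// increment before starting, decrement when the goroutine exits.
	func spawn(task string, wg *sync.WaitGroup, fn func()) {
		routines.WithLabelValues(task).Inc()
		wg.Add(1)
		go func() {
			defer routines.WithLabelValues(task).Dec()
			defer wg.Done()
			fn()
		}()
	}

	func main() {
		prometheus.MustRegister(routines)
		var wg sync.WaitGroup
		spawn("cache-put", &wg, func() { fmt.Println("storing blob...") })
		wg.Wait()
	}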
@@ -6,9 +6,8 @@ import (
	"testing"
	"time"

	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/shared"
)

func TestCachingStore_Put(t *testing.T) {
@@ -2,16 +2,16 @@ package store

import (
	"io"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/meta"
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	log "github.com/sirupsen/logrus"
)

@@ -36,7 +36,8 @@ func (c *CloudFrontROStore) Has(hash string) (bool, error) {
	if err != nil {
		return false, err
	}
	defer func() { _ = body.Close() }()
	defer body.Close()

	switch status {
	case http.StatusNotFound, http.StatusForbidden:
		return false, nil

@@ -59,12 +60,12 @@ func (c *CloudFrontROStore) Get(hash string) (stream.Blob, shared.BlobTrace, err
	if err != nil {
		return nil, shared.NewBlobTrace(time.Since(start), c.Name()), err
	}
	defer func() { _ = body.Close() }()
	defer body.Close()
	switch status {
	case http.StatusNotFound, http.StatusForbidden:
		return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(ErrBlobNotFound)
	case http.StatusOK:
		b, err := io.ReadAll(body)
		b, err := ioutil.ReadAll(body)
		if err != nil {
			return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(err)
		}

@@ -81,7 +82,7 @@ func (c *CloudFrontROStore) cfRequest(method, hash string) (int, io.ReadCloser,
	if err != nil {
		return 0, nil, errors.Err(err)
	}
	req.Header.Add("User-Agent", "reflector.go/"+meta.Version())
	req.Header.Add("User-Agent", "reflector.go/"+meta.Version)

	res, err := http.DefaultClient.Do(req)
	if err != nil {

@@ -105,4 +106,5 @@ func (c *CloudFrontROStore) Delete(_ string) error {

// Shutdown shuts down the store gracefully
func (c *CloudFrontROStore) Shutdown() {
	return
}
@@ -3,9 +3,8 @@ package store
import (
	"time"

	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/shared"
)

// CloudFrontRWStore combines a Cloudfront and an S3 store. Reads go to Cloudfront/Wasabi, writes go to S3.

@@ -59,4 +58,5 @@ func (c *CloudFrontRWStore) Delete(hash string) error {
func (c *CloudFrontRWStore) Shutdown() {
	c.s3.Shutdown()
	c.cf.Shutdown()
	return
}
@@ -5,11 +5,10 @@ import (
	"sync"
	"time"

	"github.com/lbryio/reflector.go/db"
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/db"
	"github.com/lbryio/reflector.go/shared"

	log "github.com/sirupsen/logrus"
)

@@ -112,22 +111,22 @@ func (d *DBBackedStore) Block(hash string) error {
		return err
	}

	//has, err := d.db.HasBlob(hash, false)
	//if err != nil {
	//	return err
	//}
	//
	//if has {
	//	err = d.blobs.Delete(hash)
	//	if err != nil {
	//		return err
	//	}
	//
	//	err = d.db.Delete(hash)
	//	if err != nil {
	//		return err
	//	}
	//}
	has, err := d.db.HasBlob(hash, false)
	if err != nil {
		return err
	}

	if has {
		err = d.blobs.Delete(hash)
		if err != nil {
			return err
		}

		err = d.db.Delete(hash)
		if err != nil {
			return err
		}
	}

	return d.markBlocked(hash)
}

@@ -198,4 +197,5 @@ func (d *DBBackedStore) initBlocked() error {
// Shutdown shuts down the store gracefully
func (d *DBBackedStore) Shutdown() {
	d.blobs.Shutdown()
	return
}
@@ -1,17 +1,42 @@
package store

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"runtime"
	"time"

	"github.com/lbryio/reflector.go/shared"
	"github.com/lbryio/reflector.go/store/speedwalk"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/shared"
	"github.com/lbryio/reflector.go/store/speedwalk"
	log "github.com/sirupsen/logrus"
	"go.uber.org/atomic"
)

func init() {
	writeCh = make(chan writeRequest)
	for i := 0; i < runtime.NumCPU(); i++ {
		go func() {
			for {
				select {
				case r := <-writeCh:
					err := ioutil.WriteFile(r.filename, r.data, r.perm)
					if err != nil {
						log.Errorf("could not write file %s to disk, failed with error: %s", r.filename, err.Error())
					}
				}
			}
		}()
	}
}

var writeCh chan writeRequest

// DiskStore stores blobs on a local disk
type DiskStore struct {
	// the location of blobs on disk

@@ -21,8 +46,12 @@ type DiskStore struct {

	// true if initOnce ran, false otherwise
	initialized bool

	concurrentChecks atomic.Int32
}

const maxConcurrentChecks = 3

// NewDiskStore returns an initialized file disk store pointer.
func NewDiskStore(dir string, prefixLength int) *DiskStore {
	return &DiskStore{

@@ -61,16 +90,50 @@ func (d *DiskStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
		return nil, shared.NewBlobTrace(time.Since(start), d.Name()), err
	}

	blob, err := os.ReadFile(d.path(hash))
	blob, err := ioutil.ReadFile(d.path(hash))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(ErrBlobNotFound)
		}
		return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(err)
	}

	// this is a rather poor yet effective way of throttling how many blobs can be checked concurrently
	// poor because there is a possible race condition between the check and the actual +1
	if d.concurrentChecks.Load() < maxConcurrentChecks {
		d.concurrentChecks.Add(1)
		defer d.concurrentChecks.Sub(1)
		hashBytes := sha512.Sum384(blob)
		readHash := hex.EncodeToString(hashBytes[:])
		if hash != readHash {
			message := fmt.Sprintf("[%s] found a broken blob while reading from disk. Actual hash: %s", hash, readHash)
			log.Errorf("%s", message)
			err := d.Delete(hash)
			if err != nil {
				return nil, shared.NewBlobTrace(time.Since(start), d.Name()), err
			}
			return nil, shared.NewBlobTrace(time.Since(start), d.Name()), errors.Err(message)
		}
	}

	return blob, shared.NewBlobTrace(time.Since(start), d.Name()), nil
}

// Put stores the blob on disk
func (d *DiskStore) Put(hash string, blob stream.Blob) error {
	err := d.initOnce()
	if err != nil {
		return err
	}

	err = d.ensureDirExists(d.dir(hash))
	if err != nil {
		return err
	}
	writeFile(d.path(hash), blob, 0644)
	return errors.Err(err)
}

// PutSD stores the sd blob on the disk
func (d *DiskStore) PutSD(hash string, blob stream.Blob) error {
	return d.Put(hash, blob)

@@ -111,15 +174,11 @@ func (d *DiskStore) dir(hash string) string {
	}
	return path.Join(d.blobDir, hash[:d.prefixLength])
}
func (d *DiskStore) tmpDir(hash string) string {
	return path.Join(d.blobDir, "tmp")
}

func (d *DiskStore) path(hash string) string {
	return path.Join(d.dir(hash), hash)
}
func (d *DiskStore) tmpPath(hash string) string {
	return path.Join(d.tmpDir(hash), hash)
}

func (d *DiskStore) ensureDirExists(dir string) error {
	return errors.Err(os.MkdirAll(dir, 0755))
}

@@ -133,14 +192,26 @@ func (d *DiskStore) initOnce() error {
	if err != nil {
		return err
	}
	err = d.ensureDirExists(path.Join(d.blobDir, "tmp"))
	if err != nil {
		return err
	}

	d.initialized = true
	return nil
}

type writeRequest struct {
	filename string
	data     []byte
	perm     os.FileMode
}

// Shutdown shuts down the store gracefully
func (d *DiskStore) Shutdown() {
	return
}

func writeFile(filename string, data []byte, perm os.FileMode) {
	writeCh <- writeRequest{
		filename: filename,
		data:     data,
		perm:     perm,
	}
}
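The comment in Get concedes a race between the Load and the Add, accepting that slightly more than maxConcurrentChecks integrity checks may run at once. If exactness mattered, a compare-and-swap loop would close the gap; a sketch using the standard library's sync/atomic rather than the go.uber.org/atomic wrapper used above:

	// tryAcquire takes one of max slots without the check-then-increment
	// race: the swap only succeeds if the counter is unchanged since the read.
	func tryAcquire(counter *int32, max int32) bool {
		for {
			cur := atomic.LoadInt32(counter)
			if cur >= max {
				return false // all slots busy; caller skips the integrity check
			}
			if atomic.CompareAndSwapInt32(counter, cur, cur+1) {
				return true // caller must atomic.AddInt32(counter, -1) when done
			}
			// lost the race to another goroutine; re-read and retry
		}
	}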
@@ -1,6 +1,7 @@
package store

import (
	"io/ioutil"
	"os"
	"path"
	"path/filepath"

@@ -13,9 +14,9 @@ import (
)

func TestDiskStore_Get(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "reflector_test_*")
	tmpDir, err := ioutil.TempDir("", "reflector_test_*")
	require.NoError(t, err)
	defer func() { _ = os.RemoveAll(tmpDir) }()
	defer os.RemoveAll(tmpDir)
	d := NewDiskStore(tmpDir, 2)

	hash := "f428b8265d65dad7f8ffa52922bba836404cbd62f3ecfe10adba6b444f8f658938e54f5981ac4de39644d5b93d89a94b"

@@ -24,7 +25,7 @@ func TestDiskStore_Get(t *testing.T) {
	expectedPath := path.Join(tmpDir, hash[:2], hash)
	err = os.MkdirAll(filepath.Dir(expectedPath), os.ModePerm)
	require.NoError(t, err)
	err = os.WriteFile(expectedPath, data, os.ModePerm)
	err = ioutil.WriteFile(expectedPath, data, os.ModePerm)
	require.NoError(t, err)

	blob, _, err := d.Get(hash)

@@ -33,9 +34,9 @@ func TestDiskStore_Get(t *testing.T) {
}

func TestDiskStore_GetNonexistentBlob(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "reflector_test_*")
	tmpDir, err := ioutil.TempDir("", "reflector_test_*")
	require.NoError(t, err)
	defer func() { _ = os.RemoveAll(tmpDir) }()
	defer os.RemoveAll(tmpDir)
	d := NewDiskStore(tmpDir, 2)

	blob, _, err := d.Get("nonexistent")
@@ -1,42 +0,0 @@
//go:build darwin
// +build darwin

package store

import (
	"bytes"
	"io"
	"os"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"
)

var openFileFlags = os.O_WRONLY | os.O_CREATE

// Put stores the blob on disk
func (d *DiskStore) Put(hash string, blob stream.Blob) error {
	err := d.initOnce()
	if err != nil {
		return err
	}

	err = d.ensureDirExists(d.dir(hash))
	if err != nil {
		return err
	}

	// Open file with O_DIRECT
	f, err := os.OpenFile(d.tmpPath(hash), openFileFlags, 0644)
	if err != nil {
		return errors.Err(err)
	}
	defer f.Close()

	_, err = io.Copy(f, bytes.NewReader(blob))
	if err != nil {
		return errors.Err(err)
	}
	err = os.Rename(d.tmpPath(hash), d.path(hash))
	return errors.Err(err)
}
@@ -1,49 +0,0 @@
//go:build linux
// +build linux

package store

import (
	"bytes"
	"io"
	"os"
	"syscall"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"github.com/brk0v/directio"
)

var openFileFlags = os.O_WRONLY | os.O_CREATE | syscall.O_DIRECT

// Put stores the blob on disk
func (d *DiskStore) Put(hash string, blob stream.Blob) error {
	err := d.initOnce()
	if err != nil {
		return err
	}

	err = d.ensureDirExists(d.dir(hash))
	if err != nil {
		return err
	}

	// Open file with O_DIRECT
	f, err := os.OpenFile(d.tmpPath(hash), openFileFlags, 0644)
	if err != nil {
		return errors.Err(err)
	}
	defer func() { _ = f.Close() }()
	dio, err := directio.New(f)
	if err != nil {
		return errors.Err(err)
	}
	defer func() { _ = dio.Flush() }()
	_, err = io.Copy(dio, bytes.NewReader(blob))
	if err != nil {
		return errors.Err(err)
	}
	err = os.Rename(d.tmpPath(hash), d.path(hash))
	return errors.Err(err)
}
@@ -1,110 +0,0 @@
package store

import (
	"fmt"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/lbryio/lbry.go/v2/extras/errors"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const cacheMaxSize = 3

func getTestGcacheStore() (*GcacheStore, *MemStore) {
	m := NewMemStore()
	return NewGcacheStore("test", m, cacheMaxSize, LFU), m
}

func TestGcacheStore_Eviction(t *testing.T) {
	lfu, mem := getTestGcacheStore()
	b := []byte("x")
	for i := 0; i < 3; i++ {
		err := lfu.Put(fmt.Sprintf("%d", i), b)
		require.NoError(t, err)
		for j := 0; j < 3-i; j++ {
			_, _, err = lfu.Get(fmt.Sprintf("%d", i))
			require.NoError(t, err)
		}
	}
	for k, v := range map[string]bool{
		"0": true,
		"1": true,
		"2": true,
	} {
		has, err := lfu.Has(k)
		assert.NoError(t, err)
		assert.Equal(t, v, has)
	}
	err := lfu.Put("3", b)
	require.NoError(t, err)
	for k, v := range map[string]bool{
		"0": true,
		"1": true,
		"2": false,
		"3": true,
	} {
		has, err := lfu.Has(k)
		assert.NoError(t, err)
		assert.Equal(t, v, has)
	}
	assert.Equal(t, cacheMaxSize, len(mem.Debug()))

	err = lfu.Delete("0")
	assert.NoError(t, err)
	err = lfu.Delete("1")
	assert.NoError(t, err)
	err = lfu.Delete("3")
	assert.NoError(t, err)
	assert.Equal(t, 0, len(mem.Debug()))
}

func TestGcacheStore_UnderlyingBlobMissing(t *testing.T) {
	lfu, mem := getTestGcacheStore()
	hash := "hash"
	b := []byte("this is a blob of stuff")
	err := lfu.Put(hash, b)
	require.NoError(t, err)

	err = mem.Delete(hash)
	require.NoError(t, err)

	// hash still exists in lru
	assert.True(t, lfu.cache.Has(hash))

	blob, _, err := lfu.Get(hash)
	assert.Nil(t, blob)
	assert.True(t, errors.Is(err, ErrBlobNotFound), "expected (%s) %s, got (%s) %s",
		reflect.TypeOf(ErrBlobNotFound).String(), ErrBlobNotFound.Error(),
		reflect.TypeOf(err).String(), err.Error())

	// lru.Get() removes hash if underlying store doesn't have it
	assert.False(t, lfu.cache.Has(hash))
}

func TestGcacheStore_loadExisting(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "reflector_test_*")
	require.NoError(t, err)
	defer func() { _ = os.RemoveAll(tmpDir) }()
	d := NewDiskStore(tmpDir, 2)

	hash := "hash"
	b := []byte("this is a blob of stuff")
	err = d.Put(hash, b)
	require.NoError(t, err)

	existing, err := d.list()
	require.NoError(t, err)
	require.Equal(t, 1, len(existing), "blob should exist in cache")
	assert.Equal(t, hash, existing[0])

	lfu := NewGcacheStore("test", d, 3, LFU) // lru should load existing blobs when it's created
	time.Sleep(100 * time.Millisecond)       // async load so let's wait...
	has, err := lfu.Has(hash)
	require.NoError(t, err)
	assert.True(t, has, "hash should be loaded from disk store but it's not")
}
@@ -2,8 +2,8 @@ package store

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"sync"

@@ -16,24 +16,22 @@ import (
	"github.com/lbryio/lbry.go/v2/stream"
)

// HttpStore is a store that works on top of the HTTP protocol
// NoopStore is a store that does nothing
type HttpStore struct {
	upstream   string
	httpClient *http.Client
	edgeToken  string
}

func NewHttpStore(upstream, edgeToken string) *HttpStore {
func NewHttpStore(upstream string) *HttpStore {
	return &HttpStore{
		upstream:   "http://" + upstream,
		httpClient: getClient(),
		edgeToken:  edgeToken,
	}
}

const nameHttp = "http"

func (n *HttpStore) Name() string { return nameHttp }
func (n *HttpStore) Name() string { return nameNoop }
func (n *HttpStore) Has(hash string) (bool, error) {
	url := n.upstream + "/blob?hash=" + hash

@@ -46,7 +44,7 @@ func (n *HttpStore) Has(hash string) (bool, error) {
	if err != nil {
		return false, errors.Err(err)
	}
	defer func() { _ = res.Body.Close() }()
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return false, nil
	}

@@ -55,7 +53,7 @@ func (n *HttpStore) Has(hash string) (bool, error) {
	}
	var body []byte
	if res.Body != nil {
		body, _ = io.ReadAll(res.Body)
		body, _ = ioutil.ReadAll(res.Body)
	}
	return false, errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}

@@ -63,9 +61,6 @@ func (n *HttpStore) Has(hash string) (bool, error) {
func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	start := time.Now()
	url := n.upstream + "/blob?hash=" + hash
	if n.edgeToken != "" {
		url += "&edge_token=" + n.edgeToken
	}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {

@@ -76,7 +71,7 @@ func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	if err != nil {
		return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
	}
	defer func() { _ = res.Body.Close() }()
	defer res.Body.Close()
	tmp := getBuffer()
	defer putBuffer(tmp)
	serialized := res.Header.Get("Via")

@@ -105,7 +100,7 @@ func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	}
	var body []byte
	if res.Body != nil {
		body, _ = io.ReadAll(res.Body)
		body, _ = ioutil.ReadAll(res.Body)
	}

	return nil, trace.Stack(time.Since(start), n.Name()), errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))

@@ -120,7 +115,7 @@ func (n *HttpStore) PutSD(string, stream.Blob) error {
func (n *HttpStore) Delete(string) error {
	return shared.ErrNotImplemented
}
func (n *HttpStore) Shutdown() {}
func (n *HttpStore) Shutdown() { return }

// buffer pool to reduce GC
// https://www.captaincodeman.com/2017/06/02/golang-buffer-pool-gotcha

@@ -143,19 +138,14 @@ func putBuffer(buf *bytes.Buffer) {
	buffers.Put(buf)
}

func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	dialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}
	return dialer.DialContext(ctx, network, address)
}

// getClient gets an http client that's customized to be more performant when dealing with blobs of 2MB in size (most of our blobs)
func getClient() *http.Client {
	// Customize the Transport to have larger connection pool
	defaultTransport := &http.Transport{
		DialContext: dialContext,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		ForceAttemptHTTP2: true,
		MaxIdleConns:      100,
		IdleConnTimeout:   90 * time.Second,
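The getBuffer/putBuffer pair used above follows the sync.Pool gotcha described in the linked article: reset the buffer when returning it to the pool, so the next borrower never sees stale bytes. The diff only shows putBuffer's final line, so the bodies below are an assumption sketching the standard shape of this pattern:

	var buffers = sync.Pool{
		New: func() interface{} {
			return &bytes.Buffer{}
		},
	}

	// getBuffer hands out a pooled buffer, allocating one only when the pool is empty.
	func getBuffer() *bytes.Buffer {
		return buffers.Get().(*bytes.Buffer)
	}

	// putBuffer resets the buffer on its way back into the pool.
	func putBuffer(buf *bytes.Buffer) {
		buf.Reset()
		buffers.Put(buf)
	}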
@@ -3,19 +3,19 @@ package store
import (
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"
)

// ITTTStore performs an operation on this storage, if this fails, it attempts to run it on that
// ITTT store performs an operation on this storage, if this fails, it attempts to run it on that
type ITTTStore struct {
	this, that BlobStore
}

// NewITTTStore returns a new instance of the IF THIS THAN THAT store
// NewCachingStore makes a new caching disk store and returns a pointer to it.
func NewITTTStore(this, that BlobStore) *ITTTStore {
	return &ITTTStore{
		this: this,

@@ -28,7 +28,7 @@ const nameIttt = "ittt"
// Name is the cache type name
func (c *ITTTStore) Name() string { return nameIttt }

// Has checks in this for a hash, if it fails it checks in that. It returns true if either store has it.
// Has checks the cache and then the origin for a hash. It returns true if either store has it.
func (c *ITTTStore) Has(hash string) (bool, error) {
	has, err := c.this.Has(hash)
	if err != nil || !has {

@@ -70,4 +70,6 @@ func (c *ITTTStore) Delete(hash string) error {
}

// Shutdown shuts down the store gracefully
func (c *ITTTStore) Shutdown() {}
func (c *ITTTStore) Shutdown() {
	return
}
@@ -3,63 +3,35 @@ package store
import (
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"

	"github.com/bparli/lfuda-go"
	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"github.com/bluele/gcache"
	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"
	"github.com/sirupsen/logrus"
)

// GcacheStore adds a max cache size and Greedy-Dual-Size-Frequency cache eviction strategy to a BlobStore
type GcacheStore struct {
// LRUStore adds a max cache size and LRU eviction to a BlobStore
type LFUDAStore struct {
	// underlying store
	store BlobStore
	// cache implementation
	cache gcache.Cache
	// lfuda implementation
	lfuda *lfuda.Cache
}
type EvictionStrategy int

const (
	//LFU Discards the least frequently used items first.
	LFU EvictionStrategy = iota
	//ARC Constantly balances between LRU and LFU, to improve the combined result.
	ARC
	//LRU Discards the least recently used items first.
	LRU
	//SIMPLE has no clear priority for evict cache. It depends on key-value map order.
	SIMPLE
)

// NewGcacheStore initialize a new LRUStore
func NewGcacheStore(component string, store BlobStore, maxSize int, strategy EvictionStrategy) *GcacheStore {
	cacheBuilder := gcache.New(maxSize)
	var cache gcache.Cache
	evictFunc := func(key interface{}, value interface{}) {
		logrus.Infof("evicting %s", key)
// NewLRUStore initialize a new LRUStore
func NewLFUDAStore(component string, store BlobStore, maxSize float64) *LFUDAStore {
	lfuda := lfuda.NewGDSFWithEvict(maxSize, func(key interface{}, value interface{}) {
		metrics.CacheLRUEvictCount.With(metrics.CacheLabels(store.Name(), component)).Inc()
		_ = store.Delete(key.(string)) // TODO: log this error. may happen if underlying entry is gone but cache entry still there
	}
	switch strategy {
	case LFU:
		cache = cacheBuilder.LFU().EvictedFunc(evictFunc).Build()
	case ARC:
		cache = cacheBuilder.ARC().EvictedFunc(evictFunc).Build()
	case LRU:
		cache = cacheBuilder.LRU().EvictedFunc(evictFunc).Build()
	case SIMPLE:
		cache = cacheBuilder.Simple().EvictedFunc(evictFunc).Build()

	}
	l := &GcacheStore{
	})
	l := &LFUDAStore{
		store: store,
		cache: cache,
		lfuda: lfuda,
	}
	go func() {
		if lstr, ok := store.(lister); ok {
			err := l.loadExisting(lstr, maxSize)
			err := l.loadExisting(lstr, int(maxSize))
			if err != nil {
				panic(err) // TODO: what should happen here? panic? return nil? just keep going?
			}

@@ -69,34 +41,34 @@ func NewGcacheStore(component string, store BlobStore, maxSize int, strategy Evi
	return l
}

const nameGcache = "gcache"
const nameLFUDA = "lfuda"

// Name is the cache type name
func (l *GcacheStore) Name() string { return nameGcache }
func (l *LFUDAStore) Name() string { return nameLFUDA }

// Has returns whether the blob is in the store, without updating the recent-ness.
func (l *GcacheStore) Has(hash string) (bool, error) {
	return l.cache.Has(hash), nil
func (l *LFUDAStore) Has(hash string) (bool, error) {
	return l.lfuda.Contains(hash), nil
}

// Get returns the blob or an error if the blob doesn't exist.
func (l *GcacheStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
func (l *LFUDAStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	start := time.Now()
	_, err := l.cache.Get(hash)
	if err != nil {
	_, has := l.lfuda.Get(hash)
	if !has {
		return nil, shared.NewBlobTrace(time.Since(start), l.Name()), errors.Err(ErrBlobNotFound)
	}
	blob, stack, err := l.store.Get(hash)
	if errors.Is(err, ErrBlobNotFound) {
		// Blob disappeared from underlying store
		l.cache.Remove(hash)
		l.lfuda.Remove(hash)
	}
	return blob, stack.Stack(time.Since(start), l.Name()), err
}

// Put stores the blob. Following LFUDA rules it's not guaranteed that a SET will store the value!!!
func (l *GcacheStore) Put(hash string, blob stream.Blob) error {
	_ = l.cache.Set(hash, true)
func (l *LFUDAStore) Put(hash string, blob stream.Blob) error {
	l.lfuda.Set(hash, true)
	has, _ := l.Has(hash)
	if has {
		err := l.store.Put(hash, blob)

@@ -108,8 +80,8 @@ func (l *GcacheStore) Put(hash string, blob stream.Blob) error {
}

// PutSD stores the sd blob. Following LFUDA rules it's not guaranteed that a SET will store the value!!!
func (l *GcacheStore) PutSD(hash string, blob stream.Blob) error {
	_ = l.cache.Set(hash, true)
func (l *LFUDAStore) PutSD(hash string, blob stream.Blob) error {
	l.lfuda.Set(hash, true)
	has, _ := l.Has(hash)
	if has {
		err := l.store.PutSD(hash, blob)

@@ -121,7 +93,7 @@ func (l *GcacheStore) PutSD(hash string, blob stream.Blob) error {
}

// Delete deletes the blob from the store
func (l *GcacheStore) Delete(hash string) error {
func (l *LFUDAStore) Delete(hash string) error {
	err := l.store.Delete(hash)
	if err != nil {
		return err

@@ -130,12 +102,12 @@ func (l *GcacheStore) Delete(hash string) error {
	// This must come after store.Delete()
	// Remove triggers onEvict function, which also tries to delete blob from store
	// We need to delete it manually first so any errors can be propagated up
	l.cache.Remove(hash)
	l.lfuda.Remove(hash)
	return nil
}

// loadExisting imports existing blobs from the underlying store into the LRU cache
func (l *GcacheStore) loadExisting(store lister, maxItems int) error {
func (l *LFUDAStore) loadExisting(store lister, maxItems int) error {
	logrus.Infof("loading at most %d items", maxItems)
	existing, err := store.list()
	if err != nil {

@@ -144,20 +116,17 @@ func (l *GcacheStore) loadExisting(store lister, maxItems int) error {
	logrus.Infof("read %d files from underlying store", len(existing))

	added := 0
	for i, h := range existing {
		_ = l.cache.Set(h, true)
	for _, h := range existing {
		l.lfuda.Set(h, true)
		added++
		if maxItems > 0 && added >= maxItems { // underlying cache is bigger than the cache
			err := l.Delete(h)
			logrus.Infof("deleted overflowing blob: %s (%d/%d)", h, i, len(existing))
			if err != nil {
				logrus.Warnf("error while deleting a blob that's overflowing the cache: %s", err.Error())
			}
			break
		}
	}
	return nil
}

// Shutdown shuts down the store gracefully
func (l *GcacheStore) Shutdown() {
func (l *LFUDAStore) Shutdown() {
	return
}
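Both cache flavors in this hunk wrap another BlobStore, so wiring one up is a one-liner. A hedged usage sketch based on the constructors shown above; the path, component name, and sizes are illustrative, not from the repository:

	// Wrap a disk store (2-character prefix sharding) in an LFU-evicting
	// cache of at most 1000 blobs. Note that NewGcacheStore loads existing
	// blobs asynchronously, so Has() may lag briefly after construction.
	func newEdgeCache() *GcacheStore {
		disk := NewDiskStore("/mnt/blobs", 2)
		return NewGcacheStore("edge", disk, 1000, LFU)
	}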
136 store/lfuda_test.go Normal file
@@ -0,0 +1,136 @@
package store

import (
	"io/ioutil"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/lbryio/lbry.go/v2/extras/errors"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const cacheMaxSize = 3

func getTestLFUDAStore() (*LFUDAStore, *MemStore) {
	m := NewMemStore()
	return NewLFUDAStore("test", m, cacheMaxSize), m
}

func TestFUDAStore_Eviction(t *testing.T) {
	lfuda, mem := getTestLFUDAStore()
	b := []byte("x")
	err := lfuda.Put("one", b)
	require.NoError(t, err)
	err = lfuda.Put("two", b)
	require.NoError(t, err)
	err = lfuda.Put("three", b)
	require.NoError(t, err)
	err = lfuda.Put("four", b)
	require.NoError(t, err)
	err = lfuda.Put("five", b)
	require.NoError(t, err)
	err = lfuda.Put("five", b)
	require.NoError(t, err)
	err = lfuda.Put("four", b)
	require.NoError(t, err)
	err = lfuda.Put("two", b)
	require.NoError(t, err)

	_, _, err = lfuda.Get("five")
	require.NoError(t, err)
	_, _, err = lfuda.Get("four")
	require.NoError(t, err)
	_, _, err = lfuda.Get("two")
	require.NoError(t, err)
	assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))

	for k, v := range map[string]bool{
		"one":   false,
		"two":   true,
		"three": false,
		"four":  true,
		"five":  true,
		"six":   false,
	} {
		has, err := lfuda.Has(k)
		assert.NoError(t, err)
		assert.Equal(t, v, has)
	}

	lfuda.Get("two")  // touch so it stays in cache
	lfuda.Get("five") // touch so it stays in cache
	lfuda.Put("six", b)

	assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))

	for k, v := range map[string]bool{
		"one":   false,
		"two":   true,
		"three": false,
		"four":  false,
		"five":  true,
		"six":   true,
	} {
		has, err := lfuda.Has(k)
		assert.NoError(t, err)
		assert.Equal(t, v, has)
	}

	err = lfuda.Delete("six")
	assert.NoError(t, err)
	err = lfuda.Delete("five")
	assert.NoError(t, err)
	err = lfuda.Delete("two")
	assert.NoError(t, err)
	assert.Equal(t, 0, len(mem.Debug()))
}

func TestFUDAStore_UnderlyingBlobMissing(t *testing.T) {
	lfuda, mem := getTestLFUDAStore()
	hash := "hash"
	b := []byte("this is a blob of stuff")
	err := lfuda.Put(hash, b)
	require.NoError(t, err)

	err = mem.Delete(hash)
	require.NoError(t, err)

	// hash still exists in lru
	assert.True(t, lfuda.lfuda.Contains(hash))

	blob, _, err := lfuda.Get(hash)
	assert.Nil(t, blob)
	assert.True(t, errors.Is(err, ErrBlobNotFound), "expected (%s) %s, got (%s) %s",
		reflect.TypeOf(ErrBlobNotFound).String(), ErrBlobNotFound.Error(),
		reflect.TypeOf(err).String(), err.Error())

	// lru.Get() removes hash if underlying store doesn't have it
	assert.False(t, lfuda.lfuda.Contains(hash))
}

func TestFUDAStore_loadExisting(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "reflector_test_*")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	d := NewDiskStore(tmpDir, 2)

	hash := "hash"
	b := []byte("this is a blob of stuff")
	err = d.Put(hash, b)
	require.NoError(t, err)

	existing, err := d.list()
	require.NoError(t, err)
	require.Equal(t, 1, len(existing), "blob should exist in cache")
	assert.Equal(t, hash, existing[0])

	lfuda := NewLFUDAStore("test", d, 3) // lru should load existing blobs when it's created
	time.Sleep(100 * time.Millisecond)   // async load so let's wait...
	has, err := lfuda.Has(hash)
	require.NoError(t, err)
	assert.True(t, has, "hash should be loaded from disk store but it's not")
}
129 store/lru.go Normal file
@@ -0,0 +1,129 @@
package store

import (
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"github.com/bluele/gcache"
	"github.com/sirupsen/logrus"
)

// LRUStore adds a max cache size and LRU eviction to a BlobStore
type LRUStore struct {
	// underlying store
	store BlobStore
	// lru implementation
	lru gcache.Cache
}

// NewLRUStore initialize a new LRUStore
func NewLRUStore(component string, store BlobStore, maxItems int) *LRUStore {
	l := &LRUStore{
		store: store,
	}
	l.lru = gcache.New(maxItems).ARC().EvictedFunc(func(key, value interface{}) {
		metrics.CacheLRUEvictCount.With(metrics.CacheLabels(l.Name(), component)).Inc()
		_ = store.Delete(key.(string))
	}).Build()

	go func() {
		if lstr, ok := store.(lister); ok {
			err := l.loadExisting(lstr, maxItems)
			if err != nil {
				panic(err) // TODO: what should happen here? panic? return nil? just keep going?
			}
		}
	}()

	return l
}

// Name is the cache type name
func (l *LRUStore) Name() string {
	return "lru_" + l.store.Name()
}

// Has returns whether the blob is in the store, without updating the recent-ness.
func (l *LRUStore) Has(hash string) (bool, error) {
	return l.lru.Has(hash), nil
}

// Get returns the blob or an error if the blob doesn't exist.
func (l *LRUStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	start := time.Now()
	_, err := l.lru.Get(hash)
	if err != nil {
		return nil, shared.NewBlobTrace(time.Since(start), l.Name()), errors.Err(ErrBlobNotFound)
	}
	blob, stack, err := l.store.Get(hash)
	if errors.Is(err, ErrBlobNotFound) {
		// Blob disappeared from underlying store
		l.lru.Remove(hash)
	}
	return blob, stack.Stack(time.Since(start), l.Name()), err
}

// Put stores the blob
func (l *LRUStore) Put(hash string, blob stream.Blob) error {
	err := l.store.Put(hash, blob)
	if err != nil {
		return err
	}

	l.lru.Set(hash, true)
	return nil
}

// PutSD stores the sd blob
func (l *LRUStore) PutSD(hash string, blob stream.Blob) error {
	err := l.store.PutSD(hash, blob)
	if err != nil {
		return err
	}

	_ = l.lru.Set(hash, true)
	return nil
}

// Delete deletes the blob from the store
func (l *LRUStore) Delete(hash string) error {
	err := l.store.Delete(hash)
	if err != nil {
		return err
	}

	// This must come after store.Delete()
	// Remove triggers onEvict function, which also tries to delete blob from store
	// We need to delete it manually first so any errors can be propagated up
	l.lru.Remove(hash)
	return nil
}

// loadExisting imports existing blobs from the underlying store into the LRU cache
func (l *LRUStore) loadExisting(store lister, maxItems int) error {
	logrus.Infof("loading at most %d items", maxItems)
	existing, err := store.list()
	if err != nil {
		return err
	}
	logrus.Infof("read %d files from disk", len(existing))
	added := 0
	for _, h := range existing {
		l.lru.Set(h, true)
		added++
		if maxItems > 0 && added >= maxItems { // underlying cache is bigger than LRU cache
			break
		}
	}
	return nil
}

// Shutdown shuts down the store gracefully
func (l *LRUStore) Shutdown() {
	return
}
123 store/lru_test.go Normal file
@@ -0,0 +1,123 @@
package store

import (
	"io/ioutil"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/lbryio/lbry.go/v2/extras/errors"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const cacheMaxBlobs = 3

func getTestLRUStore() (*LRUStore, *MemStore) {
	m := NewMemStore()
	return NewLRUStore("test", m, 3), m
}

func TestLRUStore_Eviction(t *testing.T) {
	lru, mem := getTestLRUStore()
	b := []byte("x")
	err := lru.Put("one", b)
	require.NoError(t, err)
	err = lru.Put("two", b)
	require.NoError(t, err)
	err = lru.Put("three", b)
	require.NoError(t, err)
	err = lru.Put("four", b)
	require.NoError(t, err)
	err = lru.Put("five", b)
	require.NoError(t, err)

	assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))

	for k, v := range map[string]bool{
		"one":   false,
		"two":   false,
		"three": true,
		"four":  true,
		"five":  true,
		"six":   false,
	} {
		has, err := lru.Has(k)
		assert.NoError(t, err)
		assert.Equal(t, v, has)
	}

	lru.Get("three") // touch so it stays in cache
	lru.Put("six", b)

	assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))

	for k, v := range map[string]bool{
		"one":   false,
		"two":   false,
		"three": true,
		"four":  false,
		"five":  true,
		"six":   true,
	} {
		has, err := lru.Has(k)
		assert.NoError(t, err)
		assert.Equal(t, v, has)
	}

	err = lru.Delete("three")
	assert.NoError(t, err)
	err = lru.Delete("five")
	assert.NoError(t, err)
	err = lru.Delete("six")
	assert.NoError(t, err)
	assert.Equal(t, 0, len(mem.Debug()))
}

func TestLRUStore_UnderlyingBlobMissing(t *testing.T) {
	lru, mem := getTestLRUStore()
	hash := "hash"
	b := []byte("this is a blob of stuff")
	err := lru.Put(hash, b)
	require.NoError(t, err)

	err = mem.Delete(hash)
	require.NoError(t, err)

	// hash still exists in lru
	assert.True(t, lru.lru.Has(hash))

	blob, _, err := lru.Get(hash)
	assert.Nil(t, blob)
	assert.True(t, errors.Is(err, ErrBlobNotFound), "expected (%s) %s, got (%s) %s",
		reflect.TypeOf(ErrBlobNotFound).String(), ErrBlobNotFound.Error(),
		reflect.TypeOf(err).String(), err.Error())

	// lru.Get() removes hash if underlying store doesn't have it
	assert.False(t, lru.lru.Has(hash))
}

func TestLRUStore_loadExisting(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "reflector_test_*")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	d := NewDiskStore(tmpDir, 2)

	hash := "hash"
	b := []byte("this is a blob of stuff")
	err = d.Put(hash, b)
	require.NoError(t, err)

	existing, err := d.list()
	require.NoError(t, err)
	require.Equal(t, 1, len(existing), "blob should exist in cache")
	assert.Equal(t, hash, existing[0])

	lru := NewLRUStore("test", d, 3) // lru should load existing blobs when it's created
	time.Sleep(100 * time.Millisecond) // async load so let's wait...
	has, err := lru.Has(hash)
	require.NoError(t, err)
	assert.True(t, has, "hash should be loaded from disk store but it's not")
}
|
@ -4,10 +4,9 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lbryio/reflector.go/shared"
|
||||
|
||||
"github.com/lbryio/lbry.go/v2/extras/errors"
|
||||
"github.com/lbryio/lbry.go/v2/stream"
|
||||
"github.com/lbryio/reflector.go/shared"
|
||||
)
|
||||
|
||||
// MemStore is an in memory only blob store with no persistence.
|
||||
|
@ -77,4 +76,6 @@ func (m *MemStore) Debug() map[string]stream.Blob {
|
|||
}
|
||||
|
||||
// Shutdown shuts down the store gracefully
|
||||
func (m *MemStore) Shutdown() {}
|
||||
func (m *MemStore) Shutdown() {
|
||||
return
|
||||
}
|
||||
|
|
|
@@ -3,9 +3,8 @@ package store
import (
	"time"

	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/shared"
)

// NoopStore is a store that does nothing
@@ -5,11 +5,10 @@ import (
	"net/http"
	"time"

	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
@@ -3,10 +3,10 @@ package store
import (
	"time"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/reflector.go/internal/metrics"
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"

	"golang.org/x/sync/singleflight"
@@ -1,8 +1,7 @@
package speedwalk

import (
	"io/fs"
	"os"
	"io/ioutil"
	"path/filepath"
	"runtime"
	"sync"

@@ -18,19 +17,7 @@ import (
// AllFiles recursively lists every file in every subdirectory of a given directory
// If basename is true, return the basename of each file. Otherwise return the full path starting at startDir.
func AllFiles(startDir string, basename bool) ([]string, error) {
	entries, err := os.ReadDir(startDir)
	if err != nil {
		return nil, err
	}
	items := make([]fs.FileInfo, 0, len(entries))
	for _, entry := range entries {
		info, err := entry.Info()
		if err != nil {
			return nil, err
		}
		items = append(items, info)
	}

	items, err := ioutil.ReadDir(startDir)
	if err != nil {
		return nil, err
	}
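A quick usage sketch of AllFiles as documented above; the directory path is illustrative:

	package main

	import (
		"fmt"
		"log"

		"github.com/lbryio/reflector.go/store/speedwalk"
	)

	func main() {
		// List every blob file under the store directory; with basename=true
		// each entry is just the file name, i.e. the blob hash.
		hashes, err := speedwalk.AllFiles("/mnt/blobs", true)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("found %d blobs\n", len(hashes))
	}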
@@ -1,23 +1,22 @@
package store

import (
	"github.com/lbryio/reflector.go/shared"

	"github.com/lbryio/lbry.go/v2/extras/errors"
	"github.com/lbryio/lbry.go/v2/stream"
	"github.com/lbryio/reflector.go/shared"
)

// BlobStore is an interface for handling blob storage.
type BlobStore interface {
	// Name of blob store (useful for metrics)
	Name() string
	// Has Does blob exist in the store.
	// Does blob exist in the store.
	Has(hash string) (bool, error)
	// Get the blob from the store. Must return ErrBlobNotFound if blob is not in store.
	Get(hash string) (stream.Blob, shared.BlobTrace, error)
	// Put the blob into the store.
	Put(hash string, blob stream.Blob) error
	// PutSD an SD blob into the store.
	// Put an SD blob into the store.
	PutSD(hash string, blob stream.Blob) error
	// Delete the blob from the store.
	Delete(hash string) error
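Any type satisfying the methods above can be dropped into the store pipeline. A minimal compile-only sketch of an implementer; the Shutdown method is included on the assumption that it belongs to the interface, as the per-store Shutdown changes throughout this diff suggest:

	// nullStore discards writes and reports every blob as missing.
	type nullStore struct{}

	func (nullStore) Name() string                  { return "null" }
	func (nullStore) Has(hash string) (bool, error) { return false, nil }
	func (nullStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
		return nil, shared.NewBlobTrace(0, "null"), errors.Err(ErrBlobNotFound)
	}
	func (nullStore) Put(hash string, blob stream.Blob) error   { return nil }
	func (nullStore) PutSD(hash string, blob stream.Blob) error { return nil }
	func (nullStore) Delete(hash string) error                  { return nil }
	func (nullStore) Shutdown()                                 {}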