refactor refactor refactor

Author: Niko Storni
Date: 2021-07-20 02:09:14 +02:00
Parent commit: 72be487262
This commit: febfc51cb0
36 changed files with 274 additions and 286 deletions

View file

@@ -7,6 +7,7 @@ import (
 	"syscall"
+
 	"github.com/lbryio/lbry.go/v2/extras/crypto"
 	"github.com/lbryio/reflector.go/cluster"

 	log "github.com/sirupsen/logrus"

View file

@@ -10,10 +10,11 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/reflector.go/meta"
 	"github.com/lbryio/reflector.go/store/speedwalk"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
@@ -39,7 +40,7 @@ func integrityCheckCmd(cmd *cobra.Command, args []string) {
 	blobs, err := speedwalk.AllFiles(diskStorePath, true)
 	if err != nil {
-		log.Errorf("error while reading blobs from disk %s", errors.FullTrace(err))
+		log.Fatalf("error while reading blobs from disk %s", errors.FullTrace(err))
 	}
 	tasks := make(chan string, len(blobs))
 	done := make(chan bool)
@@ -63,12 +64,12 @@ func consume(worker int, tasks <-chan string, done chan<- bool, totalTasks int,
 	start := time.Now()
 	for b := range tasks {
-		checked := atomic.AddInt32(processed, 1)
+		processedSoFar := atomic.AddInt32(processed, 1)
 		if worker == 0 {
-			remaining := int32(totalTasks) - checked
-			timePerBlob := time.Since(start).Microseconds() / int64(checked)
+			remaining := int32(totalTasks) - processedSoFar
+			timePerBlob := time.Since(start).Microseconds() / int64(processedSoFar)
 			remainingTime := time.Duration(int64(remaining)*timePerBlob) * time.Microsecond
-			log.Infof("[T%d] %d/%d blobs checked. ETA: %s", worker, checked, totalTasks, remainingTime.String())
+			log.Infof("[T%d] %d/%d blobs processed so far. ETA: %s", worker, processedSoFar, totalTasks, remainingTime.String())
 		}
 		blobPath := path.Join(diskStorePath, b[:2], b)
 		blob, err := ioutil.ReadFile(blobPath)
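Editor's note on the ETA arithmetic above: elapsed microseconds are divided by the number of blobs processed so far to get an average per-blob cost, which is then multiplied by the remaining count. With made-up numbers: 2,000 blobs processed in 10 seconds gives 5,000µs per blob, so 8,000 remaining blobs yields a logged ETA of 40 seconds. Because timePerBlob uses integer division, the estimate is coarse until many blobs have been processed.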

View file

@@ -1,11 +1,12 @@
 package cmd

 import (
-	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/meta"
 	"github.com/lbryio/reflector.go/store/speedwalk"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )

View file

@@ -8,8 +8,7 @@ import (
 	"syscall"
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/extras/stop"
+	"github.com/lbryio/lbry.go/v2/extras/util"
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/meta"
@@ -19,6 +18,8 @@ import (
 	"github.com/lbryio/reflector.go/server/http"
 	"github.com/lbryio/reflector.go/store"

+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/extras/stop"
 	"github.com/lbryio/lbry.go/v2/stream"

 	"github.com/c2h5oh/datasize"
@@ -27,23 +28,40 @@ import (
 )

 var (
+	//port configuration
 	tcpPeerPort   int
 	http3PeerPort int
 	httpPort      int
 	receiverPort  int
 	metricsPort   int
+
+	//flags configuration
 	disableUploads   bool
 	disableBlocklist bool
-	proxyAddress     string
-	proxyPort        string
-	proxyProtocol    string
 	useDB            bool
-	cloudFrontEndpoint          string
-	WasabiEndpoint              string
-	reflectorCmdDiskCache       string
-	bufferReflectorCmdDiskCache string
-	reflectorCmdMemCache        int
+
+	//upstream configuration
+	upstreamReflector string
+	upstreamProtocol  string
+
+	//downstream configuration
 	requestQueueSize int
+
+	//upstream edge configuration (to "cold" storage)
+	originEndpoint         string
+	originEndpointFallback string
+
+	//cache configuration
+	diskCache          string
+	secondaryDiskCache string
+	memCache           int
+)
+
+var cacheManagers = []string{"localdb", "lfuda", "lru"}
+
+const (
+	LOCALDB int = iota
+	LFUDA
+	LRU
 )

 func init() {
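Note: the LOCALDB/LFUDA/LRU constants added here are iota indices into cacheManagers, so cacheManagers[LOCALDB] is "localdb", cacheManagers[LFUDA] is "lfuda", and cacheManagers[LRU] is "lru". initDiskStore switches on these names further down.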
@@ -52,38 +70,41 @@ func init() {
 		Short: "Run reflector server",
 		Run:   reflectorCmd,
 	}
-	cmd.Flags().StringVar(&proxyAddress, "proxy-address", "", "address of another reflector server where blobs are fetched from")
-	cmd.Flags().StringVar(&proxyPort, "proxy-port", "5567", "port of another reflector server where blobs are fetched from")
-	cmd.Flags().StringVar(&proxyProtocol, "proxy-protocol", "http3", "protocol used to fetch blobs from another reflector server (tcp/http3)")
-	cmd.Flags().StringVar(&cloudFrontEndpoint, "cloudfront-endpoint", "", "CloudFront edge endpoint for standard HTTP retrieval")
-	cmd.Flags().StringVar(&WasabiEndpoint, "wasabi-endpoint", "", "Wasabi edge endpoint for standard HTTP retrieval")
-	cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from")
+	cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from for the TCP (LBRY) protocol")
 	cmd.Flags().IntVar(&http3PeerPort, "http3-peer-port", 5568, "The port reflector will distribute content from over HTTP3 protocol")
 	cmd.Flags().IntVar(&httpPort, "http-port", 5569, "The port reflector will distribute content from over HTTP protocol")
 	cmd.Flags().IntVar(&receiverPort, "receiver-port", 5566, "The port reflector will receive content from")
-	cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for metrics")
-	cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests should be submitted to upstream")
+	cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for prometheus metrics")
 	cmd.Flags().BoolVar(&disableUploads, "disable-uploads", false, "Disable uploads to this reflector server")
 	cmd.Flags().BoolVar(&disableBlocklist, "disable-blocklist", false, "Disable blocklist watching/updating")
-	cmd.Flags().BoolVar(&useDB, "use-db", true, "whether to connect to the reflector db or not")
-	cmd.Flags().StringVar(&reflectorCmdDiskCache, "disk-cache", "",
-		"enable disk cache, setting max size and path where to store blobs. format is 'sizeGB:CACHE_PATH'")
-	cmd.Flags().StringVar(&bufferReflectorCmdDiskCache, "buffer-disk-cache", "",
-		"enable buffer disk cache, setting max size and path where to store blobs. format is 'sizeGB:CACHE_PATH'")
-	cmd.Flags().IntVar(&reflectorCmdMemCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
+	cmd.Flags().BoolVar(&useDB, "use-db", true, "Whether to connect to the reflector db or not")
+	cmd.Flags().StringVar(&upstreamReflector, "upstream-reflector", "", "host:port of a reflector server where blobs are fetched from")
+	cmd.Flags().StringVar(&upstreamProtocol, "proxy-protocol", "http", "protocol used to fetch blobs from another reflector server (tcp/http3/http)")
+	cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests from downstream should be handled at once (the rest will wait)")
+	cmd.Flags().StringVar(&originEndpoint, "origin-endpoint", "", "HTTP edge endpoint for standard HTTP retrieval")
+	cmd.Flags().StringVar(&originEndpointFallback, "origin-endpoint-fallback", "", "HTTP edge endpoint for standard HTTP retrieval if first origin fails")
+	cmd.Flags().StringVar(&diskCache, "disk-cache", "100GB:/tmp/downloaded_blobs:localdb", "Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru)")
+	cmd.Flags().StringVar(&secondaryDiskCache, "optional-disk-cache", "", "Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (this would get hit before the one specified in disk-cache)")
+	cmd.Flags().IntVar(&memCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
 	rootCmd.AddCommand(cmd)
 }

 func reflectorCmd(cmd *cobra.Command, args []string) {
 	log.Printf("reflector %s", meta.VersionString())
-	cleanerStopper := stop.New()

 	// the blocklist logic requires the db backed store to be the outer-most store
-	underlyingStore := setupStore()
-	outerStore := wrapWithCache(underlyingStore, cleanerStopper)
+	underlyingStore := initStores()
+	underlyingStoreWithCaches, cleanerStopper := initCaches(underlyingStore)

 	if !disableUploads {
-		reflectorServer := reflector.NewServer(underlyingStore, outerStore)
+		reflectorServer := reflector.NewServer(underlyingStore, underlyingStoreWithCaches)
 		reflectorServer.Timeout = 3 * time.Minute
 		reflectorServer.EnableBlocklist = !disableBlocklist
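For illustration only (the binary name, host, and paths are invented; the flags are the ones registered above), a caching reflector that pulls from an upstream over HTTP3 might be launched like:

    $BIN reflector --upstream-reflector=reflector.example.com:5568 --proxy-protocol=http3 \
        --disk-cache=500GB:/mnt/slow/blobs:lfuda --optional-disk-cache=50GB:/mnt/nvme/blobs:lru \
        --mem-cache=10000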
@@ -94,21 +115,21 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 		defer reflectorServer.Shutdown()
 	}

-	peerServer := peer.NewServer(outerStore)
+	peerServer := peer.NewServer(underlyingStoreWithCaches)
 	err := peerServer.Start(":" + strconv.Itoa(tcpPeerPort))
 	if err != nil {
 		log.Fatal(err)
 	}
 	defer peerServer.Shutdown()

-	http3PeerServer := http3.NewServer(outerStore, requestQueueSize)
+	http3PeerServer := http3.NewServer(underlyingStoreWithCaches, requestQueueSize)
 	err = http3PeerServer.Start(":" + strconv.Itoa(http3PeerPort))
 	if err != nil {
 		log.Fatal(err)
 	}
 	defer http3PeerServer.Shutdown()

-	httpServer := http.NewServer(outerStore, requestQueueSize)
+	httpServer := http.NewServer(underlyingStoreWithCaches, requestQueueSize)
 	err = httpServer.Start(":" + strconv.Itoa(httpPort))
 	if err != nil {
 		log.Fatal(err)
@@ -118,8 +139,8 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 	metricsServer := metrics.NewServer(":"+strconv.Itoa(metricsPort), "/metrics")
 	metricsServer.Start()
 	defer metricsServer.Shutdown()
-	defer outerStore.Shutdown()
-	defer underlyingStore.Shutdown()
+	defer underlyingStoreWithCaches.Shutdown()
+	defer underlyingStore.Shutdown() //do we actually need this? Oo

 	interruptChan := make(chan os.Signal, 1)
 	signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
@@ -128,33 +149,38 @@ func reflectorCmd(cmd *cobra.Command, args []string) {
 	cleanerStopper.StopAndWait()
 }

-func setupStore() store.BlobStore {
+func initUpstreamStore() store.BlobStore {
 	var s store.BlobStore
-	if proxyAddress != "" {
-		switch proxyProtocol {
+	if upstreamReflector == "" {
+		return nil
+	}
+	switch upstreamProtocol {
 	case "tcp":
 		s = peer.NewStore(peer.StoreOpts{
-			Address: proxyAddress + ":" + proxyPort,
+			Address: upstreamReflector,
 			Timeout: 30 * time.Second,
 		})
 	case "http3":
 		s = http3.NewStore(http3.StoreOpts{
-			Address: proxyAddress + ":" + proxyPort,
+			Address: upstreamReflector,
 			Timeout: 30 * time.Second,
 		})
 	case "http":
-		s = store.NewHttpStore(proxyAddress + ":" + proxyPort)
+		s = store.NewHttpStore(upstreamReflector)
 	default:
-		log.Fatalf("protocol is not recognized: %s", proxyProtocol)
+		log.Fatalf("protocol is not recognized: %s", upstreamProtocol)
 	}
-	} else {
+	return s
+}
+
+func initEdgeStore() store.BlobStore {
 	var s3Store *store.S3Store
+	var s store.BlobStore
 	if conf != "none" {
 		s3Store = store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName)
 	}
-	if cloudFrontEndpoint != "" && WasabiEndpoint != "" {
-		ittt := store.NewITTTStore(store.NewCloudFrontROStore(WasabiEndpoint), store.NewCloudFrontROStore(cloudFrontEndpoint))
+	if originEndpointFallback != "" && originEndpoint != "" {
+		ittt := store.NewITTTStore(store.NewCloudFrontROStore(originEndpoint), store.NewCloudFrontROStore(originEndpointFallback))
 		if s3Store != nil {
 			s = store.NewCloudFrontRWStore(ittt, s3Store)
 		} else {
@@ -165,8 +191,10 @@ func setupStore() store.BlobStore {
 	} else {
 		log.Fatalf("this configuration does not include a valid upstream source")
 	}
+	return s
 }

+func initDBStore(s store.BlobStore) store.BlobStore {
 	if useDB {
 		dbInst := &db.SQL{
 			TrackAccess: db.TrackAccessStreams,
@@ -176,26 +204,55 @@ func setupStore() store.BlobStore {
 		if err != nil {
 			log.Fatal(err)
 		}
 		s = store.NewDBBackedStore(s, dbInst, false)
 	}
 	return s
 }

-func wrapWithCache(s store.BlobStore, cleanerStopper *stop.Group) store.BlobStore {
-	wrapped := s
-
-	diskCacheMaxSize, diskCachePath := diskCacheParams(reflectorCmdDiskCache)
+func initStores() store.BlobStore {
+	s := initUpstreamStore()
+	if s == nil {
+		s = initEdgeStore()
+	}
+	s = initDBStore(s)
+	return s
+}
+
+// initCaches returns a store wrapped with caches and a stop group to execute a clean shutdown
+func initCaches(s store.BlobStore) (store.BlobStore, *stop.Group) {
+	stopper := stop.New()
+	diskStore := initDiskStore(s, diskCache, stopper)
+	finalStore := initDiskStore(diskStore, secondaryDiskCache, stopper)
+	stop.New()
+	if memCache > 0 {
+		finalStore = store.NewCachingStore(
+			"reflector",
+			finalStore,
+			store.NewLRUStore("mem", store.NewMemStore(), memCache),
+		)
+	}
+	return finalStore, stopper
+}
+
+func initDiskStore(upstreamStore store.BlobStore, diskParams string, stopper *stop.Group) store.BlobStore {
+	diskCacheMaxSize, diskCachePath, cacheManager := diskCacheParams(diskParams)
 	//we are tracking blobs in memory with a 1 byte long boolean, which means that for each 2MB (a blob) we need 1Byte
 	// so if the underlying cache holds 10MB, 10MB/2MB=5Bytes which is also the exact count of objects to restore on startup
 	realCacheSize := float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
-	if diskCacheMaxSize > 0 {
+	if diskCacheMaxSize == 0 {
+		return upstreamStore
+	}
 	err := os.MkdirAll(diskCachePath, os.ModePerm)
 	if err != nil {
 		log.Fatal(err)
 	}
+	diskStore := store.NewDiskStore(diskCachePath, 2)
+	var unwrappedStore store.BlobStore
+	cleanerStopper := stop.New(stopper)
+
+	switch cacheManager {
+	case cacheManagers[LOCALDB]:
 		localDb := &db.SQL{
 			SoftDelete:  true,
 			TrackAccess: db.TrackAccessBlobs,
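Note on the new wiring: a read now falls through the layers in order — the in-memory LRU cache (when --mem-cache > 0), then the optional secondary disk cache, then the primary disk cache, and finally the upstream or edge store built by initStores. This matches the --optional-disk-cache help text above ("this would get hit before the one specified in disk-cache"). The bare stop.New() call in initCaches creates a stop group whose result is discarded and looks like leftover code.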
@@ -205,55 +262,41 @@ func wrapWithCache(s store.BlobStore, cleanerStopper *stop.Group) store.BlobStor
 		if err != nil {
 			log.Fatal(err)
 		}
-		dbBackedDiskStore := store.NewDBBackedStore(store.NewDiskStore(diskCachePath, 2), localDb, true)
-		wrapped = store.NewCachingStore(
-			"reflector",
-			wrapped,
-			dbBackedDiskStore,
-		)
-		go cleanOldestBlobs(int(realCacheSize), localDb, dbBackedDiskStore, cleanerStopper)
-	}
-
-	diskCacheMaxSize, diskCachePath = diskCacheParams(bufferReflectorCmdDiskCache)
-	realCacheSize = float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
-	if diskCacheMaxSize > 0 {
-		err := os.MkdirAll(diskCachePath, os.ModePerm)
-		if err != nil {
-			log.Fatal(err)
-		}
-		wrapped = store.NewCachingStore(
-			"reflector",
-			wrapped,
-			store.NewLFUDAStore("nvme", store.NewDiskStore(diskCachePath, 2), realCacheSize),
-		)
-	}
-	if reflectorCmdMemCache > 0 {
-		wrapped = store.NewCachingStore(
-			"reflector",
-			wrapped,
-			store.NewLRUStore("mem", store.NewMemStore(), reflectorCmdMemCache),
-		)
-	}
+		unwrappedStore = store.NewDBBackedStore(diskStore, localDb, true)
+		go cleanOldestBlobs(int(realCacheSize), localDb, unwrappedStore, cleanerStopper)
+	case cacheManagers[LFUDA]:
+		unwrappedStore = store.NewLFUDAStore("nvme", store.NewDiskStore(diskCachePath, 2), realCacheSize)
+	case cacheManagers[LRU]:
+		unwrappedStore = store.NewLRUStore("nvme", store.NewDiskStore(diskCachePath, 2), int(realCacheSize))
+	}
+
+	wrapped := store.NewCachingStore(
+		"reflector",
+		upstreamStore,
+		unwrappedStore,
+	)
 	return wrapped
 }

-func diskCacheParams(diskParams string) (int, string) {
+func diskCacheParams(diskParams string) (int, string, string) {
 	if diskParams == "" {
-		return 0, ""
+		return 0, "", ""
 	}

 	parts := strings.Split(diskParams, ":")
-	if len(parts) != 2 {
-		log.Fatalf("--disk-cache must be a number, followed by ':', followed by a string")
+	if len(parts) != 3 {
+		log.Fatalf("%s is formatted incorrectly. Expected format: 'sizeGB:CACHE_PATH:cachemanager' for example: '100GB:/tmp/downloaded_blobs:localdb'", diskParams)
 	}

 	diskCacheSize := parts[0]
 	path := parts[1]
+	cacheManager := parts[2]

 	if len(path) == 0 || path[0] != '/' {
-		log.Fatalf("--disk-cache path must start with '/'")
+		log.Fatalf("disk cache paths must start with '/'")
 	}
+	if !util.InSlice(cacheManager, cacheManagers) {
+		log.Fatalf("specified cache manager '%s' is not supported. Use one of the following: %v", cacheManager, cacheManagers)
+	}

 	var maxSize datasize.ByteSize
@@ -262,9 +305,9 @@ func diskCacheParams(diskParams string) (int, string) {
 		log.Fatal(err)
 	}
 	if maxSize <= 0 {
-		log.Fatal("--disk-cache size must be more than 0")
+		log.Fatal("disk cache size must be more than 0")
 	}
-	return int(maxSize), path
+	return int(maxSize), path, cacheManager
 }

 func cleanOldestBlobs(maxItems int, db *db.SQL, store store.BlobStore, stopper *stop.Group) {
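Worked example for the new parser (assuming c2h5oh/datasize parses "GB" as binary gigabytes, i.e. 2^30 bytes): diskCacheParams("100GB:/tmp/downloaded_blobs:localdb") returns (107374182400, "/tmp/downloaded_blobs", "localdb"). With 2 MiB blobs (2,097,152 bytes, per the "2MB (a blob)" comment in initDiskStore), realCacheSize works out to 107374182400 / 2097152 = 51,200 blobs.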

View file

@@ -6,10 +6,11 @@ import (
 	"os"
 	"strings"

+	"github.com/lbryio/reflector.go/updater"
+
 	"github.com/lbryio/lbry.go/v2/dht"
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/extras/util"
-	"github.com/lbryio/reflector.go/updater"

 	"github.com/johntdyer/slackrus"
 	"github.com/sirupsen/logrus"

View file

@@ -7,8 +7,6 @@ import (
 	"strings"
 	"syscall"

-	"github.com/lbryio/lbry.go/v2/dht"
-	"github.com/lbryio/lbry.go/v2/dht/bits"
 	"github.com/lbryio/reflector.go/cluster"
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/peer"
@@ -16,6 +14,9 @@ import (
 	"github.com/lbryio/reflector.go/reflector"
 	"github.com/lbryio/reflector.go/store"

+	"github.com/lbryio/lbry.go/v2/dht"
+	"github.com/lbryio/lbry.go/v2/dht/bits"
+
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )

View file

@@ -8,8 +8,8 @@ import (
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/reflector"
 	"github.com/lbryio/reflector.go/store"
-	log "github.com/sirupsen/logrus"
+
+	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )

View file

@@ -4,6 +4,7 @@ import (
 	"fmt"

 	"github.com/lbryio/reflector.go/meta"
+
 	"github.com/spf13/cobra"
 )

View file

@@ -39,11 +39,11 @@ type SdBlob struct {
 type trackAccess int

 const (
-	// Don't track accesses
+	//TrackAccessNone Don't track accesses
 	TrackAccessNone trackAccess = iota
-	// Track accesses at the stream level
+	//TrackAccessStreams Track accesses at the stream level
 	TrackAccessStreams
-	// Track accesses at the blob level
+	//TrackAccessBlobs Track accesses at the blob level
 	TrackAccessBlobs
 )
@@ -101,7 +101,7 @@ func (s *SQL) AddBlob(hash string, length int, isStored bool) error {
 	return err
 }

-// AddBlob adds a blob to the database.
+//AddBlobs adds blobs to the database.
 func (s *SQL) AddBlobs(hash []string) error {
 	if s.conn == nil {
 		return errors.Err("not connected")
@@ -419,7 +419,7 @@ func (s *SQL) Delete(hash string) error {
 	return errors.Err(err)
 }

-// GetHashRange gets the smallest and biggest hashes in the db
+//LeastRecentlyAccessedHashes gets the least recently accessed blobs
 func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
 	if s.conn == nil {
 		return nil, errors.Err("not connected")
@@ -451,40 +451,6 @@ func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
 	return blobs, nil
 }

-// AllHashes writes all hashes from the db into the channel.
-// It does not close the channel when it finishes.
-//func (s *SQL) AllHashes(ch chan<- string) error {
-//	if s.conn == nil {
-//		return errors.Err("not connected")
-//	}
-//
-//	query := "SELECT hash from blob_"
-//	if s.SoftDelete {
-//		query += " where is_stored = 1"
-//	}
-//	s.logQuery(query)
-//
-//	rows, err := s.conn.Query(query)
-//	if err != nil {
-//		return errors.Err(err)
-//	}
-//	defer closeRows(rows)
-//
-//	for rows.Next() {
-//		var hash string
-//		err := rows.Scan(&hash)
-//		if err != nil {
-//			return errors.Err(err)
-//		}
-//		ch <- hash
-//		// TODO: this needs testing
-//		// TODO: need a way to cancel this early (e.g. in case of shutdown)
-//	}
-//
-//	close(ch)
-//	return nil
-//}

 func (s *SQL) Count() (int, error) {
 	if s.conn == nil {
 		return 0, errors.Err("not connected")
@@ -813,47 +779,3 @@ CREATE TABLE blocked (
 );
 */

-//func (d *LiteDBBackedStore) selfClean() {
-//	d.stopper.Add(1)
-//	defer d.stopper.Done()
-//	lastCleanup := time.Now()
-//	const cleanupInterval = 10 * time.Second
-//	for {
-//		select {
-//		case <-d.stopper.Ch():
-//			log.Infoln("stopping self cleanup")
-//			return
-//		default:
-//			time.Sleep(1 * time.Second)
-//		}
-//		if time.Since(lastCleanup) < cleanupInterval {
-//			continue
-//		}
-//
-//		blobsCount, err := d.db.BlobsCount()
-//		if err != nil {
-//			log.Errorf(errors.FullTrace(err))
-//		}
-//		if blobsCount >= d.maxItems {
-//			itemsToDelete := blobsCount / 100 * 10
-//			blobs, err := d.db.GetLRUBlobs(itemsToDelete)
-//			if err != nil {
-//				log.Errorf(errors.FullTrace(err))
-//			}
-//			for _, hash := range blobs {
-//				select {
-//				case <-d.stopper.Ch():
-//					return
-//				default:
-//
-//				}
-//				err = d.Delete(hash)
-//				if err != nil {
-//					log.Errorf(errors.FullTrace(err))
-//				}
-//				metrics.CacheLRUEvictCount.With(metrics.CacheLabels(d.Name(), d.component)).Inc()
-//			}
-//		}
-//		lastCleanup = time.Now()
-//	}
-//}

View file

@@ -18,9 +18,6 @@ import (
 	log "github.com/sirupsen/logrus"
 )

-// ErrBlobExists is a default error for when a blob already exists on the reflector server.
-var ErrBlobExists = errors.Base("blob exists on server")
-
 // Client is an instance of a client connected to a server.
 type Client struct {
 	Timeout time.Duration

View file

@@ -15,6 +15,7 @@ import (
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lucas-clemente/quic-go/http3"
+
 	log "github.com/sirupsen/logrus"
 )

View file

@@ -8,10 +8,12 @@ import (
 	"sync"
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/shared"
 	"github.com/lbryio/reflector.go/store"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/stream"
+
 	"github.com/lucas-clemente/quic-go"
 	"github.com/lucas-clemente/quic-go/http3"
 )

View file

@@ -4,10 +4,11 @@ import (
 	"strings"
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/shared"
 	"github.com/lbryio/reflector.go/store"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/stream"
 )

 // Store is a blob store that gets blobs from a peer.

View file

@@ -5,14 +5,14 @@ import (
 	"strconv"
 	"sync"

-	"github.com/lbryio/lbry.go/v2/dht"
-	"github.com/lbryio/lbry.go/v2/dht/bits"
 	"github.com/lbryio/reflector.go/cluster"
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/peer"
 	"github.com/lbryio/reflector.go/reflector"
 	"github.com/lbryio/reflector.go/store"

+	"github.com/lbryio/lbry.go/v2/dht"
+	"github.com/lbryio/lbry.go/v2/dht/bits"
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/extras/stop"
View file

@@ -4,8 +4,9 @@ import (
 	"math/big"
 	"testing"

-	"github.com/davecgh/go-spew/spew"
 	"github.com/lbryio/lbry.go/v2/dht/bits"
+
+	"github.com/davecgh/go-spew/spew"
 )

 func TestAnnounceRange(t *testing.T) {

View file

@@ -22,7 +22,7 @@ import (
 	"github.com/golang/protobuf/proto"
 )

-var TODO = `
+/* TODO:
 import cert from wallet
 get all utxos from chainquery
 create transaction
@@ -38,8 +38,7 @@ var TODO = `
 	"too-long-mempool-chain",
 	"Missing inputs",
 	"Not enough funds to cover this transaction",
-}
-`
+*/

 type Details struct {
 	Title string

View file

@@ -9,7 +9,6 @@ import (
 	"time"

 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/store"
-
 	"github.com/lbryio/reflector.go/wallet"

View file

@@ -9,9 +9,10 @@ import (
 	"testing"
 	"time"

-	"github.com/lbryio/lbry.go/v2/dht/bits"
 	"github.com/lbryio/reflector.go/store"
+
+	"github.com/lbryio/lbry.go/v2/dht/bits"

 	"github.com/davecgh/go-spew/spew"
 	"github.com/phayes/freeport"
 )

View file

@@ -7,9 +7,8 @@ import (
 	"sync"
 	"time"

-	"github.com/lbryio/reflector.go/internal/metrics"
-
 	"github.com/lbryio/reflector.go/db"
+	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/store"

 	"github.com/lbryio/lbry.go/v2/extras/errors"

View file

@@ -5,12 +5,13 @@ import (
 	"sync"
 	"time"

-	"github.com/gin-gonic/gin"
-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/reflector.go/shared"
 	"github.com/lbryio/reflector.go/internal/metrics"
+	"github.com/lbryio/reflector.go/shared"
 	"github.com/lbryio/reflector.go/store"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/gin-gonic/gin"
 )

 func (s *Server) getBlob(c *gin.Context) {

View file

@@ -5,11 +5,13 @@ import (
 	"net/http"
 	"time"

+	"github.com/lbryio/reflector.go/store"
+
+	"github.com/lbryio/lbry.go/v2/extras/stop"
+
 	"github.com/bluele/gcache"
 	nice "github.com/ekyoung/gin-nice-recovery"
 	"github.com/gin-gonic/gin"
-	"github.com/lbryio/lbry.go/v2/extras/stop"
-	"github.com/lbryio/reflector.go/store"
 	log "github.com/sirupsen/logrus"
 )
) )

View file

@@ -5,6 +5,7 @@ import (
 	"time"

 	"github.com/lbryio/lbry.go/v2/extras/util"
+
 	"github.com/stretchr/testify/assert"
 )

View file

@@ -3,12 +3,13 @@ package store

 import (
 	"time"

+	"github.com/lbryio/reflector.go/internal/metrics"
+	"github.com/lbryio/reflector.go/shared"
+
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/stream"
-	"github.com/lbryio/reflector.go/shared"
-	log "github.com/sirupsen/logrus"
-	"github.com/lbryio/reflector.go/internal/metrics"
+	log "github.com/sirupsen/logrus"
 )

 // CachingStore combines two stores, typically a local and a remote store, to improve performance.

View file

@@ -6,8 +6,9 @@ import (
 	"testing"
 	"time"

-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/shared"
+
+	"github.com/lbryio/lbry.go/v2/stream"
 )

 func TestCachingStore_Put(t *testing.T) {

View file

@@ -6,12 +6,13 @@ import (
 	"net/http"
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/meta"
 	"github.com/lbryio/reflector.go/shared"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/stream"
 	log "github.com/sirupsen/logrus"
 )

View file

@@ -3,8 +3,9 @@ package store

 import (
 	"time"

-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/shared"
+
+	"github.com/lbryio/lbry.go/v2/stream"
 )

 // CloudFrontRWStore combines a Cloudfront and an S3 store. Reads go to Cloudfront/Wasabi, writes go to S3.

View file

@@ -5,11 +5,12 @@ import (
 	"sync"
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/db"
 	"github.com/lbryio/reflector.go/shared"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/stream"
 	log "github.com/sirupsen/logrus"
 )

View file

@@ -16,7 +16,7 @@ import (
 	"github.com/lbryio/lbry.go/v2/stream"
 )

-// NoopStore is a store that does nothing
+// HttpStore is a store that works on top of the HTTP protocol
 type HttpStore struct {
 	upstream   string
 	httpClient *http.Client

View file

@@ -3,19 +3,19 @@ package store

 import (
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/shared"
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/stream"
 )

-// ITTT store performs an operation on this storage, if this fails, it attempts to run it on that
+// ITTTStore performs an operation on this storage; if this fails, it attempts to run it on that
 type ITTTStore struct {
 	this, that BlobStore
 }

-// NewCachingStore makes a new caching disk store and returns a pointer to it.
+// NewITTTStore returns a new instance of the IF THIS THEN THAT store
 func NewITTTStore(this, that BlobStore) *ITTTStore {
 	return &ITTTStore{
 		this: this,
@@ -28,7 +28,7 @@ const nameIttt = "ittt"

 // Name is the cache type name
 func (c *ITTTStore) Name() string { return nameIttt }

-// Has checks the cache and then the origin for a hash. It returns true if either store has it.
+// Has checks in this for a hash, if it fails it checks in that. It returns true if either store has it.
 func (c *ITTTStore) Has(hash string) (bool, error) {
 	has, err := c.this.Has(hash)
 	if err != nil || !has {

View file

@@ -1,17 +1,21 @@
 package store

+//TODO: the caching strategy is actually not LFUDA, it should become a parameter and the name of the struct should be changed
+
 import (
 	"time"

-	"github.com/bparli/lfuda-go"
-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/shared"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/stream"
+
+	"github.com/bparli/lfuda-go"
 	"github.com/sirupsen/logrus"
 )

-// LRUStore adds a max cache size and LRU eviction to a BlobStore
+// LFUDAStore adds a max cache size and Greedy-Dual-Size-Frequency cache eviction strategy to a BlobStore
 type LFUDAStore struct {
 	// underlying store
 	store BlobStore
@@ -19,7 +23,7 @@ type LFUDAStore struct {
 	lfuda *lfuda.Cache
 }

-// NewLRUStore initialize a new LRUStore
+// NewLFUDAStore initializes a new LFUDAStore
 func NewLFUDAStore(component string, store BlobStore, maxSize float64) *LFUDAStore {
 	lfuda := lfuda.NewGDSFWithEvict(maxSize, func(key interface{}, value interface{}) {
 		metrics.CacheLRUEvictCount.With(metrics.CacheLabels(store.Name(), component)).Inc()

View file

@@ -4,9 +4,10 @@ import (
 	"sync"
 	"time"

+	"github.com/lbryio/reflector.go/shared"
+
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/stream"
-	"github.com/lbryio/reflector.go/shared"
 )

 // MemStore is an in memory only blob store with no persistence.

View file

@@ -3,8 +3,9 @@ package store

 import (
 	"time"

-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/shared"
+
+	"github.com/lbryio/lbry.go/v2/stream"
 )

 // NoopStore is a store that does nothing

View file

@@ -5,11 +5,12 @@ import (
 	"net/http"
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
-	"github.com/lbryio/lbry.go/v2/stream"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/shared"
+
+	"github.com/lbryio/lbry.go/v2/extras/errors"
+	"github.com/lbryio/lbry.go/v2/stream"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/credentials"

View file

@@ -3,10 +3,10 @@ package store

 import (
 	"time"

-	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/reflector.go/internal/metrics"
 	"github.com/lbryio/reflector.go/shared"
+	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/stream"

 	"golang.org/x/sync/singleflight"

View file

@@ -1,22 +1,23 @@
 package store

 import (
+	"github.com/lbryio/reflector.go/shared"
+
 	"github.com/lbryio/lbry.go/v2/extras/errors"
 	"github.com/lbryio/lbry.go/v2/stream"
-	"github.com/lbryio/reflector.go/shared"
 )

 // BlobStore is an interface for handling blob storage.
 type BlobStore interface {
 	// Name of blob store (useful for metrics)
 	Name() string
-	// Does blob exist in the store.
+	// Has reports whether the blob exists in the store.
 	Has(hash string) (bool, error)
 	// Get the blob from the store. Must return ErrBlobNotFound if blob is not in store.
 	Get(hash string) (stream.Blob, shared.BlobTrace, error)
 	// Put the blob into the store.
 	Put(hash string, blob stream.Blob) error
-	// Put an SD blob into the store.
+	// PutSD puts an SD blob into the store.
 	PutSD(hash string, blob stream.Blob) error
 	// Delete the blob from the store.
 	Delete(hash string) error
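To make the BlobStore contract concrete, here is a minimal sketch of an implementation in the spirit of the NoopStore mentioned earlier. It is not part of this commit: the nullStore name is invented, only the methods visible in this hunk are covered (the real interface may declare more, e.g. Shutdown), and returning a zero-valued shared.BlobTrace is an assumption about that type.

package store

// nullStore is a hypothetical illustration of the BlobStore methods above:
// it stores nothing and reports every blob as missing.
type nullStore struct{}

// Name identifies the store for metrics.
func (n nullStore) Name() string { return "null" }

// Has reports false for every hash, since nothing is ever stored.
func (n nullStore) Has(hash string) (bool, error) { return false, nil }

// Get honors the contract above by returning ErrBlobNotFound for every hash.
// The zero shared.BlobTrace is an assumption about that type.
func (n nullStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
	return nil, shared.BlobTrace{}, errors.Err(ErrBlobNotFound)
}

// Put, PutSD, and Delete accept and discard everything.
func (n nullStore) Put(hash string, blob stream.Blob) error   { return nil }
func (n nullStore) PutSD(hash string, blob stream.Blob) error { return nil }
func (n nullStore) Delete(hash string) error                  { return nil }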