When reflecting an sd blob, insert all the stream and intermediate blobs using a transaction #50
4 changed files with 15 additions and 16 deletions
@@ -40,8 +40,6 @@ func NewLFUDAStore(component string, store BlobStore, maxSize float64) *LFUDAStore
 const nameLFUDA = "lfuda"
 
-var fakeTrue = []byte{'t'}
-
 // Name is the cache type name
 func (l *LFUDAStore) Name() string { return nameLFUDA }
 
@@ -66,7 +64,7 @@ func (l *LFUDAStore) Get(hash string) (stream.Blob, error) {
 
 // Put stores the blob. Following LFUDA rules it's not guaranteed that a SET will store the value!!!
 func (l *LFUDAStore) Put(hash string, blob stream.Blob) error {
-	l.lfuda.Set(hash, fakeTrue)
+	l.lfuda.Set(hash, true)
 	has, _ := l.Has(hash)
 	if has {
 		err := l.store.Put(hash, blob)
@@ -77,14 +75,16 @@ func (l *LFUDAStore) Put(hash string, blob stream.Blob) error {
 	return nil
 }
 
-// PutSD stores the sd blob
+// PutSD stores the sd blob. Following LFUDA rules it's not guaranteed that a SET will store the value!!!
 func (l *LFUDAStore) PutSD(hash string, blob stream.Blob) error {
-	err := l.store.PutSD(hash, blob)
-	if err != nil {
-		return err
+	l.lfuda.Set(hash, true)
+	has, _ := l.Has(hash)
+	if has {
+		err := l.store.PutSD(hash, blob)
+		if err != nil {
+			return err
+		}
 	}
 
-	l.lfuda.Set(hash, fakeTrue)
 	return nil
 }
 
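The reworked Put and PutSD now share the same admit-then-store flow: set the hash into the LFUDA cache, check whether the cache actually kept it, and only then write the blob to the underlying store. Below is a minimal, self-contained sketch of that pattern; the admitter/backing interfaces and the map-backed fakes are illustrative only, not types from reflector.go.

package main

import "fmt"

// admitter models a cache that may refuse to keep a key (as LFUDA can).
type admitter interface {
	Set(key string, value bool)
	Has(key string) bool
}

// backing models the underlying blob store.
type backing interface {
	Put(key string, blob []byte) error
}

// putIfAdmitted mirrors the new Put/PutSD flow: ask the cache to admit the key
// first, and only write to the backing store if the cache actually kept it.
func putIfAdmitted(cache admitter, store backing, key string, blob []byte) error {
	cache.Set(key, true)
	if !cache.Has(key) {
		// the cache declined the key, so don't spend a write on the backing store
		return nil
	}
	return store.Put(key, blob)
}

// toy implementations so the sketch runs on its own
type mapCache map[string]bool

func (m mapCache) Set(k string, v bool) { m[k] = v }
func (m mapCache) Has(k string) bool    { return m[k] }

type mapStore map[string][]byte

func (m mapStore) Put(k string, b []byte) error { m[k] = b; return nil }

func main() {
	cache, store := mapCache{}, mapStore{}
	if err := putIfAdmitted(cache, store, "deadbeef", []byte("blob bytes")); err != nil {
		panic(err)
	}
	fmt.Println("stored:", len(store) == 1)
}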
@@ -109,12 +109,13 @@ func (l *LFUDAStore) loadExisting(store lister, maxItems int) error {
 	if err != nil {
 		return err
 	}
+	logrus.Infof("read %d files from disk", len(existing))
 
 	added := 0
 	for _, h := range existing {
-		l.lfuda.Set(h, fakeTrue)
+		l.lfuda.Set(h, true)
 		added++
-		if maxItems > 0 && added >= maxItems { // underlying cache is bigger than LRU cache
+		if maxItems > 0 && added >= maxItems { // underlying cache is bigger than the cache
 			break
 		}
 	}
@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/lbryio/lbry.go/v2/extras/errors"
-	log "github.com/sirupsen/logrus"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -68,8 +67,6 @@ func TestFUDAStore_Eviction(t *testing.T) {
 
 	assert.Equal(t, cacheMaxBlobs, len(mem.Debug()))
 
-	keys := lfuda.lfuda.Keys()
-	log.Infof("%+v", keys)
 	for k, v := range map[string]bool{
 		"one": false,
 		"two": true,
@@ -6,6 +6,7 @@ import (
 	"github.com/lbryio/reflector.go/internal/metrics"
 
 	golru "github.com/hashicorp/golang-lru"
+	"github.com/sirupsen/logrus"
 )
 
 // LRUStore adds a max cache size and LRU eviction to a BlobStore
@@ -106,11 +107,12 @@ func (l *LRUStore) Delete(hash string) error {
 
 // loadExisting imports existing blobs from the underlying store into the LRU cache
 func (l *LRUStore) loadExisting(store lister, maxItems int) error {
+	logrus.Infof("loading at most %d items", maxItems)
 	existing, err := store.list()
 	if err != nil {
 		return err
 	}
-
+	logrus.Infof("read %d files from disk", len(existing))
 	added := 0
 	for _, h := range existing {
 		l.lru.Add(h, true)
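For context on the loadExisting change above, here is a rough standalone sketch of the same warm-up loop: list the hashes already on disk, add up to maxItems of them to the in-memory LRU, and log how much was read. It uses hashicorp/golang-lru as the store does, but the hash list and cache size are made up for the example, and fmt stands in for logrus so the snippet has no extra dependencies.

package main

import (
	"fmt"

	golru "github.com/hashicorp/golang-lru"
)

// seedLRU mimics loadExisting: it marks up to maxItems already-stored hashes
// in the in-memory LRU so the cache starts warm after a restart.
func seedLRU(cache *golru.Cache, existing []string, maxItems int) int {
	fmt.Printf("loading at most %d items\n", maxItems)
	fmt.Printf("read %d files from disk\n", len(existing))

	added := 0
	for _, h := range existing {
		cache.Add(h, true)
		added++
		if maxItems > 0 && added >= maxItems { // underlying store holds more than the cache tracks
			break
		}
	}
	return added
}

func main() {
	cache, err := golru.New(2) // tiny cache for the example
	if err != nil {
		panic(err)
	}
	existing := []string{"hash-one", "hash-two", "hash-three"} // stand-in for store.list()
	added := seedLRU(cache, existing, 2)
	fmt.Printf("seeded %d hashes, cache now tracks %d\n", added, cache.Len())
}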
@@ -83,6 +83,5 @@ func AllFiles(startDir string, basename bool) ([]string, error) {
 
 	close(pathChan)
 	pathWG.Wait()
-	logrus.Infoln("loaded LRU")
 	return paths, nil
 }