2018-01-31 02:15:21 +01:00
|
|
|
package store
|
|
|
|
|
|
|
|
import (
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path"
|
2020-05-09 03:06:51 +02:00
|
|
|
"path/filepath"
|
2018-01-31 02:15:21 +01:00
|
|
|
|
2019-11-14 01:11:35 +01:00
|
|
|
"github.com/lbryio/lbry.go/v2/extras/errors"
|
|
|
|
"github.com/lbryio/lbry.go/v2/stream"
|
2020-10-21 23:31:15 +02:00
|
|
|
"github.com/spf13/afero"
|
2019-11-14 00:50:49 +01:00
|
|
|
|
2020-10-21 23:31:15 +02:00
|
|
|
lru "github.com/hashicorp/golang-lru"
|
2018-01-31 02:15:21 +01:00
|
|
|
)
|
|
|
|
|
2019-10-03 22:24:59 +02:00
|
|
|
// DiskBlobStore stores blobs on a local disk
type DiskBlobStore struct {
	// the location of blobs on disk
	blobDir string
	// max number of blobs to store; also the capacity of the lru cache below,
	// so exceeding it evicts (and deletes) the least recently used blob
	maxBlobs int
	// store files in subdirectories based on the first N chars in the filename. 0 = don't create subdirectories.
	prefixLength int

	// tracks the hashes currently stored; the eviction callback (see initOnce)
	// removes the evicted blob's file from disk
	lru *lru.Cache

	// filesystem abstraction
	fs afero.Fs

	// true if initOnce ran, false otherwise.
	// NOTE(review): not guarded by any lock — confirm callers never call
	// store methods concurrently before initialization completes.
	initialized bool
}
|
|
|
|
|
2019-10-03 22:24:59 +02:00
|
|
|
// NewDiskBlobStore returns an initialized file disk store pointer.
|
2020-10-21 23:31:15 +02:00
|
|
|
func NewDiskBlobStore(dir string, maxBlobs, prefixLength int) *DiskBlobStore {
|
|
|
|
dbs := DiskBlobStore{
|
|
|
|
blobDir: dir,
|
|
|
|
maxBlobs: maxBlobs,
|
|
|
|
prefixLength: prefixLength,
|
|
|
|
fs: afero.NewOsFs(),
|
|
|
|
}
|
2019-11-14 00:50:49 +01:00
|
|
|
return &dbs
|
2019-10-03 22:12:49 +02:00
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) dir(hash string) string {
|
|
|
|
if d.prefixLength <= 0 || len(hash) < d.prefixLength {
|
|
|
|
return d.blobDir
|
2019-10-03 22:12:49 +02:00
|
|
|
}
|
2019-10-03 22:34:57 +02:00
|
|
|
return path.Join(d.blobDir, hash[:d.prefixLength])
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) path(hash string) string {
|
|
|
|
return path.Join(d.dir(hash), hash)
|
2019-10-03 22:12:49 +02:00
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) ensureDirExists(dir string) error {
|
2020-10-21 23:31:15 +02:00
|
|
|
return errors.Err(d.fs.MkdirAll(dir, 0755))
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) initOnce() error {
|
|
|
|
if d.initialized {
|
2018-01-31 02:15:21 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
err := d.ensureDirExists(d.blobDir)
|
2019-10-03 22:12:49 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
2018-07-26 16:25:47 +02:00
|
|
|
|
2020-10-21 23:31:15 +02:00
|
|
|
l, err := lru.NewWithEvict(d.maxBlobs, func(key interface{}, value interface{}) {
|
|
|
|
_ = d.fs.Remove(d.path(key.(string))) // TODO: log this error. may happen if file is gone but cache entry still there?
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return errors.Err(err)
|
|
|
|
}
|
|
|
|
d.lru = l
|
|
|
|
|
|
|
|
err = d.loadExisting()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
d.initialized = true
|
2018-01-31 02:15:21 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-05-30 03:38:55 +02:00
|
|
|
// Has returns T/F or Error if it the blob stored already. It will error with any IO disk error.
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) Has(hash string) (bool, error) {
|
|
|
|
err := d.initOnce()
|
2018-01-31 02:15:21 +01:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
2020-10-21 23:31:15 +02:00
|
|
|
return d.lru.Contains(hash), nil
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
// Get returns the blob or an error if the blob doesn't exist.
|
|
|
|
func (d *DiskBlobStore) Get(hash string) (stream.Blob, error) {
|
|
|
|
err := d.initOnce()
|
2018-01-31 02:15:21 +01:00
|
|
|
if err != nil {
|
2019-10-03 22:12:49 +02:00
|
|
|
return nil, err
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
|
|
|
|
2020-10-21 23:31:15 +02:00
|
|
|
_, has := d.lru.Get(hash)
|
|
|
|
if !has {
|
|
|
|
return nil, errors.Err(ErrBlobNotFound)
|
|
|
|
}
|
|
|
|
|
|
|
|
file, err := d.fs.Open(d.path(hash))
|
2018-01-31 02:15:21 +01:00
|
|
|
if err != nil {
|
2018-02-07 21:21:20 +01:00
|
|
|
if os.IsNotExist(err) {
|
2020-10-21 23:31:15 +02:00
|
|
|
d.lru.Remove(hash)
|
2019-10-03 22:12:49 +02:00
|
|
|
return nil, errors.Err(ErrBlobNotFound)
|
2018-02-07 21:21:20 +01:00
|
|
|
}
|
2019-10-03 22:12:49 +02:00
|
|
|
return nil, err
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
2020-07-10 15:19:57 +02:00
|
|
|
defer file.Close()
|
2018-01-31 02:15:21 +01:00
|
|
|
|
|
|
|
return ioutil.ReadAll(file)
|
|
|
|
}
|
|
|
|
|
2018-07-26 16:25:47 +02:00
|
|
|
// Put stores the blob on disk
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) Put(hash string, blob stream.Blob) error {
|
|
|
|
err := d.initOnce()
|
2018-01-31 02:15:21 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-10-03 22:34:57 +02:00
|
|
|
err = d.ensureDirExists(d.dir(hash))
|
2019-10-03 22:12:49 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-10-21 23:31:15 +02:00
|
|
|
err = afero.WriteFile(d.fs, d.path(hash), blob, 0644)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Err(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
d.lru.Add(hash, true)
|
|
|
|
|
|
|
|
return nil
|
2018-01-31 02:15:21 +01:00
|
|
|
}
|
2018-02-02 22:49:20 +01:00
|
|
|
|
2018-07-26 16:25:47 +02:00
|
|
|
// PutSD stores the sd blob on the disk
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) PutSD(hash string, blob stream.Blob) error {
|
|
|
|
return d.Put(hash, blob)
|
2018-02-02 22:49:20 +01:00
|
|
|
}
|
2018-09-11 13:41:29 +02:00
|
|
|
|
|
|
|
// Delete deletes the blob from the store
|
2019-10-03 22:34:57 +02:00
|
|
|
func (d *DiskBlobStore) Delete(hash string) error {
|
|
|
|
err := d.initOnce()
|
2018-09-11 13:41:29 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-10-21 23:31:15 +02:00
|
|
|
d.lru.Remove(hash)
|
|
|
|
return nil
|
2019-11-14 00:50:49 +01:00
|
|
|
}
|
|
|
|
|
2020-10-21 23:31:15 +02:00
|
|
|
// loadExisting scans the blobDir and imports existing blobs into lru cache
|
|
|
|
func (d *DiskBlobStore) loadExisting() error {
|
|
|
|
dirs, err := afero.ReadDir(d.fs, d.blobDir)
|
2019-11-14 00:50:49 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-10-21 23:31:15 +02:00
|
|
|
|
2019-11-14 00:50:49 +01:00
|
|
|
for _, dir := range dirs {
|
|
|
|
if dir.IsDir() {
|
2020-10-21 23:31:15 +02:00
|
|
|
files, err := afero.ReadDir(d.fs, filepath.Join(d.blobDir, dir.Name()))
|
2019-11-14 00:50:49 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, file := range files {
|
|
|
|
if file.Mode().IsRegular() && !file.IsDir() {
|
2020-10-21 23:31:15 +02:00
|
|
|
d.lru.Add(file.Name(), true)
|
2019-11-14 00:50:49 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|