package store

import (
	"bytes"
	"sync"
	"testing"
	"time"

	"github.com/lbryio/lbry.go/v2/stream"
)
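
// TestCachingStore_Put checks that a blob written through the caching store
// ends up in both the origin and the cache.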
func TestCachingStore_Put(t *testing.T) {
	origin := NewMemStore()
	cache := NewMemStore()
	s := NewCachingStore("test", origin, cache)

	b := []byte("this is a blob of stuff")
	hash := "hash"

	err := s.Put(hash, b)
	if err != nil {
		t.Fatal(err)
	}

	has, err := origin.Has(hash)
	if err != nil {
		t.Fatal(err)
	}
	if !has {
		t.Errorf("failed to store blob in origin")
	}

	has, err = cache.Has(hash)
	if err != nil {
		t.Fatal(err)
	}
	if !has {
		t.Errorf("failed to store blob in cache")
	}
}
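
// TestCachingStore_CacheMiss checks that a blob that exists only in the origin
// is served by Get and then copied into the cache.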
func TestCachingStore_CacheMiss(t *testing.T) {
	origin := NewMemStore()
	cache := NewMemStore()
	s := NewCachingStore("test", origin, cache)

	b := []byte("this is a blob of stuff")
	hash := "hash"
	err := origin.Put(hash, b)
	if err != nil {
		t.Fatal(err)
	}

	res, err := s.Get(hash)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(b, res) {
		t.Errorf("expected Get() to return %s, got %s", string(b), string(res))
	}

	time.Sleep(10 * time.Millisecond) // storing to cache is done async so let's give it some time

	has, err := cache.Has(hash)
	if err != nil {
		t.Fatal(err)
	}
	if !has {
		t.Errorf("Get() did not copy blob to cache")
	}

	res, err = cache.Get(hash)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(b, res) {
		t.Errorf("expected cached Get() to return %s, got %s", string(b), string(res))
	}
}
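
// TestCachingStore_ThunderingHerd checks that concurrent Gets for the same
// blob are coalesced, so only the first request hits the slow origin.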
func TestCachingStore_ThunderingHerd(t *testing.T) {
	storeDelay := 100 * time.Millisecond
	origin := NewSlowBlobStore(storeDelay)
	cache := NewMemStore()
	s := NewCachingStore("test", origin, cache)

	b := []byte("this is a blob of stuff")
	hash := "hash"
	err := origin.Put(hash, b)
	if err != nil {
		t.Fatal(err)
	}

	wg := &sync.WaitGroup{}

	getNoErr := func() {
		defer wg.Done()
		res, err := s.Get(hash)
		if err != nil {
			// t.Fatal must not be called outside the test goroutine, so report and return instead
			t.Error(err)
			return
		}
		if !bytes.Equal(b, res) {
			t.Errorf("expected Get() to return %s, got %s", string(b), string(res))
		}
	}

	start := time.Now()

	wg.Add(4)
	go func() {
		go getNoErr()
		time.Sleep(10 * time.Millisecond)
		go getNoErr()
		time.Sleep(10 * time.Millisecond)
		go getNoErr()
		time.Sleep(10 * time.Millisecond)
		go getNoErr()
	}()

	wg.Wait()
	duration := time.Since(start)

	// only the first getNoErr() should hit the origin; the rest should wait for the first request to return.
	// once the first returns, the others should return immediately.
	// therefore, if the delay is much longer than 100ms, it means subsequent requests also went to the origin.
	expectedMaxDelay := storeDelay + 5*time.Millisecond // a bit of extra time to let requests finish
	if duration > expectedMaxDelay {
		t.Errorf("Expected delay of at most %s, got %s", expectedMaxDelay, duration)
	}
}
// SlowBlobStore adds a delay to each request
type SlowBlobStore struct {
	mem   *MemStore
	delay time.Duration
}
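
// Compile-time check that SlowBlobStore implements the package's blob store
// interface. This assumes the interface is named BlobStore; if it is named
// differently, adjust or drop this assertion.
var _ BlobStore = (*SlowBlobStore)(nil)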

func NewSlowBlobStore(delay time.Duration) *SlowBlobStore {
	return &SlowBlobStore{
		mem:   NewMemStore(),
		delay: delay,
	}
}

func (s *SlowBlobStore) Name() string {
	return "slow"
}

func (s *SlowBlobStore) Has(hash string) (bool, error) {
	time.Sleep(s.delay)
	return s.mem.Has(hash)
}

func (s *SlowBlobStore) Get(hash string) (stream.Blob, error) {
	time.Sleep(s.delay)
	return s.mem.Get(hash)
}

func (s *SlowBlobStore) Put(hash string, blob stream.Blob) error {
	time.Sleep(s.delay)
	return s.mem.Put(hash, blob)
}

func (s *SlowBlobStore) PutSD(hash string, blob stream.Blob) error {
	time.Sleep(s.delay)
	return s.mem.PutSD(hash, blob)
}

func (s *SlowBlobStore) Delete(hash string) error {
	time.Sleep(s.delay)
	return s.mem.Delete(hash)
}

func (s *SlowBlobStore) Shutdown() {}