d9d6e2b00e
How do you decide you need to purge your cache? Relying on runtime.ReadMemStats sucks for two reasons. First, it's a stop-the-world call, which is pretty bad in general and downright stupid for a supposedly concurrent-focused package. Secondly, it only tells you the total memory usage, but most of the time you really want to limit the amount of memory the cache itself uses. Since there's no great way to determine the size of an object, that means users need to supply the size. One way is to make it so that any cached item satisfies a simple interface which exposes a Size() method. With this, we can track how much memory a set puts in and a delete releases. But it's hard for consumers to know how much memory they're taking when storing complex objects (the entire point of an in-process cache is to avoid having to serialize the data). Since any Size() is bound to be a rough guess, we can simplify the entire thing by evicting based on # of items. This works really badly when items vary greatly in size (an HTTP cache), but in a lot of other cases it works great. Furthermore, even for an HTTP cache, given enough values, it should average out in most cases. Whatever. This improves performance and should improve the usability of the cache. It is a pretty big breaking change though.
167 lines
3.2 KiB
Go
167 lines
3.2 KiB
Go
// An LRU cache aimed at high concurrency
|
|
package ccache
|
|
|
|
import (
|
|
"container/list"
|
|
"hash/fnv"
|
|
"sync/atomic"
|
|
"time"
|
|
)
|
|
|
|
// Cache is an LRU cache aimed at high concurrency. Keys are sharded
// across buckets to reduce lock contention, while recency ordering is
// maintained by a single background worker goroutine that owns the
// list and is fed through the deletables/promotables channels.
type Cache struct {
	*Configuration              // embedded tuning knobs (buckets, maxItems, buffers, ...)
	list        *list.List      // recency list; most-recently-used at the front; owned by worker()
	buckets     []*Bucket       // key shards, each with its own lookup map
	bucketCount uint32          // len(buckets), cached as uint32 for the hash modulo
	deletables  chan *Item      // items queued for removal from the recency list
	promotables chan *Item      // items queued for promotion to the list front
}
|
|
|
|
func New(config *Configuration) *Cache {
|
|
c := &Cache{
|
|
list: list.New(),
|
|
Configuration: config,
|
|
bucketCount: uint32(config.buckets),
|
|
buckets: make([]*Bucket, config.buckets),
|
|
deletables: make(chan *Item, config.deleteBuffer),
|
|
promotables: make(chan *Item, config.promoteBuffer),
|
|
}
|
|
for i := 0; i < config.buckets; i++ {
|
|
c.buckets[i] = &Bucket{
|
|
lookup: make(map[string]*Item),
|
|
}
|
|
}
|
|
go c.worker()
|
|
return c
|
|
}
|
|
|
|
func (c *Cache) Get(key string) interface{} {
|
|
if item := c.get(key); item != nil {
|
|
return item.value
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (c *Cache) TrackingGet(key string) TrackedItem {
|
|
item := c.get(key)
|
|
if item == nil {
|
|
return NilTracked
|
|
}
|
|
item.track()
|
|
return item
|
|
}
|
|
|
|
func (c *Cache) get(key string) *Item {
|
|
bucket := c.bucket(key)
|
|
item := bucket.get(key)
|
|
if item == nil {
|
|
return nil
|
|
}
|
|
if item.expires.Before(time.Now()) {
|
|
c.deleteItem(bucket, item)
|
|
return nil
|
|
}
|
|
c.conditionalPromote(item)
|
|
return item
|
|
}
|
|
|
|
func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
|
|
item, new := c.bucket(key).set(key, value, duration)
|
|
if new {
|
|
c.promote(item)
|
|
} else {
|
|
c.conditionalPromote(item)
|
|
}
|
|
}
|
|
|
|
func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (interface{}, error) {
|
|
item := c.Get(key)
|
|
if item != nil {
|
|
return item, nil
|
|
}
|
|
value, err := fetch()
|
|
if err == nil {
|
|
c.Set(key, value, duration)
|
|
}
|
|
return value, err
|
|
}
|
|
|
|
func (c *Cache) Delete(key string) {
|
|
item := c.bucket(key).getAndDelete(key)
|
|
if item != nil {
|
|
c.deletables <- item
|
|
}
|
|
}
|
|
|
|
//this isn't thread safe. It's meant to be called from non-concurrent tests
|
|
func (c *Cache) Clear() {
|
|
for _, bucket := range c.buckets {
|
|
bucket.clear()
|
|
}
|
|
c.list = list.New()
|
|
}
|
|
|
|
// deleteItem removes the item from its bucket's lookup map immediately,
// then queues it so the worker goroutine unlinks it from the recency list.
func (c *Cache) deleteItem(bucket *Bucket, item *Item) {
	bucket.delete(item.key) //stop other GETs from getting it
	c.deletables <- item
}
|
|
|
|
func (c *Cache) bucket(key string) *Bucket {
|
|
h := fnv.New32a()
|
|
h.Write([]byte(key))
|
|
index := h.Sum32() % c.bucketCount
|
|
return c.buckets[index]
|
|
}
|
|
|
|
func (c *Cache) conditionalPromote(item *Item) {
|
|
if item.shouldPromote(c.getsPerPromote) == false {
|
|
return
|
|
}
|
|
c.promote(item)
|
|
}
|
|
|
|
// promote hands the item to the worker goroutine, which will move it
// to (or insert it at) the front of the recency list.
func (c *Cache) promote(item *Item) {
	c.promotables <- item
}
|
|
|
|
// worker is the single goroutine that owns c.list. Funneling every
// list mutation through the two channels means the list itself needs
// no lock.
//
// NOTE(review): there is no shutdown signal, so this goroutine lives
// for the life of the process — confirm that matches the intended
// Cache lifetime.
func (c *Cache) worker() {
	for {
		select {
		case item := <-c.promotables:
			// doPromote returns true only for a newly inserted item,
			// i.e. the list grew — check capacity and prune if needed.
			if c.doPromote(item) && c.list.Len() > c.maxItems {
				c.gc()
			}
		case item := <-c.deletables:
			c.list.Remove(item.element)
		}
	}
}
|
|
|
|
func (c *Cache) doPromote(item *Item) bool {
|
|
item.Lock()
|
|
defer item.Unlock()
|
|
item.promotions = 0
|
|
if item.element != nil { //not a new item
|
|
c.list.MoveToFront(item.element)
|
|
return false
|
|
}
|
|
item.element = c.list.PushFront(item)
|
|
return true
|
|
}
|
|
|
|
func (c *Cache) gc() {
|
|
element := c.list.Back()
|
|
for i := 0; i < c.itemsToPrune; i++ {
|
|
if element == nil {
|
|
return
|
|
}
|
|
prev := element.Prev()
|
|
item := element.Value.(*Item)
|
|
if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
|
|
c.bucket(item.key).delete(item.key)
|
|
c.list.Remove(element)
|
|
}
|
|
element = prev
|
|
}
|
|
}
|