// An LRU cache aimed at high concurrency
package ccache

import (
	"container/list"
	"hash/fnv"
	"sync/atomic"
	"time"
)

type LayeredCache struct {
	*Configuration
	list        *list.List
	buckets     []*layeredBucket
	bucketMask  uint32
	size        int64
	deletables  chan *Item
	promotables chan *Item
	control     chan interface{}
}

// Create a new layered cache with the specified configuration.
// A layered cache uses two keys to identify a value: a primary key
// and a secondary key. Get, Set and Delete require both a primary and
// secondary key. However, DeleteAll requires only a primary key, deleting
// all values that share the same primary key.
//
// A layered cache is useful as an HTTP cache, where an HTTP purge might
// delete multiple variants of the same resource:
// primary key = "user/44"
// secondary key 1 = ".json"
// secondary key 2 = ".xml"
//
// See ccache.Configure() for creating a configuration
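//
// A minimal usage sketch (illustrative only; the Configure options and error
// handling are omitted):
//
//	cache := ccache.Layered(ccache.Configure())
//	cache.Set("user/44", ".json", `{"id": 44}`, 5*time.Minute)
//	cache.Set("user/44", ".xml", `<user id="44"/>`, 5*time.Minute)
//	cache.DeleteAll("user/44") // purges both variants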
func Layered(config *Configuration) *LayeredCache {
	c := &LayeredCache{
		list:          list.New(),
		Configuration: config,
		bucketMask:    uint32(config.buckets) - 1,
		buckets:       make([]*layeredBucket, config.buckets),
		deletables:    make(chan *Item, config.deleteBuffer),
		control:       make(chan interface{}),
	}
	for i := 0; i < int(config.buckets); i++ {
		c.buckets[i] = &layeredBucket{
			buckets: make(map[string]*bucket),
		}
	}
	c.restart()
	return c
}

func (c *LayeredCache) ItemCount() int {
	count := 0
	for _, b := range c.buckets {
		count += b.itemCount()
	}
	return count
}

// Get an item from the cache. Returns nil if the item wasn't found.
// This can return an expired item. Use item.Expired() to see if the item
// is expired and item.TTL() to see how long until the item expires (which
// will be negative for an already expired item).
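//
// For example (sketch):
//
//	item := cache.Get("user/44", ".json")
//	if item == nil || item.Expired() {
//		// treat as a miss
//	}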
func (c *LayeredCache) Get(primary, secondary string) *Item {
	item := c.bucket(primary).get(primary, secondary)
	if item == nil {
		return nil
	}
	if item.expires > time.Now().UnixNano() {
		c.promote(item)
	}
	return item
}

// Get the secondary cache for a given primary key. This operation will
// never return nil. In the case where the primary key does not exist, a
// new, underlying, empty bucket will be created and returned.
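//
// Sketch, assuming the SecondaryCache type declared elsewhere in this package
// exposes Get/Set methods keyed by the secondary key alone:
//
//	sc := cache.GetOrCreateSecondaryCache("user/44")
//	sc.Set(".json", `{"id": 44}`, 5*time.Minute)
//	item := sc.Get(".json")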
func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache {
	primaryBkt := c.bucket(primary)
	bkt := primaryBkt.getSecondaryBucket(primary)
	primaryBkt.Lock()
	if bkt == nil {
		bkt = &bucket{lookup: make(map[string]*Item)}
		primaryBkt.buckets[primary] = bkt
	}
	primaryBkt.Unlock()
	return &SecondaryCache{
		bucket: bkt,
		pCache: c,
	}
}

// Used when the cache was created with the Track() configuration option.
// Avoid otherwise
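//
// Sketch, assuming the cache was built with Configure().Track() and that
// TrackedItem exposes a Release method (declared elsewhere in this package):
//
//	cache := ccache.Layered(ccache.Configure().Track())
//	item := cache.TrackingGet("user/44", ".json")
//	defer item.Release() // tracked items are skipped by gc until released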
func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
	item := c.Get(primary, secondary)
	if item == nil {
		return NilTracked
	}
	item.track()
	return item
}

// Set the value in the cache for the specified duration
func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) {
	c.set(primary, secondary, value, duration)
}

// Replace the value if it exists, does not set if it doesn't.
// Returns true if the item existed and was replaced, false otherwise.
// Replace does not reset item's TTL nor does it alter its position in the LRU
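//
// A common set-or-replace sketch (newBody stands in for any value):
//
//	if !cache.Replace("user/44", ".json", newBody) {
//		cache.Set("user/44", ".json", newBody, 5*time.Minute)
//	}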
func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool {
	item := c.bucket(primary).get(primary, secondary)
	if item == nil {
		return false
	}
	c.Set(primary, secondary, value, item.TTL())
	return true
}

// Attempts to get the value from the cache and calls fetch on a miss.
// If fetch returns an error, no value is cached and the error is returned back
// to the caller.
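//
// Sketch, with a hypothetical loadUser standing in for the real data source:
//
//	item, err := cache.Fetch("user/44", ".json", 5*time.Minute, func() (interface{}, error) {
//		return loadUser(44)
//	})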
func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
	item := c.Get(primary, secondary)
	if item != nil {
		return item, nil
	}
	value, err := fetch()
	if err != nil {
		return nil, err
	}
	return c.set(primary, secondary, value, duration), nil
}

// Remove the item from the cache, return true if the item was present, false otherwise.
func (c *LayeredCache) Delete(primary, secondary string) bool {
	item := c.bucket(primary).delete(primary, secondary)
	if item != nil {
		c.deletables <- item
		return true
	}
	return false
}

// Deletes all items that share the same primary key
func (c *LayeredCache) DeleteAll(primary string) bool {
	return c.bucket(primary).deleteAll(primary, c.deletables)
}
// Deletes all items that share the same primary key and prefix.
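//
// Here the prefix applies to the secondary key. Sketch:
//
//	cache.DeletePrefix("user/44", ".json") // drops ".json", ".json.gz", etc.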
func (c *LayeredCache) DeletePrefix(primary, prefix string) int {
	return c.bucket(primary).deletePrefix(primary, prefix, c.deletables)
}
// Deletes all items that share the same primary key and where the matches func evaluates to true.
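//
// Sketch, assuming matches receives each item's secondary key:
//
//	cache.DeleteFunc("user/44", func(key string, item interface{}) bool {
//		return key != ".json" // drop every variant except ".json"
//	})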
func (c *LayeredCache) DeleteFunc(primary string, matches func(key string, item interface{}) bool) int {
	return c.bucket(primary).deleteFunc(primary, matches, c.deletables)
}

// This isn't thread safe. It's meant to be called from non-concurrent tests.
func (c *LayeredCache) Clear() {
	for _, bucket := range c.buckets {
		bucket.clear()
	}
	c.size = 0
	c.list = list.New()
}
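
// Stop the background worker. Operations performed on the cache after Stop
// is called are likely to panic (the promotables channel is closed).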
func (c *LayeredCache) Stop() {
	close(c.promotables)
	<-c.control
}

// Gets the number of items removed from the cache due to memory pressure since
// the last time GetDropped was called
func (c *LayeredCache) GetDropped() int {
	res := make(chan int)
	c.control <- getDropped{res: res}
	return <-res
}

// Sets a new max size. That can result in a GC being run if the new maximum size
// is smaller than the cached size
func (c *LayeredCache) SetMaxSize(size int64) {
	c.control <- setMaxSize{size}
}

func (c *LayeredCache) restart() {
	c.promotables = make(chan *Item, c.promoteBuffer)
	c.control = make(chan interface{})
	go c.worker()
}

func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item {
	item, existing := c.bucket(primary).set(primary, secondary, value, duration)
	if existing != nil {
		c.deletables <- existing
	}
	c.promote(item)
	return item
}
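
// bucket picks the shard for a primary key by hashing it with FNV-1a and
// masking the result with bucketMask. Since bucketMask is buckets-1, keys
// only spread across every bucket when the configured bucket count is a
// power of two.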
func (c *LayeredCache) bucket(key string) *layeredBucket {
	h := fnv.New32a()
	h.Write([]byte(key))
	return c.buckets[h.Sum32()&c.bucketMask]
}

func (c *LayeredCache) promote(item *Item) {
	c.promotables <- item
}
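
// worker is the single goroutine that owns the LRU list and the size
// accounting: promotions, deletions and control messages (getDropped,
// setMaxSize) are funneled through channels and applied here, so the list
// itself never needs a lock. It returns when promotables is closed by Stop,
// closing the control channel on the way out.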
func (c *LayeredCache) worker() {
	defer close(c.control)
	dropped := 0
	for {
		select {
		case item, ok := <-c.promotables:
			if !ok {
				return
			}
			if c.doPromote(item) && c.size > c.maxSize {
				dropped += c.gc()
			}
		case item := <-c.deletables:
			if item.element == nil {
				atomic.StoreInt32(&item.promotions, -2)
			} else {
				c.size -= item.size
				if c.onDelete != nil {
					c.onDelete(item)
				}
				c.list.Remove(item.element)
			}
		case control := <-c.control:
			switch msg := control.(type) {
			case getDropped:
				msg.res <- dropped
				dropped = 0
			case setMaxSize:
				c.maxSize = msg.size
				if c.size > c.maxSize {
					dropped += c.gc()
				}
			}
		}
	}
}

func (c *LayeredCache) doPromote(item *Item) bool {
	// deleted before it ever got promoted
	if atomic.LoadInt32(&item.promotions) == -2 {
		return false
	}
	if item.element != nil { // not a new item
		if item.shouldPromote(c.getsPerPromote) {
			c.list.MoveToFront(item.element)
			atomic.StoreInt32(&item.promotions, 0)
		}
		return false
	}
	c.size += item.size
	item.element = c.list.PushFront(item)
	return true
}

func (c *LayeredCache) gc() int {
	element := c.list.Back()
	dropped := 0
	for i := 0; i < c.itemsToPrune; i++ {
		if element == nil {
			return dropped
		}
		prev := element.Prev()
		item := element.Value.(*Item)
		if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 {
			c.bucket(item.group).delete(item.group, item.key)
			c.size -= item.size
			c.list.Remove(element)
			// mark as deleted with an atomic store; promotions is accessed atomically elsewhere
			atomic.StoreInt32(&item.promotions, -2)
			dropped++
		}
		element = prev
	}
	return dropped
}