2013-10-19 02:56:28 +02:00
|
|
|
package ccache
|
|
|
|
|
|
|
|
// Configuration holds the tunable settings for a cache. Obtain one via
// Configure(), which fills in sensible defaults, then adjust it through the
// fluent setter methods (MaxSize, Buckets, ItemsToPrune, ...).
type Configuration struct {
	// maxSize is the maximum number of items the cache will hold before
	// pruning kicks in; set via MaxSize. [5000]
	maxSize int64

	// buckets is the number of buckets keys are hashed into for greater
	// concurrency; must be a power of 2 (enforced by Buckets). [16]
	buckets int

	// itemsToPrune is the number of items to prune when memory is low;
	// set via ItemsToPrune. [500]
	itemsToPrune int

	// deleteBuffer is the size of the queue for items to be deleted; when
	// full, calls to Delete() block. Set via DeleteBuffer. [1024]
	deleteBuffer int

	// promoteBuffer is the size of the queue for items to be promoted;
	// when full, promotions are skipped. Set via PromoteBuffer. [1024]
	promoteBuffer int

	// getsPerPromote is the number of Gets a key must accumulate before
	// being promoted; set via GetsPerPromote. [3]
	getsPerPromote int32

	// tracking enables reference-counted protection from eviction (used
	// with TrackingGet/Release); enabled via Track. [false]
	tracking bool
}
|
|
|
|
|
2014-11-14 01:56:24 +01:00
|
|
|
// Creates a configuration object with sensible defaults
|
|
|
|
// Use this as the start of the fluent configuration:
|
2014-11-21 09:06:27 +01:00
|
|
|
// e.g.: ccache.New(ccache.Configure().MaxSize(10000))
|
2013-10-19 02:56:28 +02:00
|
|
|
func Configure() *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
return &Configuration{
|
2014-10-14 08:43:34 +02:00
|
|
|
buckets: 16,
|
2014-02-28 13:10:42 +01:00
|
|
|
itemsToPrune: 500,
|
|
|
|
deleteBuffer: 1024,
|
2014-10-14 08:43:34 +02:00
|
|
|
getsPerPromote: 3,
|
2014-02-28 13:10:42 +01:00
|
|
|
promoteBuffer: 1024,
|
2014-11-21 09:06:27 +01:00
|
|
|
maxSize: 5000,
|
2014-02-28 13:10:42 +01:00
|
|
|
tracking: false,
|
|
|
|
}
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-11-21 09:06:27 +01:00
|
|
|
// The max size for the cache
|
This is a sad commit.
How do you decide you need to purge your cache? Relying on runtime.ReadMemStats
sucks for two reasons. First, it's a stop-the-world call, which is pretty bad
in general and downright stupid for a supposedly concurrency-focused package.
Secondly, it only tells you the total memory usage, but most of the time you really
want to limit the amount of memory the cache itself uses.
Since there's no great way to determine the size of an object, that means users
need to supply the size. One way is to make it so that any cached item satisfies
a simple interface which exposes a Size() method. With this, we can track how
much memory a set puts in and a delete releases. But it's hard for consumers to
know how much memory they're taking when storing complex objects (the entire point
of an in-process cache is to avoid having to serialize the data). Since any Size()
is bound to be a rough guess, we can simplify the entire thing by evicting based
on # of items.
This works really badly when items vary greatly in size (an HTTP cache), but in
a lot of other cases it works great. Furthermore, even for an HTTP cache, given
enough values, it should average out in most cases.
Whatever. This improves performance and should improve the usability of the cache.
It is a pretty big breaking change though.
2014-04-08 17:36:28 +02:00
|
|
|
// [5000]
|
2014-11-21 09:06:27 +01:00
|
|
|
func (c *Configuration) MaxSize(max int64) *Configuration {
|
|
|
|
c.maxSize = max
|
2014-02-28 13:10:42 +01:00
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// Keys are hashed into % bucket count to provide greater concurrency (every set
|
2014-11-02 12:09:49 +01:00
|
|
|
// requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...)
|
|
|
|
// [16]
|
2014-10-25 02:46:18 +02:00
|
|
|
func (c *Configuration) Buckets(count uint32) *Configuration {
|
2014-11-02 12:09:49 +01:00
|
|
|
if count == 0 || ((count&(^count+1)) == count) == false {
|
|
|
|
count = 16
|
|
|
|
}
|
2014-10-25 02:46:18 +02:00
|
|
|
c.buckets = int(count)
|
2014-02-28 13:10:42 +01:00
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// The number of items to prune when memory is low
|
|
|
|
// [500]
|
2014-10-25 02:46:18 +02:00
|
|
|
func (c *Configuration) ItemsToPrune(count uint32) *Configuration {
|
|
|
|
c.itemsToPrune = int(count)
|
2014-02-28 13:10:42 +01:00
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// The size of the queue for items which should be promoted. If the queue fills
|
|
|
|
// up, promotions are skipped
|
|
|
|
// [1024]
|
2014-10-25 02:46:18 +02:00
|
|
|
func (c *Configuration) PromoteBuffer(size uint32) *Configuration {
|
|
|
|
c.promoteBuffer = int(size)
|
2014-02-28 13:10:42 +01:00
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
2013-10-30 13:18:51 +01:00
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// The size of the queue for items which should be deleted. If the queue fills
|
|
|
|
// up, calls to Delete() will block
|
2014-10-25 02:46:18 +02:00
|
|
|
func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
|
|
|
|
c.deleteBuffer = int(size)
|
2014-02-28 13:10:42 +01:00
|
|
|
return c
|
2013-10-30 13:18:51 +01:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// Given a large cache with a high read / write ratio, it's usually unnecessary
|
|
|
|
// to promote an item on every Get. GetsPerPromote specifies the number of Gets
|
|
|
|
// a key must have before being promoted
|
2014-10-25 02:46:18 +02:00
|
|
|
// [3]
|
|
|
|
func (c *Configuration) GetsPerPromote(count int32) *Configuration {
|
|
|
|
c.getsPerPromote = count
|
2014-02-28 13:10:42 +01:00
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// Typically, a cache is agnostic about how cached values are used. This is fine
|
|
|
|
// for a typical cache usage, where you fetch an item from the cache, do something
|
2014-11-14 01:56:24 +01:00
|
|
|
// (write it out) and nothing else.
|
2014-02-28 17:44:39 +01:00
|
|
|
|
|
|
|
// However, if callers are going to keep a reference to a cached item for a long
|
|
|
|
// time, things get messy. Specifically, the cache can evict the item, while
|
|
|
|
// references still exist. Technically, this isn't an issue. However, if you reload
|
|
|
|
// the item back into the cache, you end up with 2 objects representing the same
|
|
|
|
// data. This is a waste of space and could lead to weird behavior (the kind of problem an
|
|
|
|
// identity map is meant to solve).
|
|
|
|
|
|
|
|
// By turning tracking on and using the cache's TrackingGet, the cache
|
|
|
|
// won't evict items which you haven't called Release() on. It's a simple reference
|
|
|
|
// counter.
|
2014-02-28 13:10:42 +01:00
|
|
|
func (c *Configuration) Track() *Configuration {
|
|
|
|
c.tracking = true
|
|
|
|
return c
|
2013-10-30 13:18:51 +01:00
|
|
|
}
|