2013-10-19 02:56:28 +02:00
|
|
|
package ccache
|
|
|
|
|
|
|
|
// Configuration holds the tunable settings for the cache. Create one with
// Configure(), then adjust individual settings through the fluent setter
// methods below.
type Configuration struct {
	size           uint64 // total size, in bytes, of the data to cache
	buckets        int    // number of buckets keys are hashed into (concurrency)
	itemsToPrune   int    // number of items to prune when memory is low
	deleteBuffer   int    // capacity of the pending-delete queue; Delete() blocks when full
	promoteBuffer  int    // capacity of the pending-promotion queue; promotions are skipped when full
	getsPerPromote int32  // number of Gets a key needs before being promoted
	tracking       bool   // when true, items fetched via TrackingGet are reference counted and exempt from eviction until Release()
}
func Configure() *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
return &Configuration{
|
|
|
|
buckets: 64,
|
|
|
|
itemsToPrune: 500,
|
|
|
|
deleteBuffer: 1024,
|
|
|
|
getsPerPromote: 10,
|
|
|
|
promoteBuffer: 1024,
|
|
|
|
size: 500 * 1024 * 1024,
|
|
|
|
tracking: false,
|
|
|
|
}
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// The size, in bytes, of the data to cache
|
|
|
|
// [500MB]
|
2013-10-19 14:36:33 +02:00
|
|
|
func (c *Configuration) Size(bytes uint64) *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
c.size = bytes
|
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// Keys are hashed into % bucket count to provide greater concurrency (every set
|
|
|
|
// requires a write lock on the bucket)
|
|
|
|
// [64]
|
2013-10-19 14:36:33 +02:00
|
|
|
func (c *Configuration) Buckets(count int) *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
c.buckets = count
|
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// The number of items to prune when memory is low
|
|
|
|
// [500]
|
2013-10-19 14:36:33 +02:00
|
|
|
func (c *Configuration) ItemsToPrune(count int) *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
c.itemsToPrune = count
|
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// The size of the queue for items which should be promoted. If the queue fills
|
|
|
|
// up, promotions are skipped
|
|
|
|
// [1024]
|
2013-10-19 14:36:33 +02:00
|
|
|
func (c *Configuration) PromoteBuffer(size int) *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
c.promoteBuffer = size
|
|
|
|
return c
|
2013-10-19 02:56:28 +02:00
|
|
|
}
|
2013-10-30 13:18:51 +01:00
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// The size of the queue for items which should be deleted. If the queue fills
|
|
|
|
// up, calls to Delete() will block
|
2013-10-30 13:18:51 +01:00
|
|
|
func (c *Configuration) DeleteBuffer(size int) *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
c.deleteBuffer = size
|
|
|
|
return c
|
2013-10-30 13:18:51 +01:00
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// Give a large cache with a high read / write ratio, it's usually unecessary
|
|
|
|
// to promote an item on every Get. GetsPerPromote specifies the number of Gets
|
|
|
|
// a key must have before being promoted
|
|
|
|
// [10]
|
2013-10-30 13:18:51 +01:00
|
|
|
func (c *Configuration) GetsPerPromote(count int) *Configuration {
|
2014-02-28 13:10:42 +01:00
|
|
|
c.getsPerPromote = int32(count)
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
2014-02-28 17:44:39 +01:00
|
|
|
// Typically, a cache is agnostic about how cached values are use. This is fine
|
|
|
|
// for a typical cache usage, where you fetch an item from the cache, do something
|
|
|
|
// (write it out to) and nothing else.
|
|
|
|
|
|
|
|
// However, if callers are going to keep a reference to a cached item for a long
|
|
|
|
// time, things get messy. Specifically, the cache can evict the item, while
|
|
|
|
// references still exist. Technically, this isn't an issue. However, if you reload
|
|
|
|
// the item back into the cache, you end up with 2 objects representing the same
|
|
|
|
// data. This is a waste of space and could lead to weird behavior (the type an
|
|
|
|
// identity map is meant to solve).
|
|
|
|
|
|
|
|
// By turning tracking on and using the cache's TrackingGet, the cache
|
|
|
|
// won't evict items which you haven't called Release() on. It's a simple reference
|
|
|
|
// counter.
|
2014-02-28 13:10:42 +01:00
|
|
|
func (c *Configuration) Track() *Configuration {
|
|
|
|
c.tracking = true
|
|
|
|
return c
|
2013-10-30 13:18:51 +01:00
|
|
|
}
|