Remove rm_scan_count_limit from Cache creation.
parent b9f06fe785
commit 0c13b55e55
4 changed files with 6 additions and 16 deletions
@@ -761,16 +761,11 @@ LRUCache

 Wraps the rocksdb LRUCache

-.. py:method:: __init__(capacity, shard_bits=None, rm_scan_count_limit=None)
+.. py:method:: __init__(capacity, shard_bits=None)

     Create a new cache with a fixed size capacity. The cache is sharded
     to 2^numShardBits shards, by hash of the key. The total capacity
-    is divided and evenly assigned to each shard. Inside each shard,
-    the eviction is done in two passes: first try to free spaces by
-    evicting entries that are among the most least used removeScanCountLimit
-    entries and do not have reference other than by the cache itself, in
-    the least-used order. If not enough space is freed, further free the
-    entries in least used order.
+    is divided and evenly assigned to each shard.


 .. _table_factories_label:
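For reference (not part of the commit), a minimal sketch of how the trimmed-down constructor is used, assuming the usual pyrocksdb names (rocksdb.LRUCache, rocksdb.Options, rocksdb.DB, and a block_cache keyword on rocksdb.BlockBasedTableFactory):

    import rocksdb

    # 128 MB cache split into 2**4 = 16 shards by key hash;
    # there is no rm_scan_count_limit argument any more.
    block_cache = rocksdb.LRUCache(128 * 1024 * 1024, shard_bits=4)

    opts = rocksdb.Options(create_if_missing=True)
    opts.table_factory = rocksdb.BlockBasedTableFactory(block_cache=block_cache)

    db = rocksdb.DB("example.db", opts)  # hypothetical database path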
@@ -49,6 +49,8 @@ In newer versions of rocksdb a bunch of options were moved or removed.
 * Moved ``Options.block_restart_interval`` to ``BlockBasedTableFactory``
 * Moved ``Options.whole_key_filtering`` to ``BlockBasedTableFactory``
 * Removed ``Options.table_cache_remove_scan_count_limit``
+* Removed rm_scan_count_limit from ``LRUCache``
+

 New:
 ^^^^
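A rough migration sketch for the moved options (not from the commit; the keyword names on BlockBasedTableFactory are assumed to mirror the old Options attributes):

    import rocksdb

    opts = rocksdb.Options()
    # Formerly set directly as opts.block_restart_interval and
    # opts.whole_key_filtering; now passed to the table factory.
    opts.table_factory = rocksdb.BlockBasedTableFactory(
        block_restart_interval=16,
        whole_key_filtering=True)
    # Options.table_cache_remove_scan_count_limit and the LRUCache
    # rm_scan_count_limit argument were removed with no replacement.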
@@ -442,15 +442,9 @@ cdef class PyCache(object):

 cdef class PyLRUCache(PyCache):
     cdef shared_ptr[cache.Cache] cache_ob

-    def __cinit__(self, capacity, shard_bits=None, rm_scan_count_limit=None):
+    def __cinit__(self, capacity, shard_bits=None):
         if shard_bits is not None:
-            if rm_scan_count_limit is not None:
-                self.cache_ob = cache.NewLRUCache(
-                    capacity,
-                    shard_bits,
-                    rm_scan_count_limit)
-            else:
-                self.cache_ob = cache.NewLRUCache(capacity, shard_bits)
+            self.cache_ob = cache.NewLRUCache(capacity, shard_bits)
         else:
             self.cache_ob = cache.NewLRUCache(capacity)
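The Python-visible effect of the new __cinit__ signature, as a sketch of the expected behaviour (capacity and shard values are made up):

    import rocksdb

    rocksdb.LRUCache(2 ** 20)                # one-argument NewLRUCache overload
    rocksdb.LRUCache(2 ** 20, shard_bits=4)  # two-argument overload

    try:
        rocksdb.LRUCache(2 ** 20, 4, 1000)   # old third argument: rm_scan_count_limit
    except TypeError:
        pass  # the constructor no longer accepts a third argument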
@@ -6,4 +6,3 @@ cdef extern from "rocksdb/cache.h" namespace "rocksdb":

     cdef extern shared_ptr[Cache] NewLRUCache(size_t)
     cdef extern shared_ptr[Cache] NewLRUCache(size_t, int)
-    cdef extern shared_ptr[Cache] NewLRUCache(size_t, int, int)