Make the 'compact_range' call available in python

hofmockel 2014-05-31 17:59:29 +02:00
parent 84d2bf373d
commit c6e4014136
4 changed files with 69 additions and 2 deletions


@@ -247,6 +247,38 @@ Database object
    ``largest_seqno``
        largest seqno in file
.. py:method:: compact_range(begin=None, end=None, reduce_level=False, target_level=-1)

    Compact the underlying storage for the key range ``[begin, end]``.
    The actual compaction interval might be a superset of ``[begin, end]``.
    In particular, deleted and overwritten versions are discarded, and the
    data is rearranged to reduce the cost of operations needed to access it.
    This operation should typically only be invoked by users who understand
    the underlying implementation.

    ``begin == None`` is treated as a key before all keys in the database.
    ``end == None`` is treated as a key after all keys in the database.
    Therefore the following call compacts the entire database:
    ``db.compact_range()``.

    Note that after the entire database is compacted, all data is pushed
    down to the last level that contains any data. If the total data size
    shrinks during compaction, that level might no longer be appropriate
    for hosting all the files. In this case, the client can set
    ``reduce_level`` to ``True`` to move the files back to the minimum
    level capable of holding the data set, or to a given level specified
    by a non-negative ``target_level``.

    :param bytes begin: Key where the compaction starts.
        If ``None``, start at the beginning of the database.
    :param bytes end: Key where the compaction ends.
        If ``None``, end at the last key of the database.
    :param bool reduce_level: If ``True``, allow rocksdb to move the data
        to another level if the current one is not big enough.
        If ``False``, you may end up with a level bigger than configured.
    :param int target_level: Level to push the compacted range to.
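
    A minimal usage sketch (the database path and the key values are
    purely illustrative)::

        import rocksdb

        db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True))

        # Compact the whole key space: both bounds default to None.
        db.compact_range()

        # Compact a subrange only; reduce_level=True lets rocksdb move
        # the result back to a smaller level if the data shrank enough.
        db.compact_range(begin=b'key-100', end=b'key-200', reduce_level=True)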
.. py:attribute:: options


@@ -1537,6 +1537,34 @@ cdef class DB(object):
        return ret
    def compact_range(self, begin=None, end=None, reduce_level=False, target_level=-1):
        cdef Status st
        cdef Slice begin_val
        cdef Slice end_val

        cdef Slice* begin_ptr
        cdef Slice* end_ptr

        # A NULL pointer tells rocksdb the range is unbounded on that side.
        begin_ptr = NULL
        end_ptr = NULL

        if begin is not None:
            begin_val = bytes_to_slice(begin)
            begin_ptr = cython.address(begin_val)

        if end is not None:
            end_val = bytes_to_slice(end)
            end_ptr = cython.address(end_val)

        st = self.db.CompactRange(
            begin_ptr,
            end_ptr,
            reduce_level,
            target_level)

        check_status(st)
    @staticmethod
    def __parse_read_opts(
        verify_checksums=False,


@@ -91,10 +91,10 @@ cdef extern from "rocksdb/db.h" namespace "rocksdb":
             int,
             uint64_t*) nogil except+

-        void CompactRange(
+        Status CompactRange(
             const Slice*,
             const Slice*,
-            bool,
+            cpp_bool,
             int) nogil except+

         int NumberLevels() nogil except+
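
The switch from ``void`` to ``Status`` is what lets the wrapper report failures: the returned status is fed through ``check_status``, so a failed compaction raises a Python exception instead of being silently dropped. A minimal sketch of the caller's side (assumption: ``RocksIOError`` is one of the exception classes pyrocksdb defines in ``rocksdb.errors``):

    import rocksdb

    db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True))
    try:
        db.compact_range(begin=b'a', end=b'z')
    except rocksdb.errors.RocksIOError as err:
        # Assumption: check_status raises RocksIOError when the
        # underlying Status reports an I/O error.
        print("compaction failed:", err)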


@@ -188,6 +188,13 @@ class TestDB(unittest.TestCase, TestHelper):
        self.assertIsNotNone(self.db.get_property(b'rocksdb.num-files-at-level0'))
        self.assertIsNone(self.db.get_property(b'does not exsits'))
    def test_compact_range(self):
        for x in range(10000):
            x = int_to_bytes(x)
            self.db.put(x, x)

        self.db.compact_range()

class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator):
    def merge(self, key, existing_value, value):
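
A natural companion test, not part of this commit, would exercise the new parameters of ``compact_range`` on a subrange; a sketch only, assuming the same ``int_to_bytes`` helper used by ``test_compact_range`` above:

    def test_compact_range_partial(self):
        for x in range(10000):
            x = int_to_bytes(x)
            self.db.put(x, x)

        # Hypothetical: compact only part of the key space and let
        # rocksdb move the result to a lower level if it shrank.
        self.db.compact_range(
            begin=int_to_bytes(1000),
            end=int_to_bytes(2000),
            reduce_level=True)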