#cython: language_level=3
import cython
from libcpp.string cimport string
from libcpp.deque cimport deque
from libcpp.vector cimport vector
from cpython cimport bool as py_bool
from libcpp cimport bool as cpp_bool
from libc.stdint cimport uint32_t
from cython.operator cimport dereference as deref
from cpython.bytes cimport PyBytes_AsString
from cpython.bytes cimport PyBytes_Size
from cpython.bytes cimport PyBytes_FromString
from cpython.bytes cimport PyBytes_FromStringAndSize
from cpython.unicode cimport PyUnicode_Decode

from .std_memory cimport shared_ptr
from . cimport options
from . cimport merge_operator
from . cimport filter_policy
from . cimport comparator
from . cimport slice_transform
from . cimport cache
from . cimport logger
from . cimport snapshot
from . cimport db
from . cimport iterator
from . cimport backup
from . cimport env
from . cimport table_factory
from . cimport memtablerep
from . cimport universal_compaction

# Enums are the only exception for direct imports
# Their names are already unique enough
from .universal_compaction cimport kCompactionStopStyleSimilarSize
from .universal_compaction cimport kCompactionStopStyleTotalSize

from .options cimport kCompactionStyleLevel
from .options cimport kCompactionStyleUniversal
from .options cimport kCompactionStyleFIFO
from .options cimport kCompactionStyleNone

from .slice_ cimport Slice
from .status cimport Status

import sys
from .interfaces import MergeOperator as IMergeOperator
from .interfaces import AssociativeMergeOperator as IAssociativeMergeOperator
from .interfaces import FilterPolicy as IFilterPolicy
from .interfaces import Comparator as IComparator
from .interfaces import SliceTransform as ISliceTransform

import traceback
from .errors import NotFound
from .errors import Corruption
from .errors import NotSupported
from .errors import InvalidArgument
from .errors import RocksIOError
from .errors import MergeInProgress
from .errors import Incomplete

import weakref

ctypedef const filter_policy.FilterPolicy ConstFilterPolicy


cdef extern from "cpp/utils.hpp" namespace "py_rocks":
    cdef const Slice* vector_data(vector[Slice]&)


# Prepare python for threaded usage.
# Python callbacks (merge, comparator)
# could be executed in a rocksdb background thread (eg. compaction).
cdef extern from "Python.h":
    void PyEval_InitThreads()
PyEval_InitThreads()


## Here comes the stuff to wrap the status to exception
cdef check_status(const Status& st):
    if st.ok():
        return

    if st.IsNotFound():
        raise NotFound(st.ToString())

    if st.IsCorruption():
        raise Corruption(st.ToString())

    if st.IsNotSupported():
        raise NotSupported(st.ToString())

    if st.IsInvalidArgument():
        raise InvalidArgument(st.ToString())

    if st.IsIOError():
        raise RocksIOError(st.ToString())

    if st.IsMergeInProgress():
        raise MergeInProgress(st.ToString())

    if st.IsIncomplete():
        raise Incomplete(st.ToString())

    raise Exception("Unknown error: %s" % st.ToString())
######################################################
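
# Illustrative sketch (not part of the module): check_status() is the single
# funnel from RocksDB Status codes to the Python exceptions imported from
# .errors above, and is meant to wrap every call that returns a Status; the
# names db_ptr/write_opts below are hypothetical, e.g.
#
#     cdef Status st = db_ptr.Put(write_opts, key_slice, value_slice)
#     check_status(st)    # raises NotFound, Corruption, ... on failure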


cdef string bytes_to_string(path) except *:
    return string(PyBytes_AsString(path), PyBytes_Size(path))


cdef string_to_bytes(string ob):
    return PyBytes_FromStringAndSize(ob.c_str(), ob.size())


cdef Slice bytes_to_slice(ob) except *:
    return Slice(PyBytes_AsString(ob), PyBytes_Size(ob))


cdef slice_to_bytes(Slice sl):
    return PyBytes_FromStringAndSize(sl.data(), sl.size())


## only for filesystem paths
cdef string path_to_string(object path) except *:
    if isinstance(path, bytes):
        return bytes_to_string(path)
    if isinstance(path, unicode):
        path = path.encode(sys.getfilesystemencoding())
        return bytes_to_string(path)
    else:
        raise TypeError("Wrong type for path: %s" % path)


cdef object string_to_path(string path):
    fs_encoding = sys.getfilesystemencoding().encode('ascii')
    return PyUnicode_Decode(path.c_str(), path.size(), fs_encoding, "replace")
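
# Illustrative sketch (not part of the module): the two path helpers are
# intended to round-trip under the filesystem encoding, e.g.
#
#     cdef string s = path_to_string(u'/tmp/test.db')
#     assert string_to_path(s) == u'/tmp/test.db'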


## Here comes the stuff for the comparator
@cython.internal
cdef class PyComparator(object):
    cdef object get_ob(self):
        return None

    cdef const comparator.Comparator* get_comparator(self):
        return NULL

    cdef set_info_log(self, shared_ptr[logger.Logger] info_log):
        pass


@cython.internal
cdef class PyGenericComparator(PyComparator):
    cdef comparator.ComparatorWrapper* comparator_ptr
    cdef object ob

    def __cinit__(self, object ob):
        self.comparator_ptr = NULL
        if not isinstance(ob, IComparator):
            raise TypeError("%s is not of type %s" % (ob, IComparator))

        self.ob = ob
        self.comparator_ptr = new comparator.ComparatorWrapper(
            bytes_to_string(ob.name()),
            <void*>ob,
            compare_callback)

    def __dealloc__(self):
        if not self.comparator_ptr == NULL:
            del self.comparator_ptr

    cdef object get_ob(self):
        return self.ob

    cdef const comparator.Comparator* get_comparator(self):
        return <comparator.Comparator*> self.comparator_ptr

    cdef set_info_log(self, shared_ptr[logger.Logger] info_log):
        self.comparator_ptr.set_info_log(info_log)


@cython.internal
cdef class PyBytewiseComparator(PyComparator):
    cdef const comparator.Comparator* comparator_ptr

    def __cinit__(self):
        self.comparator_ptr = comparator.BytewiseComparator()

    def name(self):
        return PyBytes_FromString(self.comparator_ptr.Name())

    def compare(self, a, b):
        return self.comparator_ptr.Compare(
            bytes_to_slice(a),
            bytes_to_slice(b))

    cdef object get_ob(self):
        return self

    cdef const comparator.Comparator* get_comparator(self):
        return self.comparator_ptr


cdef int compare_callback(
    void* ctx,
    logger.Logger* log,
    string& error_msg,
    const Slice& a,
    const Slice& b) with gil:

    try:
        return (<object>ctx).compare(slice_to_bytes(a), slice_to_bytes(b))
    except BaseException as error:
        tb = traceback.format_exc()
        logger.Log(log, "Error in compare callback: %s", <bytes>tb)
        error_msg.assign(<bytes>str(error))


BytewiseComparator = PyBytewiseComparator
#########################################
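
# Illustrative sketch (assumption: the IComparator interface imported from
# .interfaces above, as in the public python-rocksdb API): a custom ordering
# only has to implement compare() and name(); PyGenericComparator wraps it.
#
#     class ReverseBytes(IComparator):
#         def compare(self, a, b):
#             # invert the bytewise order
#             if a < b:
#                 return 1
#             if a > b:
#                 return -1
#             return 0
#
#         def name(self):
#             return b'ReverseBytes'
#
#     opts = Options(comparator=ReverseBytes())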


## Here comes the stuff for the filter policy
@cython.internal
cdef class PyFilterPolicy(object):
    cdef object get_ob(self):
        return None

    cdef shared_ptr[ConstFilterPolicy] get_policy(self):
        return shared_ptr[ConstFilterPolicy]()

    cdef set_info_log(self, shared_ptr[logger.Logger] info_log):
        pass


@cython.internal
cdef class PyGenericFilterPolicy(PyFilterPolicy):
    cdef shared_ptr[filter_policy.FilterPolicyWrapper] policy
    cdef object ob

    def __cinit__(self, object ob):
        if not isinstance(ob, IFilterPolicy):
            raise TypeError("%s is not of type %s" % (ob, IFilterPolicy))

        self.ob = ob
        self.policy.reset(new filter_policy.FilterPolicyWrapper(
            bytes_to_string(ob.name()),
            <void*>ob,
            create_filter_callback,
            key_may_match_callback))

    cdef object get_ob(self):
        return self.ob

    cdef shared_ptr[ConstFilterPolicy] get_policy(self):
        return <shared_ptr[ConstFilterPolicy]>(self.policy)

    cdef set_info_log(self, shared_ptr[logger.Logger] info_log):
        self.policy.get().set_info_log(info_log)


cdef void create_filter_callback(
    void* ctx,
    logger.Logger* log,
    string& error_msg,
    const Slice* keys,
    int n,
    string* dst) with gil:

    try:
        ret = (<object>ctx).create_filter(
            [slice_to_bytes(keys[i]) for i in range(n)])
        dst.append(bytes_to_string(ret))
    except BaseException as error:
        tb = traceback.format_exc()
        logger.Log(log, "Error in create filter callback: %s", <bytes>tb)
        error_msg.assign(<bytes>str(error))


cdef cpp_bool key_may_match_callback(
    void* ctx,
    logger.Logger* log,
    string& error_msg,
    const Slice& key,
    const Slice& filt) with gil:

    try:
        return (<object>ctx).key_may_match(
            slice_to_bytes(key),
            slice_to_bytes(filt))
    except BaseException as error:
        tb = traceback.format_exc()
        logger.Log(log, "Error in key_may_match callback: %s", <bytes>tb)
        error_msg.assign(<bytes>str(error))


@cython.internal
cdef class PyBloomFilterPolicy(PyFilterPolicy):
    cdef shared_ptr[ConstFilterPolicy] policy

    def __cinit__(self, int bits_per_key):
        self.policy.reset(filter_policy.NewBloomFilterPolicy(bits_per_key))

    def name(self):
        return PyBytes_FromString(self.policy.get().Name())

    def create_filter(self, keys):
        cdef string dst
        cdef vector[Slice] c_keys

        for key in keys:
            c_keys.push_back(bytes_to_slice(key))

        self.policy.get().CreateFilter(
            vector_data(c_keys),
            <int>c_keys.size(),
            cython.address(dst))

        return string_to_bytes(dst)

    def key_may_match(self, key, filter_):
        return self.policy.get().KeyMayMatch(
            bytes_to_slice(key),
            bytes_to_slice(filter_))

    cdef object get_ob(self):
        return self

    cdef shared_ptr[ConstFilterPolicy] get_policy(self):
        return self.policy


BloomFilterPolicy = PyBloomFilterPolicy
#############################################
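
# Illustrative sketch (not part of the module): the bloom policy can be
# exercised directly through the Python-visible methods above, e.g.
#
#     policy = BloomFilterPolicy(10)                 # ~10 bits per key
#     filt = policy.create_filter([b'a', b'b'])
#     assert policy.key_may_match(b'a', filt)        # never a false negative
#     policy.key_may_match(b'zzz', filt)             # may be a false positive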


## Here comes the stuff for the merge operator
@cython.internal
cdef class PyMergeOperator(object):
    cdef shared_ptr[merge_operator.MergeOperator] merge_op
    cdef object ob

    def __cinit__(self, object ob):
        self.ob = ob
        if isinstance(ob, IAssociativeMergeOperator):
            self.merge_op.reset(
                <merge_operator.MergeOperator*>
                new merge_operator.AssociativeMergeOperatorWrapper(
                    bytes_to_string(ob.name()),
                    <void*>(ob),
                    merge_callback))

        elif isinstance(ob, IMergeOperator):
            self.merge_op.reset(
                <merge_operator.MergeOperator*>
                new merge_operator.MergeOperatorWrapper(
                    bytes_to_string(ob.name()),
                    <void*>ob,
                    <void*>ob,
                    full_merge_callback,
                    partial_merge_callback))
        # elif isinstance(ob, str):
        #     if ob == "put":
        #         self.merge_op = merge_operator.MergeOperators.CreatePutOperator()
        #     elif ob == "put_v1":
        #         self.merge_op = merge_operator.MergeOperators.CreateDeprecatedPutOperator()
        #     elif ob == "uint64add":
        #         self.merge_op = merge_operator.MergeOperators.CreateUInt64AddOperator()
        #     elif ob == "stringappend":
        #         self.merge_op = merge_operator.MergeOperators.CreateStringAppendOperator()
        #     #TODO: necessary?
        #     # elif ob == "stringappendtest":
        #     #     self.merge_op = merge_operator.MergeOperators.CreateStringAppendTESTOperator()
        #     elif ob == "max":
        #         self.merge_op = merge_operator.MergeOperators.CreateMaxOperator()
        #     else:
        #         msg = "{0} is not the default type".format(ob)
        #         raise TypeError(msg)
        else:
            msg = "%s is not one of the types %s"
            msg %= (ob, (IAssociativeMergeOperator, IMergeOperator))
            raise TypeError(msg)

    cdef object get_ob(self):
        return self.ob

    cdef shared_ptr[merge_operator.MergeOperator] get_operator(self):
        return self.merge_op
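
# Illustrative sketch (assumption: the IAssociativeMergeOperator interface
# imported from .interfaces above): merge() returns (success, new_value) and
# is dispatched through merge_callback below, e.g. a little-endian counter:
#
#     import struct
#
#     class Uint64Add(IAssociativeMergeOperator):
#         def merge(self, key, existing_value, value):
#             if existing_value:
#                 s = struct.unpack('<Q', existing_value)[0]
#                 v = struct.unpack('<Q', value)[0]
#                 return (True, struct.pack('<Q', s + v))
#             return (True, value)
#
#         def name(self):
#             return b'uint64add'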


cdef cpp_bool merge_callback(
    void* ctx,
    const Slice& key,
    const Slice* existing_value,
    const Slice& value,
    string* new_value,
    logger.Logger* log) with gil:

    if existing_value == NULL:
        py_existing_value = None
    else:
        py_existing_value = slice_to_bytes(deref(existing_value))

    try:
        ret = (<object>ctx).merge(
            slice_to_bytes(key),
            py_existing_value,
            slice_to_bytes(value))

        if ret[0]:
            new_value.assign(bytes_to_string(ret[1]))
            return True
        return False

    except:
        tb = traceback.format_exc()
        logger.Log(log, "Error in merge_callback: %s", <bytes>tb)
        return False


cdef cpp_bool full_merge_callback(
    void* ctx,
    const Slice& key,
    const Slice* existing_value,
    const deque[string]& op_list,
    string* new_value,
    logger.Logger* log) with gil:

    if existing_value == NULL:
        py_existing_value = None
    else:
        py_existing_value = slice_to_bytes(deref(existing_value))

    try:
        ret = (<object>ctx).full_merge(
            slice_to_bytes(key),
            py_existing_value,
            [string_to_bytes(op_list[i]) for i in range(op_list.size())])

        if ret[0]:
            new_value.assign(bytes_to_string(ret[1]))
            return True
        return False

    except:
        tb = traceback.format_exc()
        logger.Log(log, "Error in full_merge_callback: %s", <bytes>tb)
        return False


cdef cpp_bool partial_merge_callback(
    void* ctx,
    const Slice& key,
    const Slice& left_op,
    const Slice& right_op,
    string* new_value,
    logger.Logger* log) with gil:

    try:
        ret = (<object>ctx).partial_merge(
            slice_to_bytes(key),
            slice_to_bytes(left_op),
            slice_to_bytes(right_op))

        if ret[0]:
            new_value.assign(bytes_to_string(ret[1]))
            return True
        return False

    except:
        tb = traceback.format_exc()
        logger.Log(log, "Error in partial_merge_callback: %s", <bytes>tb)
        return False
##############################################


#### Here comes the Cache stuff
@cython.internal
cdef class PyCache(object):
    cdef shared_ptr[cache.Cache] get_cache(self):
        return shared_ptr[cache.Cache]()


@cython.internal
cdef class PyLRUCache(PyCache):
    cdef shared_ptr[cache.Cache] cache_ob

    def __cinit__(self, capacity, shard_bits=None):
        if shard_bits is not None:
            self.cache_ob = cache.NewLRUCache(capacity, shard_bits)
        else:
            self.cache_ob = cache.NewLRUCache(capacity)

    cdef shared_ptr[cache.Cache] get_cache(self):
        return self.cache_ob


LRUCache = PyLRUCache
###############################
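
# Illustrative sketch (not part of the module): a cache is shared by handing
# it to a table factory, e.g.
#
#     block_cache = LRUCache(512 * 1024 * 1024)            # default shards
#     sharded = LRUCache(512 * 1024 * 1024, shard_bits=6)  # 2**6 shards
#     factory = BlockBasedTableFactory(block_cache=block_cache)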


### Here comes the stuff for SliceTransform
@cython.internal
cdef class PySliceTransform(object):
    cdef shared_ptr[slice_transform.SliceTransform] transformer
    cdef object ob

    def __cinit__(self, object ob):
        if not isinstance(ob, ISliceTransform):
            raise TypeError("%s is not of type %s" % (ob, ISliceTransform))

        self.ob = ob
        self.transformer.reset(
            <slice_transform.SliceTransform*>
            new slice_transform.SliceTransformWrapper(
                bytes_to_string(ob.name()),
                <void*>ob,
                slice_transform_callback,
                slice_in_domain_callback,
                slice_in_range_callback))

    cdef object get_ob(self):
        return self.ob

    cdef shared_ptr[slice_transform.SliceTransform] get_transformer(self):
        return self.transformer

    cdef set_info_log(self, shared_ptr[logger.Logger] info_log):
        cdef slice_transform.SliceTransformWrapper* ptr
        ptr = <slice_transform.SliceTransformWrapper*> self.transformer.get()
        ptr.set_info_log(info_log)
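
# Illustrative sketch (assumption: the ISliceTransform interface imported from
# .interfaces above): transform() returns an (offset, size) window into the
# key, which slice_transform_callback below bounds-checks, e.g.
#
#     class StaticPrefix(ISliceTransform):
#         def name(self):
#             return b'static'
#
#         def transform(self, src):
#             return (0, 5)            # keep the first five key bytes
#
#         def in_domain(self, src):
#             return len(src) >= 5
#
#         def in_range(self, dst):
#             return len(dst) == 5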


cdef Slice slice_transform_callback(
    void* ctx,
    logger.Logger* log,
    string& error_msg,
    const Slice& src) with gil:

    cdef size_t offset
    cdef size_t size

    try:
        ret = (<object>ctx).transform(slice_to_bytes(src))
        offset = ret[0]
        size = ret[1]
        if (offset + size) > src.size():
            msg = "offset(%i) + size(%i) is bigger than slice(%i)"
            raise Exception(msg % (offset, size, src.size()))

        return Slice(src.data() + offset, size)
    except BaseException as error:
        tb = traceback.format_exc()
        logger.Log(log, "Error in slice transform callback: %s", <bytes>tb)
        error_msg.assign(<bytes>str(error))


cdef cpp_bool slice_in_domain_callback(
    void* ctx,
    logger.Logger* log,
    string& error_msg,
    const Slice& src) with gil:

    try:
        return (<object>ctx).in_domain(slice_to_bytes(src))
    except BaseException as error:
        tb = traceback.format_exc()
        logger.Log(log, "Error in slice transform callback: %s", <bytes>tb)
        error_msg.assign(<bytes>str(error))


cdef cpp_bool slice_in_range_callback(
    void* ctx,
    logger.Logger* log,
    string& error_msg,
    const Slice& src) with gil:

    try:
        return (<object>ctx).in_range(slice_to_bytes(src))
    except BaseException as error:
        tb = traceback.format_exc()
        logger.Log(log, "Error in slice transform callback: %s", <bytes>tb)
        error_msg.assign(<bytes>str(error))
###########################################


## Here are the TableFactories
@cython.internal
cdef class PyTableFactory(object):
    cdef shared_ptr[table_factory.TableFactory] factory

    cdef shared_ptr[table_factory.TableFactory] get_table_factory(self):
        return self.factory

    cdef set_info_log(self, shared_ptr[logger.Logger] info_log):
        pass


cdef class BlockBasedTableFactory(PyTableFactory):
    cdef PyFilterPolicy py_filter_policy

    def __init__(self,
            index_type='binary_search',
            py_bool hash_index_allow_collision=True,
            checksum='crc32',
            PyCache block_cache=None,
            PyCache block_cache_compressed=None,
            filter_policy=None,
            no_block_cache=False,
            block_size=None,
            block_size_deviation=None,
            block_restart_interval=None,
            whole_key_filtering=None,
            enable_index_compression=False,
            cache_index_and_filter_blocks=False,
            format_version=2,
        ):

        cdef table_factory.BlockBasedTableOptions table_options

        if index_type == 'binary_search':
            table_options.index_type = table_factory.kBinarySearch
        elif index_type == 'hash_search':
            table_options.index_type = table_factory.kHashSearch
        else:
            raise ValueError("Unknown index_type: %s" % index_type)

        if hash_index_allow_collision:
            table_options.hash_index_allow_collision = True
        else:
            table_options.hash_index_allow_collision = False

        if enable_index_compression:
            table_options.enable_index_compression = True
        else:
            table_options.enable_index_compression = False

        if checksum == 'crc32':
            table_options.checksum = table_factory.kCRC32c
        elif checksum == 'xxhash':
            table_options.checksum = table_factory.kxxHash
        else:
            raise ValueError("Unknown checksum: %s" % checksum)

        if no_block_cache:
            table_options.no_block_cache = True
        else:
            table_options.no_block_cache = False

        # If the following options are None use the rocksdb default.
        if block_size is not None:
            table_options.block_size = block_size

        if block_size_deviation is not None:
            table_options.block_size_deviation = block_size_deviation

        if block_restart_interval is not None:
            table_options.block_restart_interval = block_restart_interval

        if whole_key_filtering is not None:
            if whole_key_filtering:
                table_options.whole_key_filtering = True
            else:
                table_options.whole_key_filtering = False

        if cache_index_and_filter_blocks is not None:
            if cache_index_and_filter_blocks:
                table_options.cache_index_and_filter_blocks = True
            else:
                table_options.cache_index_and_filter_blocks = False

        if block_cache is not None:
            table_options.block_cache = block_cache.get_cache()

        if block_cache_compressed is not None:
            table_options.block_cache_compressed = block_cache_compressed.get_cache()

        if format_version is not None:
            table_options.format_version = format_version

        # Set the filter_policy
        self.py_filter_policy = None
        if filter_policy is not None:
            if isinstance(filter_policy, PyFilterPolicy):
                if (<PyFilterPolicy?>filter_policy).get_policy().get() == NULL:
                    raise Exception("Cannot set filter policy: %s" % filter_policy)
                self.py_filter_policy = filter_policy
            else:
                self.py_filter_policy = PyGenericFilterPolicy(filter_policy)

            table_options.filter_policy = self.py_filter_policy.get_policy()

        self.factory.reset(table_factory.NewBlockBasedTableFactory(table_options))

    cdef set_info_log(self, shared_ptr[logger.Logger] info_log):
        if self.py_filter_policy is not None:
            self.py_filter_policy.set_info_log(info_log)
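
# Illustrative sketch (not part of the module): a typical block-based table
# setup combining the pieces above, e.g.
#
#     factory = BlockBasedTableFactory(
#         filter_policy=BloomFilterPolicy(10),
#         block_cache=LRUCache(512 * 1024 * 1024),
#         block_size=16 * 1024,
#         cache_index_and_filter_blocks=True)
#     opts = Options(create_if_missing=True, table_factory=factory)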


cdef class PlainTableFactory(PyTableFactory):
    def __init__(
            self,
            user_key_len=0,
            bloom_bits_per_key=10,
            hash_table_ratio=0.75,
            index_sparseness=10,
            huge_page_tlb_size=0,
            encoding_type='plain',
            py_bool full_scan_mode=False):

        cdef table_factory.PlainTableOptions table_options

        table_options.user_key_len = user_key_len
        table_options.bloom_bits_per_key = bloom_bits_per_key
        table_options.hash_table_ratio = hash_table_ratio
        table_options.index_sparseness = index_sparseness
        table_options.huge_page_tlb_size = huge_page_tlb_size

        if encoding_type == 'plain':
            table_options.encoding_type = table_factory.kPlain
        elif encoding_type == 'prefix':
            table_options.encoding_type = table_factory.kPrefix
        else:
            raise ValueError("Unknown encoding_type: %s" % encoding_type)

        table_options.full_scan_mode = full_scan_mode

        self.factory.reset(table_factory.NewPlainTableFactory(table_options))
#############################################


### Here are the MemtableFactories
@cython.internal
cdef class PyMemtableFactory(object):
    cdef shared_ptr[memtablerep.MemTableRepFactory] factory

    cdef shared_ptr[memtablerep.MemTableRepFactory] get_memtable_factory(self):
        return self.factory


cdef class SkipListMemtableFactory(PyMemtableFactory):
    def __init__(self):
        self.factory.reset(memtablerep.NewSkipListFactory())


cdef class VectorMemtableFactory(PyMemtableFactory):
    def __init__(self, count=0):
        self.factory.reset(memtablerep.NewVectorRepFactory(count))


cdef class HashSkipListMemtableFactory(PyMemtableFactory):
    def __init__(
            self,
            bucket_count=1000000,
            skiplist_height=4,
            skiplist_branching_factor=4):

        self.factory.reset(
            memtablerep.NewHashSkipListRepFactory(
                bucket_count,
                skiplist_height,
                skiplist_branching_factor))


cdef class HashLinkListMemtableFactory(PyMemtableFactory):
    def __init__(self, bucket_count=50000):
        self.factory.reset(memtablerep.NewHashLinkListRepFactory(bucket_count))
##################################
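
# Illustrative sketch (not part of the module): the hash-based memtables only
# pay off when the column family also has a prefix_extractor, e.g.
#
#     opts = Options(
#         create_if_missing=True,
#         memtable_factory=HashSkipListMemtableFactory(bucket_count=2 ** 20),
#         prefix_extractor=StaticPrefix())   # hypothetical class sketched above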


cdef class CompressionType(object):
    no_compression = u'no_compression'
    snappy_compression = u'snappy_compression'
    zlib_compression = u'zlib_compression'
    bzip2_compression = u'bzip2_compression'
    lz4_compression = u'lz4_compression'
    lz4hc_compression = u'lz4hc_compression'
    xpress_compression = u'xpress_compression'
    zstd_compression = u'zstd_compression'
    zstdnotfinal_compression = u'zstdnotfinal_compression'
    disable_compression = u'disable_compression'


cdef class CompactionPri(object):
    by_compensated_size = u'by_compensated_size'
    oldest_largest_seq_first = u'oldest_largest_seq_first'
    oldest_smallest_seq_first = u'oldest_smallest_seq_first'
    min_overlapping_ratio = u'min_overlapping_ratio'


@cython.internal
cdef class _ColumnFamilyHandle:
    """ This is an internal class that we will weakref for safety """
    cdef db.ColumnFamilyHandle* handle
    cdef object __weakref__
    cdef object weak_handle

    def __cinit__(self):
        self.handle = NULL

    def __dealloc__(self):
        if not self.handle == NULL:
            del self.handle

    @staticmethod
    cdef from_handle_ptr(db.ColumnFamilyHandle* handle):
        inst = <_ColumnFamilyHandle>_ColumnFamilyHandle.__new__(_ColumnFamilyHandle)
        inst.handle = handle
        return inst

    @property
    def name(self):
        return self.handle.GetName()

    @property
    def id(self):
        return self.handle.GetID()

    @property
    def weakref(self):
        if self.weak_handle is None:
            self.weak_handle = ColumnFamilyHandle.from_wrapper(self)
        return self.weak_handle


cdef class ColumnFamilyHandle:
    """ This represents a ColumnFamilyHandle """
    cdef object _ref
    cdef readonly bytes name
    cdef readonly int id

    def __cinit__(self, weakhandle):
        self._ref = weakhandle
        self.name = self._ref().name
        self.id = self._ref().id

    def __init__(self, *):
        raise TypeError("These cannot be constructed from Python")

    @staticmethod
    cdef object from_wrapper(_ColumnFamilyHandle real_handle):
        return ColumnFamilyHandle.__new__(ColumnFamilyHandle, weakref.ref(real_handle))

    @property
    def is_valid(self):
        return self._ref() is not None

    def __repr__(self):
        valid = "valid" if self.is_valid else "invalid"
        return f"<ColumnFamilyHandle name: {self.name}, id: {self.id}, state: {valid}>"

    cdef db.ColumnFamilyHandle* get_handle(self) except NULL:
        cdef _ColumnFamilyHandle real_handle = self._ref()
        if real_handle is None:
            raise ValueError(f"{self} is no longer a valid ColumnFamilyHandle!")
        return real_handle.handle

    def __eq__(self, other):
        cdef ColumnFamilyHandle fast_other
        if isinstance(other, ColumnFamilyHandle):
            fast_other = other
            return (
                self.name == fast_other.name
                and self.id == fast_other.id
                and self._ref == fast_other._ref
            )
        return False

    def __lt__(self, other):
        cdef ColumnFamilyHandle fast_other
        if isinstance(other, ColumnFamilyHandle):
            return self.id < other.id
        return NotImplemented

    # Since @total_ordering isn't a thing for cython
    def __ne__(self, other):
        return not self == other

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return not other < self

    def __ge__(self, other):
        return not self < other

    def __hash__(self):
        # hash of a weakref matches that of its original ref'ed object
        # so we use the id of our weakref object here to prevent
        # a situation where we are invalid, but match a valid handle's hash
        return hash((self.id, self.name, id(self._ref)))


cdef class ColumnFamilyOptions(object):
    cdef options.ColumnFamilyOptions* copts
    cdef PyComparator py_comparator
    cdef PyMergeOperator py_merge_operator
    cdef PySliceTransform py_prefix_extractor
    cdef PyTableFactory py_table_factory
    cdef PyMemtableFactory py_memtable_factory

    # Used to protect sharing of Options with many DB-objects
    cdef cpp_bool in_use

    def __cinit__(self):
        self.copts = NULL
        self.copts = new options.ColumnFamilyOptions()
        self.in_use = False

    def __dealloc__(self):
        if not self.copts == NULL:
            del self.copts

    def __init__(self, **kwargs):
        self.py_comparator = BytewiseComparator()
        self.py_merge_operator = None
        self.py_prefix_extractor = None
        self.py_table_factory = None
        self.py_memtable_factory = None

        for key, value in kwargs.items():
            setattr(self, key, value)

    property write_buffer_size:
        def __get__(self):
            return self.copts.write_buffer_size
        def __set__(self, value):
            self.copts.write_buffer_size = value

    property max_write_buffer_number:
        def __get__(self):
            return self.copts.max_write_buffer_number
        def __set__(self, value):
            self.copts.max_write_buffer_number = value

    property min_write_buffer_number_to_merge:
        def __get__(self):
            return self.copts.min_write_buffer_number_to_merge
        def __set__(self, value):
            self.copts.min_write_buffer_number_to_merge = value

    property compression_opts:
        def __get__(self):
            cdef dict ret_ob = {}

            ret_ob['window_bits'] = self.copts.compression_opts.window_bits
            ret_ob['level'] = self.copts.compression_opts.level
            ret_ob['strategy'] = self.copts.compression_opts.strategy
            ret_ob['max_dict_bytes'] = self.copts.compression_opts.max_dict_bytes

            return ret_ob

        def __set__(self, dict value):
            cdef options.CompressionOptions* copts
            copts = cython.address(self.copts.compression_opts)
            # CompressionOptions(int wbits, int _lev, int _strategy, int _max_dict_bytes)
            if 'window_bits' in value:
                copts.window_bits = value['window_bits']
            if 'level' in value:
                copts.level = value['level']
            if 'strategy' in value:
                copts.strategy = value['strategy']
            if 'max_dict_bytes' in value:
                copts.max_dict_bytes = value['max_dict_bytes']

    property compaction_pri:
        def __get__(self):
            if self.copts.compaction_pri == options.kByCompensatedSize:
                return CompactionPri.by_compensated_size
            if self.copts.compaction_pri == options.kOldestLargestSeqFirst:
                return CompactionPri.oldest_largest_seq_first
            if self.copts.compaction_pri == options.kOldestSmallestSeqFirst:
                return CompactionPri.oldest_smallest_seq_first
            if self.copts.compaction_pri == options.kMinOverlappingRatio:
                return CompactionPri.min_overlapping_ratio

        def __set__(self, value):
            if value == CompactionPri.by_compensated_size:
                self.copts.compaction_pri = options.kByCompensatedSize
            elif value == CompactionPri.oldest_largest_seq_first:
                self.copts.compaction_pri = options.kOldestLargestSeqFirst
            elif value == CompactionPri.oldest_smallest_seq_first:
                self.copts.compaction_pri = options.kOldestSmallestSeqFirst
            elif value == CompactionPri.min_overlapping_ratio:
                self.copts.compaction_pri = options.kMinOverlappingRatio
            else:
                raise TypeError("Unknown compaction pri: %s" % value)

    property compression:
        def __get__(self):
            if self.copts.compression == options.kNoCompression:
                return CompressionType.no_compression
            elif self.copts.compression == options.kSnappyCompression:
                return CompressionType.snappy_compression
            elif self.copts.compression == options.kZlibCompression:
                return CompressionType.zlib_compression
            elif self.copts.compression == options.kBZip2Compression:
                return CompressionType.bzip2_compression
            elif self.copts.compression == options.kLZ4Compression:
                return CompressionType.lz4_compression
            elif self.copts.compression == options.kLZ4HCCompression:
                return CompressionType.lz4hc_compression
            elif self.copts.compression == options.kXpressCompression:
                return CompressionType.xpress_compression
            elif self.copts.compression == options.kZSTD:
                return CompressionType.zstd_compression
            elif self.copts.compression == options.kZSTDNotFinalCompression:
                return CompressionType.zstdnotfinal_compression
            elif self.copts.compression == options.kDisableCompressionOption:
                return CompressionType.disable_compression
            else:
                raise Exception("Unknown type: %s" % self.copts.compression)

        def __set__(self, value):
            if value == CompressionType.no_compression:
                self.copts.compression = options.kNoCompression
            elif value == CompressionType.snappy_compression:
                self.copts.compression = options.kSnappyCompression
            elif value == CompressionType.zlib_compression:
                self.copts.compression = options.kZlibCompression
            elif value == CompressionType.bzip2_compression:
                self.copts.compression = options.kBZip2Compression
            elif value == CompressionType.lz4_compression:
                self.copts.compression = options.kLZ4Compression
            elif value == CompressionType.lz4hc_compression:
                self.copts.compression = options.kLZ4HCCompression
            elif value == CompressionType.zstd_compression:
                self.copts.compression = options.kZSTD
            elif value == CompressionType.zstdnotfinal_compression:
                self.copts.compression = options.kZSTDNotFinalCompression
            elif value == CompressionType.disable_compression:
                self.copts.compression = options.kDisableCompressionOption
            else:
                raise TypeError("Unknown compression: %s" % value)
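
    # Illustrative sketch (not part of the module): compression is selected
    # through the CompressionType constants defined above, e.g.
    #
    #     opts = Options(create_if_missing=True,
    #                    compression=CompressionType.lz4_compression)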

    property max_compaction_bytes:
        def __get__(self):
            return self.copts.max_compaction_bytes
        def __set__(self, value):
            self.copts.max_compaction_bytes = value

    property num_levels:
        def __get__(self):
            return self.copts.num_levels
        def __set__(self, value):
            self.copts.num_levels = value

    property level0_file_num_compaction_trigger:
        def __get__(self):
            return self.copts.level0_file_num_compaction_trigger
        def __set__(self, value):
            self.copts.level0_file_num_compaction_trigger = value

    property level0_slowdown_writes_trigger:
        def __get__(self):
            return self.copts.level0_slowdown_writes_trigger
        def __set__(self, value):
            self.copts.level0_slowdown_writes_trigger = value

    property level0_stop_writes_trigger:
        def __get__(self):
            return self.copts.level0_stop_writes_trigger
        def __set__(self, value):
            self.copts.level0_stop_writes_trigger = value

    property max_mem_compaction_level:
        def __get__(self):
            return self.copts.max_mem_compaction_level
        def __set__(self, value):
            self.copts.max_mem_compaction_level = value

    property target_file_size_base:
        def __get__(self):
            return self.copts.target_file_size_base
        def __set__(self, value):
            self.copts.target_file_size_base = value

    property target_file_size_multiplier:
        def __get__(self):
            return self.copts.target_file_size_multiplier
        def __set__(self, value):
            self.copts.target_file_size_multiplier = value

    property max_bytes_for_level_base:
        def __get__(self):
            return self.copts.max_bytes_for_level_base
        def __set__(self, value):
            self.copts.max_bytes_for_level_base = value

    property max_bytes_for_level_multiplier:
        def __get__(self):
            return self.copts.max_bytes_for_level_multiplier
        def __set__(self, value):
            self.copts.max_bytes_for_level_multiplier = value

    property max_bytes_for_level_multiplier_additional:
        def __get__(self):
            return self.copts.max_bytes_for_level_multiplier_additional
        def __set__(self, value):
            self.copts.max_bytes_for_level_multiplier_additional = value

    property soft_rate_limit:
        def __get__(self):
            return self.copts.soft_rate_limit
        def __set__(self, value):
            self.copts.soft_rate_limit = value

    property hard_rate_limit:
        def __get__(self):
            return self.copts.hard_rate_limit
        def __set__(self, value):
            self.copts.hard_rate_limit = value

    property rate_limit_delay_max_milliseconds:
        def __get__(self):
            return self.copts.rate_limit_delay_max_milliseconds
        def __set__(self, value):
            self.copts.rate_limit_delay_max_milliseconds = value

    property arena_block_size:
        def __get__(self):
            return self.copts.arena_block_size
        def __set__(self, value):
            self.copts.arena_block_size = value

    property disable_auto_compactions:
        def __get__(self):
            return self.copts.disable_auto_compactions
        def __set__(self, value):
            self.copts.disable_auto_compactions = value

    property purge_redundant_kvs_while_flush:
        def __get__(self):
            return self.copts.purge_redundant_kvs_while_flush
        def __set__(self, value):
            self.copts.purge_redundant_kvs_while_flush = value

    # FIXME: moved to util/options_helper.h
    # property allow_os_buffer:
    #     def __get__(self):
    #         return self.copts.allow_os_buffer
    #     def __set__(self, value):
    #         self.copts.allow_os_buffer = value

    property compaction_style:
        def __get__(self):
            if self.copts.compaction_style == kCompactionStyleLevel:
                return 'level'
            if self.copts.compaction_style == kCompactionStyleUniversal:
                return 'universal'
            if self.copts.compaction_style == kCompactionStyleFIFO:
                return 'fifo'
            if self.copts.compaction_style == kCompactionStyleNone:
                return 'none'
            raise Exception("Unknown compaction_style")

        def __set__(self, str value):
            if value == 'level':
                self.copts.compaction_style = kCompactionStyleLevel
            elif value == 'universal':
                self.copts.compaction_style = kCompactionStyleUniversal
            elif value == 'fifo':
                self.copts.compaction_style = kCompactionStyleFIFO
            elif value == 'none':
                self.copts.compaction_style = kCompactionStyleNone
            else:
                raise Exception("Unknown compaction style")

    property compaction_options_universal:
        def __get__(self):
            cdef universal_compaction.CompactionOptionsUniversal uopts
            cdef dict ret_ob = {}

            uopts = self.copts.compaction_options_universal

            ret_ob['size_ratio'] = uopts.size_ratio
            ret_ob['min_merge_width'] = uopts.min_merge_width
            ret_ob['max_merge_width'] = uopts.max_merge_width
            ret_ob['max_size_amplification_percent'] = uopts.max_size_amplification_percent
            ret_ob['compression_size_percent'] = uopts.compression_size_percent

            if uopts.stop_style == kCompactionStopStyleSimilarSize:
                ret_ob['stop_style'] = 'similar_size'
            elif uopts.stop_style == kCompactionStopStyleTotalSize:
                ret_ob['stop_style'] = 'total_size'
            else:
                raise Exception("Unknown compaction stop style")

            return ret_ob

        def __set__(self, dict value):
            cdef universal_compaction.CompactionOptionsUniversal* uopts
            uopts = cython.address(self.copts.compaction_options_universal)

            if 'size_ratio' in value:
                uopts.size_ratio = value['size_ratio']

            if 'min_merge_width' in value:
                uopts.min_merge_width = value['min_merge_width']

            if 'max_merge_width' in value:
                uopts.max_merge_width = value['max_merge_width']

            if 'max_size_amplification_percent' in value:
                uopts.max_size_amplification_percent = value['max_size_amplification_percent']

            if 'compression_size_percent' in value:
                uopts.compression_size_percent = value['compression_size_percent']

            if 'stop_style' in value:
                if value['stop_style'] == 'similar_size':
                    uopts.stop_style = kCompactionStopStyleSimilarSize
                elif value['stop_style'] == 'total_size':
                    uopts.stop_style = kCompactionStopStyleTotalSize
                else:
                    raise Exception("Unknown compaction stop style")
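
    # Illustrative sketch (not part of the module): universal compaction is
    # enabled by pairing compaction_style with this option dict, e.g.
    #
    #     opts = Options(create_if_missing=True)
    #     opts.compaction_style = 'universal'
    #     opts.compaction_options_universal = {
    #         'size_ratio': 1,
    #         'min_merge_width': 2,
    #         'max_size_amplification_percent': 200,
    #         'stop_style': 'similar_size',
    #     }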

    property max_sequential_skip_in_iterations:
        def __get__(self):
            return self.copts.max_sequential_skip_in_iterations
        def __set__(self, value):
            self.copts.max_sequential_skip_in_iterations = value

    property inplace_update_support:
        def __get__(self):
            return self.copts.inplace_update_support
        def __set__(self, value):
            self.copts.inplace_update_support = value

    property table_factory:
        def __get__(self):
            return self.py_table_factory

        def __set__(self, PyTableFactory value):
            self.py_table_factory = value
            self.copts.table_factory = value.get_table_factory()

    property memtable_factory:
        def __get__(self):
            return self.py_memtable_factory

        def __set__(self, PyMemtableFactory value):
            self.py_memtable_factory = value
            self.copts.memtable_factory = value.get_memtable_factory()

    property inplace_update_num_locks:
        def __get__(self):
            return self.copts.inplace_update_num_locks
        def __set__(self, value):
            self.copts.inplace_update_num_locks = value

    property comparator:
        def __get__(self):
            return self.py_comparator.get_ob()

        def __set__(self, value):
            if isinstance(value, PyComparator):
                if (<PyComparator?>value).get_comparator() == NULL:
                    raise Exception("Cannot set %s as comparator" % value)
                else:
                    self.py_comparator = value
            else:
                self.py_comparator = PyGenericComparator(value)

            self.copts.comparator = self.py_comparator.get_comparator()

    property merge_operator:
        def __get__(self):
            if self.py_merge_operator is None:
                return None
            return self.py_merge_operator.get_ob()

        def __set__(self, value):
            self.py_merge_operator = PyMergeOperator(value)
            self.copts.merge_operator = self.py_merge_operator.get_operator()
|
2014-01-13 19:52:22 +01:00
|
|
|
|
2014-01-21 13:14:41 +01:00
|
|
|
property prefix_extractor:
|
|
|
|
def __get__(self):
|
|
|
|
if self.py_prefix_extractor is None:
|
|
|
|
return None
|
|
|
|
return self.py_prefix_extractor.get_ob()
|
|
|
|
|
|
|
|
def __set__(self, value):
|
|
|
|
self.py_prefix_extractor = PySliceTransform(value)
|
2018-11-02 19:27:14 +01:00
|
|
|
self.copts.prefix_extractor = self.py_prefix_extractor.get_transformer()
|
|
|
|
|
2020-04-07 14:17:30 +02:00
|
|
|
property optimize_filters_for_hits:
|
|
|
|
def __get__(self):
|
|
|
|
return self.copts.optimize_filters_for_hits
|
|
|
|
def __set__(self, value):
|
|
|
|
self.copts.optimize_filters_for_hits = value
|
2018-11-02 19:27:14 +01:00
|
|
|
|
2020-05-18 17:51:11 +02:00
|
|
|
property paranoid_file_checks:
|
|
|
|
def __get__(self):
|
|
|
|
return self.copts.paranoid_file_checks
|
|
|
|
def __set__(self, value):
|
|
|
|
self.copts.paranoid_file_checks = value
|
|
|
|
|
2021-02-16 21:35:41 +01:00
|
|
|
property level_compaction_dynamic_level_bytes:
|
|
|
|
def __get__(self):
|
|
|
|
return self.copts.level_compaction_dynamic_level_bytes
|
|
|
|
def __set__(self, value):
|
|
|
|
self.copts.level_compaction_dynamic_level_bytes = value
|
|
|
|
|
2018-11-02 19:27:14 +01:00
|
|
|
cdef class Options(ColumnFamilyOptions):
    cdef options.Options* opts
    cdef PyCache py_row_cache

    def __cinit__(self):
        # Destroy the ColumnFamilyOptions that the base __cinit__ allocated
        del self.copts
        self.opts = NULL
        self.copts = self.opts = new options.Options()
        self.in_use = False

    def __dealloc__(self):
        if not self.opts == NULL:
            self.copts = NULL
            del self.opts

    def __init__(self, **kwargs):
        ColumnFamilyOptions.__init__(self)
        self.py_row_cache = None

        for key, value in kwargs.items():
            setattr(self, key, value)
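    # Usage sketch (illustrative only): because __init__ forwards keyword
    # arguments to setattr, options can be configured in one call.
    #
    #   opts = Options(create_if_missing=True, max_open_files=300000)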
    def IncreaseParallelism(self, int total_threads=16):
        self.opts.IncreaseParallelism(total_threads)

    property create_if_missing:
        def __get__(self):
            return self.opts.create_if_missing
        def __set__(self, value):
            self.opts.create_if_missing = value

    property create_missing_column_families:
        def __get__(self):
            return self.opts.create_missing_column_families
        def __set__(self, value):
            self.opts.create_missing_column_families = value

    property error_if_exists:
        def __get__(self):
            return self.opts.error_if_exists
        def __set__(self, value):
            self.opts.error_if_exists = value

    property paranoid_checks:
        def __get__(self):
            return self.opts.paranoid_checks
        def __set__(self, value):
            self.opts.paranoid_checks = value

    property max_open_files:
        def __get__(self):
            return self.opts.max_open_files
        def __set__(self, value):
            self.opts.max_open_files = value

    property use_fsync:
        def __get__(self):
            return self.opts.use_fsync
        def __set__(self, value):
            self.opts.use_fsync = value

    property db_log_dir:
        def __get__(self):
            return string_to_path(self.opts.db_log_dir)
        def __set__(self, value):
            self.opts.db_log_dir = path_to_string(value)

    property wal_dir:
        def __get__(self):
            return string_to_path(self.opts.wal_dir)
        def __set__(self, value):
            self.opts.wal_dir = path_to_string(value)

    property delete_obsolete_files_period_micros:
        def __get__(self):
            return self.opts.delete_obsolete_files_period_micros
        def __set__(self, value):
            self.opts.delete_obsolete_files_period_micros = value

    property max_background_compactions:
        def __get__(self):
            return self.opts.max_background_compactions
        def __set__(self, value):
            self.opts.max_background_compactions = value

    property stats_history_buffer_size:
        def __get__(self):
            return self.opts.stats_history_buffer_size
        def __set__(self, value):
            self.opts.stats_history_buffer_size = value

    property max_background_jobs:
        def __get__(self):
            return self.opts.max_background_jobs
        def __set__(self, value):
            self.opts.max_background_jobs = value

    property max_background_flushes:
        def __get__(self):
            return self.opts.max_background_flushes
        def __set__(self, value):
            self.opts.max_background_flushes = value

    property max_log_file_size:
        def __get__(self):
            return self.opts.max_log_file_size
        def __set__(self, value):
            self.opts.max_log_file_size = value

    property log_file_time_to_roll:
        def __get__(self):
            return self.opts.log_file_time_to_roll
        def __set__(self, value):
            self.opts.log_file_time_to_roll = value

    property keep_log_file_num:
        def __get__(self):
            return self.opts.keep_log_file_num
        def __set__(self, value):
            self.opts.keep_log_file_num = value

    property max_manifest_file_size:
        def __get__(self):
            return self.opts.max_manifest_file_size
        def __set__(self, value):
            self.opts.max_manifest_file_size = value

    property table_cache_numshardbits:
        def __get__(self):
            return self.opts.table_cache_numshardbits
        def __set__(self, value):
            self.opts.table_cache_numshardbits = value

    property wal_ttl_seconds:
        def __get__(self):
            return self.opts.WAL_ttl_seconds
        def __set__(self, value):
            self.opts.WAL_ttl_seconds = value

    property db_write_buffer_size:
        def __get__(self):
            return self.opts.db_write_buffer_size
        def __set__(self, value):
            self.opts.db_write_buffer_size = value

    property ttl:
        def __get__(self):
            return self.opts.ttl
        def __set__(self, value):
            self.opts.ttl = value

    property wal_size_limit_mb:
        def __get__(self):
            return self.opts.WAL_size_limit_MB
        def __set__(self, value):
            self.opts.WAL_size_limit_MB = value

    property manifest_preallocation_size:
        def __get__(self):
            return self.opts.manifest_preallocation_size
        def __set__(self, value):
            self.opts.manifest_preallocation_size = value

    property enable_write_thread_adaptive_yield:
        def __get__(self):
            return self.opts.enable_write_thread_adaptive_yield
        def __set__(self, value):
            self.opts.enable_write_thread_adaptive_yield = value

    property allow_concurrent_memtable_write:
        def __get__(self):
            return self.opts.allow_concurrent_memtable_write
        def __set__(self, value):
            self.opts.allow_concurrent_memtable_write = value

    property allow_mmap_reads:
        def __get__(self):
            return self.opts.allow_mmap_reads
        def __set__(self, value):
            self.opts.allow_mmap_reads = value

    property allow_mmap_writes:
        def __get__(self):
            return self.opts.allow_mmap_writes
        def __set__(self, value):
            self.opts.allow_mmap_writes = value

    property is_fd_close_on_exec:
        def __get__(self):
            return self.opts.is_fd_close_on_exec
        def __set__(self, value):
            self.opts.is_fd_close_on_exec = value

    property skip_log_error_on_recovery:
        def __get__(self):
            return self.opts.skip_log_error_on_recovery
        def __set__(self, value):
            self.opts.skip_log_error_on_recovery = value

    property stats_dump_period_sec:
        def __get__(self):
            return self.opts.stats_dump_period_sec
        def __set__(self, value):
            self.opts.stats_dump_period_sec = value

    property advise_random_on_open:
        def __get__(self):
            return self.opts.advise_random_on_open
        def __set__(self, value):
            self.opts.advise_random_on_open = value

    # TODO: need to remove -Wconversion to make this work
    # property access_hint_on_compaction_start:
    #     def __get__(self):
    #         return self.opts.access_hint_on_compaction_start
    #     def __set__(self, AccessHint value):
    #         self.opts.access_hint_on_compaction_start = value

    property use_adaptive_mutex:
        def __get__(self):
            return self.opts.use_adaptive_mutex
        def __set__(self, value):
            self.opts.use_adaptive_mutex = value

    property bytes_per_sync:
        def __get__(self):
            return self.opts.bytes_per_sync
        def __set__(self, value):
            self.opts.bytes_per_sync = value

    property row_cache:
        def __get__(self):
            return self.py_row_cache

        def __set__(self, value):
            if value is None:
                self.py_row_cache = None
                self.opts.row_cache.reset()
            elif not isinstance(value, PyCache):
                raise Exception("row_cache must be a Cache object")
            else:
                self.py_row_cache = value
                self.opts.row_cache = self.py_row_cache.get_cache()

# Forward declaration
cdef class Snapshot

cdef class KeysIterator
cdef class ValuesIterator
cdef class ItemsIterator
cdef class ReversedIterator

# Forward declaration
cdef class WriteBatchIterator

cdef class WriteBatch(object):
    cdef db.WriteBatch* batch

    def __cinit__(self, data=None):
        self.batch = NULL
        if data is not None:
            self.batch = new db.WriteBatch(bytes_to_string(data))
        else:
            self.batch = new db.WriteBatch()

    def __dealloc__(self):
        if not self.batch == NULL:
            del self.batch

    def put(self, key, value):
        cdef db.ColumnFamilyHandle* cf_handle = NULL
        if isinstance(key, tuple):
            column_family, key = key
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
        # nullptr selects the default column family
        self.batch.Put(cf_handle, bytes_to_slice(key), bytes_to_slice(value))

    def merge(self, key, value):
        cdef db.ColumnFamilyHandle* cf_handle = NULL
        if isinstance(key, tuple):
            column_family, key = key
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
        # nullptr selects the default column family
        self.batch.Merge(cf_handle, bytes_to_slice(key), bytes_to_slice(value))

    def delete(self, key):
        cdef db.ColumnFamilyHandle* cf_handle = NULL
        if isinstance(key, tuple):
            column_family, key = key
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
        # nullptr selects the default column family
        self.batch.Delete(cf_handle, bytes_to_slice(key))

    def clear(self):
        self.batch.Clear()

    def data(self):
        return string_to_bytes(self.batch.Data())

    def count(self):
        return self.batch.Count()

    def __iter__(self):
        return WriteBatchIterator(self)
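# Usage sketch (illustrative only): batch several mutations and apply them
# atomically via DB.write(); keys may be (column_family_handle, key) tuples.
#
#   batch = WriteBatch()
#   batch.put(b'first', b'1')
#   batch.merge(b'counter', b'1')
#   batch.delete(b'stale')
#   db.write(batch)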
@cython.internal
cdef class WriteBatchIterator(object):
    # Keep a reference to the WriteBatch alive:
    # the BatchItems are only pointers into memory owned by the WriteBatch.
    cdef WriteBatch batch
    cdef vector[db.BatchItem] items
    cdef size_t pos

    def __init__(self, WriteBatch batch):
        cdef Status st

        self.batch = batch
        self.pos = 0

        st = db.get_batch_items(batch.batch, cython.address(self.items))
        check_status(st)

    def __iter__(self):
        return self

    def __next__(self):
        if self.pos == self.items.size():
            raise StopIteration()

        cdef str op

        if self.items[self.pos].op == db.BatchItemOpPut:
            op = "Put"
        elif self.items[self.pos].op == db.BatchItemOpMerge:
            op = "Merge"
        elif self.items[self.pos].op == db.BatchItemOpDelte:
            # NB: "Delte" matches the (misspelled) enum name declared in db.pxd
            op = "Delete"

        if self.items[self.pos].column_family_id != 0:  # a column family is set
            ret = (
                op,
                (
                    self.items[self.pos].column_family_id,
                    slice_to_bytes(self.items[self.pos].key)
                ),
                slice_to_bytes(self.items[self.pos].value)
            )
        else:
            ret = (
                op,
                slice_to_bytes(self.items[self.pos].key),
                slice_to_bytes(self.items[self.pos].value)
            )
        self.pos += 1
        return ret
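# Usage sketch (illustrative only): iterating a WriteBatch yields
# (op, key, value) tuples; for non-default column families the key position
# holds a (column_family_id, key) pair instead.
#
#   for op, key, value in batch:
#       print(op, key, value)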
@cython.no_gc_clear
cdef class DB(object):
    cdef Options opts
    cdef db.DB* db
    cdef list cf_handles
    cdef list cf_options

    def __cinit__(self, db_name, Options opts, dict column_families=None, read_only=False):
        cdef Status st
        cdef string db_path
        cdef vector[db.ColumnFamilyDescriptor] column_family_descriptors
        cdef vector[db.ColumnFamilyHandle*] column_family_handles
        cdef bytes default_cf_name = db.kDefaultColumnFamilyName
        self.db = NULL
        self.opts = None
        self.cf_handles = []
        self.cf_options = []

        if opts.in_use:
            raise Exception("Options object is already used by another DB")

        db_path = path_to_string(db_name)
        if not column_families or default_cf_name not in column_families:
            # Always add the default column family
            column_family_descriptors.push_back(
                db.ColumnFamilyDescriptor(
                    db.kDefaultColumnFamilyName,
                    options.ColumnFamilyOptions(deref(opts.opts))
                )
            )
            self.cf_options.append(None)  # the default family shares the DB-wide options
        if column_families:
            for cf_name, cf_options in column_families.items():
                if not isinstance(cf_name, bytes):
                    raise TypeError(
                        f"column family name {cf_name!r} is not of type {bytes}!"
                    )
                if not isinstance(cf_options, ColumnFamilyOptions):
                    raise TypeError(
                        f"column family options {cf_options!r} is not of type "
                        f"{ColumnFamilyOptions}!"
                    )
                if (<ColumnFamilyOptions>cf_options).in_use:
                    raise Exception(
                        f"ColumnFamilyOptions object for {cf_name} is already "
                        "used by another Column Family"
                    )
                (<ColumnFamilyOptions>cf_options).in_use = True
                column_family_descriptors.push_back(
                    db.ColumnFamilyDescriptor(
                        cf_name,
                        deref((<ColumnFamilyOptions>cf_options).copts)
                    )
                )
                self.cf_options.append(cf_options)
        if read_only:
            with nogil:
                st = db.DB_OpenForReadOnly_ColumnFamilies(
                    deref(opts.opts),
                    db_path,
                    column_family_descriptors,
                    &column_family_handles,
                    &self.db,
                    False)
        else:
            with nogil:
                st = db.DB_Open_ColumnFamilies(
                    deref(opts.opts),
                    db_path,
                    column_family_descriptors,
                    &column_family_handles,
                    &self.db)
        check_status(st)

        for handle in column_family_handles:
            wrapper = _ColumnFamilyHandle.from_handle_ptr(handle)
            self.cf_handles.append(wrapper)

        # Inject the loggers into the python callbacks
        cdef shared_ptr[logger.Logger] info_log = self.db.GetOptions(
            self.db.DefaultColumnFamily()).info_log
        if opts.py_comparator is not None:
            opts.py_comparator.set_info_log(info_log)

        if opts.py_table_factory is not None:
            opts.py_table_factory.set_info_log(info_log)

        if opts.prefix_extractor is not None:
            opts.py_prefix_extractor.set_info_log(info_log)

        cdef ColumnFamilyOptions copts
        for idx, copts in enumerate(self.cf_options):
            if not copts:
                continue

            info_log = self.db.GetOptions(column_family_handles[idx]).info_log

            if copts.py_comparator is not None:
                copts.py_comparator.set_info_log(info_log)

            if copts.py_table_factory is not None:
                copts.py_table_factory.set_info_log(info_log)

            if copts.prefix_extractor is not None:
                copts.py_prefix_extractor.set_info_log(info_log)

        self.opts = opts
        self.opts.in_use = True

    def close(self, safe=True):
        cdef ColumnFamilyOptions copts
        cdef cpp_bool c_safe = safe
        cdef Status st
        if self.db != NULL:
            # We need to stop background compactions first
            with nogil:
                db.CancelAllBackgroundWork(self.db, c_safe)
            # We have to delete the column family handles before the db,
            # otherwise rocksdb asserts when the db is deleted
            del self.cf_handles[:]
            for copts in self.cf_options:
                if copts:
                    copts.in_use = False
            del self.cf_options[:]
            with nogil:
                st = self.db.Close()
            self.db = NULL
        if self.opts is not None:
            self.opts.in_use = False

    def __dealloc__(self):
        self.close()

    @property
    def column_families(self):
        return [handle.weakref for handle in self.cf_handles]

    def get_column_family(self, bytes name):
        for handle in self.cf_handles:
            if handle.name == name:
                return handle.weakref
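    # Usage sketch (illustrative only): open a database with one extra column
    # family; names are bytes and each family gets its own ColumnFamilyOptions.
    #
    #   opts = Options(create_if_missing=True, create_missing_column_families=True)
    #   db_ = DB("test.db", opts, column_families={b'logs': ColumnFamilyOptions()})
    #   logs = db_.get_column_family(b'logs')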
    def put(self, key, value, sync=False, disable_wal=False):
        cdef Status st
        cdef options.WriteOptions opts
        opts.sync = sync
        opts.disableWAL = disable_wal

        if isinstance(key, tuple):
            column_family, key = key
        else:
            column_family = None

        cdef Slice c_key = bytes_to_slice(key)
        cdef Slice c_value = bytes_to_slice(value)
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()

        with nogil:
            st = self.db.Put(opts, cf_handle, c_key, c_value)
        check_status(st)

    def delete(self, key, sync=False, disable_wal=False):
        cdef Status st
        cdef options.WriteOptions opts
        opts.sync = sync
        opts.disableWAL = disable_wal

        if isinstance(key, tuple):
            column_family, key = key
        else:
            column_family = None

        cdef Slice c_key = bytes_to_slice(key)
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()

        with nogil:
            st = self.db.Delete(opts, cf_handle, c_key)
        check_status(st)

    def merge(self, key, value, sync=False, disable_wal=False):
        cdef Status st
        cdef options.WriteOptions opts
        opts.sync = sync
        opts.disableWAL = disable_wal

        if isinstance(key, tuple):
            column_family, key = key
        else:
            column_family = None

        cdef Slice c_key = bytes_to_slice(key)
        cdef Slice c_value = bytes_to_slice(value)
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()

        with nogil:
            st = self.db.Merge(opts, cf_handle, c_key, c_value)
        check_status(st)

    def write(self, WriteBatch batch, sync=False, disable_wal=False):
        cdef Status st
        cdef options.WriteOptions opts
        opts.sync = sync
        opts.disableWAL = disable_wal

        with nogil:
            st = self.db.Write(opts, batch.batch)
        check_status(st)

    def get(self, key, *args, **kwargs):
        cdef string res
        cdef Status st
        cdef options.ReadOptions opts

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))

        if isinstance(key, tuple):
            column_family, key = key
        else:
            column_family = None

        cdef Slice c_key = bytes_to_slice(key)
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()

        with nogil:
            st = self.db.Get(opts, cf_handle, c_key, cython.address(res))

        if st.ok():
            return string_to_bytes(res)
        elif st.IsNotFound():
            return None
        else:
            check_status(st)

    def multi_get(self, keys, *args, **kwargs):
        # Remove duplicate keys (dict.fromkeys preserves their order)
        keys = list(dict.fromkeys(keys))

        cdef vector[string] values
        values.resize(len(keys))

        cdef db.ColumnFamilyHandle* cf_handle
        cdef vector[db.ColumnFamilyHandle*] cf_handles
        cdef vector[Slice] c_keys
        for key in keys:
            if isinstance(key, tuple):
                py_handle, key = key
                cf_handle = (<ColumnFamilyHandle?>py_handle).get_handle()
            else:
                cf_handle = self.db.DefaultColumnFamily()
            c_keys.push_back(bytes_to_slice(key))
            cf_handles.push_back(cf_handle)

        cdef options.ReadOptions opts
        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))

        cdef vector[Status] res
        with nogil:
            res = self.db.MultiGet(
                opts,
                cf_handles,
                c_keys,
                cython.address(values))

        cdef dict ret_dict = {}
        for index in range(len(keys)):
            if res[index].ok():
                ret_dict[keys[index]] = string_to_bytes(values[index])
            elif res[index].IsNotFound():
                ret_dict[keys[index]] = None
            else:
                check_status(res[index])

        return ret_dict
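    # Usage sketch (illustrative only): multi_get returns a dict keyed by the
    # keys exactly as passed in (including (column_family, key) tuples);
    # missing keys map to None.
    #
    #   result = db_.multi_get([b'a', b'b', (logs, b'c')])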
    def key_may_exist(self, key, fetch=False, *args, **kwargs):
        cdef string value
        cdef cpp_bool value_found
        cdef cpp_bool exists
        cdef options.ReadOptions opts
        cdef Slice c_key
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        if isinstance(key, tuple):
            column_family, key = key
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()

        c_key = bytes_to_slice(key)
        exists = False

        if fetch:
            value_found = False
            with nogil:
                exists = self.db.KeyMayExist(
                    opts,
                    cf_handle,
                    c_key,
                    cython.address(value),
                    cython.address(value_found))

            if exists:
                if value_found:
                    return (True, string_to_bytes(value))
                else:
                    return (True, None)
            else:
                return (False, None)
        else:
            with nogil:
                exists = self.db.KeyMayExist(
                    opts,
                    cf_handle,
                    c_key,
                    cython.address(value))

            return (exists, None)
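    # Usage sketch (illustrative only): the first element is a bloom-filter
    # style "may exist" answer; with fetch=True the value is also returned
    # when it could be retrieved without extra I/O.
    #
    #   may_exist, value = db_.key_may_exist(b'a', fetch=True)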
    def iterkeys(self, ColumnFamilyHandle column_family=None, *args, **kwargs):
        cdef options.ReadOptions opts
        cdef KeysIterator it
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = column_family.get_handle()

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        it = KeysIterator(self, column_family)

        with nogil:
            it.ptr = self.db.NewIterator(opts, cf_handle)
        return it

    def itervalues(self, ColumnFamilyHandle column_family=None, *args, **kwargs):
        cdef options.ReadOptions opts
        cdef ValuesIterator it
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = column_family.get_handle()

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))

        it = ValuesIterator(self)

        with nogil:
            it.ptr = self.db.NewIterator(opts, cf_handle)
        return it

    def iteritems(self, ColumnFamilyHandle column_family=None, *args, **kwargs):
        cdef options.ReadOptions opts
        cdef ItemsIterator it
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = column_family.get_handle()
        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))

        it = ItemsIterator(self, column_family)

        with nogil:
            it.ptr = self.db.NewIterator(opts, cf_handle)
        return it
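    # Usage sketch (illustrative only): iterators start unpositioned, so seek
    # before consuming them.
    #
    #   it = db_.iterkeys()
    #   it.seek_to_first()
    #   print(list(it))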
    def iterskeys(self, column_families, *args, **kwargs):
        cdef vector[db.Iterator*] iters
        iters.resize(len(column_families))
        cdef options.ReadOptions opts
        cdef db.Iterator* it_ptr
        cdef KeysIterator it
        cdef db.ColumnFamilyHandle* cf_handle
        cdef vector[db.ColumnFamilyHandle*] cf_handles

        for column_family in column_families:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
            cf_handles.push_back(cf_handle)

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        with nogil:
            self.db.NewIterators(opts, cf_handles, &iters)

        cf_iter = iter(column_families)
        cdef list ret = []
        for it_ptr in iters:
            it = KeysIterator(self, next(cf_iter))
            it.ptr = it_ptr
            ret.append(it)
        return ret

    def itersvalues(self, column_families, *args, **kwargs):
        cdef vector[db.Iterator*] iters
        iters.resize(len(column_families))
        cdef options.ReadOptions opts
        cdef db.Iterator* it_ptr
        cdef ValuesIterator it
        cdef db.ColumnFamilyHandle* cf_handle
        cdef vector[db.ColumnFamilyHandle*] cf_handles

        for column_family in column_families:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
            cf_handles.push_back(cf_handle)

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        with nogil:
            self.db.NewIterators(opts, cf_handles, &iters)

        cdef list ret = []
        for it_ptr in iters:
            it = ValuesIterator(self)
            it.ptr = it_ptr
            ret.append(it)
        return ret

    # renamed from a copy-pasted second "iterskeys": this variant builds
    # ItemsIterators and would otherwise shadow the real iterskeys above
    def itersitems(self, column_families, *args, **kwargs):
        cdef vector[db.Iterator*] iters
        iters.resize(len(column_families))
        cdef options.ReadOptions opts
        cdef db.Iterator* it_ptr
        cdef ItemsIterator it
        cdef db.ColumnFamilyHandle* cf_handle
        cdef vector[db.ColumnFamilyHandle*] cf_handles

        for column_family in column_families:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()
            cf_handles.push_back(cf_handle)

        opts = self.build_read_opts(self.__parse_read_opts(*args, **kwargs))
        with nogil:
            self.db.NewIterators(opts, cf_handles, &iters)

        cf_iter = iter(column_families)
        cdef list ret = []
        for it_ptr in iters:
            it = ItemsIterator(self, next(cf_iter))
            it.ptr = it_ptr
            ret.append(it)
        return ret
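    # Usage sketch (illustrative only): the iters* variants create one
    # iterator per column family in a single NewIterators() call, so they
    # all observe a consistent view of the database.
    #
    #   key_iters = db_.iterskeys(db_.column_families)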
    def snapshot(self):
        return Snapshot(self)

    def get_property(self, prop, ColumnFamilyHandle column_family=None):
        cdef string value
        cdef Slice c_prop = bytes_to_slice(prop)
        cdef cpp_bool ret = False
        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = column_family.get_handle()

        with nogil:
            ret = self.db.GetProperty(cf_handle, c_prop, cython.address(value))

        if ret:
            return string_to_bytes(value)
        else:
            return None

    def get_live_files_metadata(self):
        cdef vector[db.LiveFileMetaData] metadata

        with nogil:
            self.db.GetLiveFilesMetaData(cython.address(metadata))

        ret = []
        for ob in metadata:
            t = {}
            t['name'] = string_to_path(ob.name)
            t['level'] = ob.level
            t['size'] = ob.size
            t['smallestkey'] = string_to_bytes(ob.smallestkey)
            t['largestkey'] = string_to_bytes(ob.largestkey)
            t['smallest_seqno'] = ob.smallest_seqno
            t['largest_seqno'] = ob.largest_seqno

            ret.append(t)

        return ret

    def get_column_family_meta_data(self, ColumnFamilyHandle column_family=None):
        cdef db.ColumnFamilyMetaData metadata

        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()

        with nogil:
            self.db.GetColumnFamilyMetaData(cf_handle, cython.address(metadata))

        return {
            "size": metadata.size,
            "file_count": metadata.file_count,
        }

    def compact_range(self, begin=None, end=None, ColumnFamilyHandle column_family=None, **py_options):
        cdef options.CompactRangeOptions c_options

        c_options.change_level = py_options.get('change_level', False)
        c_options.target_level = py_options.get('target_level', -1)

        blc = py_options.get('bottommost_level_compaction', 'if_compaction_filter')
        if blc == 'skip':
            c_options.bottommost_level_compaction = options.blc_skip
        elif blc == 'if_compaction_filter':
            c_options.bottommost_level_compaction = options.blc_is_filter
        elif blc == 'force':
            c_options.bottommost_level_compaction = options.blc_force
        else:
            raise ValueError("bottommost_level_compaction is not valid")

        cdef Status st
        cdef Slice begin_val
        cdef Slice end_val

        cdef Slice* begin_ptr
        cdef Slice* end_ptr

        begin_ptr = NULL
        end_ptr = NULL

        if begin is not None:
            begin_val = bytes_to_slice(begin)
            begin_ptr = cython.address(begin_val)

        if end is not None:
            end_val = bytes_to_slice(end)
            end_ptr = cython.address(end_val)

        cdef db.ColumnFamilyHandle* cf_handle = self.db.DefaultColumnFamily()
        if column_family:
            cf_handle = (<ColumnFamilyHandle?>column_family).get_handle()

        st = self.db.CompactRange(c_options, cf_handle, begin_ptr, end_ptr)
        check_status(st)
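    # Usage sketch (illustrative only): compact the whole key range of the
    # default column family and force-rewrite the bottommost level.
    #
    #   db_.compact_range(bottommost_level_compaction='force')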
    @staticmethod
    def __parse_read_opts(
        verify_checksums=False,
        fill_cache=True,
        snapshot=None,
        read_tier="all"):

        # TODO: Is this really efficient?
        return locals()

    cdef options.ReadOptions build_read_opts(self, dict py_opts):
        cdef options.ReadOptions opts
        opts.verify_checksums = py_opts['verify_checksums']
        opts.fill_cache = py_opts['fill_cache']
        if py_opts['snapshot'] is not None:
            opts.snapshot = (<Snapshot?>(py_opts['snapshot'])).ptr

        if py_opts['read_tier'] == "all":
            opts.read_tier = options.kReadAllTier
        elif py_opts['read_tier'] == 'cache':
            opts.read_tier = options.kBlockCacheTier
        else:
            raise ValueError("Invalid read_tier")

        return opts

    property options:
        def __get__(self):
            return self.opts
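    # Usage sketch (illustrative only): every read method accepts these
    # keyword arguments, which are translated into a rocksdb::ReadOptions by
    # build_read_opts above.
    #
    #   snap = db_.snapshot()
    #   db_.put(b'a', b'new')
    #   old = db_.get(b'a', snapshot=snap, read_tier='all')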
    def create_column_family(self, bytes name, ColumnFamilyOptions copts):
        cdef db.ColumnFamilyHandle* cf_handle
        cdef Status st
        cdef string c_name = name

        for handle in self.cf_handles:
            if handle.name == name:
                raise ValueError(f"{name} is already an existing column family")

        if copts.in_use:
            raise Exception("ColumnFamilyOptions are in_use by another column family")

        copts.in_use = True
        with nogil:
            st = self.db.CreateColumnFamily(deref(copts.copts), c_name, &cf_handle)
        check_status(st)

        handle = _ColumnFamilyHandle.from_handle_ptr(cf_handle)

        self.cf_handles.append(handle)
        self.cf_options.append(copts)
        return handle.weakref

    def drop_column_family(self, ColumnFamilyHandle weak_handle not None):
        cdef db.ColumnFamilyHandle* cf_handle
        cdef ColumnFamilyOptions copts
        cdef Status st

        cf_handle = weak_handle.get_handle()

        with nogil:
            st = self.db.DropColumnFamily(cf_handle)
        check_status(st)

        py_handle = weak_handle._ref()
        index = self.cf_handles.index(py_handle)
        copts = self.cf_options.pop(index)
        del self.cf_handles[index]
        del py_handle
        if copts:
            copts.in_use = False
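# Usage sketch (illustrative only): column families can be created and
# dropped at runtime; create_column_family returns a weak handle.
#
#   logs = db_.create_column_family(b'logs', ColumnFamilyOptions())
#   db_.drop_column_family(logs)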
def repair_db(db_name, Options opts):
    cdef Status st
    cdef string db_path

    db_path = path_to_string(db_name)
    st = db.RepairDB(db_path, deref(opts.opts))
    check_status(st)


def list_column_families(db_name, Options opts):
    cdef Status st
    cdef string db_path
    cdef vector[string] column_families

    db_path = path_to_string(db_name)
    with nogil:
        st = db.ListColumnFamilies(deref(opts.opts), db_path, &column_families)
    check_status(st)

    return column_families


@cython.no_gc_clear
@cython.internal
cdef class Snapshot(object):
    cdef const snapshot.Snapshot* ptr
    cdef DB db

    def __cinit__(self, DB db):
        self.db = db
        self.ptr = NULL
        with nogil:
            self.ptr = db.db.GetSnapshot()

    def __dealloc__(self):
        if not self.ptr == NULL:
            with nogil:
                self.db.db.ReleaseSnapshot(self.ptr)


@cython.internal
cdef class BaseIterator(object):
    cdef iterator.Iterator* ptr
    cdef DB db
    cdef ColumnFamilyHandle handle

    def __cinit__(self, DB db, ColumnFamilyHandle handle = None):
        self.db = db
        self.ptr = NULL
        self.handle = handle

    def __dealloc__(self):
        if not self.ptr == NULL:
            del self.ptr

    def __iter__(self):
        return self

    def __next__(self):
        if not self.ptr.Valid():
            raise StopIteration()

        cdef object ret = self.get_ob()
        with nogil:
            self.ptr.Next()
        check_status(self.ptr.status())
        return ret

    def get(self):
        cdef object ret = self.get_ob()
        return ret

    def __reversed__(self):
        return ReversedIterator(self)

    cpdef seek_to_first(self):
        with nogil:
            self.ptr.SeekToFirst()
        check_status(self.ptr.status())

    cpdef seek_to_last(self):
        with nogil:
            self.ptr.SeekToLast()
        check_status(self.ptr.status())

    cpdef seek(self, key):
        cdef Slice c_key = bytes_to_slice(key)
        with nogil:
            self.ptr.Seek(c_key)
        check_status(self.ptr.status())

    cpdef seek_for_prev(self, key):
        cdef Slice c_key = bytes_to_slice(key)
        with nogil:
            self.ptr.SeekForPrev(c_key)
        check_status(self.ptr.status())

    cdef object get_ob(self):
        return None

@cython.internal
cdef class KeysIterator(BaseIterator):
    cdef object get_ob(self):
        cdef Slice c_key
        with nogil:
            c_key = self.ptr.key()
        check_status(self.ptr.status())
        if self.handle:
            return self.handle, slice_to_bytes(c_key)
        return slice_to_bytes(c_key)

@cython.internal
cdef class ValuesIterator(BaseIterator):
    cdef object get_ob(self):
        cdef Slice c_value
        with nogil:
            c_value = self.ptr.value()
        check_status(self.ptr.status())
        return slice_to_bytes(c_value)

@cython.internal
cdef class ItemsIterator(BaseIterator):
    cdef object get_ob(self):
        cdef Slice c_key
        cdef Slice c_value
        with nogil:
            c_key = self.ptr.key()
            c_value = self.ptr.value()
        check_status(self.ptr.status())
        if self.handle:
            return ((self.handle, slice_to_bytes(c_key)), slice_to_bytes(c_value))
        return (slice_to_bytes(c_key), slice_to_bytes(c_value))

@cython.internal
cdef class ReversedIterator(object):
    cdef BaseIterator it

    def __cinit__(self, BaseIterator it):
        self.it = it

    def seek_to_first(self):
        self.it.seek_to_first()

    def seek_to_last(self):
        self.it.seek_to_last()

    def seek(self, key):
        self.it.seek(key)

    def seek_for_prev(self, key):
        self.it.seek_for_prev(key)

    def get(self):
        return self.it.get()

    def __iter__(self):
        return self

    def __reversed__(self):
        return self.it

    def __next__(self):
        if not self.it.ptr.Valid():
            raise StopIteration()

        cdef object ret = self.it.get_ob()
        with nogil:
            self.it.ptr.Prev()
        check_status(self.it.ptr.status())
        return ret

cdef class BackupEngine(object):
    cdef backup.BackupEngine* engine

    def __cinit__(self, backup_dir):
        cdef Status st
        cdef string c_backup_dir
        self.engine = NULL

        c_backup_dir = path_to_string(backup_dir)
        st = backup.BackupEngine_Open(
            env.Env_Default(),
            backup.BackupableDBOptions(c_backup_dir),
            cython.address(self.engine))

        check_status(st)

    def __dealloc__(self):
        if not self.engine == NULL:
            with nogil:
                del self.engine

    def create_backup(self, DB db, flush_before_backup=False):
        cdef Status st
        cdef cpp_bool c_flush_before_backup

        c_flush_before_backup = flush_before_backup

        with nogil:
            st = self.engine.CreateNewBackup(db.db, c_flush_before_backup)
        check_status(st)

    def restore_backup(self, backup_id, db_dir, wal_dir):
        cdef Status st
        cdef backup.BackupID c_backup_id
        cdef string c_db_dir
        cdef string c_wal_dir

        c_backup_id = backup_id
        c_db_dir = path_to_string(db_dir)
        c_wal_dir = path_to_string(wal_dir)

        with nogil:
            st = self.engine.RestoreDBFromBackup(
                c_backup_id,
                c_db_dir,
                c_wal_dir)

        check_status(st)

    def restore_latest_backup(self, db_dir, wal_dir):
        cdef Status st
        cdef string c_db_dir
        cdef string c_wal_dir

        c_db_dir = path_to_string(db_dir)
        c_wal_dir = path_to_string(wal_dir)

        with nogil:
            st = self.engine.RestoreDBFromLatestBackup(c_db_dir, c_wal_dir)

        check_status(st)

    def stop_backup(self):
        with nogil:
            self.engine.StopBackup()

    def purge_old_backups(self, num_backups_to_keep):
        cdef Status st
        cdef uint32_t c_num_backups_to_keep

        c_num_backups_to_keep = num_backups_to_keep

        with nogil:
            st = self.engine.PurgeOldBackups(c_num_backups_to_keep)
        check_status(st)

    def delete_backup(self, backup_id):
        cdef Status st
        cdef backup.BackupID c_backup_id

        c_backup_id = backup_id

        with nogil:
            st = self.engine.DeleteBackup(c_backup_id)

        check_status(st)

    def get_backup_info(self):
        cdef vector[backup.BackupInfo] backup_info

        with nogil:
            self.engine.GetBackupInfo(cython.address(backup_info))

        ret = []
        for ob in backup_info:
            t = {}
            t['backup_id'] = ob.backup_id
            t['timestamp'] = ob.timestamp
            t['size'] = ob.size
            ret.append(t)

        return ret
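# Usage sketch (illustrative only): back up an open database and restore the
# newest backup into a fresh directory.
#
#   engine = BackupEngine("backups/")
#   engine.create_backup(db_, flush_before_backup=True)
#   engine.restore_latest_backup("restored.db", "restored.db")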