# Copyright (c) 2016-2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.

"""Backend database abstraction."""

import os
from functools import partial
from torba.server import util


def db_class(name):
    """Return the DB engine class whose name matches *name*.

    The lookup is case-insensitive over all registered subclasses of
    Storage.  The matching engine's backing module is imported before
    the class is returned, so the caller gets a ready-to-use class.

    Raises:
        RuntimeError: if no engine with that name is registered.
    """
    # NOTE: the original loop variable shadowed this function's own
    # name; renamed to `engine` for clarity.
    for engine in util.subclasses(Storage):
        if engine.__name__.lower() == name.lower():
            # Import lazily so unused engines never pull in their
            # third-party bindings.
            engine.import_module()
            return engine
    raise RuntimeError('unrecognised DB engine "{}"'.format(name))
|
|
|
|
|
|
|
|
|
2018-11-26 02:51:41 +01:00
|
|
|
class Storage:
    """Abstract base class of the DB backend abstraction.

    Concrete engines subclass this and implement the raising methods
    below.  Construction detects whether the database already exists
    on disk and opens (or creates) it immediately.
    """

    def __init__(self, name, for_sync):
        # The database counts as "new" when nothing exists at its path.
        exists = os.path.exists(name)
        self.is_new = not exists
        # Sync mode is forced on for freshly created databases.
        self.for_sync = for_sync or self.is_new
        self.open(name, create=self.is_new)

    @classmethod
    def import_module(cls):
        """Import the engine's backing module and bind it to the class."""
        raise NotImplementedError

    def open(self, name, create):
        """Open the database at *name*, creating it if *create* is true."""
        raise NotImplementedError

    def close(self):
        """Close the open database."""
        raise NotImplementedError

    def get(self, key):
        """Return the value stored under *key*, if any."""
        raise NotImplementedError

    def put(self, key, value):
        """Store *value* under *key*."""
        raise NotImplementedError

    def write_batch(self):
        """Return a context manager exposing `put` and `delete`.

        Accumulated changes must be committed only if the managed
        block exits without raising.
        """
        raise NotImplementedError

    def iterator(self, prefix=b'', reverse=False):
        """Return an iterator of (key, value) pairs sorted by key.

        Only keys starting with *prefix* are yielded.  When *reverse*
        is true the pairs come back in descending key order.
        """
        raise NotImplementedError
|
|
|
|
|
|
|
|
|
|
|
|
class LevelDB(Storage):
    """LevelDB database engine."""

    @classmethod
    def import_module(cls):
        """Bind the plyvel bindings to the class on first use."""
        import plyvel
        cls.module = plyvel

    def open(self, name, create):
        """Open (or create) the LevelDB database at *name*.

        The handle's primitive operations are re-exported directly as
        instance attributes, avoiding a Python-level indirection on
        every call.
        """
        if self.for_sync:
            max_files = 512
        else:
            max_files = 128
        # Use snappy compression (the default)
        db = self.module.DB(name, create_if_missing=create,
                            max_open_files=max_files)
        self.db = db
        self.close = db.close
        self.get = db.get
        self.put = db.put
        self.iterator = db.iterator
        self.write_batch = partial(db.write_batch, transaction=True,
                                   sync=True)
|
|
|
|
|
|
|
|
|
|
|
|
class RocksDB(Storage):
    """RocksDB database engine."""

    @classmethod
    def import_module(cls):
        """Bind the pyrocksdb bindings to the class on first use."""
        import rocksdb
        cls.module = rocksdb

    def open(self, name, create):
        """Open (or create) the RocksDB database at *name*."""
        max_files = 512 if self.for_sync else 128
        # Use snappy compression (the default).
        # 33554432 bytes == 32 MiB target SST file size.
        options = self.module.Options(create_if_missing=create,
                                      use_fsync=True,
                                      target_file_size_base=33554432,
                                      max_open_files=max_files)
        db = self.module.DB(name, options)
        self.db = db
        # Delegate the read/write primitives straight to the handle.
        self.get = db.get
        self.put = db.put

    def close(self):
        """Release the database handle.

        PyRocksDB doesn't provide a close method; dropping every
        reference and forcing a collection is hopefully enough.
        """
        self.db = None
        self.get = None
        self.put = None
        import gc
        gc.collect()

    def write_batch(self):
        """Return a transactional write-batch context manager."""
        return RocksDBWriteBatch(self.db)

    def iterator(self, prefix=b'', reverse=False):
        """Return a (key, value) iterator over keys starting with *prefix*."""
        return RocksDBIterator(self.db, prefix, reverse)
|
|
|
|
|
|
|
|
|
2018-11-26 02:51:41 +01:00
|
|
|
class RocksDBWriteBatch:
    """A write batch for RocksDB.

    Context manager: mutations accumulate in the batch and are written
    to the database atomically on a clean exit; they are discarded if
    the managed block raises.
    """

    def __init__(self, db):
        self.batch = RocksDB.module.WriteBatch()
        self.db = db

    def __enter__(self):
        return self.batch

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit only on a clean exit.  Check exc_type rather than the
        # exception instance: exc_type is None exactly when no
        # exception occurred, whereas an instance could in principle
        # be falsy and silently trigger a spurious commit.
        if exc_type is None:
            self.db.write(self.batch)
|
|
|
|
|
|
|
|
|
2018-11-26 02:51:41 +01:00
|
|
|
class RocksDBIterator:
    """An iterator for RocksDB."""

    def __init__(self, db, prefix, reverse):
        # prefix: bytes; __next__ stops once a key no longer starts
        # with it.
        self.prefix = prefix
        if reverse:
            # Walk the underlying rocksdb iterator backwards.
            self.iterator = reversed(db.iteritems())
            # increment_byte_string presumably returns the smallest
            # byte string greater than every key starting with
            # `prefix`, or a falsy value when no such string exists
            # (empty / all-0xff prefix) — confirm against util.
            nxt_prefix = util.increment_byte_string(prefix)
            if nxt_prefix:
                # Seek just past the prefixed range, then step once so
                # the reversed iterator starts on the last prefixed key.
                self.iterator.seek(nxt_prefix)
                try:
                    next(self.iterator)
                except StopIteration:
                    # Seeked beyond the last key; re-seek so iteration
                    # can still begin from nxt_prefix.
                    self.iterator.seek(nxt_prefix)
            else:
                # No upper bound: start from the very last key in the
                # database.
                self.iterator.seek_to_last()
        else:
            self.iterator = db.iteritems()
            # Position at the first key >= prefix.
            self.iterator.seek(prefix)

    def __iter__(self):
        return self

    def __next__(self):
        k, v = next(self.iterator)
        # Stop as soon as a key falls outside the prefix range.
        if not k.startswith(self.prefix):
            raise StopIteration
        return k, v
|