# Copyright (c) 2016-2018, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.

"""History by script hash (address)."""

import array
import ast
import bisect
import time
from collections import defaultdict
from functools import partial

from lbry.wallet.server import util
from lbry.wallet.server.util import pack_be_uint16, unpack_be_uint16_from
from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN


HASHX_HISTORY_PREFIX = b'x'
HIST_STATE = b'state-hist'
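
# Key layout relied on throughout this module (a summary drawn from
# flush(), clear_excess() and the compaction helpers below, added here
# purely as a reader aid):
#
#   history row key:    HASHX_HISTORY_PREFIX + hashX (HASHX_LEN bytes)
#                       + 2-byte big-endian suffix (a flush_id for rows
#                       written by flush(); a row number 0, 1, ... once
#                       the hashX has been compacted)
#   history row value:  the raw bytes of an array.array('I') of tx numbers
#   state row:          HIST_STATE -> repr() of the dict written by
#                       write_state()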


class History:

    DB_VERSIONS = [0]

    def __init__(self):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        # For history compaction
        self.max_hist_row_entries = 12500
        self.unflushed = defaultdict(partial(array.array, 'I'))
        self.unflushed_count = 0
        self.db = None

    def open_db(self, db, for_sync, utxo_flush_count, compacting):
        self.db = db  # db_class('hist', for_sync)
        self.read_state()
        self.clear_excess(utxo_flush_count)
        # An incomplete compaction needs to be cancelled otherwise
        # restarting it will corrupt the history
        if not compacting:
            self._cancel_compaction()
        return self.flush_count

    def close_db(self):
        if self.db:
            # self.db.close()
            self.db = None

    def read_state(self):
        state = self.db.get(HIST_STATE)
        if state:
            state = ast.literal_eval(state.decode())
            if not isinstance(state, dict):
                raise RuntimeError('failed reading state from history DB')
            self.flush_count = state['flush_count']
            self.comp_flush_count = state.get('comp_flush_count', -1)
            self.comp_cursor = state.get('comp_cursor', -1)
            self.db_version = state.get('db_version', 0)
        else:
            self.flush_count = 0
            self.comp_flush_count = -1
            self.comp_cursor = -1
            self.db_version = max(self.DB_VERSIONS)

        self.logger.info(f'history DB version: {self.db_version}')
        if self.db_version not in self.DB_VERSIONS:
            msg = f'this software only handles DB versions {self.DB_VERSIONS}'
            self.logger.error(msg)
            raise RuntimeError(msg)
        self.logger.info(f'flush count: {self.flush_count:,d}')

    def clear_excess(self, utxo_flush_count):
        # flush_count < utxo_flush_count can happen at the end of a
        # compaction, as the two DBs cannot be updated atomically
        if self.flush_count <= utxo_flush_count:
            return

        self.logger.info('DB shut down uncleanly. Scanning for '
                         'excess history flushes...')

        keys = []
        for key, hist in self.db.iterator(prefix=HASHX_HISTORY_PREFIX):
            k = key[1:]
            flush_id, = unpack_be_uint16_from(k[-2:])
            if flush_id > utxo_flush_count:
                keys.append(k)

        self.logger.info(f'deleting {len(keys):,d} history entries')

        self.flush_count = utxo_flush_count
        with self.db.write_batch() as batch:
            for key in keys:
                batch.delete(HASHX_HISTORY_PREFIX + key)
            self.write_state(batch)

        self.logger.info('deleted excess history entries')

    def write_state(self, batch):
        """Write state to the history DB."""
        state = {
            'flush_count': self.flush_count,
            'comp_flush_count': self.comp_flush_count,
            'comp_cursor': self.comp_cursor,
            'db_version': self.db_version,
        }
        # The state lives under its own key, distinct from the b'x'-prefixed
        # history rows, so the two cannot interfere with each other
        batch.put(HIST_STATE, repr(state).encode())
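        # For reference, the stored value is the repr() of that dict, e.g.
        # (illustrative numbers only):
        #   b"{'flush_count': 42, 'comp_flush_count': -1, 'comp_cursor': -1, 'db_version': 0}"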

    def add_unflushed(self, hashXs_by_tx, first_tx_num):
        """Accumulate history in memory: hashXs_by_tx is a sequence of
        hashX collections, one per transaction, numbered consecutively
        starting at first_tx_num."""
        unflushed = self.unflushed
        count = 0
        for tx_num, hashXs in enumerate(hashXs_by_tx, start=first_tx_num):
            hashXs = set(hashXs)
            for hashX in hashXs:
                unflushed[hashX].append(tx_num)
            count += len(hashXs)
        self.unflushed_count += count

    def unflushed_memsize(self):
        # Rough estimate: ~180 bytes of dict/array overhead per hashX plus
        # 4 bytes for each buffered tx number
        return len(self.unflushed) * 180 + self.unflushed_count * 4

    def assert_flushed(self):
        assert not self.unflushed

    def flush(self):
        start_time = time.time()
        self.flush_count += 1
        flush_id = pack_be_uint16(self.flush_count)
        unflushed = self.unflushed

        with self.db.write_batch() as batch:
            for hashX in sorted(unflushed):
                key = hashX + flush_id
                batch.put(HASHX_HISTORY_PREFIX + key, unflushed[hashX].tobytes())
            self.write_state(batch)

        count = len(unflushed)
        unflushed.clear()
        self.unflushed_count = 0

        if self.db.for_sync:
            elapsed = time.time() - start_time
            self.logger.info(f'flushed history in {elapsed:.1f}s '
                             f'for {count:,d} addrs')
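
    # Reading a row back is the mirror image of flush() above (this is what
    # backup() and the compaction helpers below do); shown here only as an
    # illustration:
    #     a = array.array('I')
    #     a.frombytes(row_value)   # 'a' is now the ascending list of tx numbers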

    def backup(self, hashXs, tx_count):
        # Not certain this is needed, but it doesn't hurt
        self.flush_count += 1
        nremoves = 0
        bisect_left = bisect.bisect_left

        with self.db.write_batch() as batch:
            for hashX in sorted(hashXs):
                deletes = []
                puts = {}
                for key, hist in self.db.iterator(prefix=HASHX_HISTORY_PREFIX + hashX, reverse=True):
                    k = key[1:]
                    a = array.array('I')
                    a.frombytes(hist)
                    # Remove all history entries >= tx_count
                    idx = bisect_left(a, tx_count)
                    nremoves += len(a) - idx
                    if idx > 0:
                        puts[k] = a[:idx].tobytes()
                        break
                    deletes.append(k)

                # Re-add the prefix stripped off above, as _flush_compaction
                # does, so the deletes and puts hit the stored keys
                for key in deletes:
                    batch.delete(HASHX_HISTORY_PREFIX + key)
                for key, value in puts.items():
                    batch.put(HASHX_HISTORY_PREFIX + key, value)
            self.write_state(batch)

        self.logger.info(f'backing up removed {nremoves:,d} history entries')

    # def get_txnums(self, hashX, limit=1000):
    #     """Generator that returns an unpruned, sorted list of tx_nums in the
    #     history of a hashX. Includes both spending and receiving
    #     transactions. By default yields at most 1000 entries. Set
    #     limit to None to get them all. """
    #     limit = util.resolve_limit(limit)
    #     for key, hist in self.db.iterator(prefix=hashX):
    #         a = array.array('I')
    #         a.frombytes(hist)
    #         for tx_num in a:
    #             if limit == 0:
    #                 return
    #             yield tx_num
    #             limit -= 1

    #
    # History compaction
    #

    # comp_cursor is a cursor into compaction progress.
    # -1: no compaction in progress
    # 0-65535: Compaction in progress; all prefixes < comp_cursor have
    #     been compacted, and later ones have not.
    # 65536: compaction complete in-memory but not flushed
    #
    # comp_flush_count applies during compaction, and is a flush count
    # for history with prefix < comp_cursor. flush_count applies
    # to still uncompacted history. It is -1 when no compaction is
    # taking place. Key suffixes up to and including comp_flush_count
    # are used, so a parallel history flush must first increment this
    #
    # When compaction is complete and the final flush takes place,
    # flush_count is reset to comp_flush_count, and comp_flush_count to -1
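    #
    # Illustrative example (not from the original source): with
    # comp_cursor == 300, every hashX whose first two bytes unpack as a
    # big-endian integer below 300 has had its rows rewritten with row
    # numbers 0, 1, ... as suffixes; hashXs at or beyond that prefix still
    # carry their original per-flush suffixes until the cursor reaches them.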

    def _flush_compaction(self, cursor, write_items, keys_to_delete):
        """Flush a single compaction pass as a batch."""
        # Update compaction state
        if cursor == 65536:
            self.flush_count = self.comp_flush_count
            self.comp_cursor = -1
            self.comp_flush_count = -1
        else:
            self.comp_cursor = cursor

        # History DB. Flush compacted history and updated state
        with self.db.write_batch() as batch:
            # Important: delete first! The keyspace may overlap.
            for key in keys_to_delete:
                batch.delete(HASHX_HISTORY_PREFIX + key)
            for key, value in write_items:
                batch.put(HASHX_HISTORY_PREFIX + key, value)
            self.write_state(batch)

    def _compact_hashX(self, hashX, hist_map, hist_list,
                       write_items, keys_to_delete):
        """Compress history for a hashX. hist_list is an ordered list of
        the histories to be compressed."""
        # History entries (tx numbers) are 4 bytes each. Distribute over
        # rows of up to 50KB in size (max_hist_row_entries = 12,500 entries
        # of 4 bytes each). A fixed row size means future compactions will
        # not need to update the first N - 1 rows.
        max_row_size = self.max_hist_row_entries * 4
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.logger.info('hashX {} is large: {:,d} entries across '
                             '{:,d} rows'
                             .format(hash_to_hex_str(hashX),
                                     len(full_hist) // 4, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted. Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashX + pack_be_uint16(n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size

    def _compact_prefix(self, prefix, write_items, keys_to_delete):
        """Compact all history entries for hashXs beginning with the
        given prefix. Update keys_to_delete and write."""
        prior_hashX = None
        hist_map = {}
        hist_list = []

        key_len = HASHX_LEN + 2
        write_size = 0
        for key, hist in self.db.iterator(prefix=HASHX_HISTORY_PREFIX + prefix):
            k = key[1:]
            # Ignore non-history entries
            if len(k) != key_len:
                continue
            hashX = k[:-2]
            if hashX != prior_hashX and prior_hashX:
                write_size += self._compact_hashX(prior_hashX, hist_map,
                                                  hist_list, write_items,
                                                  keys_to_delete)
                hist_map.clear()
                hist_list.clear()
            prior_hashX = hashX
            hist_map[k] = hist
            hist_list.append(hist)

        if prior_hashX:
            write_size += self._compact_hashX(prior_hashX, hist_map, hist_list,
                                              write_items, keys_to_delete)
        return write_size

    def _compact_history(self, limit):
        """Inner loop of history compaction. Loops until limit bytes have
        been processed.
        """
        keys_to_delete = set()
        write_items = []  # A list of (key, value) pairs
        write_size = 0

        # Loop over 2-byte prefixes
        cursor = self.comp_cursor
        while write_size < limit and cursor < 65536:
            prefix = pack_be_uint16(cursor)
            write_size += self._compact_prefix(prefix, write_items,
                                               keys_to_delete)
            cursor += 1

        max_rows = self.comp_flush_count + 1
        self._flush_compaction(cursor, write_items, keys_to_delete)

        self.logger.info('history compaction: wrote {:,d} rows ({:.1f} MB), '
                         'removed {:,d} rows, largest: {:,d}, {:.1f}% complete'
                         .format(len(write_items), write_size / 1000000,
                                 len(keys_to_delete), max_rows,
                                 100 * cursor / 65536))
        return write_size

    def _cancel_compaction(self):
        if self.comp_cursor != -1:
            self.logger.warning('cancelling in-progress history compaction')
            self.comp_flush_count = -1
            self.comp_cursor = -1
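

# Note on the `db` object handed to open_db(): nothing above requires a
# specific backend, only the following surface (this summary is inferred
# from the calls made in this module, not from any backend's docs):
#
#   db.get(key)                          -> bytes, or a falsy value if absent
#   db.iterator(prefix=..., reverse=...) -> iterable of (key, value) pairs
#   db.write_batch()                     -> context manager with put()/delete()
#   db.for_sync                          -> bool, gates the flush() log line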