# Copyright (c) 2016, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.

"""Interface to the blockchain database."""

import asyncio
import array
import ast
import base64
import os
import time
import typing
import struct
import zlib
from typing import Optional, Iterable
from functools import partial
from asyncio import sleep
from bisect import bisect_right, bisect_left
from collections import namedtuple, defaultdict
from glob import glob
from struct import pack, unpack
from concurrent.futures.thread import ThreadPoolExecutor
import attr
from lbry.utils import LRUCacheWithMetrics
from lbry.schema.url import URL
from lbry.wallet.server import util
from lbry.wallet.server.hash import hash_to_hex_str, CLAIM_HASH_LEN
from lbry.wallet.server.tx import TxInput
from lbry.wallet.server.merkle import Merkle, MerkleCache
from lbry.wallet.server.util import formatted_time, pack_be_uint16, unpack_be_uint16_from
from lbry.wallet.server.storage import db_class
from lbry.wallet.server.db.revertable import RevertablePut, RevertableDelete, RevertableOp, delete_prefix
from lbry.wallet.server.db import DB_PREFIXES
from lbry.wallet.server.db.prefixes import Prefixes
from lbry.wallet.server.db.claimtrie import StagedClaimtrieItem, get_update_effective_amount_ops, length_encoded_name
from lbry.wallet.server.db.claimtrie import get_expiration_height

UTXO = namedtuple("UTXO", "tx_num tx_pos tx_hash height value")

TXO_STRUCT = struct.Struct(b'>LH')
TXO_STRUCT_unpack = TXO_STRUCT.unpack
TXO_STRUCT_pack = TXO_STRUCT.pack


HISTORY_PREFIX = b'A'
TX_PREFIX = b'B'
BLOCK_HASH_PREFIX = b'C'
HEADER_PREFIX = b'H'
TX_NUM_PREFIX = b'N'
TX_COUNT_PREFIX = b'T'
UNDO_PREFIX = b'U'
TX_HASH_PREFIX = b'X'

HASHX_UTXO_PREFIX = b'h'
HIST_STATE = b'state-hist'
UTXO_STATE = b'state-utxo'
UTXO_PREFIX = b'u'
HASHX_HISTORY_PREFIX = b'x'

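# A minimal sketch of the key layout these prefixes imply: every record key is
# the one-byte prefix followed by a fixed-width big-endian integer (or a raw
# hash), so iterating a prefix yields entries in ascending height / tx-number
# order.  For example, the tx count record for height 1000 is stored under
#
#   TX_COUNT_PREFIX + util.pack_be_uint64(1000)
#   == b'T\x00\x00\x00\x00\x00\x00\x03\xe8'
#
# while tx hashes live under TX_HASH_PREFIX + the packed tx number, and the
# reverse mapping under TX_NUM_PREFIX + the 32-byte tx hash (see flush_dbs()).
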
@attr.s(slots=True)
class FlushData:
    height = attr.ib()
    tx_count = attr.ib()
    headers = attr.ib()
    block_hashes = attr.ib()
    block_txs = attr.ib()
    claimtrie_stash = attr.ib()
    # The following are flushed to the UTXO DB if undo_infos is not None
    undo_infos = attr.ib()
    adds = attr.ib()
    deletes = attr.ib()
    tip = attr.ib()
    undo_claimtrie = attr.ib()

class ResolveResult(typing.NamedTuple):
    name: str
    claim_hash: bytes
    tx_num: int
    position: int
    tx_hash: bytes
    height: int
    short_url: str
    is_controlling: bool
    canonical_url: str
    creation_height: int
    activation_height: int
    expiration_height: int
    effective_amount: int
    support_amount: int
    last_take_over_height: Optional[int]
    claims_in_channel: Optional[int]
    channel_hash: Optional[bytes]
    reposted_claim_hash: Optional[bytes]

class LevelDB:
    """Simple wrapper of the backend database for querying.

    Performs no DB update, though the DB will be cleaned on opening if
    it was shutdown uncleanly.
    """

    DB_VERSIONS = [6]
    HIST_DB_VERSIONS = [0, 6]

    class DBError(Exception):
        """Raised on general DB errors generally indicating corruption."""

    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin
        self.executor = None

        self.logger.info(f'switching current directory to {env.db_dir}')

        self.db_class = db_class(env.db_dir, self.env.db_engine)
        self.db = None

        self.hist_unflushed = defaultdict(partial(array.array, 'I'))
        self.hist_unflushed_count = 0
        self.hist_flush_count = 0
        self.hist_comp_flush_count = -1
        self.hist_comp_cursor = -1

        self.tx_counts = None
        self.headers = None
        self.encoded_headers = LRUCacheWithMetrics(1 << 21, metric_name='encoded_headers', namespace='wallet_server')
        self.last_flush = time.time()

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_db = None
        self.tx_db = None

        self._tx_and_merkle_cache = LRUCacheWithMetrics(2 ** 17, metric_name='tx_and_merkle', namespace="wallet_server")
        self.total_transactions = None
        self.transaction_num_mapping = {}

    def claim_hash_and_name_from_txo(self, tx_num: int, tx_idx: int):
        claim_hash_and_name = self.db.get(
            DB_PREFIXES.txo_to_claim.value + TXO_STRUCT_pack(tx_num, tx_idx)
        )
        if not claim_hash_and_name:
            return
        return claim_hash_and_name[:CLAIM_HASH_LEN], claim_hash_and_name[CLAIM_HASH_LEN:]

    def get_supported_claim_from_txo(self, tx_num, tx_idx: int):
        supported_claim_hash = self.db.get(
            DB_PREFIXES.support_to_claim.value + TXO_STRUCT_pack(tx_num, tx_idx)
        )
        if supported_claim_hash:
            packed_support_amount = self.db.get(
                Prefixes.claim_to_support.pack_key(supported_claim_hash, tx_num, tx_idx)
            )
            if packed_support_amount is not None:
                return supported_claim_hash, Prefixes.claim_to_support.unpack_value(packed_support_amount).amount
        return None, None

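    # The two lookups above address a txo by its (tx_num, position) pair,
    # serialized with TXO_STRUCT (big-endian uint32 + uint16, 6 bytes total).
    # A minimal sketch of that encoding:
    #
    #   >>> TXO_STRUCT_pack(1024, 1)
    #   b'\x00\x00\x04\x00\x00\x01'
    #   >>> TXO_STRUCT_unpack(b'\x00\x00\x04\x00\x00\x01')
    #   (1024, 1)
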
    def get_support_amount(self, claim_hash: bytes):
        total = 0
        for packed in self.db.iterator(prefix=DB_PREFIXES.claim_to_support.value + claim_hash, include_key=False):
            total += Prefixes.claim_to_support.unpack_value(packed).amount
        return total

    def get_supports(self, claim_hash: bytes):
        supports = []
        for k, v in self.db.iterator(prefix=DB_PREFIXES.claim_to_support.value + claim_hash):
            unpacked_k = Prefixes.claim_to_support.unpack_key(k)
            unpacked_v = Prefixes.claim_to_support.unpack_value(v)
            supports.append((unpacked_k.tx_num, unpacked_k.position, unpacked_v.amount))
        return supports

    def _prepare_resolve_result(self, tx_num: int, position: int, claim_hash: bytes, name: str, root_tx_num: int,
                                root_position: int) -> ResolveResult:
        tx_hash = self.total_transactions[tx_num]
        height = bisect_right(self.tx_counts, tx_num)
        created_height = bisect_right(self.tx_counts, root_tx_num)
        last_take_over_height = 0
        activation_height = created_height

        expiration_height = get_expiration_height(height)
        support_amount = self.get_support_amount(claim_hash)
        effective_amount = self.get_effective_amount(claim_hash)
        channel_hash = self.get_channel_for_claim(claim_hash)

        claims_in_channel = None
        short_url = f'{name}#{claim_hash.hex()}'
        canonical_url = short_url
        if channel_hash:
            channel_vals = self.get_root_claim_txo_and_current_amount(channel_hash)
            if channel_vals:
                _, _, _, channel_name, _, _ = channel_vals
                claims_in_channel = self.get_claims_in_channel_count(channel_hash)
                canonical_url = f'{channel_name}#{channel_hash.hex()}/{name}#{claim_hash.hex()}'
        return ResolveResult(
            name, claim_hash, tx_num, position, tx_hash, height, short_url=short_url,
            is_controlling=False, canonical_url=canonical_url, last_take_over_height=last_take_over_height,
            claims_in_channel=claims_in_channel, creation_height=created_height, activation_height=activation_height,
            expiration_height=expiration_height, effective_amount=effective_amount, support_amount=support_amount,
            channel_hash=channel_hash, reposted_claim_hash=None
        )

    def _resolve(self, normalized_name: str, claim_id: Optional[str] = None,
                 amount_order: int = 1) -> Optional[ResolveResult]:
        """
        :param normalized_name: name
        :param claim_id: partial or complete claim id
        :param amount_order: '$<value>' suffix to a url, defaults to 1 (winning) if no claim id modifier is provided
        """

        encoded_name = length_encoded_name(normalized_name)
        amount_order = max(int(amount_order or 1), 1)
        if claim_id:
            # resolve by partial/complete claim id
            short_claim_hash = bytes.fromhex(claim_id)
            prefix = DB_PREFIXES.claim_short_id_prefix.value + encoded_name + short_claim_hash
            for k, v in self.db.iterator(prefix=prefix):
                key = Prefixes.claim_short_id.unpack_key(k)
                claim_txo = Prefixes.claim_short_id.unpack_value(v)
                return self._prepare_resolve_result(claim_txo.tx_num, claim_txo.position, key.claim_hash, key.name,
                                                    key.root_tx_num, key.root_position)
            return

        # resolve by amount ordering, 1 indexed
        for idx, (k, v) in enumerate(self.db.iterator(prefix=DB_PREFIXES.claim_effective_amount_prefix.value + encoded_name)):
            if amount_order > idx + 1:
                continue
            key = Prefixes.claim_effective_amount.unpack_key(k)
            claim_val = Prefixes.claim_effective_amount.unpack_value(v)
            return self._prepare_resolve_result(
                key.tx_num, key.position, claim_val.claim_hash, key.name, claim_val.root_tx_num,
                claim_val.root_position
            )
        return

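    # How the two branches above are reached, as a rough sketch (the exact URL
    # grammar lives in lbry.schema.url, so treat these literals as assumptions):
    #
    #   _resolve('name', claim_id='abc')    # 'name#abc' -> partial claim id branch
    #   _resolve('name', amount_order=2)    # 'name$2'   -> second highest effective amount
    #   _resolve('name')                    # 'name'     -> amount_order defaults to 1 (winning claim)
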
    def _resolve_claim_in_channel(self, channel_hash: bytes, normalized_name: str):
        prefix = DB_PREFIXES.channel_to_claim.value + channel_hash + length_encoded_name(normalized_name)
        candidates = []
        for k, v in self.db.iterator(prefix=prefix):
            key = Prefixes.channel_to_claim.unpack_key(k)
            stream = Prefixes.channel_to_claim.unpack_value(v)
            if not candidates or candidates[-1][-1] == key.effective_amount:
                candidates.append((stream.claim_hash, key.tx_num, key.position, key.effective_amount))
            else:
                break
        if not candidates:
            return
        return list(sorted(candidates, key=lambda item: item[1]))[0]

    def _fs_resolve(self, url):
        try:
            parsed = URL.parse(url)
        except ValueError as e:
            return e, None

        stream = channel = resolved_channel = resolved_stream = None
        if parsed.has_stream_in_channel:
            channel = parsed.channel
            stream = parsed.stream
        elif parsed.has_channel:
            channel = parsed.channel
        elif parsed.has_stream:
            stream = parsed.stream
        if channel:
            resolved_channel = self._resolve(channel.normalized, channel.claim_id, channel.amount_order)
            if not resolved_channel:
                return None, LookupError(f'Could not find channel in "{url}".')
        if stream:
            if resolved_channel:
                stream_claim = self._resolve_claim_in_channel(resolved_channel.claim_hash, stream.normalized)
                if stream_claim:
                    stream_claim_id, stream_tx_num, stream_tx_pos, effective_amount = stream_claim
                    resolved_stream = self._fs_get_claim_by_hash(stream_claim_id)
            else:
                resolved_stream = self._resolve(stream.normalized, stream.claim_id, stream.amount_order)
                if not channel and not resolved_channel and resolved_stream and resolved_stream.channel_hash:
                    resolved_channel = self._fs_get_claim_by_hash(resolved_stream.channel_hash)
            if not resolved_stream:
                return LookupError(f'Could not find claim at "{url}".'), None

        return resolved_stream, resolved_channel

    async def fs_resolve(self, url):
        return await asyncio.get_event_loop().run_in_executor(self.executor, self._fs_resolve, url)

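    # Rough contract of the resolve helpers above: _fs_resolve returns a
    # (stream, channel) pair where either element may instead carry the error
    # describing why that part of the URL could not be resolved (a ValueError
    # from URL parsing, or a LookupError for a missing claim or channel).
    # A sketch of how a caller might consume it (hypothetical caller code):
    #
    #   stream, channel = await db.fs_resolve('lbry://@channel/stream')
    #   if isinstance(stream, Exception):
    #       ...  # report the resolution failure
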
    def _fs_get_claim_by_hash(self, claim_hash):
        for k, v in self.db.iterator(prefix=DB_PREFIXES.claim_to_txo.value + claim_hash):
            unpacked_k = Prefixes.claim_to_txo.unpack_key(k)
            unpacked_v = Prefixes.claim_to_txo.unpack_value(v)
            return self._prepare_resolve_result(
                unpacked_k.tx_num, unpacked_k.position, unpacked_k.claim_hash, unpacked_v.name,
                unpacked_v.root_tx_num, unpacked_v.root_position
            )

    async def fs_getclaimbyid(self, claim_id):
        return await asyncio.get_event_loop().run_in_executor(
            self.executor, self._fs_get_claim_by_hash, bytes.fromhex(claim_id)
        )

    def claim_exists(self, claim_hash: bytes):
        for _ in self.db.iterator(prefix=DB_PREFIXES.claim_to_txo.value + claim_hash, include_value=False):
            return True
        return False

    def get_root_claim_txo_and_current_amount(self, claim_hash):
        for k, v in self.db.iterator(prefix=DB_PREFIXES.claim_to_txo.value + claim_hash):
            unpacked_k = Prefixes.claim_to_txo.unpack_key(k)
            unpacked_v = Prefixes.claim_to_txo.unpack_value(v)
            return unpacked_v.root_tx_num, unpacked_v.root_position, unpacked_v.amount, unpacked_v.name,\
                unpacked_k.tx_num, unpacked_k.position

    def make_staged_claim_item(self, claim_hash: bytes) -> StagedClaimtrieItem:
        root_tx_num, root_idx, value, name, tx_num, idx = self.get_root_claim_txo_and_current_amount(
            claim_hash
        )
        height = bisect_right(self.tx_counts, tx_num)
        effective_amount = self.get_support_amount(claim_hash) + value
        signing_hash = self.get_channel_for_claim(claim_hash)
        activation_height = 0
        if signing_hash:
            count = self.get_claims_in_channel_count(signing_hash)
        else:
            count = 0
        return StagedClaimtrieItem(
            name, claim_hash, value, effective_amount, activation_height, get_expiration_height(height), tx_num, idx,
            root_tx_num, root_idx, signing_hash, count
        )

    def get_effective_amount(self, claim_hash):
        for v in self.db.iterator(prefix=DB_PREFIXES.claim_to_txo.value + claim_hash, include_key=False):
            return Prefixes.claim_to_txo.unpack_value(v).amount + self.get_support_amount(claim_hash)
        return None

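    # Worked example of the arithmetic above, with made-up amounts in the
    # chain's smallest units: a claim whose claim_to_txo value carries
    # amount=100_000_000 and which has two supports of 50_000_000 each
    # resolves to an effective amount of
    # 100_000_000 + (50_000_000 + 50_000_000) = 200_000_000, while a claim
    # hash with no claim_to_txo entry yields None.
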
    def get_update_effective_amount_ops(self, claim_hash: bytes, effective_amount: int):
        claim_info = self.get_root_claim_txo_and_current_amount(claim_hash)
        if not claim_info:
            return []
        root_tx_num, root_position, amount, name, tx_num, position = claim_info
        signing_hash = self.get_channel_for_claim(claim_hash)
        claims_in_channel_count = None
        if signing_hash:
            claims_in_channel_count = self.get_claims_in_channel_count(signing_hash)
        prev_effective_amount = self.get_effective_amount(claim_hash)
        return get_update_effective_amount_ops(
            name, effective_amount, prev_effective_amount, tx_num, position,
            root_tx_num, root_position, claim_hash, signing_hash, claims_in_channel_count
        )

    def get_claims_in_channel_count(self, channel_hash) -> int:
        for v in self.db.iterator(prefix=DB_PREFIXES.channel_to_claim.value + channel_hash, include_key=False):
            return Prefixes.channel_to_claim.unpack_value(v).claims_in_channel
        return 0

    def get_channel_for_claim(self, claim_hash) -> Optional[bytes]:
        return self.db.get(DB_PREFIXES.claim_to_channel.value + claim_hash)

    def get_expired_by_height(self, height: int):
        expired = {}
        for _k, _v in self.db.iterator(prefix=DB_PREFIXES.claim_expiration.value + struct.pack(b'>L', height)):
            k, v = Prefixes.claim_expiration.unpack_item(_k, _v)
            tx_hash = self.total_transactions[k.tx_num]
            tx = self.coin.transaction(self.db.get(DB_PREFIXES.TX_PREFIX.value + tx_hash))
            # treat it like a claim spend so it will delete/abandon properly
            # the _spend_claim function this result is fed to expects a txi, so make a mock one
            expired[v.claim_hash] = (
                k.tx_num, k.position, v.name,
                TxInput(prev_hash=tx_hash, prev_idx=k.position, script=tx.outputs[k.position].pk_script, sequence=0)
            )
        return expired

    # def add_unflushed(self, hashXs_by_tx, first_tx_num):
    #     unflushed = self.history.unflushed
    #     count = 0
    #     for tx_num, hashXs in enumerate(hashXs_by_tx, start=first_tx_num):
    #         hashXs = set(hashXs)
    #         for hashX in hashXs:
    #             unflushed[hashX].append(tx_num)
    #         count += len(hashXs)
    #     self.history.unflushed_count += count

    # def unflushed_memsize(self):
    #     return len(self.history.unflushed) * 180 + self.history.unflushed_count * 4

    async def _read_tx_counts(self):
        if self.tx_counts is not None:
            return
        # tx_counts[N] has the cumulative number of txs at the end of
        # height N. So tx_counts[0] is 1 - the genesis coinbase

        def get_counts():
            return tuple(
                util.unpack_be_uint64(tx_count)
                for tx_count in self.db.iterator(prefix=TX_COUNT_PREFIX, include_key=False)
            )

        tx_counts = await asyncio.get_event_loop().run_in_executor(self.executor, get_counts)
        assert len(tx_counts) == self.db_height + 1, f"{len(tx_counts)} vs {self.db_height + 1}"
        self.tx_counts = array.array('I', tx_counts)

        if self.tx_counts:
            assert self.db_tx_count == self.tx_counts[-1], \
                f"{self.db_tx_count} vs {self.tx_counts[-1]} ({len(self.tx_counts)} counts)"
        else:
            assert self.db_tx_count == 0

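    # tx_counts is what lets the rest of this class translate between a global
    # transaction number and a block height. A small worked example with
    # made-up numbers: if tx_counts == [1, 3, 6] (1 tx in the genesis block,
    # 2 more at height 1, 3 more at height 2), then
    #
    #   bisect_right(tx_counts, 0) == 0   # tx #0 is the genesis coinbase
    #   bisect_right(tx_counts, 1) == 1   # txs #1 and #2 are in block 1
    #   bisect_right(tx_counts, 5) == 2   # txs #3..#5 are in block 2
    #
    # which is exactly how fs_tx_hash() and _prepare_resolve_result() recover
    # a height from a tx_num.
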
    async def _read_txids(self):
        def get_txids():
            return list(self.db.iterator(prefix=TX_HASH_PREFIX, include_key=False))

        start = time.perf_counter()
        self.logger.info("loading txids")
        txids = await asyncio.get_event_loop().run_in_executor(self.executor, get_txids)
        assert len(txids) == len(self.tx_counts) == 0 or len(txids) == self.tx_counts[-1]
        self.total_transactions = txids
        self.transaction_num_mapping = {
            txid: i for i, txid in enumerate(txids)
        }
        ts = time.perf_counter() - start
        self.logger.info("loaded %i txids in %ss", len(self.total_transactions), round(ts, 4))

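    # total_transactions and transaction_num_mapping are inverse views of the
    # same data: total_transactions[tx_num] gives the raw tx hash for a global
    # transaction number, and transaction_num_mapping[tx_hash] gives the
    # number back, so transaction_num_mapping[total_transactions[n]] == n for
    # any valid n. Both are kept fully in memory, which is why loading them is
    # timed and logged above.
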
    async def _read_headers(self):
        if self.headers is not None:
            return

        def get_headers():
            return [
                header for header in self.db.iterator(prefix=HEADER_PREFIX, include_key=False)
            ]

        headers = await asyncio.get_event_loop().run_in_executor(self.executor, get_headers)
        assert len(headers) - 1 == self.db_height, f"{len(headers)} vs {self.db_height}"
        self.headers = headers

    async def _open_dbs(self, for_sync, compacting):
        if self.executor is None:
            self.executor = ThreadPoolExecutor(1)
        coin_path = os.path.join(self.env.db_dir, 'COIN')
        if not os.path.isfile(coin_path):
            with util.open_file(coin_path, create=True) as f:
                f.write(f'ElectrumX databases and metadata for '
                        f'{self.coin.NAME} {self.coin.NET}'.encode())

        assert self.db is None
        self.db = self.db_class(f'lbry-{self.env.db_engine}', for_sync)
        if self.db.is_new:
            self.logger.info('created new db: %s', f'lbry-{self.env.db_engine}')
        self.logger.info(f'opened DB (for sync: {for_sync})')

        self.read_utxo_state()

        # Then history DB
        state = self.db.get(HIST_STATE)
        if state:
            state = ast.literal_eval(state.decode())
            if not isinstance(state, dict):
                raise RuntimeError('failed reading state from history DB')
            self.hist_flush_count = state['flush_count']
            self.hist_comp_flush_count = state.get('comp_flush_count', -1)
            self.hist_comp_cursor = state.get('comp_cursor', -1)
            self.hist_db_version = state.get('db_version', 0)
        else:
            self.hist_flush_count = 0
            self.hist_comp_flush_count = -1
            self.hist_comp_cursor = -1
            self.hist_db_version = max(self.HIST_DB_VERSIONS)

        self.logger.info(f'history DB version: {self.hist_db_version}')
        if self.hist_db_version not in self.HIST_DB_VERSIONS:
            msg = f'this software only handles DB versions {self.HIST_DB_VERSIONS}'
            self.logger.error(msg)
            raise RuntimeError(msg)
        self.logger.info(f'flush count: {self.hist_flush_count:,d}')

        # self.history.clear_excess(self.utxo_flush_count)
        # < might happen at end of compaction as both DBs cannot be
        # updated atomically
        if self.hist_flush_count > self.utxo_flush_count:
            self.logger.info('DB shut down uncleanly. Scanning for excess history flushes...')

            keys = []
            for key, hist in self.db.iterator(prefix=HASHX_HISTORY_PREFIX):
                k = key[1:]
                flush_id, = unpack_be_uint16_from(k[-2:])
                if flush_id > self.utxo_flush_count:
                    keys.append(k)

            self.logger.info(f'deleting {len(keys):,d} history entries')

            self.hist_flush_count = self.utxo_flush_count
            with self.db.write_batch() as batch:
                for key in keys:
                    batch.delete(HASHX_HISTORY_PREFIX + key)
                state = {
                    'flush_count': self.hist_flush_count,
                    'comp_flush_count': self.hist_comp_flush_count,
                    'comp_cursor': self.hist_comp_cursor,
                    'db_version': self.hist_db_version,
                }
                # History entries are not prefixed; the suffix \0\0 ensures we
                # look similar to other entries and aren't interfered with
                batch.put(HIST_STATE, repr(state).encode())

            self.logger.info('deleted excess history entries')

        self.utxo_flush_count = self.hist_flush_count

        min_height = self.min_undo_height(self.db_height)
        keys = []
        for key, hist in self.db.iterator(prefix=UNDO_PREFIX):
            height, = unpack('>I', key[-4:])
            if height >= min_height:
                break
            keys.append(key)

        if keys:
            with self.db.write_batch() as batch:
                for key in keys:
                    batch.delete(key)
            self.logger.info(f'deleted {len(keys):,d} stale undo entries')

        # delete old block files
        prefix = self.raw_block_prefix()
        paths = [path for path in glob(f'{prefix}[0-9]*')
                 if len(path) > len(prefix)
                 and int(path[len(prefix):]) < min_height]
        if paths:
            for path in paths:
                try:
                    os.remove(path)
                except FileNotFoundError:
                    pass
            self.logger.info(f'deleted {len(paths):,d} stale block files')

        # Read TX counts (requires meta directory)
        await self._read_tx_counts()
        if self.total_transactions is None:
            await self._read_txids()
        await self._read_headers()

    def close(self):
        self.db.close()
        self.executor.shutdown(wait=True)
        self.executor = None

    async def open_for_compacting(self):
        await self._open_dbs(True, True)

    async def open_for_sync(self):
        """Open the databases to sync to the daemon.

        When syncing we want to reserve a lot of open files for the
        synchronization. When serving clients we want the open files for
        serving network connections.
        """
        self.logger.info("opened for sync")
        await self._open_dbs(True, False)

    async def open_for_serving(self):
        """Open the databases for serving. If they are already open they are
        closed first.
        """
        if self.db:
            return
            # self.logger.info('closing DBs to re-open for serving')
            # self.db.close()
            # self.history.close_db()
            # self.db = None

        await self._open_dbs(False, False)
        self.logger.info("opened for serving")

    # Header merkle cache

    async def populate_header_merkle_cache(self):
        self.logger.info('populating header merkle cache...')
        length = max(1, self.db_height - self.env.reorg_limit)
        start = time.time()
        await self.header_mc.initialize(length)
        elapsed = time.time() - start
        self.logger.info(f'header merkle cache populated in {elapsed:.1f}s')

    async def header_branch_and_root(self, length, height):
        return await self.header_mc.branch_and_root(length, height)

    # Flushing

    def assert_flushed(self, flush_data):
        """Asserts state is fully flushed."""
        assert flush_data.tx_count == self.fs_tx_count == self.db_tx_count
        assert flush_data.height == self.fs_height == self.db_height
        assert flush_data.tip == self.db_tip
        assert not flush_data.headers
        assert not flush_data.block_txs
        assert not flush_data.adds
        assert not flush_data.deletes
        assert not flush_data.undo_infos
        assert not self.hist_unflushed

    def flush_utxo_db(self, batch, flush_data):
        """Flush the cached DB writes and UTXO set to the batch."""
        # Care is needed because the writes generated by flushing the
        # UTXO state may have keys in common with our write cache or
        # may be in the DB already.
        start_time = time.time()
        add_count = len(flush_data.adds)
        spend_count = len(flush_data.deletes) // 2

        if self.db.for_sync:
            block_count = flush_data.height - self.db_height
            tx_count = flush_data.tx_count - self.db_tx_count
            elapsed = time.time() - start_time
            self.logger.info(f'flushed {block_count:,d} blocks with '
                             f'{tx_count:,d} txs, {add_count:,d} UTXO adds, '
                             f'{spend_count:,d} spends in '
                             f'{elapsed:.1f}s, committing...')

        self.utxo_flush_count = self.hist_flush_count
        self.db_height = flush_data.height
        self.db_tx_count = flush_data.tx_count
        self.db_tip = flush_data.tip

    def write_history_state(self, batch):
        state = {
            'flush_count': self.hist_flush_count,
            'comp_flush_count': self.hist_comp_flush_count,
            'comp_cursor': self.hist_comp_cursor,
            'db_version': self.db_version,
        }
        # History entries are not prefixed; the suffix \0\0 ensures we
        # look similar to other entries and aren't interfered with
        batch.put(DB_PREFIXES.HIST_STATE.value, repr(state).encode())

    def flush_dbs(self, flush_data: FlushData, estimate_txs_remaining):
        """Flush out cached state. History is always flushed; UTXOs are
        flushed if flush_utxos."""

        if flush_data.height == self.db_height:
            self.assert_flushed(flush_data)
            return

        start_time = time.time()
        prior_flush = self.last_flush
        tx_delta = flush_data.tx_count - self.last_flush_tx_count

        # Flush to file system
        # self.flush_fs(flush_data)
        prior_tx_count = (self.tx_counts[self.fs_height]
                          if self.fs_height >= 0 else 0)

        assert len(flush_data.block_txs) == len(flush_data.headers)
        assert flush_data.height == self.fs_height + len(flush_data.headers)
        assert flush_data.tx_count == (self.tx_counts[-1] if self.tx_counts
                                       else 0)
        assert len(self.tx_counts) == flush_data.height + 1
        assert len(
            b''.join(hashes for hashes, _ in flush_data.block_txs)
        ) // 32 == flush_data.tx_count - prior_tx_count, f"{len(b''.join(hashes for hashes, _ in flush_data.block_txs)) // 32} != {flush_data.tx_count}"

        # Write the headers
        start_time = time.perf_counter()

        with self.db.write_batch() as batch:
            self.put = batch.put
            batch_put = self.put
            batch_delete = batch.delete
            height_start = self.fs_height + 1
            tx_num = prior_tx_count
            for i, (header, block_hash, (tx_hashes, txs)) in enumerate(
                    zip(flush_data.headers, flush_data.block_hashes, flush_data.block_txs)):
                batch_put(DB_PREFIXES.HEADER_PREFIX.value + util.pack_be_uint64(height_start), header)
                self.headers.append(header)
                tx_count = self.tx_counts[height_start]
                batch_put(DB_PREFIXES.BLOCK_HASH_PREFIX.value + util.pack_be_uint64(height_start), block_hash[::-1])
                batch_put(DB_PREFIXES.TX_COUNT_PREFIX.value + util.pack_be_uint64(height_start), util.pack_be_uint64(tx_count))
                height_start += 1
                offset = 0
                while offset < len(tx_hashes):
                    batch_put(DB_PREFIXES.TX_HASH_PREFIX.value + util.pack_be_uint64(tx_num), tx_hashes[offset:offset + 32])
                    batch_put(DB_PREFIXES.TX_NUM_PREFIX.value + tx_hashes[offset:offset + 32], util.pack_be_uint64(tx_num))
                    batch_put(DB_PREFIXES.TX_PREFIX.value + tx_hashes[offset:offset + 32], txs[offset // 32])
                    tx_num += 1
                    offset += 32
            flush_data.headers.clear()
            flush_data.block_txs.clear()
            flush_data.block_hashes.clear()

            for staged_change in flush_data.claimtrie_stash:
                # print("ADVANCE", staged_change)
                if staged_change.is_put:
                    batch_put(staged_change.key, staged_change.value)
                else:
                    batch_delete(staged_change.key)
            flush_data.claimtrie_stash.clear()
            for undo_claims, height in flush_data.undo_claimtrie:
                batch_put(DB_PREFIXES.undo_claimtrie.value + util.pack_be_uint64(height), undo_claims)
            flush_data.undo_claimtrie.clear()
            self.fs_height = flush_data.height
            self.fs_tx_count = flush_data.tx_count

            # Then history
            self.hist_flush_count += 1
            flush_id = pack_be_uint16(self.hist_flush_count)
            unflushed = self.hist_unflushed

            for hashX in sorted(unflushed):
                key = hashX + flush_id
                batch_put(DB_PREFIXES.HASHX_HISTORY_PREFIX.value + key, unflushed[hashX].tobytes())
            self.write_history_state(batch)

            unflushed.clear()
            self.hist_unflushed_count = 0

            #########################

            # New undo information
            for undo_info, height in flush_data.undo_infos:
                batch_put(self.undo_key(height), b''.join(undo_info))
            flush_data.undo_infos.clear()

            # Spends
            for key in sorted(flush_data.deletes):
                batch_delete(key)
            flush_data.deletes.clear()

            # New UTXOs
            for key, value in flush_data.adds.items():
                # suffix = tx_idx + tx_num
                hashX = value[:-12]
                suffix = key[-2:] + value[-12:-8]
                batch_put(DB_PREFIXES.HASHX_UTXO_PREFIX.value + key[:4] + suffix, hashX)
                batch_put(DB_PREFIXES.UTXO_PREFIX.value + hashX + suffix, value[-8:])
            flush_data.adds.clear()

            # Flush state last as it reads the wall time.
            start_time = time.time()
            add_count = len(flush_data.adds)
            spend_count = len(flush_data.deletes) // 2

            if self.db.for_sync:
                block_count = flush_data.height - self.db_height
                tx_count = flush_data.tx_count - self.db_tx_count
                elapsed = time.time() - start_time
                self.logger.info(f'flushed {block_count:,d} blocks with '
                                 f'{tx_count:,d} txs, {add_count:,d} UTXO adds, '
                                 f'{spend_count:,d} spends in '
                                 f'{elapsed:.1f}s, committing...')

            self.utxo_flush_count = self.hist_flush_count
            self.db_height = flush_data.height
            self.db_tx_count = flush_data.tx_count
            self.db_tip = flush_data.tip

            # self.flush_state(batch)
            #
            now = time.time()
            self.wall_time += now - self.last_flush
            self.last_flush = now
            self.last_flush_tx_count = self.fs_tx_count
            self.write_utxo_state(batch)

        # # Update and put the wall time again - otherwise we drop the
        # # time it took to commit the batch
        # # self.flush_state(self.db)
        # now = time.time()
        # self.wall_time += now - self.last_flush
        # self.last_flush = now
        # self.last_flush_tx_count = self.fs_tx_count
        # self.write_utxo_state(batch)

        elapsed = self.last_flush - start_time
        self.logger.info(f'flush #{self.hist_flush_count:,d} took '
                         f'{elapsed:.1f}s. Height {flush_data.height:,d} '
                         f'txs: {flush_data.tx_count:,d} ({tx_delta:+,d})')

        # Catch-up stats
        if self.db.for_sync:
            flush_interval = self.last_flush - prior_flush
            tx_per_sec_gen = int(flush_data.tx_count / self.wall_time)
            tx_per_sec_last = 1 + int(tx_delta / flush_interval)
            eta = estimate_txs_remaining() / tx_per_sec_last
            self.logger.info(f'tx/sec since genesis: {tx_per_sec_gen:,d}, '
                             f'since last flush: {tx_per_sec_last:,d}')
            self.logger.info(f'sync time: {formatted_time(self.wall_time)} '
                             f'ETA: {formatted_time(eta)}')

    # def flush_state(self, batch):
    #     """Flush chain state to the batch."""
    #     now = time.time()
    #     self.wall_time += now - self.last_flush
    #     self.last_flush = now
    #     self.last_flush_tx_count = self.fs_tx_count
    #     self.write_utxo_state(batch)

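    # Rough write ordering inside flush_dbs() above, all within a single write
    # batch so a crash cannot leave a partially applied flush: headers, block
    # hashes and tx records first, then the claimtrie stash and its undo
    # blobs, then address history, then UTXO undo info, spends and new UTXOs,
    # and finally the UTXO/history state records (which is why the state is
    # written last, after the wall time is read).
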
    def flush_backup(self, flush_data, touched):
        """Like flush_dbs() but when backing up. All UTXOs are flushed."""
        assert not flush_data.headers
        assert not flush_data.block_txs
        assert flush_data.height < self.db_height
        assert not self.hist_unflushed

        start_time = time.time()
        tx_delta = flush_data.tx_count - self.last_flush_tx_count
        ###
        self.fs_tx_count = flush_data.tx_count
        # Truncate header_mc: header count is 1 more than the height.
        self.header_mc.truncate(flush_data.height + 1)
        ###
        # Not certain this is needed, but it doesn't hurt
        self.hist_flush_count += 1
        nremoves = 0

        with self.db.write_batch() as batch:
            batch_put = batch.put
            batch_delete = batch.delete

            claim_reorg_height = self.fs_height
            print("flush undos", flush_data.undo_claimtrie)
            for (ops, height) in reversed(flush_data.undo_claimtrie):
                claimtrie_ops = RevertableOp.unpack_stack(ops)
                print("%i undo ops for %i" % (len(claimtrie_ops), height))
                for op in reversed(claimtrie_ops):
                    print("REWIND", op)
                    if op.is_put:
                        batch_put(op.key, op.value)
                    else:
                        batch_delete(op.key)
                batch_delete(DB_PREFIXES.undo_claimtrie.value + util.pack_be_uint64(claim_reorg_height))
                claim_reorg_height -= 1

            flush_data.undo_claimtrie.clear()
            flush_data.claimtrie_stash.clear()

            while self.fs_height > flush_data.height:
                self.fs_height -= 1
                self.headers.pop()
            tx_count = flush_data.tx_count
            for hashX in sorted(touched):
                deletes = []
                puts = {}
                for key, hist in self.db.iterator(prefix=DB_PREFIXES.HASHX_HISTORY_PREFIX.value + hashX, reverse=True):
                    k = key[1:]
                    a = array.array('I')
                    a.frombytes(hist)
                    # Remove all history entries >= tx_count
                    idx = bisect_left(a, tx_count)
                    nremoves += len(a) - idx
                    if idx > 0:
                        puts[k] = a[:idx].tobytes()
                        break
                    deletes.append(k)

                for key in deletes:
                    batch_delete(key)
                for key, value in puts.items():
                    batch_put(key, value)

            self.write_history_state(batch)

            # New undo information
            for undo_info, height in flush_data.undo_infos:
                batch.put(self.undo_key(height), b''.join(undo_info))
            flush_data.undo_infos.clear()

            # Spends
            for key in sorted(flush_data.deletes):
                batch_delete(key)
            flush_data.deletes.clear()

            # New UTXOs
            for key, value in flush_data.adds.items():
                # suffix = tx_idx + tx_num
                hashX = value[:-12]
                suffix = key[-2:] + value[-12:-8]
                batch_put(DB_PREFIXES.HASHX_UTXO_PREFIX.value + key[:4] + suffix, hashX)
                batch_put(DB_PREFIXES.UTXO_PREFIX.value + hashX + suffix, value[-8:])
            flush_data.adds.clear()

            self.flush_utxo_db(batch, flush_data)
            start_time = time.time()
            add_count = len(flush_data.adds)
            spend_count = len(flush_data.deletes) // 2

            if self.db.for_sync:
                block_count = flush_data.height - self.db_height
                tx_count = flush_data.tx_count - self.db_tx_count
                elapsed = time.time() - start_time
                self.logger.info(f'flushed {block_count:,d} blocks with '
                                 f'{tx_count:,d} txs, {add_count:,d} UTXO adds, '
                                 f'{spend_count:,d} spends in '
                                 f'{elapsed:.1f}s, committing...')

            self.utxo_flush_count = self.hist_flush_count
            self.db_height = flush_data.height
            self.db_tx_count = flush_data.tx_count
            self.db_tip = flush_data.tip

            # Flush state last as it reads the wall time.
            now = time.time()
            self.wall_time += now - self.last_flush
            self.last_flush = now
            self.last_flush_tx_count = self.fs_tx_count
            self.write_utxo_state(batch)

        self.logger.info(f'backing up removed {nremoves:,d} history entries')
        elapsed = self.last_flush - start_time
        self.logger.info(f'backup flush #{self.hist_flush_count:,d} took {elapsed:.1f}s. '
                         f'Height {flush_data.height:,d} txs: {flush_data.tx_count:,d} ({tx_delta:+,d})')

    def raw_header(self, height):
        """Return the binary header at the given height."""
        header, n = self.read_headers(height, 1)
        if n != 1:
            raise IndexError(f'height {height:,d} out of range')
        return header

    def encode_headers(self, start_height, count, headers):
        key = (start_height, count)
        if not self.encoded_headers.get(key):
            compressobj = zlib.compressobj(wbits=-15, level=1, memLevel=9)
            headers = base64.b64encode(compressobj.compress(headers) + compressobj.flush()).decode()
            if start_height % 1000 != 0:
                return headers
            self.encoded_headers[key] = headers
        return self.encoded_headers.get(key)

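    # encode_headers() ships headers to clients as base64 of a raw deflate
    # stream (wbits=-15 means no zlib header or trailer), which is why this
    # module needs the zlib import at the top. Only responses whose
    # start_height is a multiple of 1000 are kept in the LRU cache, so ad-hoc
    # ranges are compressed on demand while the common checkpoint-aligned
    # requests are served from memory. A decoding sketch for the client side
    # (an assumption, not taken from this codebase):
    #
    #   zlib.decompressobj(wbits=-15).decompress(base64.b64decode(encoded))
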
    def read_headers(self, start_height, count) -> typing.Tuple[bytes, int]:
        """Requires start_height >= 0, count >= 0. Reads as many headers as
        are available starting at start_height up to count. This
        would be zero if start_height is beyond self.db_height, for
        example.

        Returns a (binary, n) pair where binary is the concatenated
        binary headers, and n is the count of headers returned.
        """

        if start_height < 0 or count < 0:
            raise self.DBError(f'{count:,d} headers starting at '
                               f'{start_height:,d} not on disk')

        disk_count = max(0, min(count, self.db_height + 1 - start_height))
        if disk_count:
            return b''.join(self.headers[start_height:start_height + disk_count]), disk_count
        return b'', 0

    def fs_tx_hash(self, tx_num):
        """Return a pair (tx_hash, tx_height) for the given tx number.

        If the tx_height is not on disk, returns (None, tx_height)."""
        tx_height = bisect_right(self.tx_counts, tx_num)
        if tx_height > self.db_height:
            return None, tx_height
        try:
            return self.total_transactions[tx_num], tx_height
        except IndexError:
            self.logger.exception(
                "Failed to access a cached transaction, known bug #3142 "
                "should be fixed in #3205"
            )
            return None, tx_height

    def _fs_transactions(self, txids: Iterable[str]):
        unpack_be_uint64 = util.unpack_be_uint64
        tx_counts = self.tx_counts
        tx_db_get = self.db.get
        tx_cache = self._tx_and_merkle_cache
        tx_infos = {}

        for tx_hash in txids:
            cached_tx = tx_cache.get(tx_hash)
            if cached_tx:
                tx, merkle = cached_tx
            else:
                tx_hash_bytes = bytes.fromhex(tx_hash)[::-1]
                tx_num = tx_db_get(DB_PREFIXES.TX_NUM_PREFIX.value + tx_hash_bytes)
                tx = None
                tx_height = -1
                if tx_num is not None:
                    tx_num = unpack_be_uint64(tx_num)
                    tx_height = bisect_right(tx_counts, tx_num)
                    if tx_height < self.db_height:
                        tx = tx_db_get(DB_PREFIXES.TX_PREFIX.value + tx_hash_bytes)
                if tx_height == -1:
                    merkle = {
                        'block_height': -1
                    }
                else:
                    tx_pos = tx_num - tx_counts[tx_height - 1]
                    branch, root = self.merkle.branch_and_root(
                        self.total_transactions[tx_counts[tx_height - 1]:tx_counts[tx_height]], tx_pos
                    )
                    merkle = {
                        'block_height': tx_height,
                        'merkle': [
                            hash_to_hex_str(hash)
                            for hash in branch
                        ],
                        'pos': tx_pos
                    }
                if tx_height + 10 < self.db_height:
                    tx_cache[tx_hash] = tx, merkle
            tx_infos[tx_hash] = (None if not tx else tx.hex(), merkle)
        return tx_infos

    async def fs_transactions(self, txids):
        return await asyncio.get_event_loop().run_in_executor(self.executor, self._fs_transactions, txids)

    async def fs_block_hashes(self, height, count):
        if height + count > len(self.headers):
            raise self.DBError(f'only got {len(self.headers) - height:,d} headers starting at {height:,d}, not {count:,d}')
        return [self.coin.header_hash(header) for header in self.headers[height:height + count]]

    async def limited_history(self, hashX, *, limit=1000):
        """Return an unpruned, sorted list of (tx_hash, height) tuples of
        confirmed transactions that touched the address, earliest in
        the blockchain first. Includes both spending and receiving
        transactions. By default returns at most 1000 entries. Set
        limit to None to get them all.
        """

        def read_history():
            db_height = self.db_height
            tx_counts = self.tx_counts
            tx_db_get = self.db.get
            pack_be_uint64 = util.pack_be_uint64

            cnt = 0
            txs = []

            for hist in self.db.iterator(prefix=DB_PREFIXES.HASHX_HISTORY_PREFIX.value + hashX, include_key=False):
                a = array.array('I')
                a.frombytes(hist)
                for tx_num in a:
                    tx_height = bisect_right(tx_counts, tx_num)
                    if tx_height > db_height:
                        return
                    txs.append((tx_num, tx_height))
                    cnt += 1
                    if limit and cnt >= limit:
                        break
                if limit and cnt >= limit:
                    break
            return txs

        while True:
            history = await asyncio.get_event_loop().run_in_executor(self.executor, read_history)
            if history is not None:
                return [(self.total_transactions[tx_num], tx_height) for (tx_num, tx_height) in history]
            self.logger.warning(f'limited_history: tx hash '
                                f'not found (reorg?), retrying...')
            await sleep(0.25)

    # -- Undo information

    def min_undo_height(self, max_height):
        """Returns a height from which we should store undo info."""
        return max_height - self.env.reorg_limit + 1

    def undo_key(self, height):
        """DB key for undo information at the given height."""
        return UNDO_PREFIX + pack('>I', height)

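    # Undo keys are simply UNDO_PREFIX (b'U') followed by the big-endian
    # uint32 height, so undo_key(1000) == b'U\x00\x00\x03\xe8' and
    # clear_excess_undo_info() below can recover the height of any entry with
    # unpack('>I', key[-4:]). Only the most recent env.reorg_limit heights
    # keep their undo info.
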
    def read_undo_info(self, height):
        """Read undo information from a file for the current height."""
        undo_claims = self.db.get(DB_PREFIXES.undo_claimtrie.value + util.pack_be_uint64(self.fs_height))
        return self.db.get(self.undo_key(height)), undo_claims

    def raw_block_prefix(self):
        return 'block'

    def raw_block_path(self, height):
        return os.path.join(self.env.db_dir, f'{self.raw_block_prefix()}{height:d}')

    async def read_raw_block(self, height):
        """Returns a raw block read from disk. Raises FileNotFoundError
        if the block isn't on-disk."""

        def read():
            with util.open_file(self.raw_block_path(height)) as f:
                return f.read(-1)

        return await asyncio.get_event_loop().run_in_executor(self.executor, read)

    def write_raw_block(self, block, height):
        """Write a raw block to disk."""
        with util.open_truncate(self.raw_block_path(height)) as f:
            f.write(block)
        # Delete old blocks to prevent them accumulating
        try:
            del_height = self.min_undo_height(height) - 1
            os.remove(self.raw_block_path(del_height))
        except FileNotFoundError:
            pass

    def clear_excess_undo_info(self):
        """Clear excess undo info. Only most recent N are kept."""
        min_height = self.min_undo_height(self.db_height)
        keys = []
        for key, hist in self.db.iterator(prefix=DB_PREFIXES.UNDO_PREFIX.value):
            height, = unpack('>I', key[-4:])
            if height >= min_height:
                break
            keys.append(key)

        if keys:
            with self.db.write_batch() as batch:
                for key in keys:
                    batch.delete(key)
            self.logger.info(f'deleted {len(keys):,d} stale undo entries')

        # delete old block files
        prefix = self.raw_block_prefix()
        paths = [path for path in glob(f'{prefix}[0-9]*')
                 if len(path) > len(prefix)
                 and int(path[len(prefix):]) < min_height]
        if paths:
            for path in paths:
                try:
                    os.remove(path)
                except FileNotFoundError:
                    pass
            self.logger.info(f'deleted {len(paths):,d} stale block files')

    # -- UTXO database

    def read_utxo_state(self):
        state = self.db.get(UTXO_STATE)
        if not state:
            self.db_height = -1
            self.db_tx_count = 0
            self.db_tip = b'\0' * 32
            self.db_version = max(self.DB_VERSIONS)
            self.utxo_flush_count = 0
            self.wall_time = 0
            self.first_sync = True
        else:
            state = ast.literal_eval(state.decode())
            if not isinstance(state, dict):
                raise self.DBError('failed reading state from DB')
            self.db_version = state['db_version']
            if self.db_version not in self.DB_VERSIONS:
                raise self.DBError(f'your UTXO DB version is {self.db_version} but this '
                                   f'software only handles versions {self.DB_VERSIONS}')
            # backwards compat
            genesis_hash = state['genesis']
            if isinstance(genesis_hash, bytes):
                genesis_hash = genesis_hash.decode()
            if genesis_hash != self.coin.GENESIS_HASH:
                raise self.DBError(f'DB genesis hash {genesis_hash} does not '
                                   f'match coin {self.coin.GENESIS_HASH}')
            self.db_height = state['height']
            self.db_tx_count = state['tx_count']
            self.db_tip = state['tip']
            self.utxo_flush_count = state['utxo_flush_count']
            self.wall_time = state['wall_time']
            self.first_sync = state['first_sync']

        # These are our state as we move ahead of DB state
        self.fs_height = self.db_height
        self.fs_tx_count = self.db_tx_count
        self.last_flush_tx_count = self.fs_tx_count

        # Log some stats
        self.logger.info(f'DB version: {self.db_version:d}')
        self.logger.info(f'coin: {self.coin.NAME}')
        self.logger.info(f'network: {self.coin.NET}')
        self.logger.info(f'height: {self.db_height:,d}')
        self.logger.info(f'tip: {hash_to_hex_str(self.db_tip)}')
        self.logger.info(f'tx count: {self.db_tx_count:,d}')
        if self.db.for_sync:
            self.logger.info(f'flushing DB cache at {self.env.cache_MB:,d} MB')
        if self.first_sync:
            self.logger.info(f'sync time so far: {util.formatted_time(self.wall_time)}')

    def write_utxo_state(self, batch):
        """Write (UTXO) state to the batch."""
        state = {
            'genesis': self.coin.GENESIS_HASH,
            'height': self.db_height,
            'tx_count': self.db_tx_count,
            'tip': self.db_tip,
            'utxo_flush_count': self.utxo_flush_count,
            'wall_time': self.wall_time,
            'first_sync': self.first_sync,
            'db_version': self.db_version,
        }
        batch.put(DB_PREFIXES.UTXO_STATE.value, repr(state).encode())

    def set_flush_count(self, count):
        self.utxo_flush_count = count
        with self.db.write_batch() as batch:
            self.write_utxo_state(batch)

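    # The UTXO and history state records are stored as the repr() of a plain
    # Python dict and parsed back with ast.literal_eval() in read_utxo_state()
    # and _open_dbs(), so the round trip is just (illustrative values):
    #
    #   >>> state = {'height': 0, 'tip': b'\x00' * 32, 'first_sync': True}
    #   >>> ast.literal_eval(repr(state).encode().decode()) == state
    #   True
    #
    # literal_eval only accepts Python literals, which keeps this safe against
    # arbitrary code in a tampered value.
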
    async def all_utxos(self, hashX):
        """Return all UTXOs for an address sorted in no particular order."""
        def read_utxos():
            utxos = []
            utxos_append = utxos.append
            s_unpack = unpack
            fs_tx_hash = self.fs_tx_hash
            # Key: b'u' + address_hashX + tx_idx + tx_num
            # Value: the UTXO value as a 64-bit unsigned integer
            prefix = DB_PREFIXES.UTXO_PREFIX.value + hashX
            for db_key, db_value in self.db.iterator(prefix=prefix):
                tx_pos, tx_num = s_unpack('<HI', db_key[-6:])
                value, = unpack('<Q', db_value)
                tx_hash, height = fs_tx_hash(tx_num)
                utxos_append(UTXO(tx_num, tx_pos, tx_hash, height, value))
            return utxos

        while True:
            utxos = await asyncio.get_event_loop().run_in_executor(self.executor, read_utxos)
            if all(utxo.tx_hash is not None for utxo in utxos):
                return utxos
            self.logger.warning(f'all_utxos: tx hash not '
                                f'found (reorg?), retrying...')
            await sleep(0.25)

    async def lookup_utxos(self, prevouts):
        """For each prevout, look it up in the DB and return a (hashX,
        value) pair or None if not found.

        Used by the mempool code.
        """
        def lookup_hashXs():
            """Return (hashX, suffix) pairs, or None if not found,
            for each prevout.
            """
            def lookup_hashX(tx_hash, tx_idx):
                idx_packed = pack('<H', tx_idx)

                # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
                # Value: hashX
                prefix = DB_PREFIXES.HASHX_UTXO_PREFIX.value + tx_hash[:4] + idx_packed

                # Find which entry, if any, the TX_HASH matches.
                for db_key, hashX in self.db.iterator(prefix=prefix):
                    tx_num_packed = db_key[-4:]
                    tx_num, = unpack('<I', tx_num_packed)
                    hash, height = self.fs_tx_hash(tx_num)
                    if hash == tx_hash:
                        return hashX, idx_packed + tx_num_packed
                return None, None
            return [lookup_hashX(*prevout) for prevout in prevouts]

        def lookup_utxos(hashX_pairs):
            def lookup_utxo(hashX, suffix):
                if not hashX:
                    # This can happen when the daemon is a block ahead
                    # of us and has mempool txs spending outputs from
                    # that new block
                    return None
                # Key: b'u' + address_hashX + tx_idx + tx_num
                # Value: the UTXO value as a 64-bit unsigned integer
                key = DB_PREFIXES.UTXO_PREFIX.value + hashX + suffix
                db_value = self.db.get(key)
                if not db_value:
                    # This can happen if the DB was updated between
                    # getting the hashXs and getting the UTXOs
                    return None
                value, = unpack('<Q', db_value)
                return hashX, value
            return [lookup_utxo(*hashX_pair) for hashX_pair in hashX_pairs]

        hashX_pairs = await asyncio.get_event_loop().run_in_executor(self.executor, lookup_hashXs)
        return await asyncio.get_event_loop().run_in_executor(self.executor, lookup_utxos, hashX_pairs)

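    # Putting the two UTXO tables side by side (layouts taken from the
    # comments above), for a prevout (tx_hash, tx_idx) of a tx with global
    # number tx_num owned by address hashX:
    #
    #   hashX lookup:  b'h' + tx_hash[:4] + pack('<H', tx_idx) + pack('<I', tx_num) -> hashX
    #   value lookup:  b'u' + hashX + pack('<H', tx_idx) + pack('<I', tx_num)       -> pack('<Q', value)
    #
    # lookup_utxos() above chains the two: the suffix returned by the first
    # lookup is reused verbatim to build the key for the second.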