diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 3b3fb5947..df55605de 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -78,7 +78,11 @@ jobs: test: - datanetwork - blockchain - - blockchain_legacy_search + - claims + - takeovers + - transactions + - claims_legacy_search + - takeovers_legacy_search - other steps: - name: Configure sysctl limits diff --git a/.gitignore b/.gitignore index 22999a054..79951dafd 100644 --- a/.gitignore +++ b/.gitignore @@ -13,7 +13,7 @@ __pycache__ _trial_temp/ trending*.log -/tests/integration/blockchain/files +/tests/integration/claims/files /tests/.coverage.* /lbry/wallet/bin diff --git a/lbry/error/__init__.py b/lbry/error/__init__.py index 7f16a3a41..f8c9d3165 100644 --- a/lbry/error/__init__.py +++ b/lbry/error/__init__.py @@ -252,9 +252,10 @@ class ResolveTimeoutError(WalletError): class ResolveCensoredError(WalletError): - def __init__(self, url, censor_id): + def __init__(self, url, censor_id, censor_row): self.url = url self.censor_id = censor_id + self.censor_row = censor_row super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.") diff --git a/lbry/extras/daemon/daemon.py b/lbry/extras/daemon/daemon.py index 14d82884d..212411188 100644 --- a/lbry/extras/daemon/daemon.py +++ b/lbry/extras/daemon/daemon.py @@ -2282,7 +2282,7 @@ class Daemon(metaclass=JSONRPCServerType): accounts = wallet.get_accounts_or_all(funding_account_ids) txo = None if claim_id: - txo = await self.ledger.get_claim_by_claim_id(accounts, claim_id, include_purchase_receipt=True) + txo = await self.ledger.get_claim_by_claim_id(claim_id, accounts, include_purchase_receipt=True) if not isinstance(txo, Output) or not txo.is_claim: # TODO: use error from lbry.error raise Exception(f"Could not find claim with claim_id '{claim_id}'.") @@ -3616,7 +3616,7 @@ class Daemon(metaclass=JSONRPCServerType): claim_address = old_txo.get_address(account.ledger) channel = None - if channel_id or channel_name: + if not clear_channel and (channel_id or channel_name): channel = await self.get_channel_or_error( wallet, channel_account_id, channel_id, channel_name, for_signing=True) elif old_txo.claim.is_signed and not clear_channel and not replace: @@ -3646,11 +3646,13 @@ class Daemon(metaclass=JSONRPCServerType): else: claim = Claim.from_bytes(old_txo.claim.to_bytes()) claim.stream.update(file_path=file_path, **kwargs) + if clear_channel: + claim.clear_signature() tx = await Transaction.claim_update( - old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel + old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], + channel if not clear_channel else None ) new_txo = tx.outputs[0] - stream_hash = None if not preview: old_stream = self.file_manager.get_filtered(sd_hash=old_txo.claim.stream.source.sd_hash) @@ -4148,7 +4150,7 @@ class Daemon(metaclass=JSONRPCServerType): wallet = self.wallet_manager.get_wallet_or_default(wallet_id) if claim_id: - txo = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id) + txo = await self.ledger.get_claim_by_claim_id(claim_id, wallet.accounts) if not isinstance(txo, Output) or not txo.is_claim: # TODO: use error from lbry.error raise Exception(f"Could not find collection with claim_id '{claim_id}'.") @@ -4215,7 +4217,7 @@ class Daemon(metaclass=JSONRPCServerType): funding_accounts = wallet.get_accounts_or_all(funding_account_ids) channel = await self.get_channel_or_none(wallet, channel_account_id, channel_id, channel_name, 
for_signing=True) amount = self.get_dewies_or_error("amount", amount) - claim = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id) + claim = await self.ledger.get_claim_by_claim_id(claim_id) claim_address = claim.get_address(self.ledger) if not tip: account = wallet.get_account_or_default(account_id) diff --git a/lbry/schema/result.py b/lbry/schema/result.py index ef86c7696..eed4b9d6d 100644 --- a/lbry/schema/result.py +++ b/lbry/schema/result.py @@ -1,23 +1,27 @@ import base64 -import struct -from typing import List +from typing import List, TYPE_CHECKING, Union, Optional from binascii import hexlify from itertools import chain from lbry.error import ResolveCensoredError from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage +if TYPE_CHECKING: + from lbry.wallet.server.leveldb import ResolveResult INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID) NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND) BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED) -def set_reference(reference, txo_row): - if txo_row: - reference.tx_hash = txo_row['txo_hash'][:32] - reference.nout = struct.unpack(' bool: + def censor(self, row) -> Optional[bytes]: if self.is_censored(row): censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1] self.censored.setdefault(censoring_channel_hash, set()) self.censored[censoring_channel_hash].add(row['tx_hash']) - return True - return False + return censoring_channel_hash + return None def to_message(self, outputs: OutputsMessage, extra_txo_rows: dict): for censoring_channel_hash, count in self.censored.items(): blocked = outputs.blocked.add() blocked.count = len(count) - set_reference(blocked.channel, extra_txo_rows.get(censoring_channel_hash)) + set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows) outputs.blocked_total += len(count) @@ -115,10 +119,10 @@ class Outputs: 'expiration_height': claim.expiration_height, 'effective_amount': claim.effective_amount, 'support_amount': claim.support_amount, - 'trending_group': claim.trending_group, - 'trending_mixed': claim.trending_mixed, - 'trending_local': claim.trending_local, - 'trending_global': claim.trending_global, + # 'trending_group': claim.trending_group, + # 'trending_mixed': claim.trending_mixed, + # 'trending_local': claim.trending_local, + # 'trending_global': claim.trending_global, } if claim.HasField('channel'): txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout] @@ -169,51 +173,54 @@ class Outputs: @classmethod def to_bytes(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked: Censor = None) -> bytes: - extra_txo_rows = {row['claim_hash']: row for row in extra_txo_rows} page = OutputsMessage() page.offset = offset if total is not None: page.total = total if blocked is not None: blocked.to_message(page, extra_txo_rows) + for row in extra_txo_rows: + cls.encode_txo(page.extra_txos.add(), row) + for row in txo_rows: - cls.row_to_message(row, page.txos.add(), extra_txo_rows) - for row in extra_txo_rows.values(): - cls.row_to_message(row, page.extra_txos.add(), extra_txo_rows) + # cls.row_to_message(row, page.txos.add(), extra_txo_rows) + txo_message: 'OutputsMessage' = page.txos.add() + cls.encode_txo(txo_message, row) + if not isinstance(row, Exception): + if row.channel_hash: + set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows) + if row.reposted_claim_hash: + set_reference(txo_message.claim.repost, row.reposted_claim_hash, 
extra_txo_rows) + elif isinstance(row, ResolveCensoredError): + set_reference(txo_message.error.blocked.channel, row.censor_id, extra_txo_rows) return page.SerializeToString() @classmethod - def row_to_message(cls, txo, txo_message, extra_row_dict: dict): - if isinstance(txo, Exception): - txo_message.error.text = txo.args[0] - if isinstance(txo, ValueError): + def encode_txo(cls, txo_message, resolve_result: Union['ResolveResult', Exception]): + if isinstance(resolve_result, Exception): + txo_message.error.text = resolve_result.args[0] + if isinstance(resolve_result, ValueError): txo_message.error.code = ErrorMessage.INVALID - elif isinstance(txo, LookupError): + elif isinstance(resolve_result, LookupError): txo_message.error.code = ErrorMessage.NOT_FOUND - elif isinstance(txo, ResolveCensoredError): + elif isinstance(resolve_result, ResolveCensoredError): txo_message.error.code = ErrorMessage.BLOCKED - set_reference(txo_message.error.blocked.channel, extra_row_dict.get(bytes.fromhex(txo.censor_id)[::-1])) return - txo_message.tx_hash = txo['txo_hash'][:32] - txo_message.nout, = struct.unpack(' Transaction: + async def confirm_and_render(self, awaitable, confirm, return_tx=False) -> Transaction: tx = await awaitable if confirm: await self.ledger.wait(tx) await self.generate(1) await self.ledger.wait(tx, self.blockchain.block_expected) - return self.sout(tx) + if not return_tx: + return self.sout(tx) + return tx def create_upload_file(self, data, prefix=None, suffix=None): file_path = tempfile.mktemp(prefix=prefix or "tmp", suffix=suffix or "", dir=self.daemon.conf.upload_dir) @@ -507,19 +509,19 @@ class CommandTestCase(IntegrationTestCase): async def stream_create( self, name='hovercraft', bid='1.0', file_path=None, - data=b'hi!', confirm=True, prefix=None, suffix=None, **kwargs): + data=b'hi!', confirm=True, prefix=None, suffix=None, return_tx=False, **kwargs): if file_path is None and data is not None: file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix) return await self.confirm_and_render( - self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm + self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm, return_tx ) async def stream_update( - self, claim_id, data=None, prefix=None, suffix=None, confirm=True, **kwargs): + self, claim_id, data=None, prefix=None, suffix=None, confirm=True, return_tx=False, **kwargs): if data is not None: file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix) return await self.confirm_and_render( - self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm + self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm, return_tx ) return await self.confirm_and_render( self.daemon.jsonrpc_stream_update(claim_id, **kwargs), confirm @@ -625,6 +627,9 @@ class CommandTestCase(IntegrationTestCase): async def claim_search(self, **kwargs): return (await self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items'] + async def get_claim_by_claim_id(self, claim_id): + return await self.out(self.ledger.get_claim_by_claim_id(claim_id)) + async def file_list(self, *args, **kwargs): return (await self.out(self.daemon.jsonrpc_file_list(*args, **kwargs)))['items'] diff --git a/lbry/wallet/ledger.py b/lbry/wallet/ledger.py index 211e3ef7a..9583c22a7 100644 --- a/lbry/wallet/ledger.py +++ b/lbry/wallet/ledger.py @@ -556,7 +556,7 @@ class Ledger(metaclass=LedgerRegistry): log.info("Sync finished for address %s: %d/%d", 
address, len(pending_synced_history), len(to_request)) assert len(pending_synced_history) == len(remote_history), \ - f"{len(pending_synced_history)} vs {len(remote_history)}" + f"{len(pending_synced_history)} vs {len(remote_history)} for {address}" synced_history = "" for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())): assert i == remote_i, f"{i} vs {remote_i}" @@ -894,9 +894,21 @@ class Ledger(metaclass=LedgerRegistry): hub_server=new_sdk_server is not None ) - async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output: - for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]: - return claim + # async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output: + # return await self.network.get_claim_by_id(claim_id) + + async def get_claim_by_claim_id(self, claim_id, accounts=None, include_purchase_receipt=False, + include_is_my_output=False): + accounts = accounts or [] + # return await self.network.get_claim_by_id(claim_id) + inflated = await self._inflate_outputs( + self.network.get_claim_by_id(claim_id), accounts, + include_purchase_receipt=include_purchase_receipt, + include_is_my_output=include_is_my_output, + ) + txos = inflated[0] + if txos: + return txos[0] async def _report_state(self): try: diff --git a/lbry/wallet/network.py b/lbry/wallet/network.py index 240241a0c..5f796bef5 100644 --- a/lbry/wallet/network.py +++ b/lbry/wallet/network.py @@ -238,7 +238,7 @@ class Network: log.exception("error looking up dns for spv server %s:%i", server, port) # accumulate the dns results - if self.config['explicit_servers']: + if self.config.get('explicit_servers', []): hubs = self.config['explicit_servers'] elif self.known_hubs: hubs = self.known_hubs @@ -254,7 +254,7 @@ class Network: sent_ping_timestamps = {} _, ip_to_hostnames = await self.resolve_spv_dns() n = len(ip_to_hostnames) - log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config['explicit_servers'])) + log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config.get('explicit_servers', []))) pongs = {} known_hubs = self.known_hubs try: @@ -299,8 +299,8 @@ class Network: if (pong is not None and self.jurisdiction is not None) and \ (pong.country_name != self.jurisdiction): continue - client = ClientSession(network=self, server=(host, port), timeout=self.config['hub_timeout'], - concurrency=self.config['concurrent_hub_requests']) + client = ClientSession(network=self, server=(host, port), timeout=self.config.get('hub_timeout', 30), + concurrency=self.config.get('concurrent_hub_requests', 30)) try: await client.create_connection() log.warning("Connected to spv server %s:%i", host, port) @@ -465,6 +465,12 @@ class Network: def get_server_features(self): return self.rpc('server.features', (), restricted=True) + # def get_claims_by_ids(self, claim_ids): + # return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids) + + def get_claim_by_id(self, claim_id): + return self.rpc('blockchain.claimtrie.getclaimbyid', [claim_id]) + def resolve(self, urls, session_override=None): return self.rpc('blockchain.claimtrie.resolve', urls, False, session_override) diff --git a/lbry/wallet/orchstr8/__init__.py b/lbry/wallet/orchstr8/__init__.py index c89834383..94c7e70df 100644 --- a/lbry/wallet/orchstr8/__init__.py +++ b/lbry/wallet/orchstr8/__init__.py @@ -1,5 +1,5 @@ __hub_url__ = ( - "https://github.com/lbryio/hub/releases/download/v0.2021.08.24-beta/hub" + 
"https://github.com/lbryio/hub/releases/download/leveldb-hub/hub" ) from .node import Conductor from .service import ConductorService diff --git a/lbry/wallet/orchstr8/node.py b/lbry/wallet/orchstr8/node.py index d592f74f7..8bf1ac83a 100644 --- a/lbry/wallet/orchstr8/node.py +++ b/lbry/wallet/orchstr8/node.py @@ -196,11 +196,10 @@ class SPVNode: self.session_timeout = 600 self.rpc_port = '0' # disabled by default self.stopped = False - self.index_name = None + self.index_name = uuid4().hex async def start(self, blockchain_node: 'BlockchainNode', extraconf=None): self.data_path = tempfile.mkdtemp() - self.index_name = uuid4().hex conf = { 'DESCRIPTION': '', 'PAYMENT_ADDRESS': '', @@ -223,7 +222,7 @@ class SPVNode: # TODO: don't use os.environ os.environ.update(conf) self.server = Server(Env(self.coin_class)) - self.server.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5 + self.server.bp.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5 await self.server.start() async def stop(self, cleanup=True): diff --git a/lbry/wallet/rpc/session.py b/lbry/wallet/rpc/session.py index ceae4b125..762bb21cd 100644 --- a/lbry/wallet/rpc/session.py +++ b/lbry/wallet/rpc/session.py @@ -496,6 +496,17 @@ class RPCSession(SessionBase): self.abort() return False + async def send_notifications(self, notifications) -> bool: + """Send an RPC notification over the network.""" + message, _ = self.connection.send_batch(notifications) + try: + await self._send_message(message) + return True + except asyncio.TimeoutError: + self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True)) + self.abort() + return False + def send_batch(self, raise_errors=False): """Return a BatchRequest. Intended to be used like so: diff --git a/lbry/wallet/server/block_processor.py b/lbry/wallet/server/block_processor.py index 8e38aa9a2..370bc5f1e 100644 --- a/lbry/wallet/server/block_processor.py +++ b/lbry/wallet/server/block_processor.py @@ -1,16 +1,36 @@ import time import asyncio +import typing +from bisect import bisect_right from struct import pack, unpack -from concurrent.futures.thread import ThreadPoolExecutor -from typing import Optional +from typing import Optional, List, Tuple, Set, DefaultDict, Dict, NamedTuple from prometheus_client import Gauge, Histogram +from collections import defaultdict + import lbry -from lbry.wallet.server.db.writer import SQLDB +from lbry.schema.claim import Claim +from lbry.wallet.ledger import Ledger, TestNetLedger, RegTestLedger + +from lbry.wallet.transaction import OutputScript, Output, Transaction +from lbry.wallet.server.tx import Tx, TxOutput, TxInput from lbry.wallet.server.daemon import DaemonError from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN from lbry.wallet.server.util import chunks, class_logger -from lbry.wallet.server.leveldb import FlushData +from lbry.crypto.hash import hash160 +from lbry.wallet.server.mempool import MemPool +from lbry.wallet.server.db.prefixes import ACTIVATED_SUPPORT_TXO_TYPE, ACTIVATED_CLAIM_TXO_TYPE +from lbry.wallet.server.db.prefixes import PendingActivationKey, PendingActivationValue, ClaimToTXOValue from lbry.wallet.server.udp import StatusServer +from lbry.wallet.server.db.revertable import RevertableOpStack +if typing.TYPE_CHECKING: + from lbry.wallet.server.leveldb import LevelDB + + +class TrendingNotification(NamedTuple): + height: int + added: bool + prev_amount: int + new_amount: int class Prefetcher: @@ -129,6 +149,31 @@ class ChainError(Exception): """Raised 
on error processing blocks.""" +class StagedClaimtrieItem(typing.NamedTuple): + name: str + normalized_name: str + claim_hash: bytes + amount: int + expiration_height: int + tx_num: int + position: int + root_tx_num: int + root_position: int + channel_signature_is_valid: bool + signing_hash: Optional[bytes] + reposted_claim_hash: Optional[bytes] + + @property + def is_update(self) -> bool: + return (self.tx_num, self.position) != (self.root_tx_num, self.root_position) + + def invalidate_signature(self) -> 'StagedClaimtrieItem': + return StagedClaimtrieItem( + self.name, self.normalized_name, self.claim_hash, self.amount, self.expiration_height, self.tx_num, + self.position, self.root_tx_num, self.root_position, False, None, self.reposted_claim_hash + ) + + NAMESPACE = "wallet_server" HISTOGRAM_BUCKETS = ( .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf') @@ -152,41 +197,109 @@ class BlockProcessor: "reorg_count", "Number of reorgs", namespace=NAMESPACE ) - def __init__(self, env, db, daemon, notifications): + def __init__(self, env, db: 'LevelDB', daemon, shutdown_event: asyncio.Event): + self.state_lock = asyncio.Lock() self.env = env self.db = db self.daemon = daemon - self.notifications = notifications - + self.mempool = MemPool(env.coin, daemon, db, self.state_lock) + self.shutdown_event = shutdown_event self.coin = env.coin + if env.coin.NET == 'mainnet': + self.ledger = Ledger + elif env.coin.NET == 'testnet': + self.ledger = TestNetLedger + else: + self.ledger = RegTestLedger + + self._caught_up_event: Optional[asyncio.Event] = None + self.height = 0 + self.tip = bytes.fromhex(self.coin.GENESIS_HASH)[::-1] + self.tx_count = 0 + self.blocks_event = asyncio.Event() self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event) self.logger = class_logger(__name__, self.__class__.__name__) - self.executor = ThreadPoolExecutor(1) # Meta - self.next_cache_check = 0 - self.touched = set() - self.reorg_count = 0 - - # Caches of unflushed items. 
- self.headers = [] - self.block_hashes = [] - self.block_txs = [] - self.undo_infos = [] + self.touched_hashXs: Set[bytes] = set() # UTXO cache - self.utxo_cache = {} - self.db_deletes = [] + self.utxo_cache: Dict[Tuple[bytes, int], Tuple[bytes, int]] = {} - # If the lock is successfully acquired, in-memory chain state - # is consistent with self.height - self.state_lock = asyncio.Lock() + # Claimtrie cache + self.db_op_stack: Optional[RevertableOpStack] = None - self.search_cache = {} + # self.search_cache = {} self.history_cache = {} self.status_server = StatusServer() + ################################# + # attributes used for calculating stake activations and takeovers per block + ################################# + + self.taken_over_names: Set[str] = set() + # txo to pending claim + self.txo_to_claim: Dict[Tuple[int, int], StagedClaimtrieItem] = {} + # claim hash to pending claim txo + self.claim_hash_to_txo: Dict[bytes, Tuple[int, int]] = {} + # claim hash to lists of pending support txos + self.support_txos_by_claim: DefaultDict[bytes, List[Tuple[int, int]]] = defaultdict(list) + # support txo: (supported claim hash, support amount) + self.support_txo_to_claim: Dict[Tuple[int, int], Tuple[bytes, int]] = {} + # removed supports {name: {claim_hash: [(tx_num, nout), ...]}} + self.removed_support_txos_by_name_by_claim: DefaultDict[str, DefaultDict[bytes, List[Tuple[int, int]]]] = \ + defaultdict(lambda: defaultdict(list)) + self.abandoned_claims: Dict[bytes, StagedClaimtrieItem] = {} + self.updated_claims: Set[bytes] = set() + # removed activated support amounts by claim hash + self.removed_active_support_amount_by_claim: DefaultDict[bytes, List[int]] = defaultdict(list) + # pending activated support amounts by claim hash + self.activated_support_amount_by_claim: DefaultDict[bytes, List[int]] = defaultdict(list) + # pending activated name and claim hash to claim/update txo amount + self.activated_claim_amount_by_name_and_hash: Dict[Tuple[str, bytes], int] = {} + # pending claim and support activations per claim hash per name, + # used to process takeovers due to added activations + activation_by_claim_by_name_type = DefaultDict[str, DefaultDict[bytes, List[Tuple[PendingActivationKey, int]]]] + self.activation_by_claim_by_name: activation_by_claim_by_name_type = defaultdict(lambda: defaultdict(list)) + # these are used for detecting early takeovers by not yet activated claims/supports + self.possible_future_support_amounts_by_claim_hash: DefaultDict[bytes, List[int]] = defaultdict(list) + self.possible_future_claim_amount_by_name_and_hash: Dict[Tuple[str, bytes], int] = {} + self.possible_future_support_txos_by_claim_hash: DefaultDict[bytes, List[Tuple[int, int]]] = defaultdict(list) + + self.removed_claims_to_send_es = set() # cumulative changes across blocks to send ES + self.touched_claims_to_send_es = set() + self.activation_info_to_send_es: DefaultDict[str, List[TrendingNotification]] = defaultdict(list) + + self.removed_claim_hashes: Set[bytes] = set() # per block changes + self.touched_claim_hashes: Set[bytes] = set() + + self.signatures_changed = set() + + self.pending_reposted = set() + self.pending_channel_counts = defaultdict(lambda: 0) + self.pending_support_amount_change = defaultdict(lambda: 0) + + self.pending_channels = {} + self.amount_cache = {} + self.expired_claim_hashes: Set[bytes] = set() + + self.doesnt_have_valid_signature: Set[bytes] = set() + self.claim_channels: Dict[bytes, bytes] = {} + self.hashXs_by_tx: DefaultDict[bytes, List[int]] = defaultdict(list) + + 
self.pending_transaction_num_mapping: Dict[bytes, int] = {} + self.pending_transactions: Dict[int, bytes] = {} + + async def claim_producer(self): + if self.db.db_height <= 1: + return + + for claim_hash in self.removed_claims_to_send_es: + yield 'delete', claim_hash.hex() + async for claim in self.db.claims_producer(self.touched_claims_to_send_es): + yield 'update', claim + async def run_in_thread_with_lock(self, func, *args): # Run in a thread to prevent blocking. Shielded so that # cancellations from shutdown don't lose work - when the task @@ -195,13 +308,20 @@ class BlockProcessor: # consistent and not being updated elsewhere. async def run_in_thread_locked(): async with self.state_lock: - return await asyncio.get_event_loop().run_in_executor(self.executor, func, *args) + return await asyncio.get_event_loop().run_in_executor(None, func, *args) return await asyncio.shield(run_in_thread_locked()) + @staticmethod + async def run_in_thread(func, *args): + async def run_in_thread(): + return await asyncio.get_event_loop().run_in_executor(None, func, *args) + return await asyncio.shield(run_in_thread()) + async def check_and_advance_blocks(self, raw_blocks): """Process the list of raw blocks passed. Detects and handles reorgs. """ + if not raw_blocks: return first = self.height + 1 @@ -212,16 +332,35 @@ class BlockProcessor: chain = [self.tip] + [self.coin.header_hash(h) for h in headers[:-1]] if hprevs == chain: - start = time.perf_counter() - await self.run_in_thread_with_lock(self.advance_blocks, blocks) - if self.sql: - await self.db.search_index.claim_consumer(self.sql.claim_producer()) - for cache in self.search_cache.values(): - cache.clear() - self.history_cache.clear() - self.notifications.notified_mempool_txs.clear() - await self._maybe_flush() - processed_time = time.perf_counter() - start + total_start = time.perf_counter() + try: + for block in blocks: + start = time.perf_counter() + await self.run_in_thread(self.advance_block, block) + await self.flush() + + self.logger.info("advanced to %i in %0.3fs", self.height, time.perf_counter() - start) + if self.height == self.coin.nExtendedClaimExpirationForkHeight: + self.logger.warning( + "applying extended claim expiration fork on claims accepted by, %i", self.height + ) + await self.run_in_thread_with_lock(self.db.apply_expiration_extension_fork) + # TODO: we shouldnt wait on the search index updating before advancing to the next block + if not self.db.first_sync: + await self.db.reload_blocking_filtering_streams() + await self.db.search_index.claim_consumer(self.claim_producer()) + await self.db.search_index.apply_filters(self.db.blocked_streams, self.db.blocked_channels, + self.db.filtered_streams, self.db.filtered_channels) + await self.db.search_index.update_trending_score(self.activation_info_to_send_es) + self.db.search_index.clear_caches() + self.touched_claims_to_send_es.clear() + self.removed_claims_to_send_es.clear() + self.activation_info_to_send_es.clear() + # print("******************\n") + except: + self.logger.exception("advance blocks failed") + raise + processed_time = time.perf_counter() - total_start self.block_count_metric.set(self.height) self.block_update_time_metric.observe(processed_time) self.status_server.set_height(self.db.fs_height, self.db.db_tip) @@ -229,13 +368,44 @@ class BlockProcessor: s = '' if len(blocks) == 1 else 's' self.logger.info('processed {:,d} block{} in {:.1f}s'.format(len(blocks), s, processed_time)) if self._caught_up_event.is_set(): - if self.sql: - await 
self.db.search_index.apply_filters(self.sql.blocked_streams, self.sql.blocked_channels, - self.sql.filtered_streams, self.sql.filtered_channels) - await self.notifications.on_block(self.touched, self.height) - self.touched = set() + await self.mempool.on_block(self.touched_hashXs, self.height) + self.touched_hashXs.clear() elif hprevs[0] != chain[0]: - await self.reorg_chain() + min_start_height = max(self.height - self.coin.REORG_LIMIT, 0) + count = 1 + block_hashes_from_lbrycrd = await self.daemon.block_hex_hashes( + min_start_height, self.coin.REORG_LIMIT + ) + for height, block_hash in zip( + reversed(range(min_start_height, min_start_height + self.coin.REORG_LIMIT)), + reversed(block_hashes_from_lbrycrd)): + if self.db.get_block_hash(height)[::-1].hex() == block_hash: + break + count += 1 + self.logger.warning(f"blockchain reorg detected at {self.height}, unwinding last {count} blocks") + try: + assert count > 0, count + for _ in range(count): + await self.backup_block() + self.logger.info(f'backed up to height {self.height:,d}') + + await self.db._read_claim_txos() # TODO: don't do this + for touched in self.touched_claims_to_send_es: + if not self.db.get_claim_txo(touched): + self.removed_claims_to_send_es.add(touched) + self.touched_claims_to_send_es.difference_update(self.removed_claims_to_send_es) + await self.db.search_index.claim_consumer(self.claim_producer()) + self.db.search_index.clear_caches() + self.touched_claims_to_send_es.clear() + self.removed_claims_to_send_es.clear() + self.activation_info_to_send_es.clear() + await self.prefetcher.reset_height(self.height) + self.reorg_count_metric.inc() + except: + self.logger.exception("reorg blocks failed") + raise + finally: + self.logger.info("backed up to block %i", self.height) else: # It is probably possible but extremely rare that what # bitcoind returns doesn't form a chain because it @@ -246,398 +416,1258 @@ class BlockProcessor: 'resetting the prefetcher') await self.prefetcher.reset_height(self.height) - async def reorg_chain(self, count: Optional[int] = None): - """Handle a chain reorganisation. + async def flush(self): + save_undo = (self.daemon.cached_height() - self.height) <= self.env.reorg_limit - Count is the number of blocks to simulate a reorg, or None for - a real reorg.""" - if count is None: - self.logger.info('chain reorg detected') - else: - self.logger.info(f'faking a reorg of {count:,d} blocks') - - - async def get_raw_blocks(last_height, hex_hashes): - heights = range(last_height, last_height - len(hex_hashes), -1) - try: - blocks = [await self.db.read_raw_block(height) for height in heights] - self.logger.info(f'read {len(blocks)} blocks from disk') - return blocks - except FileNotFoundError: - return await self.daemon.raw_blocks(hex_hashes) - - def flush_backup(): - # self.touched can include other addresses which is - # harmless, but remove None. - self.touched.discard(None) - self.db.flush_backup(self.flush_data(), self.touched) - - try: - await self.flush(True) - - start, last, hashes = await self.reorg_hashes(count) - # Reverse and convert to hex strings. 
- hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)] - self.logger.info("reorg %i block hashes", len(hashes)) - for hex_hashes in chunks(hashes, 50): - raw_blocks = await get_raw_blocks(last, hex_hashes) - self.logger.info("got %i raw blocks", len(raw_blocks)) - await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks) - await self.run_in_thread_with_lock(flush_backup) - last -= len(raw_blocks) - - await self.prefetcher.reset_height(self.height) - self.reorg_count_metric.inc() - except: - self.logger.exception("boom") - raise - finally: - self.logger.info("done with reorg") - - async def reorg_hashes(self, count): - """Return a pair (start, last, hashes) of blocks to back up during a - reorg. - - The hashes are returned in order of increasing height. Start - is the height of the first hash, last of the last. - """ - start, count = await self.calc_reorg_range(count) - last = start + count - 1 - s = '' if count == 1 else 's' - self.logger.info(f'chain was reorganised replacing {count:,d} ' - f'block{s} at heights {start:,d}-{last:,d}') - - return start, last, await self.db.fs_block_hashes(start, count) - - async def calc_reorg_range(self, count: Optional[int]): - """Calculate the reorg range""" - - def diff_pos(hashes1, hashes2): - """Returns the index of the first difference in the hash lists. - If both lists match returns their length.""" - for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)): - if hash1 != hash2: - return n - return len(hashes) - - if count is None: - # A real reorg - start = self.height - 1 - count = 1 - while start > 0: - hashes = await self.db.fs_block_hashes(start, count) - hex_hashes = [hash_to_hex_str(hash) for hash in hashes] - d_hex_hashes = await self.daemon.block_hex_hashes(start, count) - n = diff_pos(hex_hashes, d_hex_hashes) - if n > 0: - start += n - break - count = min(count * 2, start) - start -= count - - count = (self.height - start) + 1 - else: - start = (self.height - count) + 1 - - return start, count - - def estimate_txs_remaining(self): - # Try to estimate how many txs there are to go - daemon_height = self.daemon.cached_height() - coin = self.coin - tail_count = daemon_height - max(self.height, coin.TX_COUNT_HEIGHT) - # Damp the initial enthusiasm - realism = max(2.0 - 0.9 * self.height / coin.TX_COUNT_HEIGHT, 1.0) - return (tail_count * coin.TX_PER_BLOCK + - max(coin.TX_COUNT - self.tx_count, 0)) * realism - - # - Flushing - def flush_data(self): - """The data for a flush. The lock must be taken.""" - assert self.state_lock.locked() - return FlushData(self.height, self.tx_count, self.headers, self.block_hashes, - self.block_txs, self.undo_infos, self.utxo_cache, - self.db_deletes, self.tip) - - async def flush(self, flush_utxos): def flush(): - self.db.flush_dbs(self.flush_data(), flush_utxos, - self.estimate_txs_remaining) + self.db.write_db_state() + if save_undo: + self.db.prefix_db.commit(self.height) + else: + self.db.prefix_db.unsafe_commit() + self.clear_after_advance_or_reorg() + self.db.assert_db_state() await self.run_in_thread_with_lock(flush) - async def _maybe_flush(self): - # If caught up, flush everything as client queries are - # performed on the DB. 
- if self._caught_up_event.is_set(): - await self.flush(True) - elif time.perf_counter() > self.next_cache_check: - await self.flush(True) - self.next_cache_check = time.perf_counter() + 30 + def _add_claim_or_update(self, height: int, txo: 'Output', tx_hash: bytes, tx_num: int, nout: int, + spent_claims: typing.Dict[bytes, typing.Tuple[int, int, str]]): + try: + claim_name = txo.script.values['claim_name'].decode() + except UnicodeDecodeError: + claim_name = ''.join(chr(c) for c in txo.script.values['claim_name']) + try: + normalized_name = txo.normalized_name + except UnicodeDecodeError: + normalized_name = claim_name + if txo.script.is_claim_name: + claim_hash = hash160(tx_hash + pack('>I', nout))[::-1] + # print(f"\tnew {claim_hash.hex()} ({tx_num} {txo.amount})") + else: + claim_hash = txo.claim_hash[::-1] + # print(f"\tupdate {claim_hash.hex()} ({tx_num} {txo.amount})") - def check_cache_size(self): - """Flush a cache if it gets too big.""" - # Good average estimates based on traversal of subobjects and - # requesting size from Python (see deep_getsizeof). - one_MB = 1000*1000 - utxo_cache_size = len(self.utxo_cache) * 205 - db_deletes_size = len(self.db_deletes) * 57 - hist_cache_size = self.db.history.unflushed_memsize() - # Roughly ntxs * 32 + nblocks * 42 - tx_hash_size = ((self.tx_count - self.db.fs_tx_count) * 32 - + (self.height - self.db.fs_height) * 42) - utxo_MB = (db_deletes_size + utxo_cache_size) // one_MB - hist_MB = (hist_cache_size + tx_hash_size) // one_MB + signing_channel_hash = None + channel_signature_is_valid = False + try: + signable = txo.signable + is_repost = txo.claim.is_repost + is_channel = txo.claim.is_channel + if txo.claim.is_signed: + signing_channel_hash = txo.signable.signing_channel_hash[::-1] + except: # google.protobuf.message.DecodeError: Could not parse JSON. + signable = None + is_repost = False + is_channel = False - self.logger.info('our height: {:,d} daemon: {:,d} ' - 'UTXOs {:,d}MB hist {:,d}MB' - .format(self.height, self.daemon.cached_height(), - utxo_MB, hist_MB)) + reposted_claim_hash = None - # Flush history if it takes up over 20% of cache memory. - # Flush UTXOs once they take up 80% of cache memory. - cache_MB = self.env.cache_MB - if utxo_MB + hist_MB >= cache_MB or hist_MB >= cache_MB // 5: - return utxo_MB >= cache_MB * 4 // 5 - return None + if is_repost: + reposted_claim_hash = txo.claim.repost.reference.claim_hash[::-1] + self.pending_reposted.add(reposted_claim_hash) - def advance_blocks(self, blocks): - """Synchronously advance the blocks. + if is_channel: + self.pending_channels[claim_hash] = txo.claim.channel.public_key_bytes - It is already verified they correctly connect onto our tip. 
- """ - min_height = self.db.min_undo_height(self.daemon.cached_height()) - height = self.height + self.doesnt_have_valid_signature.add(claim_hash) + raw_channel_tx = None + if signable and signable.signing_channel_hash: + signing_channel = self.db.get_claim_txo(signing_channel_hash) - for block in blocks: - height += 1 - undo_info = self.advance_txs( - height, block.transactions, self.coin.electrum_header(block.header, height), - self.coin.header_hash(block.header) + if signing_channel: + raw_channel_tx = self.db.prefix_db.tx.get( + self.db.prefix_db.tx_hash.get( + signing_channel.tx_num, deserialize_value=False + ), deserialize_value=False + ) + channel_pub_key_bytes = None + try: + if not signing_channel: + if txo.signable.signing_channel_hash[::-1] in self.pending_channels: + channel_pub_key_bytes = self.pending_channels[signing_channel_hash] + elif raw_channel_tx: + chan_output = self.coin.transaction(raw_channel_tx).outputs[signing_channel.position] + chan_script = OutputScript(chan_output.pk_script) + chan_script.parse() + channel_meta = Claim.from_bytes(chan_script.values['claim']) + + channel_pub_key_bytes = channel_meta.channel.public_key_bytes + if channel_pub_key_bytes: + channel_signature_is_valid = Output.is_signature_valid( + txo.get_encoded_signature(), txo.get_signature_digest(self.ledger), channel_pub_key_bytes + ) + if channel_signature_is_valid: + self.pending_channel_counts[signing_channel_hash] += 1 + self.doesnt_have_valid_signature.remove(claim_hash) + self.claim_channels[claim_hash] = signing_channel_hash + except: + self.logger.exception(f"error validating channel signature for %s:%i", tx_hash[::-1].hex(), nout) + + if txo.script.is_claim_name: # it's a root claim + root_tx_num, root_idx = tx_num, nout + previous_amount = 0 + else: # it's a claim update + if claim_hash not in spent_claims: + # print(f"\tthis is a wonky tx, contains unlinked claim update {claim_hash.hex()}") + return + if normalized_name != spent_claims[claim_hash][2]: + self.logger.warning( + f"{tx_hash[::-1].hex()} contains mismatched name for claim update {claim_hash.hex()}" + ) + return + (prev_tx_num, prev_idx, _) = spent_claims.pop(claim_hash) + # print(f"\tupdate {claim_hash.hex()} {tx_hash[::-1].hex()} {txo.amount}") + if (prev_tx_num, prev_idx) in self.txo_to_claim: + previous_claim = self.txo_to_claim.pop((prev_tx_num, prev_idx)) + self.claim_hash_to_txo.pop(claim_hash) + root_tx_num, root_idx = previous_claim.root_tx_num, previous_claim.root_position + else: + previous_claim = self._make_pending_claim_txo(claim_hash) + root_tx_num, root_idx = previous_claim.root_tx_num, previous_claim.root_position + activation = self.db.get_activation(prev_tx_num, prev_idx) + claim_name = previous_claim.name + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, claim_hash, prev_tx_num, prev_idx, activation, normalized_name, + previous_claim.amount + ) + previous_amount = previous_claim.amount + self.updated_claims.add(claim_hash) + + self.db.claim_to_txo[claim_hash] = ClaimToTXOValue( + tx_num, nout, root_tx_num, root_idx, txo.amount, channel_signature_is_valid, claim_name + ) + self.db.txo_to_claim[tx_num][nout] = claim_hash + + pending = StagedClaimtrieItem( + claim_name, normalized_name, claim_hash, txo.amount, self.coin.get_expiration_height(height), tx_num, nout, + root_tx_num, root_idx, channel_signature_is_valid, signing_channel_hash, reposted_claim_hash + ) + self.txo_to_claim[(tx_num, nout)] = pending + self.claim_hash_to_txo[claim_hash] = (tx_num, nout) + 
self.get_add_claim_utxo_ops(pending) + + def get_add_claim_utxo_ops(self, pending: StagedClaimtrieItem): + # claim tip by claim hash + self.db.prefix_db.claim_to_txo.stage_put( + (pending.claim_hash,), (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, + pending.amount, pending.channel_signature_is_valid, pending.name) + ) + # claim hash by txo + self.db.prefix_db.txo_to_claim.stage_put( + (pending.tx_num, pending.position), (pending.claim_hash, pending.normalized_name) + ) + + # claim expiration + self.db.prefix_db.claim_expiration.stage_put( + (pending.expiration_height, pending.tx_num, pending.position), + (pending.claim_hash, pending.normalized_name) + ) + + # short url resolution + for prefix_len in range(10): + self.db.prefix_db.claim_short_id.stage_put( + (pending.normalized_name, pending.claim_hash.hex()[:prefix_len + 1], + pending.root_tx_num, pending.root_position), + (pending.tx_num, pending.position) ) - if height >= min_height: - self.undo_infos.append((undo_info, height)) - self.db.write_raw_block(block.raw, height) - headers = [block.header for block in blocks] - self.height = height - self.headers.extend(headers) - self.tip = self.coin.header_hash(headers[-1]) + if pending.signing_hash and pending.channel_signature_is_valid: + # channel by stream + self.db.prefix_db.claim_to_channel.stage_put( + (pending.claim_hash, pending.tx_num, pending.position), (pending.signing_hash,) + ) + # stream by channel + self.db.prefix_db.channel_to_claim.stage_put( + (pending.signing_hash, pending.normalized_name, pending.tx_num, pending.position), + (pending.claim_hash,) + ) - def advance_txs(self, height, txs, header, block_hash): - self.block_hashes.append(block_hash) - self.block_txs.append((b''.join(tx_hash for tx, tx_hash in txs), [tx.raw for tx, _ in txs])) + if pending.reposted_claim_hash: + self.db.prefix_db.repost.stage_put((pending.claim_hash,), (pending.reposted_claim_hash,)) + self.db.prefix_db.reposted_claim.stage_put( + (pending.reposted_claim_hash, pending.tx_num, pending.position), (pending.claim_hash,) + ) - undo_info = [] - tx_num = self.tx_count - hashXs_by_tx = [] + def get_remove_claim_utxo_ops(self, pending: StagedClaimtrieItem): + # claim tip by claim hash + self.db.prefix_db.claim_to_txo.stage_delete( + (pending.claim_hash,), (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, + pending.amount, pending.channel_signature_is_valid, pending.name) + ) + # claim hash by txo + self.db.prefix_db.txo_to_claim.stage_delete( + (pending.tx_num, pending.position), (pending.claim_hash, pending.normalized_name) + ) + # claim expiration + self.db.prefix_db.claim_expiration.stage_delete( + (pending.expiration_height, pending.tx_num, pending.position), + (pending.claim_hash, pending.normalized_name) + ) + + # short url resolution + for prefix_len in range(10): + self.db.prefix_db.claim_short_id.stage_delete( + (pending.normalized_name, pending.claim_hash.hex()[:prefix_len + 1], + pending.root_tx_num, pending.root_position), + (pending.tx_num, pending.position) + ) + + if pending.signing_hash and pending.channel_signature_is_valid: + # channel by stream + self.db.prefix_db.claim_to_channel.stage_delete( + (pending.claim_hash, pending.tx_num, pending.position), (pending.signing_hash,) + ) + # stream by channel + self.db.prefix_db.channel_to_claim.stage_delete( + (pending.signing_hash, pending.normalized_name, pending.tx_num, pending.position), + (pending.claim_hash,) + ) + + if pending.reposted_claim_hash: + 
self.db.prefix_db.repost.stage_delete((pending.claim_hash,), (pending.reposted_claim_hash,)) + self.db.prefix_db.reposted_claim.stage_delete( + (pending.reposted_claim_hash, pending.tx_num, pending.position), (pending.claim_hash,) + ) + + def _add_support(self, height: int, txo: 'Output', tx_num: int, nout: int): + supported_claim_hash = txo.claim_hash[::-1] + self.support_txos_by_claim[supported_claim_hash].append((tx_num, nout)) + self.support_txo_to_claim[(tx_num, nout)] = supported_claim_hash, txo.amount + # print(f"\tsupport claim {supported_claim_hash.hex()} +{txo.amount}") + + self.db.prefix_db.claim_to_support.stage_put((supported_claim_hash, tx_num, nout), (txo.amount,)) + self.db.prefix_db.support_to_claim.stage_put((tx_num, nout), (supported_claim_hash,)) + self.pending_support_amount_change[supported_claim_hash] += txo.amount + + def _add_claim_or_support(self, height: int, tx_hash: bytes, tx_num: int, nout: int, txo: 'Output', + spent_claims: typing.Dict[bytes, Tuple[int, int, str]]): + if txo.script.is_claim_name or txo.script.is_update_claim: + self._add_claim_or_update(height, txo, tx_hash, tx_num, nout, spent_claims) + elif txo.script.is_support_claim or txo.script.is_support_claim_data: + self._add_support(height, txo, tx_num, nout) + + def _spend_support_txo(self, height: int, txin: TxInput): + txin_num = self.get_pending_tx_num(txin.prev_hash) + activation = 0 + if (txin_num, txin.prev_idx) in self.support_txo_to_claim: + spent_support, support_amount = self.support_txo_to_claim.pop((txin_num, txin.prev_idx)) + self.support_txos_by_claim[spent_support].remove((txin_num, txin.prev_idx)) + supported_name = self._get_pending_claim_name(spent_support) + self.removed_support_txos_by_name_by_claim[supported_name][spent_support].append((txin_num, txin.prev_idx)) + else: + spent_support, support_amount = self.db.get_supported_claim_from_txo(txin_num, txin.prev_idx) + if not spent_support: # it is not a support + return + supported_name = self._get_pending_claim_name(spent_support) + if supported_name is not None: + self.removed_support_txos_by_name_by_claim[supported_name][spent_support].append( + (txin_num, txin.prev_idx)) + activation = self.db.get_activation(txin_num, txin.prev_idx, is_support=True) + if 0 < activation < self.height + 1: + self.removed_active_support_amount_by_claim[spent_support].append(support_amount) + if supported_name is not None and activation > 0: + self.get_remove_activate_ops( + ACTIVATED_SUPPORT_TXO_TYPE, spent_support, txin_num, txin.prev_idx, activation, supported_name, + support_amount + ) + # print(f"\tspent support for {spent_support.hex()} activation:{activation} {support_amount}") + self.db.prefix_db.claim_to_support.stage_delete((spent_support, txin_num, txin.prev_idx), (support_amount,)) + self.db.prefix_db.support_to_claim.stage_delete((txin_num, txin.prev_idx), (spent_support,)) + self.pending_support_amount_change[spent_support] -= support_amount + + def _spend_claim_txo(self, txin: TxInput, spent_claims: Dict[bytes, Tuple[int, int, str]]) -> bool: + txin_num = self.get_pending_tx_num(txin.prev_hash) + if (txin_num, txin.prev_idx) in self.txo_to_claim: + spent = self.txo_to_claim[(txin_num, txin.prev_idx)] + else: + if txin_num not in self.db.txo_to_claim or txin.prev_idx not in self.db.txo_to_claim[txin_num]: + # txo is not a claim + return False + spent_claim_hash_and_name = self.db.get_claim_from_txo( + txin_num, txin.prev_idx + ) + assert spent_claim_hash_and_name is not None + spent = 
self._make_pending_claim_txo(spent_claim_hash_and_name.claim_hash) + claim_hash = self.db.txo_to_claim[txin_num].pop(txin.prev_idx) + if not self.db.txo_to_claim[txin_num]: + self.db.txo_to_claim.pop(txin_num) + self.db.claim_to_txo.pop(claim_hash) + if spent.reposted_claim_hash: + self.pending_reposted.add(spent.reposted_claim_hash) + if spent.signing_hash and spent.channel_signature_is_valid and spent.signing_hash not in self.abandoned_claims: + self.pending_channel_counts[spent.signing_hash] -= 1 + spent_claims[spent.claim_hash] = (spent.tx_num, spent.position, spent.normalized_name) + # print(f"\tspend lbry://{spent.name}#{spent.claim_hash.hex()}") + self.get_remove_claim_utxo_ops(spent) + return True + + def _spend_claim_or_support_txo(self, height: int, txin: TxInput, spent_claims): + if not self._spend_claim_txo(txin, spent_claims): + self._spend_support_txo(height, txin) + + def _abandon_claim(self, claim_hash: bytes, tx_num: int, nout: int, normalized_name: str): + if (tx_num, nout) in self.txo_to_claim: + pending = self.txo_to_claim.pop((tx_num, nout)) + self.claim_hash_to_txo.pop(claim_hash) + self.abandoned_claims[pending.claim_hash] = pending + claim_root_tx_num, claim_root_idx = pending.root_tx_num, pending.root_position + prev_amount, prev_signing_hash = pending.amount, pending.signing_hash + reposted_claim_hash, name = pending.reposted_claim_hash, pending.name + expiration = self.coin.get_expiration_height(self.height) + signature_is_valid = pending.channel_signature_is_valid + else: + v = self.db.get_claim_txo( + claim_hash + ) + claim_root_tx_num, claim_root_idx, prev_amount = v.root_tx_num, v.root_position, v.amount + signature_is_valid, name = v.channel_signature_is_valid, v.name + prev_signing_hash = self.db.get_channel_for_claim(claim_hash, tx_num, nout) + reposted_claim_hash = self.db.get_repost(claim_hash) + expiration = self.coin.get_expiration_height(bisect_right(self.db.tx_counts, tx_num)) + self.abandoned_claims[claim_hash] = staged = StagedClaimtrieItem( + name, normalized_name, claim_hash, prev_amount, expiration, tx_num, nout, claim_root_tx_num, + claim_root_idx, signature_is_valid, prev_signing_hash, reposted_claim_hash + ) + for support_txo_to_clear in self.support_txos_by_claim[claim_hash]: + self.support_txo_to_claim.pop(support_txo_to_clear) + self.support_txos_by_claim[claim_hash].clear() + self.support_txos_by_claim.pop(claim_hash) + if normalized_name.startswith('@'): # abandon a channel, invalidate signatures + self._invalidate_channel_signatures(claim_hash) + + def _get_invalidate_signature_ops(self, pending: StagedClaimtrieItem): + if not pending.signing_hash: + return + self.db.prefix_db.claim_to_channel.stage_delete( + (pending.claim_hash, pending.tx_num, pending.position), (pending.signing_hash,) + ) + if pending.channel_signature_is_valid: + self.db.prefix_db.channel_to_claim.stage_delete( + (pending.signing_hash, pending.normalized_name, pending.tx_num, pending.position), + (pending.claim_hash,) + ) + self.db.prefix_db.claim_to_txo.stage_delete( + (pending.claim_hash,), + (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, pending.amount, + pending.channel_signature_is_valid, pending.name) + ) + self.db.prefix_db.claim_to_txo.stage_put( + (pending.claim_hash,), + (pending.tx_num, pending.position, pending.root_tx_num, pending.root_position, pending.amount, + False, pending.name) + ) + + def _invalidate_channel_signatures(self, claim_hash: bytes): + for (signed_claim_hash, ) in 
self.db.prefix_db.channel_to_claim.iterate( + prefix=(claim_hash, ), include_key=False): + if signed_claim_hash in self.abandoned_claims or signed_claim_hash in self.expired_claim_hashes: + continue + # there is no longer a signing channel for this claim as of this block + if signed_claim_hash in self.doesnt_have_valid_signature: + continue + # the signing channel changed in this block + if signed_claim_hash in self.claim_channels and signed_claim_hash != self.claim_channels[signed_claim_hash]: + continue + + # if the claim with an invalidated signature is in this block, update the StagedClaimtrieItem + # so that if we later try to spend it in this block we won't try to delete the channel info twice + if signed_claim_hash in self.claim_hash_to_txo: + signed_claim_txo = self.claim_hash_to_txo[signed_claim_hash] + claim = self.txo_to_claim[signed_claim_txo] + if claim.signing_hash != claim_hash: # claim was already invalidated this block + continue + self.txo_to_claim[signed_claim_txo] = claim.invalidate_signature() + else: + claim = self._make_pending_claim_txo(signed_claim_hash) + self.signatures_changed.add(signed_claim_hash) + self.pending_channel_counts[claim_hash] -= 1 + self._get_invalidate_signature_ops(claim) + + for staged in list(self.txo_to_claim.values()): + needs_invalidate = staged.claim_hash not in self.doesnt_have_valid_signature + if staged.signing_hash == claim_hash and needs_invalidate: + self._get_invalidate_signature_ops(staged) + self.txo_to_claim[self.claim_hash_to_txo[staged.claim_hash]] = staged.invalidate_signature() + self.signatures_changed.add(staged.claim_hash) + self.pending_channel_counts[claim_hash] -= 1 + + def _make_pending_claim_txo(self, claim_hash: bytes): + claim = self.db.get_claim_txo(claim_hash) + if claim_hash in self.doesnt_have_valid_signature: + signing_hash = None + else: + signing_hash = self.db.get_channel_for_claim(claim_hash, claim.tx_num, claim.position) + reposted_claim_hash = self.db.get_repost(claim_hash) + return StagedClaimtrieItem( + claim.name, claim.normalized_name, claim_hash, claim.amount, + self.coin.get_expiration_height( + bisect_right(self.db.tx_counts, claim.tx_num), + extended=self.height >= self.coin.nExtendedClaimExpirationForkHeight + ), + claim.tx_num, claim.position, claim.root_tx_num, claim.root_position, + claim.channel_signature_is_valid, signing_hash, reposted_claim_hash + ) + + def _expire_claims(self, height: int): + expired = self.db.get_expired_by_height(height) + self.expired_claim_hashes.update(set(expired.keys())) + spent_claims = {} + for expired_claim_hash, (tx_num, position, name, txi) in expired.items(): + if (tx_num, position) not in self.txo_to_claim: + self._spend_claim_txo(txi, spent_claims) + if expired: + # abandon the channels last to handle abandoned signed claims in the same tx, + # see test_abandon_channel_and_claims_in_same_tx + expired_channels = {} + for abandoned_claim_hash, (tx_num, nout, normalized_name) in spent_claims.items(): + self._abandon_claim(abandoned_claim_hash, tx_num, nout, normalized_name) + + if normalized_name.startswith('@'): + expired_channels[abandoned_claim_hash] = (tx_num, nout, normalized_name) + else: + # print(f"\texpire {abandoned_claim_hash.hex()} {tx_num} {nout}") + self._abandon_claim(abandoned_claim_hash, tx_num, nout, normalized_name) + + # do this to follow the same content claim removing pathway as if a claim (possible channel) was abandoned + for abandoned_claim_hash, (tx_num, nout, normalized_name) in expired_channels.items(): + # print(f"\texpire 
{abandoned_claim_hash.hex()} {tx_num} {nout}") + self._abandon_claim(abandoned_claim_hash, tx_num, nout, normalized_name) + + def _cached_get_active_amount(self, claim_hash: bytes, txo_type: int, height: int) -> int: + if (claim_hash, txo_type, height) in self.amount_cache: + return self.amount_cache[(claim_hash, txo_type, height)] + if txo_type == ACTIVATED_CLAIM_TXO_TYPE: + if claim_hash in self.claim_hash_to_txo: + amount = self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].amount + else: + amount = self.db.get_active_amount_as_of_height( + claim_hash, height + ) + self.amount_cache[(claim_hash, txo_type, height)] = amount + else: + self.amount_cache[(claim_hash, txo_type, height)] = amount = self.db._get_active_amount( + claim_hash, txo_type, height + ) + return amount + + def _get_pending_claim_amount(self, name: str, claim_hash: bytes, height=None) -> int: + if (name, claim_hash) in self.activated_claim_amount_by_name_and_hash: + if claim_hash in self.claim_hash_to_txo: + return self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].amount + return self.activated_claim_amount_by_name_and_hash[(name, claim_hash)] + if (name, claim_hash) in self.possible_future_claim_amount_by_name_and_hash: + return self.possible_future_claim_amount_by_name_and_hash[(name, claim_hash)] + return self._cached_get_active_amount(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, height or (self.height + 1)) + + def _get_pending_claim_name(self, claim_hash: bytes) -> Optional[str]: + assert claim_hash is not None + if claim_hash in self.claim_hash_to_txo: + return self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].normalized_name + claim_info = self.db.get_claim_txo(claim_hash) + if claim_info: + return claim_info.normalized_name + + def _get_pending_supported_amount(self, claim_hash: bytes, height: Optional[int] = None) -> int: + amount = self._cached_get_active_amount(claim_hash, ACTIVATED_SUPPORT_TXO_TYPE, height or (self.height + 1)) + if claim_hash in self.activated_support_amount_by_claim: + amount += sum(self.activated_support_amount_by_claim[claim_hash]) + if claim_hash in self.possible_future_support_amounts_by_claim_hash: + amount += sum(self.possible_future_support_amounts_by_claim_hash[claim_hash]) + if claim_hash in self.removed_active_support_amount_by_claim: + return amount - sum(self.removed_active_support_amount_by_claim[claim_hash]) + return amount + + def _get_pending_effective_amount(self, name: str, claim_hash: bytes, height: Optional[int] = None) -> int: + claim_amount = self._get_pending_claim_amount(name, claim_hash, height=height) + support_amount = self._get_pending_supported_amount(claim_hash, height=height) + return claim_amount + support_amount + + def get_activate_ops(self, txo_type: int, claim_hash: bytes, tx_num: int, position: int, + activation_height: int, name: str, amount: int): + self.db.prefix_db.activated.stage_put( + (txo_type, tx_num, position), (activation_height, claim_hash, name) + ) + self.db.prefix_db.pending_activation.stage_put( + (activation_height, txo_type, tx_num, position), (claim_hash, name) + ) + self.db.prefix_db.active_amount.stage_put( + (claim_hash, txo_type, activation_height, tx_num, position), (amount,) + ) + + def get_remove_activate_ops(self, txo_type: int, claim_hash: bytes, tx_num: int, position: int, + activation_height: int, name: str, amount: int): + self.db.prefix_db.activated.stage_delete( + (txo_type, tx_num, position), (activation_height, claim_hash, name) + ) + self.db.prefix_db.pending_activation.stage_delete( + (activation_height, 
txo_type, tx_num, position), (claim_hash, name) + ) + self.db.prefix_db.active_amount.stage_delete( + (claim_hash, txo_type, activation_height, tx_num, position), (amount,) + ) + + def _get_takeover_ops(self, height: int): + + # cache for controlling claims as of the previous block + controlling_claims = {} + + def get_controlling(_name): + if _name not in controlling_claims: + _controlling = self.db.get_controlling_claim(_name) + controlling_claims[_name] = _controlling + else: + _controlling = controlling_claims[_name] + return _controlling + + names_with_abandoned_or_updated_controlling_claims: List[str] = [] + + # get the claims and supports previously scheduled to be activated at this block + activated_at_height = self.db.get_activated_at_height(height) + activate_in_future = defaultdict(lambda: defaultdict(list)) + future_activations = defaultdict(dict) + + def get_delayed_activate_ops(name: str, claim_hash: bytes, is_new_claim: bool, tx_num: int, nout: int, + amount: int, is_support: bool): + controlling = get_controlling(name) + nothing_is_controlling = not controlling + staged_is_controlling = False if not controlling else claim_hash == controlling.claim_hash + controlling_is_abandoned = False if not controlling else \ + name in names_with_abandoned_or_updated_controlling_claims + + if nothing_is_controlling or staged_is_controlling or controlling_is_abandoned: + delay = 0 + elif is_new_claim: + delay = self.coin.get_delay_for_name(height - controlling.height) + else: + controlling_effective_amount = self._get_pending_effective_amount(name, controlling.claim_hash) + staged_effective_amount = self._get_pending_effective_amount(name, claim_hash) + staged_update_could_cause_takeover = staged_effective_amount > controlling_effective_amount + delay = 0 if not staged_update_could_cause_takeover else self.coin.get_delay_for_name( + height - controlling.height + ) + if delay == 0: # if delay was 0 it needs to be considered for takeovers + activated_at_height[PendingActivationValue(claim_hash, name)].append( + PendingActivationKey( + height, ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE, tx_num, nout + ) + ) + else: # if the delay was higher if still needs to be considered if something else triggers a takeover + activate_in_future[name][claim_hash].append(( + PendingActivationKey( + height + delay, ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE, + tx_num, nout + ), amount + )) + if is_support: + self.possible_future_support_txos_by_claim_hash[claim_hash].append((tx_num, nout)) + self.get_activate_ops( + ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE, claim_hash, tx_num, nout, + height + delay, name, amount + ) + + # determine names needing takeover/deletion due to controlling claims being abandoned + # and add ops to deactivate abandoned claims + for claim_hash, staged in self.abandoned_claims.items(): + controlling = get_controlling(staged.normalized_name) + if controlling and controlling.claim_hash == claim_hash: + names_with_abandoned_or_updated_controlling_claims.append(staged.normalized_name) + # print(f"\t{staged.name} needs takeover") + activation = self.db.get_activation(staged.tx_num, staged.position) + if activation > 0: # db returns -1 for non-existent txos + # removed queued future activation from the db + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, staged.claim_hash, staged.tx_num, staged.position, + activation, staged.normalized_name, staged.amount + ) + else: + # it hadn't yet been 
activated + pass + + # get the removed activated supports for controlling claims to determine if takeovers are possible + abandoned_support_check_need_takeover = defaultdict(list) + for claim_hash, amounts in self.removed_active_support_amount_by_claim.items(): + name = self._get_pending_claim_name(claim_hash) + if name is None: + continue + controlling = get_controlling(name) + if controlling and controlling.claim_hash == claim_hash and \ + name not in names_with_abandoned_or_updated_controlling_claims: + abandoned_support_check_need_takeover[(name, claim_hash)].extend(amounts) + + # get the controlling claims with updates to the claim to check if takeover is needed + for claim_hash in self.updated_claims: + if claim_hash in self.abandoned_claims: + continue + name = self._get_pending_claim_name(claim_hash) + if name is None: + continue + controlling = get_controlling(name) + if controlling and controlling.claim_hash == claim_hash and \ + name not in names_with_abandoned_or_updated_controlling_claims: + names_with_abandoned_or_updated_controlling_claims.append(name) + + # prepare to activate or delay activation of the pending claims being added this block + for (tx_num, nout), staged in self.txo_to_claim.items(): + is_delayed = not staged.is_update + if staged.claim_hash in self.db.claim_to_txo: + prev_txo = self.db.claim_to_txo[staged.claim_hash] + prev_activation = self.db.get_activation(prev_txo.tx_num, prev_txo.position) + if height < prev_activation or prev_activation < 0: + is_delayed = True + get_delayed_activate_ops( + staged.normalized_name, staged.claim_hash, is_delayed, tx_num, nout, staged.amount, + is_support=False + ) + + # and the supports + for (tx_num, nout), (claim_hash, amount) in self.support_txo_to_claim.items(): + if claim_hash in self.abandoned_claims: + continue + elif claim_hash in self.claim_hash_to_txo: + name = self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].normalized_name + staged_is_new_claim = not self.txo_to_claim[self.claim_hash_to_txo[claim_hash]].is_update + else: + supported_claim_info = self.db.get_claim_txo(claim_hash) + if not supported_claim_info: + # the supported claim doesn't exist + continue + else: + v = supported_claim_info + name = v.normalized_name + staged_is_new_claim = (v.root_tx_num, v.root_position) == (v.tx_num, v.position) + get_delayed_activate_ops( + name, claim_hash, staged_is_new_claim, tx_num, nout, amount, is_support=True + ) + + # add the activation/delayed-activation ops + for activated, activated_txos in activated_at_height.items(): + controlling = get_controlling(activated.normalized_name) + if activated.claim_hash in self.abandoned_claims: + continue + reactivate = False + if not controlling or controlling.claim_hash == activated.claim_hash: + # there is no delay for claims to a name without a controlling value or to the controlling value + reactivate = True + for activated_txo in activated_txos: + if activated_txo.is_support and (activated_txo.tx_num, activated_txo.position) in \ + self.removed_support_txos_by_name_by_claim[activated.normalized_name][activated.claim_hash]: + # print("\tskip activate support for pending abandoned claim") + continue + if activated_txo.is_claim: + txo_type = ACTIVATED_CLAIM_TXO_TYPE + txo_tup = (activated_txo.tx_num, activated_txo.position) + if txo_tup in self.txo_to_claim: + amount = self.txo_to_claim[txo_tup].amount + else: + amount = self.db.get_claim_txo_amount( + activated.claim_hash + ) + if amount is None: + # print("\tskip activate for non existent claim") + continue + 
self.activated_claim_amount_by_name_and_hash[(activated.normalized_name, activated.claim_hash)] = amount + else: + txo_type = ACTIVATED_SUPPORT_TXO_TYPE + txo_tup = (activated_txo.tx_num, activated_txo.position) + if txo_tup in self.support_txo_to_claim: + amount = self.support_txo_to_claim[txo_tup][1] + else: + amount = self.db.get_support_txo_amount( + activated.claim_hash, activated_txo.tx_num, activated_txo.position + ) + if amount is None: + # print("\tskip activate support for non existent claim") + continue + self.activated_support_amount_by_claim[activated.claim_hash].append(amount) + self.activation_by_claim_by_name[activated.normalized_name][activated.claim_hash].append((activated_txo, amount)) + # print(f"\tactivate {'support' if txo_type == ACTIVATED_SUPPORT_TXO_TYPE else 'claim'} " + # f"{activated.claim_hash.hex()} @ {activated_txo.height}") + + # go through claims where the controlling claim or supports to the controlling claim have been abandoned + # check if takeovers are needed or if the name node is now empty + need_reactivate_if_takes_over = {} + for need_takeover in names_with_abandoned_or_updated_controlling_claims: + existing = self.db.get_claim_txos_for_name(need_takeover) + has_candidate = False + # add existing claims to the queue for the takeover + # track that we need to reactivate these if one of them becomes controlling + for candidate_claim_hash, (tx_num, nout) in existing.items(): + if candidate_claim_hash in self.abandoned_claims: + continue + has_candidate = True + existing_activation = self.db.get_activation(tx_num, nout) + activate_key = PendingActivationKey( + existing_activation, ACTIVATED_CLAIM_TXO_TYPE, tx_num, nout + ) + self.activation_by_claim_by_name[need_takeover][candidate_claim_hash].append(( + activate_key, self.db.get_claim_txo_amount(candidate_claim_hash) + )) + need_reactivate_if_takes_over[(need_takeover, candidate_claim_hash)] = activate_key + # print(f"\tcandidate to takeover abandoned controlling claim for " + # f"{activate_key.tx_num}:{activate_key.position} {activate_key.is_claim}") + if not has_candidate: + # remove name takeover entry, the name is now unclaimed + controlling = get_controlling(need_takeover) + self.db.prefix_db.claim_takeover.stage_delete( + (need_takeover,), (controlling.claim_hash, controlling.height) + ) + + # scan for possible takeovers out of the accumulated activations, of these make sure there + # aren't any future activations for the taken over names with yet higher amounts, if there are + # these need to get activated now and take over instead. 
for example: + # claim A is winning for 0.1 for long enough for a > 1 takeover delay + # claim B is made for 0.2 + # a block later, claim C is made for 0.3, it will schedule to activate 1 (or rarely 2) block(s) after B + # upon the delayed activation of B, we need to detect to activate C and make it take over early instead + + claim_exists = {} + for activated, activated_claim_txo in self.db.get_future_activated(height).items(): + # uses the pending effective amount for the future activation height, not the current height + future_amount = self._get_pending_claim_amount( + activated.normalized_name, activated.claim_hash, activated_claim_txo.height + 1 + ) + if activated.claim_hash not in claim_exists: + claim_exists[activated.claim_hash] = activated.claim_hash in self.claim_hash_to_txo or ( + self.db.get_claim_txo(activated.claim_hash) is not None) + if claim_exists[activated.claim_hash] and activated.claim_hash not in self.abandoned_claims: + v = future_amount, activated, activated_claim_txo + future_activations[activated.normalized_name][activated.claim_hash] = v + + for name, future_activated in activate_in_future.items(): + for claim_hash, activated in future_activated.items(): + if claim_hash not in claim_exists: + claim_exists[claim_hash] = claim_hash in self.claim_hash_to_txo or ( + self.db.get_claim_txo(claim_hash) is not None) + if not claim_exists[claim_hash]: + continue + if claim_hash in self.abandoned_claims: + continue + for txo in activated: + v = txo[1], PendingActivationValue(claim_hash, name), txo[0] + future_activations[name][claim_hash] = v + if txo[0].is_claim: + self.possible_future_claim_amount_by_name_and_hash[(name, claim_hash)] = txo[1] + else: + self.possible_future_support_amounts_by_claim_hash[claim_hash].append(txo[1]) + + # process takeovers + checked_names = set() + for name, activated in self.activation_by_claim_by_name.items(): + checked_names.add(name) + controlling = controlling_claims[name] + amounts = { + claim_hash: self._get_pending_effective_amount(name, claim_hash) + for claim_hash in activated.keys() if claim_hash not in self.abandoned_claims + } + # if there is a controlling claim include it in the amounts to ensure it remains the max + if controlling and controlling.claim_hash not in self.abandoned_claims: + amounts[controlling.claim_hash] = self._get_pending_effective_amount(name, controlling.claim_hash) + winning_claim_hash = max(amounts, key=lambda x: amounts[x]) + if not controlling or (winning_claim_hash != controlling.claim_hash and + name in names_with_abandoned_or_updated_controlling_claims) or \ + ((winning_claim_hash != controlling.claim_hash) and (amounts[winning_claim_hash] > amounts[controlling.claim_hash])): + amounts_with_future_activations = {claim_hash: amount for claim_hash, amount in amounts.items()} + amounts_with_future_activations.update( + { + claim_hash: self._get_pending_effective_amount( + name, claim_hash, self.height + 1 + self.coin.maxTakeoverDelay + ) for claim_hash in future_activations[name] + } + ) + winning_including_future_activations = max( + amounts_with_future_activations, key=lambda x: amounts_with_future_activations[x] + ) + future_winning_amount = amounts_with_future_activations[winning_including_future_activations] + + if winning_claim_hash != winning_including_future_activations and \ + future_winning_amount > amounts[winning_claim_hash]: + # print(f"\ttakeover by {winning_claim_hash.hex()} triggered early activation and " + # f"takeover by {winning_including_future_activations.hex()} at {height}") + # 
handle a pending activated claim jumping the takeover delay when another name takes over + if winning_including_future_activations not in self.claim_hash_to_txo: + claim = self.db.get_claim_txo(winning_including_future_activations) + tx_num = claim.tx_num + position = claim.position + amount = claim.amount + activation = self.db.get_activation(tx_num, position) + else: + tx_num, position = self.claim_hash_to_txo[winning_including_future_activations] + amount = self.txo_to_claim[(tx_num, position)].amount + activation = None + for (k, tx_amount) in activate_in_future[name][winning_including_future_activations]: + if (k.tx_num, k.position) == (tx_num, position): + activation = k.height + break + if activation is None: + # TODO: reproduce this in an integration test (block 604718) + _k = PendingActivationValue(winning_including_future_activations, name) + if _k in activated_at_height: + for pending_activation in activated_at_height[_k]: + if (pending_activation.tx_num, pending_activation.position) == (tx_num, position): + activation = pending_activation.height + break + assert None not in (amount, activation) + # update the claim that's activating early + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_including_future_activations, tx_num, + position, activation, name, amount + ) + self.get_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_including_future_activations, tx_num, + position, height, name, amount + ) + + for (k, amount) in activate_in_future[name][winning_including_future_activations]: + txo = (k.tx_num, k.position) + if txo in self.possible_future_support_txos_by_claim_hash[winning_including_future_activations]: + self.get_remove_activate_ops( + ACTIVATED_SUPPORT_TXO_TYPE, winning_including_future_activations, k.tx_num, + k.position, k.height, name, amount + ) + self.get_activate_ops( + ACTIVATED_SUPPORT_TXO_TYPE, winning_including_future_activations, k.tx_num, + k.position, height, name, amount + ) + self.taken_over_names.add(name) + if controlling: + self.db.prefix_db.claim_takeover.stage_delete( + (name,), (controlling.claim_hash, controlling.height) + ) + self.db.prefix_db.claim_takeover.stage_put((name,), (winning_including_future_activations, height)) + self.touched_claim_hashes.add(winning_including_future_activations) + if controlling and controlling.claim_hash not in self.abandoned_claims: + self.touched_claim_hashes.add(controlling.claim_hash) + elif not controlling or (winning_claim_hash != controlling.claim_hash and + name in names_with_abandoned_or_updated_controlling_claims) or \ + ((winning_claim_hash != controlling.claim_hash) and (amounts[winning_claim_hash] > amounts[controlling.claim_hash])): + # print(f"\ttakeover by {winning_claim_hash.hex()} at {height}") + if (name, winning_claim_hash) in need_reactivate_if_takes_over: + previous_pending_activate = need_reactivate_if_takes_over[(name, winning_claim_hash)] + amount = self.db.get_claim_txo_amount( + winning_claim_hash + ) + if winning_claim_hash in self.claim_hash_to_txo: + tx_num, position = self.claim_hash_to_txo[winning_claim_hash] + amount = self.txo_to_claim[(tx_num, position)].amount + else: + tx_num, position = previous_pending_activate.tx_num, previous_pending_activate.position + if previous_pending_activate.height > height: + # the claim had a pending activation in the future, move it to now + if tx_num < self.tx_count: + self.get_remove_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_claim_hash, tx_num, + position, previous_pending_activate.height, name, amount + ) + 
self.get_activate_ops( + ACTIVATED_CLAIM_TXO_TYPE, winning_claim_hash, tx_num, + position, height, name, amount + ) + self.taken_over_names.add(name) + if controlling: + self.db.prefix_db.claim_takeover.stage_delete( + (name,), (controlling.claim_hash, controlling.height) + ) + self.db.prefix_db.claim_takeover.stage_put((name,), (winning_claim_hash, height)) + if controlling and controlling.claim_hash not in self.abandoned_claims: + self.touched_claim_hashes.add(controlling.claim_hash) + self.touched_claim_hashes.add(winning_claim_hash) + elif winning_claim_hash == controlling.claim_hash: + # print("\tstill winning") + pass + else: + # print("\tno takeover") + pass + + # handle remaining takeovers from abandoned supports + for (name, claim_hash), amounts in abandoned_support_check_need_takeover.items(): + if name in checked_names: + continue + checked_names.add(name) + controlling = get_controlling(name) + amounts = { + claim_hash: self._get_pending_effective_amount(name, claim_hash) + for claim_hash in self.db.get_claims_for_name(name) if claim_hash not in self.abandoned_claims + } + if controlling and controlling.claim_hash not in self.abandoned_claims: + amounts[controlling.claim_hash] = self._get_pending_effective_amount(name, controlling.claim_hash) + winning = max(amounts, key=lambda x: amounts[x]) + + if (controlling and winning != controlling.claim_hash) or (not controlling and winning): + self.taken_over_names.add(name) + # print(f"\ttakeover from abandoned support {controlling.claim_hash.hex()} -> {winning.hex()}") + if controlling: + self.db.prefix_db.claim_takeover.stage_delete( + (name,), (controlling.claim_hash, controlling.height) + ) + self.db.prefix_db.claim_takeover.stage_put((name,), (winning, height)) + if controlling: + self.touched_claim_hashes.add(controlling.claim_hash) + self.touched_claim_hashes.add(winning) + + def _add_claim_activation_change_notification(self, claim_id: str, height: int, added: bool, prev_amount: int, + new_amount: int): + self.activation_info_to_send_es[claim_id].append(TrendingNotification(height, added, prev_amount, new_amount)) + + def _get_cumulative_update_ops(self, height: int): + # update the last takeover height for names with takeovers + for name in self.taken_over_names: + self.touched_claim_hashes.update( + {claim_hash for claim_hash in self.db.get_claims_for_name(name) + if claim_hash not in self.abandoned_claims} + ) + + # gather cumulative removed/touched sets to update the search index + self.removed_claim_hashes.update(set(self.abandoned_claims.keys())) + self.touched_claim_hashes.difference_update(self.removed_claim_hashes) + self.touched_claim_hashes.update( + set( + map(lambda item: item[1], self.activated_claim_amount_by_name_and_hash.keys()) + ).union( + set(self.claim_hash_to_txo.keys()) + ).union( + self.removed_active_support_amount_by_claim.keys() + ).union( + self.signatures_changed + ).union( + set(self.removed_active_support_amount_by_claim.keys()) + ).union( + set(self.activated_support_amount_by_claim.keys()) + ).difference( + self.removed_claim_hashes + ) + ) + + # update support amount totals + for supported_claim, amount in self.pending_support_amount_change.items(): + existing = self.db.prefix_db.support_amount.get(supported_claim) + total = amount + if existing is not None: + total += existing.amount + self.db.prefix_db.support_amount.stage_delete((supported_claim,), existing) + self.db.prefix_db.support_amount.stage_put((supported_claim,), (total,)) + + # use the cumulative changes to update bid ordered 
resolve + for removed in self.removed_claim_hashes: + removed_claim = self.db.get_claim_txo(removed) + if removed_claim: + amt = self.db.get_url_effective_amount( + removed_claim.normalized_name, removed + ) + if amt: + self.db.prefix_db.effective_amount.stage_delete( + (removed_claim.normalized_name, amt.effective_amount, amt.tx_num, amt.position), (removed,) + ) + for touched in self.touched_claim_hashes: + prev_effective_amount = 0 + + if touched in self.claim_hash_to_txo: + pending = self.txo_to_claim[self.claim_hash_to_txo[touched]] + name, tx_num, position = pending.normalized_name, pending.tx_num, pending.position + claim_from_db = self.db.get_claim_txo(touched) + if claim_from_db: + claim_amount_info = self.db.get_url_effective_amount(name, touched) + if claim_amount_info: + prev_effective_amount = claim_amount_info.effective_amount + self.db.prefix_db.effective_amount.stage_delete( + (name, claim_amount_info.effective_amount, claim_amount_info.tx_num, + claim_amount_info.position), (touched,) + ) + else: + v = self.db.get_claim_txo(touched) + if not v: + continue + name, tx_num, position = v.normalized_name, v.tx_num, v.position + amt = self.db.get_url_effective_amount(name, touched) + if amt: + prev_effective_amount = amt.effective_amount + self.db.prefix_db.effective_amount.stage_delete( + (name, prev_effective_amount, amt.tx_num, amt.position), (touched,) + ) + + if (name, touched) in self.activated_claim_amount_by_name_and_hash: + self._add_claim_activation_change_notification( + touched.hex(), height, True, prev_effective_amount, + self.activated_claim_amount_by_name_and_hash[(name, touched)] + ) + if touched in self.activated_support_amount_by_claim: + for support_amount in self.activated_support_amount_by_claim[touched]: + self._add_claim_activation_change_notification( + touched.hex(), height, True, prev_effective_amount, support_amount + ) + if touched in self.removed_active_support_amount_by_claim: + for support_amount in self.removed_active_support_amount_by_claim[touched]: + self._add_claim_activation_change_notification( + touched.hex(), height, False, prev_effective_amount, support_amount + ) + new_effective_amount = self._get_pending_effective_amount(name, touched) + self.db.prefix_db.effective_amount.stage_put( + (name, new_effective_amount, tx_num, position), (touched,) + ) + + for channel_hash, count in self.pending_channel_counts.items(): + if count != 0: + channel_count_val = self.db.prefix_db.channel_count.get(channel_hash) + channel_count = 0 if not channel_count_val else channel_count_val.count + if channel_count_val is not None: + self.db.prefix_db.channel_count.stage_delete((channel_hash,), (channel_count,)) + self.db.prefix_db.channel_count.stage_put((channel_hash,), (channel_count + count,)) + + self.touched_claim_hashes.update( + {k for k in self.pending_reposted if k not in self.removed_claim_hashes} + ) + self.touched_claim_hashes.update( + {k for k, v in self.pending_channel_counts.items() if v != 0 and k not in self.removed_claim_hashes} + ) + self.touched_claims_to_send_es.difference_update(self.removed_claim_hashes) + self.touched_claims_to_send_es.update(self.touched_claim_hashes) + self.removed_claims_to_send_es.update(self.removed_claim_hashes) + + def advance_block(self, block): + height = self.height + 1 + # print("advance ", height) # Use local vars for speed in the loops - put_utxo = self.utxo_cache.__setitem__ + tx_count = self.tx_count spend_utxo = self.spend_utxo - undo_info_append = undo_info.append - update_touched = 
self.touched.update - append_hashX_by_tx = hashXs_by_tx.append - hashX_from_script = self.coin.hashX_from_script + add_utxo = self.add_utxo + spend_claim_or_support_txo = self._spend_claim_or_support_txo + add_claim_or_support = self._add_claim_or_support + txs: List[Tuple[Tx, bytes]] = block.transactions + + self.db.prefix_db.block_hash.stage_put(key_args=(height,), value_args=(self.coin.header_hash(block.header),)) + self.db.prefix_db.header.stage_put(key_args=(height,), value_args=(block.header,)) for tx, tx_hash in txs: - hashXs = [] - append_hashX = hashXs.append - tx_numb = pack('= len(raw_blocks) - - coin = self.coin - for raw_block in raw_blocks: - self.logger.info("backup block %i", self.height) - # Check and update self.tip - block = coin.block(raw_block, self.height) - header_hash = coin.header_hash(block.header) - if header_hash != self.tip: - raise ChainError('backup block {} not tip {} at height {:,d}' - .format(hash_to_hex_str(header_hash), - hash_to_hex_str(self.tip), - self.height)) - self.tip = coin.header_prevhash(block.header) - self.backup_txs(block.transactions) - self.height -= 1 - self.db.tx_counts.pop() - - self.logger.info(f'backed up to height {self.height:,d}') - - def backup_txs(self, txs): - # Prevout values, in order down the block (coinbase first if present) - # undo_info is in reverse block order - undo_info = self.db.read_undo_info(self.height) - if undo_info is None: - raise ChainError(f'no undo information found for height {self.height:,d}') - n = len(undo_info) - - # Use local vars for speed in the loops - s_pack = pack - undo_entry_len = 12 + HASHX_LEN - - for tx, tx_hash in reversed(txs): - for idx, txout in enumerate(tx.outputs): - # Spend the TX outputs. Be careful with unspendable - # outputs - we didn't save those in the first place. 
- hashX = self.coin.hashX_from_script(txout.pk_script) - if hashX: - cache_value = self.spend_utxo(tx_hash, idx) - self.touched.add(cache_value[:-12]) - - # Restore the inputs - for txin in reversed(tx.inputs): - if txin.is_generation(): - continue - n -= undo_entry_len - undo_item = undo_info[n:n + undo_entry_len] - self.utxo_cache[txin.prev_hash + s_pack(' 1: - - tx_num, = unpack('= cached_max_reorg_depth: + self.db.prefix_db.touched_or_deleted.stage_put( + key_args=(height,), value_args=(self.touched_claim_hashes, self.removed_claim_hashes) + ) + + self.height = height + self.db.headers.append(block.header) + self.tip = self.coin.header_hash(block.header) + + min_height = self.db.min_undo_height(self.db.db_height) + if min_height > 0: # delete undos for blocks deep enough they can't be reorged + undo_to_delete = list(self.db.prefix_db.undo.iterate(start=(0,), stop=(min_height,))) + for (k, v) in undo_to_delete: + self.db.prefix_db.undo.stage_delete((k,), (v,)) + touched_or_deleted_to_delete = list(self.db.prefix_db.touched_or_deleted.iterate( + start=(0,), stop=(min_height,)) + ) + for (k, v) in touched_or_deleted_to_delete: + self.db.prefix_db.touched_or_deleted.stage_delete(k, v) + + self.db.fs_height = self.height + self.db.fs_tx_count = self.tx_count + self.db.hist_flush_count += 1 + self.db.hist_unflushed_count = 0 + self.db.utxo_flush_count = self.db.hist_flush_count + self.db.db_height = self.height + self.db.db_tx_count = self.tx_count + self.db.db_tip = self.tip + self.db.last_flush_tx_count = self.db.fs_tx_count + now = time.time() + self.db.wall_time += now - self.db.last_flush + self.db.last_flush = now + + self.db.write_db_state() + + def clear_after_advance_or_reorg(self): + self.txo_to_claim.clear() + self.claim_hash_to_txo.clear() + self.support_txos_by_claim.clear() + self.support_txo_to_claim.clear() + self.removed_support_txos_by_name_by_claim.clear() + self.abandoned_claims.clear() + self.removed_active_support_amount_by_claim.clear() + self.activated_support_amount_by_claim.clear() + self.activated_claim_amount_by_name_and_hash.clear() + self.activation_by_claim_by_name.clear() + self.possible_future_claim_amount_by_name_and_hash.clear() + self.possible_future_support_amounts_by_claim_hash.clear() + self.possible_future_support_txos_by_claim_hash.clear() + self.pending_channels.clear() + self.amount_cache.clear() + self.signatures_changed.clear() + self.expired_claim_hashes.clear() + self.doesnt_have_valid_signature.clear() + self.claim_channels.clear() + self.utxo_cache.clear() + self.hashXs_by_tx.clear() + self.history_cache.clear() + self.mempool.notified_mempool_txs.clear() + self.removed_claim_hashes.clear() + self.touched_claim_hashes.clear() + self.pending_reposted.clear() + self.pending_channel_counts.clear() + self.updated_claims.clear() + self.taken_over_names.clear() + self.pending_transaction_num_mapping.clear() + self.pending_transactions.clear() + self.pending_support_amount_change.clear() + + async def backup_block(self): + assert len(self.db.prefix_db._op_stack) == 0 + touched_and_deleted = self.db.prefix_db.touched_or_deleted.get(self.height) + self.touched_claims_to_send_es.update(touched_and_deleted.touched_claims) + self.removed_claims_to_send_es.difference_update(touched_and_deleted.touched_claims) + self.removed_claims_to_send_es.update(touched_and_deleted.deleted_claims) + + # self.db.assert_flushed(self.flush_data()) + self.logger.info("backup block %i", self.height) + # Check and update self.tip + + self.db.headers.pop() + 
self.db.tx_counts.pop() + self.tip = self.coin.header_hash(self.db.headers[-1]) + self.tx_count = self.db.tx_counts[-1] + self.height -= 1 + # self.touched can include other addresses which is + # harmless, but remove None. + self.touched_hashXs.discard(None) + + assert self.height < self.db.db_height + assert not self.db.hist_unflushed + + start_time = time.time() + tx_delta = self.tx_count - self.db.last_flush_tx_count + ### + self.db.fs_tx_count = self.tx_count + # Truncate header_mc: header count is 1 more than the height. + self.db.header_mc.truncate(self.height + 1) + ### + # Not certain this is needed, but it doesn't hurt + self.db.hist_flush_count += 1 + + while self.db.fs_height > self.height: + self.db.fs_height -= 1 + self.db.utxo_flush_count = self.db.hist_flush_count + self.db.db_height = self.height + self.db.db_tx_count = self.tx_count + self.db.db_tip = self.tip + # Flush state last as it reads the wall time. + now = time.time() + self.db.wall_time += now - self.db.last_flush + self.db.last_flush = now + self.db.last_flush_tx_count = self.db.fs_tx_count + + await self.run_in_thread_with_lock(self.db.prefix_db.rollback, self.height + 1) + self.clear_after_advance_or_reorg() + + elapsed = self.db.last_flush - start_time + self.logger.warning(f'backup flush #{self.db.hist_flush_count:,d} took {elapsed:.1f}s. ' + f'Height {self.height:,d} txs: {self.tx_count:,d} ({tx_delta:+,d})') + + def add_utxo(self, tx_hash: bytes, tx_num: int, nout: int, txout: 'TxOutput') -> Optional[bytes]: + hashX = self.coin.hashX_from_script(txout.pk_script) + if hashX: + self.touched_hashXs.add(hashX) + self.utxo_cache[(tx_hash, nout)] = (hashX, txout.value) + self.db.prefix_db.utxo.stage_put((hashX, tx_num, nout), (txout.value,)) + self.db.prefix_db.hashX_utxo.stage_put((tx_hash[:4], tx_num, nout), (hashX,)) + return hashX + + def get_pending_tx_num(self, tx_hash: bytes) -> int: + if tx_hash in self.pending_transaction_num_mapping: + return self.pending_transaction_num_mapping[tx_hash] + else: + return self.db.prefix_db.tx_num.get(tx_hash).tx_num + + def spend_utxo(self, tx_hash: bytes, nout: int): + hashX, amount = self.utxo_cache.pop((tx_hash, nout), (None, None)) + txin_num = self.get_pending_tx_num(tx_hash) + if not hashX: + hashX_value = self.db.prefix_db.hashX_utxo.get(tx_hash[:4], txin_num, nout) + if not hashX_value: + return + hashX = hashX_value.hashX + utxo_value = self.db.prefix_db.utxo.get(hashX, txin_num, nout) + if not utxo_value: + self.logger.warning( + "%s:%s is not found in UTXO db for %s", hash_to_hex_str(tx_hash), nout, hash_to_hex_str(hashX) + ) + raise ChainError( + f"{hash_to_hex_str(tx_hash)}:{nout} is not found in UTXO db for {hash_to_hex_str(hashX)}" + ) + self.touched_hashXs.add(hashX) + self.db.prefix_db.hashX_utxo.stage_delete((tx_hash[:4], txin_num, nout), hashX_value) + self.db.prefix_db.utxo.stage_delete((hashX, txin_num, nout), utxo_value) + return hashX + elif amount is not None: + self.db.prefix_db.hashX_utxo.stage_delete((tx_hash[:4], txin_num, nout), (hashX,)) + self.db.prefix_db.utxo.stage_delete((hashX, txin_num, nout), (amount,)) + self.touched_hashXs.add(hashX) + return hashX async def _process_prefetched_blocks(self): """Loop forever processing blocks as they arrive.""" @@ -648,36 +1678,31 @@ class BlockProcessor: self._caught_up_event.set() await self.blocks_event.wait() self.blocks_event.clear() - if self.reorg_count: # this could only happen by calling the reorg rpc - await self.reorg_chain(self.reorg_count) - self.reorg_count = 0 - else: - blocks = 
self.prefetcher.get_prefetched_blocks() - try: - await self.check_and_advance_blocks(blocks) - except Exception: - self.logger.exception("error while processing txs") - raise + blocks = self.prefetcher.get_prefetched_blocks() + try: + await self.check_and_advance_blocks(blocks) + except Exception: + self.logger.exception("error while processing txs") + raise async def _first_caught_up(self): self.logger.info(f'caught up to height {self.height}') # Flush everything but with first_sync->False state. first_sync = self.db.first_sync self.db.first_sync = False - await self.flush(True) + + def flush(): + assert len(self.db.prefix_db._op_stack) == 0 + self.db.write_db_state() + self.db.prefix_db.unsafe_commit() + self.db.assert_db_state() + + await self.run_in_thread_with_lock(flush) + if first_sync: self.logger.info(f'{lbry.__version__} synced to ' - f'height {self.height:,d}') - # Reopen for serving - await self.db.open_for_serving() - - async def _first_open_dbs(self): - await self.db.open_for_sync() - self.height = self.db.db_height - self.tip = self.db.db_tip - self.tx_count = self.db.db_tx_count - - # --- External API + f'height {self.height:,d}, halting here.') + self.shutdown_event.set() async def fetch_and_process_blocks(self, caught_up_event): """Fetch, process and index blocks from the daemon. @@ -694,7 +1719,10 @@ class BlockProcessor: self._caught_up_event = caught_up_event try: - await self._first_open_dbs() + await self.db.open_dbs() + self.height = self.db.db_height + self.tip = self.db.db_tip + self.tx_count = self.db.db_tx_count self.status_server.set_height(self.db.fs_height, self.db.db_tip) await asyncio.wait([ self.prefetcher.main_loop(self.height), @@ -708,102 +1736,6 @@ class BlockProcessor: finally: self.status_server.stop() # Shut down block processing - self.logger.info('flushing to DB for a clean shutdown...') - await self.flush(True) + self.logger.info('closing the DB for a clean shutdown...') self.db.close() - self.executor.shutdown(wait=True) - - def force_chain_reorg(self, count): - """Force a reorg of the given number of blocks. - - Returns True if a reorg is queued, false if not caught up. 
- """ - if self._caught_up_event.is_set(): - self.reorg_count = count - self.blocks_event.set() - return True - return False - - -class Timer: - def __init__(self, name): - self.name = name - self.total = 0 - self.count = 0 - self.sub_timers = {} - self._last_start = None - - def add_timer(self, name): - if name not in self.sub_timers: - self.sub_timers[name] = Timer(name) - return self.sub_timers[name] - - def run(self, func, *args, forward_timer=False, timer_name=None, **kwargs): - t = self.add_timer(timer_name or func.__name__) - t.start() - try: - if forward_timer: - return func(*args, **kwargs, timer=t) - else: - return func(*args, **kwargs) - finally: - t.stop() - - def start(self): - self._last_start = time.time() - return self - - def stop(self): - self.total += (time.time() - self._last_start) - self.count += 1 - self._last_start = None - return self - - def show(self, depth=0, height=None): - if depth == 0: - print('='*100) - if height is not None: - print(f'STATISTICS AT HEIGHT {height}') - print('='*100) - else: - print( - f"{' '*depth} {self.total/60:4.2f}mins {self.name}" - # f"{self.total/self.count:.5f}sec/call, " - ) - for sub_timer in self.sub_timers.values(): - sub_timer.show(depth+1) - if depth == 0: - print('='*100) - - -class LBRYBlockProcessor(BlockProcessor): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - if self.env.coin.NET == "regtest": - self.prefetcher.polling_delay = 0.5 - self.should_validate_signatures = self.env.boolean('VALIDATE_CLAIM_SIGNATURES', False) - self.logger.info(f"LbryumX Block Processor - Validating signatures: {self.should_validate_signatures}") - self.sql: SQLDB = self.db.sql - self.timer = Timer('BlockProcessor') - - def advance_blocks(self, blocks): - if self.sql: - self.sql.begin() - try: - self.timer.run(super().advance_blocks, blocks) - except: - self.logger.exception(f'Error while advancing transaction in new block.') - raise - finally: - if self.sql: - self.sql.commit() - - def advance_txs(self, height, txs, header, block_hash): - timer = self.timer.sub_timers['advance_blocks'] - undo = timer.run(super().advance_txs, height, txs, header, block_hash, timer_name='super().advance_txs') - if self.sql: - timer.run(self.sql.advance_txs, height, txs, header, self.daemon.cached_height(), forward_timer=True) - if (height % 10000 == 0 or not self.db.first_sync) and self.logger.isEnabledFor(10): - self.timer.show(height=height) - return undo + # self.executor.shutdown(wait=True) diff --git a/lbry/wallet/server/coin.py b/lbry/wallet/server/coin.py index 3b7598eb3..bd379f112 100644 --- a/lbry/wallet/server/coin.py +++ b/lbry/wallet/server/coin.py @@ -14,8 +14,7 @@ from lbry.wallet.server.daemon import Daemon, LBCDaemon from lbry.wallet.server.script import ScriptPubKey, OpCodes from lbry.wallet.server.leveldb import LevelDB from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager -from lbry.wallet.server.db.writer import LBRYLevelDB -from lbry.wallet.server.block_processor import LBRYBlockProcessor +from lbry.wallet.server.block_processor import BlockProcessor Block = namedtuple("Block", "raw header transactions") @@ -39,7 +38,7 @@ class Coin: SESSIONCLS = LBRYElectrumX DESERIALIZER = lib_tx.Deserializer DAEMON = Daemon - BLOCK_PROCESSOR = LBRYBlockProcessor + BLOCK_PROCESSOR = BlockProcessor SESSION_MANAGER = LBRYSessionManager DB = LevelDB HEADER_VALUES = [ @@ -214,6 +213,11 @@ class Coin: txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block() return Block(raw_block, header, txs) + 
@classmethod + def transaction(cls, raw_tx: bytes): + """Return a Block namedtuple given a raw block and its height.""" + return cls.DESERIALIZER(raw_tx).read_tx() + @classmethod def decimal_value(cls, value): """Return the number of standard coin units as a Decimal given a @@ -237,10 +241,9 @@ class Coin: class LBC(Coin): DAEMON = LBCDaemon SESSIONCLS = LBRYElectrumX - BLOCK_PROCESSOR = LBRYBlockProcessor SESSION_MANAGER = LBRYSessionManager DESERIALIZER = DeserializerSegWit - DB = LBRYLevelDB + DB = LevelDB NAME = "LBRY" SHORTNAME = "LBC" NET = "mainnet" @@ -258,6 +261,18 @@ class LBC(Coin): TX_PER_BLOCK = 1 RPC_PORT = 9245 REORG_LIMIT = 200 + + nOriginalClaimExpirationTime = 262974 + nExtendedClaimExpirationTime = 2102400 + nExtendedClaimExpirationForkHeight = 400155 + nNormalizedNameForkHeight = 539940 # targeting 21 March 2019 + nMinTakeoverWorkaroundHeight = 496850 + nMaxTakeoverWorkaroundHeight = 658300 # targeting 30 Oct 2019 + nWitnessForkHeight = 680770 # targeting 11 Dec 2019 + nAllClaimsInMerkleForkHeight = 658310 # targeting 30 Oct 2019 + proportionalDelayFactor = 32 + maxTakeoverDelay = 4032 + PEERS = [ ] @@ -335,6 +350,18 @@ class LBC(Coin): else: return sha256(script).digest()[:HASHX_LEN] + @classmethod + def get_expiration_height(cls, last_updated_height: int, extended: bool = False) -> int: + if extended: + return last_updated_height + cls.nExtendedClaimExpirationTime + if last_updated_height < cls.nExtendedClaimExpirationForkHeight: + return last_updated_height + cls.nOriginalClaimExpirationTime + return last_updated_height + cls.nExtendedClaimExpirationTime + + @classmethod + def get_delay_for_name(cls, blocks_of_continuous_ownership: int) -> int: + return min(blocks_of_continuous_ownership // cls.proportionalDelayFactor, cls.maxTakeoverDelay) + class LBCRegTest(LBC): NET = "regtest" @@ -344,6 +371,15 @@ class LBCRegTest(LBC): P2PKH_VERBYTE = bytes.fromhex("6f") P2SH_VERBYTES = bytes.fromhex("c4") + nOriginalClaimExpirationTime = 500 + nExtendedClaimExpirationTime = 600 + nExtendedClaimExpirationForkHeight = 800 + nNormalizedNameForkHeight = 250 + nMinTakeoverWorkaroundHeight = -1 + nMaxTakeoverWorkaroundHeight = -1 + nWitnessForkHeight = 150 + nAllClaimsInMerkleForkHeight = 350 + class LBCTestNet(LBCRegTest): NET = "testnet" diff --git a/lbry/wallet/server/daemon.py b/lbry/wallet/server/daemon.py index abcfdf71a..123f17f3b 100644 --- a/lbry/wallet/server/daemon.py +++ b/lbry/wallet/server/daemon.py @@ -364,6 +364,11 @@ class LBCDaemon(Daemon): '''Given a name, returns the winning claim value.''' return await self._send_single('getvalueforname', (name,)) + @handles_errors + async def getnamesintrie(self): + '''Given a name, returns the winning claim value.''' + return await self._send_single('getnamesintrie') + @handles_errors async def claimname(self, name, hexvalue, amount): '''Claim a name, used for functional tests only.''' diff --git a/lbry/wallet/server/db/__init__.py b/lbry/wallet/server/db/__init__.py index e69de29bb..b3201dc79 100644 --- a/lbry/wallet/server/db/__init__.py +++ b/lbry/wallet/server/db/__init__.py @@ -0,0 +1,41 @@ +import enum + + +@enum.unique +class DB_PREFIXES(enum.Enum): + claim_to_support = b'K' + support_to_claim = b'L' + + claim_to_txo = b'E' + txo_to_claim = b'G' + + claim_to_channel = b'I' + channel_to_claim = b'J' + + claim_short_id_prefix = b'F' + effective_amount = b'D' + claim_expiration = b'O' + + claim_takeover = b'P' + pending_activation = b'Q' + activated_claim_and_support = b'R' + active_amount = b'S' + + repost = b'V' + 
reposted_claim = b'W' + + undo = b'M' + claim_diff = b'Y' + + tx = b'B' + block_hash = b'C' + header = b'H' + tx_num = b'N' + tx_count = b'T' + tx_hash = b'X' + utxo = b'u' + hashx_utxo = b'h' + hashx_history = b'x' + db_state = b's' + channel_count = b'Z' + support_amount = b'a' diff --git a/lbry/wallet/server/db/canonical.py b/lbry/wallet/server/db/canonical.py deleted file mode 100644 index 1b0edacba..000000000 --- a/lbry/wallet/server/db/canonical.py +++ /dev/null @@ -1,22 +0,0 @@ -class FindShortestID: - __slots__ = 'short_id', 'new_id' - - def __init__(self): - self.short_id = '' - self.new_id = None - - def step(self, other_id, new_id): - self.new_id = new_id - for i in range(len(self.new_id)): - if other_id[i] != self.new_id[i]: - if i > len(self.short_id)-1: - self.short_id = self.new_id[:i+1] - break - - def finalize(self): - if self.short_id: - return '#'+self.short_id - - -def register_canonical_functions(connection): - connection.create_aggregate("shortest_id", 2, FindShortestID) diff --git a/lbry/wallet/server/db/common.py b/lbry/wallet/server/db/common.py index c0fdc4f3f..dce98711d 100644 --- a/lbry/wallet/server/db/common.py +++ b/lbry/wallet/server/db/common.py @@ -1,3 +1,5 @@ +import typing + CLAIM_TYPES = { 'stream': 1, 'channel': 2, @@ -418,3 +420,28 @@ INDEXED_LANGUAGES = [ 'zh', 'zu' ] + + +class ResolveResult(typing.NamedTuple): + name: str + normalized_name: str + claim_hash: bytes + tx_num: int + position: int + tx_hash: bytes + height: int + amount: int + short_url: str + is_controlling: bool + canonical_url: str + creation_height: int + activation_height: int + expiration_height: int + effective_amount: int + support_amount: int + reposted: int + last_takeover_height: typing.Optional[int] + claims_in_channel: typing.Optional[int] + channel_hash: typing.Optional[bytes] + reposted_claim_hash: typing.Optional[bytes] + signature_valid: typing.Optional[bool] diff --git a/lbry/wallet/server/db/db.py b/lbry/wallet/server/db/db.py new file mode 100644 index 000000000..6d613df93 --- /dev/null +++ b/lbry/wallet/server/db/db.py @@ -0,0 +1,119 @@ +import struct +from typing import Optional +from lbry.wallet.server.db import DB_PREFIXES +from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete + + +class KeyValueStorage: + def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]: + raise NotImplemented() + + def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None, + include_key=True, include_value=True, fill_cache=True): + raise NotImplemented() + + def write_batch(self, transaction: bool = False): + raise NotImplemented() + + def close(self): + raise NotImplemented() + + @property + def closed(self) -> bool: + raise NotImplemented() + + +class PrefixDB: + UNDO_KEY_STRUCT = struct.Struct(b'>Q') + + def __init__(self, db: KeyValueStorage, max_undo_depth: int = 200, unsafe_prefixes=None): + self._db = db + self._op_stack = RevertableOpStack(db.get, unsafe_prefixes=unsafe_prefixes) + self._max_undo_depth = max_undo_depth + + def unsafe_commit(self): + """ + Write staged changes to the database without keeping undo information + Changes written cannot be undone + """ + try: + with self._db.write_batch(transaction=True) as batch: + batch_put = batch.put + batch_delete = batch.delete + for staged_change in self._op_stack: + if staged_change.is_put: + batch_put(staged_change.key, staged_change.value) + else: + batch_delete(staged_change.key) + finally: + self._op_stack.clear() + 
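A minimal sketch of the staged-write/undo pattern that the commit() and rollback() methods below implement, using a hypothetical ToyPrefixDB with a plain dict standing in for the real KeyValueStorage and unpacked Python tuples standing in for the packed undo bytes (the names and structure here are illustrative assumptions, not the patch's API):

from typing import Dict, List, Tuple


class ToyPrefixDB:
    def __init__(self):
        self._db: Dict[bytes, bytes] = {}                            # stand-in for the LevelDB backend
        self._staged: List[Tuple[bool, bytes, bytes]] = []           # (is_put, key, value)
        self._undo: Dict[int, List[Tuple[bool, bytes, bytes]]] = {}  # height -> inverse ops

    def stage_put(self, key: bytes, value: bytes):
        self._staged.append((True, key, value))

    def stage_delete(self, key: bytes, value: bytes):
        self._staged.append((False, key, value))

    def commit(self, height: int):
        # record the inverse of each staged op (put <-> delete), newest first,
        # so this block can be reverted later, then apply the staged ops
        self._undo[height] = [(not is_put, k, v) for is_put, k, v in reversed(self._staged)]
        for is_put, k, v in self._staged:
            if is_put:
                self._db[k] = v
            else:
                self._db.pop(k, None)
        self._staged.clear()

    def rollback(self, height: int):
        # replay the stored inverse ops to revert everything written at `height`
        for is_put, k, v in self._undo.pop(height):
            if is_put:
                self._db[k] = v
            else:
                self._db.pop(k, None)


db = ToyPrefixDB()
db.stage_put(b'a/claim1', b'1000')
db.commit(height=1)
db.stage_delete(b'a/claim1', b'1000')   # deletes must name the old value so it can be restored
db.stage_put(b'a/claim1', b'2500')
db.commit(height=2)
assert db._db[b'a/claim1'] == b'2500'
db.rollback(height=2)                   # a one-block reorg
assert db._db[b'a/claim1'] == b'1000'

In the real PrefixDB the undo ops are serialized and written under the undo prefix keyed by height, and commit() also prunes undo entries older than max_undo_depth, since blocks that deep can no longer be reorged.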
+ def commit(self, height: int): + """ + Write changes for a block height to the database and keep undo information so that the changes can be reverted + """ + undo_ops = self._op_stack.get_undo_ops() + delete_undos = [] + if height > self._max_undo_depth: + delete_undos.extend(self._db.iterator( + start=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(0), + stop=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height - self._max_undo_depth), + include_value=False + )) + try: + with self._db.write_batch(transaction=True) as batch: + batch_put = batch.put + batch_delete = batch.delete + for staged_change in self._op_stack: + if staged_change.is_put: + batch_put(staged_change.key, staged_change.value) + else: + batch_delete(staged_change.key) + for undo_to_delete in delete_undos: + batch_delete(undo_to_delete) + batch_put(DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height), undo_ops) + finally: + self._op_stack.clear() + + def rollback(self, height: int): + """ + Revert changes for a block height + """ + undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height) + self._op_stack.apply_packed_undo_ops(self._db.get(undo_key)) + try: + with self._db.write_batch(transaction=True) as batch: + batch_put = batch.put + batch_delete = batch.delete + for staged_change in self._op_stack: + if staged_change.is_put: + batch_put(staged_change.key, staged_change.value) + else: + batch_delete(staged_change.key) + batch_delete(undo_key) + finally: + self._op_stack.clear() + + def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]: + return self._db.get(key, fill_cache=fill_cache) + + def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None, + include_key=True, include_value=True, fill_cache=True): + return self._db.iterator( + reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop, + prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache + ) + + def close(self): + if not self._db.closed: + self._db.close() + + @property + def closed(self): + return self._db.closed + + def stage_raw_put(self, key: bytes, value: bytes): + self._op_stack.append_op(RevertablePut(key, value)) + + def stage_raw_delete(self, key: bytes, value: bytes): + self._op_stack.append_op(RevertableDelete(key, value)) diff --git a/lbry/wallet/server/db/elasticsearch/constants.py b/lbry/wallet/server/db/elasticsearch/constants.py index 35f1b054d..2281f6ff5 100644 --- a/lbry/wallet/server/db/elasticsearch/constants.py +++ b/lbry/wallet/server/db/elasticsearch/constants.py @@ -8,7 +8,7 @@ INDEX_DEFAULT_SETTINGS = { "number_of_shards": 1, "number_of_replicas": 0, "sort": { - "field": ["trending_mixed", "release_time"], + "field": ["trending_score", "release_time"], "order": ["desc", "desc"] }} }, @@ -30,8 +30,8 @@ INDEX_DEFAULT_SETTINGS = { "height": {"type": "integer"}, "claim_type": {"type": "byte"}, "censor_type": {"type": "byte"}, - "trending_mixed": {"type": "float"}, - "release_time": {"type": "long"}, + "trending_score": {"type": "double"}, + "release_time": {"type": "long"} } } } @@ -53,30 +53,32 @@ FIELDS = { 'duration', 'release_time', 'tags', 'languages', 'has_source', 'reposted_claim_type', 'reposted_claim_id', 'repost_count', - 'trending_group', 'trending_mixed', 'trending_local', 'trending_global', + 'trending_score', 'tx_num' } -TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'claim_name', 'description', 'claim_id', 'censoring_channel_id', +TEXT_FIELDS = 
{'author', 'canonical_url', 'channel_id', 'description', 'claim_id', 'censoring_channel_id', 'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature', - 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id', 'tags'} + 'claim_name', 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id', + 'tags'} RANGE_FIELDS = { 'height', 'creation_height', 'activation_height', 'expiration_height', 'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount', 'tx_position', 'channel_join', 'repost_count', 'limit_claims_per_channel', 'amount', 'effective_amount', 'support_amount', - 'trending_group', 'trending_mixed', 'censor_type', - 'trending_local', 'trending_global', + 'trending_score', 'censor_type', 'tx_num' } ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS REPLACEMENTS = { + 'claim_name': 'normalized_name', 'name': 'normalized_name', 'txid': 'tx_id', 'nout': 'tx_nout', - 'valid_channel_signature': 'is_signature_valid', + 'trending_mixed': 'trending_score', + 'reposted': 'repost_count', 'stream_types': 'stream_type', 'media_types': 'media_type', - 'reposted': 'repost_count' + 'valid_channel_signature': 'is_signature_valid' } diff --git a/lbry/wallet/server/db/elasticsearch/search.py b/lbry/wallet/server/db/elasticsearch/search.py index 75f7f4a0e..14b47677b 100644 --- a/lbry/wallet/server/db/elasticsearch/search.py +++ b/lbry/wallet/server/db/elasticsearch/search.py @@ -1,3 +1,4 @@ +import time import asyncio import struct from binascii import unhexlify @@ -8,8 +9,6 @@ from typing import Optional, List, Iterable, Union from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError from elasticsearch.helpers import async_streaming_bulk - -from lbry.crypto.base58 import Base58 from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError from lbry.schema.result import Outputs, Censor from lbry.schema.tags import clean_tags @@ -19,6 +18,7 @@ from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \ RANGE_FIELDS, ALL_FIELDS from lbry.wallet.server.util import class_logger +from lbry.wallet.server.db.common import ResolveResult class ChannelResolution(str): @@ -42,7 +42,8 @@ class IndexVersionMismatch(Exception): class SearchIndex: VERSION = 1 - def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200): + def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200, + half_life=0.4, whale_threshold=10000, whale_half_life=0.99): self.search_timeout = search_timeout self.sync_timeout = 600 # wont hit that 99% of the time, but can hit on a fresh import self.search_client: Optional[AsyncElasticsearch] = None @@ -55,6 +56,9 @@ class SearchIndex: self.resolution_cache = LRUCache(2 ** 17) self._elastic_host = elastic_host self._elastic_port = elastic_port + self._trending_half_life = half_life + self._trending_whale_threshold = whale_threshold + self._trending_whale_half_life = whale_half_life async def get_index_version(self) -> int: try: @@ -91,6 +95,7 @@ class SearchIndex: if index_version != self.VERSION: self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION) raise IndexVersionMismatch(index_version, self.VERSION) + await self.sync_client.indices.refresh(self.index) return acked def stop(self): @@ -103,15 +108,28 @@ class 
SearchIndex: async def _consume_claim_producer(self, claim_producer): count = 0 - for op, doc in claim_producer: + async for op, doc in claim_producer: if op == 'delete': - yield {'_index': self.index, '_op_type': 'delete', '_id': doc} + yield { + '_index': self.index, + '_op_type': 'delete', + '_id': doc + } else: - yield extract_doc(doc, self.index) + yield { + 'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS}, + '_id': doc['claim_id'], + '_index': self.index, + '_op_type': 'update', + 'doc_as_upsert': True + } count += 1 if count % 100 == 0: self.logger.info("Indexing in progress, %d claims.", count) - self.logger.info("Indexing done for %d claims.", count) + if count: + self.logger.info("Indexing done for %d claims.", count) + else: + self.logger.debug("Indexing done for %d claims.", count) async def claim_consumer(self, claim_producer): touched = set() @@ -123,22 +141,98 @@ class SearchIndex: item = item.popitem()[1] touched.add(item['_id']) await self.sync_client.indices.refresh(self.index) - self.logger.info("Indexing done.") + self.logger.debug("Indexing done.") def update_filter_query(self, censor_type, blockdict, channels=False): - blockdict = {key[::-1].hex(): value[::-1].hex() for key, value in blockdict.items()} + blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()} if channels: update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}") else: update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}") key = 'channel_id' if channels else 'claim_id' update['script'] = { - "source": f"ctx._source.censor_type={censor_type}; ctx._source.censoring_channel_id=params[ctx._source.{key}]", + "source": f"ctx._source.censor_type={censor_type}; " + f"ctx._source.censoring_channel_id=params[ctx._source.{key}];", "lang": "painless", "params": blockdict } return update + async def update_trending_score(self, params): + update_trending_score_script = """ + double softenLBC(double lbc) { Math.pow(lbc, 1.0f / 3.0f) } + double inflateUnits(int height) { + int renormalizationPeriod = 100000; + double doublingRate = 400.0f; + Math.pow(2.0, (height % renormalizationPeriod) / doublingRate) + } + double spikePower(double newAmount) { + if (newAmount < 50.0) { + 0.5 + } else if (newAmount < 85.0) { + newAmount / 100.0 + } else { + 0.85 + } + } + double spikeMass(double oldAmount, double newAmount) { + double softenedChange = softenLBC(Math.abs(newAmount - oldAmount)); + double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount)); + double power = spikePower(newAmount); + if (oldAmount > newAmount) { + -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power) + } else { + Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power) + } + } + for (i in params.src.changes) { + double units = inflateUnits(i.height); + if (i.added) { + if (ctx._source.trending_score == null) { + ctx._source.trending_score = (units * spikeMass(i.prev_amount, i.prev_amount + i.new_amount)); + } else { + ctx._source.trending_score += (units * spikeMass(i.prev_amount, i.prev_amount + i.new_amount)); + } + } else { + if (ctx._source.trending_score == null) { + ctx._source.trending_score = (units * spikeMass(i.prev_amount, i.prev_amount - i.new_amount)); + } else { + ctx._source.trending_score += (units * spikeMass(i.prev_amount, i.prev_amount - i.new_amount)); + } + } + } + """ + start = time.perf_counter() + + def producer(): + for claim_id, claim_updates in 
params.items(): + yield { + '_id': claim_id, + '_index': self.index, + '_op_type': 'update', + 'script': { + 'lang': 'painless', + 'source': update_trending_score_script, + 'params': {'src': { + 'changes': [ + { + 'height': p.height, + 'added': p.added, + 'prev_amount': p.prev_amount * 1E-9, + 'new_amount': p.new_amount * 1E-9, + } for p in claim_updates + ] + }} + }, + } + if not params: + return + async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False): + if not ok: + self.logger.warning("updating trending failed for an item: %s", item) + await self.sync_client.indices.refresh(self.index) + self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000)) + async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels): if filtered_streams: await self.sync_client.update_by_query( @@ -170,52 +264,82 @@ class SearchIndex: self.claim_cache.clear() self.resolution_cache.clear() - async def session_query(self, query_name, kwargs): - offset, total = kwargs.get('offset', 0) if isinstance(kwargs, dict) else 0, 0 + async def cached_search(self, kwargs): total_referenced = [] - if query_name == 'resolve': - total_referenced, response, censor = await self.resolve(*kwargs) - else: - cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache) - if cache_item.result is not None: + cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache) + if cache_item.result is not None: + return cache_item.result + async with cache_item.lock: + if cache_item.result: return cache_item.result - async with cache_item.lock: - if cache_item.result: - return cache_item.result - censor = Censor(Censor.SEARCH) - if kwargs.get('no_totals'): - response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED) - else: - response, offset, total = await self.search(**kwargs) - censor.apply(response) + censor = Censor(Censor.SEARCH) + if kwargs.get('no_totals'): + response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED) + else: + response, offset, total = await self.search(**kwargs) + censor.apply(response) + total_referenced.extend(response) + + if censor.censored: + response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED) total_referenced.extend(response) - if censor.censored: - response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED) - total_referenced.extend(response) - result = Outputs.to_base64( - response, await self._get_referenced_rows(total_referenced), offset, total, censor - ) - cache_item.result = result - return result - return Outputs.to_base64(response, await self._get_referenced_rows(total_referenced), offset, total, censor) - - async def resolve(self, *urls): - censor = Censor(Censor.RESOLVE) - results = [await self.resolve_url(url) for url in urls] - # just heat the cache - await self.populate_claim_cache(*filter(lambda x: isinstance(x, str), results)) - results = [self._get_from_cache_or_error(url, result) for url, result in zip(urls, results)] - - censored = [ - result if not isinstance(result, dict) or not censor.censor(result) - else ResolveCensoredError(url, result['censoring_channel_id']) - for url, result in zip(urls, results) - ] - return results, censored, censor - - def _get_from_cache_or_error(self, url: str, resolution: Union[LookupError, StreamResolution, ChannelResolution]): - cached = self.claim_cache.get(resolution) - return cached or (resolution if isinstance(resolution, 
LookupError) else resolution.lookup_error(url)) + response = [ + ResolveResult( + name=r['claim_name'], + normalized_name=r['normalized_name'], + claim_hash=r['claim_hash'], + tx_num=r['tx_num'], + position=r['tx_nout'], + tx_hash=r['tx_hash'], + height=r['height'], + amount=r['amount'], + short_url=r['short_url'], + is_controlling=r['is_controlling'], + canonical_url=r['canonical_url'], + creation_height=r['creation_height'], + activation_height=r['activation_height'], + expiration_height=r['expiration_height'], + effective_amount=r['effective_amount'], + support_amount=r['support_amount'], + last_takeover_height=r['last_take_over_height'], + claims_in_channel=r['claims_in_channel'], + channel_hash=r['channel_hash'], + reposted_claim_hash=r['reposted_claim_hash'], + reposted=r['reposted'], + signature_valid=r['signature_valid'] + ) for r in response + ] + extra = [ + ResolveResult( + name=r['claim_name'], + normalized_name=r['normalized_name'], + claim_hash=r['claim_hash'], + tx_num=r['tx_num'], + position=r['tx_nout'], + tx_hash=r['tx_hash'], + height=r['height'], + amount=r['amount'], + short_url=r['short_url'], + is_controlling=r['is_controlling'], + canonical_url=r['canonical_url'], + creation_height=r['creation_height'], + activation_height=r['activation_height'], + expiration_height=r['expiration_height'], + effective_amount=r['effective_amount'], + support_amount=r['support_amount'], + last_takeover_height=r['last_take_over_height'], + claims_in_channel=r['claims_in_channel'], + channel_hash=r['channel_hash'], + reposted_claim_hash=r['reposted_claim_hash'], + reposted=r['reposted'], + signature_valid=r['signature_valid'] + ) for r in await self._get_referenced_rows(total_referenced) + ] + result = Outputs.to_base64( + response, extra, offset, total, censor + ) + cache_item.result = result + return result async def get_many(self, *claim_ids): await self.populate_claim_cache(*claim_ids) @@ -247,15 +371,11 @@ class SearchIndex: return self.short_id_cache.get(key, None) async def search(self, **kwargs): - if 'channel' in kwargs: - kwargs['channel_id'] = await self.resolve_url(kwargs.pop('channel')) - if not kwargs['channel_id'] or not isinstance(kwargs['channel_id'], str): - return [], 0, 0 try: return await self.search_ahead(**kwargs) except NotFoundError: return [], 0, 0 - return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0) + # return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0) async def search_ahead(self, **kwargs): # 'limit_claims_per_channel' case. 
Fetch 1000 results, reorder, slice, inflate and return @@ -335,78 +455,6 @@ class SearchIndex: next_page_hits_maybe_check_later.append((hit_id, hit_channel_id)) return reordered_hits - async def resolve_url(self, raw_url): - if raw_url not in self.resolution_cache: - self.resolution_cache[raw_url] = await self._resolve_url(raw_url) - return self.resolution_cache[raw_url] - - async def _resolve_url(self, raw_url): - try: - url = URL.parse(raw_url) - except ValueError as e: - return e - - stream = LookupError(f'Could not find claim at "{raw_url}".') - - channel_id = await self.resolve_channel_id(url) - if isinstance(channel_id, LookupError): - return channel_id - stream = (await self.resolve_stream(url, channel_id if isinstance(channel_id, str) else None)) or stream - if url.has_stream: - return StreamResolution(stream) - else: - return ChannelResolution(channel_id) - - async def resolve_channel_id(self, url: URL): - if not url.has_channel: - return - if url.channel.is_fullid: - return url.channel.claim_id - if url.channel.is_shortid: - channel_id = await self.full_id_from_short_id(url.channel.name, url.channel.claim_id) - if not channel_id: - return LookupError(f'Could not find channel in "{url}".') - return channel_id - - query = url.channel.to_dict() - if set(query) == {'name'}: - query['is_controlling'] = True - else: - query['order_by'] = ['^creation_height'] - matches, _, _ = await self.search(**query, limit=1) - if matches: - channel_id = matches[0]['claim_id'] - else: - return LookupError(f'Could not find channel in "{url}".') - return channel_id - - async def resolve_stream(self, url: URL, channel_id: str = None): - if not url.has_stream: - return None - if url.has_channel and channel_id is None: - return None - query = url.stream.to_dict() - if url.stream.claim_id is not None: - if url.stream.is_fullid: - claim_id = url.stream.claim_id - else: - claim_id = await self.full_id_from_short_id(query['name'], query['claim_id'], channel_id) - return claim_id - - if channel_id is not None: - if set(query) == {'name'}: - # temporarily emulate is_controlling for claims in channel - query['order_by'] = ['effective_amount', '^height'] - else: - query['order_by'] = ['^channel_join'] - query['channel_id'] = channel_id - query['signature_valid'] = True - elif set(query) == {'name'}: - query['is_controlling'] = True - matches, _, _ = await self.search(**query, limit=1) - if matches: - return matches[0]['claim_id'] - async def _get_referenced_rows(self, txo_rows: List[dict]): txo_rows = [row for row in txo_rows if isinstance(row, dict)] referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows))) @@ -424,33 +472,6 @@ class SearchIndex: return referenced_txos -def extract_doc(doc, index): - doc['claim_id'] = doc.pop('claim_hash')[::-1].hex() - if doc['reposted_claim_hash'] is not None: - doc['reposted_claim_id'] = doc.pop('reposted_claim_hash')[::-1].hex() - else: - doc['reposted_claim_id'] = None - channel_hash = doc.pop('channel_hash') - doc['channel_id'] = channel_hash[::-1].hex() if channel_hash else channel_hash - doc['censoring_channel_id'] = doc.get('censoring_channel_id') - txo_hash = doc.pop('txo_hash') - doc['tx_id'] = txo_hash[:32][::-1].hex() - doc['tx_nout'] = struct.unpack(' bytes: + encoded = name.encode('utf-8') + return len(encoded).to_bytes(2, byteorder='big') + encoded + + +def length_prefix(key: str) -> bytes: + return len(key).to_bytes(1, byteorder='big') + key.encode() + + +ROW_TYPES = {} + + +class PrefixRowType(type): + def __new__(cls, name, bases, 
kwargs): + klass = super().__new__(cls, name, bases, kwargs) + if name != "PrefixRow": + ROW_TYPES[klass.prefix] = klass + return klass + + +class PrefixRow(metaclass=PrefixRowType): + prefix: bytes + key_struct: struct.Struct + value_struct: struct.Struct + key_part_lambdas = [] + + def __init__(self, db: KeyValueStorage, op_stack: RevertableOpStack): + self._db = db + self._op_stack = op_stack + + def iterate(self, prefix=None, start=None, stop=None, + reverse: bool = False, include_key: bool = True, include_value: bool = True, + fill_cache: bool = True, deserialize_key: bool = True, deserialize_value: bool = True): + if not prefix and not start and not stop: + prefix = () + if prefix is not None: + prefix = self.pack_partial_key(*prefix) + if start is not None: + start = self.pack_partial_key(*start) + if stop is not None: + stop = self.pack_partial_key(*stop) + + if deserialize_key: + key_getter = lambda k: self.unpack_key(k) + else: + key_getter = lambda k: k + if deserialize_value: + value_getter = lambda v: self.unpack_value(v) + else: + value_getter = lambda v: v + + if include_key and include_value: + for k, v in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, + fill_cache=fill_cache): + yield key_getter(k), value_getter(v) + elif include_key: + for k in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_value=False, + fill_cache=fill_cache): + yield key_getter(k) + elif include_value: + for v in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_key=False, + fill_cache=fill_cache): + yield value_getter(v) + else: + for _ in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_key=False, + include_value=False, fill_cache=fill_cache): + yield None + + def get(self, *key_args, fill_cache=True, deserialize_value=True): + v = self._db.get(self.pack_key(*key_args), fill_cache=fill_cache) + if v: + return v if not deserialize_value else self.unpack_value(v) + + def stage_put(self, key_args=(), value_args=()): + self._op_stack.append_op(RevertablePut(self.pack_key(*key_args), self.pack_value(*value_args))) + + def stage_delete(self, key_args=(), value_args=()): + self._op_stack.append_op(RevertableDelete(self.pack_key(*key_args), self.pack_value(*value_args))) + + @classmethod + def pack_partial_key(cls, *args) -> bytes: + return cls.prefix + cls.key_part_lambdas[len(args)](*args) + + @classmethod + def pack_key(cls, *args) -> bytes: + return cls.prefix + cls.key_struct.pack(*args) + + @classmethod + def pack_value(cls, *args) -> bytes: + return cls.value_struct.pack(*args) + + @classmethod + def unpack_key(cls, key: bytes): + assert key[:1] == cls.prefix + return cls.key_struct.unpack(key[1:]) + + @classmethod + def unpack_value(cls, data: bytes): + return cls.value_struct.unpack(data) + + @classmethod + def unpack_item(cls, key: bytes, value: bytes): + return cls.unpack_key(key), cls.unpack_value(value) + + +class UTXOKey(NamedTuple): + hashX: bytes + tx_num: int + nout: int + + def __str__(self): + return f"{self.__class__.__name__}(hashX={self.hashX.hex()}, tx_num={self.tx_num}, nout={self.nout})" + + +class UTXOValue(NamedTuple): + amount: int + + +class HashXUTXOKey(NamedTuple): + short_tx_hash: bytes + tx_num: int + nout: int + + def __str__(self): + return f"{self.__class__.__name__}(short_tx_hash={self.short_tx_hash.hex()}, tx_num={self.tx_num}, nout={self.nout})" + + +class HashXUTXOValue(NamedTuple): + hashX: bytes + + def __str__(self): + return 
f"{self.__class__.__name__}(hashX={self.hashX.hex()})" + + +class HashXHistoryKey(NamedTuple): + hashX: bytes + height: int + + def __str__(self): + return f"{self.__class__.__name__}(hashX={self.hashX.hex()}, height={self.height})" + + +class HashXHistoryValue(NamedTuple): + hashXes: typing.List[int] + + +class BlockHashKey(NamedTuple): + height: int + + +class BlockHashValue(NamedTuple): + block_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(block_hash={self.block_hash.hex()})" + + +class TxCountKey(NamedTuple): + height: int + + +class TxCountValue(NamedTuple): + tx_count: int + + +class TxHashKey(NamedTuple): + tx_num: int + + +class TxHashValue(NamedTuple): + tx_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})" + + +class TxNumKey(NamedTuple): + tx_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})" + + +class TxNumValue(NamedTuple): + tx_num: int + + +class TxKey(NamedTuple): + tx_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})" + + +class TxValue(NamedTuple): + raw_tx: bytes + + def __str__(self): + return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx)})" + + +class BlockHeaderKey(NamedTuple): + height: int + + +class BlockHeaderValue(NamedTuple): + header: bytes + + def __str__(self): + return f"{self.__class__.__name__}(header={base64.b64encode(self.header)})" + + +class ClaimToTXOKey(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class ClaimToTXOValue(typing.NamedTuple): + tx_num: int + position: int + root_tx_num: int + root_position: int + amount: int + # activation: int + channel_signature_is_valid: bool + name: str + + @property + def normalized_name(self) -> str: + try: + return normalize_name(self.name) + except UnicodeDecodeError: + return self.name + + +class TXOToClaimKey(typing.NamedTuple): + tx_num: int + position: int + + +class TXOToClaimValue(typing.NamedTuple): + claim_hash: bytes + name: str + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, name={self.name})" + + +class ClaimShortIDKey(typing.NamedTuple): + normalized_name: str + partial_claim_id: str + root_tx_num: int + root_position: int + + def __str__(self): + return f"{self.__class__.__name__}(normalized_name={self.normalized_name}, " \ + f"partial_claim_id={self.partial_claim_id}, " \ + f"root_tx_num={self.root_tx_num}, root_position={self.root_position})" + + +class ClaimShortIDValue(typing.NamedTuple): + tx_num: int + position: int + + +class ClaimToChannelKey(typing.NamedTuple): + claim_hash: bytes + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, " \ + f"tx_num={self.tx_num}, position={self.position})" + + +class ClaimToChannelValue(typing.NamedTuple): + signing_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(signing_hash={self.signing_hash.hex()})" + + +class ChannelToClaimKey(typing.NamedTuple): + signing_hash: bytes + name: str + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(signing_hash={self.signing_hash.hex()}, name={self.name}, " \ + f"tx_num={self.tx_num}, position={self.position})" + + +class ChannelToClaimValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return 
f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class ChannelCountKey(typing.NamedTuple): + channel_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(channel_hash={self.channel_hash.hex()})" + + +class ChannelCountValue(typing.NamedTuple): + count: int + + +class SupportAmountKey(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class SupportAmountValue(typing.NamedTuple): + amount: int + + +class ClaimToSupportKey(typing.NamedTuple): + claim_hash: bytes + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, tx_num={self.tx_num}, " \ + f"position={self.position})" + + +class ClaimToSupportValue(typing.NamedTuple): + amount: int + + +class SupportToClaimKey(typing.NamedTuple): + tx_num: int + position: int + + +class SupportToClaimValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class ClaimExpirationKey(typing.NamedTuple): + expiration: int + tx_num: int + position: int + + +class ClaimExpirationValue(typing.NamedTuple): + claim_hash: bytes + normalized_name: str + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, normalized_name={self.normalized_name})" + + +class ClaimTakeoverKey(typing.NamedTuple): + normalized_name: str + + +class ClaimTakeoverValue(typing.NamedTuple): + claim_hash: bytes + height: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, height={self.height})" + + +class PendingActivationKey(typing.NamedTuple): + height: int + txo_type: int + tx_num: int + position: int + + @property + def is_support(self) -> bool: + return self.txo_type == ACTIVATED_SUPPORT_TXO_TYPE + + @property + def is_claim(self) -> bool: + return self.txo_type == ACTIVATED_CLAIM_TXO_TYPE + + +class PendingActivationValue(typing.NamedTuple): + claim_hash: bytes + normalized_name: str + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, normalized_name={self.normalized_name})" + + +class ActivationKey(typing.NamedTuple): + txo_type: int + tx_num: int + position: int + + +class ActivationValue(typing.NamedTuple): + height: int + claim_hash: bytes + normalized_name: str + + def __str__(self): + return f"{self.__class__.__name__}(height={self.height}, claim_hash={self.claim_hash.hex()}, " \ + f"normalized_name={self.normalized_name})" + + +class ActiveAmountKey(typing.NamedTuple): + claim_hash: bytes + txo_type: int + activation_height: int + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()}, txo_type={self.txo_type}, " \ + f"activation_height={self.activation_height}, tx_num={self.tx_num}, position={self.position})" + + +class ActiveAmountValue(typing.NamedTuple): + amount: int + + +class EffectiveAmountKey(typing.NamedTuple): + normalized_name: str + effective_amount: int + tx_num: int + position: int + + +class EffectiveAmountValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class RepostKey(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class RepostValue(typing.NamedTuple): + reposted_claim_hash: bytes + + def 
__str__(self): + return f"{self.__class__.__name__}(reposted_claim_hash={self.reposted_claim_hash.hex()})" + + +class RepostedKey(typing.NamedTuple): + reposted_claim_hash: bytes + tx_num: int + position: int + + def __str__(self): + return f"{self.__class__.__name__}(reposted_claim_hash={self.reposted_claim_hash.hex()}, " \ + f"tx_num={self.tx_num}, position={self.position})" + + +class RepostedValue(typing.NamedTuple): + claim_hash: bytes + + def __str__(self): + return f"{self.__class__.__name__}(claim_hash={self.claim_hash.hex()})" + + +class TouchedOrDeletedClaimKey(typing.NamedTuple): + height: int + + +class TouchedOrDeletedClaimValue(typing.NamedTuple): + touched_claims: typing.Set[bytes] + deleted_claims: typing.Set[bytes] + + def __str__(self): + return f"{self.__class__.__name__}(" \ + f"touched_claims={','.join(map(lambda x: x.hex(), self.touched_claims))}," \ + f"deleted_claims={','.join(map(lambda x: x.hex(), self.deleted_claims))})" + + +class DBState(typing.NamedTuple): + genesis: bytes + height: int + tx_count: int + tip: bytes + utxo_flush_count: int + wall_time: int + first_sync: bool + db_version: int + hist_flush_count: int + comp_flush_count: int + comp_cursor: int + + +class ActiveAmountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.active_amount.value + key_struct = struct.Struct(b'>20sBLLH') + value_struct = struct.Struct(b'>Q') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + struct.Struct(b'>20sB').pack, + struct.Struct(b'>20sBL').pack, + struct.Struct(b'>20sBLL').pack, + struct.Struct(b'>20sBLLH').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes, txo_type: int, activation_height: int, tx_num: int, position: int): + return super().pack_key(claim_hash, txo_type, activation_height, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> ActiveAmountKey: + return ActiveAmountKey(*super().unpack_key(key)) + + @classmethod + def unpack_value(cls, data: bytes) -> ActiveAmountValue: + return ActiveAmountValue(*super().unpack_value(data)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return cls.value_struct.pack(amount) + + @classmethod + def pack_item(cls, claim_hash: bytes, txo_type: int, activation_height: int, tx_num: int, position: int, amount: int): + return cls.pack_key(claim_hash, txo_type, activation_height, tx_num, position), cls.pack_value(amount) + + +class ClaimToTXOPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_to_txo.value + key_struct = struct.Struct(b'>20s') + value_struct = struct.Struct(b'>LHLHQB') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes): + return super().pack_key(claim_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimToTXOKey: + assert key[:1] == cls.prefix and len(key) == 21 + return ClaimToTXOKey(key[1:]) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimToTXOValue: + tx_num, position, root_tx_num, root_position, amount, channel_signature_is_valid = cls.value_struct.unpack( + data[:21] + ) + name_len = int.from_bytes(data[21:23], byteorder='big') + name = data[23:23 + name_len].decode() + return ClaimToTXOValue( + tx_num, position, root_tx_num, root_position, amount, bool(channel_signature_is_valid), name + ) + + @classmethod + def pack_value(cls, tx_num: int, position: int, root_tx_num: int, root_position: int, amount: int, + channel_signature_is_valid: bool, name: str) -> bytes: + return cls.value_struct.pack( + tx_num, position, root_tx_num, root_position, 
amount, int(channel_signature_is_valid) + ) + length_encoded_name(name) + + @classmethod + def pack_item(cls, claim_hash: bytes, tx_num: int, position: int, root_tx_num: int, root_position: int, + amount: int, channel_signature_is_valid: bool, name: str): + return cls.pack_key(claim_hash), \ + cls.pack_value(tx_num, position, root_tx_num, root_position, amount, channel_signature_is_valid, name) + + +class TXOToClaimPrefixRow(PrefixRow): + prefix = DB_PREFIXES.txo_to_claim.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>20s') + + @classmethod + def pack_key(cls, tx_num: int, position: int): + return super().pack_key(tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> TXOToClaimKey: + return TXOToClaimKey(*super().unpack_key(key)) + + @classmethod + def unpack_value(cls, data: bytes) -> TXOToClaimValue: + claim_hash, = cls.value_struct.unpack(data[:20]) + name_len = int.from_bytes(data[20:22], byteorder='big') + name = data[22:22 + name_len].decode() + return TXOToClaimValue(claim_hash, name) + + @classmethod + def pack_value(cls, claim_hash: bytes, name: str) -> bytes: + return cls.value_struct.pack(claim_hash) + length_encoded_name(name) + + @classmethod + def pack_item(cls, tx_num: int, position: int, claim_hash: bytes, name: str): + return cls.pack_key(tx_num, position), \ + cls.pack_value(claim_hash, name) + + +def shortid_key_helper(struct_fmt): + packer = struct.Struct(struct_fmt).pack + def wrapper(name, *args): + return length_encoded_name(name) + packer(*args) + return wrapper + + +def shortid_key_partial_claim_helper(name: str, partial_claim_id: str): + assert len(partial_claim_id) < 40 + return length_encoded_name(name) + length_prefix(partial_claim_id) + + +class ClaimShortIDPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_short_id_prefix.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>LH') + key_part_lambdas = [ + lambda: b'', + length_encoded_name, + shortid_key_partial_claim_helper + ] + + @classmethod + def pack_key(cls, name: str, short_claim_id: str, root_tx_num: int, root_position: int): + return cls.prefix + length_encoded_name(name) + length_prefix(short_claim_id) +\ + cls.key_struct.pack(root_tx_num, root_position) + + @classmethod + def pack_value(cls, tx_num: int, position: int): + return super().pack_value(tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimShortIDKey: + assert key[:1] == cls.prefix + name_len = int.from_bytes(key[1:3], byteorder='big') + name = key[3:3 + name_len].decode() + claim_id_len = int.from_bytes(key[3+name_len:4+name_len], byteorder='big') + partial_claim_id = key[4+name_len:4+name_len+claim_id_len].decode() + return ClaimShortIDKey(name, partial_claim_id, *cls.key_struct.unpack(key[4 + name_len + claim_id_len:])) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimShortIDValue: + return ClaimShortIDValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, name: str, partial_claim_id: str, root_tx_num: int, root_position: int, + tx_num: int, position: int): + return cls.pack_key(name, partial_claim_id, root_tx_num, root_position), \ + cls.pack_value(tx_num, position) + + +class ClaimToChannelPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_to_channel.value + key_struct = struct.Struct(b'>20sLH') + value_struct = struct.Struct(b'>20s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + struct.Struct(b'>20sL').pack, + struct.Struct(b'>20sLH').pack + ] + + @classmethod + def pack_key(cls, 
claim_hash: bytes, tx_num: int, position: int): + return super().pack_key(claim_hash, tx_num, position) + + @classmethod + def pack_value(cls, signing_hash: bytes): + return super().pack_value(signing_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimToChannelKey: + return ClaimToChannelKey(*super().unpack_key(key)) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimToChannelValue: + return ClaimToChannelValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, claim_hash: bytes, tx_num: int, position: int, signing_hash: bytes): + return cls.pack_key(claim_hash, tx_num, position), cls.pack_value(signing_hash) + + +def channel_to_claim_helper(struct_fmt): + packer = struct.Struct(struct_fmt).pack + + def wrapper(signing_hash: bytes, name: str, *args): + return signing_hash + length_encoded_name(name) + packer(*args) + + return wrapper + + +class ChannelToClaimPrefixRow(PrefixRow): + prefix = DB_PREFIXES.channel_to_claim.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>20s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + channel_to_claim_helper(b''), + channel_to_claim_helper(b'>s'), + channel_to_claim_helper(b'>L'), + channel_to_claim_helper(b'>LH'), + ] + + @classmethod + def pack_key(cls, signing_hash: bytes, name: str, tx_num: int, position: int): + return cls.prefix + signing_hash + length_encoded_name(name) + cls.key_struct.pack( + tx_num, position + ) + + @classmethod + def unpack_key(cls, key: bytes) -> ChannelToClaimKey: + assert key[:1] == cls.prefix + signing_hash = key[1:21] + name_len = int.from_bytes(key[21:23], byteorder='big') + name = key[23:23 + name_len].decode() + tx_num, position = cls.key_struct.unpack(key[23 + name_len:]) + return ChannelToClaimKey( + signing_hash, name, tx_num, position + ) + + @classmethod + def pack_value(cls, claim_hash: bytes) -> bytes: + return super().pack_value(claim_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> ChannelToClaimValue: + return ChannelToClaimValue(*cls.value_struct.unpack(data)) + + @classmethod + def pack_item(cls, signing_hash: bytes, name: str, tx_num: int, position: int, + claim_hash: bytes): + return cls.pack_key(signing_hash, name, tx_num, position), \ + cls.pack_value(claim_hash) + + +class ClaimToSupportPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_to_support.value + key_struct = struct.Struct(b'>20sLH') + value_struct = struct.Struct(b'>Q') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + struct.Struct(b'>20sL').pack, + struct.Struct(b'>20sLH').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes, tx_num: int, position: int): + return super().pack_key(claim_hash, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimToSupportKey: + return ClaimToSupportKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return super().pack_value(amount) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimToSupportValue: + return ClaimToSupportValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, claim_hash: bytes, tx_num: int, position: int, amount: int): + return cls.pack_key(claim_hash, tx_num, position), \ + cls.pack_value(amount) + + +class SupportToClaimPrefixRow(PrefixRow): + prefix = DB_PREFIXES.support_to_claim.value + key_struct = struct.Struct(b'>LH') + value_struct = struct.Struct(b'>20s') + + @classmethod + def pack_key(cls, tx_num: int, position: int): + return 
super().pack_key(tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> SupportToClaimKey: + return SupportToClaimKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, claim_hash: bytes) -> bytes: + return super().pack_value(claim_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> SupportToClaimValue: + return SupportToClaimValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, tx_num: int, position: int, claim_hash: bytes): + return cls.pack_key(tx_num, position), \ + cls.pack_value(claim_hash) + + +class ClaimExpirationPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_expiration.value + key_struct = struct.Struct(b'>LLH') + value_struct = struct.Struct(b'>20s') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack, + struct.Struct(b'>LL').pack, + struct.Struct(b'>LLH').pack, + ] + + @classmethod + def pack_key(cls, expiration: int, tx_num: int, position: int) -> bytes: + return super().pack_key(expiration, tx_num, position) + + @classmethod + def pack_value(cls, claim_hash: bytes, name: str) -> bytes: + return cls.value_struct.pack(claim_hash) + length_encoded_name(name) + + @classmethod + def pack_item(cls, expiration: int, tx_num: int, position: int, claim_hash: bytes, name: str) -> typing.Tuple[bytes, bytes]: + return cls.pack_key(expiration, tx_num, position), cls.pack_value(claim_hash, name) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimExpirationKey: + return ClaimExpirationKey(*super().unpack_key(key)) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimExpirationValue: + name_len = int.from_bytes(data[20:22], byteorder='big') + name = data[22:22 + name_len].decode() + claim_id, = cls.value_struct.unpack(data[:20]) + return ClaimExpirationValue(claim_id, name) + + @classmethod + def unpack_item(cls, key: bytes, value: bytes) -> typing.Tuple[ClaimExpirationKey, ClaimExpirationValue]: + return cls.unpack_key(key), cls.unpack_value(value) + + +class ClaimTakeoverPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_takeover.value + value_struct = struct.Struct(b'>20sL') + + key_part_lambdas = [ + lambda: b'', + length_encoded_name + ] + + @classmethod + def pack_key(cls, name: str): + return cls.prefix + length_encoded_name(name) + + @classmethod + def pack_value(cls, claim_hash: bytes, takeover_height: int): + return super().pack_value(claim_hash, takeover_height) + + @classmethod + def unpack_key(cls, key: bytes) -> ClaimTakeoverKey: + assert key[:1] == cls.prefix + name_len = int.from_bytes(key[1:3], byteorder='big') + name = key[3:3 + name_len].decode() + return ClaimTakeoverKey(name) + + @classmethod + def unpack_value(cls, data: bytes) -> ClaimTakeoverValue: + return ClaimTakeoverValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, name: str, claim_hash: bytes, takeover_height: int): + return cls.pack_key(name), cls.pack_value(claim_hash, takeover_height) + + +class PendingActivationPrefixRow(PrefixRow): + prefix = DB_PREFIXES.pending_activation.value + key_struct = struct.Struct(b'>LBLH') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack, + struct.Struct(b'>LB').pack, + struct.Struct(b'>LBL').pack, + struct.Struct(b'>LBLH').pack + ] + + @classmethod + def pack_key(cls, height: int, txo_type: int, tx_num: int, position: int): + return super().pack_key(height, txo_type, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> PendingActivationKey: + return PendingActivationKey(*super().unpack_key(key)) + + @classmethod + def 
pack_value(cls, claim_hash: bytes, name: str) -> bytes: + return claim_hash + length_encoded_name(name) + + @classmethod + def unpack_value(cls, data: bytes) -> PendingActivationValue: + claim_hash = data[:20] + name_len = int.from_bytes(data[20:22], byteorder='big') + name = data[22:22 + name_len].decode() + return PendingActivationValue(claim_hash, name) + + @classmethod + def pack_item(cls, height: int, txo_type: int, tx_num: int, position: int, claim_hash: bytes, name: str): + return cls.pack_key(height, txo_type, tx_num, position), \ + cls.pack_value(claim_hash, name) + + +class ActivatedPrefixRow(PrefixRow): + prefix = DB_PREFIXES.activated_claim_and_support.value + key_struct = struct.Struct(b'>BLH') + value_struct = struct.Struct(b'>L20s') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>B').pack, + struct.Struct(b'>BL').pack, + struct.Struct(b'>BLH').pack + ] + + @classmethod + def pack_key(cls, txo_type: int, tx_num: int, position: int): + return super().pack_key(txo_type, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> ActivationKey: + return ActivationKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, height: int, claim_hash: bytes, name: str) -> bytes: + return cls.value_struct.pack(height, claim_hash) + length_encoded_name(name) + + @classmethod + def unpack_value(cls, data: bytes) -> ActivationValue: + height, claim_hash = cls.value_struct.unpack(data[:24]) + name_len = int.from_bytes(data[24:26], byteorder='big') + name = data[26:26 + name_len].decode() + return ActivationValue(height, claim_hash, name) + + @classmethod + def pack_item(cls, txo_type: int, tx_num: int, position: int, height: int, claim_hash: bytes, name: str): + return cls.pack_key(txo_type, tx_num, position), \ + cls.pack_value(height, claim_hash, name) + + +def effective_amount_helper(struct_fmt): + packer = struct.Struct(struct_fmt).pack + + def wrapper(name, *args): + if not args: + return length_encoded_name(name) + if len(args) == 1: + return length_encoded_name(name) + packer(0xffffffffffffffff - args[0]) + return length_encoded_name(name) + packer(0xffffffffffffffff - args[0], *args[1:]) + + return wrapper + + +class EffectiveAmountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.effective_amount.value + key_struct = struct.Struct(b'>QLH') + value_struct = struct.Struct(b'>20s') + key_part_lambdas = [ + lambda: b'', + length_encoded_name, + shortid_key_helper(b'>Q'), + shortid_key_helper(b'>QL'), + shortid_key_helper(b'>QLH'), + ] + + @classmethod + def pack_key(cls, name: str, effective_amount: int, tx_num: int, position: int): + return cls.prefix + length_encoded_name(name) + cls.key_struct.pack( + 0xffffffffffffffff - effective_amount, tx_num, position + ) + + @classmethod + def unpack_key(cls, key: bytes) -> EffectiveAmountKey: + assert key[:1] == cls.prefix + name_len = int.from_bytes(key[1:3], byteorder='big') + name = key[3:3 + name_len].decode() + ones_comp_effective_amount, tx_num, position = cls.key_struct.unpack(key[3 + name_len:]) + return EffectiveAmountKey(name, 0xffffffffffffffff - ones_comp_effective_amount, tx_num, position) + + @classmethod + def unpack_value(cls, data: bytes) -> EffectiveAmountValue: + return EffectiveAmountValue(*super().unpack_value(data)) + + @classmethod + def pack_value(cls, claim_hash: bytes) -> bytes: + return super().pack_value(claim_hash) + + @classmethod + def pack_item(cls, name: str, effective_amount: int, tx_num: int, position: int, claim_hash: bytes): + return cls.pack_key(name, effective_amount, tx_num, 
position), cls.pack_value(claim_hash) + + +class RepostPrefixRow(PrefixRow): + prefix = DB_PREFIXES.repost.value + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes): + return cls.prefix + claim_hash + + @classmethod + def unpack_key(cls, key: bytes) -> RepostKey: + assert key[:1] == cls.prefix + assert len(key) == 21 + return RepostKey(key[1:]) + + @classmethod + def pack_value(cls, reposted_claim_hash: bytes) -> bytes: + return reposted_claim_hash + + @classmethod + def unpack_value(cls, data: bytes) -> RepostValue: + return RepostValue(data) + + @classmethod + def pack_item(cls, claim_hash: bytes, reposted_claim_hash: bytes): + return cls.pack_key(claim_hash), cls.pack_value(reposted_claim_hash) + + +class RepostedPrefixRow(PrefixRow): + prefix = DB_PREFIXES.reposted_claim.value + key_struct = struct.Struct(b'>20sLH') + value_struct = struct.Struct(b'>20s') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack, + struct.Struct(b'>20sL').pack, + struct.Struct(b'>20sLH').pack + ] + + @classmethod + def pack_key(cls, reposted_claim_hash: bytes, tx_num: int, position: int): + return super().pack_key(reposted_claim_hash, tx_num, position) + + @classmethod + def unpack_key(cls, key: bytes) -> RepostedKey: + return RepostedKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, claim_hash: bytes) -> bytes: + return super().pack_value(claim_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> RepostedValue: + return RepostedValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, reposted_claim_hash: bytes, tx_num: int, position: int, claim_hash: bytes): + return cls.pack_key(reposted_claim_hash, tx_num, position), cls.pack_value(claim_hash) + + +class UndoPrefixRow(PrefixRow): + prefix = DB_PREFIXES.undo.value + key_struct = struct.Struct(b'>Q') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>Q').pack + ] + + @classmethod + def pack_key(cls, height: int): + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> int: + assert key[:1] == cls.prefix + height, = cls.key_struct.unpack(key[1:]) + return height + + @classmethod + def pack_value(cls, undo_ops: bytes) -> bytes: + return undo_ops + + @classmethod + def unpack_value(cls, data: bytes) -> bytes: + return data + + @classmethod + def pack_item(cls, height: int, undo_ops: bytes): + return cls.pack_key(height), cls.pack_value(undo_ops) + + +class BlockHashPrefixRow(PrefixRow): + prefix = DB_PREFIXES.block_hash.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int) -> bytes: + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> BlockHashKey: + return BlockHashKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, block_hash: bytes) -> bytes: + return super().pack_value(block_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> BlockHashValue: + return BlockHashValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, height: int, block_hash: bytes): + return cls.pack_key(height), cls.pack_value(block_hash) + + +class BlockHeaderPrefixRow(PrefixRow): + prefix = DB_PREFIXES.header.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>112s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def 
pack_key(cls, height: int) -> bytes: + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> BlockHeaderKey: + return BlockHeaderKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, header: bytes) -> bytes: + return super().pack_value(header) + + @classmethod + def unpack_value(cls, data: bytes) -> BlockHeaderValue: + return BlockHeaderValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, height: int, header: bytes): + return cls.pack_key(height), cls.pack_value(header) + + +class TXNumPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx_num.value + key_struct = struct.Struct(b'>32s') + value_struct = struct.Struct(b'>L') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>32s').pack + ] + + @classmethod + def pack_key(cls, tx_hash: bytes) -> bytes: + return super().pack_key(tx_hash) + + @classmethod + def unpack_key(cls, tx_hash: bytes) -> TxNumKey: + return TxNumKey(*super().unpack_key(tx_hash)) + + @classmethod + def pack_value(cls, tx_num: int) -> bytes: + return super().pack_value(tx_num) + + @classmethod + def unpack_value(cls, data: bytes) -> TxNumValue: + return TxNumValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, tx_hash: bytes, tx_num: int): + return cls.pack_key(tx_hash), cls.pack_value(tx_num) + + +class TxCountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx_count.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>L') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int) -> bytes: + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> TxCountKey: + return TxCountKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, tx_count: int) -> bytes: + return super().pack_value(tx_count) + + @classmethod + def unpack_value(cls, data: bytes) -> TxCountValue: + return TxCountValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, height: int, tx_count: int): + return cls.pack_key(height), cls.pack_value(tx_count) + + +class TXHashPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx_hash.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, tx_num: int) -> bytes: + return super().pack_key(tx_num) + + @classmethod + def unpack_key(cls, key: bytes) -> TxHashKey: + return TxHashKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, tx_hash: bytes) -> bytes: + return super().pack_value(tx_hash) + + @classmethod + def unpack_value(cls, data: bytes) -> TxHashValue: + return TxHashValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, tx_num: int, tx_hash: bytes): + return cls.pack_key(tx_num), cls.pack_value(tx_hash) + + +class TXPrefixRow(PrefixRow): + prefix = DB_PREFIXES.tx.value + key_struct = struct.Struct(b'>32s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>32s').pack + ] + + @classmethod + def pack_key(cls, tx_hash: bytes) -> bytes: + return super().pack_key(tx_hash) + + @classmethod + def unpack_key(cls, tx_hash: bytes) -> TxKey: + return TxKey(*super().unpack_key(tx_hash)) + + @classmethod + def pack_value(cls, tx: bytes) -> bytes: + return tx + + @classmethod + def unpack_value(cls, data: bytes) -> TxValue: + return TxValue(data) + + @classmethod + def pack_item(cls, tx_hash: bytes, raw_tx: bytes): + return cls.pack_key(tx_hash), cls.pack_value(raw_tx) + + +class 
UTXOPrefixRow(PrefixRow): + prefix = DB_PREFIXES.utxo.value + key_struct = struct.Struct(b'>11sLH') + value_struct = struct.Struct(b'>Q') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>11s').pack, + struct.Struct(b'>11sL').pack, + struct.Struct(b'>11sLH').pack + ] + + @classmethod + def pack_key(cls, hashX: bytes, tx_num, nout: int): + return super().pack_key(hashX, tx_num, nout) + + @classmethod + def unpack_key(cls, key: bytes) -> UTXOKey: + return UTXOKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return super().pack_value(amount) + + @classmethod + def unpack_value(cls, data: bytes) -> UTXOValue: + return UTXOValue(*cls.value_struct.unpack(data)) + + @classmethod + def pack_item(cls, hashX: bytes, tx_num: int, nout: int, amount: int): + return cls.pack_key(hashX, tx_num, nout), cls.pack_value(amount) + + +class HashXUTXOPrefixRow(PrefixRow): + prefix = DB_PREFIXES.hashx_utxo.value + key_struct = struct.Struct(b'>4sLH') + value_struct = struct.Struct(b'>11s') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>4s').pack, + struct.Struct(b'>4sL').pack, + struct.Struct(b'>4sLH').pack + ] + + @classmethod + def pack_key(cls, short_tx_hash: bytes, tx_num, nout: int): + return super().pack_key(short_tx_hash, tx_num, nout) + + @classmethod + def unpack_key(cls, key: bytes) -> HashXUTXOKey: + return HashXUTXOKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, hashX: bytes) -> bytes: + return super().pack_value(hashX) + + @classmethod + def unpack_value(cls, data: bytes) -> HashXUTXOValue: + return HashXUTXOValue(*cls.value_struct.unpack(data)) + + @classmethod + def pack_item(cls, short_tx_hash: bytes, tx_num: int, nout: int, hashX: bytes): + return cls.pack_key(short_tx_hash, tx_num, nout), cls.pack_value(hashX) + + +class HashXHistoryPrefixRow(PrefixRow): + prefix = DB_PREFIXES.hashx_history.value + key_struct = struct.Struct(b'>11sL') + + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>11s').pack, + struct.Struct(b'>11sL').pack + ] + + @classmethod + def pack_key(cls, hashX: bytes, height: int): + return super().pack_key(hashX, height) + + @classmethod + def unpack_key(cls, key: bytes) -> HashXHistoryKey: + return HashXHistoryKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, history: typing.List[int]) -> bytes: + a = array.array('I') + a.fromlist(history) + return a.tobytes() + + @classmethod + def unpack_value(cls, data: bytes) -> array.array: + a = array.array('I') + a.frombytes(data) + return a + + @classmethod + def pack_item(cls, hashX: bytes, height: int, history: typing.List[int]): + return cls.pack_key(hashX, height), cls.pack_value(history) + + +class TouchedOrDeletedPrefixRow(PrefixRow): + prefix = DB_PREFIXES.claim_diff.value + key_struct = struct.Struct(b'>L') + value_struct = struct.Struct(b'>LL') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>L').pack + ] + + @classmethod + def pack_key(cls, height: int): + return super().pack_key(height) + + @classmethod + def unpack_key(cls, key: bytes) -> TouchedOrDeletedClaimKey: + return TouchedOrDeletedClaimKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, touched: typing.Set[bytes], deleted: typing.Set[bytes]) -> bytes: + assert True if not touched else all(len(item) == 20 for item in touched) + assert True if not deleted else all(len(item) == 20 for item in deleted) + return cls.value_struct.pack(len(touched), len(deleted)) + b''.join(sorted(touched)) + b''.join(sorted(deleted)) + + @classmethod + 
def unpack_value(cls, data: bytes) -> TouchedOrDeletedClaimValue: + touched_len, deleted_len = cls.value_struct.unpack(data[:8]) + data = data[8:] + assert len(data) == 20 * (touched_len + deleted_len) + touched_bytes, deleted_bytes = data[:touched_len*20], data[touched_len*20:] + return TouchedOrDeletedClaimValue( + {touched_bytes[20*i:20*(i+1)] for i in range(touched_len)}, + {deleted_bytes[20*i:20*(i+1)] for i in range(deleted_len)} + ) + + @classmethod + def pack_item(cls, height, touched, deleted): + return cls.pack_key(height), cls.pack_value(touched, deleted) + + +class ChannelCountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.channel_count.value + key_struct = struct.Struct(b'>20s') + value_struct = struct.Struct(b'>L') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack + ] + + @classmethod + def pack_key(cls, channel_hash: int): + return super().pack_key(channel_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> ChannelCountKey: + return ChannelCountKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, count: int) -> bytes: + return super().pack_value(count) + + @classmethod + def unpack_value(cls, data: bytes) -> ChannelCountValue: + return ChannelCountValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, channel_hash, count): + return cls.pack_key(channel_hash), cls.pack_value(count) + + +class SupportAmountPrefixRow(PrefixRow): + prefix = DB_PREFIXES.support_amount.value + key_struct = struct.Struct(b'>20s') + value_struct = struct.Struct(b'>Q') + key_part_lambdas = [ + lambda: b'', + struct.Struct(b'>20s').pack + ] + + @classmethod + def pack_key(cls, claim_hash: bytes): + return super().pack_key(claim_hash) + + @classmethod + def unpack_key(cls, key: bytes) -> SupportAmountKey: + return SupportAmountKey(*super().unpack_key(key)) + + @classmethod + def pack_value(cls, amount: int) -> bytes: + return super().pack_value(amount) + + @classmethod + def unpack_value(cls, data: bytes) -> SupportAmountValue: + return SupportAmountValue(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, claim_hash, amount): + return cls.pack_key(claim_hash), cls.pack_value(amount) + + +class DBStatePrefixRow(PrefixRow): + prefix = DB_PREFIXES.db_state.value + value_struct = struct.Struct(b'>32sLL32sLLBBlll') + key_struct = struct.Struct(b'') + + key_part_lambdas = [ + lambda: b'' + ] + + @classmethod + def pack_key(cls) -> bytes: + return cls.prefix + + @classmethod + def unpack_key(cls, key: bytes): + return + + @classmethod + def pack_value(cls, genesis: bytes, height: int, tx_count: int, tip: bytes, utxo_flush_count: int, wall_time: int, + first_sync: bool, db_version: int, hist_flush_count: int, comp_flush_count: int, + comp_cursor: int) -> bytes: + return super().pack_value( + genesis, height, tx_count, tip, utxo_flush_count, + wall_time, 1 if first_sync else 0, db_version, hist_flush_count, + comp_flush_count, comp_cursor + ) + + @classmethod + def unpack_value(cls, data: bytes) -> DBState: + return DBState(*super().unpack_value(data)) + + @classmethod + def pack_item(cls, genesis: bytes, height: int, tx_count: int, tip: bytes, utxo_flush_count: int, wall_time: int, + first_sync: bool, db_version: int, hist_flush_count: int, comp_flush_count: int, + comp_cursor: int): + return cls.pack_key(), cls.pack_value( + genesis, height, tx_count, tip, utxo_flush_count, wall_time, first_sync, db_version, hist_flush_count, + comp_flush_count, comp_cursor + ) + + +class LevelDBStore(KeyValueStorage): + def __init__(self, path: str, 
cache_mb: int, max_open_files: int): + import plyvel + self.db = plyvel.DB( + path, create_if_missing=True, max_open_files=max_open_files, + lru_cache_size=cache_mb * 1024 * 1024, write_buffer_size=64 * 1024 * 1024, + max_file_size=1024 * 1024 * 64, bloom_filter_bits=32 + ) + + def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]: + return self.db.get(key, fill_cache=fill_cache) + + def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None, + include_key=True, include_value=True, fill_cache=True): + return self.db.iterator( + reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop, + prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache + ) + + def write_batch(self, transaction: bool = False, sync: bool = False): + return self.db.write_batch(transaction=transaction, sync=sync) + + def close(self): + return self.db.close() + + @property + def closed(self) -> bool: + return self.db.closed + + +class HubDB(PrefixDB): + def __init__(self, path: str, cache_mb: int = 128, reorg_limit: int = 200, max_open_files: int = 512, + unsafe_prefixes: Optional[typing.Set[bytes]] = None): + db = LevelDBStore(path, cache_mb, max_open_files) + super().__init__(db, reorg_limit, unsafe_prefixes=unsafe_prefixes) + self.claim_to_support = ClaimToSupportPrefixRow(db, self._op_stack) + self.support_to_claim = SupportToClaimPrefixRow(db, self._op_stack) + self.claim_to_txo = ClaimToTXOPrefixRow(db, self._op_stack) + self.txo_to_claim = TXOToClaimPrefixRow(db, self._op_stack) + self.claim_to_channel = ClaimToChannelPrefixRow(db, self._op_stack) + self.channel_to_claim = ChannelToClaimPrefixRow(db, self._op_stack) + self.claim_short_id = ClaimShortIDPrefixRow(db, self._op_stack) + self.claim_expiration = ClaimExpirationPrefixRow(db, self._op_stack) + self.claim_takeover = ClaimTakeoverPrefixRow(db, self._op_stack) + self.pending_activation = PendingActivationPrefixRow(db, self._op_stack) + self.activated = ActivatedPrefixRow(db, self._op_stack) + self.active_amount = ActiveAmountPrefixRow(db, self._op_stack) + self.effective_amount = EffectiveAmountPrefixRow(db, self._op_stack) + self.repost = RepostPrefixRow(db, self._op_stack) + self.reposted_claim = RepostedPrefixRow(db, self._op_stack) + self.undo = UndoPrefixRow(db, self._op_stack) + self.utxo = UTXOPrefixRow(db, self._op_stack) + self.hashX_utxo = HashXUTXOPrefixRow(db, self._op_stack) + self.hashX_history = HashXHistoryPrefixRow(db, self._op_stack) + self.block_hash = BlockHashPrefixRow(db, self._op_stack) + self.tx_count = TxCountPrefixRow(db, self._op_stack) + self.tx_hash = TXHashPrefixRow(db, self._op_stack) + self.tx_num = TXNumPrefixRow(db, self._op_stack) + self.tx = TXPrefixRow(db, self._op_stack) + self.header = BlockHeaderPrefixRow(db, self._op_stack) + self.touched_or_deleted = TouchedOrDeletedPrefixRow(db, self._op_stack) + self.channel_count = ChannelCountPrefixRow(db, self._op_stack) + self.db_state = DBStatePrefixRow(db, self._op_stack) + self.support_amount = SupportAmountPrefixRow(db, self._op_stack) + + +def auto_decode_item(key: bytes, value: bytes) -> Union[Tuple[NamedTuple, NamedTuple], Tuple[bytes, bytes]]: + try: + return ROW_TYPES[key[:1]].unpack_item(key, value) + except KeyError: + return key, value diff --git a/lbry/wallet/server/db/revertable.py b/lbry/wallet/server/db/revertable.py new file mode 100644 index 000000000..e82c36f12 --- /dev/null +++ b/lbry/wallet/server/db/revertable.py @@ 
-0,0 +1,147 @@ +import struct +import logging +from string import printable +from collections import defaultdict +from typing import Tuple, Iterable, Callable, Optional +from lbry.wallet.server.db import DB_PREFIXES + +_OP_STRUCT = struct.Struct('>BLL') +log = logging.getLogger() + + +class RevertableOp: + __slots__ = [ + 'key', + 'value', + ] + is_put = 0 + + def __init__(self, key: bytes, value: bytes): + self.key = key + self.value = value + + @property + def is_delete(self) -> bool: + return not self.is_put + + def invert(self) -> 'RevertableOp': + raise NotImplementedError() + + def pack(self) -> bytes: + """ + Serialize to bytes + """ + return struct.pack( + f'>BLL{len(self.key)}s{len(self.value)}s', int(self.is_put), len(self.key), len(self.value), self.key, + self.value + ) + + @classmethod + def unpack(cls, packed: bytes) -> Tuple['RevertableOp', bytes]: + """ + Deserialize from bytes + + :param packed: bytes containing at least one packed revertable op + :return: tuple of the deserialized op (a put or a delete) and the remaining serialized bytes + """ + is_put, key_len, val_len = _OP_STRUCT.unpack(packed[:9]) + key = packed[9:9 + key_len] + value = packed[9 + key_len:9 + key_len + val_len] + if is_put == 1: + return RevertablePut(key, value), packed[9 + key_len + val_len:] + return RevertableDelete(key, value), packed[9 + key_len + val_len:] + + def __eq__(self, other: 'RevertableOp') -> bool: + return (self.is_put, self.key, self.value) == (other.is_put, other.key, other.value) + + def __repr__(self) -> str: + return str(self) + + def __str__(self) -> str: + from lbry.wallet.server.db.prefixes import auto_decode_item + k, v = auto_decode_item(self.key, self.value) + key = ''.join(c if c in printable else '.' for c in str(k)) + val = ''.join(c if c in printable else '.' for c in str(v)) + return f"{'PUT' if self.is_put else 'DELETE'} {DB_PREFIXES(self.key[:1]).name}: {key} | {val}" + + +class RevertableDelete(RevertableOp): + def invert(self): + return RevertablePut(self.key, self.value) + + +class RevertablePut(RevertableOp): + is_put = True + + def invert(self): + return RevertableDelete(self.key, self.value) + + +class OpStackIntegrity(Exception): + pass + + +class RevertableOpStack: + def __init__(self, get_fn: Callable[[bytes], Optional[bytes]], unsafe_prefixes=None): + self._get = get_fn + self._items = defaultdict(list) + self._unsafe_prefixes = unsafe_prefixes or set() + + def append_op(self, op: RevertableOp): + inverted = op.invert() + if self._items[op.key] and inverted == self._items[op.key][-1]: + self._items[op.key].pop() # if the new op is the inverse of the last op, we can safely null both + return + elif self._items[op.key] and self._items[op.key][-1] == op: # duplicate of last op + return # raise an error? 
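+        # Integrity checks against the value currently stored under this key:
+        # a put over an existing key (with no pending delete queued for the
+        # stored value) or a delete of a missing or mismatched value raises
+        # OpStackIntegrity, unless the key's prefix is listed as unsafe, in
+        # which case the error is only logged and the op is kept anyway.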
+ stored_val = self._get(op.key) + has_stored_val = stored_val is not None + delete_stored_op = None if not has_stored_val else RevertableDelete(op.key, stored_val) + will_delete_existing_stored = False if delete_stored_op is None else (delete_stored_op in self._items[op.key]) + try: + if op.is_put and has_stored_val and not will_delete_existing_stored: + raise OpStackIntegrity( + f"db op tries to add on top of existing key without deleting first: {op}" + ) + elif op.is_delete and has_stored_val and stored_val != op.value and not will_delete_existing_stored: + # there is a value and we're not deleting it in this op + # check that a delete for the stored value is in the stack + raise OpStackIntegrity(f"delete {op}") + elif op.is_delete and not has_stored_val: + raise OpStackIntegrity(f"db op tries to delete nonexistent key: {op}") + elif op.is_delete and stored_val != op.value: + raise OpStackIntegrity(f"db op tries to delete with incorrect value: {op}") + except OpStackIntegrity as err: + if op.key[:1] in self._unsafe_prefixes: + log.error(f"skipping over integrity error: {err}") + else: + raise err + self._items[op.key].append(op) + + def extend_ops(self, ops: Iterable[RevertableOp]): + for op in ops: + self.append_op(op) + + def clear(self): + self._items.clear() + + def __len__(self): + return sum(map(len, self._items.values())) + + def __iter__(self): + for key, ops in self._items.items(): + for op in ops: + yield op + + def __reversed__(self): + for key, ops in self._items.items(): + for op in reversed(ops): + yield op + + def get_undo_ops(self) -> bytes: + return b''.join(op.invert().pack() for op in reversed(self)) + + def apply_packed_undo_ops(self, packed: bytes): + while packed: + op, packed = RevertableOp.unpack(packed) + self.append_op(op) diff --git a/lbry/wallet/server/db/trending/__init__.py b/lbry/wallet/server/db/trending/__init__.py deleted file mode 100644 index 86d94bdc3..000000000 --- a/lbry/wallet/server/db/trending/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from . import zscore -from . import ar -from . import variable_decay - -TRENDING_ALGORITHMS = { - 'zscore': zscore, - 'ar': ar, - 'variable_decay': variable_decay -} diff --git a/lbry/wallet/server/db/trending/ar.py b/lbry/wallet/server/db/trending/ar.py deleted file mode 100644 index 2e7b3474f..000000000 --- a/lbry/wallet/server/db/trending/ar.py +++ /dev/null @@ -1,265 +0,0 @@ -import copy -import math -import time - -# Half life in blocks -HALF_LIFE = 134 - -# Decay coefficient per block -DECAY = 0.5**(1.0/HALF_LIFE) - -# How frequently to write trending values to the db -SAVE_INTERVAL = 10 - -# Renormalisation interval -RENORM_INTERVAL = 1000 - -# Assertion -assert RENORM_INTERVAL % SAVE_INTERVAL == 0 - -# Decay coefficient per renormalisation interval -DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL) - -# Log trending calculations? -TRENDING_LOG = True - - -def install(connection): - """ - Install the AR trending algorithm. - """ - check_trending_values(connection) - - if TRENDING_LOG: - f = open("trending_ar.log", "a") - f.close() - -# Stub -CREATE_TREND_TABLE = "" - - -def check_trending_values(connection): - """ - If the trending values appear to be based on the zscore algorithm, - reset them. This will allow resyncing from a standard snapshot. - """ - c = connection.cursor() - needs_reset = False - for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"): - if row[0] != 0: - needs_reset = True - break - - if needs_reset: - print("Resetting some columns. 
This might take a while...", flush=True, end="") - c.execute(""" BEGIN; - UPDATE claim SET trending_group = 0; - UPDATE claim SET trending_mixed = 0; - UPDATE claim SET trending_global = 0; - UPDATE claim SET trending_local = 0; - COMMIT;""") - print("done.") - - -def spike_height(trending_score, x, x_old, time_boost=1.0): - """ - Compute the size of a trending spike. - """ - - # Change in softened amount - change_in_softened_amount = x**0.25 - x_old**0.25 - - # Softened change in amount - delta = x - x_old - softened_change_in_amount = abs(delta)**0.25 - - # Softened change in amount counts more for minnows - if delta > 0.0: - if trending_score >= 0.0: - multiplier = 0.1/((trending_score/time_boost + softened_change_in_amount) + 1.0) - softened_change_in_amount *= multiplier - else: - softened_change_in_amount *= -1.0 - - return time_boost*(softened_change_in_amount + change_in_softened_amount) - - -def get_time_boost(height): - """ - Return the time boost at a given height. - """ - return 1.0/DECAY**(height % RENORM_INTERVAL) - - -def trending_log(s): - """ - Log a string. - """ - if TRENDING_LOG: - fout = open("trending_ar.log", "a") - fout.write(s) - fout.flush() - fout.close() - -class TrendingData: - """ - An object of this class holds trending data - """ - def __init__(self): - self.claims = {} - - # Have all claims been read from db yet? - self.initialised = False - - def insert_claim_from_load(self, claim_hash, trending_score, total_amount): - assert not self.initialised - self.claims[claim_hash] = {"trending_score": trending_score, - "total_amount": total_amount, - "changed": False} - - - def update_claim(self, claim_hash, total_amount, time_boost=1.0): - """ - Update trending data for a claim, given its new total amount. - """ - assert self.initialised - - # Extract existing total amount and trending score - # or use starting values if the claim is new - if claim_hash in self.claims: - old_state = copy.deepcopy(self.claims[claim_hash]) - else: - old_state = {"trending_score": 0.0, - "total_amount": 0.0, - "changed": False} - - # Calculate LBC change - change = total_amount - old_state["total_amount"] - - # Modify data if there was an LBC change - if change != 0.0: - spike = spike_height(old_state["trending_score"], - total_amount, - old_state["total_amount"], - time_boost) - trending_score = old_state["trending_score"] + spike - self.claims[claim_hash] = {"total_amount": total_amount, - "trending_score": trending_score, - "changed": True} - - - -def test_trending(): - """ - Quick trending test for something receiving 10 LBC per block - """ - data = TrendingData() - data.insert_claim_from_load("abc", 10.0, 1.0) - data.initialised = True - - for height in range(1, 5000): - - if height % RENORM_INTERVAL == 0: - data.claims["abc"]["trending_score"] *= DECAY_PER_RENORM - - time_boost = get_time_boost(height) - data.update_claim("abc", data.claims["abc"]["total_amount"] + 10.0, - time_boost=time_boost) - - - print(str(height) + " " + str(time_boost) + " " \ - + str(data.claims["abc"]["trending_score"])) - - - -# One global instance -# pylint: disable=C0103 -trending_data = TrendingData() - -def run(db, height, final_height, recalculate_claim_hashes): - - if height < final_height - 5*HALF_LIFE: - trending_log("Skipping AR trending at block {h}.\n".format(h=height)) - return - - start = time.time() - - trending_log("Calculating AR trending at block {h}.\n".format(h=height)) - trending_log(" Length of trending data = {l}.\n"\ - .format(l=len(trending_data.claims))) - - # Renormalise 
trending scores and mark all as having changed - if height % RENORM_INTERVAL == 0: - trending_log(" Renormalising trending scores...") - - keys = trending_data.claims.keys() - for key in keys: - if trending_data.claims[key]["trending_score"] != 0.0: - trending_data.claims[key]["trending_score"] *= DECAY_PER_RENORM - trending_data.claims[key]["changed"] = True - - # Tiny becomes zero - if abs(trending_data.claims[key]["trending_score"]) < 1E-9: - trending_data.claims[key]["trending_score"] = 0.0 - - trending_log("done.\n") - - - # Regular message. - trending_log(" Reading total_amounts from db and updating"\ - + " trending scores in RAM...") - - # Get the value of the time boost - time_boost = get_time_boost(height) - - # Update claims from db - if not trending_data.initialised: - # On fresh launch - for row in db.execute(""" - SELECT claim_hash, trending_mixed, - (amount + support_amount) - AS total_amount - FROM claim; - """): - trending_data.insert_claim_from_load(row[0], row[1], 1E-8*row[2]) - trending_data.initialised = True - else: - for row in db.execute(f""" - SELECT claim_hash, - (amount + support_amount) - AS total_amount - FROM claim - WHERE claim_hash IN - ({','.join('?' for _ in recalculate_claim_hashes)}); - """, list(recalculate_claim_hashes)): - trending_data.update_claim(row[0], 1E-8*row[1], time_boost) - - trending_log("done.\n") - - - # Write trending scores to DB - if height % SAVE_INTERVAL == 0: - - trending_log(" Writing trending scores to db...") - - the_list = [] - keys = trending_data.claims.keys() - for key in keys: - if trending_data.claims[key]["changed"]: - the_list.append((trending_data.claims[key]["trending_score"], - key)) - trending_data.claims[key]["changed"] = False - - trending_log("{n} scores to write...".format(n=len(the_list))) - - db.executemany("UPDATE claim SET trending_mixed=? WHERE claim_hash=?;", - the_list) - - trending_log("done.\n") - - trending_log("Trending operations took {time} seconds.\n\n"\ - .format(time=time.time() - start)) - - -if __name__ == "__main__": - test_trending() diff --git a/lbry/wallet/server/db/trending/variable_decay.py b/lbry/wallet/server/db/trending/variable_decay.py deleted file mode 100644 index d900920a0..000000000 --- a/lbry/wallet/server/db/trending/variable_decay.py +++ /dev/null @@ -1,485 +0,0 @@ -""" -AR-like trending with a delayed effect and a faster -decay rate for high valued claims. -""" - -import math -import time -import sqlite3 - -# Half life in blocks *for lower LBC claims* (it's shorter for whale claims) -HALF_LIFE = 200 - -# Whale threshold, in LBC (higher -> less DB writing) -WHALE_THRESHOLD = 10000.0 - -# Decay coefficient per block -DECAY = 0.5**(1.0/HALF_LIFE) - -# How frequently to write trending values to the db -SAVE_INTERVAL = 10 - -# Renormalisation interval -RENORM_INTERVAL = 1000 - -# Assertion -assert RENORM_INTERVAL % SAVE_INTERVAL == 0 - -# Decay coefficient per renormalisation interval -DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL) - -# Log trending calculations? -TRENDING_LOG = True - - -def install(connection): - """ - Install the trending algorithm. - """ - check_trending_values(connection) - trending_data.initialise(connection.cursor()) - - if TRENDING_LOG: - f = open("trending_variable_decay.log", "a") - f.close() - -# Stub -CREATE_TREND_TABLE = "" - -def check_trending_values(connection): - """ - If the trending values appear to be based on the zscore algorithm, - reset them. This will allow resyncing from a standard snapshot. 
- """ - c = connection.cursor() - needs_reset = False - for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"): - if row[0] != 0: - needs_reset = True - break - - if needs_reset: - print("Resetting some columns. This might take a while...", flush=True, - end="") - c.execute(""" BEGIN; - UPDATE claim SET trending_group = 0; - UPDATE claim SET trending_mixed = 0; - COMMIT;""") - print("done.") - - - - -def trending_log(s): - """ - Log a string to the log file - """ - if TRENDING_LOG: - fout = open("trending_variable_decay.log", "a") - fout.write(s) - fout.flush() - fout.close() - - -def trending_unit(height): - """ - Return the trending score unit at a given height. - """ - # Round to the beginning of a SAVE_INTERVAL batch of blocks. - _height = height - (height % SAVE_INTERVAL) - return 1.0/DECAY**(height % RENORM_INTERVAL) - - -class TrendingDB: - """ - An in-memory database of trending scores - """ - - def __init__(self): - self.conn = sqlite3.connect(":memory:", check_same_thread=False) - self.cursor = self.conn.cursor() - self.initialised = False - self.write_needed = set() - - def execute(self, query, *args, **kwargs): - return self.conn.execute(query, *args, **kwargs) - - def executemany(self, query, *args, **kwargs): - return self.conn.executemany(query, *args, **kwargs) - - def begin(self): - self.execute("BEGIN;") - - def commit(self): - self.execute("COMMIT;") - - def initialise(self, db): - """ - Pass in claims.db - """ - if self.initialised: - return - - trending_log("Initialising trending database...") - - # The need for speed - self.execute("PRAGMA JOURNAL_MODE=OFF;") - self.execute("PRAGMA SYNCHRONOUS=0;") - - self.begin() - - # Create the tables - self.execute(""" - CREATE TABLE IF NOT EXISTS claims - (claim_hash BYTES PRIMARY KEY, - lbc REAL NOT NULL DEFAULT 0.0, - trending_score REAL NOT NULL DEFAULT 0.0) - WITHOUT ROWID;""") - - self.execute(""" - CREATE TABLE IF NOT EXISTS spikes - (id INTEGER PRIMARY KEY, - claim_hash BYTES NOT NULL, - height INTEGER NOT NULL, - mass REAL NOT NULL, - FOREIGN KEY (claim_hash) - REFERENCES claims (claim_hash));""") - - # Clear out any existing data - self.execute("DELETE FROM claims;") - self.execute("DELETE FROM spikes;") - - # Create indexes - self.execute("CREATE INDEX idx1 ON spikes (claim_hash, height, mass);") - self.execute("CREATE INDEX idx2 ON spikes (claim_hash, height, mass DESC);") - self.execute("CREATE INDEX idx3 on claims (lbc DESC, claim_hash, trending_score);") - - # Import data from claims.db - for row in db.execute(""" - SELECT claim_hash, - 1E-8*(amount + support_amount) AS lbc, - trending_mixed - FROM claim; - """): - self.execute("INSERT INTO claims VALUES (?, ?, ?);", row) - self.commit() - - self.initialised = True - trending_log("done.\n") - - def apply_spikes(self, height): - """ - Apply spikes that are due. This occurs inside a transaction. - """ - - spikes = [] - unit = trending_unit(height) - for row in self.execute(""" - SELECT SUM(mass), claim_hash FROM spikes - WHERE height = ? - GROUP BY claim_hash; - """, (height, )): - spikes.append((row[0]*unit, row[1])) - self.write_needed.add(row[1]) - - self.executemany(""" - UPDATE claims - SET trending_score = (trending_score + ?) - WHERE claim_hash = ?; - """, spikes) - self.execute("DELETE FROM spikes WHERE height = ?;", (height, )) - - - def decay_whales(self, height): - """ - Occurs inside transaction. 
- """ - if height % SAVE_INTERVAL != 0: - return - - whales = self.execute(""" - SELECT trending_score, lbc, claim_hash - FROM claims - WHERE lbc >= ?; - """, (WHALE_THRESHOLD, )).fetchall() - whales2 = [] - for whale in whales: - trending, lbc, claim_hash = whale - - # Overall multiplication factor for decay rate - # At WHALE_THRESHOLD, this is 1 - # At 10*WHALE_THRESHOLD, it is 3 - decay_rate_factor = 1.0 + 2.0*math.log10(lbc/WHALE_THRESHOLD) - - # The -1 is because this is just the *extra* part being applied - factor = (DECAY**SAVE_INTERVAL)**(decay_rate_factor - 1.0) - - # Decay - trending *= factor - whales2.append((trending, claim_hash)) - self.write_needed.add(claim_hash) - - self.executemany("UPDATE claims SET trending_score=? WHERE claim_hash=?;", - whales2) - - - def renorm(self, height): - """ - Renormalise trending scores. Occurs inside a transaction. - """ - - if height % RENORM_INTERVAL == 0: - threshold = 1.0E-3/DECAY_PER_RENORM - for row in self.execute("""SELECT claim_hash FROM claims - WHERE ABS(trending_score) >= ?;""", - (threshold, )): - self.write_needed.add(row[0]) - - self.execute("""UPDATE claims SET trending_score = ?*trending_score - WHERE ABS(trending_score) >= ?;""", - (DECAY_PER_RENORM, threshold)) - - def write_to_claims_db(self, db, height): - """ - Write changed trending scores to claims.db. - """ - if height % SAVE_INTERVAL != 0: - return - - rows = self.execute(f""" - SELECT trending_score, claim_hash - FROM claims - WHERE claim_hash IN - ({','.join('?' for _ in self.write_needed)}); - """, list(self.write_needed)).fetchall() - - db.executemany("""UPDATE claim SET trending_mixed = ? - WHERE claim_hash = ?;""", rows) - - # Clear list of claims needing to be written to claims.db - self.write_needed = set() - - - def update(self, db, height, recalculate_claim_hashes): - """ - Update trending scores. - Input is a cursor to claims.db, the block height, and the list of - claims that changed. - """ - assert self.initialised - - self.begin() - self.renorm(height) - - # Fetch changed/new claims from claims.db - for row in db.execute(f""" - SELECT claim_hash, - 1E-8*(amount + support_amount) AS lbc - FROM claim - WHERE claim_hash IN - ({','.join('?' for _ in recalculate_claim_hashes)}); - """, list(recalculate_claim_hashes)): - claim_hash, lbc = row - - # Insert into trending db if it does not exist - self.execute(""" - INSERT INTO claims (claim_hash) - VALUES (?) - ON CONFLICT (claim_hash) DO NOTHING;""", - (claim_hash, )) - - # See if it was an LBC change - old = self.execute("SELECT * FROM claims WHERE claim_hash=?;", - (claim_hash, )).fetchone() - lbc_old = old[1] - - # Save new LBC value into trending db - self.execute("UPDATE claims SET lbc = ? WHERE claim_hash = ?;", - (lbc, claim_hash)) - - if lbc > lbc_old: - - # Schedule a future spike - delay = min(int((lbc + 1E-8)**0.4), HALF_LIFE) - spike = (claim_hash, height + delay, spike_mass(lbc, lbc_old)) - self.execute("""INSERT INTO spikes - (claim_hash, height, mass) - VALUES (?, ?, ?);""", spike) - - elif lbc < lbc_old: - - # Subtract from future spikes - penalty = spike_mass(lbc_old, lbc) - spikes = self.execute(""" - SELECT * FROM spikes - WHERE claim_hash = ? - ORDER BY height ASC, mass DESC; - """, (claim_hash, )).fetchall() - for spike in spikes: - spike_id, mass = spike[0], spike[3] - - if mass > penalty: - # The entire penalty merely reduces this spike - self.execute("UPDATE spikes SET mass=? 
WHERE id=?;", - (mass - penalty, spike_id)) - penalty = 0.0 - else: - # Removing this spike entirely accounts for some (or - # all) of the penalty, then move on to other spikes - self.execute("DELETE FROM spikes WHERE id=?;", - (spike_id, )) - penalty -= mass - - # If penalty remains, that's a negative spike to be applied - # immediately. - if penalty > 0.0: - self.execute(""" - INSERT INTO spikes (claim_hash, height, mass) - VALUES (?, ?, ?);""", - (claim_hash, height, -penalty)) - - self.apply_spikes(height) - self.decay_whales(height) - self.commit() - - self.write_to_claims_db(db, height) - - - - - -# The "global" instance to work with -# pylint: disable=C0103 -trending_data = TrendingDB() - -def spike_mass(x, x_old): - """ - Compute the mass of a trending spike (normed - constant units). - x_old = old LBC value - x = new LBC value - """ - - # Sign of trending spike - sign = 1.0 - if x < x_old: - sign = -1.0 - - # Magnitude - mag = abs(x**0.25 - x_old**0.25) - - # Minnow boost - mag *= 1.0 + 2E4/(x + 100.0)**2 - - return sign*mag - - -def run(db, height, final_height, recalculate_claim_hashes): - if height < final_height - 5*HALF_LIFE: - trending_log(f"Skipping trending calculations at block {height}.\n") - return - - start = time.time() - trending_log(f"Calculating variable_decay trending at block {height}.\n") - trending_data.update(db, height, recalculate_claim_hashes) - end = time.time() - trending_log(f"Trending operations took {end - start} seconds.\n\n") - -def test_trending(): - """ - Quick trending test for claims with different support patterns. - Actually use the run() function. - """ - - # Create a fake "claims.db" for testing - # pylint: disable=I1101 - dbc = apsw.Connection(":memory:") - db = dbc.cursor() - - # Create table - db.execute(""" - BEGIN; - CREATE TABLE claim (claim_hash TEXT PRIMARY KEY, - amount REAL NOT NULL DEFAULT 0.0, - support_amount REAL NOT NULL DEFAULT 0.0, - trending_mixed REAL NOT NULL DEFAULT 0.0); - COMMIT; - """) - - # Initialise trending data before anything happens with the claims - trending_data.initialise(db) - - # Insert initial states of claims - everything = {"huge_whale": 0.01, "medium_whale": 0.01, "small_whale": 0.01, - "huge_whale_botted": 0.01, "minnow": 0.01} - - def to_list_of_tuples(stuff): - l = [] - for key in stuff: - l.append((key, stuff[key])) - return l - - db.executemany(""" - INSERT INTO claim (claim_hash, amount) VALUES (?, 1E8*?); - """, to_list_of_tuples(everything)) - - # Process block zero - height = 0 - run(db, height, height, everything.keys()) - - # Save trajectories for plotting - trajectories = {} - for row in trending_data.execute(""" - SELECT claim_hash, trending_score - FROM claims; - """): - trajectories[row[0]] = [row[1]/trending_unit(height)] - - # Main loop - for height in range(1, 1000): - - # One-off supports - if height == 1: - everything["huge_whale"] += 5E5 - everything["medium_whale"] += 5E4 - everything["small_whale"] += 5E3 - - # Every block - if height < 500: - everything["huge_whale_botted"] += 5E5/500 - everything["minnow"] += 1 - - # Remove supports - if height == 500: - for key in everything: - everything[key] = 0.01 - - # Whack into the db - db.executemany(""" - UPDATE claim SET amount = 1E8*? 
WHERE claim_hash = ?; - """, [(y, x) for (x, y) in to_list_of_tuples(everything)]) - - # Call run() - run(db, height, height, everything.keys()) - - # Append current trending scores to trajectories - for row in db.execute(""" - SELECT claim_hash, trending_mixed - FROM claim; - """): - trajectories[row[0]].append(row[1]/trending_unit(height)) - - dbc.close() - - # pylint: disable=C0415 - import matplotlib.pyplot as plt - for key in trajectories: - plt.plot(trajectories[key], label=key) - plt.legend() - plt.show() - - - - - -if __name__ == "__main__": - test_trending() diff --git a/lbry/wallet/server/db/trending/zscore.py b/lbry/wallet/server/db/trending/zscore.py deleted file mode 100644 index ff442fdec..000000000 --- a/lbry/wallet/server/db/trending/zscore.py +++ /dev/null @@ -1,119 +0,0 @@ -from math import sqrt - -# TRENDING_WINDOW is the number of blocks in ~6hr period (21600 seconds / 161 seconds per block) -TRENDING_WINDOW = 134 - -# TRENDING_DATA_POINTS says how many samples to use for the trending algorithm -# i.e. only consider claims from the most recent (TRENDING_WINDOW * TRENDING_DATA_POINTS) blocks -TRENDING_DATA_POINTS = 28 - -CREATE_TREND_TABLE = """ - create table if not exists trend ( - claim_hash bytes not null, - height integer not null, - amount integer not null, - primary key (claim_hash, height) - ) without rowid; -""" - - -class ZScore: - __slots__ = 'count', 'total', 'power', 'last' - - def __init__(self): - self.count = 0 - self.total = 0 - self.power = 0 - self.last = None - - def step(self, value): - if self.last is not None: - self.count += 1 - self.total += self.last - self.power += self.last ** 2 - self.last = value - - @property - def mean(self): - return self.total / self.count - - @property - def standard_deviation(self): - value = (self.power / self.count) - self.mean ** 2 - return sqrt(value) if value > 0 else 0 - - def finalize(self): - if self.count == 0: - return self.last - return (self.last - self.mean) / (self.standard_deviation or 1) - - -def install(connection): - connection.create_aggregate("zscore", 1, ZScore) - connection.executescript(CREATE_TREND_TABLE) - - -def run(db, height, final_height, affected_claims): - # don't start tracking until we're at the end of initial sync - if height < (final_height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)): - return - - if height % TRENDING_WINDOW != 0: - return - - db.execute(f""" - DELETE FROM trend WHERE height < {height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)} - """) - - start = (height - TRENDING_WINDOW) + 1 - db.execute(f""" - INSERT OR IGNORE INTO trend (claim_hash, height, amount) - SELECT claim_hash, {start}, COALESCE( - (SELECT SUM(amount) FROM support WHERE claim_hash=claim.claim_hash - AND height >= {start}), 0 - ) AS support_sum - FROM claim WHERE support_sum > 0 - """) - - zscore = ZScore() - for global_sum in db.execute("SELECT AVG(amount) AS avg_amount FROM trend GROUP BY height"): - zscore.step(global_sum.avg_amount) - global_mean, global_deviation = 0, 1 - if zscore.count > 0: - global_mean = zscore.mean - global_deviation = zscore.standard_deviation - - db.execute(f""" - UPDATE claim SET - trending_local = COALESCE(( - SELECT zscore(amount) FROM trend - WHERE claim_hash=claim.claim_hash ORDER BY height DESC - ), 0), - trending_global = COALESCE(( - SELECT (amount - {global_mean}) / {global_deviation} FROM trend - WHERE claim_hash=claim.claim_hash AND height = {start} - ), 0), - trending_group = 0, - trending_mixed = 0 - """) - - # trending_group and trending_mixed determine how 
trending will show in query results - # normally the SQL will be: "ORDER BY trending_group, trending_mixed" - # changing the trending_group will have significant impact on trending results - # changing the value used for trending_mixed will only impact trending within a trending_group - db.execute(f""" - UPDATE claim SET - trending_group = CASE - WHEN trending_local > 0 AND trending_global > 0 THEN 4 - WHEN trending_local <= 0 AND trending_global > 0 THEN 3 - WHEN trending_local > 0 AND trending_global <= 0 THEN 2 - WHEN trending_local <= 0 AND trending_global <= 0 THEN 1 - END, - trending_mixed = CASE - WHEN trending_local > 0 AND trending_global > 0 THEN trending_global - WHEN trending_local <= 0 AND trending_global > 0 THEN trending_local - WHEN trending_local > 0 AND trending_global <= 0 THEN trending_local - WHEN trending_local <= 0 AND trending_global <= 0 THEN trending_global - END - WHERE trending_local <> 0 OR trending_global <> 0 - """) diff --git a/lbry/wallet/server/db/writer.py b/lbry/wallet/server/db/writer.py deleted file mode 100644 index 34e14ced1..000000000 --- a/lbry/wallet/server/db/writer.py +++ /dev/null @@ -1,994 +0,0 @@ -import os - -import sqlite3 -from typing import Union, Tuple, Set, List -from itertools import chain -from decimal import Decimal -from collections import namedtuple -from binascii import unhexlify, hexlify -from lbry.wallet.server.leveldb import LevelDB -from lbry.wallet.server.util import class_logger -from lbry.wallet.database import query, constraints_to_sql - -from lbry.schema.tags import clean_tags -from lbry.schema.mime_types import guess_stream_type -from lbry.wallet import Ledger, RegTestLedger -from lbry.wallet.transaction import Transaction, Output -from lbry.wallet.server.db.canonical import register_canonical_functions -from lbry.wallet.server.db.trending import TRENDING_ALGORITHMS - -from .common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES -from lbry.wallet.server.db.elasticsearch import SearchIndex - -ATTRIBUTE_ARRAY_MAX_LENGTH = 100 -sqlite3.enable_callback_tracebacks(True) - - -class SQLDB: - - PRAGMAS = """ - pragma journal_mode=WAL; - """ - - CREATE_CLAIM_TABLE = """ - create table if not exists claim ( - claim_hash bytes primary key, - claim_id text not null, - claim_name text not null, - normalized text not null, - txo_hash bytes not null, - tx_position integer not null, - amount integer not null, - timestamp integer not null, -- last updated timestamp - creation_timestamp integer not null, - height integer not null, -- last updated height - creation_height integer not null, - activation_height integer, - expiration_height integer not null, - release_time integer not null, - - short_url text not null, -- normalized#shortest-unique-claim_id - canonical_url text, -- channel's-short_url/normalized#shortest-unique-claim_id-within-channel - - title text, - author text, - description text, - - claim_type integer, - has_source bool, - reposted integer default 0, - - -- streams - stream_type text, - media_type text, - fee_amount integer default 0, - fee_currency text, - duration integer, - - -- reposts - reposted_claim_hash bytes, - - -- claims which are channels - public_key_bytes bytes, - public_key_hash bytes, - claims_in_channel integer, - - -- claims which are inside channels - channel_hash bytes, - channel_join integer, -- height at which claim got valid signature / joined channel - signature bytes, - signature_digest bytes, - signature_valid bool, - - effective_amount integer not null default 0, - 
support_amount integer not null default 0, - trending_group integer not null default 0, - trending_mixed integer not null default 0, - trending_local integer not null default 0, - trending_global integer not null default 0 - ); - - create index if not exists claim_normalized_idx on claim (normalized, activation_height); - create index if not exists claim_channel_hash_idx on claim (channel_hash, signature, claim_hash); - create index if not exists claim_claims_in_channel_idx on claim (signature_valid, channel_hash, normalized); - create index if not exists claim_txo_hash_idx on claim (txo_hash); - create index if not exists claim_activation_height_idx on claim (activation_height, claim_hash); - create index if not exists claim_expiration_height_idx on claim (expiration_height); - create index if not exists claim_reposted_claim_hash_idx on claim (reposted_claim_hash); - """ - - CREATE_SUPPORT_TABLE = """ - create table if not exists support ( - txo_hash bytes primary key, - tx_position integer not null, - height integer not null, - claim_hash bytes not null, - amount integer not null - ); - create index if not exists support_claim_hash_idx on support (claim_hash, height); - """ - - CREATE_TAG_TABLE = """ - create table if not exists tag ( - tag text not null, - claim_hash bytes not null, - height integer not null - ); - create unique index if not exists tag_claim_hash_tag_idx on tag (claim_hash, tag); - """ - - CREATE_LANGUAGE_TABLE = """ - create table if not exists language ( - language text not null, - claim_hash bytes not null, - height integer not null - ); - create unique index if not exists language_claim_hash_language_idx on language (claim_hash, language); - """ - - CREATE_CLAIMTRIE_TABLE = """ - create table if not exists claimtrie ( - normalized text primary key, - claim_hash bytes not null, - last_take_over_height integer not null - ); - create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash); - """ - - CREATE_CHANGELOG_TRIGGER = """ - create table if not exists changelog ( - claim_hash bytes primary key - ); - create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash); - create trigger if not exists claim_changelog after update on claim - begin - insert or ignore into changelog (claim_hash) values (new.claim_hash); - end; - create trigger if not exists claimtrie_changelog after update on claimtrie - begin - insert or ignore into changelog (claim_hash) values (new.claim_hash); - insert or ignore into changelog (claim_hash) values (old.claim_hash); - end; - """ - - SEARCH_INDEXES = """ - -- used by any tag clouds - create index if not exists tag_tag_idx on tag (tag, claim_hash); - - -- naked order bys (no filters) - create unique index if not exists claim_release_idx on claim (release_time, claim_hash); - create unique index if not exists claim_trending_idx on claim (trending_group, trending_mixed, claim_hash); - create unique index if not exists claim_effective_amount_idx on claim (effective_amount, claim_hash); - - -- claim_type filter + order by - create unique index if not exists claim_type_release_idx on claim (release_time, claim_type, claim_hash); - create unique index if not exists claim_type_trending_idx on claim (trending_group, trending_mixed, claim_type, claim_hash); - create unique index if not exists claim_type_effective_amount_idx on claim (effective_amount, claim_type, claim_hash); - - -- stream_type filter + order by - create unique index if not exists stream_type_release_idx on claim (stream_type, release_time, 
claim_hash); - create unique index if not exists stream_type_trending_idx on claim (stream_type, trending_group, trending_mixed, claim_hash); - create unique index if not exists stream_type_effective_amount_idx on claim (stream_type, effective_amount, claim_hash); - - -- channel_hash filter + order by - create unique index if not exists channel_hash_release_idx on claim (channel_hash, release_time, claim_hash); - create unique index if not exists channel_hash_trending_idx on claim (channel_hash, trending_group, trending_mixed, claim_hash); - create unique index if not exists channel_hash_effective_amount_idx on claim (channel_hash, effective_amount, claim_hash); - - -- duration filter + order by - create unique index if not exists duration_release_idx on claim (duration, release_time, claim_hash); - create unique index if not exists duration_trending_idx on claim (duration, trending_group, trending_mixed, claim_hash); - create unique index if not exists duration_effective_amount_idx on claim (duration, effective_amount, claim_hash); - - -- fee_amount + order by - create unique index if not exists fee_amount_release_idx on claim (fee_amount, release_time, claim_hash); - create unique index if not exists fee_amount_trending_idx on claim (fee_amount, trending_group, trending_mixed, claim_hash); - create unique index if not exists fee_amount_effective_amount_idx on claim (fee_amount, effective_amount, claim_hash); - - -- TODO: verify that all indexes below are used - create index if not exists claim_height_normalized_idx on claim (height, normalized asc); - create index if not exists claim_resolve_idx on claim (normalized, claim_id); - create index if not exists claim_id_idx on claim (claim_id, claim_hash); - create index if not exists claim_timestamp_idx on claim (timestamp); - create index if not exists claim_public_key_hash_idx on claim (public_key_hash); - create index if not exists claim_signature_valid_idx on claim (signature_valid); - """ - - TAG_INDEXES = '\n'.join( - f"create unique index if not exists tag_{tag_key}_idx on tag (tag, claim_hash) WHERE tag='{tag_value}';" - for tag_value, tag_key in COMMON_TAGS.items() - ) - - LANGUAGE_INDEXES = '\n'.join( - f"create unique index if not exists language_{language}_idx on language (language, claim_hash) WHERE language='{language}';" - for language in INDEXED_LANGUAGES - ) - - CREATE_TABLES_QUERY = ( - CREATE_CLAIM_TABLE + - CREATE_SUPPORT_TABLE + - CREATE_CLAIMTRIE_TABLE + - CREATE_TAG_TABLE + - CREATE_CHANGELOG_TRIGGER + - CREATE_LANGUAGE_TABLE - ) - - def __init__( - self, main, path: str, blocking_channels: list, filtering_channels: list, trending: list): - self.main = main - self._db_path = path - self.db = None - self.logger = class_logger(__name__, self.__class__.__name__) - self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger - self.blocked_streams = None - self.blocked_channels = None - self.blocking_channel_hashes = { - unhexlify(channel_id)[::-1] for channel_id in blocking_channels if channel_id - } - self.filtered_streams = None - self.filtered_channels = None - self.filtering_channel_hashes = { - unhexlify(channel_id)[::-1] for channel_id in filtering_channels if channel_id - } - self.trending = trending - self.pending_deletes = set() - - def open(self): - self.db = sqlite3.connect(self._db_path, isolation_level=None, check_same_thread=False, uri=True) - - def namedtuple_factory(cursor, row): - Row = namedtuple('Row', (d[0] for d in cursor.description)) - return Row(*row) - self.db.row_factory = 
namedtuple_factory - self.db.executescript(self.PRAGMAS) - self.db.executescript(self.CREATE_TABLES_QUERY) - register_canonical_functions(self.db) - self.blocked_streams = {} - self.blocked_channels = {} - self.filtered_streams = {} - self.filtered_channels = {} - self.update_blocked_and_filtered_claims() - for algorithm in self.trending: - algorithm.install(self.db) - - def close(self): - if self.db is not None: - self.db.close() - - def update_blocked_and_filtered_claims(self): - self.update_claims_from_channel_hashes( - self.blocked_streams, self.blocked_channels, self.blocking_channel_hashes - ) - self.update_claims_from_channel_hashes( - self.filtered_streams, self.filtered_channels, self.filtering_channel_hashes - ) - self.filtered_streams.update(self.blocked_streams) - self.filtered_channels.update(self.blocked_channels) - - def update_claims_from_channel_hashes(self, shared_streams, shared_channels, channel_hashes): - streams, channels = {}, {} - if channel_hashes: - sql = query( - "SELECT repost.channel_hash, repost.reposted_claim_hash, target.claim_type " - "FROM claim as repost JOIN claim AS target ON (target.claim_hash=repost.reposted_claim_hash)", **{ - 'repost.reposted_claim_hash__is_not_null': 1, - 'repost.channel_hash__in': channel_hashes - } - ) - for blocked_claim in self.execute(*sql): - if blocked_claim.claim_type == CLAIM_TYPES['stream']: - streams[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash - elif blocked_claim.claim_type == CLAIM_TYPES['channel']: - channels[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash - shared_streams.clear() - shared_streams.update(streams) - shared_channels.clear() - shared_channels.update(channels) - - @staticmethod - def _insert_sql(table: str, data: dict) -> Tuple[str, list]: - columns, values = [], [] - for column, value in data.items(): - columns.append(column) - values.append(value) - sql = ( - f"INSERT INTO {table} ({', '.join(columns)}) " - f"VALUES ({', '.join(['?'] * len(values))})" - ) - return sql, values - - @staticmethod - def _update_sql(table: str, data: dict, where: str, - constraints: Union[list, tuple]) -> Tuple[str, list]: - columns, values = [], [] - for column, value in data.items(): - columns.append(f"{column} = ?") - values.append(value) - values.extend(constraints) - return f"UPDATE {table} SET {', '.join(columns)} WHERE {where}", values - - @staticmethod - def _delete_sql(table: str, constraints: dict) -> Tuple[str, dict]: - where, values = constraints_to_sql(constraints) - return f"DELETE FROM {table} WHERE {where}", values - - def execute(self, *args): - return self.db.execute(*args) - - def executemany(self, *args): - return self.db.executemany(*args) - - def begin(self): - self.execute('begin;') - - def commit(self): - self.execute('commit;') - - def _upsertable_claims(self, txos: List[Output], header, clear_first=False): - claim_hashes, claims, tags, languages = set(), [], {}, {} - for txo in txos: - tx = txo.tx_ref.tx - - try: - assert txo.claim_name - assert txo.normalized_name - except: - #self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.") - continue - - language = 'none' - try: - if txo.claim.is_stream and txo.claim.stream.languages: - language = txo.claim.stream.languages[0].language - except: - pass - - claim_hash = txo.claim_hash - claim_hashes.add(claim_hash) - claim_record = { - 'claim_hash': claim_hash, - 'claim_id': txo.claim_id, - 'claim_name': txo.claim_name, - 'normalized': txo.normalized_name, - 'txo_hash': txo.ref.hash, - 
'tx_position': tx.position, - 'amount': txo.amount, - 'timestamp': header['timestamp'], - 'height': tx.height, - 'title': None, - 'description': None, - 'author': None, - 'duration': None, - 'claim_type': None, - 'has_source': False, - 'stream_type': None, - 'media_type': None, - 'release_time': None, - 'fee_currency': None, - 'fee_amount': 0, - 'reposted_claim_hash': None - } - claims.append(claim_record) - - try: - claim = txo.claim - except: - #self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.") - continue - - if claim.is_stream: - claim_record['claim_type'] = CLAIM_TYPES['stream'] - claim_record['has_source'] = claim.stream.has_source - claim_record['media_type'] = claim.stream.source.media_type - claim_record['stream_type'] = STREAM_TYPES[guess_stream_type(claim_record['media_type'])] - claim_record['title'] = claim.stream.title - claim_record['description'] = claim.stream.description - claim_record['author'] = claim.stream.author - if claim.stream.video and claim.stream.video.duration: - claim_record['duration'] = claim.stream.video.duration - if claim.stream.audio and claim.stream.audio.duration: - claim_record['duration'] = claim.stream.audio.duration - if claim.stream.release_time: - claim_record['release_time'] = claim.stream.release_time - if claim.stream.has_fee: - fee = claim.stream.fee - if isinstance(fee.currency, str): - claim_record['fee_currency'] = fee.currency.lower() - if isinstance(fee.amount, Decimal): - if fee.amount >= 0 and int(fee.amount*1000) < 9223372036854775807: - claim_record['fee_amount'] = int(fee.amount*1000) - elif claim.is_repost: - claim_record['claim_type'] = CLAIM_TYPES['repost'] - claim_record['reposted_claim_hash'] = claim.repost.reference.claim_hash - elif claim.is_channel: - claim_record['claim_type'] = CLAIM_TYPES['channel'] - elif claim.is_collection: - claim_record['claim_type'] = CLAIM_TYPES['collection'] - - languages[(language, claim_hash)] = (language, claim_hash, tx.height) - - for tag in clean_tags(claim.message.tags): - tags[(tag, claim_hash)] = (tag, claim_hash, tx.height) - - if clear_first: - self._clear_claim_metadata(claim_hashes) - - if tags: - self.executemany( - "INSERT OR IGNORE INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)", tags.values() - ) - if languages: - self.executemany( - "INSERT OR IGNORE INTO language (language, claim_hash, height) VALUES (?, ?, ?)", languages.values() - ) - - return claims - - def insert_claims(self, txos: List[Output], header): - claims = self._upsertable_claims(txos, header) - if claims: - self.executemany(""" - INSERT OR REPLACE INTO claim ( - claim_hash, claim_id, claim_name, normalized, txo_hash, tx_position, amount, - claim_type, media_type, stream_type, timestamp, creation_timestamp, has_source, - fee_currency, fee_amount, title, description, author, duration, height, reposted_claim_hash, - creation_height, release_time, activation_height, expiration_height, short_url) - VALUES ( - :claim_hash, :claim_id, :claim_name, :normalized, :txo_hash, :tx_position, :amount, - :claim_type, :media_type, :stream_type, :timestamp, :timestamp, :has_source, - :fee_currency, :fee_amount, :title, :description, :author, :duration, :height, :reposted_claim_hash, :height, - CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE :timestamp END, - CASE WHEN :normalized NOT IN (SELECT normalized FROM claimtrie) THEN :height END, - CASE WHEN :height >= 137181 THEN :height+2102400 ELSE :height+262974 END, - :claim_name||COALESCE( - (SELECT shortest_id(claim_id, :claim_id) 
FROM claim WHERE normalized = :normalized), - '#'||substr(:claim_id, 1, 1) - ) - )""", claims) - - def update_claims(self, txos: List[Output], header): - claims = self._upsertable_claims(txos, header, clear_first=True) - if claims: - self.executemany(""" - UPDATE claim SET - txo_hash=:txo_hash, tx_position=:tx_position, amount=:amount, height=:height, - claim_type=:claim_type, media_type=:media_type, stream_type=:stream_type, - timestamp=:timestamp, fee_amount=:fee_amount, fee_currency=:fee_currency, has_source=:has_source, - title=:title, duration=:duration, description=:description, author=:author, reposted_claim_hash=:reposted_claim_hash, - release_time=CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE release_time END - WHERE claim_hash=:claim_hash; - """, claims) - - def delete_claims(self, claim_hashes: Set[bytes]): - """ Deletes claim supports and from claimtrie in case of an abandon. """ - if claim_hashes: - affected_channels = self.execute(*query( - "SELECT channel_hash FROM claim", channel_hash__is_not_null=1, claim_hash__in=claim_hashes - )).fetchall() - for table in ('claim', 'support', 'claimtrie'): - self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes})) - self._clear_claim_metadata(claim_hashes) - return {r.channel_hash for r in affected_channels} - return set() - - def delete_claims_above_height(self, height: int): - claim_hashes = [x[0] for x in self.execute( - "SELECT claim_hash FROM claim WHERE height>?", (height, ) - ).fetchall()] - while claim_hashes: - batch = set(claim_hashes[:500]) - claim_hashes = claim_hashes[500:] - self.delete_claims(batch) - - def _clear_claim_metadata(self, claim_hashes: Set[bytes]): - if claim_hashes: - for table in ('tag',): # 'language', 'location', etc - self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes})) - - def split_inputs_into_claims_supports_and_other(self, txis): - txo_hashes = {txi.txo_ref.hash for txi in txis} - claims = self.execute(*query( - "SELECT txo_hash, claim_hash, normalized FROM claim", txo_hash__in=txo_hashes - )).fetchall() - txo_hashes -= {r.txo_hash for r in claims} - supports = {} - if txo_hashes: - supports = self.execute(*query( - "SELECT txo_hash, claim_hash FROM support", txo_hash__in=txo_hashes - )).fetchall() - txo_hashes -= {r.txo_hash for r in supports} - return claims, supports, txo_hashes - - def insert_supports(self, txos: List[Output]): - supports = [] - for txo in txos: - tx = txo.tx_ref.tx - supports.append(( - txo.ref.hash, tx.position, tx.height, - txo.claim_hash, txo.amount - )) - if supports: - self.executemany( - "INSERT OR IGNORE INTO support (" - " txo_hash, tx_position, height, claim_hash, amount" - ") " - "VALUES (?, ?, ?, ?, ?)", supports - ) - - def delete_supports(self, txo_hashes: Set[bytes]): - if txo_hashes: - self.execute(*self._delete_sql('support', {'txo_hash__in': txo_hashes})) - - def calculate_reposts(self, txos: List[Output]): - targets = set() - for txo in txos: - try: - claim = txo.claim - except: - continue - if claim.is_repost: - targets.add((claim.repost.reference.claim_hash,)) - if targets: - self.executemany( - """ - UPDATE claim SET reposted = ( - SELECT count(*) FROM claim AS repost WHERE repost.reposted_claim_hash = claim.claim_hash - ) - WHERE claim_hash = ? 
- """, targets - ) - return {target[0] for target in targets} - - def validate_channel_signatures(self, height, new_claims, updated_claims, spent_claims, affected_channels, timer): - if not new_claims and not updated_claims and not spent_claims: - return - - sub_timer = timer.add_timer('segregate channels and signables') - sub_timer.start() - channels, new_channel_keys, signables = {}, {}, {} - for txo in chain(new_claims, updated_claims): - try: - claim = txo.claim - except: - continue - if claim.is_channel: - channels[txo.claim_hash] = txo - new_channel_keys[txo.claim_hash] = claim.channel.public_key_bytes - else: - signables[txo.claim_hash] = txo - sub_timer.stop() - - sub_timer = timer.add_timer('make list of channels we need to lookup') - sub_timer.start() - missing_channel_keys = set() - for txo in signables.values(): - claim = txo.claim - if claim.is_signed and claim.signing_channel_hash not in new_channel_keys: - missing_channel_keys.add(claim.signing_channel_hash) - sub_timer.stop() - - sub_timer = timer.add_timer('lookup missing channels') - sub_timer.start() - all_channel_keys = {} - if new_channel_keys or missing_channel_keys or affected_channels: - all_channel_keys = dict(self.execute(*query( - "SELECT claim_hash, public_key_bytes FROM claim", - claim_hash__in=set(new_channel_keys) | missing_channel_keys | affected_channels - ))) - sub_timer.stop() - - sub_timer = timer.add_timer('prepare for updating claims') - sub_timer.start() - changed_channel_keys = {} - for claim_hash, new_key in new_channel_keys.items(): - if claim_hash not in all_channel_keys or all_channel_keys[claim_hash] != new_key: - all_channel_keys[claim_hash] = new_key - changed_channel_keys[claim_hash] = new_key - - claim_updates = [] - - for claim_hash, txo in signables.items(): - claim = txo.claim - update = { - 'claim_hash': claim_hash, - 'channel_hash': None, - 'signature': None, - 'signature_digest': None, - 'signature_valid': None - } - if claim.is_signed: - update.update({ - 'channel_hash': claim.signing_channel_hash, - 'signature': txo.get_encoded_signature(), - 'signature_digest': txo.get_signature_digest(self.ledger), - 'signature_valid': 0 - }) - claim_updates.append(update) - sub_timer.stop() - - sub_timer = timer.add_timer('find claims affected by a change in channel key') - sub_timer.start() - if changed_channel_keys: - sql = f""" - SELECT * FROM claim WHERE - channel_hash IN ({','.join('?' 
for _ in changed_channel_keys)}) AND - signature IS NOT NULL - """ - for affected_claim in self.execute(sql, list(changed_channel_keys.keys())): - if affected_claim.claim_hash not in signables: - claim_updates.append({ - 'claim_hash': affected_claim.claim_hash, - 'channel_hash': affected_claim.channel_hash, - 'signature': affected_claim.signature, - 'signature_digest': affected_claim.signature_digest, - 'signature_valid': 0 - }) - sub_timer.stop() - - sub_timer = timer.add_timer('verify signatures') - sub_timer.start() - for update in claim_updates: - channel_pub_key = all_channel_keys.get(update['channel_hash']) - if channel_pub_key and update['signature']: - update['signature_valid'] = Output.is_signature_valid( - bytes(update['signature']), bytes(update['signature_digest']), channel_pub_key - ) - sub_timer.stop() - - sub_timer = timer.add_timer('update claims') - sub_timer.start() - if claim_updates: - self.executemany(f""" - UPDATE claim SET - channel_hash=:channel_hash, signature=:signature, signature_digest=:signature_digest, - signature_valid=:signature_valid, - channel_join=CASE - WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN channel_join - WHEN :signature_valid=1 THEN {height} - END, - canonical_url=CASE - WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN canonical_url - WHEN :signature_valid=1 THEN - (SELECT short_url FROM claim WHERE claim_hash=:channel_hash)||'/'|| - claim_name||COALESCE( - (SELECT shortest_id(other_claim.claim_id, claim.claim_id) FROM claim AS other_claim - WHERE other_claim.signature_valid = 1 AND - other_claim.channel_hash = :channel_hash AND - other_claim.normalized = claim.normalized), - '#'||substr(claim_id, 1, 1) - ) - END - WHERE claim_hash=:claim_hash; - """, claim_updates) - sub_timer.stop() - - sub_timer = timer.add_timer('update claims affected by spent channels') - sub_timer.start() - if spent_claims: - self.execute( - f""" - UPDATE claim SET - signature_valid=CASE WHEN signature IS NOT NULL THEN 0 END, - channel_join=NULL, canonical_url=NULL - WHERE channel_hash IN ({','.join('?' for _ in spent_claims)}) - """, list(spent_claims) - ) - sub_timer.stop() - - sub_timer = timer.add_timer('update channels') - sub_timer.start() - if channels: - self.executemany( - """ - UPDATE claim SET - public_key_bytes=:public_key_bytes, - public_key_hash=:public_key_hash - WHERE claim_hash=:claim_hash""", [{ - 'claim_hash': claim_hash, - 'public_key_bytes': txo.claim.channel.public_key_bytes, - 'public_key_hash': self.ledger.address_to_hash160( - self.ledger.public_key_to_address(txo.claim.channel.public_key_bytes) - ) - } for claim_hash, txo in channels.items()] - ) - sub_timer.stop() - - sub_timer = timer.add_timer('update claims_in_channel counts') - sub_timer.start() - if all_channel_keys: - self.executemany(f""" - UPDATE claim SET - claims_in_channel=( - SELECT COUNT(*) FROM claim AS claim_in_channel - WHERE claim_in_channel.signature_valid=1 AND - claim_in_channel.channel_hash=claim.claim_hash - ) - WHERE claim_hash = ? 
- """, [(channel_hash,) for channel_hash in all_channel_keys]) - sub_timer.stop() - - sub_timer = timer.add_timer('update blocked claims list') - sub_timer.start() - if (self.blocking_channel_hashes.intersection(all_channel_keys) or - self.filtering_channel_hashes.intersection(all_channel_keys)): - self.update_blocked_and_filtered_claims() - sub_timer.stop() - - def _update_support_amount(self, claim_hashes): - if claim_hashes: - self.execute(f""" - UPDATE claim SET - support_amount = COALESCE( - (SELECT SUM(amount) FROM support WHERE support.claim_hash=claim.claim_hash), 0 - ) - WHERE claim_hash IN ({','.join('?' for _ in claim_hashes)}) - """, claim_hashes) - - def _update_effective_amount(self, height, claim_hashes=None): - self.execute( - f"UPDATE claim SET effective_amount = amount + support_amount " - f"WHERE activation_height = {height}" - ) - if claim_hashes: - self.execute( - f"UPDATE claim SET effective_amount = amount + support_amount " - f"WHERE activation_height < {height} " - f" AND claim_hash IN ({','.join('?' for _ in claim_hashes)})", - claim_hashes - ) - - def _calculate_activation_height(self, height): - last_take_over_height = f"""COALESCE( - (SELECT last_take_over_height FROM claimtrie - WHERE claimtrie.normalized=claim.normalized), - {height} - ) - """ - self.execute(f""" - UPDATE claim SET activation_height = - {height} + min(4032, cast(({height} - {last_take_over_height}) / 32 AS INT)) - WHERE activation_height IS NULL - """) - - def _perform_overtake(self, height, changed_claim_hashes, deleted_names): - deleted_names_sql = claim_hashes_sql = "" - if changed_claim_hashes: - claim_hashes_sql = f"OR claim_hash IN ({','.join('?' for _ in changed_claim_hashes)})" - if deleted_names: - deleted_names_sql = f"OR normalized IN ({','.join('?' for _ in deleted_names)})" - overtakes = self.execute(f""" - SELECT winner.normalized, winner.claim_hash, - claimtrie.claim_hash AS current_winner, - MAX(winner.effective_amount) AS max_winner_effective_amount - FROM ( - SELECT normalized, claim_hash, effective_amount FROM claim - WHERE normalized IN ( - SELECT normalized FROM claim WHERE activation_height={height} {claim_hashes_sql} - ) {deleted_names_sql} - ORDER BY effective_amount DESC, height ASC, tx_position ASC - ) AS winner LEFT JOIN claimtrie USING (normalized) - GROUP BY winner.normalized - HAVING current_winner IS NULL OR current_winner <> winner.claim_hash - """, list(changed_claim_hashes)+deleted_names) - for overtake in overtakes: - if overtake.current_winner: - self.execute( - f"UPDATE claimtrie SET claim_hash = ?, last_take_over_height = {height} " - f"WHERE normalized = ?", - (overtake.claim_hash, overtake.normalized) - ) - else: - self.execute( - f"INSERT INTO claimtrie (claim_hash, normalized, last_take_over_height) " - f"VALUES (?, ?, {height})", - (overtake.claim_hash, overtake.normalized) - ) - self.execute( - f"UPDATE claim SET activation_height = {height} WHERE normalized = ? 
" - f"AND (activation_height IS NULL OR activation_height > {height})", - (overtake.normalized,) - ) - - def _copy(self, height): - if height > 50: - self.execute(f"DROP TABLE claimtrie{height-50}") - self.execute(f"CREATE TABLE claimtrie{height} AS SELECT * FROM claimtrie") - - def update_claimtrie(self, height, changed_claim_hashes, deleted_names, timer): - r = timer.run - binary_claim_hashes = list(changed_claim_hashes) - - r(self._calculate_activation_height, height) - r(self._update_support_amount, binary_claim_hashes) - - r(self._update_effective_amount, height, binary_claim_hashes) - r(self._perform_overtake, height, binary_claim_hashes, list(deleted_names)) - - r(self._update_effective_amount, height) - r(self._perform_overtake, height, [], []) - - def get_expiring(self, height): - return self.execute( - f"SELECT claim_hash, normalized FROM claim WHERE expiration_height = {height}" - ) - - def enqueue_changes(self): - query = """ - SELECT claimtrie.claim_hash as is_controlling, - claimtrie.last_take_over_height, - (select group_concat(tag, ',,') from tag where tag.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as tags, - (select group_concat(language, ' ') from language where language.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as languages, - cr.has_source as reposted_has_source, - cr.claim_type as reposted_claim_type, - cr.stream_type as reposted_stream_type, - cr.media_type as reposted_media_type, - cr.duration as reposted_duration, - cr.fee_amount as reposted_fee_amount, - cr.fee_currency as reposted_fee_currency, - claim.* - FROM claim LEFT JOIN claimtrie USING (claim_hash) LEFT JOIN claim cr ON cr.claim_hash=claim.reposted_claim_hash - WHERE claim.claim_hash in (SELECT claim_hash FROM changelog) - """ - for claim in self.execute(query): - claim = claim._asdict() - id_set = set(filter(None, (claim['claim_hash'], claim['channel_hash'], claim['reposted_claim_hash']))) - claim['censor_type'] = 0 - censoring_channel_hash = None - claim['has_source'] = bool(claim.pop('reposted_has_source') or claim['has_source']) - claim['stream_type'] = claim.pop('reposted_stream_type') or claim['stream_type'] - claim['media_type'] = claim.pop('reposted_media_type') or claim['media_type'] - claim['fee_amount'] = claim.pop('reposted_fee_amount') or claim['fee_amount'] - claim['fee_currency'] = claim.pop('reposted_fee_currency') or claim['fee_currency'] - claim['duration'] = claim.pop('reposted_duration') or claim['duration'] - for reason_id in id_set: - if reason_id in self.blocked_streams: - claim['censor_type'] = 2 - censoring_channel_hash = self.blocked_streams.get(reason_id) - elif reason_id in self.blocked_channels: - claim['censor_type'] = 2 - censoring_channel_hash = self.blocked_channels.get(reason_id) - elif reason_id in self.filtered_streams: - claim['censor_type'] = 1 - censoring_channel_hash = self.filtered_streams.get(reason_id) - elif reason_id in self.filtered_channels: - claim['censor_type'] = 1 - censoring_channel_hash = self.filtered_channels.get(reason_id) - claim['censoring_channel_id'] = censoring_channel_hash[::-1].hex() if censoring_channel_hash else None - - claim['tags'] = claim['tags'].split(',,') if claim['tags'] else [] - claim['languages'] = claim['languages'].split(' ') if claim['languages'] else [] - yield 'update', claim - - def clear_changelog(self): - self.execute("delete from changelog;") - - def claim_producer(self): - while self.pending_deletes: - claim_hash = self.pending_deletes.pop() - yield 'delete', 
hexlify(claim_hash[::-1]).decode() - for claim in self.enqueue_changes(): - yield claim - self.clear_changelog() - - def advance_txs(self, height, all_txs, header, daemon_height, timer): - insert_claims = [] - update_claims = [] - update_claim_hashes = set() - delete_claim_hashes = self.pending_deletes - insert_supports = [] - delete_support_txo_hashes = set() - recalculate_claim_hashes = set() # added/deleted supports, added/updated claim - deleted_claim_names = set() - delete_others = set() - body_timer = timer.add_timer('body') - for position, (etx, txid) in enumerate(all_txs): - tx = timer.run( - Transaction, etx.raw, height=height, position=position - ) - # Inputs - spent_claims, spent_supports, spent_others = timer.run( - self.split_inputs_into_claims_supports_and_other, tx.inputs - ) - body_timer.start() - delete_claim_hashes.update({r.claim_hash for r in spent_claims}) - deleted_claim_names.update({r.normalized for r in spent_claims}) - delete_support_txo_hashes.update({r.txo_hash for r in spent_supports}) - recalculate_claim_hashes.update({r.claim_hash for r in spent_supports}) - delete_others.update(spent_others) - # Outputs - for output in tx.outputs: - if output.is_support: - insert_supports.append(output) - recalculate_claim_hashes.add(output.claim_hash) - elif output.script.is_claim_name: - insert_claims.append(output) - recalculate_claim_hashes.add(output.claim_hash) - elif output.script.is_update_claim: - claim_hash = output.claim_hash - update_claims.append(output) - recalculate_claim_hashes.add(claim_hash) - body_timer.stop() - - skip_update_claim_timer = timer.add_timer('skip update of abandoned claims') - skip_update_claim_timer.start() - for updated_claim in list(update_claims): - if updated_claim.ref.hash in delete_others: - update_claims.remove(updated_claim) - for updated_claim in update_claims: - claim_hash = updated_claim.claim_hash - delete_claim_hashes.discard(claim_hash) - update_claim_hashes.add(claim_hash) - skip_update_claim_timer.stop() - - skip_insert_claim_timer = timer.add_timer('skip insertion of abandoned claims') - skip_insert_claim_timer.start() - for new_claim in list(insert_claims): - if new_claim.ref.hash in delete_others: - if new_claim.claim_hash not in update_claim_hashes: - insert_claims.remove(new_claim) - skip_insert_claim_timer.stop() - - skip_insert_support_timer = timer.add_timer('skip insertion of abandoned supports') - skip_insert_support_timer.start() - for new_support in list(insert_supports): - if new_support.ref.hash in delete_others: - insert_supports.remove(new_support) - skip_insert_support_timer.stop() - - expire_timer = timer.add_timer('recording expired claims') - expire_timer.start() - for expired in self.get_expiring(height): - delete_claim_hashes.add(expired.claim_hash) - deleted_claim_names.add(expired.normalized) - expire_timer.stop() - - r = timer.run - affected_channels = r(self.delete_claims, delete_claim_hashes) - r(self.delete_supports, delete_support_txo_hashes) - r(self.insert_claims, insert_claims, header) - r(self.calculate_reposts, insert_claims) - r(self.update_claims, update_claims, header) - r(self.validate_channel_signatures, height, insert_claims, - update_claims, delete_claim_hashes, affected_channels, forward_timer=True) - r(self.insert_supports, insert_supports) - r(self.update_claimtrie, height, recalculate_claim_hashes, deleted_claim_names, forward_timer=True) - for algorithm in self.trending: - r(algorithm.run, self.db.cursor(), height, daemon_height, recalculate_claim_hashes) - - -class 
LBRYLevelDB(LevelDB): - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - path = os.path.join(self.env.db_dir, 'claims.db') - trending = [] - for algorithm_name in self.env.trending_algorithms: - if algorithm_name in TRENDING_ALGORITHMS: - trending.append(TRENDING_ALGORITHMS[algorithm_name]) - if self.env.es_mode == 'reader': - self.logger.info('Index mode: reader') - self.sql = None - else: - self.logger.info('Index mode: writer. Using SQLite db to sync ES') - self.sql = SQLDB( - self, path, - self.env.default('BLOCKING_CHANNEL_IDS', '').split(' '), - self.env.default('FILTERING_CHANNEL_IDS', '').split(' '), - trending - ) - - # Search index - self.search_index = SearchIndex( - self.env.es_index_prefix, self.env.database_query_timeout, self.env.elastic_host, self.env.elastic_port - ) - - def close(self): - super().close() - if self.sql: - self.sql.close() - - async def _open_dbs(self, *args, **kwargs): - await self.search_index.start() - await super()._open_dbs(*args, **kwargs) - if self.sql: - self.sql.open() diff --git a/lbry/wallet/server/env.py b/lbry/wallet/server/env.py index 1a109b9d3..2b4c489b3 100644 --- a/lbry/wallet/server/env.py +++ b/lbry/wallet/server/env.py @@ -5,7 +5,7 @@ # See the file "LICENCE" for information about the copyright # and warranty status of this software. - +import math import re import resource from os import environ @@ -39,10 +39,14 @@ class Env: self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK']) self.db_dir = self.required('DB_DIRECTORY') self.db_engine = self.default('DB_ENGINE', 'leveldb') - self.trending_algorithms = [ - trending for trending in set(self.default('TRENDING_ALGORITHMS', 'zscore').split(' ')) if trending - ] - self.max_query_workers = self.integer('MAX_QUERY_WORKERS', None) + # self.trending_algorithms = [ + # trending for trending in set(self.default('TRENDING_ALGORITHMS', 'zscore').split(' ')) if trending + # ] + self.trending_half_life = math.log2(0.1 ** (1 / (3 + self.integer('TRENDING_DECAY_RATE', 48)))) + 1 + self.trending_whale_half_life = math.log2(0.1 ** (1 / (3 + self.integer('TRENDING_WHALE_DECAY_RATE', 24)))) + 1 + self.trending_whale_threshold = float(self.integer('TRENDING_WHALE_THRESHOLD', 10000)) * 1E8 + + self.max_query_workers = self.integer('MAX_QUERY_WORKERS', 4) self.individual_tag_indexes = self.boolean('INDIVIDUAL_TAG_INDEXES', True) self.track_metrics = self.boolean('TRACK_METRICS', False) self.websocket_host = self.default('WEBSOCKET_HOST', self.host) @@ -57,7 +61,7 @@ class Env: self.coin = Coin.lookup_coin_class(coin_name, network) self.es_index_prefix = self.default('ES_INDEX_PREFIX', '') self.es_mode = self.default('ES_MODE', 'writer') - self.cache_MB = self.integer('CACHE_MB', 1200) + self.cache_MB = self.integer('CACHE_MB', 4096) self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT) # Server stuff self.tcp_port = self.integer('TCP_PORT', None) diff --git a/lbry/wallet/server/hash.py b/lbry/wallet/server/hash.py index 2c0201952..e9d088684 100644 --- a/lbry/wallet/server/hash.py +++ b/lbry/wallet/server/hash.py @@ -36,6 +36,7 @@ _sha512 = hashlib.sha512 _new_hash = hashlib.new _new_hmac = hmac.new HASHX_LEN = 11 +CLAIM_HASH_LEN = 20 def sha256(x): diff --git a/lbry/wallet/server/history.py b/lbry/wallet/server/history.py deleted file mode 100644 index f3a7fbf17..000000000 --- a/lbry/wallet/server/history.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (c) 2016-2018, Neil Booth -# Copyright (c) 2017, the ElectrumX authors -# -# All rights reserved. 
-# -# See the file "LICENCE" for information about the copyright -# and warranty status of this software. - -"""History by script hash (address).""" - -import array -import ast -import bisect -import time -from collections import defaultdict -from functools import partial - -from lbry.wallet.server import util -from lbry.wallet.server.util import pack_be_uint32, unpack_be_uint32_from, unpack_be_uint16_from -from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN - - -class History: - - DB_VERSIONS = [0, 1] - - def __init__(self): - self.logger = util.class_logger(__name__, self.__class__.__name__) - # For history compaction - self.max_hist_row_entries = 12500 - self.unflushed = defaultdict(partial(array.array, 'I')) - self.unflushed_count = 0 - self.db = None - - @property - def needs_migration(self): - return self.db_version != max(self.DB_VERSIONS) - - def migrate(self): - # 0 -> 1: flush_count from 16 to 32 bits - self.logger.warning("HISTORY MIGRATION IN PROGRESS. Please avoid shutting down before it finishes.") - with self.db.write_batch() as batch: - for key, value in self.db.iterator(prefix=b''): - if len(key) != 13: - continue - flush_id, = unpack_be_uint16_from(key[-2:]) - new_key = key[:-2] + pack_be_uint32(flush_id) - batch.put(new_key, value) - self.logger.warning("history migration: new keys added, removing old ones.") - for key, value in self.db.iterator(prefix=b''): - if len(key) == 13: - batch.delete(key) - self.logger.warning("history migration: writing new state.") - self.db_version = 1 - self.write_state(batch) - self.logger.warning("history migration: done.") - - def open_db(self, db_class, for_sync, utxo_flush_count, compacting): - self.db = db_class('hist', for_sync) - self.read_state() - if self.needs_migration: - self.migrate() - self.clear_excess(utxo_flush_count) - # An incomplete compaction needs to be cancelled otherwise - # restarting it will corrupt the history - if not compacting: - self._cancel_compaction() - return self.flush_count - - def close_db(self): - if self.db: - self.db.close() - self.db = None - - def read_state(self): - state = self.db.get(b'state\0\0') - if state: - state = ast.literal_eval(state.decode()) - if not isinstance(state, dict): - raise RuntimeError('failed reading state from history DB') - self.flush_count = state['flush_count'] - self.comp_flush_count = state.get('comp_flush_count', -1) - self.comp_cursor = state.get('comp_cursor', -1) - self.db_version = state.get('db_version', 0) - else: - self.flush_count = 0 - self.comp_flush_count = -1 - self.comp_cursor = -1 - self.db_version = max(self.DB_VERSIONS) - - self.logger.info(f'history DB version: {self.db_version}') - if self.db_version not in self.DB_VERSIONS: - msg = f'this software only handles DB versions {self.DB_VERSIONS}' - self.logger.error(msg) - raise RuntimeError(msg) - self.logger.info(f'flush count: {self.flush_count:,d}') - - def clear_excess(self, utxo_flush_count): - # < might happen at end of compaction as both DBs cannot be - # updated atomically - if self.flush_count <= utxo_flush_count: - return - - self.logger.info('DB shut down uncleanly. 
Scanning for ' - 'excess history flushes...') - - keys = [] - for key, hist in self.db.iterator(prefix=b''): - flush_id, = unpack_be_uint32_from(key[-4:]) - if flush_id > utxo_flush_count: - keys.append(key) - - self.logger.info(f'deleting {len(keys):,d} history entries') - - self.flush_count = utxo_flush_count - with self.db.write_batch() as batch: - for key in keys: - batch.delete(key) - self.write_state(batch) - - self.logger.info('deleted excess history entries') - - def write_state(self, batch): - """Write state to the history DB.""" - state = { - 'flush_count': self.flush_count, - 'comp_flush_count': self.comp_flush_count, - 'comp_cursor': self.comp_cursor, - 'db_version': self.db_version, - } - # History entries are not prefixed; the suffix \0\0 ensures we - # look similar to other entries and aren't interfered with - batch.put(b'state\0\0', repr(state).encode()) - - def add_unflushed(self, hashXs_by_tx, first_tx_num): - unflushed = self.unflushed - count = 0 - for tx_num, hashXs in enumerate(hashXs_by_tx, start=first_tx_num): - hashXs = set(hashXs) - for hashX in hashXs: - unflushed[hashX].append(tx_num) - count += len(hashXs) - self.unflushed_count += count - - def unflushed_memsize(self): - return len(self.unflushed) * 180 + self.unflushed_count * 4 - - def assert_flushed(self): - assert not self.unflushed - - def flush(self): - start_time = time.time() - self.flush_count += 1 - flush_id = pack_be_uint32(self.flush_count) - unflushed = self.unflushed - - with self.db.write_batch() as batch: - for hashX in sorted(unflushed): - key = hashX + flush_id - batch.put(key, unflushed[hashX].tobytes()) - self.write_state(batch) - - count = len(unflushed) - unflushed.clear() - self.unflushed_count = 0 - - if self.db.for_sync: - elapsed = time.time() - start_time - self.logger.info(f'flushed history in {elapsed:.1f}s ' - f'for {count:,d} addrs') - - def backup(self, hashXs, tx_count): - # Not certain this is needed, but it doesn't hurt - self.flush_count += 1 - nremoves = 0 - bisect_left = bisect.bisect_left - - with self.db.write_batch() as batch: - for hashX in sorted(hashXs): - deletes = [] - puts = {} - for key, hist in self.db.iterator(prefix=hashX, reverse=True): - a = array.array('I') - a.frombytes(hist) - # Remove all history entries >= tx_count - idx = bisect_left(a, tx_count) - nremoves += len(a) - idx - if idx > 0: - puts[key] = a[:idx].tobytes() - break - deletes.append(key) - - for key in deletes: - batch.delete(key) - for key, value in puts.items(): - batch.put(key, value) - self.write_state(batch) - - self.logger.info(f'backing up removed {nremoves:,d} history entries') - - # def get_txnums(self, hashX, limit=1000): - # """Generator that returns an unpruned, sorted list of tx_nums in the - # history of a hashX. Includes both spending and receiving - # transactions. By default yields at most 1000 entries. Set - # limit to None to get them all. """ - # limit = util.resolve_limit(limit) - # for key, hist in self.db.iterator(prefix=hashX): - # a = array.array('I') - # a.frombytes(hist) - # for tx_num in a: - # if limit == 0: - # return - # yield tx_num - # limit -= 1 - - # - # History compaction - # - - # comp_cursor is a cursor into compaction progress. - # -1: no compaction in progress - # 0-65535: Compaction in progress; all prefixes < comp_cursor have - # been compacted, and later ones have not. - # 65536: compaction complete in-memory but not flushed - # - # comp_flush_count applies during compaction, and is a flush count - # for history with prefix < comp_cursor. 
flush_count applies - # to still uncompacted history. It is -1 when no compaction is - # taking place. Key suffixes up to and including comp_flush_count - # are used, so a parallel history flush must first increment this - # - # When compaction is complete and the final flush takes place, - # flush_count is reset to comp_flush_count, and comp_flush_count to -1 - - def _flush_compaction(self, cursor, write_items, keys_to_delete): - """Flush a single compaction pass as a batch.""" - # Update compaction state - if cursor == 65536: - self.flush_count = self.comp_flush_count - self.comp_cursor = -1 - self.comp_flush_count = -1 - else: - self.comp_cursor = cursor - - # History DB. Flush compacted history and updated state - with self.db.write_batch() as batch: - # Important: delete first! The keyspace may overlap. - for key in keys_to_delete: - batch.delete(key) - for key, value in write_items: - batch.put(key, value) - self.write_state(batch) - - def _compact_hashX(self, hashX, hist_map, hist_list, - write_items, keys_to_delete): - """Compress history for a hashX. hist_list is an ordered list of - the histories to be compressed.""" - # History entries (tx numbers) are 4 bytes each. Distribute - # over rows of up to 50KB in size. A fixed row size means - # future compactions will not need to update the first N - 1 - # rows. - max_row_size = self.max_hist_row_entries * 4 - full_hist = b''.join(hist_list) - nrows = (len(full_hist) + max_row_size - 1) // max_row_size - if nrows > 4: - self.logger.info('hashX {} is large: {:,d} entries across ' - '{:,d} rows' - .format(hash_to_hex_str(hashX), - len(full_hist) // 4, nrows)) - - # Find what history needs to be written, and what keys need to - # be deleted. Start by assuming all keys are to be deleted, - # and then remove those that are the same on-disk as when - # compacted. - write_size = 0 - keys_to_delete.update(hist_map) - for n, chunk in enumerate(util.chunks(full_hist, max_row_size)): - key = hashX + pack_be_uint32(n) - if hist_map.get(key) == chunk: - keys_to_delete.remove(key) - else: - write_items.append((key, chunk)) - write_size += len(chunk) - - assert n + 1 == nrows - self.comp_flush_count = max(self.comp_flush_count, n) - - return write_size - - def _compact_prefix(self, prefix, write_items, keys_to_delete): - """Compact all history entries for hashXs beginning with the - given prefix. Update keys_to_delete and write.""" - prior_hashX = None - hist_map = {} - hist_list = [] - - key_len = HASHX_LEN + 2 - write_size = 0 - for key, hist in self.db.iterator(prefix=prefix): - # Ignore non-history entries - if len(key) != key_len: - continue - hashX = key[:-2] - if hashX != prior_hashX and prior_hashX: - write_size += self._compact_hashX(prior_hashX, hist_map, - hist_list, write_items, - keys_to_delete) - hist_map.clear() - hist_list.clear() - prior_hashX = hashX - hist_map[key] = hist - hist_list.append(hist) - - if prior_hashX: - write_size += self._compact_hashX(prior_hashX, hist_map, hist_list, - write_items, keys_to_delete) - return write_size - - def _compact_history(self, limit): - """Inner loop of history compaction. Loops until limit bytes have - been processed. 
- """ - keys_to_delete = set() - write_items = [] # A list of (key, value) pairs - write_size = 0 - - # Loop over 2-byte prefixes - cursor = self.comp_cursor - while write_size < limit and cursor < (1 << 32): - prefix = pack_be_uint32(cursor) - write_size += self._compact_prefix(prefix, write_items, - keys_to_delete) - cursor += 1 - - max_rows = self.comp_flush_count + 1 - self._flush_compaction(cursor, write_items, keys_to_delete) - - self.logger.info('history compaction: wrote {:,d} rows ({:.1f} MB), ' - 'removed {:,d} rows, largest: {:,d}, {:.1f}% complete' - .format(len(write_items), write_size / 1000000, - len(keys_to_delete), max_rows, - 100 * cursor / 65536)) - return write_size - - def _cancel_compaction(self): - if self.comp_cursor != -1: - self.logger.warning('cancelling in-progress history compaction') - self.comp_flush_count = -1 - self.comp_cursor = -1 diff --git a/lbry/wallet/server/leveldb.py b/lbry/wallet/server/leveldb.py index 5498706bd..bc48be52a 100644 --- a/lbry/wallet/server/leveldb.py +++ b/lbry/wallet/server/leveldb.py @@ -8,79 +8,92 @@ """Interface to the blockchain database.""" - +import os import asyncio import array -import ast -import base64 -import os import time -import zlib import typing -from typing import Optional, List, Tuple, Iterable +import struct +import zlib +import base64 +from typing import Optional, Iterable, Tuple, DefaultDict, Set, Dict, List, TYPE_CHECKING +from functools import partial from asyncio import sleep from bisect import bisect_right -from collections import namedtuple -from glob import glob -from struct import pack, unpack -from concurrent.futures.thread import ThreadPoolExecutor -import attr +from collections import defaultdict + +from lbry.error import ResolveCensoredError +from lbry.schema.result import Censor from lbry.utils import LRUCacheWithMetrics +from lbry.schema.url import URL, normalize_name from lbry.wallet.server import util -from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN +from lbry.wallet.server.hash import hash_to_hex_str +from lbry.wallet.server.tx import TxInput from lbry.wallet.server.merkle import Merkle, MerkleCache -from lbry.wallet.server.util import formatted_time -from lbry.wallet.server.storage import db_class -from lbry.wallet.server.history import History +from lbry.wallet.server.db.common import ResolveResult, STREAM_TYPES, CLAIM_TYPES +from lbry.wallet.server.db.prefixes import PendingActivationValue, ClaimTakeoverValue, ClaimToTXOValue, HubDB +from lbry.wallet.server.db.prefixes import ACTIVATED_CLAIM_TXO_TYPE, ACTIVATED_SUPPORT_TXO_TYPE +from lbry.wallet.server.db.prefixes import PendingActivationKey, TXOToClaimValue +from lbry.wallet.transaction import OutputScript +from lbry.schema.claim import Claim, guess_stream_type +from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger + +from lbry.wallet.server.db.elasticsearch import SearchIndex + +if TYPE_CHECKING: + from lbry.wallet.server.db.prefixes import EffectiveAmountKey -UTXO = namedtuple("UTXO", "tx_num tx_pos tx_hash height value") -HEADER_PREFIX = b'H' -TX_COUNT_PREFIX = b'T' -TX_HASH_PREFIX = b'X' -TX_PREFIX = b'B' -TX_NUM_PREFIX = b'N' -BLOCK_HASH_PREFIX = b'C' +class UTXO(typing.NamedTuple): + tx_num: int + tx_pos: int + tx_hash: bytes + height: int + value: int +TXO_STRUCT = struct.Struct(b'>LH') +TXO_STRUCT_unpack = TXO_STRUCT.unpack +TXO_STRUCT_pack = TXO_STRUCT.pack +OptionalResolveResultOrError = Optional[typing.Union[ResolveResult, ResolveCensoredError, LookupError, ValueError]] -@attr.s(slots=True) -class 
FlushData: - height = attr.ib() - tx_count = attr.ib() - headers = attr.ib() - block_hashes = attr.ib() - block_txs = attr.ib() - # The following are flushed to the UTXO DB if undo_infos is not None - undo_infos = attr.ib() - adds = attr.ib() - deletes = attr.ib() - tip = attr.ib() + +class DBError(Exception): + """Raised on general DB errors generally indicating corruption.""" class LevelDB: - """Simple wrapper of the backend database for querying. - - Performs no DB update, though the DB will be cleaned on opening if - it was shutdown uncleanly. - """ - - DB_VERSIONS = [6] - - class DBError(Exception): - """Raised on general DB errors generally indicating corruption.""" + DB_VERSIONS = HIST_DB_VERSIONS = [7] def __init__(self, env): self.logger = util.class_logger(__name__, self.__class__.__name__) self.env = env self.coin = env.coin - self.executor = None self.logger.info(f'switching current directory to {env.db_dir}') - self.db_class = db_class(env.db_dir, self.env.db_engine) - self.history = History() - self.utxo_db = None + self.prefix_db = None + + self.hist_unflushed = defaultdict(partial(array.array, 'I')) + self.hist_unflushed_count = 0 + self.hist_flush_count = 0 + self.hist_comp_flush_count = -1 + self.hist_comp_cursor = -1 + + # blocking/filtering dicts + blocking_channels = self.env.default('BLOCKING_CHANNEL_IDS', '').split(' ') + filtering_channels = self.env.default('FILTERING_CHANNEL_IDS', '').split(' ') + self.blocked_streams = {} + self.blocked_channels = {} + self.blocking_channel_hashes = { + bytes.fromhex(channel_id) for channel_id in blocking_channels if channel_id + } + self.filtered_streams = {} + self.filtered_channels = {} + self.filtering_channel_hashes = { + bytes.fromhex(channel_id) for channel_id in filtering_channels if channel_id + } + self.tx_counts = None self.headers = None self.encoded_headers = LRUCacheWithMetrics(1 << 21, metric_name='encoded_headers', namespace='wallet_server') @@ -92,11 +105,645 @@ class LevelDB: self.merkle = Merkle() self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes) - self.headers_db = None - self.tx_db = None - self._tx_and_merkle_cache = LRUCacheWithMetrics(2 ** 17, metric_name='tx_and_merkle', namespace="wallet_server") - self.total_transactions = None + + self.claim_to_txo: Dict[bytes, ClaimToTXOValue] = {} + self.txo_to_claim: DefaultDict[int, Dict[int, bytes]] = defaultdict(dict) + + # Search index + self.search_index = SearchIndex( + self.env.es_index_prefix, self.env.database_query_timeout, + elastic_host=env.elastic_host, elastic_port=env.elastic_port, + half_life=self.env.trending_half_life, whale_threshold=self.env.trending_whale_threshold, + whale_half_life=self.env.trending_whale_half_life + ) + + self.genesis_bytes = bytes.fromhex(self.coin.GENESIS_HASH) + + if env.coin.NET == 'mainnet': + self.ledger = Ledger + elif env.coin.NET == 'testnet': + self.ledger = TestNetLedger + else: + self.ledger = RegTestLedger + + def get_claim_from_txo(self, tx_num: int, tx_idx: int) -> Optional[TXOToClaimValue]: + claim_hash_and_name = self.prefix_db.txo_to_claim.get(tx_num, tx_idx) + if not claim_hash_and_name: + return + return claim_hash_and_name + + def get_repost(self, claim_hash) -> Optional[bytes]: + repost = self.prefix_db.repost.get(claim_hash) + if repost: + return repost.reposted_claim_hash + return + + def get_reposted_count(self, claim_hash: bytes) -> int: + return sum( + 1 for _ in self.prefix_db.reposted_claim.iterate(prefix=(claim_hash,), include_value=False) + ) + + def get_activation(self, tx_num, 
position, is_support=False) -> int: + activation = self.prefix_db.activated.get( + ACTIVATED_SUPPORT_TXO_TYPE if is_support else ACTIVATED_CLAIM_TXO_TYPE, tx_num, position + ) + if activation: + return activation.height + return -1 + + def get_supported_claim_from_txo(self, tx_num: int, position: int) -> typing.Tuple[Optional[bytes], Optional[int]]: + supported_claim_hash = self.prefix_db.support_to_claim.get(tx_num, position) + if supported_claim_hash: + packed_support_amount = self.prefix_db.claim_to_support.get( + supported_claim_hash.claim_hash, tx_num, position + ) + if packed_support_amount: + return supported_claim_hash.claim_hash, packed_support_amount.amount + return None, None + + def get_support_amount(self, claim_hash: bytes): + support_amount_val = self.prefix_db.support_amount.get(claim_hash) + if support_amount_val is None: + return 0 + return support_amount_val.amount + + def get_supports(self, claim_hash: bytes): + return [ + (k.tx_num, k.position, v.amount) for k, v in self.prefix_db.claim_to_support.iterate(prefix=(claim_hash,)) + ] + + def get_short_claim_id_url(self, name: str, normalized_name: str, claim_hash: bytes, + root_tx_num: int, root_position: int) -> str: + claim_id = claim_hash.hex() + for prefix_len in range(10): + for k in self.prefix_db.claim_short_id.iterate(prefix=(normalized_name, claim_id[:prefix_len+1]), + include_value=False): + if k.root_tx_num == root_tx_num and k.root_position == root_position: + return f'{name}#{k.partial_claim_id}' + break + print(f"{claim_id} has a collision") + return f'{name}#{claim_id}' + + def _prepare_resolve_result(self, tx_num: int, position: int, claim_hash: bytes, name: str, + root_tx_num: int, root_position: int, activation_height: int, + signature_valid: bool) -> ResolveResult: + try: + normalized_name = normalize_name(name) + except UnicodeDecodeError: + normalized_name = name + controlling_claim = self.get_controlling_claim(normalized_name) + + tx_hash = self.prefix_db.tx_hash.get(tx_num, deserialize_value=False) + height = bisect_right(self.tx_counts, tx_num) + created_height = bisect_right(self.tx_counts, root_tx_num) + last_take_over_height = controlling_claim.height + + expiration_height = self.coin.get_expiration_height(height) + support_amount = self.get_support_amount(claim_hash) + claim_amount = self.claim_to_txo[claim_hash].amount + + effective_amount = support_amount + claim_amount + channel_hash = self.get_channel_for_claim(claim_hash, tx_num, position) + reposted_claim_hash = self.get_repost(claim_hash) + short_url = self.get_short_claim_id_url(name, normalized_name, claim_hash, root_tx_num, root_position) + canonical_url = short_url + claims_in_channel = self.get_claims_in_channel_count(claim_hash) + if channel_hash: + channel_vals = self.claim_to_txo.get(channel_hash) + if channel_vals: + channel_short_url = self.get_short_claim_id_url( + channel_vals.name, channel_vals.normalized_name, channel_hash, channel_vals.root_tx_num, + channel_vals.root_position + ) + canonical_url = f'{channel_short_url}/{short_url}' + return ResolveResult( + name, normalized_name, claim_hash, tx_num, position, tx_hash, height, claim_amount, short_url=short_url, + is_controlling=controlling_claim.claim_hash == claim_hash, canonical_url=canonical_url, + last_takeover_height=last_take_over_height, claims_in_channel=claims_in_channel, + creation_height=created_height, activation_height=activation_height, + expiration_height=expiration_height, effective_amount=effective_amount, support_amount=support_amount, + 
channel_hash=channel_hash, reposted_claim_hash=reposted_claim_hash, + reposted=self.get_reposted_count(claim_hash), + signature_valid=None if not channel_hash else signature_valid + ) + + def _resolve(self, name: str, claim_id: Optional[str] = None, + amount_order: Optional[int] = None) -> Optional[ResolveResult]: + """ + :param normalized_name: name + :param claim_id: partial or complete claim id + :param amount_order: '$' suffix to a url, defaults to 1 (winning) if no claim id modifier is provided + """ + try: + normalized_name = normalize_name(name) + except UnicodeDecodeError: + normalized_name = name + if (not amount_order and not claim_id) or amount_order == 1: + # winning resolution + controlling = self.get_controlling_claim(normalized_name) + if not controlling: + # print(f"none controlling for lbry://{normalized_name}") + return + # print(f"resolved controlling lbry://{normalized_name}#{controlling.claim_hash.hex()}") + return self._fs_get_claim_by_hash(controlling.claim_hash) + + amount_order = max(int(amount_order or 1), 1) + + if claim_id: + if len(claim_id) == 40: # a full claim id + claim_txo = self.get_claim_txo(bytes.fromhex(claim_id)) + if not claim_txo or normalized_name != claim_txo.normalized_name: + return + return self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, bytes.fromhex(claim_id), claim_txo.name, + claim_txo.root_tx_num, claim_txo.root_position, + self.get_activation(claim_txo.tx_num, claim_txo.position), claim_txo.channel_signature_is_valid + ) + # resolve by partial/complete claim id + for key, claim_txo in self.prefix_db.claim_short_id.iterate(prefix=(normalized_name, claim_id[:10])): + claim_hash = self.txo_to_claim[claim_txo.tx_num][claim_txo.position] + non_normalized_name = self.claim_to_txo.get(claim_hash).name + signature_is_valid = self.claim_to_txo.get(claim_hash).channel_signature_is_valid + return self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, claim_hash, non_normalized_name, key.root_tx_num, + key.root_position, self.get_activation(claim_txo.tx_num, claim_txo.position), + signature_is_valid + ) + return + + # resolve by amount ordering, 1 indexed + for idx, (key, claim_val) in enumerate(self.prefix_db.effective_amount.iterate(prefix=(normalized_name,))): + if amount_order > idx + 1: + continue + claim_txo = self.claim_to_txo.get(claim_val.claim_hash) + activation = self.get_activation(key.tx_num, key.position) + return self._prepare_resolve_result( + key.tx_num, key.position, claim_val.claim_hash, key.normalized_name, claim_txo.root_tx_num, + claim_txo.root_position, activation, claim_txo.channel_signature_is_valid + ) + return + + def _resolve_claim_in_channel(self, channel_hash: bytes, normalized_name: str): + candidates = [] + for key, stream in self.prefix_db.channel_to_claim.iterate(prefix=(channel_hash, normalized_name)): + effective_amount = self.get_effective_amount(stream.claim_hash) + if not candidates or candidates[-1][-1] == effective_amount: + candidates.append((stream.claim_hash, key.tx_num, key.position, effective_amount)) + else: + break + if not candidates: + return + return list(sorted(candidates, key=lambda item: item[1]))[0] + + def _fs_resolve(self, url) -> typing.Tuple[OptionalResolveResultOrError, OptionalResolveResultOrError, + OptionalResolveResultOrError]: + try: + parsed = URL.parse(url) + except ValueError as e: + return e, None, None + + stream = channel = resolved_channel = resolved_stream = None + if parsed.has_stream_in_channel: + channel = parsed.channel + stream = 
parsed.stream + elif parsed.has_channel: + channel = parsed.channel + elif parsed.has_stream: + stream = parsed.stream + if channel: + resolved_channel = self._resolve(channel.name, channel.claim_id, channel.amount_order) + if not resolved_channel: + return None, LookupError(f'Could not find channel in "{url}".'), None + if stream: + if resolved_channel: + stream_claim = self._resolve_claim_in_channel(resolved_channel.claim_hash, stream.normalized) + if stream_claim: + stream_claim_id, stream_tx_num, stream_tx_pos, effective_amount = stream_claim + resolved_stream = self._fs_get_claim_by_hash(stream_claim_id) + else: + resolved_stream = self._resolve(stream.name, stream.claim_id, stream.amount_order) + if not channel and not resolved_channel and resolved_stream and resolved_stream.channel_hash: + resolved_channel = self._fs_get_claim_by_hash(resolved_stream.channel_hash) + if not resolved_stream: + return LookupError(f'Could not find claim at "{url}".'), None, None + + repost = None + if resolved_stream or resolved_channel: + claim_hash = resolved_stream.claim_hash if resolved_stream else resolved_channel.claim_hash + claim = resolved_stream if resolved_stream else resolved_channel + reposted_claim_hash = resolved_stream.reposted_claim_hash if resolved_stream else None + blocker_hash = self.blocked_streams.get(claim_hash) or self.blocked_streams.get( + reposted_claim_hash) or self.blocked_channels.get(claim_hash) or self.blocked_channels.get( + reposted_claim_hash) or self.blocked_channels.get(claim.channel_hash) + if blocker_hash: + reason_row = self._fs_get_claim_by_hash(blocker_hash) + return None, ResolveCensoredError(url, blocker_hash, censor_row=reason_row), None + if claim.reposted_claim_hash: + repost = self._fs_get_claim_by_hash(claim.reposted_claim_hash) + return resolved_stream, resolved_channel, repost + + async def fs_resolve(self, url) -> typing.Tuple[OptionalResolveResultOrError, OptionalResolveResultOrError, + OptionalResolveResultOrError]: + return await asyncio.get_event_loop().run_in_executor(None, self._fs_resolve, url) + + def _fs_get_claim_by_hash(self, claim_hash): + claim = self.claim_to_txo.get(claim_hash) + if claim: + activation = self.get_activation(claim.tx_num, claim.position) + return self._prepare_resolve_result( + claim.tx_num, claim.position, claim_hash, claim.name, claim.root_tx_num, claim.root_position, + activation, claim.channel_signature_is_valid + ) + + async def fs_getclaimbyid(self, claim_id): + return await asyncio.get_event_loop().run_in_executor( + None, self._fs_get_claim_by_hash, bytes.fromhex(claim_id) + ) + + def get_claim_txo_amount(self, claim_hash: bytes) -> Optional[int]: + claim = self.get_claim_txo(claim_hash) + if claim: + return claim.amount + + def get_block_hash(self, height: int) -> Optional[bytes]: + v = self.prefix_db.block_hash.get(height) + if v: + return v.block_hash + + def get_support_txo_amount(self, claim_hash: bytes, tx_num: int, position: int) -> Optional[int]: + v = self.prefix_db.claim_to_support.get(claim_hash, tx_num, position) + return None if not v else v.amount + + def get_claim_txo(self, claim_hash: bytes) -> Optional[ClaimToTXOValue]: + assert claim_hash + return self.prefix_db.claim_to_txo.get(claim_hash) + + def _get_active_amount(self, claim_hash: bytes, txo_type: int, height: int) -> int: + return sum( + v.amount for v in self.prefix_db.active_amount.iterate( + start=(claim_hash, txo_type, 0), stop=(claim_hash, txo_type, height), include_key=False + ) + ) + + def get_active_amount_as_of_height(self, 
claim_hash: bytes, height: int) -> int: + for v in self.prefix_db.active_amount.iterate( + start=(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, 0), stop=(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, height), + include_key=False, reverse=True): + return v.amount + return 0 + + def get_effective_amount(self, claim_hash: bytes, support_only=False) -> int: + support_amount = self._get_active_amount(claim_hash, ACTIVATED_SUPPORT_TXO_TYPE, self.db_height + 1) + if support_only: + return support_only + return support_amount + self._get_active_amount(claim_hash, ACTIVATED_CLAIM_TXO_TYPE, self.db_height + 1) + + def get_url_effective_amount(self, name: str, claim_hash: bytes) -> Optional['EffectiveAmountKey']: + for k, v in self.prefix_db.effective_amount.iterate(prefix=(name,)): + if v.claim_hash == claim_hash: + return k + + def get_claims_for_name(self, name): + claims = [] + prefix = self.prefix_db.claim_short_id.pack_partial_key(name) + bytes([1]) + for _k, _v in self.prefix_db.iterator(prefix=prefix): + v = self.prefix_db.claim_short_id.unpack_value(_v) + claim_hash = self.get_claim_from_txo(v.tx_num, v.position).claim_hash + if claim_hash not in claims: + claims.append(claim_hash) + return claims + + def get_claims_in_channel_count(self, channel_hash) -> int: + channel_count_val = self.prefix_db.channel_count.get(channel_hash) + if channel_count_val is None: + return 0 + return channel_count_val.count + + async def reload_blocking_filtering_streams(self): + def reload(): + self.blocked_streams, self.blocked_channels = self.get_streams_and_channels_reposted_by_channel_hashes( + self.blocking_channel_hashes + ) + self.filtered_streams, self.filtered_channels = self.get_streams_and_channels_reposted_by_channel_hashes( + self.filtering_channel_hashes + ) + await asyncio.get_event_loop().run_in_executor(None, reload) + + def get_streams_and_channels_reposted_by_channel_hashes(self, reposter_channel_hashes: Set[bytes]): + streams, channels = {}, {} + for reposter_channel_hash in reposter_channel_hashes: + for stream in self.prefix_db.channel_to_claim.iterate((reposter_channel_hash, ), include_key=False): + repost = self.get_repost(stream.claim_hash) + if repost: + txo = self.get_claim_txo(repost) + if txo: + if txo.normalized_name.startswith('@'): + channels[repost] = reposter_channel_hash + else: + streams[repost] = reposter_channel_hash + return streams, channels + + def get_channel_for_claim(self, claim_hash, tx_num, position) -> Optional[bytes]: + v = self.prefix_db.claim_to_channel.get(claim_hash, tx_num, position) + if v: + return v.signing_hash + + def get_expired_by_height(self, height: int) -> Dict[bytes, Tuple[int, int, str, TxInput]]: + expired = {} + for k, v in self.prefix_db.claim_expiration.iterate(prefix=(height,)): + tx_hash = self.prefix_db.tx_hash.get(k.tx_num, deserialize_value=False) + tx = self.coin.transaction(self.prefix_db.tx.get(tx_hash, deserialize_value=False)) + # treat it like a claim spend so it will delete/abandon properly + # the _spend_claim function this result is fed to expects a txi, so make a mock one + # print(f"\texpired lbry://{v.name} {v.claim_hash.hex()}") + expired[v.claim_hash] = ( + k.tx_num, k.position, v.normalized_name, + TxInput(prev_hash=tx_hash, prev_idx=k.position, script=tx.outputs[k.position].pk_script, sequence=0) + ) + return expired + + def get_controlling_claim(self, name: str) -> Optional[ClaimTakeoverValue]: + controlling = self.prefix_db.claim_takeover.get(name) + if not controlling: + return + return controlling + + def get_claim_txos_for_name(self, 
name: str): + txos = {} + prefix = self.prefix_db.claim_short_id.pack_partial_key(name) + int(1).to_bytes(1, byteorder='big') + for k, v in self.prefix_db.iterator(prefix=prefix): + tx_num, nout = self.prefix_db.claim_short_id.unpack_value(v) + txos[self.get_claim_from_txo(tx_num, nout).claim_hash] = tx_num, nout + return txos + + def get_claim_metadata(self, tx_hash, nout): + raw = self.prefix_db.tx.get(tx_hash, deserialize_value=False) + try: + output = self.coin.transaction(raw).outputs[nout] + script = OutputScript(output.pk_script) + script.parse() + return Claim.from_bytes(script.values['claim']) + except: + self.logger.error( + "tx parsing for ES went boom %s %s", tx_hash[::-1].hex(), + (raw or b'').hex() + ) + return + + def _prepare_claim_for_sync(self, claim_hash: bytes): + claim = self._fs_get_claim_by_hash(claim_hash) + if not claim: + print("wat") + return + return self._prepare_claim_metadata(claim_hash, claim) + + def _prepare_claim_metadata(self, claim_hash: bytes, claim: ResolveResult): + metadata = self.get_claim_metadata(claim.tx_hash, claim.position) + if not metadata: + return + metadata = metadata + if not metadata.is_stream or not metadata.stream.has_fee: + fee_amount = 0 + else: + fee_amount = int(max(metadata.stream.fee.amount or 0, 0) * 1000) + if fee_amount >= 9223372036854775807: + return + reposted_claim_hash = None if not metadata.is_repost else metadata.repost.reference.claim_hash[::-1] + reposted_claim = None + reposted_metadata = None + if reposted_claim_hash: + reposted_claim = self.claim_to_txo.get(reposted_claim_hash) + if not reposted_claim: + return + reposted_metadata = self.get_claim_metadata( + self.prefix_db.tx_hash.get(reposted_claim.tx_num, deserialize_value=False), reposted_claim.position + ) + if not reposted_metadata: + return + reposted_tags = [] + reposted_languages = [] + reposted_has_source = False + reposted_claim_type = None + reposted_stream_type = None + reposted_media_type = None + reposted_fee_amount = None + reposted_fee_currency = None + reposted_duration = None + if reposted_claim: + reposted_tx_hash = self.prefix_db.tx_hash.get(reposted_claim.tx_num, deserialize_value=False) + raw_reposted_claim_tx = self.prefix_db.tx.get(reposted_tx_hash, deserialize_value=False) + try: + reposted_claim_txo = self.coin.transaction( + raw_reposted_claim_tx + ).outputs[reposted_claim.position] + reposted_script = OutputScript(reposted_claim_txo.pk_script) + reposted_script.parse() + except: + self.logger.error( + "repost tx parsing for ES went boom %s %s", reposted_tx_hash[::-1].hex(), + raw_reposted_claim_tx.hex() + ) + return + try: + reposted_metadata = Claim.from_bytes(reposted_script.values['claim']) + except: + self.logger.error( + "reposted claim parsing for ES went boom %s %s", reposted_tx_hash[::-1].hex(), + raw_reposted_claim_tx.hex() + ) + return + if reposted_metadata: + if reposted_metadata.is_stream: + meta = reposted_metadata.stream + elif reposted_metadata.is_channel: + meta = reposted_metadata.channel + elif reposted_metadata.is_collection: + meta = reposted_metadata.collection + elif reposted_metadata.is_repost: + meta = reposted_metadata.repost + else: + return + reposted_tags = [tag for tag in meta.tags] + reposted_languages = [lang.language or 'none' for lang in meta.languages] or ['none'] + reposted_has_source = False if not reposted_metadata.is_stream else reposted_metadata.stream.has_source + reposted_claim_type = CLAIM_TYPES[reposted_metadata.claim_type] + reposted_stream_type = 
STREAM_TYPES[guess_stream_type(reposted_metadata.stream.source.media_type)] \ + if reposted_metadata.is_stream else 0 + reposted_media_type = reposted_metadata.stream.source.media_type if reposted_metadata.is_stream else 0 + if not reposted_metadata.is_stream or not reposted_metadata.stream.has_fee: + reposted_fee_amount = 0 + else: + reposted_fee_amount = int(max(reposted_metadata.stream.fee.amount or 0, 0) * 1000) + if reposted_fee_amount >= 9223372036854775807: + return + reposted_fee_currency = None if not reposted_metadata.is_stream else reposted_metadata.stream.fee.currency + reposted_duration = None + if reposted_metadata.is_stream and \ + (reposted_metadata.stream.video.duration or reposted_metadata.stream.audio.duration): + reposted_duration = reposted_metadata.stream.video.duration or reposted_metadata.stream.audio.duration + if metadata.is_stream: + meta = metadata.stream + elif metadata.is_channel: + meta = metadata.channel + elif metadata.is_collection: + meta = metadata.collection + elif metadata.is_repost: + meta = metadata.repost + else: + return + claim_tags = [tag for tag in meta.tags] + claim_languages = [lang.language or 'none' for lang in meta.languages] or ['none'] + + tags = list(set(claim_tags).union(set(reposted_tags))) + languages = list(set(claim_languages).union(set(reposted_languages))) + blocked_hash = self.blocked_streams.get(claim_hash) or self.blocked_streams.get( + reposted_claim_hash) or self.blocked_channels.get(claim_hash) or self.blocked_channels.get( + reposted_claim_hash) or self.blocked_channels.get(claim.channel_hash) + filtered_hash = self.filtered_streams.get(claim_hash) or self.filtered_streams.get( + reposted_claim_hash) or self.filtered_channels.get(claim_hash) or self.filtered_channels.get( + reposted_claim_hash) or self.filtered_channels.get(claim.channel_hash) + value = { + 'claim_id': claim_hash.hex(), + 'claim_name': claim.name, + 'normalized_name': claim.normalized_name, + 'tx_id': claim.tx_hash[::-1].hex(), + 'tx_num': claim.tx_num, + 'tx_nout': claim.position, + 'amount': claim.amount, + 'timestamp': self.estimate_timestamp(claim.height), + 'creation_timestamp': self.estimate_timestamp(claim.creation_height), + 'height': claim.height, + 'creation_height': claim.creation_height, + 'activation_height': claim.activation_height, + 'expiration_height': claim.expiration_height, + 'effective_amount': claim.effective_amount, + 'support_amount': claim.support_amount, + 'is_controlling': bool(claim.is_controlling), + 'last_take_over_height': claim.last_takeover_height, + 'short_url': claim.short_url, + 'canonical_url': claim.canonical_url, + 'title': None if not metadata.is_stream else metadata.stream.title, + 'author': None if not metadata.is_stream else metadata.stream.author, + 'description': None if not metadata.is_stream else metadata.stream.description, + 'claim_type': CLAIM_TYPES[metadata.claim_type], + 'has_source': reposted_has_source if metadata.is_repost else ( + False if not metadata.is_stream else metadata.stream.has_source), + 'stream_type': STREAM_TYPES[guess_stream_type(metadata.stream.source.media_type)] + if metadata.is_stream else reposted_stream_type if metadata.is_repost else 0, + 'media_type': metadata.stream.source.media_type + if metadata.is_stream else reposted_media_type if metadata.is_repost else None, + 'fee_amount': fee_amount if not metadata.is_repost else reposted_fee_amount, + 'fee_currency': metadata.stream.fee.currency + if metadata.is_stream else reposted_fee_currency if metadata.is_repost else None, + 
'repost_count': self.get_reposted_count(claim_hash), + 'reposted_claim_id': None if not reposted_claim_hash else reposted_claim_hash.hex(), + 'reposted_claim_type': reposted_claim_type, + 'reposted_has_source': reposted_has_source, + 'channel_id': None if not metadata.is_signed else metadata.signing_channel_hash[::-1].hex(), + 'public_key_id': None if not metadata.is_channel else + self.ledger.public_key_to_address(metadata.channel.public_key_bytes), + 'signature': (metadata.signature or b'').hex() or None, + # 'signature_digest': metadata.signature, + 'is_signature_valid': bool(claim.signature_valid), + 'tags': tags, + 'languages': languages, + 'censor_type': Censor.RESOLVE if blocked_hash else Censor.SEARCH if filtered_hash else Censor.NOT_CENSORED, + 'censoring_channel_id': (blocked_hash or filtered_hash or b'').hex() or None, + 'claims_in_channel': None if not metadata.is_channel else self.get_claims_in_channel_count(claim_hash) + } + + if metadata.is_repost and reposted_duration is not None: + value['duration'] = reposted_duration + elif metadata.is_stream and (metadata.stream.video.duration or metadata.stream.audio.duration): + value['duration'] = metadata.stream.video.duration or metadata.stream.audio.duration + if metadata.is_stream: + value['release_time'] = metadata.stream.release_time or value['creation_timestamp'] + elif metadata.is_repost or metadata.is_collection: + value['release_time'] = value['creation_timestamp'] + return value + + async def all_claims_producer(self, batch_size=500_000): + batch = [] + for claim_hash, claim_txo in self.claim_to_txo.items(): + # TODO: fix the couple of claim txos that dont have controlling names + if not self.prefix_db.claim_takeover.get(claim_txo.normalized_name): + continue + claim = self._fs_get_claim_by_hash(claim_hash) + if claim: + batch.append(claim) + if len(batch) == batch_size: + batch.sort(key=lambda x: x.tx_hash) + for claim in batch: + meta = self._prepare_claim_metadata(claim.claim_hash, claim) + if meta: + yield meta + batch.clear() + batch.sort(key=lambda x: x.tx_hash) + for claim in batch: + meta = self._prepare_claim_metadata(claim.claim_hash, claim) + if meta: + yield meta + batch.clear() + + async def claims_producer(self, claim_hashes: Set[bytes]): + batch = [] + results = [] + + loop = asyncio.get_event_loop() + + def produce_claim(claim_hash): + if claim_hash not in self.claim_to_txo: + self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex()) + return + name = self.claim_to_txo[claim_hash].normalized_name + if not self.prefix_db.claim_takeover.get(name): + self.logger.warning("can't sync non existent claim to ES: %s", claim_hash.hex()) + return + claim_txo = self.claim_to_txo.get(claim_hash) + if not claim_txo: + return + activation = self.get_activation(claim_txo.tx_num, claim_txo.position) + claim = self._prepare_resolve_result( + claim_txo.tx_num, claim_txo.position, claim_hash, claim_txo.name, claim_txo.root_tx_num, + claim_txo.root_position, activation, claim_txo.channel_signature_is_valid + ) + if claim: + batch.append(claim) + + def get_metadata(claim): + meta = self._prepare_claim_metadata(claim.claim_hash, claim) + if meta: + results.append(meta) + + if claim_hashes: + await asyncio.wait( + [loop.run_in_executor(None, produce_claim, claim_hash) for claim_hash in claim_hashes] + ) + batch.sort(key=lambda x: x.tx_hash) + + if batch: + await asyncio.wait( + [loop.run_in_executor(None, get_metadata, claim) for claim in batch] + ) + for meta in results: + yield meta + + batch.clear() + + 
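For context on the `censor_type` field written into the ES document above: a blocked claim (or blocked repost/channel) is hidden at resolve time, while a filtered one is only hidden from search results, mirroring the 2/1/0 levels the old SQL `enqueue_changes` writer emitted. A minimal, self-contained sketch of that mapping; the integer values and the helper name are stand-ins (assumptions), not part of this patch:

```python
from typing import Optional

# Stand-ins for the Censor levels from lbry.schema.result, assumed to follow the
# 2 = blocked, 1 = filtered, 0 = not censored convention of the old SQL writer.
RESOLVE, SEARCH, NOT_CENSORED = 2, 1, 0


def censor_level(blocked_hash: Optional[bytes], filtered_hash: Optional[bytes]) -> int:
    """Hypothetical helper mirroring the censor_type expression in _prepare_claim_metadata."""
    if blocked_hash:
        return RESOLVE       # censored at resolve time
    if filtered_hash:
        return SEARCH        # only hidden from search results
    return NOT_CENSORED


assert censor_level(b'\x01' * 20, None) == RESOLVE
assert censor_level(None, b'\x02' * 20) == SEARCH
assert censor_level(None, None) == NOT_CENSORED
```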
def get_activated_at_height(self, height: int) -> DefaultDict[PendingActivationValue, List[PendingActivationKey]]: + activated = defaultdict(list) + for k, v in self.prefix_db.pending_activation.iterate(prefix=(height,)): + activated[v].append(k) + return activated + + def get_future_activated(self, height: int) -> typing.Dict[PendingActivationValue, PendingActivationKey]: + results = {} + for k, v in self.prefix_db.pending_activation.iterate( + start=(height + 1,), stop=(height + 1 + self.coin.maxTakeoverDelay,), reverse=True): + if v not in results: + results[v] = k + return results async def _read_tx_counts(self): if self.tx_counts is not None: @@ -105,12 +752,11 @@ class LevelDB: # height N. So tx_counts[0] is 1 - the genesis coinbase def get_counts(): - return tuple( - util.unpack_be_uint64(tx_count) - for tx_count in self.tx_db.iterator(prefix=TX_COUNT_PREFIX, include_key=False) - ) + return [ + v.tx_count for v in self.prefix_db.tx_count.iterate(include_key=False, fill_cache=False) + ] - tx_counts = await asyncio.get_event_loop().run_in_executor(self.executor, get_counts) + tx_counts = await asyncio.get_event_loop().run_in_executor(None, get_counts) assert len(tx_counts) == self.db_height + 1, f"{len(tx_counts)} vs {self.db_height + 1}" self.tx_counts = array.array('I', tx_counts) @@ -120,110 +766,87 @@ class LevelDB: else: assert self.db_tx_count == 0 - async def _read_txids(self): - def get_txids(): - return list(self.tx_db.iterator(prefix=TX_HASH_PREFIX, include_key=False)) + async def _read_claim_txos(self): + def read_claim_txos(): + set_claim_to_txo = self.claim_to_txo.__setitem__ + for k, v in self.prefix_db.claim_to_txo.iterate(fill_cache=False): + set_claim_to_txo(k.claim_hash, v) + self.txo_to_claim[v.tx_num][v.position] = k.claim_hash + + self.claim_to_txo.clear() + self.txo_to_claim.clear() start = time.perf_counter() - self.logger.info("loading txids") - txids = await asyncio.get_event_loop().run_in_executor(self.executor, get_txids) - assert len(txids) == len(self.tx_counts) == 0 or len(txids) == self.tx_counts[-1] - self.total_transactions = txids + self.logger.info("loading claims") + await asyncio.get_event_loop().run_in_executor(None, read_claim_txos) ts = time.perf_counter() - start - self.logger.info("loaded %i txids in %ss", len(self.total_transactions), round(ts, 4)) + self.logger.info("loaded %i claim txos in %ss", len(self.claim_to_txo), round(ts, 4)) async def _read_headers(self): if self.headers is not None: return def get_headers(): - return list(self.headers_db.iterator(prefix=HEADER_PREFIX, include_key=False)) + return [ + header for header in self.prefix_db.header.iterate( + include_key=False, fill_cache=False, deserialize_value=False + ) + ] - headers = await asyncio.get_event_loop().run_in_executor(self.executor, get_headers) + headers = await asyncio.get_event_loop().run_in_executor(None, get_headers) assert len(headers) - 1 == self.db_height, f"{len(headers)} vs {self.db_height}" self.headers = headers - async def _open_dbs(self, for_sync, compacting): - if self.executor is None: - self.executor = ThreadPoolExecutor(1) - coin_path = os.path.join(self.env.db_dir, 'COIN') - if not os.path.isfile(coin_path): - with util.open_file(coin_path, create=True) as f: - f.write(f'ElectrumX databases and metadata for ' - f'{self.coin.NAME} {self.coin.NET}'.encode()) + def estimate_timestamp(self, height: int) -> int: + if height < len(self.headers): + return struct.unpack('= 0 else 0) - assert len(flush_data.block_txs) == len(flush_data.headers) - assert 
flush_data.height == self.fs_height + len(flush_data.headers) - assert flush_data.tx_count == (self.tx_counts[-1] if self.tx_counts - else 0) - assert len(self.tx_counts) == flush_data.height + 1 - assert len( - b''.join(hashes for hashes, _ in flush_data.block_txs) - ) // 32 == flush_data.tx_count - prior_tx_count - - # Write the headers - start_time = time.perf_counter() - - with self.headers_db.write_batch() as batch: - batch_put = batch.put - for i, header in enumerate(flush_data.headers): - batch_put(HEADER_PREFIX + util.pack_be_uint64(self.fs_height + i + 1), header) - self.headers.append(header) - flush_data.headers.clear() - - height_start = self.fs_height + 1 - tx_num = prior_tx_count - - with self.tx_db.write_batch() as batch: - batch_put = batch.put - for block_hash, (tx_hashes, txs) in zip(flush_data.block_hashes, flush_data.block_txs): - tx_count = self.tx_counts[height_start] - batch_put(BLOCK_HASH_PREFIX + util.pack_be_uint64(height_start), block_hash[::-1]) - batch_put(TX_COUNT_PREFIX + util.pack_be_uint64(height_start), util.pack_be_uint64(tx_count)) - height_start += 1 - offset = 0 - while offset < len(tx_hashes): - batch_put(TX_HASH_PREFIX + util.pack_be_uint64(tx_num), tx_hashes[offset:offset+32]) - batch_put(TX_NUM_PREFIX + tx_hashes[offset:offset+32], util.pack_be_uint64(tx_num)) - batch_put(TX_PREFIX + tx_hashes[offset:offset+32], txs[offset // 32]) - tx_num += 1 - offset += 32 - - flush_data.block_txs.clear() - flush_data.block_hashes.clear() - - self.fs_height = flush_data.height - self.fs_tx_count = flush_data.tx_count - elapsed = time.perf_counter() - start_time - self.logger.info(f'flushed filesystem data in {elapsed:.2f}s') - - def flush_history(self): - self.history.flush() - - def flush_utxo_db(self, batch, flush_data): - """Flush the cached DB writes and UTXO set to the batch.""" - # Care is needed because the writes generated by flushing the - # UTXO state may have keys in common with our write cache or - # may be in the DB already. - start_time = time.time() - add_count = len(flush_data.adds) - spend_count = len(flush_data.deletes) // 2 - - # Spends - batch_delete = batch.delete - for key in sorted(flush_data.deletes): - batch_delete(key) - flush_data.deletes.clear() - - # New UTXOs - batch_put = batch.put - for key, value in flush_data.adds.items(): - # suffix = tx_idx + tx_num - hashX = value[:-12] - suffix = key[-2:] + value[-12:-8] - batch_put(b'h' + key[:4] + suffix, hashX) - batch_put(b'u' + hashX + suffix, value[-8:]) - flush_data.adds.clear() - - # New undo information - self.flush_undo_infos(batch_put, flush_data.undo_infos) - flush_data.undo_infos.clear() - - if self.utxo_db.for_sync: - block_count = flush_data.height - self.db_height - tx_count = flush_data.tx_count - self.db_tx_count - elapsed = time.time() - start_time - self.logger.info(f'flushed {block_count:,d} blocks with ' - f'{tx_count:,d} txs, {add_count:,d} UTXO adds, ' - f'{spend_count:,d} spends in ' - f'{elapsed:.1f}s, committing...') - - self.utxo_flush_count = self.history.flush_count - self.db_height = flush_data.height - self.db_tx_count = flush_data.tx_count - self.db_tip = flush_data.tip - - def flush_state(self, batch): - """Flush chain state to the batch.""" - now = time.time() - self.wall_time += now - self.last_flush - self.last_flush = now - self.last_flush_tx_count = self.fs_tx_count - self.write_utxo_state(batch) - - def flush_backup(self, flush_data, touched): - """Like flush_dbs() but when backing up. 
All UTXOs are flushed.""" - assert not flush_data.headers - assert not flush_data.block_txs - assert flush_data.height < self.db_height - self.history.assert_flushed() - - start_time = time.time() - tx_delta = flush_data.tx_count - self.last_flush_tx_count - - self.backup_fs(flush_data.height, flush_data.tx_count) - self.history.backup(touched, flush_data.tx_count) - with self.utxo_db.write_batch() as batch: - self.flush_utxo_db(batch, flush_data) - # Flush state last as it reads the wall time. - self.flush_state(batch) - - elapsed = self.last_flush - start_time - self.logger.info(f'backup flush #{self.history.flush_count:,d} took ' - f'{elapsed:.1f}s. Height {flush_data.height:,d} ' - f'txs: {flush_data.tx_count:,d} ({tx_delta:+,d})') - - def backup_fs(self, height, tx_count): - """Back up during a reorg. This just updates our pointers.""" - while self.fs_height > height: - self.fs_height -= 1 - self.headers.pop() - self.fs_tx_count = tx_count - # Truncate header_mc: header count is 1 more than the height. - self.header_mc.truncate(height + 1) - def raw_header(self, height): """Return the binary header at the given height.""" header, n = self.read_headers(height, 1) @@ -461,8 +889,7 @@ class LevelDB: """ if start_height < 0 or count < 0: - raise self.DBError(f'{count:,d} headers starting at ' - f'{start_height:,d} not on disk') + raise DBError(f'{count:,d} headers starting at {start_height:,d} not on disk') disk_count = max(0, min(count, self.db_height + 1 - start_height)) if disk_count: @@ -477,7 +904,7 @@ class LevelDB: if tx_height > self.db_height: return None, tx_height try: - return self.total_transactions[tx_num], tx_height + return self.prefix_db.tx_hash.get(tx_num, deserialize_value=False), tx_height except IndexError: self.logger.exception( "Failed to access a cached transaction, known bug #3142 " @@ -485,12 +912,18 @@ class LevelDB: ) return None, tx_height - def _fs_transactions(self, txids: Iterable[str]): - unpack_be_uint64 = util.unpack_be_uint64 - tx_counts = self.tx_counts - tx_db_get = self.tx_db.get - tx_cache = self._tx_and_merkle_cache + def get_block_txs(self, height: int) -> List[bytes]: + return [ + tx_hash for tx_hash in self.prefix_db.tx_hash.iterate( + start=(self.tx_counts[height-1],), stop=(self.tx_counts[height],), + deserialize_value=False, include_key=False + ) + ] + def _fs_transactions(self, txids: Iterable[str]): + tx_counts = self.tx_counts + tx_db_get = self.prefix_db.tx.get + tx_cache = self._tx_and_merkle_cache tx_infos = {} for tx_hash in txids: @@ -499,14 +932,14 @@ class LevelDB: tx, merkle = cached_tx else: tx_hash_bytes = bytes.fromhex(tx_hash)[::-1] - tx_num = tx_db_get(TX_NUM_PREFIX + tx_hash_bytes) + tx_num = self.prefix_db.tx_num.get(tx_hash_bytes) tx = None tx_height = -1 + tx_num = None if not tx_num else tx_num.tx_num if tx_num is not None: - tx_num = unpack_be_uint64(tx_num) + fill_cache = tx_num in self.txo_to_claim and len(self.txo_to_claim[tx_num]) > 0 tx_height = bisect_right(tx_counts, tx_num) - if tx_height < self.db_height: - tx = tx_db_get(TX_PREFIX + tx_hash_bytes) + tx = tx_db_get(tx_hash_bytes, fill_cache=fill_cache, deserialize_value=False) if tx_height == -1: merkle = { 'block_height': -1 @@ -514,7 +947,7 @@ class LevelDB: else: tx_pos = tx_num - tx_counts[tx_height - 1] branch, root = self.merkle.branch_and_root( - self.total_transactions[tx_counts[tx_height - 1]:tx_counts[tx_height]], tx_pos + self.get_block_txs(tx_height), tx_pos ) merkle = { 'block_height': tx_height, @@ -530,13 +963,25 @@ class LevelDB: return tx_infos 
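The `fs_transactions` wrapper below, like `fs_resolve` and `limited_history` in this file, follows one pattern: the synchronous `prefix_db` read runs in the default thread-pool executor so the asyncio event loop is never blocked. A self-contained sketch of that pattern under stated assumptions; `blocking_read` is a hypothetical stand-in for any synchronous LevelDB lookup:

```python
import asyncio


def blocking_read(key: str) -> str:
    # stand-in for a synchronous prefix_db / LevelDB lookup
    return f"value-for-{key}"


async def async_read(key: str) -> str:
    # offload the blocking call to the default thread-pool executor,
    # keeping the event loop responsive (same shape as fs_transactions below)
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, blocking_read, key)


if __name__ == "__main__":
    print(asyncio.run(async_read("tx:deadbeef")))
```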
async def fs_transactions(self, txids): - return await asyncio.get_event_loop().run_in_executor(self.executor, self._fs_transactions, txids) + return await asyncio.get_event_loop().run_in_executor(None, self._fs_transactions, txids) async def fs_block_hashes(self, height, count): if height + count > len(self.headers): - raise self.DBError(f'only got {len(self.headers) - height:,d} headers starting at {height:,d}, not {count:,d}') + raise DBError(f'only got {len(self.headers) - height:,d} headers starting at {height:,d}, not {count:,d}') return [self.coin.header_hash(header) for header in self.headers[height:height + count]] + def read_history(self, hashX: bytes, limit: int = 1000) -> List[Tuple[bytes, int]]: + txs = [] + txs_extend = txs.extend + for hist in self.prefix_db.hashX_history.iterate(prefix=(hashX,), include_key=False): + txs_extend([ + (self.prefix_db.tx_hash.get(tx_num, deserialize_value=False), bisect_right(self.tx_counts, tx_num)) + for tx_num in hist + ]) + if len(txs) >= limit: + break + return txs + async def limited_history(self, hashX, *, limit=1000): """Return an unpruned, sorted list of (tx_hash, height) tuples of confirmed transactions that touched the address, earliest in @@ -544,36 +989,7 @@ class LevelDB: transactions. By default returns at most 1000 entries. Set limit to None to get them all. """ - - def read_history(): - db_height = self.db_height - tx_counts = self.tx_counts - - cnt = 0 - txs = [] - - for hist in self.history.db.iterator(prefix=hashX, include_key=False): - a = array.array('I') - a.frombytes(hist) - for tx_num in a: - tx_height = bisect_right(tx_counts, tx_num) - if tx_height > db_height: - return - txs.append((tx_num, tx_height)) - cnt += 1 - if limit and cnt >= limit: - break - if limit and cnt >= limit: - break - return txs - - while True: - history = await asyncio.get_event_loop().run_in_executor(self.executor, read_history) - if history is not None: - return [(self.total_transactions[tx_num], tx_height) for (tx_num, tx_height) in history] - self.logger.warning(f'limited_history: tx hash ' - f'not found (reorg?), retrying...') - await sleep(0.25) + return await asyncio.get_event_loop().run_in_executor(None, self.read_history, hashX, limit) # -- Undo information @@ -581,80 +997,30 @@ class LevelDB: """Returns a height from which we should store undo info.""" return max_height - self.env.reorg_limit + 1 - def undo_key(self, height): - """DB key for undo information at the given height.""" - return b'U' + pack('>I', height) + def apply_expiration_extension_fork(self): + # TODO: this can't be reorged + for k, v in self.prefix_db.claim_expiration.iterate(): + self.prefix_db.claim_expiration.stage_delete(k, v) + self.prefix_db.claim_expiration.stage_put( + (bisect_right(self.tx_counts, k.tx_num) + self.coin.nExtendedClaimExpirationTime, + k.tx_num, k.position), v + ) + self.prefix_db.unsafe_commit() - def read_undo_info(self, height): - """Read undo information from a file for the current height.""" - return self.utxo_db.get(self.undo_key(height)) + def write_db_state(self): + """Write (UTXO) state to the batch.""" + if self.db_height > 0: + self.prefix_db.db_state.stage_delete((), self.prefix_db.db_state.get()) + self.prefix_db.db_state.stage_put((), ( + self.genesis_bytes, self.db_height, self.db_tx_count, self.db_tip, + self.utxo_flush_count, int(self.wall_time), self.first_sync, self.db_version, + self.hist_flush_count, self.hist_comp_flush_count, self.hist_comp_cursor + ) + ) - def flush_undo_infos(self, batch_put, undo_infos): - 
"""undo_infos is a list of (undo_info, height) pairs.""" - for undo_info, height in undo_infos: - batch_put(self.undo_key(height), b''.join(undo_info)) + def read_db_state(self): + state = self.prefix_db.db_state.get() - def raw_block_prefix(self): - return 'block' - - def raw_block_path(self, height): - return os.path.join(self.env.db_dir, f'{self.raw_block_prefix()}{height:d}') - - async def read_raw_block(self, height): - """Returns a raw block read from disk. Raises FileNotFoundError - if the block isn't on-disk.""" - - def read(): - with util.open_file(self.raw_block_path(height)) as f: - return f.read(-1) - - return await asyncio.get_event_loop().run_in_executor(self.executor, read) - - def write_raw_block(self, block, height): - """Write a raw block to disk.""" - with util.open_truncate(self.raw_block_path(height)) as f: - f.write(block) - # Delete old blocks to prevent them accumulating - try: - del_height = self.min_undo_height(height) - 1 - os.remove(self.raw_block_path(del_height)) - except FileNotFoundError: - pass - - def clear_excess_undo_info(self): - """Clear excess undo info. Only most recent N are kept.""" - prefix = b'U' - min_height = self.min_undo_height(self.db_height) - keys = [] - for key, hist in self.utxo_db.iterator(prefix=prefix): - height, = unpack('>I', key[-4:]) - if height >= min_height: - break - keys.append(key) - - if keys: - with self.utxo_db.write_batch() as batch: - for key in keys: - batch.delete(key) - self.logger.info(f'deleted {len(keys):,d} stale undo entries') - - # delete old block files - prefix = self.raw_block_prefix() - paths = [path for path in glob(f'{prefix}[0-9]*') - if len(path) > len(prefix) - and int(path[len(prefix):]) < min_height] - if paths: - for path in paths: - try: - os.remove(path) - except FileNotFoundError: - pass - self.logger.info(f'deleted {len(paths):,d} stale block files') - - # -- UTXO database - - def read_utxo_state(self): - state = self.utxo_db.get(b'state') if not state: self.db_height = -1 self.db_tx_count = 0 @@ -663,83 +1029,52 @@ class LevelDB: self.utxo_flush_count = 0 self.wall_time = 0 self.first_sync = True + self.hist_flush_count = 0 + self.hist_comp_flush_count = -1 + self.hist_comp_cursor = -1 + self.hist_db_version = max(self.DB_VERSIONS) else: - state = ast.literal_eval(state.decode()) - if not isinstance(state, dict): - raise self.DBError('failed reading state from DB') - self.db_version = state['db_version'] + self.db_version = state.db_version if self.db_version not in self.DB_VERSIONS: - raise self.DBError(f'your UTXO DB version is {self.db_version} but this ' + raise DBError(f'your DB version is {self.db_version} but this ' f'software only handles versions {self.DB_VERSIONS}') # backwards compat - genesis_hash = state['genesis'] - if isinstance(genesis_hash, bytes): - genesis_hash = genesis_hash.decode() - if genesis_hash != self.coin.GENESIS_HASH: - raise self.DBError(f'DB genesis hash {genesis_hash} does not ' + genesis_hash = state.genesis + if genesis_hash.hex() != self.coin.GENESIS_HASH: + raise DBError(f'DB genesis hash {genesis_hash} does not ' f'match coin {self.coin.GENESIS_HASH}') - self.db_height = state['height'] - self.db_tx_count = state['tx_count'] - self.db_tip = state['tip'] - self.utxo_flush_count = state['utxo_flush_count'] - self.wall_time = state['wall_time'] - self.first_sync = state['first_sync'] + self.db_height = state.height + self.db_tx_count = state.tx_count + self.db_tip = state.tip + self.utxo_flush_count = state.utxo_flush_count + self.wall_time = state.wall_time + 
self.first_sync = state.first_sync + self.hist_flush_count = state.hist_flush_count + self.hist_comp_flush_count = state.comp_flush_count + self.hist_comp_cursor = state.comp_cursor + self.hist_db_version = state.db_version - # These are our state as we move ahead of DB state - self.fs_height = self.db_height - self.fs_tx_count = self.db_tx_count - self.last_flush_tx_count = self.fs_tx_count - - # Log some stats - self.logger.info(f'DB version: {self.db_version:d}') - self.logger.info(f'coin: {self.coin.NAME}') - self.logger.info(f'network: {self.coin.NET}') - self.logger.info(f'height: {self.db_height:,d}') - self.logger.info(f'tip: {hash_to_hex_str(self.db_tip)}') - self.logger.info(f'tx count: {self.db_tx_count:,d}') - if self.utxo_db.for_sync: - self.logger.info(f'flushing DB cache at {self.env.cache_MB:,d} MB') - if self.first_sync: - self.logger.info(f'sync time so far: {util.formatted_time(self.wall_time)}') - - def write_utxo_state(self, batch): - """Write (UTXO) state to the batch.""" - state = { - 'genesis': self.coin.GENESIS_HASH, - 'height': self.db_height, - 'tx_count': self.db_tx_count, - 'tip': self.db_tip, - 'utxo_flush_count': self.utxo_flush_count, - 'wall_time': self.wall_time, - 'first_sync': self.first_sync, - 'db_version': self.db_version, - } - batch.put(b'state', repr(state).encode()) - - def set_flush_count(self, count): - self.utxo_flush_count = count - with self.utxo_db.write_batch() as batch: - self.write_utxo_state(batch) + def assert_db_state(self): + state = self.prefix_db.db_state.get() + assert self.db_version == state.db_version + assert self.db_height == state.height + assert self.db_tx_count == state.tx_count + assert self.db_tip == state.tip + assert self.first_sync == state.first_sync async def all_utxos(self, hashX): """Return all UTXOs for an address sorted in no particular order.""" def read_utxos(): utxos = [] utxos_append = utxos.append - s_unpack = unpack fs_tx_hash = self.fs_tx_hash - # Key: b'u' + address_hashX + tx_idx + tx_num - # Value: the UTXO value as a 64-bit unsigned integer - prefix = b'u' + hashX - for db_key, db_value in self.utxo_db.iterator(prefix=prefix): - tx_pos, tx_num = s_unpack(' MemPoolTx - hashXs: hashX -> set of all hashes of txs touching the hashX - """ - - def __init__(self, coin, api, refresh_secs=1.0, log_status_secs=120.0): - assert isinstance(api, MemPoolAPI) + def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0): self.coin = coin - self.api = api + self._daemon = daemon + self._db = db + self._touched_mp = {} + self._touched_bp = {} + self._highest_block = -1 + self.logger = class_logger(__name__, self.__class__.__name__) self.txs = {} self.hashXs = defaultdict(set) # None can be a key @@ -113,10 +64,11 @@ class MemPool: self.refresh_secs = refresh_secs self.log_status_secs = log_status_secs # Prevents mempool refreshes during fee histogram calculation - self.lock = asyncio.Lock() + self.lock = state_lock self.wakeup = asyncio.Event() self.mempool_process_time_metric = mempool_process_time_metric self.notified_mempool_txs = set() + self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None async def _logging(self, synchronized_event): """Print regular logs of mempool stats.""" @@ -132,40 +84,6 @@ class MemPool: await asyncio.sleep(self.log_status_secs) await synchronized_event.wait() - async def _refresh_histogram(self, synchronized_event): - while True: - await synchronized_event.wait() - async with self.lock: - 
self._update_histogram(100_000) - await asyncio.sleep(self.coin.MEMPOOL_HISTOGRAM_REFRESH_SECS) - - def _update_histogram(self, bin_size): - # Build a histogram by fee rate - histogram = defaultdict(int) - for tx in self.txs.values(): - histogram[tx.fee // tx.size] += tx.size - - # Now compact it. For efficiency, get_fees returns a - # compact histogram with variable bin size. The compact - # histogram is an array of (fee_rate, vsize) values. - # vsize_n is the cumulative virtual size of mempool - # transactions with a fee rate in the interval - # [rate_(n-1), rate_n)], and rate_(n-1) > rate_n. - # Intervals are chosen to create tranches containing at - # least 100kb of transactions - compact = [] - cum_size = 0 - r = 0 # ? - for fee_rate, size in sorted(histogram.items(), reverse=True): - cum_size += size - if cum_size + r > bin_size: - compact.append((fee_rate, cum_size)) - r += cum_size - bin_size - cum_size = 0 - bin_size *= 1.1 - self.logger.info(f'compact fee histogram: {compact}') - self.cached_compact_histogram = compact - def _accept_transactions(self, tx_map, utxo_map, touched): """Accept transactions in tx_map to the mempool if all their inputs can be found in the existing mempool or a utxo_map from the @@ -223,9 +141,9 @@ class MemPool: """Refresh our view of the daemon's mempool.""" while True: start = time.perf_counter() - height = self.api.cached_height() - hex_hashes = await self.api.mempool_hashes() - if height != await self.api.height(): + height = self._daemon.cached_height() + hex_hashes = await self._daemon.mempool_hashes() + if height != await self._daemon.height(): continue hashes = {hex_str_to_hash(hh) for hh in hex_hashes} async with self.lock: @@ -237,7 +155,7 @@ class MemPool: } synchronized_event.set() synchronized_event.clear() - await self.api.on_mempool(touched, new_touched, height) + await self.on_mempool(touched, new_touched, height) duration = time.perf_counter() - start self.mempool_process_time_metric.observe(duration) try: @@ -292,8 +210,7 @@ class MemPool: async def _fetch_and_accept(self, hashes, all_hashes, touched): """Fetch a list of mempool transactions.""" - hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes) - raw_txs = await self.api.raw_transactions(hex_hashes_iter) + raw_txs = await self._daemon.getrawtransactions((hash_to_hex_str(hash) for hash in hashes)) to_hashX = self.coin.hashX_from_script deserializer = self.coin.DESERIALIZER @@ -323,7 +240,7 @@ class MemPool: prevouts = tuple(prevout for tx in tx_map.values() for prevout in tx.prevouts if prevout[0] not in all_hashes) - utxos = await self.api.lookup_utxos(prevouts) + utxos = await self._db.lookup_utxos(prevouts) utxo_map = dict(zip(prevouts, utxos)) return self._accept_transactions(tx_map, utxo_map, touched) @@ -407,3 +324,37 @@ class MemPool: if unspent_inputs: return -1 return 0 + + async def _maybe_notify(self, new_touched): + tmp, tbp = self._touched_mp, self._touched_bp + common = set(tmp).intersection(tbp) + if common: + height = max(common) + elif tmp and max(tmp) == self._highest_block: + height = self._highest_block + else: + # Either we are processing a block and waiting for it to + # come in, or we have not yet had a mempool update for the + # new block height + return + touched = tmp.pop(height) + for old in [h for h in tmp if h <= height]: + del tmp[old] + for old in [h for h in tbp if h <= height]: + touched.update(tbp.pop(old)) + # print("notify", height, len(touched), len(new_touched)) + await self.notify_sessions(height, touched, new_touched) + + async def 
start(self, height, session_manager: 'LBRYSessionManager'): + self._highest_block = height + self.notify_sessions = session_manager._notify_sessions + await self.notify_sessions(height, set(), set()) + + async def on_mempool(self, touched, new_touched, height): + self._touched_mp[height] = touched + await self._maybe_notify(new_touched) + + async def on_block(self, touched, height): + self._touched_bp[height] = touched + self._highest_block = height + await self._maybe_notify(set()) diff --git a/lbry/wallet/server/merkle.py b/lbry/wallet/server/merkle.py index 1a42b0185..8cf1ca08b 100644 --- a/lbry/wallet/server/merkle.py +++ b/lbry/wallet/server/merkle.py @@ -43,10 +43,12 @@ class Merkle: def __init__(self, hash_func=double_sha256): self.hash_func = hash_func - def tree_depth(self, hash_count): - return self.branch_length(hash_count) + 1 + @staticmethod + def tree_depth(hash_count): + return Merkle.branch_length(hash_count) + 1 - def branch_length(self, hash_count): + @staticmethod + def branch_length(hash_count): """Return the length of a merkle branch given the number of hashes.""" if not isinstance(hash_count, int): raise TypeError('hash_count must be an integer') @@ -54,7 +56,8 @@ class Merkle: raise ValueError('hash_count must be at least 1') return ceil(log(hash_count, 2)) - def branch_and_root(self, hashes, index, length=None): + @staticmethod + def branch_and_root(hashes, index, length=None, hash_func=double_sha256): """Return a (merkle branch, merkle_root) pair given hashes, and the index of one of those hashes. """ @@ -64,7 +67,7 @@ class Merkle: # This also asserts hashes is not empty if not 0 <= index < len(hashes): raise ValueError(f"index '{index}/{len(hashes)}' out of range") - natural_length = self.branch_length(len(hashes)) + natural_length = Merkle.branch_length(len(hashes)) if length is None: length = natural_length else: @@ -73,7 +76,6 @@ class Merkle: if length < natural_length: raise ValueError('length out of range') - hash_func = self.hash_func branch = [] for _ in range(length): if len(hashes) & 1: @@ -85,44 +87,47 @@ class Merkle: return branch, hashes[0] - def root(self, hashes, length=None): + @staticmethod + def root(hashes, length=None): """Return the merkle root of a non-empty iterable of binary hashes.""" - branch, root = self.branch_and_root(hashes, 0, length) + branch, root = Merkle.branch_and_root(hashes, 0, length) return root - def root_from_proof(self, hash, branch, index): - """Return the merkle root given a hash, a merkle branch to it, and - its index in the hashes array. + # @staticmethod + # def root_from_proof(hash, branch, index, hash_func=double_sha256): + # """Return the merkle root given a hash, a merkle branch to it, and + # its index in the hashes array. + # + # branch is an iterable sorted deepest to shallowest. If the + # returned root is the expected value then the merkle proof is + # verified. + # + # The caller should have confirmed the length of the branch with + # branch_length(). Unfortunately this is not easily done for + # bitcoin transactions as the number of transactions in a block + # is unknown to an SPV client. + # """ + # for elt in branch: + # if index & 1: + # hash = hash_func(elt + hash) + # else: + # hash = hash_func(hash + elt) + # index >>= 1 + # if index: + # raise ValueError('index out of range for branch') + # return hash - branch is an iterable sorted deepest to shallowest. If the - returned root is the expected value then the merkle proof is - verified. 
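For reference, the root_from_proof verification that this hunk comments out walks the branch from the leaf upward, hashing left or right according to the low bit of the index at each level. A self-contained sketch of the same walk, with a local double_sha256 standing in for the helper imported from lbry.wallet.server.hash:

from hashlib import sha256

def double_sha256(data: bytes) -> bytes:
    # stand-in for lbry.wallet.server.hash.double_sha256
    return sha256(sha256(data).digest()).digest()

def root_from_proof(leaf_hash: bytes, branch, index: int) -> bytes:
    """Hash a merkle branch (ordered deepest to shallowest) up to the root."""
    for sibling in branch:
        if index & 1:
            leaf_hash = double_sha256(sibling + leaf_hash)  # leaf is the right child
        else:
            leaf_hash = double_sha256(leaf_hash + sibling)  # leaf is the left child
        index >>= 1
    if index:
        raise ValueError('index out of range for branch')
    return leaf_hash

If the returned root equals the block header's merkle root the proof is valid; as the removed docstring notes, an SPV client cannot independently confirm the branch length because it does not know the block's transaction count.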
- - The caller should have confirmed the length of the branch with - branch_length(). Unfortunately this is not easily done for - bitcoin transactions as the number of transactions in a block - is unknown to an SPV client. - """ - hash_func = self.hash_func - for elt in branch: - if index & 1: - hash = hash_func(elt + hash) - else: - hash = hash_func(hash + elt) - index >>= 1 - if index: - raise ValueError('index out of range for branch') - return hash - - def level(self, hashes, depth_higher): + @staticmethod + def level(hashes, depth_higher): """Return a level of the merkle tree of hashes the given depth higher than the bottom row of the original tree.""" size = 1 << depth_higher - root = self.root + root = Merkle.root return [root(hashes[n: n + size], depth_higher) for n in range(0, len(hashes), size)] - def branch_and_root_from_level(self, level, leaf_hashes, index, + @staticmethod + def branch_and_root_from_level(level, leaf_hashes, index, depth_higher): """Return a (merkle branch, merkle_root) pair when a merkle-tree has a level cached. @@ -146,10 +151,10 @@ class Merkle: if not isinstance(leaf_hashes, list): raise TypeError("leaf_hashes must be a list") leaf_index = (index >> depth_higher) << depth_higher - leaf_branch, leaf_root = self.branch_and_root( + leaf_branch, leaf_root = Merkle.branch_and_root( leaf_hashes, index - leaf_index, depth_higher) index >>= depth_higher - level_branch, root = self.branch_and_root(level, index) + level_branch, root = Merkle.branch_and_root(level, index) # Check last so that we know index is in-range if leaf_root != level[index]: raise ValueError('leaf hashes inconsistent with level') diff --git a/lbry/wallet/server/server.py b/lbry/wallet/server/server.py index cbec5c93b..2a0a2111e 100644 --- a/lbry/wallet/server/server.py +++ b/lbry/wallet/server/server.py @@ -5,66 +5,13 @@ from concurrent.futures.thread import ThreadPoolExecutor import typing import lbry -from lbry.wallet.server.mempool import MemPool, MemPoolAPI +from lbry.wallet.server.mempool import MemPool +from lbry.wallet.server.block_processor import BlockProcessor +from lbry.wallet.server.leveldb import LevelDB +from lbry.wallet.server.session import LBRYSessionManager from lbry.prometheus import PrometheusServer -class Notifications: - # hashX notifications come from two sources: new blocks and - # mempool refreshes. - # - # A user with a pending transaction is notified after the block it - # gets in is processed. Block processing can take an extended - # time, and the prefetcher might poll the daemon after the mempool - # code in any case. In such cases the transaction will not be in - # the mempool after the mempool refresh. We want to avoid - # notifying clients twice - for the mempool refresh and when the - # block is done. This object handles that logic by deferring - # notifications appropriately. 
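The deferral rule described in the comment above now lives in MemPool._maybe_notify earlier in this diff: notify only at a height that both the mempool refresh and block processing have reported, or at the current tip if only mempool data is pending. A sketch of just that height-selection step as a pure function, not part of the diff (the dict arguments mirror _touched_mp and _touched_bp):

def pick_notify_height(touched_mp: dict, touched_bp: dict, highest_block: int):
    """Return the height to notify for, or None to keep deferring."""
    common = set(touched_mp).intersection(touched_bp)
    if common:
        return max(common)            # block and mempool agree on a height
    if touched_mp and max(touched_mp) == highest_block:
        return highest_block          # mempool has caught up to the tip
    return None                       # still waiting for the other side

# a block at height 101 was processed but the mempool last refreshed at 100:
assert pick_notify_height({100: set()}, {101: set()}, 101) is None
assert pick_notify_height({100: set(), 101: set()}, {101: set()}, 101) == 101

On a hit, _maybe_notify in the hunk above merges and discards every touched set at or below that height before calling notify_sessions.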
- - def __init__(self): - self._touched_mp = {} - self._touched_bp = {} - self.notified_mempool_txs = set() - self._highest_block = -1 - - async def _maybe_notify(self, new_touched): - tmp, tbp = self._touched_mp, self._touched_bp - common = set(tmp).intersection(tbp) - if common: - height = max(common) - elif tmp and max(tmp) == self._highest_block: - height = self._highest_block - else: - # Either we are processing a block and waiting for it to - # come in, or we have not yet had a mempool update for the - # new block height - return - touched = tmp.pop(height) - for old in [h for h in tmp if h <= height]: - del tmp[old] - for old in [h for h in tbp if h <= height]: - touched.update(tbp.pop(old)) - await self.notify(height, touched, new_touched) - - async def notify(self, height, touched, new_touched): - pass - - async def start(self, height, notify_func): - self._highest_block = height - self.notify = notify_func - await self.notify(height, set(), set()) - - async def on_mempool(self, touched, new_touched, height): - self._touched_mp[height] = touched - await self._maybe_notify(new_touched) - - async def on_block(self, touched, height): - self._touched_bp[height] = touched - self._highest_block = height - await self._maybe_notify(set()) - - class Server: def __init__(self, env): @@ -73,26 +20,13 @@ class Server: self.shutdown_event = asyncio.Event() self.cancellable_tasks = [] - self.notifications = notifications = Notifications() self.daemon = daemon = env.coin.DAEMON(env.coin, env.daemon_url) - self.db = db = env.coin.DB(env) - self.bp = bp = env.coin.BLOCK_PROCESSOR(env, db, daemon, notifications) + self.db = db = LevelDB(env) + self.bp = bp = BlockProcessor(env, db, daemon, self.shutdown_event) self.prometheus_server: typing.Optional[PrometheusServer] = None - # Set notifications up to implement the MemPoolAPI - notifications.height = daemon.height - notifications.cached_height = daemon.cached_height - notifications.mempool_hashes = daemon.mempool_hashes - notifications.raw_transactions = daemon.getrawtransactions - notifications.lookup_utxos = db.lookup_utxos - - MemPoolAPI.register(Notifications) - self.mempool = mempool = MemPool(env.coin, notifications) - - notifications.notified_mempool_txs = self.mempool.notified_mempool_txs - - self.session_mgr = env.coin.SESSION_MANAGER( - env, db, bp, daemon, mempool, self.shutdown_event + self.session_mgr = LBRYSessionManager( + env, db, bp, daemon, self.shutdown_event ) self._indexer_task = None @@ -120,8 +54,8 @@ class Server: await _start_cancellable(self.bp.fetch_and_process_blocks) await self.db.populate_header_merkle_cache() - await _start_cancellable(self.mempool.keep_synchronized) - await _start_cancellable(self.session_mgr.serve, self.notifications) + await _start_cancellable(self.bp.mempool.keep_synchronized) + await _start_cancellable(self.session_mgr.serve, self.bp.mempool) async def stop(self): for task in reversed(self.cancellable_tasks): @@ -135,7 +69,7 @@ class Server: def run(self): loop = asyncio.get_event_loop() - executor = ThreadPoolExecutor(1) + executor = ThreadPoolExecutor(self.env.max_query_workers) loop.set_default_executor(executor) def __exit(): diff --git a/lbry/wallet/server/session.py b/lbry/wallet/server/session.py index 0df85d88b..3983756be 100644 --- a/lbry/wallet/server/session.py +++ b/lbry/wallet/server/session.py @@ -21,18 +21,20 @@ from elasticsearch import ConnectionTimeout from prometheus_client import Counter, Info, Histogram, Gauge import lbry -from lbry.error import 
TooManyClaimSearchParametersError +from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError from lbry.build_info import BUILD, COMMIT_HASH, DOCKER_TAG -from lbry.wallet.server.block_processor import LBRYBlockProcessor -from lbry.wallet.server.db.writer import LBRYLevelDB +from lbry.schema.result import Outputs +from lbry.wallet.server.block_processor import BlockProcessor +from lbry.wallet.server.leveldb import LevelDB from lbry.wallet.server.websocket import AdminWebSocket from lbry.wallet.server.metrics import ServerLoadData, APICallMetrics from lbry.wallet.rpc.framing import NewlineFramer + import lbry.wallet.server.version as VERSION from lbry.wallet.rpc import ( RPCSession, JSONRPCAutoDetect, JSONRPCConnection, - handler_invocation, RPCError, Request, JSONRPC + handler_invocation, RPCError, Request, JSONRPC, Notification, Batch ) from lbry.wallet.server import text from lbry.wallet.server import util @@ -175,14 +177,13 @@ class SessionManager: namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS ) - def __init__(self, env: 'Env', db: LBRYLevelDB, bp: LBRYBlockProcessor, daemon: 'Daemon', mempool: 'MemPool', - shutdown_event: asyncio.Event): + def __init__(self, env: 'Env', db: LevelDB, bp: BlockProcessor, daemon: 'Daemon', shutdown_event: asyncio.Event): env.max_send = max(350000, env.max_send) self.env = env self.db = db self.bp = bp self.daemon = daemon - self.mempool = mempool + self.mempool = bp.mempool self.shutdown_event = shutdown_event self.logger = util.class_logger(__name__, self.__class__.__name__) self.servers: typing.Dict[str, asyncio.AbstractServer] = {} @@ -263,16 +264,6 @@ class SessionManager: await self._start_external_servers() paused = False - async def _log_sessions(self): - """Periodically log sessions.""" - log_interval = self.env.log_sessions - if log_interval: - while True: - await sleep(log_interval) - data = self._session_data(for_log=True) - for line in text.sessions_lines(data): - self.logger.info(line) - self.logger.info(json.dumps(self._get_info())) def _group_map(self): group_map = defaultdict(list) @@ -376,23 +367,6 @@ class SessionManager: 'version': lbry.__version__, } - def _session_data(self, for_log): - """Returned to the RPC 'sessions' call.""" - now = time.time() - sessions = sorted(self.sessions.values(), key=lambda s: s.start_time) - return [(session.session_id, - session.flags(), - session.peer_address_str(for_log=for_log), - session.client_version, - session.protocol_version_string(), - session.count_pending_items(), - session.txs_sent, - session.sub_count(), - session.recv_count, session.recv_size, - session.send_count, session.send_size, - now - session.start_time) - for session in sessions] - def _group_data(self): """Returned to the RPC 'groups' call.""" result = [] @@ -537,23 +511,19 @@ class SessionManager: return lines - async def rpc_sessions(self): - """Return statistics about connected sessions.""" - return self._session_data(for_log=False) - - async def rpc_reorg(self, count): - """Force a reorg of the given number of blocks. - - count: number of blocks to reorg - """ - count = non_negative_integer(count) - if not self.bp.force_chain_reorg(count): - raise RPCError(BAD_REQUEST, 'still catching up with daemon') - return f'scheduled a reorg of {count:,d} blocks' + # async def rpc_reorg(self, count): + # """Force a reorg of the given number of blocks. 
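The _notify_sessions hunk a little further down replaces the old one-task-per-(session, hashX) dispatch with a single batched history notification per session. A minimal sketch of that grouping step, not part of the diff, using hypothetical subscription data (hashX mapped to subscribed session ids):

from collections import defaultdict

# hashX -> set of session ids subscribed to it (hypothetical data)
subscriptions = {b'hashx-a': {1, 2}, b'hashx-b': {2}}
touched = {b'hashx-a', b'hashx-b', b'hashx-c'}   # b'hashx-c' has no subscribers

session_hashxes_to_notify = defaultdict(list)
for hashX in sorted(touched & subscriptions.keys()):
    for session_id in subscriptions[hashX]:
        session_hashxes_to_notify[session_id].append(hashX)

# one batch per session rather than one task per touched address
assert dict(session_hashxes_to_notify) == {1: [b'hashx-a'], 2: [b'hashx-a', b'hashx-b']}

Each session then receives one send_history_notifications(*hashXes) task, which is what the later hunk wires up.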
+ # + # count: number of blocks to reorg + # """ + # count = non_negative_integer(count) + # if not self.bp.force_chain_reorg(count): + # raise RPCError(BAD_REQUEST, 'still catching up with daemon') + # return f'scheduled a reorg of {count:,d} blocks' # --- External Interface - async def serve(self, notifications, server_listening_event): + async def serve(self, mempool, server_listening_event): """Start the RPC server if enabled. When the event is triggered, start TCP and SSL servers.""" try: @@ -567,7 +537,7 @@ class SessionManager: if self.env.drop_client is not None: self.logger.info(f'drop clients matching: {self.env.drop_client.pattern}') # Start notifications; initialize hsub_results - await notifications.start(self.db.db_height, self._notify_sessions) + await mempool.start(self.db.db_height, self) await self.start_other() await self._start_external_servers() server_listening_event.set() @@ -576,7 +546,6 @@ class SessionManager: # because we connect to ourself await asyncio.wait([ self._clear_stale_sessions(), - self._log_sessions(), self._manage_servers() ]) finally: @@ -663,19 +632,25 @@ class SessionManager: for hashX in touched.intersection(self.mempool_statuses.keys()): self.mempool_statuses.pop(hashX, None) - touched.intersection_update(self.hashx_subscriptions_by_session.keys()) + await asyncio.get_event_loop().run_in_executor( + None, touched.intersection_update, self.hashx_subscriptions_by_session.keys() + ) - if touched or (height_changed and self.mempool_statuses): + if touched or new_touched or (height_changed and self.mempool_statuses): notified_hashxs = 0 - notified_sessions = 0 + session_hashxes_to_notify = defaultdict(list) to_notify = touched if height_changed else new_touched + for hashX in to_notify: + if hashX not in self.hashx_subscriptions_by_session: + continue for session_id in self.hashx_subscriptions_by_session[hashX]: - asyncio.create_task(self.sessions[session_id].send_history_notification(hashX)) - notified_sessions += 1 - notified_hashxs += 1 - if notified_sessions: - self.logger.info(f'notified {notified_sessions} sessions/{notified_hashxs:,d} touched addresses') + session_hashxes_to_notify[session_id].append(hashX) + notified_hashxs += 1 + for session_id, hashXes in session_hashxes_to_notify.items(): + asyncio.create_task(self.sessions[session_id].send_history_notifications(*hashXes)) + if session_hashxes_to_notify: + self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses') def add_session(self, session): self.sessions[id(session)] = session @@ -746,16 +721,6 @@ class SessionBase(RPCSession): def toggle_logging(self): self.log_me = not self.log_me - def flags(self): - """Status flags.""" - status = self.kind[0] - if self.is_closing(): - status += 'C' - if self.log_me: - status += 'L' - status += str(self._concurrency.max_concurrent) - return status - def connection_made(self, transport): """Handle an incoming client connection.""" super().connection_made(transport) @@ -812,21 +777,21 @@ class LBRYSessionManager(SessionManager): super().__init__(*args, **kwargs) self.query_executor = None self.websocket = None - self.metrics = ServerLoadData() + # self.metrics = ServerLoadData() self.metrics_loop = None self.running = False if self.env.websocket_host is not None and self.env.websocket_port is not None: self.websocket = AdminWebSocket(self) - async def process_metrics(self): - while self.running: - data = self.metrics.to_json_and_reset({ - 'sessions': self.session_count(), - 'height': 
self.db.db_height, - }) - if self.websocket is not None: - self.websocket.send_message(data) - await asyncio.sleep(1) + # async def process_metrics(self): + # while self.running: + # data = self.metrics.to_json_and_reset({ + # 'sessions': self.session_count(), + # 'height': self.db.db_height, + # }) + # if self.websocket is not None: + # self.websocket.send_message(data) + # await asyncio.sleep(1) async def start_other(self): self.running = True @@ -838,13 +803,9 @@ class LBRYSessionManager(SessionManager): ) if self.websocket is not None: await self.websocket.start() - if self.env.track_metrics: - self.metrics_loop = asyncio.create_task(self.process_metrics()) async def stop_other(self): self.running = False - if self.env.track_metrics: - self.metrics_loop.cancel() if self.websocket is not None: await self.websocket.stop() self.query_executor.shutdown() @@ -887,6 +848,8 @@ class LBRYElectrumX(SessionBase): 'blockchain.transaction.get_height': cls.transaction_get_height, 'blockchain.claimtrie.search': cls.claimtrie_search, 'blockchain.claimtrie.resolve': cls.claimtrie_resolve, + 'blockchain.claimtrie.getclaimbyid': cls.claimtrie_getclaimbyid, + # 'blockchain.claimtrie.getclaimsbyids': cls.claimtrie_getclaimsbyids, 'blockchain.block.get_server_height': cls.get_server_height, 'mempool.get_fee_histogram': cls.mempool_compact_histogram, 'blockchain.block.headers': cls.block_headers, @@ -915,8 +878,8 @@ class LBRYElectrumX(SessionBase): self.protocol_tuple = self.PROTOCOL_MIN self.protocol_string = None self.daemon = self.session_mgr.daemon - self.bp: LBRYBlockProcessor = self.session_mgr.bp - self.db: LBRYLevelDB = self.bp.db + self.bp: BlockProcessor = self.session_mgr.bp + self.db: LevelDB = self.bp.db @classmethod def protocol_min_max_strings(cls): @@ -939,7 +902,7 @@ class LBRYElectrumX(SessionBase): 'donation_address': env.donation_address, 'daily_fee': env.daily_fee, 'hash_function': 'sha256', - 'trending_algorithm': env.trending_algorithms[0] + 'trending_algorithm': 'variable_decay' }) async def server_features_async(self): @@ -956,32 +919,57 @@ class LBRYElectrumX(SessionBase): def sub_count(self): return len(self.hashX_subs) - async def send_history_notification(self, hashX): - start = time.perf_counter() - alias = self.hashX_subs[hashX] - if len(alias) == 64: - method = 'blockchain.scripthash.subscribe' - else: - method = 'blockchain.address.subscribe' - try: - self.session_mgr.notifications_in_flight_metric.inc() - status = await self.address_status(hashX) - self.session_mgr.address_history_metric.observe(time.perf_counter() - start) + async def send_history_notifications(self, *hashXes: typing.Iterable[bytes]): + notifications = [] + for hashX in hashXes: + alias = self.hashX_subs[hashX] + if len(alias) == 64: + method = 'blockchain.scripthash.subscribe' + else: + method = 'blockchain.address.subscribe' start = time.perf_counter() - await self.send_notification(method, (alias, status)) + db_history = await self.session_mgr.limited_history(hashX) + mempool = self.mempool.transaction_summaries(hashX) + + status = ''.join(f'{hash_to_hex_str(tx_hash)}:' + f'{height:d}:' + for tx_hash, height in db_history) + status += ''.join(f'{hash_to_hex_str(tx.hash)}:' + f'{-tx.has_unconfirmed_inputs:d}:' + for tx in mempool) + if status: + status = sha256(status.encode()).hex() + else: + status = None + if mempool: + self.session_mgr.mempool_statuses[hashX] = status + else: + self.session_mgr.mempool_statuses.pop(hashX, None) + + self.session_mgr.address_history_metric.observe(time.perf_counter() 
- start) + notifications.append((method, (alias, status))) + + start = time.perf_counter() + self.session_mgr.notifications_in_flight_metric.inc() + for method, args in notifications: + self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc() + try: + await self.send_notifications( + Batch([Notification(method, (alias, status)) for (method, (alias, status)) in notifications]) + ) self.session_mgr.notifications_sent_metric.observe(time.perf_counter() - start) finally: self.session_mgr.notifications_in_flight_metric.dec() - def get_metrics_or_placeholder_for_api(self, query_name): - """ Do not hold on to a reference to the metrics - returned by this method past an `await` or - you may be working with a stale metrics object. - """ - if self.env.track_metrics: - return self.session_mgr.metrics.for_api(query_name) - else: - return APICallMetrics(query_name) + # def get_metrics_or_placeholder_for_api(self, query_name): + # """ Do not hold on to a reference to the metrics + # returned by this method past an `await` or + # you may be working with a stale metrics object. + # """ + # if self.env.track_metrics: + # # return self.session_mgr.metrics.for_api(query_name) + # else: + # return APICallMetrics(query_name) async def run_in_executor(self, query_name, func, kwargs): start = time.perf_counter() @@ -994,55 +982,87 @@ class LBRYElectrumX(SessionBase): raise except Exception: log.exception("dear devs, please handle this exception better") - metrics = self.get_metrics_or_placeholder_for_api(query_name) - metrics.query_error(start, {}) self.session_mgr.db_error_metric.inc() raise RPCError(JSONRPC.INTERNAL_ERROR, 'unknown server error') else: - if self.env.track_metrics: - metrics = self.get_metrics_or_placeholder_for_api(query_name) - (result, metrics_data) = result - metrics.query_response(start, metrics_data) return base64.b64encode(result).decode() finally: self.session_mgr.pending_query_metric.dec() self.session_mgr.executor_time_metric.observe(time.perf_counter() - start) - async def run_and_cache_query(self, query_name, kwargs): - start = time.perf_counter() - if isinstance(kwargs, dict): - kwargs['release_time'] = format_release_time(kwargs.get('release_time')) - try: - self.session_mgr.pending_query_metric.inc() - return await self.db.search_index.session_query(query_name, kwargs) - except ConnectionTimeout: - self.session_mgr.interrupt_count_metric.inc() - raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out') - finally: - self.session_mgr.pending_query_metric.dec() - self.session_mgr.executor_time_metric.observe(time.perf_counter() - start) + # async def run_and_cache_query(self, query_name, kwargs): + # start = time.perf_counter() + # if isinstance(kwargs, dict): + # kwargs['release_time'] = format_release_time(kwargs.get('release_time')) + # try: + # self.session_mgr.pending_query_metric.inc() + # return await self.db.search_index.session_query(query_name, kwargs) + # except ConnectionTimeout: + # self.session_mgr.interrupt_count_metric.inc() + # raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out') + # finally: + # self.session_mgr.pending_query_metric.dec() + # self.session_mgr.executor_time_metric.observe(time.perf_counter() - start) async def mempool_compact_histogram(self): return self.mempool.compact_fee_histogram() async def claimtrie_search(self, **kwargs): - if kwargs: + start = time.perf_counter() + if 'release_time' in kwargs: + release_time = kwargs.pop('release_time') try: - return await self.run_and_cache_query('search', kwargs) - except 
TooManyClaimSearchParametersError as err: - await asyncio.sleep(2) - self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.", - self.peer_address()[0], err.key, err.limit) - return RPCError(1, str(err)) + kwargs['release_time'] = format_release_time(release_time) + except ValueError: + pass + try: + self.session_mgr.pending_query_metric.inc() + if 'channel' in kwargs: + channel_url = kwargs.pop('channel') + _, channel_claim, _ = await self.db.fs_resolve(channel_url) + if not channel_claim or isinstance(channel_claim, (ResolveCensoredError, LookupError, ValueError)): + return Outputs.to_base64([], [], 0, None, None) + kwargs['channel_id'] = channel_claim.claim_hash.hex() + return await self.db.search_index.cached_search(kwargs) + except ConnectionTimeout: + self.session_mgr.interrupt_count_metric.inc() + raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out') + except TooManyClaimSearchParametersError as err: + await asyncio.sleep(2) + self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.", + self.peer_address()[0], err.key, err.limit) + return RPCError(1, str(err)) + finally: + self.session_mgr.pending_query_metric.dec() + self.session_mgr.executor_time_metric.observe(time.perf_counter() - start) async def claimtrie_resolve(self, *urls): - if urls: - count = len(urls) - try: - self.session_mgr.urls_to_resolve_count_metric.inc(count) - return await self.run_and_cache_query('resolve', urls) - finally: - self.session_mgr.resolved_url_count_metric.inc(count) + rows, extra = [], [] + for url in urls: + self.session_mgr.urls_to_resolve_count_metric.inc() + stream, channel, repost = await self.db.fs_resolve(url) + self.session_mgr.resolved_url_count_metric.inc() + if isinstance(channel, ResolveCensoredError): + rows.append(channel) + extra.append(channel.censor_row) + elif isinstance(stream, ResolveCensoredError): + rows.append(stream) + extra.append(stream.censor_row) + elif channel and not stream: + rows.append(channel) + # print("resolved channel", channel.name.decode()) + if repost: + extra.append(repost) + elif stream: + # print("resolved stream", stream.name.decode()) + rows.append(stream) + if channel: + # print("and channel", channel.name.decode()) + extra.append(channel) + if repost: + extra.append(repost) + # print("claimtrie resolve %i rows %i extrat" % (len(rows), len(extra))) + return Outputs.to_base64(rows, extra, 0, None, None) async def get_server_height(self): return self.bp.height @@ -1057,6 +1077,15 @@ class LBRYElectrumX(SessionBase): return -1 return None + async def claimtrie_getclaimbyid(self, claim_id): + rows = [] + extra = [] + stream = await self.db.fs_getclaimbyid(claim_id) + if not stream: + stream = LookupError(f"Could not find claim at {claim_id}") + rows.append(stream) + return Outputs.to_base64(rows, extra, 0, None, None) + def assert_tx_hash(self, value): '''Raise an RPCError if the value is not a valid transaction hash.''' diff --git a/lbry/wallet/server/storage.py b/lbry/wallet/server/storage.py deleted file mode 100644 index 127166204..000000000 --- a/lbry/wallet/server/storage.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright (c) 2016-2017, the ElectrumX authors -# -# All rights reserved. -# -# See the file "LICENCE" for information about the copyright -# and warranty status of this software. 
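Stepping back to the claimtrie_resolve hunk above: each URL resolves to up to three objects (stream, channel, repost), and the handler folds them into the rows and extras that Outputs.to_base64 serializes, with censored results appending the error as the row and its censor_row as the extra. A rough standalone sketch of that branching, not part of the diff; the arguments and the censor check are stand-ins for the real fs_resolve results and ResolveCensoredError:

def pack_resolve_result(stream, channel, repost, is_censored=lambda r: False):
    """Fold one URL's resolve results into (rows, extra) for serialization."""
    rows, extra = [], []
    if channel is not None and is_censored(channel):
        rows.append(channel)
        extra.append(channel.censor_row)
    elif stream is not None and is_censored(stream):
        rows.append(stream)
        extra.append(stream.censor_row)
    elif channel and not stream:
        rows.append(channel)              # bare channel URL
        if repost:
            extra.append(repost)
    elif stream:
        rows.append(stream)
        if channel:
            extra.append(channel)         # signing channel rides along as an extra
        if repost:
            extra.append(repost)
    return rows, extra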
- -"""Backend database abstraction.""" - -import os -from functools import partial - -from lbry.wallet.server import util - - -def db_class(db_dir, name): - """Returns a DB engine class.""" - for db_class in util.subclasses(Storage): - if db_class.__name__.lower() == name.lower(): - db_class.import_module() - return partial(db_class, db_dir) - raise RuntimeError(f'unrecognised DB engine "{name}"') - - -class Storage: - """Abstract base class of the DB backend abstraction.""" - - def __init__(self, db_dir, name, for_sync): - self.db_dir = db_dir - self.is_new = not os.path.exists(os.path.join(db_dir, name)) - self.for_sync = for_sync or self.is_new - self.open(name, create=self.is_new) - - @classmethod - def import_module(cls): - """Import the DB engine module.""" - raise NotImplementedError - - def open(self, name, create): - """Open an existing database or create a new one.""" - raise NotImplementedError - - def close(self): - """Close an existing database.""" - raise NotImplementedError - - def get(self, key): - raise NotImplementedError - - def put(self, key, value): - raise NotImplementedError - - def write_batch(self): - """Return a context manager that provides `put` and `delete`. - - Changes should only be committed when the context manager - closes without an exception. - """ - raise NotImplementedError - - def iterator(self, prefix=b'', reverse=False): - """Return an iterator that yields (key, value) pairs from the - database sorted by key. - - If `prefix` is set, only keys starting with `prefix` will be - included. If `reverse` is True the items are returned in - reverse order. - """ - raise NotImplementedError - - -class LevelDB(Storage): - """LevelDB database engine.""" - - @classmethod - def import_module(cls): - import plyvel - cls.module = plyvel - - def open(self, name, create, lru_cache_size=None): - mof = 10000 - path = os.path.join(self.db_dir, name) - # Use snappy compression (the default) - self.db = self.module.DB(path, create_if_missing=create, max_open_files=mof) - self.close = self.db.close - self.get = self.db.get - self.put = self.db.put - self.iterator = self.db.iterator - self.write_batch = partial(self.db.write_batch, transaction=True, sync=True) - - -class RocksDB(Storage): - """RocksDB database engine.""" - - @classmethod - def import_module(cls): - import rocksdb - cls.module = rocksdb - - def open(self, name, create): - mof = 512 if self.for_sync else 128 - path = os.path.join(self.db_dir, name) - # Use snappy compression (the default) - options = self.module.Options(create_if_missing=create, - use_fsync=True, - target_file_size_base=33554432, - max_open_files=mof) - self.db = self.module.DB(path, options) - self.get = self.db.get - self.put = self.db.put - - def close(self): - # PyRocksDB doesn't provide a close method; hopefully this is enough - self.db = self.get = self.put = None - import gc - gc.collect() - - def write_batch(self): - return RocksDBWriteBatch(self.db) - - def iterator(self, prefix=b'', reverse=False): - return RocksDBIterator(self.db, prefix, reverse) - - -class RocksDBWriteBatch: - """A write batch for RocksDB.""" - - def __init__(self, db): - self.batch = RocksDB.module.WriteBatch() - self.db = db - - def __enter__(self): - return self.batch - - def __exit__(self, exc_type, exc_val, exc_tb): - if not exc_val: - self.db.write(self.batch) - - -class RocksDBIterator: - """An iterator for RocksDB.""" - - def __init__(self, db, prefix, reverse): - self.prefix = prefix - if reverse: - self.iterator = reversed(db.iteritems()) - nxt_prefix = 
util.increment_byte_string(prefix) - if nxt_prefix: - self.iterator.seek(nxt_prefix) - try: - next(self.iterator) - except StopIteration: - self.iterator.seek(nxt_prefix) - else: - self.iterator.seek_to_last() - else: - self.iterator = db.iteritems() - self.iterator.seek(prefix) - - def __iter__(self): - return self - - def __next__(self): - k, v = next(self.iterator) - if not k.startswith(self.prefix): - raise StopIteration - return k, v diff --git a/lbry/wallet/server/tx.py b/lbry/wallet/server/tx.py index 411162155..33cf3da3a 100644 --- a/lbry/wallet/server/tx.py +++ b/lbry/wallet/server/tx.py @@ -26,7 +26,7 @@ # and warranty status of this software. """Transaction-related classes and functions.""" - +import typing from collections import namedtuple from lbry.wallet.server.hash import sha256, double_sha256, hash_to_hex_str @@ -41,11 +41,20 @@ ZERO = bytes(32) MINUS_1 = 4294967295 -class Tx(namedtuple("Tx", "version inputs outputs locktime raw")): - """Class representing a transaction.""" +class Tx(typing.NamedTuple): + version: int + inputs: typing.List['TxInput'] + outputs: typing.List['TxOutput'] + locktime: int + raw: bytes -class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")): +class TxInput(typing.NamedTuple): + prev_hash: bytes + prev_idx: int + script: bytes + sequence: int + """Class representing a transaction input.""" def __str__(self): script = self.script.hex() @@ -65,7 +74,9 @@ class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")): )) -class TxOutput(namedtuple("TxOutput", "value pk_script")): +class TxOutput(typing.NamedTuple): + value: int + pk_script: bytes def serialize(self): return b''.join(( diff --git a/lbry/wallet/server/util.py b/lbry/wallet/server/util.py index bc27f7d51..d78b23bb5 100644 --- a/lbry/wallet/server/util.py +++ b/lbry/wallet/server/util.py @@ -340,7 +340,7 @@ pack_le_int64 = struct_le_q.pack pack_le_uint16 = struct_le_H.pack pack_le_uint32 = struct_le_I.pack pack_be_uint64 = lambda x: x.to_bytes(8, byteorder='big') -pack_be_uint16 = struct_be_H.pack +pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big') pack_be_uint32 = struct_be_I.pack pack_byte = structB.pack diff --git a/setup.py b/setup.py index 2838b1f7f..56832e8eb 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh: PLYVEL = [] if sys.platform.startswith('linux'): - PLYVEL.append('plyvel==1.0.5') + PLYVEL.append('plyvel==1.3.0') setup( name=__name__, diff --git a/tests/integration/blockchain/test_blockchain_reorganization.py b/tests/integration/blockchain/test_blockchain_reorganization.py index 3f7a1f0b1..72724a68e 100644 --- a/tests/integration/blockchain/test_blockchain_reorganization.py +++ b/tests/integration/blockchain/test_blockchain_reorganization.py @@ -22,7 +22,7 @@ class BlockchainReorganizationTests(CommandTestCase): self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode()) self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex()) - txids = await asyncio.get_event_loop().run_in_executor(bp.db.executor, get_txids) + txids = await asyncio.get_event_loop().run_in_executor(None, get_txids) txs = await bp.db.fs_transactions(txids) block_txs = (await bp.daemon.deserialised_block(block_hash))['tx'] self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions') @@ -57,11 +57,29 @@ class BlockchainReorganizationTests(CommandTestCase): await self.assertBlockHash(209) await 
self.assertBlockHash(210) await self.assertBlockHash(211) + still_valid = await self.daemon.jsonrpc_stream_create( + 'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(still_valid) + await self.blockchain.generate(1) + await self.ledger.on_header.where(lambda e: e.height == 212) + claim_id = still_valid.outputs[0].claim_id + c1 = (await self.resolve(f'still-valid#{claim_id}'))['claim_id'] + c2 = (await self.resolve(f'still-valid#{claim_id[:2]}'))['claim_id'] + c3 = (await self.resolve(f'still-valid'))['claim_id'] + self.assertTrue(c1 == c2 == c3) + + abandon_tx = await self.daemon.jsonrpc_stream_abandon(claim_id=claim_id) + await self.blockchain.generate(1) + await self.ledger.on_header.where(lambda e: e.height == 213) + c1 = await self.resolve(f'still-valid#{still_valid.outputs[0].claim_id}') + c2 = await self.daemon.jsonrpc_resolve([f'still-valid#{claim_id[:2]}']) + c3 = await self.daemon.jsonrpc_resolve([f'still-valid']) async def test_reorg_change_claim_height(self): # sanity check - txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft') - self.assertListEqual(txos, []) + result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both + self.assertIn('error', result) still_valid = await self.daemon.jsonrpc_stream_create( 'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!') @@ -82,17 +100,15 @@ class BlockchainReorganizationTests(CommandTestCase): self.assertEqual(self.ledger.headers.height, 208) await self.assertBlockHash(208) - txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft') - self.assertEqual(1, len(txos)) - txo = txos[0] - self.assertEqual(txo.tx_ref.id, broadcast_tx.id) - self.assertEqual(txo.tx_ref.height, 208) + claim = await self.resolve('hovercraft') + self.assertEqual(claim['txid'], broadcast_tx.id) + self.assertEqual(claim['height'], 208) # check that our tx is in block 208 as returned by lbrycrdd invalidated_block_hash = (await self.ledger.headers.hash(208)).decode() block_207 = await self.blockchain.get_block(invalidated_block_hash) - self.assertIn(txo.tx_ref.id, block_207['tx']) - self.assertEqual(208, txos[0].tx_ref.height) + self.assertIn(claim['txid'], block_207['tx']) + self.assertEqual(208, claim['height']) # reorg the last block dropping our claim tx await self.blockchain.invalidate_block(invalidated_block_hash) @@ -109,11 +125,20 @@ class BlockchainReorganizationTests(CommandTestCase): reorg_block_hash = await self.blockchain.get_block_hash(208) self.assertNotEqual(invalidated_block_hash, reorg_block_hash) block_207 = await self.blockchain.get_block(reorg_block_hash) - self.assertNotIn(txo.tx_ref.id, block_207['tx']) + self.assertNotIn(claim['txid'], block_207['tx']) client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode() self.assertEqual(client_reorg_block_hash, reorg_block_hash) + # verify the dropped claim is no longer returned by claim search + self.assertDictEqual( + {'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}}, + await self.resolve('hovercraft') + ) + + # verify the claim published a block earlier wasn't also reverted + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + # broadcast the claim in a different block new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode()) self.assertEqual(broadcast_tx.id, new_txid) @@ -123,14 +148,88 @@ class BlockchainReorganizationTests(CommandTestCase): await asyncio.wait_for(self.on_header(210), 
1.0) # verify the claim is in the new block and that it is returned by claim_search - block_210 = await self.blockchain.get_block((await self.ledger.headers.hash(210)).decode()) - self.assertIn(txo.tx_ref.id, block_210['tx']) - txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft') - self.assertEqual(1, len(txos)) - self.assertEqual(txos[0].tx_ref.id, new_txid) - self.assertEqual(210, txos[0].tx_ref.height) + republished = await self.resolve('hovercraft') + self.assertEqual(210, republished['height']) + self.assertEqual(claim['claim_id'], republished['claim_id']) # this should still be unchanged - txos, _, _, _ = await self.ledger.claim_search([], name='still-valid') - self.assertEqual(1, len(txos)) - self.assertEqual(207, txos[0].tx_ref.height) + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + async def test_reorg_drop_claim(self): + # sanity check + result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both + self.assertIn('error', result) + + still_valid = await self.daemon.jsonrpc_stream_create( + 'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(still_valid) + await self.generate(1) + + # create a claim and verify it's returned by claim_search + self.assertEqual(self.ledger.headers.height, 207) + await self.assertBlockHash(207) + + broadcast_tx = await self.daemon.jsonrpc_stream_create( + 'hovercraft', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(broadcast_tx) + await self.generate(1) + await self.ledger.wait(broadcast_tx, self.blockchain.block_expected) + self.assertEqual(self.ledger.headers.height, 208) + await self.assertBlockHash(208) + + claim = await self.resolve('hovercraft') + self.assertEqual(claim['txid'], broadcast_tx.id) + self.assertEqual(claim['height'], 208) + + # check that our tx is in block 208 as returned by lbrycrdd + invalidated_block_hash = (await self.ledger.headers.hash(208)).decode() + block_207 = await self.blockchain.get_block(invalidated_block_hash) + self.assertIn(claim['txid'], block_207['tx']) + self.assertEqual(208, claim['height']) + + # reorg the last block dropping our claim tx + await self.blockchain.invalidate_block(invalidated_block_hash) + await self.blockchain.clear_mempool() + await self.blockchain.generate(2) + + # wait for the client to catch up and verify the reorg + await asyncio.wait_for(self.on_header(209), 3.0) + await self.assertBlockHash(207) + await self.assertBlockHash(208) + await self.assertBlockHash(209) + + # verify the claim was dropped from block 208 as returned by lbrycrdd + reorg_block_hash = await self.blockchain.get_block_hash(208) + self.assertNotEqual(invalidated_block_hash, reorg_block_hash) + block_207 = await self.blockchain.get_block(reorg_block_hash) + self.assertNotIn(claim['txid'], block_207['tx']) + + client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode() + self.assertEqual(client_reorg_block_hash, reorg_block_hash) + + # verify the dropped claim is no longer returned by claim search + self.assertDictEqual( + {'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}}, + await self.resolve('hovercraft') + ) + + # verify the claim published a block earlier wasn't also reverted + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + # broadcast the claim in a different block + new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode()) + self.assertEqual(broadcast_tx.id, 
new_txid) + await self.blockchain.generate(1) + + # wait for the client to catch up + await asyncio.wait_for(self.on_header(210), 1.0) + + # verify the claim is in the new block and that it is returned by claim_search + republished = await self.resolve('hovercraft') + self.assertEqual(210, republished['height']) + self.assertEqual(claim['claim_id'], republished['claim_id']) + + # this should still be unchanged + self.assertEqual(207, (await self.resolve('still-valid'))['height']) diff --git a/tests/integration/blockchain/test_network.py b/tests/integration/blockchain/test_network.py index 9447dc835..171d457c4 100644 --- a/tests/integration/blockchain/test_network.py +++ b/tests/integration/blockchain/test_network.py @@ -33,7 +33,7 @@ class NetworkTests(IntegrationTestCase): 'donation_address': '', 'daily_fee': '0', 'server_version': lbry.__version__, - 'trending_algorithm': 'zscore', + 'trending_algorithm': 'variable_decay', }, await self.ledger.network.get_server_features()) # await self.conductor.spv_node.stop() payment_address, donation_address = await self.account.get_addresses(limit=2) @@ -58,7 +58,7 @@ class NetworkTests(IntegrationTestCase): 'donation_address': donation_address, 'daily_fee': '42', 'server_version': lbry.__version__, - 'trending_algorithm': 'zscore', + 'trending_algorithm': 'variable_decay', }, await self.ledger.network.get_server_features()) @@ -176,10 +176,19 @@ class UDPServerFailDiscoveryTest(AsyncioTestCase): class ServerPickingTestCase(AsyncioTestCase): - async def _make_udp_server(self, port): + async def _make_udp_server(self, port, latency) -> StatusServer: s = StatusServer() - await s.start(0, b'\x00' * 32, '127.0.0.1', port) + await s.start(0, b'\x00' * 32, 'US', '127.0.0.1', port, True) + s.set_available() + sendto = s._protocol.transport.sendto + + def mock_sendto(data, addr): + self.loop.call_later(latency, sendto, data, addr) + + s._protocol.transport.sendto = mock_sendto + self.addCleanup(s.stop) + return s async def _make_fake_server(self, latency=1.0, port=1): # local fake server with artificial latency @@ -191,23 +200,24 @@ class ServerPickingTestCase(AsyncioTestCase): return {'height': 1} server = await self.loop.create_server(lambda: FakeSession(), host='127.0.0.1', port=port) self.addCleanup(server.close) - await self._make_udp_server(port) + await self._make_udp_server(port, latency) return '127.0.0.1', port async def _make_bad_server(self, port=42420): async def echo(reader, writer): while True: writer.write(await reader.read()) + server = await asyncio.start_server(echo, host='127.0.0.1', port=port) self.addCleanup(server.close) - await self._make_udp_server(port) + await self._make_udp_server(port, 0) return '127.0.0.1', port - async def _test_pick_fastest(self): + async def test_pick_fastest(self): ledger = Mock(config={ 'default_servers': [ # fast but unhealthy, should be discarded - await self._make_bad_server(), + # await self._make_bad_server(), ('localhost', 1), ('example.that.doesnt.resolve', 9000), await self._make_fake_server(latency=1.0, port=1340), @@ -223,7 +233,7 @@ class ServerPickingTestCase(AsyncioTestCase): await asyncio.wait_for(network.on_connected.first, timeout=10) self.assertTrue(network.is_connected) self.assertTupleEqual(network.client.server, ('127.0.0.1', 1337)) - self.assertTrue(all([not session.is_closing() for session in network.session_pool.available_sessions])) + # self.assertTrue(all([not session.is_closing() for session in network.session_pool.available_sessions])) # ensure we are connected to all of them 
after a while - await asyncio.sleep(1) - self.assertEqual(len(list(network.session_pool.available_sessions)), 3) + # await asyncio.sleep(1) + # self.assertEqual(len(list(network.session_pool.available_sessions)), 3) diff --git a/tests/integration/blockchain/test_resolve_command.py b/tests/integration/blockchain/test_resolve_command.py deleted file mode 100644 index 37b548ff9..000000000 --- a/tests/integration/blockchain/test_resolve_command.py +++ /dev/null @@ -1,410 +0,0 @@ -import asyncio -import json -import hashlib -from binascii import hexlify, unhexlify -from lbry.testcase import CommandTestCase -from lbry.wallet.transaction import Transaction, Output -from lbry.schema.compat import OldClaimMessage -from lbry.crypto.hash import sha256 -from lbry.crypto.base58 import Base58 - - -class BaseResolveTestCase(CommandTestCase): - - async def assertResolvesToClaimId(self, name, claim_id): - other = await self.resolve(name) - if claim_id is None: - self.assertIn('error', other) - self.assertEqual(other['error']['name'], 'NOT_FOUND') - else: - self.assertEqual(claim_id, other['claim_id']) - - -class ResolveCommand(BaseResolveTestCase): - - async def test_resolve_response(self): - channel_id = self.get_claim_id( - await self.channel_create('@abc', '0.01') - ) - - # resolving a channel @abc - response = await self.resolve('lbry://@abc') - self.assertEqual(response['name'], '@abc') - self.assertEqual(response['value_type'], 'channel') - self.assertEqual(response['meta']['claims_in_channel'], 0) - - await self.stream_create('foo', '0.01', channel_id=channel_id) - await self.stream_create('foo2', '0.01', channel_id=channel_id) - - # resolving a channel @abc with some claims in it - response['confirmations'] += 2 - response['meta']['claims_in_channel'] = 2 - self.assertEqual(response, await self.resolve('lbry://@abc')) - - # resolving claim foo within channel @abc - claim = await self.resolve('lbry://@abc/foo') - self.assertEqual(claim['name'], 'foo') - self.assertEqual(claim['value_type'], 'stream') - self.assertEqual(claim['signing_channel']['name'], '@abc') - self.assertTrue(claim['is_channel_signature_valid']) - self.assertEqual( - claim['timestamp'], - self.ledger.headers.estimated_timestamp(claim['height']) - ) - self.assertEqual( - claim['signing_channel']['timestamp'], - self.ledger.headers.estimated_timestamp(claim['signing_channel']['height']) - ) - - # resolving claim foo by itself - self.assertEqual(claim, await self.resolve('lbry://foo')) - # resolving from the given permanent url - self.assertEqual(claim, await self.resolve(claim['permanent_url'])) - - # resolving multiple at once - response = await self.out(self.daemon.jsonrpc_resolve(['lbry://foo', 'lbry://foo2'])) - self.assertSetEqual({'lbry://foo', 'lbry://foo2'}, set(response)) - claim = response['lbry://foo2'] - self.assertEqual(claim['name'], 'foo2') - self.assertEqual(claim['value_type'], 'stream') - self.assertEqual(claim['signing_channel']['name'], '@abc') - self.assertTrue(claim['is_channel_signature_valid']) - - # resolve has correct confirmations - tx_details = await self.blockchain.get_raw_transaction(claim['txid']) - self.assertEqual(claim['confirmations'], json.loads(tx_details)['confirmations']) - - # resolve handles invalid data - await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1") - await self.generate(1) - response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish")) - self.assertSetEqual({'lbry://gibberish'}, set(response)) - claim = 
response['lbry://gibberish'] - self.assertEqual(claim['name'], 'gibberish') - self.assertNotIn('value', claim) - - # resolve retries - await self.conductor.spv_node.stop() - resolve_task = asyncio.create_task(self.resolve('foo')) - await self.conductor.spv_node.start(self.conductor.blockchain_node) - self.assertIsNotNone((await resolve_task)['claim_id']) - - async def test_winning_by_effective_amount(self): - # first one remains winner unless something else changes - claim_id1 = self.get_claim_id( - await self.channel_create('@foo', allow_duplicate_name=True)) - await self.assertResolvesToClaimId('@foo', claim_id1) - claim_id2 = self.get_claim_id( - await self.channel_create('@foo', allow_duplicate_name=True)) - await self.assertResolvesToClaimId('@foo', claim_id1) - claim_id3 = self.get_claim_id( - await self.channel_create('@foo', allow_duplicate_name=True)) - await self.assertResolvesToClaimId('@foo', claim_id1) - # supports change the winner - await self.support_create(claim_id3, '0.09') - await self.assertResolvesToClaimId('@foo', claim_id3) - await self.support_create(claim_id2, '0.19') - await self.assertResolvesToClaimId('@foo', claim_id2) - await self.support_create(claim_id1, '0.29') - await self.assertResolvesToClaimId('@foo', claim_id1) - - async def test_advanced_resolve(self): - claim_id1 = self.get_claim_id( - await self.stream_create('foo', '0.7', allow_duplicate_name=True)) - claim_id2 = self.get_claim_id( - await self.stream_create('foo', '0.8', allow_duplicate_name=True)) - claim_id3 = self.get_claim_id( - await self.stream_create('foo', '0.9', allow_duplicate_name=True)) - # plain winning claim - await self.assertResolvesToClaimId('foo', claim_id3) - # amount order resolution - await self.assertResolvesToClaimId('foo$1', claim_id3) - await self.assertResolvesToClaimId('foo$2', claim_id2) - await self.assertResolvesToClaimId('foo$3', claim_id1) - await self.assertResolvesToClaimId('foo$4', None) - - async def test_partial_claim_id_resolve(self): - # add some noise - await self.channel_create('@abc', '0.1', allow_duplicate_name=True) - await self.channel_create('@abc', '0.2', allow_duplicate_name=True) - await self.channel_create('@abc', '1.0', allow_duplicate_name=True) - - channel_id = self.get_claim_id( - await self.channel_create('@abc', '1.1', allow_duplicate_name=True)) - await self.assertResolvesToClaimId(f'@abc', channel_id) - await self.assertResolvesToClaimId(f'@abc#{channel_id[:10]}', channel_id) - await self.assertResolvesToClaimId(f'@abc#{channel_id}', channel_id) - channel = (await self.claim_search(claim_id=channel_id))[0] - await self.assertResolvesToClaimId(channel['short_url'], channel_id) - await self.assertResolvesToClaimId(channel['canonical_url'], channel_id) - await self.assertResolvesToClaimId(channel['permanent_url'], channel_id) - - # add some noise - await self.stream_create('foo', '0.1', allow_duplicate_name=True, channel_id=channel['claim_id']) - await self.stream_create('foo', '0.2', allow_duplicate_name=True, channel_id=channel['claim_id']) - await self.stream_create('foo', '0.3', allow_duplicate_name=True, channel_id=channel['claim_id']) - - claim_id1 = self.get_claim_id( - await self.stream_create('foo', '0.7', allow_duplicate_name=True, channel_id=channel['claim_id'])) - claim1 = (await self.claim_search(claim_id=claim_id1))[0] - await self.assertResolvesToClaimId('foo', claim_id1) - await self.assertResolvesToClaimId('@abc/foo', claim_id1) - await self.assertResolvesToClaimId(claim1['short_url'], claim_id1) - await 
self.assertResolvesToClaimId(claim1['canonical_url'], claim_id1) - await self.assertResolvesToClaimId(claim1['permanent_url'], claim_id1) - - claim_id2 = self.get_claim_id( - await self.stream_create('foo', '0.8', allow_duplicate_name=True, channel_id=channel['claim_id'])) - claim2 = (await self.claim_search(claim_id=claim_id2))[0] - await self.assertResolvesToClaimId('foo', claim_id2) - await self.assertResolvesToClaimId('@abc/foo', claim_id2) - await self.assertResolvesToClaimId(claim2['short_url'], claim_id2) - await self.assertResolvesToClaimId(claim2['canonical_url'], claim_id2) - await self.assertResolvesToClaimId(claim2['permanent_url'], claim_id2) - - async def test_abandoned_channel_with_signed_claims(self): - channel = (await self.channel_create('@abc', '1.0'))['outputs'][0] - orphan_claim = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel['claim_id']) - abandoned_channel_id = channel['claim_id'] - await self.channel_abandon(txid=channel['txid'], nout=0) - channel = (await self.channel_create('@abc', '1.0'))['outputs'][0] - orphan_claim_id = self.get_claim_id(orphan_claim) - - # Original channel doesn't exists anymore, so the signature is invalid. For invalid signatures, resolution is - # only possible outside a channel - self.assertEqual( - {'error': { - 'name': 'NOT_FOUND', - 'text': 'Could not find claim at "lbry://@abc/on-channel-claim".', - }}, - await self.resolve('lbry://@abc/on-channel-claim') - ) - response = await self.resolve('lbry://on-channel-claim') - self.assertFalse(response['is_channel_signature_valid']) - self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel']) - direct_uri = 'lbry://on-channel-claim#' + orphan_claim_id - response = await self.resolve(direct_uri) - self.assertFalse(response['is_channel_signature_valid']) - self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel']) - await self.stream_abandon(claim_id=orphan_claim_id) - - uri = 'lbry://@abc/on-channel-claim' - # now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore) - valid_claim = await self.stream_create('on-channel-claim', '0.00000001', channel_id=channel['claim_id']) - # resolves normally - response = await self.resolve(uri) - self.assertTrue(response['is_channel_signature_valid']) - - # ooops! claimed a valid conflict! (this happens on the wild, mostly by accident or race condition) - await self.stream_create( - 'on-channel-claim', '0.00000001', channel_id=channel['claim_id'], allow_duplicate_name=True - ) - - # it still resolves! 
but to the older claim - response = await self.resolve(uri) - self.assertTrue(response['is_channel_signature_valid']) - self.assertEqual(response['txid'], valid_claim['txid']) - claims = await self.claim_search(name='on-channel-claim') - self.assertEqual(2, len(claims)) - self.assertEqual( - {channel['claim_id']}, {claim['signing_channel']['claim_id'] for claim in claims} - ) - - async def test_normalization_resolution(self): - - one = 'ΣίσυφοςfiÆ' - two = 'ΣΊΣΥΦΟσFIæ' - - _ = await self.stream_create(one, '0.1') - c = await self.stream_create(two, '0.2') - - winner_id = self.get_claim_id(c) - - r1 = await self.resolve(f'lbry://{one}') - r2 = await self.resolve(f'lbry://{two}') - - self.assertEqual(winner_id, r1['claim_id']) - self.assertEqual(winner_id, r2['claim_id']) - - async def test_resolve_old_claim(self): - channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0') - await self.confirm_tx(channel.id) - address = channel.outputs[0].get_address(self.account.ledger) - claim = generate_signed_legacy(address, channel.outputs[0]) - tx = await Transaction.claim_create('example', claim.SerializeToString(), 1, address, [self.account], self.account) - await tx.sign([self.account]) - await self.broadcast(tx) - await self.confirm_tx(tx.id) - - response = await self.resolve('@olds/example') - self.assertTrue(response['is_channel_signature_valid']) - - claim.publisherSignature.signature = bytes(reversed(claim.publisherSignature.signature)) - tx = await Transaction.claim_create( - 'bad_example', claim.SerializeToString(), 1, address, [self.account], self.account - ) - await tx.sign([self.account]) - await self.broadcast(tx) - await self.confirm_tx(tx.id) - - response = await self.resolve('bad_example') - self.assertFalse(response['is_channel_signature_valid']) - self.assertEqual( - {'error': { - 'name': 'NOT_FOUND', - 'text': 'Could not find claim at "@olds/bad_example".', - }}, - await self.resolve('@olds/bad_example') - ) - - async def test_resolve_with_includes(self): - wallet2 = await self.daemon.jsonrpc_wallet_create('wallet2', create_account=True) - address2 = await self.daemon.jsonrpc_address_unused(wallet_id=wallet2.id) - - await self.wallet_send('1.0', address2) - - stream = await self.stream_create( - 'priced', '0.1', wallet_id=wallet2.id, - fee_amount='0.5', fee_currency='LBC', fee_address=address2 - ) - stream_id = self.get_claim_id(stream) - - resolve = await self.resolve('priced') - self.assertNotIn('is_my_output', resolve) - self.assertNotIn('purchase_receipt', resolve) - self.assertNotIn('sent_supports', resolve) - self.assertNotIn('sent_tips', resolve) - self.assertNotIn('received_tips', resolve) - - # is_my_output - resolve = await self.resolve('priced', include_is_my_output=True) - self.assertFalse(resolve['is_my_output']) - resolve = await self.resolve('priced', wallet_id=wallet2.id, include_is_my_output=True) - self.assertTrue(resolve['is_my_output']) - - # purchase receipt - resolve = await self.resolve('priced', include_purchase_receipt=True) - self.assertNotIn('purchase_receipt', resolve) - await self.purchase_create(stream_id) - resolve = await self.resolve('priced', include_purchase_receipt=True) - self.assertEqual('0.5', resolve['purchase_receipt']['amount']) - - # my supports and my tips - resolve = await self.resolve( - 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True - ) - self.assertEqual('0.0', resolve['sent_supports']) - self.assertEqual('0.0', resolve['sent_tips']) - self.assertEqual('0.0', resolve['received_tips']) 
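The normalization test above hinges on claim names being unicode-normalized and case-folded before the winning claim is looked up, which is why 'ΣίσυφοςfiÆ' and 'ΣΊΣΥΦΟσFIæ' resolve to the same claim. A minimal sketch of that idea, assuming NFD decomposition plus casefold (an approximation for illustration only, not the SDK's actual normalization routine):

import unicodedata

def normalized(name: str) -> str:
    # assumed normalization: NFD decomposition, then case folding
    return unicodedata.normalize('NFD', name).casefold()

# the two spellings used in test_normalization_resolution collapse to one key
assert normalized('ΣίσυφοςfiÆ') == normalized('ΣΊΣΥΦΟσFIæ')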
- await self.support_create(stream_id, '0.3') - await self.support_create(stream_id, '0.2') - await self.support_create(stream_id, '0.4', tip=True) - await self.support_create(stream_id, '0.5', tip=True) - resolve = await self.resolve( - 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True - ) - self.assertEqual('0.5', resolve['sent_supports']) - self.assertEqual('0.9', resolve['sent_tips']) - self.assertEqual('0.0', resolve['received_tips']) - - resolve = await self.resolve( - 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True, - wallet_id=wallet2.id - ) - self.assertEqual('0.0', resolve['sent_supports']) - self.assertEqual('0.0', resolve['sent_tips']) - self.assertEqual('0.9', resolve['received_tips']) - self.assertEqual('1.4', resolve['meta']['support_amount']) - - # make sure nothing is leaked between wallets through cached tx/txos - resolve = await self.resolve('priced') - self.assertNotIn('is_my_output', resolve) - self.assertNotIn('purchase_receipt', resolve) - self.assertNotIn('sent_supports', resolve) - self.assertNotIn('sent_tips', resolve) - self.assertNotIn('received_tips', resolve) - - -class ResolveAfterReorg(BaseResolveTestCase): - - async def reorg(self, start): - blocks = self.ledger.headers.height - start - self.blockchain.block_expected = start - 1 - # go back to start - await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode()) - # go to previous + 1 - await self.generate(blocks + 2) - - async def test_reorg(self): - self.assertEqual(self.ledger.headers.height, 206) - - channel_name = '@abc' - channel_id = self.get_claim_id( - await self.channel_create(channel_name, '0.01') - ) - self.assertNotIn('error', await self.resolve(channel_name)) - await self.reorg(206) - self.assertNotIn('error', await self.resolve(channel_name)) - - stream_name = 'foo' - stream_id = self.get_claim_id( - await self.stream_create(stream_name, '0.01', channel_id=channel_id) - ) - self.assertNotIn('error', await self.resolve(stream_name)) - await self.reorg(206) - self.assertNotIn('error', await self.resolve(stream_name)) - - await self.support_create(stream_id, '0.01') - self.assertNotIn('error', await self.resolve(stream_name)) - await self.reorg(206) - self.assertNotIn('error', await self.resolve(stream_name)) - - await self.stream_abandon(stream_id) - self.assertNotIn('error', await self.resolve(channel_name)) - self.assertIn('error', await self.resolve(stream_name)) - await self.reorg(206) - self.assertNotIn('error', await self.resolve(channel_name)) - self.assertIn('error', await self.resolve(stream_name)) - - await self.channel_abandon(channel_id) - self.assertIn('error', await self.resolve(channel_name)) - self.assertIn('error', await self.resolve(stream_name)) - await self.reorg(206) - self.assertIn('error', await self.resolve(channel_name)) - self.assertIn('error', await self.resolve(stream_name)) - - -def generate_signed_legacy(address: bytes, output: Output): - decoded_address = Base58.decode(address) - claim = OldClaimMessage() - claim.ParseFromString(unhexlify( - '080110011aee04080112a604080410011a2b4865726520617265203520526561736f6e73204920e29da4e' - 'fb88f204e657874636c6f7564207c20544c4722920346696e64206f7574206d6f72652061626f7574204e' - '657874636c6f75643a2068747470733a2f2f6e657874636c6f75642e636f6d2f0a0a596f752063616e206' - '6696e64206d65206f6e20746865736520736f6369616c733a0a202a20466f72756d733a2068747470733a' - 
'2f2f666f72756d2e6865617679656c656d656e742e696f2f0a202a20506f64636173743a2068747470733' - 'a2f2f6f6666746f706963616c2e6e65740a202a2050617472656f6e3a2068747470733a2f2f7061747265' - '6f6e2e636f6d2f7468656c696e757867616d65720a202a204d657263683a2068747470733a2f2f7465657' - '37072696e672e636f6d2f73746f7265732f6f6666696369616c2d6c696e75782d67616d65720a202a2054' - '77697463683a2068747470733a2f2f7477697463682e74762f786f6e64616b0a202a20547769747465723' - 'a2068747470733a2f2f747769747465722e636f6d2f7468656c696e757867616d65720a0a2e2e2e0a6874' - '7470733a2f2f7777772e796f75747562652e636f6d2f77617463683f763d4672546442434f535f66632a0' - 'f546865204c696e75782047616d6572321c436f7079726967687465642028636f6e746163742061757468' - '6f722938004a2968747470733a2f2f6265726b2e6e696e6a612f7468756d626e61696c732f46725464424' - '34f535f666352005a001a41080110011a30040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc22' - 'f0bff70c4fe0b91fd36da9a375e3e1c171db825bf5d1f32209766964656f2f6d70342a5c080110031a406' - '2b2dd4c45e364030fbfad1a6fefff695ebf20ea33a5381b947753e2a0ca359989a5cc7d15e5392a0d354c' - '0b68498382b2701b22c03beb8dcb91089031b871e72214feb61536c007cdf4faeeaab4876cb397feaf6b51' - )) - claim.ClearField("publisherSignature") - digest = sha256(b''.join([ - decoded_address, - claim.SerializeToString(), - output.claim_hash[::-1] - ])) - signature = output.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256) - claim.publisherSignature.version = 1 - claim.publisherSignature.signatureType = 1 - claim.publisherSignature.signature = signature - claim.publisherSignature.certificateId = output.claim_hash[::-1] - return claim diff --git a/tests/integration/blockchain/test_wallet_server_sessions.py b/tests/integration/blockchain/test_wallet_server_sessions.py index 0b079bbdc..4f7930c05 100644 --- a/tests/integration/blockchain/test_wallet_server_sessions.py +++ b/tests/integration/blockchain/test_wallet_server_sessions.py @@ -5,7 +5,7 @@ import lbry.wallet from lbry.error import ServerPaymentFeeAboveMaxAllowedError from lbry.wallet.network import ClientSession from lbry.wallet.rpc import RPCError -from lbry.wallet.server.db.elasticsearch.sync import run as run_sync, make_es_index +from lbry.wallet.server.db.elasticsearch.sync import run_sync, make_es_index from lbry.wallet.server.session import LBRYElectrumX from lbry.testcase import IntegrationTestCase, CommandTestCase from lbry.wallet.orchstr8.node import SPVNode @@ -104,8 +104,11 @@ class TestESSync(CommandTestCase): async def resync(): await db.search_index.start() db.search_index.clear_caches() - await run_sync(db.sql._db_path, 1, 0, 0, index_name=db.search_index.index) + await run_sync(index_name=db.search_index.index, db=db) self.assertEqual(10, len(await self.claim_search(order_by=['height']))) + + self.assertEqual(0, len(await self.claim_search(order_by=['height']))) + await resync() # this time we will test a migration from unversioned to v1 @@ -192,17 +195,18 @@ class TestHubDiscovery(CommandTestCase): ) -class TestStress(CommandTestCase): - async def test_flush_over_66_thousand(self): - history = self.conductor.spv_node.server.db.history - history.flush_count = 66_000 - history.flush() - self.assertEqual(history.flush_count, 66_001) - await self.generate(1) - self.assertEqual(history.flush_count, 66_002) +class TestStressFlush(CommandTestCase): + # async def test_flush_over_66_thousand(self): + # history = self.conductor.spv_node.server.db.history + # history.flush_count = 66_000 + # history.flush() + # self.assertEqual(history.flush_count, 66_001) + 
# await self.generate(1) + # self.assertEqual(history.flush_count, 66_002) async def test_thousands_claim_ids_on_search(self): await self.stream_create() with self.assertRaises(RPCError) as err: await self.claim_search(not_channel_ids=[("%040x" % i) for i in range(8196)]) - self.assertEqual(err.exception.message, 'not_channel_ids cant have more than 2048 items.') + # in the go hub this doesnt have a `.` at the end, in python it does + self.assertTrue(err.exception.message.startswith('not_channel_ids cant have more than 2048 items')) diff --git a/tests/integration/claims/__init__.py b/tests/integration/claims/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/blockchain/test_claim_commands.py b/tests/integration/claims/test_claim_commands.py similarity index 98% rename from tests/integration/blockchain/test_claim_commands.py rename to tests/integration/claims/test_claim_commands.py index 2836f7671..7d3e44656 100644 --- a/tests/integration/blockchain/test_claim_commands.py +++ b/tests/integration/claims/test_claim_commands.py @@ -182,6 +182,9 @@ class ClaimSearchCommand(ClaimTestCase): claims = [three, two, signed] await self.assertFindsClaims(claims, channel_ids=[self.channel_id]) await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}") + await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}", valid_channel_signature=True) + await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}", has_channel_signature=True, valid_channel_signature=True) + await self.assertFindsClaims([], channel=f"@abc#{self.channel_id}", has_channel_signature=True, invalid_channel_signature=True) # fixme await self.assertFindsClaims([], channel=f"@inexistent") await self.assertFindsClaims([three, two, signed2, signed], channel_ids=[channel_id2, self.channel_id]) await self.channel_abandon(claim_id=self.channel_id) @@ -810,10 +813,15 @@ class TransactionOutputCommands(ClaimTestCase): stream_id = self.get_claim_id(await self.stream_create()) await self.support_create(stream_id, '0.3') await self.support_create(stream_id, '0.2') - await self.generate(day_blocks) + await self.generate(day_blocks // 2) + await self.stream_update(stream_id) + await self.generate(day_blocks // 2) await self.support_create(stream_id, '0.4') await self.support_create(stream_id, '0.5') - await self.generate(day_blocks) + await self.stream_update(stream_id) + await self.generate(day_blocks // 2) + await self.stream_update(stream_id) + await self.generate(day_blocks // 2) await self.support_create(stream_id, '0.6') plot = await self.txo_plot(type='support') @@ -1484,12 +1492,10 @@ class StreamCommands(ClaimTestCase): filtering_channel_id = self.get_claim_id( await self.channel_create('@filtering', '0.1') ) - self.conductor.spv_node.server.db.sql.filtering_channel_hashes.add( - unhexlify(filtering_channel_id)[::-1] - ) - self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.filtered_streams)) + self.conductor.spv_node.server.db.filtering_channel_hashes.add(bytes.fromhex(filtering_channel_id)) + self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_streams)) await self.stream_repost(bad_content_id, 'filter1', '0.1', channel_name='@filtering') - self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.filtered_streams)) + self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_streams)) # search for filtered content directly result = await self.out(self.daemon.jsonrpc_claim_search(name='bad_content')) @@ -1531,12 +1537,16 @@ class 
StreamCommands(ClaimTestCase): blocking_channel_id = self.get_claim_id( await self.channel_create('@blocking', '0.1') ) - self.conductor.spv_node.server.db.sql.blocking_channel_hashes.add( - unhexlify(blocking_channel_id)[::-1] - ) - self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.blocked_streams)) + # test setting from env vars and starting from scratch + await self.conductor.spv_node.stop(False) + await self.conductor.spv_node.start(self.conductor.blockchain_node, + extraconf={'BLOCKING_CHANNEL_IDS': blocking_channel_id, + 'FILTERING_CHANNEL_IDS': filtering_channel_id}) + await self.daemon.wallet_manager.reset() + + self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_streams)) await self.stream_repost(bad_content_id, 'block1', '0.1', channel_name='@blocking') - self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.blocked_streams)) + self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_streams)) # blocked content is not resolveable error = (await self.resolve('lbry://@some_channel/bad_content'))['error'] @@ -1559,9 +1569,9 @@ class StreamCommands(ClaimTestCase): self.assertEqual('@bad_channel', result['items'][1]['name']) # filter channel out - self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.filtered_channels)) + self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_channels)) await self.stream_repost(bad_channel_id, 'filter2', '0.1', channel_name='@filtering') - self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.filtered_channels)) + self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_channels)) # same claim search as previous now returns 0 results result = await self.out(self.daemon.jsonrpc_claim_search(any_tags=['bad-stuff'], order_by=['height'])) @@ -1586,9 +1596,9 @@ class StreamCommands(ClaimTestCase): self.assertEqual(worse_content_id, result['claim_id']) # block channel - self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.blocked_channels)) + self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_channels)) await self.stream_repost(bad_channel_id, 'block2', '0.1', channel_name='@blocking') - self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.blocked_channels)) + self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_channels)) # channel, claim in channel or claim individually no longer resolve self.assertEqual((await self.resolve('lbry://@bad_channel'))['error']['name'], 'BLOCKED') @@ -1760,6 +1770,16 @@ class StreamCommands(ClaimTestCase): self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=self.account.id), 3) self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=account2_id), 1) + self.assertEqual(3, len(await self.claim_search(release_time='>0', order_by=['release_time']))) + self.assertEqual(3, len(await self.claim_search(release_time='>=0', order_by=['release_time']))) + self.assertEqual(4, len(await self.claim_search(order_by=['release_time']))) + self.assertEqual(4, len(await self.claim_search(release_time=' 1: + colliding_claim_ids.extend(prefixes[chan[:2]]) + break + first_claim = first_claims_one_char_shortid[colliding_claim_ids[0][:1]] + await self.assertResolvesToClaimId( + f'@abc#{colliding_claim_ids[0][:1]}', first_claim + ) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0][:2]}', colliding_claim_ids[0]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0][:7]}', colliding_claim_ids[0]) + await 
self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0][:17]}', colliding_claim_ids[0]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[0]}', colliding_claim_ids[0]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:3]}', colliding_claim_ids[1]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:7]}', colliding_claim_ids[1]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:17]}', colliding_claim_ids[1]) + await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1]}', colliding_claim_ids[1]) + + async def test_abandon_channel_and_claims_in_same_tx(self): + channel_id = self.get_claim_id( + await self.channel_create('@abc', '0.01') + ) + await self.stream_create('foo', '0.01', channel_id=channel_id) + await self.channel_update(channel_id, bid='0.001') + foo2_id = self.get_claim_id(await self.stream_create('foo2', '0.01', channel_id=channel_id)) + await self.stream_update(foo2_id, bid='0.0001', channel_id=channel_id, confirm=False) + tx = await self.stream_create('foo3', '0.01', channel_id=channel_id, confirm=False, return_tx=True) + await self.ledger.wait(tx) + + # db = self.conductor.spv_node.server.bp.db + # claims = list(db.all_claims_producer()) + # print("claims", claims) + await self.daemon.jsonrpc_txo_spend(blocking=True) + await self.generate(1) + await self.assertNoClaimForName('@abc') + await self.assertNoClaimForName('foo') + await self.assertNoClaimForName('foo2') + await self.assertNoClaimForName('foo3') + + async def test_resolve_response(self): + channel_id = self.get_claim_id( + await self.channel_create('@abc', '0.01') + ) + + # resolving a channel @abc + response = await self.resolve('lbry://@abc') + self.assertEqual(response['name'], '@abc') + self.assertEqual(response['value_type'], 'channel') + self.assertEqual(response['meta']['claims_in_channel'], 0) + + await self.stream_create('foo', '0.01', channel_id=channel_id) + await self.stream_create('foo2', '0.01', channel_id=channel_id) + + # resolving a channel @abc with some claims in it + response['confirmations'] += 2 + response['meta']['claims_in_channel'] = 2 + self.assertEqual(response, await self.resolve('lbry://@abc')) + + # resolving claim foo within channel @abc + claim = await self.resolve('lbry://@abc/foo') + self.assertEqual(claim['name'], 'foo') + self.assertEqual(claim['value_type'], 'stream') + self.assertEqual(claim['signing_channel']['name'], '@abc') + self.assertTrue(claim['is_channel_signature_valid']) + self.assertEqual( + claim['timestamp'], + self.ledger.headers.estimated_timestamp(claim['height']) + ) + self.assertEqual( + claim['signing_channel']['timestamp'], + self.ledger.headers.estimated_timestamp(claim['signing_channel']['height']) + ) + + # resolving claim foo by itself + self.assertEqual(claim, await self.resolve('lbry://foo')) + # resolving from the given permanent url + self.assertEqual(claim, await self.resolve(claim['permanent_url'])) + + # resolving multiple at once + response = await self.out(self.daemon.jsonrpc_resolve(['lbry://foo', 'lbry://foo2'])) + self.assertSetEqual({'lbry://foo', 'lbry://foo2'}, set(response)) + claim = response['lbry://foo2'] + self.assertEqual(claim['name'], 'foo2') + self.assertEqual(claim['value_type'], 'stream') + self.assertEqual(claim['signing_channel']['name'], '@abc') + self.assertTrue(claim['is_channel_signature_valid']) + + # resolve has correct confirmations + tx_details = await self.blockchain.get_raw_transaction(claim['txid']) + 
self.assertEqual(claim['confirmations'], json.loads(tx_details)['confirmations']) + + # resolve handles invalid data + await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1") + await self.generate(1) + response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish")) + self.assertSetEqual({'lbry://gibberish'}, set(response)) + claim = response['lbry://gibberish'] + self.assertEqual(claim['name'], 'gibberish') + self.assertNotIn('value', claim) + + # resolve retries + await self.conductor.spv_node.stop() + resolve_task = asyncio.create_task(self.resolve('foo')) + await self.conductor.spv_node.start(self.conductor.blockchain_node) + self.assertIsNotNone((await resolve_task)['claim_id']) + + async def test_winning_by_effective_amount(self): + # first one remains winner unless something else changes + claim_id1 = self.get_claim_id( + await self.channel_create('@foo', allow_duplicate_name=True)) + await self.assertResolvesToClaimId('@foo', claim_id1) + claim_id2 = self.get_claim_id( + await self.channel_create('@foo', allow_duplicate_name=True)) + await self.assertResolvesToClaimId('@foo', claim_id1) + claim_id3 = self.get_claim_id( + await self.channel_create('@foo', allow_duplicate_name=True)) + await self.assertResolvesToClaimId('@foo', claim_id1) + # supports change the winner + await self.support_create(claim_id3, '0.09') + await self.assertResolvesToClaimId('@foo', claim_id3) + await self.support_create(claim_id2, '0.19') + await self.assertResolvesToClaimId('@foo', claim_id2) + await self.support_create(claim_id1, '0.29') + await self.assertResolvesToClaimId('@foo', claim_id1) + + await self.support_abandon(claim_id1) + await self.assertResolvesToClaimId('@foo', claim_id2) + + async def test_advanced_resolve(self): + claim_id1 = self.get_claim_id( + await self.stream_create('foo', '0.7', allow_duplicate_name=True)) + await self.assertResolvesToClaimId('foo$1', claim_id1) + claim_id2 = self.get_claim_id( + await self.stream_create('foo', '0.8', allow_duplicate_name=True)) + await self.assertResolvesToClaimId('foo$1', claim_id2) + await self.assertResolvesToClaimId('foo$2', claim_id1) + claim_id3 = self.get_claim_id( + await self.stream_create('foo', '0.9', allow_duplicate_name=True)) + # plain winning claim + await self.assertResolvesToClaimId('foo', claim_id3) + + # amount order resolution + await self.assertResolvesToClaimId('foo$1', claim_id3) + await self.assertResolvesToClaimId('foo$2', claim_id2) + await self.assertResolvesToClaimId('foo$3', claim_id1) + await self.assertResolvesToClaimId('foo$4', None) + + # async def test_partial_claim_id_resolve(self): + # # add some noise + # await self.channel_create('@abc', '0.1', allow_duplicate_name=True) + # await self.channel_create('@abc', '0.2', allow_duplicate_name=True) + # await self.channel_create('@abc', '1.0', allow_duplicate_name=True) + # + # channel_id = self.get_claim_id(await self.channel_create('@abc', '1.1', allow_duplicate_name=True)) + # await self.assertResolvesToClaimId(f'@abc', channel_id) + # await self.assertResolvesToClaimId(f'@abc#{channel_id[:10]}', channel_id) + # await self.assertResolvesToClaimId(f'@abc#{channel_id}', channel_id) + # + # channel = await self.claim_get(channel_id) + # await self.assertResolvesToClaimId(channel['short_url'], channel_id) + # await self.assertResolvesToClaimId(channel['canonical_url'], channel_id) + # await self.assertResolvesToClaimId(channel['permanent_url'], channel_id) + # + # # add some noise + # await self.stream_create('foo', 
'0.1', allow_duplicate_name=True, channel_id=channel['claim_id']) + # await self.stream_create('foo', '0.2', allow_duplicate_name=True, channel_id=channel['claim_id']) + # await self.stream_create('foo', '0.3', allow_duplicate_name=True, channel_id=channel['claim_id']) + # + # claim_id1 = self.get_claim_id( + # await self.stream_create('foo', '0.7', allow_duplicate_name=True, channel_id=channel['claim_id'])) + # claim1 = await self.claim_get(claim_id=claim_id1) + # + # await self.assertResolvesToClaimId('foo', claim_id1) + # await self.assertResolvesToClaimId('@abc/foo', claim_id1) + # await self.assertResolvesToClaimId(claim1['short_url'], claim_id1) + # await self.assertResolvesToClaimId(claim1['canonical_url'], claim_id1) + # await self.assertResolvesToClaimId(claim1['permanent_url'], claim_id1) + # + # claim_id2 = self.get_claim_id( + # await self.stream_create('foo', '0.8', allow_duplicate_name=True, channel_id=channel['claim_id'])) + # claim2 = await self.claim_get(claim_id=claim_id2) + # await self.assertResolvesToClaimId('foo', claim_id2) + # await self.assertResolvesToClaimId('@abc/foo', claim_id2) + # await self.assertResolvesToClaimId(claim2['short_url'], claim_id2) + # await self.assertResolvesToClaimId(claim2['canonical_url'], claim_id2) + # await self.assertResolvesToClaimId(claim2['permanent_url'], claim_id2) + + async def test_abandoned_channel_with_signed_claims(self): + channel = (await self.channel_create('@abc', '1.0'))['outputs'][0] + orphan_claim = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel['claim_id']) + abandoned_channel_id = channel['claim_id'] + await self.channel_abandon(txid=channel['txid'], nout=0) + channel = (await self.channel_create('@abc', '1.0'))['outputs'][0] + orphan_claim_id = self.get_claim_id(orphan_claim) + + # Original channel doesn't exists anymore, so the signature is invalid. For invalid signatures, resolution is + # only possible outside a channel + self.assertEqual( + {'error': { + 'name': 'NOT_FOUND', + 'text': 'Could not find claim at "lbry://@abc/on-channel-claim".', + }}, + await self.resolve('lbry://@abc/on-channel-claim') + ) + response = await self.resolve('lbry://on-channel-claim') + self.assertFalse(response['is_channel_signature_valid']) + self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel']) + direct_uri = 'lbry://on-channel-claim#' + orphan_claim_id + response = await self.resolve(direct_uri) + self.assertFalse(response['is_channel_signature_valid']) + self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel']) + await self.stream_abandon(claim_id=orphan_claim_id) + + uri = 'lbry://@abc/on-channel-claim' + # now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore) + valid_claim = await self.stream_create('on-channel-claim', '0.00000001', channel_id=channel['claim_id']) + # resolves normally + response = await self.resolve(uri) + self.assertTrue(response['is_channel_signature_valid']) + + # ooops! claimed a valid conflict! (this happens on the wild, mostly by accident or race condition) + await self.stream_create( + 'on-channel-claim', '0.00000001', channel_id=channel['claim_id'], allow_duplicate_name=True + ) + + # it still resolves! 
but to the older claim + response = await self.resolve(uri) + self.assertTrue(response['is_channel_signature_valid']) + self.assertEqual(response['txid'], valid_claim['txid']) + claims = [await self.resolve('on-channel-claim'), await self.resolve('on-channel-claim$2')] + self.assertEqual(2, len(claims)) + self.assertEqual( + {channel['claim_id']}, {claim['signing_channel']['claim_id'] for claim in claims} + ) + + async def test_normalization_resolution(self): + + one = 'ΣίσυφοςfiÆ' + two = 'ΣΊΣΥΦΟσFIæ' + + c1 = await self.stream_create(one, '0.1') + c2 = await self.stream_create(two, '0.2') + + loser_id = self.get_claim_id(c1) + winner_id = self.get_claim_id(c2) + + # winning_one = await self.check_lbrycrd_winning(one) + await self.assertMatchClaimIsWinning(two, winner_id) + + claim1 = await self.resolve(f'lbry://{one}') + claim2 = await self.resolve(f'lbry://{two}') + claim3 = await self.resolve(f'lbry://{one}:{winner_id[:5]}') + claim4 = await self.resolve(f'lbry://{two}:{winner_id[:5]}') + + claim5 = await self.resolve(f'lbry://{one}:{loser_id[:5]}') + claim6 = await self.resolve(f'lbry://{two}:{loser_id[:5]}') + + self.assertEqual(winner_id, claim1['claim_id']) + self.assertEqual(winner_id, claim2['claim_id']) + self.assertEqual(winner_id, claim3['claim_id']) + self.assertEqual(winner_id, claim4['claim_id']) + + self.assertEqual(two, claim1['name']) + self.assertEqual(two, claim2['name']) + self.assertEqual(two, claim3['name']) + self.assertEqual(two, claim4['name']) + + self.assertEqual(loser_id, claim5['claim_id']) + self.assertEqual(loser_id, claim6['claim_id']) + self.assertEqual(one, claim5['name']) + self.assertEqual(one, claim6['name']) + + async def test_resolve_old_claim(self): + channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0') + await self.confirm_tx(channel.id) + address = channel.outputs[0].get_address(self.account.ledger) + claim = generate_signed_legacy(address, channel.outputs[0]) + tx = await Transaction.claim_create('example', claim.SerializeToString(), 1, address, [self.account], self.account) + await tx.sign([self.account]) + await self.broadcast(tx) + await self.confirm_tx(tx.id) + + response = await self.resolve('@olds/example') + self.assertTrue(response['is_channel_signature_valid']) + + claim.publisherSignature.signature = bytes(reversed(claim.publisherSignature.signature)) + tx = await Transaction.claim_create( + 'bad_example', claim.SerializeToString(), 1, address, [self.account], self.account + ) + await tx.sign([self.account]) + await self.broadcast(tx) + await self.confirm_tx(tx.id) + + response = await self.resolve('bad_example') + self.assertFalse(response['is_channel_signature_valid']) + self.assertEqual( + {'error': { + 'name': 'NOT_FOUND', + 'text': 'Could not find claim at "@olds/bad_example".', + }}, + await self.resolve('@olds/bad_example') + ) + + async def test_resolve_with_includes(self): + wallet2 = await self.daemon.jsonrpc_wallet_create('wallet2', create_account=True) + address2 = await self.daemon.jsonrpc_address_unused(wallet_id=wallet2.id) + + await self.wallet_send('1.0', address2) + + stream = await self.stream_create( + 'priced', '0.1', wallet_id=wallet2.id, + fee_amount='0.5', fee_currency='LBC', fee_address=address2 + ) + stream_id = self.get_claim_id(stream) + + resolve = await self.resolve('priced') + self.assertNotIn('is_my_output', resolve) + self.assertNotIn('purchase_receipt', resolve) + self.assertNotIn('sent_supports', resolve) + self.assertNotIn('sent_tips', resolve) + self.assertNotIn('received_tips', 
resolve) + + # is_my_output + resolve = await self.resolve('priced', include_is_my_output=True) + self.assertFalse(resolve['is_my_output']) + resolve = await self.resolve('priced', wallet_id=wallet2.id, include_is_my_output=True) + self.assertTrue(resolve['is_my_output']) + + # purchase receipt + resolve = await self.resolve('priced', include_purchase_receipt=True) + self.assertNotIn('purchase_receipt', resolve) + await self.purchase_create(stream_id) + resolve = await self.resolve('priced', include_purchase_receipt=True) + self.assertEqual('0.5', resolve['purchase_receipt']['amount']) + + # my supports and my tips + resolve = await self.resolve( + 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True + ) + self.assertEqual('0.0', resolve['sent_supports']) + self.assertEqual('0.0', resolve['sent_tips']) + self.assertEqual('0.0', resolve['received_tips']) + await self.support_create(stream_id, '0.3') + await self.support_create(stream_id, '0.2') + await self.support_create(stream_id, '0.4', tip=True) + await self.support_create(stream_id, '0.5', tip=True) + resolve = await self.resolve( + 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True + ) + self.assertEqual('0.5', resolve['sent_supports']) + self.assertEqual('0.9', resolve['sent_tips']) + self.assertEqual('0.0', resolve['received_tips']) + + resolve = await self.resolve( + 'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True, + wallet_id=wallet2.id + ) + self.assertEqual('0.0', resolve['sent_supports']) + self.assertEqual('0.0', resolve['sent_tips']) + self.assertEqual('0.9', resolve['received_tips']) + self.assertEqual('1.4', resolve['meta']['support_amount']) + + # make sure nothing is leaked between wallets through cached tx/txos + resolve = await self.resolve('priced') + self.assertNotIn('is_my_output', resolve) + self.assertNotIn('purchase_receipt', resolve) + self.assertNotIn('sent_supports', resolve) + self.assertNotIn('sent_tips', resolve) + self.assertNotIn('received_tips', resolve) + + +class ResolveClaimTakeovers(BaseResolveTestCase): + async def test_channel_invalidation(self): + channel_id = (await self.channel_create('@test', '0.1'))['outputs'][0]['claim_id'] + channel_id2 = (await self.channel_create('@other', '0.1'))['outputs'][0]['claim_id'] + + async def make_claim(name, amount, channel_id=None): + return ( + await self.stream_create(name, amount, channel_id=channel_id) + )['outputs'][0]['claim_id'] + + unsigned_then_signed = await make_claim('unsigned_then_signed', '0.1') + unsigned_then_updated_then_signed = await make_claim('unsigned_then_updated_then_signed', '0.1') + signed_then_unsigned = await make_claim( + 'signed_then_unsigned', '0.01', channel_id=channel_id + ) + signed_then_signed_different_chan = await make_claim( + 'signed_then_signed_different_chan', '0.01', channel_id=channel_id + ) + + self.assertIn("error", await self.resolve('@test/unsigned_then_signed')) + await self.assertMatchClaimIsWinning('unsigned_then_signed', unsigned_then_signed) + self.assertIn("error", await self.resolve('@test/unsigned_then_updated_then_signed')) + await self.assertMatchClaimIsWinning('unsigned_then_updated_then_signed', unsigned_then_updated_then_signed) + self.assertDictEqual( + await self.resolve('@test/signed_then_unsigned'), await self.resolve('signed_then_unsigned') + ) + await self.assertMatchClaimIsWinning('signed_then_unsigned', signed_then_unsigned) + # sign 'unsigned_then_signed' and update it + await 
self.ledger.wait(await self.daemon.jsonrpc_stream_update( + unsigned_then_signed, '0.09', channel_id=channel_id)) + + await self.ledger.wait(await self.daemon.jsonrpc_stream_update(unsigned_then_updated_then_signed, '0.09')) + await self.ledger.wait(await self.daemon.jsonrpc_stream_update( + unsigned_then_updated_then_signed, '0.09', channel_id=channel_id)) + + await self.ledger.wait(await self.daemon.jsonrpc_stream_update( + signed_then_unsigned, '0.09', clear_channel=True)) + + await self.ledger.wait(await self.daemon.jsonrpc_stream_update( + signed_then_signed_different_chan, '0.09', channel_id=channel_id2)) + + await self.daemon.jsonrpc_txo_spend(type='channel', claim_id=channel_id) + + signed3 = await make_claim('signed3', '0.01', channel_id=channel_id) + signed4 = await make_claim('signed4', '0.01', channel_id=channel_id2) + + self.assertIn("error", await self.resolve('@test')) + self.assertIn("error", await self.resolve('@test/signed1')) + self.assertIn("error", await self.resolve('@test/unsigned_then_updated_then_signed')) + self.assertIn("error", await self.resolve('@test/unsigned_then_signed')) + self.assertIn("error", await self.resolve('@test/signed3')) + self.assertIn("error", await self.resolve('@test/signed4')) + + await self.assertMatchClaimIsWinning('signed_then_unsigned', signed_then_unsigned) + await self.assertMatchClaimIsWinning('unsigned_then_signed', unsigned_then_signed) + await self.assertMatchClaimIsWinning('unsigned_then_updated_then_signed', unsigned_then_updated_then_signed) + await self.assertMatchClaimIsWinning('signed_then_signed_different_chan', signed_then_signed_different_chan) + await self.assertMatchClaimIsWinning('signed3', signed3) + await self.assertMatchClaimIsWinning('signed4', signed4) + + self.assertDictEqual(await self.resolve('@other/signed_then_signed_different_chan'), + await self.resolve('signed_then_signed_different_chan')) + self.assertDictEqual(await self.resolve('@other/signed4'), + await self.resolve('signed4')) + + async def _test_activation_delay(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(9) + # not yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + # the new claim should have activated + await self.assertMatchClaimIsWinning(name, second_claim_id) + return first_claim_id, second_claim_id + + async def test_activation_delay(self): + await self._test_activation_delay() + + async def test_activation_delay_then_abandon_then_reclaim(self): + name = 'derp' + first_claim_id, second_claim_id = await self._test_activation_delay() + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=second_claim_id) + await self.generate(1) + await self.assertNoClaimForName(name) + await self._test_activation_delay() + + async def create_stream_claim(self, amount: str, name='derp') -> str: + return (await self.stream_create(name, amount, 
allow_duplicate_name=True))['outputs'][0]['claim_id'] + + async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int, + non_winning_claims: List[ClaimStateValue]): + self.assertEqual(height, self.conductor.spv_node.server.bp.db.db_height) + await self.assertMatchClaimIsWinning(name, winning_claim_id) + for non_winning in non_winning_claims: + claim = await self.assertMatchClaim( + non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd + ) + self.assertEqual(non_winning.activation_height, claim.activation_height) + self.assertEqual(last_takeover_height, claim.last_takeover_height) + + async def test_delay_takeover_with_update(self): + name = 'derp' + first_claim_id = await self.create_stream_claim('0.2', name) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + second_claim_id = await self.create_stream_claim('0.1', name) + third_claim_id = await self.create_stream_claim('0.1', name) + await self.generate(8) + await self.assertNameState( + height=537, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=539, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') + await self.generate(1) + await self.assertNameState( + height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(9) + await self.assertNameState( + height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=550, name=name, winning_claim_id=third_claim_id, last_takeover_height=550, + non_winning_claims=[ + ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True), + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True) + ] + ) + + async def test_delay_takeover_with_update_then_update_to_lower_before_takeover(self): + name = 'derp' + first_claim_id = await self.create_stream_claim('0.2', name) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + second_claim_id = await self.create_stream_claim('0.1', name) + third_claim_id = await self.create_stream_claim('0.1', name) + await self.generate(8) + await self.assertNameState( + height=537, name=name, 
winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=539, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') + await self.generate(1) + await self.assertNameState( + height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(8) + await self.assertNameState( + height=548, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.09') + + await self.generate(1) + await self.assertNameState( + height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=559, active_in_lbrycrd=False) + ] + ) + await self.generate(10) + await self.assertNameState( + height=559, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=559, active_in_lbrycrd=True) + ] + ) + + async def test_delay_takeover_with_update_then_update_to_lower_on_takeover(self): + name = 'derp' + first_claim_id = await self.create_stream_claim('0.2', name) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + second_claim_id = await self.create_stream_claim('0.1', name) + third_claim_id = await self.create_stream_claim('0.1', name) + await self.generate(8) + await self.assertNameState( + height=537, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=539, name=name, 
winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') + await self.generate(1) + await self.assertNameState( + height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(8) + await self.assertNameState( + height=548, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.09') + await self.generate(1) + await self.assertNameState( + height=550, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=560, active_in_lbrycrd=False) + ] + ) + await self.generate(10) + await self.assertNameState( + height=560, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=560, active_in_lbrycrd=True) + ] + ) + + async def test_delay_takeover_with_update_then_update_to_lower_after_takeover(self): + name = 'derp' + first_claim_id = await self.create_stream_claim('0.2', name) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + second_claim_id = await self.create_stream_claim('0.1', name) + third_claim_id = await self.create_stream_claim('0.1', name) + await self.generate(8) + await self.assertNameState( + height=537, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=False), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + await self.generate(1) + await self.assertNameState( + height=538, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=539, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=539, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.21') + await self.generate(1) + 
await self.assertNameState( + height=540, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(8) + await self.assertNameState( + height=548, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=549, name=name, winning_claim_id=first_claim_id, last_takeover_height=207, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=550, active_in_lbrycrd=False) + ] + ) + + await self.generate(1) + await self.assertNameState( + height=550, name=name, winning_claim_id=third_claim_id, last_takeover_height=550, + non_winning_claims=[ + ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True), + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True) + ] + ) + + await self.daemon.jsonrpc_stream_update(third_claim_id, '0.09') + await self.generate(1) + await self.assertNameState( + height=551, name=name, winning_claim_id=first_claim_id, last_takeover_height=551, + non_winning_claims=[ + ClaimStateValue(second_claim_id, activation_height=538, active_in_lbrycrd=True), + ClaimStateValue(third_claim_id, activation_height=551, active_in_lbrycrd=True) + ] + ) + + async def test_resolve_signed_claims_with_fees(self): + channel_name = '@abc' + channel_id = self.get_claim_id( + await self.channel_create(channel_name, '0.01') + ) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + stream_name = 'foo' + stream_with_no_fee = self.get_claim_id( + await self.stream_create(stream_name, '0.01', channel_id=channel_id) + ) + stream_with_fee = self.get_claim_id( + await self.stream_create('with_a_fee', '0.01', channel_id=channel_id, fee_amount='1', fee_currency='LBC') + ) + greater_than_or_equal_to_zero = [ + claim['claim_id'] for claim in ( + await self.conductor.spv_node.server.bp.db.search_index.search( + channel_id=channel_id, fee_amount=">=0" + ))[0] + ] + self.assertEqual(2, len(greater_than_or_equal_to_zero)) + self.assertSetEqual(set(greater_than_or_equal_to_zero), {stream_with_no_fee, stream_with_fee}) + greater_than_zero = [ + claim['claim_id'] for claim in ( + await self.conductor.spv_node.server.bp.db.search_index.search( + channel_id=channel_id, fee_amount=">0" + ))[0] + ] + self.assertEqual(1, len(greater_than_zero)) + self.assertSetEqual(set(greater_than_zero), {stream_with_fee}) + equal_to_zero = [ + claim['claim_id'] for claim in ( + await self.conductor.spv_node.server.bp.db.search_index.search( + channel_id=channel_id, fee_amount="<=0" + ))[0] + ] + self.assertEqual(1, len(equal_to_zero)) + self.assertSetEqual(set(equal_to_zero), {stream_with_no_fee}) + + async def test_spec_example(self): + # https://spec.lbry.com/#claim-activation-example + # this test has adjusted block heights from the example because it uses the regtest chain instead of mainnet + # on regtest, claims expire much faster, so we can't do the ~1000 block delay in the spec example exactly + + name = 'test' + await self.generate(494) + 
address = (await self.account.receiving.get_addresses(True))[0] + await self.blockchain.send_to_address(address, 400.0) + await self.account.ledger.on_address.first + await self.generate(100) + self.assertEqual(800, self.conductor.spv_node.server.bp.db.db_height) + + # Block 801: Claim A for 10 LBC is accepted. + # It is the first claim, so it immediately becomes active and controlling. + # State: A(10) is controlling + claim_id_A = (await self.stream_create(name, '10.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1121: Claim B for 20 LBC is accepted. + # Its activation height is 1121 + min(4032, floor((1121-801) / 32)) = 1121 + 10 = 1131. + # State: A(10) is controlling, B(20) is accepted. + await self.generate(32 * 10 - 1) + self.assertEqual(1120, self.conductor.spv_node.server.bp.db.db_height) + claim_id_B = (await self.stream_create(name, '20.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + claim_B, _, _ = await self.conductor.spv_node.server.bp.db.fs_resolve(f"{name}:{claim_id_B}") + self.assertEqual(1121, self.conductor.spv_node.server.bp.db.db_height) + self.assertEqual(1131, claim_B.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1122: Support X for 14 LBC for claim A is accepted. + # Since it is a support for the controlling claim, it activates immediately. + # State: A(10+14) is controlling, B(20) is accepted. + await self.support_create(claim_id_A, bid='14.0') + self.assertEqual(1122, self.conductor.spv_node.server.bp.db.db_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1123: Claim C for 50 LBC is accepted. + # The activation height is 1123 + min(4032, floor((1123-801) / 32)) = 1123 + 10 = 1133. + # State: A(10+14) is controlling, B(20) is accepted, C(50) is accepted. + claim_id_C = (await self.stream_create(name, '50.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + self.assertEqual(1123, self.conductor.spv_node.server.bp.db.db_height) + claim_C, _, _ = await self.conductor.spv_node.server.bp.db.fs_resolve(f"{name}:{claim_id_C}") + self.assertEqual(1133, claim_C.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + await self.generate(7) + self.assertEqual(1130, self.conductor.spv_node.server.bp.db.db_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + await self.generate(1) + + # Block 1131: Claim B activates. It has 20 LBC, while claim A has 24 LBC (10 original + 14 from support X). There is no takeover, and claim A remains controlling. + # State: A(10+14) is controlling, B(20) is active, C(50) is accepted. + self.assertEqual(1131, self.conductor.spv_node.server.bp.db.db_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1132: Claim D for 300 LBC is accepted. The activation height is 1132 + min(4032, floor((1132-801) / 32)) = 1132 + 10 = 1142. + # State: A(10+14) is controlling, B(20) is active, C(50) is accepted, D(300) is accepted. 
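# --- editorial sketch, not part of the diff: the activation heights asserted in these tests
# follow the delay rule quoted in the spec-example comments above,
#     delay = min(4032, floor((accepted_height - last_takeover_height) / 32)).
# The helper below only re-derives the numbers used in the comments; its name and arguments
# are illustrative and are not SDK API.
def expected_activation_height(accepted_height: int, last_takeover_height: int) -> int:
    delay = min(4032, (accepted_height - last_takeover_height) // 32)
    return accepted_height + delay

# spec example: claims B, C and D are accepted at 1121, 1123 and 1132 after A's takeover at 801
assert expected_activation_height(1121, 801) == 1131
assert expected_activation_height(1123, 801) == 1133
assert expected_activation_height(1132, 801) == 1142
# delay-takeover tests above: a claim accepted at height 528 against a takeover at 207 activates at 538
assert expected_activation_height(528, 207) == 538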
+ claim_id_D = (await self.stream_create(name, '300.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + self.assertEqual(1132, self.conductor.spv_node.server.bp.db.db_height) + claim_D, _, _ = await self.conductor.spv_node.server.bp.db.fs_resolve(f"{name}:{claim_id_D}") + self.assertEqual(False, claim_D.is_controlling) + self.assertEqual(801, claim_D.last_takeover_height) + self.assertEqual(1142, claim_D.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_A) + + # Block 1133: Claim C activates. It has 50 LBC, while claim A has 24 LBC, so a takeover is initiated. The takeover height for this name is set to 1133, and therefore the activation delay for all the claims becomes min(4032, floor((1133-1133) / 32)) = 0. All the claims become active. The totals for each claim are recalculated, and claim D becomes controlling because it has the highest total. + # State: A(10+14) is active, B(20) is active, C(50) is active, D(300) is controlling + await self.generate(1) + self.assertEqual(1133, self.conductor.spv_node.server.bp.db.db_height) + claim_D, _, _ = await self.conductor.spv_node.server.bp.db.fs_resolve(f"{name}:{claim_id_D}") + self.assertEqual(True, claim_D.is_controlling) + self.assertEqual(1133, claim_D.last_takeover_height) + self.assertEqual(1133, claim_D.activation_height) + await self.assertMatchClaimIsWinning(name, claim_id_D) + + async def test_early_takeover(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # block 305, activates at 308 (but gets triggered early by the takeover by the second claim) + third_claim_id = (await self.stream_create(name, '0.3', allow_duplicate_name=True))['outputs'][0]['claim_id'] + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_early_takeover_zero_delay(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # on block 307 make a third claim with a yet higher amount, it takes over with no delay because the + # second claim activates and begins the takeover on this block + third_claim_id = (await self.stream_create(name, '0.3', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def 
test_early_takeover_from_support_zero_delay(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + third_claim_id = (await self.stream_create(name, '0.19', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + tx = await self.daemon.jsonrpc_support_create(third_claim_id, '0.1') + await self.ledger.wait(tx) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_early_takeover_from_support_and_claim_zero_delay(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + + file_path = self.create_upload_file(data=b'hi!') + tx = await self.daemon.jsonrpc_stream_create(name, '0.19', file_path=file_path, allow_duplicate_name=True) + await self.ledger.wait(tx) + third_claim_id = tx.outputs[0].claim_id + + wallet = self.daemon.wallet_manager.get_wallet_or_default(None) + funding_accounts = wallet.get_accounts_or_all(None) + amount = self.daemon.get_dewies_or_error("amount", '0.1') + account = wallet.get_account_or_default(None) + claim_address = await account.receiving.get_or_create_usable_address() + tx = await Transaction.support( + 'derp', third_claim_id, amount, claim_address, funding_accounts, funding_accounts[0], None + ) + await tx.sign(funding_accounts) + await self.daemon.broadcast_or_release(tx, True) + await self.ledger.wait(tx) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_early_takeover_abandoned_controlling_support(self): + name = 'derp' + # block 207 + first_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0][ + 'claim_id'] + tx = await self.daemon.jsonrpc_support_create(first_claim_id, '0.2') + await self.ledger.wait(tx) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(96) + # block 304, activates at 307 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0][ + 'claim_id'] + # block 305, activates at 308 (but gets triggered early by the takeover by the second claim) + third_claim_id = (await self.stream_create(name, '0.3', allow_duplicate_name=True))['outputs'][0][ + 'claim_id'] + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await 
self.assertMatchClaimIsWinning(name, first_claim_id) + await self.daemon.jsonrpc_txo_spend(type='support', txid=tx.id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, third_claim_id) + + async def test_block_takeover_with_delay_1_support(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + self.assertEqual(first_claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex()) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(8) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # prevent the takeover by adding a support one block before the takeover happens + await self.support_create(first_claim_id, bid='1.0') + # one more block until activation + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + + async def test_block_takeover_with_delay_0_support(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + await self.assertMatchClaimIsWinning(name, first_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(9) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # prevent the takeover by adding a support on the same block the takeover would happen + await self.support_create(first_claim_id, bid='1.0') + await self.assertMatchClaimIsWinning(name, first_claim_id) + + async def _test_almost_prevent_takeover(self, name: str, blocks: int = 9): + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(blocks) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # prevent the takeover by adding a support on the same block the takeover would happen + tx = await self.daemon.jsonrpc_support_create(first_claim_id, '1.0') + await self.ledger.wait(tx) + return first_claim_id, second_claim_id, tx + + async def test_almost_prevent_takeover_remove_support_same_block_supported(self): + name = 'derp' + first_claim_id, second_claim_id, tx = await self._test_almost_prevent_takeover(name, 9) + await self.daemon.jsonrpc_txo_spend(type='support', txid=tx.id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, 
second_claim_id) + + async def test_almost_prevent_takeover_remove_support_one_block_after_supported(self): + name = 'derp' + first_claim_id, second_claim_id, tx = await self._test_almost_prevent_takeover(name, 8) + await self.generate(1) + await self.daemon.jsonrpc_txo_spend(type='support', txid=tx.id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, second_claim_id) + + async def test_abandon_before_takeover(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(8) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # abandon the winning claim + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + await self.generate(1) + # the takeover and activation should happen a block earlier than they would have absent the abandon + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, second_claim_id) + + async def test_abandon_before_takeover_no_delay_update(self): # TODO: fix race condition line 506 + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(320) + # block 527 + # a claim of higher amount made now will have a takeover delay of 10 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + # block 528 + # sanity check + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.assertMatchClaimsForName(name) + await self.generate(8) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.assertMatchClaimsForName(name) + # abandon the winning claim + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + await self.daemon.jsonrpc_stream_update(second_claim_id, '0.1') + await self.generate(1) + + # the takeover and activation should happen a block earlier than they would have absent the abandon + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.assertMatchClaimsForName(name) + await self.generate(1) + # await self.ledger.on_header.where(lambda e: e.height == 537) + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.assertMatchClaimsForName(name) + + async def test_abandon_controlling_support_before_pending_takeover(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + controlling_support_tx = await self.daemon.jsonrpc_support_create(first_claim_id, '0.9') + await self.ledger.wait(controlling_support_tx) + self.assertEqual(first_claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex()) + await self.generate(321) + + second_claim_id = (await self.stream_create(name, '0.9', 
allow_duplicate_name=True))['outputs'][0]['claim_id'] + + self.assertNotEqual(first_claim_id, second_claim_id) + # takeover should not have happened yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(8) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # abandon the support that gives the winning claim the highest staked amount + tx = await self.daemon.jsonrpc_txo_spend(type='support', txid=controlling_support_tx.id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, first_claim_id) + # await self.assertMatchClaim(second_claim_id) + + await self.generate(1) + + await self.assertMatchClaim(first_claim_id) + await self.assertMatchClaimIsWinning(name, second_claim_id) + + async def test_remove_controlling_support(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.2'))['outputs'][0]['claim_id'] + first_support_tx = await self.daemon.jsonrpc_support_create(first_claim_id, '0.9') + await self.ledger.wait(first_support_tx) + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(320) # give the first claim a long enough head start for a 10 block takeover delay + await self.assertMatchClaimIsWinning(name, first_claim_id) + + # make a second claim which will take over the name + second_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + self.assertNotEqual(first_claim_id, second_claim_id) + second_claim_support_tx = await self.daemon.jsonrpc_support_create(second_claim_id, '1.5') + await self.ledger.wait(second_claim_support_tx) + await self.generate(1) # neither the second claim nor its support has activated yet + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(9) # claim activates, but is not yet winning + await self.assertMatchClaimIsWinning(name, first_claim_id) + + await self.generate(1) # support activates, takeover happens + await self.assertMatchClaimIsWinning(name, second_claim_id) + + await self.daemon.jsonrpc_txo_spend(type='support', claim_id=second_claim_id, blocking=True) + await self.generate(1) # the support is abandoned, so the takeover reverts to the first claim + await self.assertMatchClaimIsWinning(name, first_claim_id) + + async def test_claim_expiration(self): + name = 'derp' + # starts at height 206 + vanishing_claim = (await self.stream_create('vanish', '0.1'))['outputs'][0]['claim_id'] + + await self.generate(493) + # in blocks 701 and 702 + first_claim_id = (await self.stream_create(name, '0.3'))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning('vanish', vanishing_claim) + await self.generate(100) # block 801, expiration fork happened + await self.assertNoClaimForName('vanish') + # second claim is in block 802 + second_claim_id = (await self.stream_create(name, '0.2', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(498) + await self.assertMatchClaimIsWinning(name, first_claim_id) + await self.generate(1) + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.generate(100) + await self.assertMatchClaimIsWinning(name, second_claim_id) + await self.generate(1) + await self.assertNoClaimForName(name) + + async def _test_add_non_winning_already_claimed(self): + name = 'derp' + # initially claim the name + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + self.assertEqual(first_claim_id, (await 
self.assertMatchWinningClaim(name)).claim_hash.hex()) + await self.generate(32) + + second_claim_id = (await self.stream_create(name, '0.01', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertNoClaim(second_claim_id) + self.assertEqual( + len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 1 + ) + await self.generate(1) + await self.assertMatchClaim(second_claim_id) + self.assertEqual( + len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 2 + ) + + async def test_abandon_controlling_same_block_as_new_claim(self): + name = 'derp' + + first_claim_id = (await self.stream_create(name, '0.1'))['outputs'][0]['claim_id'] + await self.generate(64) + await self.assertNameState(271, name, first_claim_id, last_takeover_height=207, non_winning_claims=[]) + + await self.daemon.jsonrpc_txo_spend(type='stream', claim_id=first_claim_id) + second_claim_id = (await self.stream_create(name, '0.1', allow_duplicate_name=True))['outputs'][0]['claim_id'] + await self.assertNameState(272, name, second_claim_id, last_takeover_height=272, non_winning_claims=[]) + + async def test_trending(self): + async def get_trending_score(claim_id): + return (await self.conductor.spv_node.server.bp.db.search_index.search( + claim_id=claim_id + ))[0][0]['trending_score'] + + claim_id1 = (await self.stream_create('derp', '1.0'))['outputs'][0]['claim_id'] + claim_id2 = (await self.stream_create('derp', '1.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + claim_id3 = (await self.stream_create('derp', '1.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + claim_id4 = (await self.stream_create('derp', '1.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + claim_id5 = (await self.stream_create('derp', '1.0', allow_duplicate_name=True))['outputs'][0]['claim_id'] + + COIN = 1E9 + + height = 99000 + + self.conductor.spv_node.server.bp._add_claim_activation_change_notification( + claim_id1, height, True, 1 * COIN, 1_000_000 * COIN + ) + self.conductor.spv_node.server.bp._add_claim_activation_change_notification( + claim_id2, height, True, 1 * COIN, 100_000 * COIN + ) + self.conductor.spv_node.server.bp._add_claim_activation_change_notification( + claim_id2, height + 1, False, 100_001 * COIN, 100_000 * COIN + ) + self.conductor.spv_node.server.bp._add_claim_activation_change_notification( + claim_id3, height, True, 1 * COIN, 1_000 * COIN + ) + self.conductor.spv_node.server.bp._add_claim_activation_change_notification( + claim_id4, height, True, 1 * COIN, 10 * COIN + ) + await self.generate(1) + + self.assertEqual(3.1711298570548195e+76, await get_trending_score(claim_id1)) + self.assertEqual(-1.369652719234026e+74, await get_trending_score(claim_id2)) + self.assertEqual(2.925275298842502e+75, await get_trending_score(claim_id3)) + self.assertEqual(5.193711055804491e+74, await get_trending_score(claim_id4)) + self.assertEqual(0.6690521635580086, await get_trending_score(claim_id5)) + + self.conductor.spv_node.server.bp._add_claim_activation_change_notification( + claim_id5, height + 100, True, 2 * COIN, 10 * COIN + ) + await self.generate(1) + self.assertEqual(5.664516565750028e+74, await get_trending_score(claim_id5)) + + search_results = (await self.conductor.spv_node.server.bp.db.search_index.search(claim_name="derp"))[0] + self.assertEqual(5, len(search_results)) + self.assertListEqual([claim_id1, claim_id3, claim_id4, claim_id2, claim_id5], [c['claim_id'] for c in search_results]) + + +class 
ResolveAfterReorg(BaseResolveTestCase): + async def reorg(self, start): + blocks = self.ledger.headers.height - start + self.blockchain.block_expected = start - 1 + # go back to start + await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode()) + # regenerate to one block past the previous tip + await self.generate(blocks + 2) + + async def assertBlockHash(self, height): + bp = self.conductor.spv_node.server.bp + block_hash = await self.blockchain.get_block_hash(height) + + self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode()) + self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex()) + txids = [ + tx_hash[::-1].hex() for tx_hash in bp.db.get_block_txs(height) + ] + txs = await bp.db.fs_transactions(txids) + block_txs = (await bp.daemon.deserialised_block(block_hash))['tx'] + self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions') + self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are out of order') + + async def test_reorg(self): + self.assertEqual(self.ledger.headers.height, 206) + + channel_name = '@abc' + channel_id = self.get_claim_id( + await self.channel_create(channel_name, '0.01') + ) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + await self.reorg(206) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + + # await self.assertNoClaimForName(channel_name) + # self.assertNotIn('error', await self.resolve(channel_name)) + + stream_name = 'foo' + stream_id = self.get_claim_id( + await self.stream_create(stream_name, '0.01', channel_id=channel_id) + ) + self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex()) + await self.reorg(206) + self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex()) + + await self.support_create(stream_id, '0.01') + self.assertNotIn('error', await self.resolve(stream_name)) + self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex()) + await self.reorg(206) + # self.assertNotIn('error', await self.resolve(stream_name)) + self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex()) + + await self.stream_abandon(stream_id) + self.assertNotIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + await self.assertNoClaimForName(stream_name) + # TODO: check @abc/foo too + + await self.reorg(206) + self.assertNotIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex()) + await self.assertNoClaimForName(stream_name) + + await self.channel_abandon(channel_id) + self.assertIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + await self.reorg(206) + self.assertIn('error', await self.resolve(channel_name)) + self.assertIn('error', await self.resolve(stream_name)) + + async def test_reorg_change_claim_height(self): + # sanity check + result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both + self.assertIn('error', result) + + still_valid = await self.daemon.jsonrpc_stream_create( + 'still-valid', '1.0', 
file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(still_valid) + await self.generate(1) + # create a claim and verify it's returned by claim_search + self.assertEqual(self.ledger.headers.height, 207) + await self.assertBlockHash(207) + + broadcast_tx = await self.daemon.jsonrpc_stream_create( + 'hovercraft', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(broadcast_tx) + await self.support_create(still_valid.outputs[0].claim_id, '0.01') + + # await self.generate(1) + await self.ledger.wait(broadcast_tx, self.blockchain.block_expected) + self.assertEqual(self.ledger.headers.height, 208) + await self.assertBlockHash(208) + + claim = await self.resolve('hovercraft') + self.assertEqual(claim['txid'], broadcast_tx.id) + self.assertEqual(claim['height'], 208) + + # check that our tx is in block 208 as returned by lbrycrdd + invalidated_block_hash = (await self.ledger.headers.hash(208)).decode() + block_207 = await self.blockchain.get_block(invalidated_block_hash) + self.assertIn(claim['txid'], block_207['tx']) + self.assertEqual(208, claim['height']) + + # reorg the last block dropping our claim tx + await self.blockchain.invalidate_block(invalidated_block_hash) + await self.blockchain.clear_mempool() + await self.blockchain.generate(2) + + # wait for the client to catch up and verify the reorg + await asyncio.wait_for(self.on_header(209), 3.0) + await self.assertBlockHash(207) + await self.assertBlockHash(208) + await self.assertBlockHash(209) + + # verify the claim was dropped from block 208 as returned by lbrycrdd + reorg_block_hash = await self.blockchain.get_block_hash(208) + self.assertNotEqual(invalidated_block_hash, reorg_block_hash) + block_207 = await self.blockchain.get_block(reorg_block_hash) + self.assertNotIn(claim['txid'], block_207['tx']) + + client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode() + self.assertEqual(client_reorg_block_hash, reorg_block_hash) + + # verify the dropped claim is no longer returned by claim search + self.assertDictEqual( + {'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}}, + await self.resolve('hovercraft') + ) + + # verify the claim published a block earlier wasn't also reverted + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + # broadcast the claim in a different block + new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode()) + self.assertEqual(broadcast_tx.id, new_txid) + await self.blockchain.generate(1) + + # wait for the client to catch up + await asyncio.wait_for(self.on_header(210), 1.0) + + # verify the claim is in the new block and that it is returned by claim_search + republished = await self.resolve('hovercraft') + self.assertEqual(210, republished['height']) + self.assertEqual(claim['claim_id'], republished['claim_id']) + + # this should still be unchanged + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + async def test_reorg_drop_claim(self): + # sanity check + result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both + self.assertIn('error', result) + + still_valid = await self.daemon.jsonrpc_stream_create( + 'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(still_valid) + await self.generate(1) + + # create a claim and verify it's returned by claim_search + self.assertEqual(self.ledger.headers.height, 207) + await self.assertBlockHash(207) + + broadcast_tx = await 
self.daemon.jsonrpc_stream_create( + 'hovercraft', '1.0', file_path=self.create_upload_file(data=b'hi!') + ) + await self.ledger.wait(broadcast_tx) + await self.generate(1) + await self.ledger.wait(broadcast_tx, self.blockchain.block_expected) + self.assertEqual(self.ledger.headers.height, 208) + await self.assertBlockHash(208) + + claim = await self.resolve('hovercraft') + self.assertEqual(claim['txid'], broadcast_tx.id) + self.assertEqual(claim['height'], 208) + + # check that our tx is in block 208 as returned by lbrycrdd + invalidated_block_hash = (await self.ledger.headers.hash(208)).decode() + block_207 = await self.blockchain.get_block(invalidated_block_hash) + self.assertIn(claim['txid'], block_207['tx']) + self.assertEqual(208, claim['height']) + + # reorg the last block dropping our claim tx + await self.blockchain.invalidate_block(invalidated_block_hash) + await self.blockchain.clear_mempool() + await self.blockchain.generate(2) + + # wait for the client to catch up and verify the reorg + await asyncio.wait_for(self.on_header(209), 3.0) + await self.assertBlockHash(207) + await self.assertBlockHash(208) + await self.assertBlockHash(209) + + # verify the claim was dropped from block 208 as returned by lbrycrdd + reorg_block_hash = await self.blockchain.get_block_hash(208) + self.assertNotEqual(invalidated_block_hash, reorg_block_hash) + block_207 = await self.blockchain.get_block(reorg_block_hash) + self.assertNotIn(claim['txid'], block_207['tx']) + + client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode() + self.assertEqual(client_reorg_block_hash, reorg_block_hash) + + # verify the dropped claim is no longer returned by claim search + self.assertDictEqual( + {'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}}, + await self.resolve('hovercraft') + ) + + # verify the claim published a block earlier wasn't also reverted + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + # broadcast the claim in a different block + new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode()) + self.assertEqual(broadcast_tx.id, new_txid) + await self.blockchain.generate(1) + + # wait for the client to catch up + await asyncio.wait_for(self.on_header(210), 1.0) + + # verify the claim is in the new block and that it is returned by claim_search + republished = await self.resolve('hovercraft') + self.assertEqual(210, republished['height']) + self.assertEqual(claim['claim_id'], republished['claim_id']) + + # this should still be unchanged + self.assertEqual(207, (await self.resolve('still-valid'))['height']) + + +def generate_signed_legacy(address: bytes, output: Output): + decoded_address = Base58.decode(address) + claim = OldClaimMessage() + claim.ParseFromString(unhexlify( + '080110011aee04080112a604080410011a2b4865726520617265203520526561736f6e73204920e29da4e' + 'fb88f204e657874636c6f7564207c20544c4722920346696e64206f7574206d6f72652061626f7574204e' + '657874636c6f75643a2068747470733a2f2f6e657874636c6f75642e636f6d2f0a0a596f752063616e206' + '6696e64206d65206f6e20746865736520736f6369616c733a0a202a20466f72756d733a2068747470733a' + '2f2f666f72756d2e6865617679656c656d656e742e696f2f0a202a20506f64636173743a2068747470733' + 'a2f2f6f6666746f706963616c2e6e65740a202a2050617472656f6e3a2068747470733a2f2f7061747265' + '6f6e2e636f6d2f7468656c696e757867616d65720a202a204d657263683a2068747470733a2f2f7465657' + '37072696e672e636f6d2f73746f7265732f6f6666696369616c2d6c696e75782d67616d65720a202a2054' + 
'77697463683a2068747470733a2f2f7477697463682e74762f786f6e64616b0a202a20547769747465723' + 'a2068747470733a2f2f747769747465722e636f6d2f7468656c696e757867616d65720a0a2e2e2e0a6874' + '7470733a2f2f7777772e796f75747562652e636f6d2f77617463683f763d4672546442434f535f66632a0' + 'f546865204c696e75782047616d6572321c436f7079726967687465642028636f6e746163742061757468' + '6f722938004a2968747470733a2f2f6265726b2e6e696e6a612f7468756d626e61696c732f46725464424' + '34f535f666352005a001a41080110011a30040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc22' + 'f0bff70c4fe0b91fd36da9a375e3e1c171db825bf5d1f32209766964656f2f6d70342a5c080110031a406' + '2b2dd4c45e364030fbfad1a6fefff695ebf20ea33a5381b947753e2a0ca359989a5cc7d15e5392a0d354c' + '0b68498382b2701b22c03beb8dcb91089031b871e72214feb61536c007cdf4faeeaab4876cb397feaf6b51' + )) + claim.ClearField("publisherSignature") + digest = sha256(b''.join([ + decoded_address, + claim.SerializeToString(), + output.claim_hash[::-1] + ])) + signature = output.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256) + claim.publisherSignature.version = 1 + claim.publisherSignature.signatureType = 1 + claim.publisherSignature.signature = signature + claim.publisherSignature.certificateId = output.claim_hash[::-1] + return claim diff --git a/tests/integration/transactions/__init__.py b/tests/integration/transactions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/blockchain/test_internal_transaction_api.py b/tests/integration/transactions/test_internal_transaction_api.py similarity index 77% rename from tests/integration/blockchain/test_internal_transaction_api.py rename to tests/integration/transactions/test_internal_transaction_api.py index 6eba5e229..7f0f0c161 100644 --- a/tests/integration/blockchain/test_internal_transaction_api.py +++ b/tests/integration/transactions/test_internal_transaction_api.py @@ -17,13 +17,14 @@ class BasicTransactionTest(IntegrationTestCase): await self.account.ensure_address_gap() address1, address2 = await self.account.receiving.get_addresses(limit=2, only_usable=True) + notifications = asyncio.create_task(asyncio.wait( + [asyncio.ensure_future(self.on_address_update(address1)), + asyncio.ensure_future(self.on_address_update(address2))] + )) sendtxid1 = await self.blockchain.send_to_address(address1, 5) sendtxid2 = await self.blockchain.send_to_address(address2, 5) await self.blockchain.generate(1) - await asyncio.wait([ - self.on_transaction_id(sendtxid1), - self.on_transaction_id(sendtxid2) - ]) + await notifications self.assertEqual(d2l(await self.account.get_balance()), '10.0') @@ -44,18 +45,18 @@ class BasicTransactionTest(IntegrationTestCase): stream_txo.sign(channel_txo) await stream_tx.sign([self.account]) + notifications = asyncio.create_task(asyncio.wait( + [asyncio.ensure_future(self.ledger.wait(channel_tx)), asyncio.ensure_future(self.ledger.wait(stream_tx))] + )) + await self.broadcast(channel_tx) await self.broadcast(stream_tx) - await asyncio.wait([ # mempool - self.ledger.wait(channel_tx), - self.ledger.wait(stream_tx) - ]) + await notifications + notifications = asyncio.create_task(asyncio.wait( + [asyncio.ensure_future(self.ledger.wait(channel_tx)), asyncio.ensure_future(self.ledger.wait(stream_tx))] + )) await self.blockchain.generate(1) - await asyncio.wait([ # confirmed - self.ledger.wait(channel_tx), - self.ledger.wait(stream_tx) - ]) - + await notifications self.assertEqual(d2l(await self.account.get_balance()), '7.985786') self.assertEqual(d2l(await 
self.account.get_balance(include_claims=True)), '9.985786') @@ -63,10 +64,12 @@ class BasicTransactionTest(IntegrationTestCase): self.assertEqual(response['lbry://@bar/foo'].claim.claim_type, 'stream') abandon_tx = await Transaction.create([Input.spend(stream_tx.outputs[0])], [], [self.account], self.account) + notify = asyncio.create_task(self.ledger.wait(abandon_tx)) await self.broadcast(abandon_tx) - await self.ledger.wait(abandon_tx) + await notify + notify = asyncio.create_task(self.ledger.wait(abandon_tx)) await self.blockchain.generate(1) - await self.ledger.wait(abandon_tx) + await notify response = await self.ledger.resolve([], ['lbry://@bar/foo']) self.assertIn('error', response['lbry://@bar/foo']) diff --git a/tests/integration/blockchain/test_transaction_commands.py b/tests/integration/transactions/test_transaction_commands.py similarity index 100% rename from tests/integration/blockchain/test_transaction_commands.py rename to tests/integration/transactions/test_transaction_commands.py diff --git a/tests/integration/blockchain/test_transactions.py b/tests/integration/transactions/test_transactions.py similarity index 90% rename from tests/integration/blockchain/test_transactions.py rename to tests/integration/transactions/test_transactions.py index 8690698a6..fea0b18fb 100644 --- a/tests/integration/blockchain/test_transactions.py +++ b/tests/integration/transactions/test_transactions.py @@ -17,14 +17,15 @@ class BasicTransactionTests(IntegrationTestCase): # send 10 coins to first 10 receiving addresses and then 10 transactions worth 10 coins each # to the 10th receiving address for a total of 30 UTXOs on the entire account - sends = list(chain( - (self.blockchain.send_to_address(address, 10) for address in addresses[:10]), - (self.blockchain.send_to_address(addresses[9], 10) for _ in range(10)) - )) + for i in range(10): + notification = asyncio.ensure_future(self.on_address_update(addresses[i])) + txid = await self.blockchain.send_to_address(addresses[i], 10) + await notification + notification = asyncio.ensure_future(self.on_address_update(addresses[9])) + txid = await self.blockchain.send_to_address(addresses[9], 10) + await notification + # use batching to reduce issues with send_to_address on cli - for batch in range(0, len(sends), 10): - txids = await asyncio.gather(*sends[batch:batch+10]) - await asyncio.wait([self.on_transaction_id(txid) for txid in txids]) await self.assertBalance(self.account, '200.0') self.assertEqual(20, await self.account.get_utxo_count()) @@ -136,7 +137,7 @@ class BasicTransactionTests(IntegrationTestCase): await self.assertBalance(self.account, '0.0') address = await self.account.receiving.get_or_create_usable_address() # evil trick: mempool is unsorted on real life, but same order between python instances. 
reproduce it - original_summary = self.conductor.spv_node.server.mempool.transaction_summaries + original_summary = self.conductor.spv_node.server.bp.mempool.transaction_summaries def random_summary(*args, **kwargs): summary = original_summary(*args, **kwargs) @@ -145,7 +146,7 @@ class BasicTransactionTests(IntegrationTestCase): while summary == ordered: random.shuffle(summary) return summary - self.conductor.spv_node.server.mempool.transaction_summaries = random_summary + self.conductor.spv_node.server.bp.mempool.transaction_summaries = random_summary # 10 unconfirmed txs, all from blockchain wallet sends = [self.blockchain.send_to_address(address, 10) for _ in range(10)] # use batching to reduce issues with send_to_address on cli @@ -175,11 +176,6 @@ class BasicTransactionTests(IntegrationTestCase): self.assertEqual(21, len((await self.ledger.get_local_status_and_history(address))[1])) self.assertEqual(0, len(self.ledger._known_addresses_out_of_sync)) - def wait_for_txid(self, txid, address): - return self.ledger.on_transaction.where( - lambda e: e.tx.id == txid and e.address == address - ) - async def _test_transaction(self, send_amount, address, inputs, change): tx = await Transaction.create( [], [Output.pay_pubkey_hash(send_amount, self.ledger.address_to_hash160(address))], [self.account], @@ -204,6 +200,7 @@ class BasicTransactionTests(IntegrationTestCase): async def test_sqlite_coin_chooser(self): wallet_manager = WalletManager([self.wallet], {self.ledger.get_id(): self.ledger}) await self.blockchain.generate(300) + await self.assertBalance(self.account, '0.0') address = await self.account.receiving.get_or_create_usable_address() other_account = self.wallet.generate_account(self.ledger) @@ -211,14 +208,26 @@ class BasicTransactionTests(IntegrationTestCase): self.ledger.coin_selection_strategy = 'sqlite' await self.ledger.subscribe_account(self.account) - txids = [] - txids.append(await self.blockchain.send_to_address(address, 1.0)) - txids.append(await self.blockchain.send_to_address(address, 1.0)) - txids.append(await self.blockchain.send_to_address(address, 3.0)) - txids.append(await self.blockchain.send_to_address(address, 5.0)) - txids.append(await self.blockchain.send_to_address(address, 10.0)) + accepted = asyncio.ensure_future(self.on_address_update(address)) + txid = await self.blockchain.send_to_address(address, 1.0) + await accepted + + accepted = asyncio.ensure_future(self.on_address_update(address)) + txid = await self.blockchain.send_to_address(address, 1.0) + await accepted + + accepted = asyncio.ensure_future(self.on_address_update(address)) + txid = await self.blockchain.send_to_address(address, 3.0) + await accepted + + accepted = asyncio.ensure_future(self.on_address_update(address)) + txid = await self.blockchain.send_to_address(address, 5.0) + await accepted + + accepted = asyncio.ensure_future(self.on_address_update(address)) + txid = await self.blockchain.send_to_address(address, 10.0) + await accepted - await asyncio.wait([self.wait_for_txid(txid, address) for txid in txids], timeout=1) await self.assertBalance(self.account, '20.0') await self.assertSpendable([99992600, 99992600, 299992600, 499992600, 999992600]) diff --git a/tests/unit/wallet/server/reader.py b/tests/unit/wallet/server/reader.py deleted file mode 100644 index 0d8cb7d21..000000000 --- a/tests/unit/wallet/server/reader.py +++ /dev/null @@ -1,616 +0,0 @@ -import time -import struct -import sqlite3 -import logging -from operator import itemgetter -from typing import Tuple, List, Dict, Union, 
Type, Optional -from binascii import unhexlify -from decimal import Decimal -from contextvars import ContextVar -from functools import wraps -from itertools import chain -from dataclasses import dataclass - -from lbry.wallet.database import query, interpolate -from lbry.error import ResolveCensoredError -from lbry.schema.url import URL, normalize_name -from lbry.schema.tags import clean_tags -from lbry.schema.result import Outputs, Censor -from lbry.wallet import Ledger, RegTestLedger - -from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES - - -class SQLiteOperationalError(sqlite3.OperationalError): - def __init__(self, metrics): - super().__init__('sqlite query errored') - self.metrics = metrics - - -class SQLiteInterruptedError(sqlite3.OperationalError): - def __init__(self, metrics): - super().__init__('sqlite query interrupted') - self.metrics = metrics - - -ATTRIBUTE_ARRAY_MAX_LENGTH = 100 -sqlite3.enable_callback_tracebacks(True) - -INTEGER_PARAMS = { - 'height', 'creation_height', 'activation_height', 'expiration_height', - 'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount', - 'tx_position', 'channel_join', 'reposted', 'limit_claims_per_channel', - 'amount', 'effective_amount', 'support_amount', - 'trending_group', 'trending_mixed', - 'trending_local', 'trending_global', -} - -SEARCH_PARAMS = { - 'name', 'text', 'claim_id', 'claim_ids', 'txid', 'nout', 'channel', 'channel_ids', 'not_channel_ids', - 'public_key_id', 'claim_type', 'stream_types', 'media_types', 'fee_currency', - 'has_channel_signature', 'signature_valid', - 'any_tags', 'all_tags', 'not_tags', 'reposted_claim_id', - 'any_locations', 'all_locations', 'not_locations', - 'any_languages', 'all_languages', 'not_languages', - 'is_controlling', 'limit', 'offset', 'order_by', - 'no_totals', 'has_source' -} | INTEGER_PARAMS - - -ORDER_FIELDS = { - 'name', 'claim_hash' -} | INTEGER_PARAMS - - -@dataclass -class ReaderState: - db: sqlite3.Connection - stack: List[List] - metrics: Dict - is_tracking_metrics: bool - ledger: Type[Ledger] - query_timeout: float - log: logging.Logger - blocked_streams: Dict - blocked_channels: Dict - filtered_streams: Dict - filtered_channels: Dict - - def close(self): - self.db.close() - - def reset_metrics(self): - self.stack = [] - self.metrics = {} - - def set_query_timeout(self): - stop_at = time.perf_counter() + self.query_timeout - - def interruptor(): - if time.perf_counter() >= stop_at: - self.db.interrupt() - return - - self.db.set_progress_handler(interruptor, 100) - - def get_resolve_censor(self) -> Censor: - return Censor(Censor.RESOLVE) - - def get_search_censor(self, limit_claims_per_channel: int) -> Censor: - return Censor(Censor.SEARCH) - - -ctx: ContextVar[Optional[ReaderState]] = ContextVar('ctx') - - -def row_factory(cursor, row): - return { - k[0]: (set(row[i].split(',')) if k[0] == 'tags' else row[i]) - for i, k in enumerate(cursor.description) - } - - -def initializer(log, _path, _ledger_name, query_timeout, _measure=False, block_and_filter=None): - db = sqlite3.connect(_path, isolation_level=None, uri=True) - db.row_factory = row_factory - if block_and_filter: - blocked_streams, blocked_channels, filtered_streams, filtered_channels = block_and_filter - else: - blocked_streams = blocked_channels = filtered_streams = filtered_channels = {} - ctx.set( - ReaderState( - db=db, stack=[], metrics={}, is_tracking_metrics=_measure, - ledger=Ledger if _ledger_name == 'mainnet' else RegTestLedger, - 
query_timeout=query_timeout, log=log, - blocked_streams=blocked_streams, blocked_channels=blocked_channels, - filtered_streams=filtered_streams, filtered_channels=filtered_channels, - ) - ) - - -def cleanup(): - ctx.get().close() - ctx.set(None) - - -def measure(func): - @wraps(func) - def wrapper(*args, **kwargs): - state = ctx.get() - if not state.is_tracking_metrics: - return func(*args, **kwargs) - metric = {} - state.metrics.setdefault(func.__name__, []).append(metric) - state.stack.append([]) - start = time.perf_counter() - try: - return func(*args, **kwargs) - finally: - elapsed = int((time.perf_counter()-start)*1000) - metric['total'] = elapsed - metric['isolated'] = (elapsed-sum(state.stack.pop())) - if state.stack: - state.stack[-1].append(elapsed) - return wrapper - - -def reports_metrics(func): - @wraps(func) - def wrapper(*args, **kwargs): - state = ctx.get() - if not state.is_tracking_metrics: - return func(*args, **kwargs) - state.reset_metrics() - r = func(*args, **kwargs) - return r, state.metrics - return wrapper - - -@reports_metrics -def search_to_bytes(constraints) -> Union[bytes, Tuple[bytes, Dict]]: - return encode_result(search(constraints)) - - -@reports_metrics -def resolve_to_bytes(urls) -> Union[bytes, Tuple[bytes, Dict]]: - return encode_result(resolve(urls)) - - -def encode_result(result): - return Outputs.to_bytes(*result) - - -@measure -def execute_query(sql, values, row_offset: int, row_limit: int, censor: Censor) -> List: - context = ctx.get() - context.set_query_timeout() - try: - rows = context.db.execute(sql, values).fetchall() - return rows[row_offset:row_limit] - except sqlite3.OperationalError as err: - plain_sql = interpolate(sql, values) - if context.is_tracking_metrics: - context.metrics['execute_query'][-1]['sql'] = plain_sql - context.log.exception('failed running query', exc_info=err) - raise SQLiteOperationalError(context.metrics) - - -def claims_query(cols, for_count=False, **constraints) -> Tuple[str, Dict]: - if 'order_by' in constraints: - order_by_parts = constraints['order_by'] - if isinstance(order_by_parts, str): - order_by_parts = [order_by_parts] - sql_order_by = [] - for order_by in order_by_parts: - is_asc = order_by.startswith('^') - column = order_by[1:] if is_asc else order_by - if column not in ORDER_FIELDS: - raise NameError(f'{column} is not a valid order_by field') - if column == 'name': - column = 'normalized' - sql_order_by.append( - f"claim.{column} ASC" if is_asc else f"claim.{column} DESC" - ) - constraints['order_by'] = sql_order_by - - ops = {'<=': '__lte', '>=': '__gte', '<': '__lt', '>': '__gt'} - for constraint in INTEGER_PARAMS: - if constraint in constraints: - value = constraints.pop(constraint) - postfix = '' - if isinstance(value, str): - if len(value) >= 2 and value[:2] in ops: - postfix, value = ops[value[:2]], value[2:] - elif len(value) >= 1 and value[0] in ops: - postfix, value = ops[value[0]], value[1:] - if constraint == 'fee_amount': - value = Decimal(value)*1000 - constraints[f'claim.{constraint}{postfix}'] = int(value) - - if constraints.pop('is_controlling', False): - if {'sequence', 'amount_order'}.isdisjoint(constraints): - for_count = False - constraints['claimtrie.claim_hash__is_not_null'] = '' - if 'sequence' in constraints: - constraints['order_by'] = 'claim.activation_height ASC' - constraints['offset'] = int(constraints.pop('sequence')) - 1 - constraints['limit'] = 1 - if 'amount_order' in constraints: - constraints['order_by'] = 'claim.effective_amount DESC' - constraints['offset'] = 
int(constraints.pop('amount_order')) - 1 - constraints['limit'] = 1 - - if 'claim_id' in constraints: - claim_id = constraints.pop('claim_id') - if len(claim_id) == 40: - constraints['claim.claim_id'] = claim_id - else: - constraints['claim.claim_id__like'] = f'{claim_id[:40]}%' - elif 'claim_ids' in constraints: - constraints['claim.claim_id__in'] = set(constraints.pop('claim_ids')) - - if 'reposted_claim_id' in constraints: - constraints['claim.reposted_claim_hash'] = unhexlify(constraints.pop('reposted_claim_id'))[::-1] - - if 'name' in constraints: - constraints['claim.normalized'] = normalize_name(constraints.pop('name')) - - if 'public_key_id' in constraints: - constraints['claim.public_key_hash'] = ( - ctx.get().ledger.address_to_hash160(constraints.pop('public_key_id'))) - if 'channel_hash' in constraints: - constraints['claim.channel_hash'] = constraints.pop('channel_hash') - if 'channel_ids' in constraints: - channel_ids = constraints.pop('channel_ids') - if channel_ids: - constraints['claim.channel_hash__in'] = { - unhexlify(cid)[::-1] for cid in channel_ids if cid - } - if 'not_channel_ids' in constraints: - not_channel_ids = constraints.pop('not_channel_ids') - if not_channel_ids: - not_channel_ids_binary = { - unhexlify(ncid)[::-1] for ncid in not_channel_ids - } - constraints['claim.claim_hash__not_in#not_channel_ids'] = not_channel_ids_binary - if constraints.get('has_channel_signature', False): - constraints['claim.channel_hash__not_in'] = not_channel_ids_binary - else: - constraints['null_or_not_channel__or'] = { - 'claim.signature_valid__is_null': True, - 'claim.channel_hash__not_in': not_channel_ids_binary - } - if 'signature_valid' in constraints: - has_channel_signature = constraints.pop('has_channel_signature', False) - if has_channel_signature: - constraints['claim.signature_valid'] = constraints.pop('signature_valid') - else: - constraints['null_or_signature__or'] = { - 'claim.signature_valid__is_null': True, - 'claim.signature_valid': constraints.pop('signature_valid') - } - elif constraints.pop('has_channel_signature', False): - constraints['claim.signature_valid__is_not_null'] = True - - if 'txid' in constraints: - tx_hash = unhexlify(constraints.pop('txid'))[::-1] - nout = constraints.pop('nout', 0) - constraints['claim.txo_hash'] = tx_hash + struct.pack(' List: - if 'channel' in constraints: - channel_url = constraints.pop('channel') - match = resolve_url(channel_url) - if isinstance(match, dict): - constraints['channel_hash'] = match['claim_hash'] - else: - return [{'row_count': 0}] if cols == 'count(*) as row_count' else [] - row_offset = constraints.pop('offset', 0) - row_limit = constraints.pop('limit', 20) - sql, values = claims_query(cols, for_count, **constraints) - return execute_query(sql, values, row_offset, row_limit, censor) - - -@measure -def count_claims(**constraints) -> int: - constraints.pop('offset', None) - constraints.pop('limit', None) - constraints.pop('order_by', None) - count = select_claims(Censor(Censor.SEARCH), 'count(*) as row_count', for_count=True, **constraints) - return count[0]['row_count'] - - -def search_claims(censor: Censor, **constraints) -> List: - return select_claims( - censor, - """ - claimtrie.claim_hash as is_controlling, - claimtrie.last_take_over_height, - claim.claim_hash, claim.txo_hash, - claim.claims_in_channel, claim.reposted, - claim.height, claim.creation_height, - claim.activation_height, claim.expiration_height, - claim.effective_amount, claim.support_amount, - claim.trending_group, claim.trending_mixed, - 
claim.trending_local, claim.trending_global, - claim.short_url, claim.canonical_url, - claim.channel_hash, claim.reposted_claim_hash, - claim.signature_valid - """, **constraints - ) - - -def _get_referenced_rows(txo_rows: List[dict], censor_channels: List[bytes]): - censor = ctx.get().get_resolve_censor() - repost_hashes = set(filter(None, map(itemgetter('reposted_claim_hash'), txo_rows))) - channel_hashes = set(chain( - filter(None, map(itemgetter('channel_hash'), txo_rows)), - censor_channels - )) - - reposted_txos = [] - if repost_hashes: - reposted_txos = search_claims(censor, **{'claim.claim_hash__in': repost_hashes}) - channel_hashes |= set(filter(None, map(itemgetter('channel_hash'), reposted_txos))) - - channel_txos = [] - if channel_hashes: - channel_txos = search_claims(censor, **{'claim.claim_hash__in': channel_hashes}) - - # channels must come first for client side inflation to work properly - return channel_txos + reposted_txos - -@measure -def search(constraints) -> Tuple[List, List, int, int, Censor]: - assert set(constraints).issubset(SEARCH_PARAMS), \ - f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}" - total = None - limit_claims_per_channel = constraints.pop('limit_claims_per_channel', None) - if not constraints.pop('no_totals', False): - total = count_claims(**constraints) - constraints['offset'] = abs(constraints.get('offset', 0)) - constraints['limit'] = min(abs(constraints.get('limit', 10)), 50) - context = ctx.get() - search_censor = context.get_search_censor(limit_claims_per_channel) - txo_rows = search_claims(search_censor, **constraints) - extra_txo_rows = _get_referenced_rows(txo_rows, search_censor.censored.keys()) - return txo_rows, extra_txo_rows, constraints['offset'], total, search_censor - - -@measure -def resolve(urls) -> Tuple[List, List]: - txo_rows = [resolve_url(raw_url) for raw_url in urls] - extra_txo_rows = _get_referenced_rows( - [txo for txo in txo_rows if isinstance(txo, dict)], - [txo.censor_id for txo in txo_rows if isinstance(txo, ResolveCensoredError)] - ) - return txo_rows, extra_txo_rows - - -@measure -def resolve_url(raw_url): - censor = ctx.get().get_resolve_censor() - - try: - url = URL.parse(raw_url) - except ValueError as e: - return e - - channel = None - - if url.has_channel: - query = url.channel.to_dict() - if set(query) == {'name'}: - query['is_controlling'] = True - else: - query['order_by'] = ['^creation_height'] - matches = search_claims(censor, **query, limit=1) - if matches: - channel = matches[0] - elif censor.censored: - return ResolveCensoredError(raw_url, next(iter(censor.censored))) - else: - return LookupError(f'Could not find channel in "{raw_url}".') - - if url.has_stream: - query = url.stream.to_dict() - if channel is not None: - if set(query) == {'name'}: - # temporarily emulate is_controlling for claims in channel - query['order_by'] = ['effective_amount', '^height'] - else: - query['order_by'] = ['^channel_join'] - query['channel_hash'] = channel['claim_hash'] - query['signature_valid'] = 1 - elif set(query) == {'name'}: - query['is_controlling'] = 1 - matches = search_claims(censor, **query, limit=1) - if matches: - return matches[0] - elif censor.censored: - return ResolveCensoredError(raw_url, next(iter(censor.censored))) - else: - return LookupError(f'Could not find claim at "{raw_url}".') - - return channel - - -CLAIM_HASH_OR_REPOST_HASH_SQL = f""" -CASE WHEN claim.claim_type = {CLAIM_TYPES['repost']} - THEN claim.reposted_claim_hash - ELSE claim.claim_hash -END -""" 
- - -def _apply_constraints_for_array_attributes(constraints, attr, cleaner, for_count=False): - any_items = set(cleaner(constraints.pop(f'any_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH]) - all_items = set(cleaner(constraints.pop(f'all_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH]) - not_items = set(cleaner(constraints.pop(f'not_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH]) - - all_items = {item for item in all_items if item not in not_items} - any_items = {item for item in any_items if item not in not_items} - - any_queries = {} - - if attr == 'tag': - common_tags = any_items & COMMON_TAGS.keys() - if common_tags: - any_items -= common_tags - if len(common_tags) < 5: - for item in common_tags: - index_name = COMMON_TAGS[item] - any_queries[f'#_common_tag_{index_name}'] = f""" - EXISTS( - SELECT 1 FROM tag INDEXED BY tag_{index_name}_idx - WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash - AND tag = '{item}' - ) - """ - elif len(common_tags) >= 5: - constraints.update({ - f'$any_common_tag{i}': item for i, item in enumerate(common_tags) - }) - values = ', '.join( - f':$any_common_tag{i}' for i in range(len(common_tags)) - ) - any_queries[f'#_any_common_tags'] = f""" - EXISTS( - SELECT 1 FROM tag WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash - AND tag IN ({values}) - ) - """ - elif attr == 'language': - indexed_languages = any_items & set(INDEXED_LANGUAGES) - if indexed_languages: - any_items -= indexed_languages - for language in indexed_languages: - any_queries[f'#_any_common_languages_{language}'] = f""" - EXISTS( - SELECT 1 FROM language INDEXED BY language_{language}_idx - WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=language.claim_hash - AND language = '{language}' - ) - """ - - if any_items: - - constraints.update({ - f'$any_{attr}{i}': item for i, item in enumerate(any_items) - }) - values = ', '.join( - f':$any_{attr}{i}' for i in range(len(any_items)) - ) - if for_count or attr == 'tag': - if attr == 'tag': - any_queries[f'#_any_{attr}'] = f""" - ((claim.claim_type != {CLAIM_TYPES['repost']} - AND claim.claim_hash IN (SELECT claim_hash FROM tag WHERE tag IN ({values}))) OR - (claim.claim_type == {CLAIM_TYPES['repost']} AND - claim.reposted_claim_hash IN (SELECT claim_hash FROM tag WHERE tag IN ({values})))) - """ - else: - any_queries[f'#_any_{attr}'] = f""" - {CLAIM_HASH_OR_REPOST_HASH_SQL} IN ( - SELECT claim_hash FROM {attr} WHERE {attr} IN ({values}) - ) - """ - else: - any_queries[f'#_any_{attr}'] = f""" - EXISTS( - SELECT 1 FROM {attr} WHERE - {CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash - AND {attr} IN ({values}) - ) - """ - - if len(any_queries) == 1: - constraints.update(any_queries) - elif len(any_queries) > 1: - constraints[f'ORed_{attr}_queries__any'] = any_queries - - if all_items: - constraints[f'$all_{attr}_count'] = len(all_items) - constraints.update({ - f'$all_{attr}{i}': item for i, item in enumerate(all_items) - }) - values = ', '.join( - f':$all_{attr}{i}' for i in range(len(all_items)) - ) - if for_count: - constraints[f'#_all_{attr}'] = f""" - {CLAIM_HASH_OR_REPOST_HASH_SQL} IN ( - SELECT claim_hash FROM {attr} WHERE {attr} IN ({values}) - GROUP BY claim_hash HAVING COUNT({attr}) = :$all_{attr}_count - ) - """ - else: - constraints[f'#_all_{attr}'] = f""" - {len(all_items)}=( - SELECT count(*) FROM {attr} WHERE - {CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash - AND {attr} IN ({values}) - ) - """ - - if not_items: - constraints.update({ - f'$not_{attr}{i}': item for i, item in enumerate(not_items) - }) - values = ', '.join( - f':$not_{attr}{i}' for i 
in range(len(not_items)) - ) - if for_count: - if attr == 'tag': - constraints[f'#_not_{attr}'] = f""" - ((claim.claim_type != {CLAIM_TYPES['repost']} - AND claim.claim_hash NOT IN (SELECT claim_hash FROM tag WHERE tag IN ({values}))) OR - (claim.claim_type == {CLAIM_TYPES['repost']} AND - claim.reposted_claim_hash NOT IN (SELECT claim_hash FROM tag WHERE tag IN ({values})))) - """ - else: - constraints[f'#_not_{attr}'] = f""" - {CLAIM_HASH_OR_REPOST_HASH_SQL} NOT IN ( - SELECT claim_hash FROM {attr} WHERE {attr} IN ({values}) - ) - """ - else: - constraints[f'#_not_{attr}'] = f""" - NOT EXISTS( - SELECT 1 FROM {attr} WHERE - {CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash - AND {attr} IN ({values}) - ) - """ diff --git a/tests/unit/wallet/server/test_migration.py b/tests/unit/wallet/server/test_migration.py index 9aa8cccee..fe49c0e39 100644 --- a/tests/unit/wallet/server/test_migration.py +++ b/tests/unit/wallet/server/test_migration.py @@ -1,57 +1,57 @@ -import unittest -from shutil import rmtree -from tempfile import mkdtemp - -from lbry.wallet.server.history import History -from lbry.wallet.server.storage import LevelDB - - -# dumped from a real history database. Aside from the state, all records are : -STATE_RECORD = (b'state\x00\x00', b"{'flush_count': 21497, 'comp_flush_count': -1, 'comp_cursor': -1, 'db_version': 0}") -UNMIGRATED_RECORDS = { - '00538b2cbe4a5f1be2dc320241': 'f5ed500142ee5001', - '00538b48def1904014880501f2': 'b9a52a01baa52a01', - '00538cdcf989b74de32c5100ca': 'c973870078748700', - '00538d42d5df44603474284ae1': 'f5d9d802', - '00538d42d5df44603474284ae2': '75dad802', - '00538ebc879dac6ddbee9e0029': '3ca42f0042a42f00', - '00538ed1d391327208748200bc': '804e7d00af4e7d00', - '00538f3de41d9e33affa0300c2': '7de8810086e88100', - '00539007f87792d98422c505a5': '8c5a7202445b7202', - '0053902cf52ee9682d633b0575': 'eb0f64026c106402', - '005390e05674571551632205a2': 'a13d7102e13d7102', - '0053914ef25a9ceed927330584': '78096902960b6902', - '005391768113f69548f37a01b1': 'a5b90b0114ba0b01', - '005391a289812669e5b44c02c2': '33da8a016cdc8a01', -} - - -class TestHistoryDBMigration(unittest.TestCase): - def test_migrate_flush_count_from_16_to_32_bits(self): - self.history = History() - tmpdir = mkdtemp() - self.addCleanup(lambda: rmtree(tmpdir)) - LevelDB.import_module() - db = LevelDB(tmpdir, 'hist', True) - with db.write_batch() as batch: - for key, value in UNMIGRATED_RECORDS.items(): - batch.put(bytes.fromhex(key), bytes.fromhex(value)) - batch.put(*STATE_RECORD) - self.history.db = db - self.history.read_state() - self.assertEqual(21497, self.history.flush_count) - self.assertEqual(0, self.history.db_version) - self.assertTrue(self.history.needs_migration) - self.history.migrate() - self.assertFalse(self.history.needs_migration) - self.assertEqual(1, self.history.db_version) - for idx, (key, value) in enumerate(sorted(db.iterator())): - if key == b'state\x00\x00': - continue - key, counter = key[:-4], key[-4:] - expected_value = UNMIGRATED_RECORDS[key.hex() + counter.hex()[-4:]] - self.assertEqual(value.hex(), expected_value) - - -if __name__ == '__main__': - unittest.main() +# import unittest +# from shutil import rmtree +# from tempfile import mkdtemp +# +# from lbry.wallet.server.history import History +# from lbry.wallet.server.storage import LevelDB +# +# +# # dumped from a real history database. 
Aside from the state, all records are : +# STATE_RECORD = (b'state\x00\x00', b"{'flush_count': 21497, 'comp_flush_count': -1, 'comp_cursor': -1, 'db_version': 0}") +# UNMIGRATED_RECORDS = { +# '00538b2cbe4a5f1be2dc320241': 'f5ed500142ee5001', +# '00538b48def1904014880501f2': 'b9a52a01baa52a01', +# '00538cdcf989b74de32c5100ca': 'c973870078748700', +# '00538d42d5df44603474284ae1': 'f5d9d802', +# '00538d42d5df44603474284ae2': '75dad802', +# '00538ebc879dac6ddbee9e0029': '3ca42f0042a42f00', +# '00538ed1d391327208748200bc': '804e7d00af4e7d00', +# '00538f3de41d9e33affa0300c2': '7de8810086e88100', +# '00539007f87792d98422c505a5': '8c5a7202445b7202', +# '0053902cf52ee9682d633b0575': 'eb0f64026c106402', +# '005390e05674571551632205a2': 'a13d7102e13d7102', +# '0053914ef25a9ceed927330584': '78096902960b6902', +# '005391768113f69548f37a01b1': 'a5b90b0114ba0b01', +# '005391a289812669e5b44c02c2': '33da8a016cdc8a01', +# } +# +# +# class TestHistoryDBMigration(unittest.TestCase): +# def test_migrate_flush_count_from_16_to_32_bits(self): +# self.history = History() +# tmpdir = mkdtemp() +# self.addCleanup(lambda: rmtree(tmpdir)) +# LevelDB.import_module() +# db = LevelDB(tmpdir, 'hist', True) +# with db.write_batch() as batch: +# for key, value in UNMIGRATED_RECORDS.items(): +# batch.put(bytes.fromhex(key), bytes.fromhex(value)) +# batch.put(*STATE_RECORD) +# self.history.db = db +# self.history.read_state() +# self.assertEqual(21497, self.history.flush_count) +# self.assertEqual(0, self.history.db_version) +# self.assertTrue(self.history.needs_migration) +# self.history.migrate() +# self.assertFalse(self.history.needs_migration) +# self.assertEqual(1, self.history.db_version) +# for idx, (key, value) in enumerate(sorted(db.iterator())): +# if key == b'state\x00\x00': +# continue +# key, counter = key[:-4], key[-4:] +# expected_value = UNMIGRATED_RECORDS[key.hex() + counter.hex()[-4:]] +# self.assertEqual(value.hex(), expected_value) +# +# +# if __name__ == '__main__': +# unittest.main() diff --git a/tests/unit/wallet/server/test_revertable.py b/tests/unit/wallet/server/test_revertable.py new file mode 100644 index 000000000..f5729689a --- /dev/null +++ b/tests/unit/wallet/server/test_revertable.py @@ -0,0 +1,150 @@ +import unittest +import tempfile +import shutil +from lbry.wallet.server.db.revertable import RevertableOpStack, RevertableDelete, RevertablePut, OpStackIntegrity +from lbry.wallet.server.db.prefixes import ClaimToTXOPrefixRow, HubDB + + +class TestRevertableOpStack(unittest.TestCase): + def setUp(self): + self.fake_db = {} + self.stack = RevertableOpStack(self.fake_db.get) + + def tearDown(self) -> None: + self.stack.clear() + self.fake_db.clear() + + def process_stack(self): + for op in self.stack: + if op.is_put: + self.fake_db[op.key] = op.value + else: + self.fake_db.pop(op.key) + self.stack.clear() + + def update(self, key1: bytes, value1: bytes, key2: bytes, value2: bytes): + self.stack.append_op(RevertableDelete(key1, value1)) + self.stack.append_op(RevertablePut(key2, value2)) + + def test_simplify(self): + key1 = ClaimToTXOPrefixRow.pack_key(b'\x01' * 20) + key2 = ClaimToTXOPrefixRow.pack_key(b'\x02' * 20) + key3 = ClaimToTXOPrefixRow.pack_key(b'\x03' * 20) + key4 = ClaimToTXOPrefixRow.pack_key(b'\x04' * 20) + + val1 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'derp') + val2 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'oops') + val3 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'other') + + # check that we can't delete a non existent value + with 
self.assertRaises(OpStackIntegrity): + self.stack.append_op(RevertableDelete(key1, val1)) + + self.stack.append_op(RevertablePut(key1, val1)) + self.assertEqual(1, len(self.stack)) + self.stack.append_op(RevertableDelete(key1, val1)) + self.assertEqual(0, len(self.stack)) + + self.stack.append_op(RevertablePut(key1, val1)) + self.assertEqual(1, len(self.stack)) + # try to delete the wrong value + with self.assertRaises(OpStackIntegrity): + self.stack.append_op(RevertableDelete(key2, val2)) + + self.stack.append_op(RevertableDelete(key1, val1)) + self.assertEqual(0, len(self.stack)) + self.stack.append_op(RevertablePut(key2, val3)) + self.assertEqual(1, len(self.stack)) + + self.process_stack() + + self.assertDictEqual({key2: val3}, self.fake_db) + + # check that we can't put on top of the existing stored value + with self.assertRaises(OpStackIntegrity): + self.stack.append_op(RevertablePut(key2, val1)) + + self.assertEqual(0, len(self.stack)) + self.stack.append_op(RevertableDelete(key2, val3)) + self.assertEqual(1, len(self.stack)) + self.stack.append_op(RevertablePut(key2, val3)) + self.assertEqual(0, len(self.stack)) + + self.update(key2, val3, key2, val1) + self.assertEqual(2, len(self.stack)) + + self.process_stack() + self.assertDictEqual({key2: val1}, self.fake_db) + + self.update(key2, val1, key2, val2) + self.assertEqual(2, len(self.stack)) + self.update(key2, val2, key2, val3) + self.update(key2, val3, key2, val2) + self.update(key2, val2, key2, val3) + self.update(key2, val3, key2, val2) + with self.assertRaises(OpStackIntegrity): + self.update(key2, val3, key2, val2) + self.update(key2, val2, key2, val3) + self.assertEqual(2, len(self.stack)) + self.stack.append_op(RevertableDelete(key2, val3)) + self.process_stack() + self.assertDictEqual({}, self.fake_db) + + self.stack.append_op(RevertablePut(key2, val3)) + self.process_stack() + with self.assertRaises(OpStackIntegrity): + self.update(key2, val2, key2, val2) + self.update(key2, val3, key2, val2) + self.assertDictEqual({key2: val3}, self.fake_db) + undo = self.stack.get_undo_ops() + self.process_stack() + self.assertDictEqual({key2: val2}, self.fake_db) + self.stack.apply_packed_undo_ops(undo) + self.process_stack() + self.assertDictEqual({key2: val3}, self.fake_db) + + +class TestRevertablePrefixDB(unittest.TestCase): + def setUp(self): + self.tmp_dir = tempfile.mkdtemp() + self.db = HubDB(self.tmp_dir, cache_mb=1, max_open_files=32) + + def tearDown(self) -> None: + self.db.close() + shutil.rmtree(self.tmp_dir) + + def test_rollback(self): + name = 'derp' + claim_hash1 = 20 * b'\x00' + claim_hash2 = 20 * b'\x01' + claim_hash3 = 20 * b'\x02' + + takeover_height = 10000000 + + self.assertIsNone(self.db.claim_takeover.get(name)) + self.db.claim_takeover.stage_put((name,), (claim_hash1, takeover_height)) + self.db.commit(10000000) + self.assertEqual(10000000, self.db.claim_takeover.get(name).height) + + self.db.claim_takeover.stage_delete((name,), (claim_hash1, takeover_height)) + self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 1)) + self.db.claim_takeover.stage_delete((name,), (claim_hash2, takeover_height + 1)) + self.db.commit(10000001) + self.assertIsNone(self.db.claim_takeover.get(name)) + self.db.claim_takeover.stage_put((name,), (claim_hash3, takeover_height + 2)) + self.db.commit(10000002) + self.assertEqual(10000002, self.db.claim_takeover.get(name).height) + + self.db.claim_takeover.stage_delete((name,), (claim_hash3, takeover_height + 2)) + self.db.claim_takeover.stage_put((name,), 
(claim_hash2, takeover_height + 3)) + self.db.commit(10000003) + self.assertEqual(10000003, self.db.claim_takeover.get(name).height) + + self.db.rollback(10000003) + self.assertEqual(10000002, self.db.claim_takeover.get(name).height) + self.db.rollback(10000002) + self.assertIsNone(self.db.claim_takeover.get(name)) + self.db.rollback(10000001) + self.assertEqual(10000000, self.db.claim_takeover.get(name).height) + self.db.rollback(10000000) + self.assertIsNone(self.db.claim_takeover.get(name)) diff --git a/tests/unit/wallet/server/test_sqldb.py b/tests/unit/wallet/server/test_sqldb.py deleted file mode 100644 index 52753ad99..000000000 --- a/tests/unit/wallet/server/test_sqldb.py +++ /dev/null @@ -1,765 +0,0 @@ -import unittest -import ecdsa -import hashlib -import logging -from binascii import hexlify -from typing import List, Tuple - -from lbry.wallet.constants import COIN, NULL_HASH32 -from lbry.schema.claim import Claim -from lbry.schema.result import Censor -from lbry.wallet.server.db import writer -from lbry.wallet.server.coin import LBCRegTest -from lbry.wallet.server.db.trending import zscore -from lbry.wallet.server.db.canonical import FindShortestID -from lbry.wallet.server.block_processor import Timer -from lbry.wallet.transaction import Transaction, Input, Output -try: - import reader -except: - from . import reader - - -def get_output(amount=COIN, pubkey_hash=NULL_HASH32): - return Transaction() \ - .add_outputs([Output.pay_pubkey_hash(amount, pubkey_hash)]) \ - .outputs[0] - - -def get_input(): - return Input.spend(get_output()) - - -def get_tx(): - return Transaction().add_inputs([get_input()]) - - -def search(**constraints) -> List: - return reader.search_claims(Censor(Censor.SEARCH), **constraints) - - -def censored_search(**constraints) -> Tuple[List, Censor]: - rows, _, _, _, censor = reader.search(constraints) - return rows, censor - - -class TestSQLDB(unittest.TestCase): - query_timeout = 0.25 - - def setUp(self): - self.first_sync = False - self.daemon_height = 1 - self.coin = LBCRegTest() - db_url = 'file:test_sqldb?mode=memory&cache=shared' - self.sql = writer.SQLDB(self, db_url, [], [], [zscore]) - self.addCleanup(self.sql.close) - self.sql.open() - reader.initializer( - logging.getLogger(__name__), db_url, 'regtest', - self.query_timeout, block_and_filter=( - self.sql.blocked_streams, self.sql.blocked_channels, - self.sql.filtered_streams, self.sql.filtered_channels - ) - ) - self.addCleanup(reader.cleanup) - self.timer = Timer('BlockProcessor') - self._current_height = 0 - self._txos = {} - - def _make_tx(self, output, txi=None): - tx = get_tx().add_outputs([output]) - if txi is not None: - tx.add_inputs([txi]) - self._txos[output.ref.hash] = output - return tx, tx.hash - - def _set_channel_key(self, channel, key): - private_key = ecdsa.SigningKey.from_string(key*32, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256) - channel.private_key = private_key - channel.claim.channel.public_key_bytes = private_key.get_verifying_key().to_der() - channel.script.generate() - - def get_channel(self, title, amount, name='@foo', key=b'a'): - claim = Claim() - claim.channel.title = title - channel = Output.pay_claim_name_pubkey_hash(amount, name, claim, b'abc') - self._set_channel_key(channel, key) - return self._make_tx(channel) - - def get_channel_update(self, channel, amount, key=b'a'): - self._set_channel_key(channel, key) - return self._make_tx( - Output.pay_update_claim_pubkey_hash( - amount, channel.claim_name, channel.claim_id, channel.claim, b'abc' - ), - 
Input.spend(channel) - ) - - def get_stream(self, title, amount, name='foo', channel=None, **kwargs): - claim = Claim() - claim.stream.update(title=title, **kwargs) - result = self._make_tx(Output.pay_claim_name_pubkey_hash(amount, name, claim, b'abc')) - if channel: - result[0].outputs[0].sign(channel) - result[0]._reset() - return result - - def get_stream_update(self, tx, amount, channel=None): - stream = Transaction(tx[0].raw).outputs[0] - result = self._make_tx( - Output.pay_update_claim_pubkey_hash( - amount, stream.claim_name, stream.claim_id, stream.claim, b'abc' - ), - Input.spend(stream) - ) - if channel: - result[0].outputs[0].sign(channel) - result[0]._reset() - return result - - def get_repost(self, claim_id, amount, channel): - claim = Claim() - claim.repost.reference.claim_id = claim_id - result = self._make_tx(Output.pay_claim_name_pubkey_hash(amount, 'repost', claim, b'abc')) - result[0].outputs[0].sign(channel) - result[0]._reset() - return result - - def get_abandon(self, tx): - claim = Transaction(tx[0].raw).outputs[0] - return self._make_tx( - Output.pay_pubkey_hash(claim.amount, b'abc'), - Input.spend(claim) - ) - - def get_support(self, tx, amount): - claim = Transaction(tx[0].raw).outputs[0] - return self._make_tx( - Output.pay_support_pubkey_hash( - amount, claim.claim_name, claim.claim_id, b'abc' - ) - ) - - def get_controlling(self): - for claim in self.sql.execute("select claim.* from claimtrie natural join claim"): - txo = self._txos[claim.txo_hash] - controlling = txo.claim.stream.title, claim.amount, claim.effective_amount, claim.activation_height - return controlling - - def get_active(self): - controlling = self.get_controlling() - active = [] - for claim in self.sql.execute( - f"select * from claim where activation_height <= {self._current_height}"): - txo = self._txos[claim.txo_hash] - if controlling and controlling[0] == txo.claim.stream.title: - continue - active.append((txo.claim.stream.title, claim.amount, claim.effective_amount, claim.activation_height)) - return active - - def get_accepted(self): - accepted = [] - for claim in self.sql.execute( - f"select * from claim where activation_height > {self._current_height}"): - txo = self._txos[claim.txo_hash] - accepted.append((txo.claim.stream.title, claim.amount, claim.effective_amount, claim.activation_height)) - return accepted - - def advance(self, height, txs): - self._current_height = height - self.sql.advance_txs(height, txs, {'timestamp': 1}, self.daemon_height, self.timer) - return [otx[0].outputs[0] for otx in txs] - - def state(self, controlling=None, active=None, accepted=None): - self.assertEqual(controlling, self.get_controlling()) - self.assertEqual(active or [], self.get_active()) - self.assertEqual(accepted or [], self.get_accepted()) - - -class TestClaimtrie(TestSQLDB): - - def test_example_from_spec(self): - # https://spec.lbry.com/#claim-activation-example - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - advance(13, [stream]) - state( - controlling=('Claim A', 10*COIN, 10*COIN, 13), - active=[], - accepted=[] - ) - advance(1001, [self.get_stream('Claim B', 20*COIN)]) - state( - controlling=('Claim A', 10*COIN, 10*COIN, 13), - active=[], - accepted=[('Claim B', 20*COIN, 0, 1031)] - ) - advance(1010, [self.get_support(stream, 14*COIN)]) - state( - controlling=('Claim A', 10*COIN, 24*COIN, 13), - active=[], - accepted=[('Claim B', 20*COIN, 0, 1031)] - ) - advance(1020, [self.get_stream('Claim C', 50*COIN)]) - state( - controlling=('Claim A', 
10*COIN, 24*COIN, 13), - active=[], - accepted=[ - ('Claim B', 20*COIN, 0, 1031), - ('Claim C', 50*COIN, 0, 1051)] - ) - advance(1031, []) - state( - controlling=('Claim A', 10*COIN, 24*COIN, 13), - active=[('Claim B', 20*COIN, 20*COIN, 1031)], - accepted=[('Claim C', 50*COIN, 0, 1051)] - ) - advance(1040, [self.get_stream('Claim D', 300*COIN)]) - state( - controlling=('Claim A', 10*COIN, 24*COIN, 13), - active=[('Claim B', 20*COIN, 20*COIN, 1031)], - accepted=[ - ('Claim C', 50*COIN, 0, 1051), - ('Claim D', 300*COIN, 0, 1072)] - ) - advance(1051, []) - state( - controlling=('Claim D', 300*COIN, 300*COIN, 1051), - active=[ - ('Claim A', 10*COIN, 24*COIN, 13), - ('Claim B', 20*COIN, 20*COIN, 1031), - ('Claim C', 50*COIN, 50*COIN, 1051)], - accepted=[] - ) - # beyond example - advance(1052, [self.get_stream_update(stream, 290*COIN)]) - state( - controlling=('Claim A', 290*COIN, 304*COIN, 13), - active=[ - ('Claim B', 20*COIN, 20*COIN, 1031), - ('Claim C', 50*COIN, 50*COIN, 1051), - ('Claim D', 300*COIN, 300*COIN, 1051), - ], - accepted=[] - ) - - def test_competing_claims_subsequent_blocks_height_wins(self): - advance, state = self.advance, self.state - advance(13, [self.get_stream('Claim A', 10*COIN)]) - state( - controlling=('Claim A', 10*COIN, 10*COIN, 13), - active=[], - accepted=[] - ) - advance(14, [self.get_stream('Claim B', 10*COIN)]) - state( - controlling=('Claim A', 10*COIN, 10*COIN, 13), - active=[('Claim B', 10*COIN, 10*COIN, 14)], - accepted=[] - ) - advance(15, [self.get_stream('Claim C', 10*COIN)]) - state( - controlling=('Claim A', 10*COIN, 10*COIN, 13), - active=[ - ('Claim B', 10*COIN, 10*COIN, 14), - ('Claim C', 10*COIN, 10*COIN, 15)], - accepted=[] - ) - - def test_competing_claims_in_single_block_position_wins(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - stream2 = self.get_stream('Claim B', 10*COIN) - advance(13, [stream, stream2]) - state( - controlling=('Claim A', 10*COIN, 10*COIN, 13), - active=[('Claim B', 10*COIN, 10*COIN, 13)], - accepted=[] - ) - - def test_competing_claims_in_single_block_effective_amount_wins(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - stream2 = self.get_stream('Claim B', 11*COIN) - advance(13, [stream, stream2]) - state( - controlling=('Claim B', 11*COIN, 11*COIN, 13), - active=[('Claim A', 10*COIN, 10*COIN, 13)], - accepted=[] - ) - - def test_winning_claim_deleted(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - stream2 = self.get_stream('Claim B', 11*COIN) - advance(13, [stream, stream2]) - state( - controlling=('Claim B', 11*COIN, 11*COIN, 13), - active=[('Claim A', 10*COIN, 10*COIN, 13)], - accepted=[] - ) - advance(14, [self.get_abandon(stream2)]) - state( - controlling=('Claim A', 10*COIN, 10*COIN, 13), - active=[], - accepted=[] - ) - - def test_winning_claim_deleted_and_new_claim_becomes_winner(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - stream2 = self.get_stream('Claim B', 11*COIN) - advance(13, [stream, stream2]) - state( - controlling=('Claim B', 11*COIN, 11*COIN, 13), - active=[('Claim A', 10*COIN, 10*COIN, 13)], - accepted=[] - ) - advance(15, [self.get_abandon(stream2), self.get_stream('Claim C', 12*COIN)]) - state( - controlling=('Claim C', 12*COIN, 12*COIN, 15), - active=[('Claim A', 10*COIN, 10*COIN, 13)], - accepted=[] - ) - - def test_winning_claim_expires_and_another_takes_over(self): - advance, state = 
self.advance, self.state - advance(10, [self.get_stream('Claim A', 11*COIN)]) - advance(20, [self.get_stream('Claim B', 10*COIN)]) - state( - controlling=('Claim A', 11*COIN, 11*COIN, 10), - active=[('Claim B', 10*COIN, 10*COIN, 20)], - accepted=[] - ) - advance(262984, []) - state( - controlling=('Claim B', 10*COIN, 10*COIN, 20), - active=[], - accepted=[] - ) - advance(262994, []) - state( - controlling=None, - active=[], - accepted=[] - ) - - def test_create_and_update_in_same_block(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - advance(10, [stream, self.get_stream_update(stream, 11*COIN)]) - self.assertTrue(search()[0]) - - def test_double_updates_in_same_block(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - advance(10, [stream]) - update = self.get_stream_update(stream, 11*COIN) - advance(20, [update, self.get_stream_update(update, 9*COIN)]) - self.assertTrue(search()[0]) - - def test_create_and_abandon_in_same_block(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - advance(10, [stream, self.get_abandon(stream)]) - self.assertFalse(search()) - - def test_update_and_abandon_in_same_block(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - advance(10, [stream]) - update = self.get_stream_update(stream, 11*COIN) - advance(20, [update, self.get_abandon(update)]) - self.assertFalse(search()) - - def test_create_update_and_delete_in_same_block(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - update = self.get_stream_update(stream, 11*COIN) - advance(10, [stream, update, self.get_abandon(update)]) - self.assertFalse(search()) - - def test_support_added_and_removed_in_same_block(self): - advance, state = self.advance, self.state - stream = self.get_stream('Claim A', 10*COIN) - advance(10, [stream]) - support = self.get_support(stream, COIN) - advance(20, [support, self.get_abandon(support)]) - self.assertEqual(search()[0]['support_amount'], 0) - - @staticmethod - def _get_x_with_claim_id_prefix(getter, prefix, cached_iteration=None, **kwargs): - iterations = cached_iteration+1 if cached_iteration else 100 - for i in range(cached_iteration or 1, iterations): - stream = getter(f'claim #{i}', COIN, **kwargs) - if stream[0].outputs[0].claim_id.startswith(prefix): - cached_iteration is None and print(f'Found "{prefix}" in {i} iterations.') - return stream - if cached_iteration: - raise ValueError(f'Failed to find "{prefix}" at cached iteration, run with None to find iteration.') - raise ValueError(f'Failed to find "{prefix}" in {iterations} iterations, try different values.') - - def get_channel_with_claim_id_prefix(self, prefix, cached_iteration=None, **kwargs): - return self._get_x_with_claim_id_prefix(self.get_channel, prefix, cached_iteration, **kwargs) - - def get_stream_with_claim_id_prefix(self, prefix, cached_iteration=None, **kwargs): - return self._get_x_with_claim_id_prefix(self.get_stream, prefix, cached_iteration, **kwargs) - - def test_canonical_url_and_channel_validation(self): - advance = self.advance - - tx_chan_a = self.get_channel_with_claim_id_prefix('a', 1, key=b'c') - tx_chan_ab = self.get_channel_with_claim_id_prefix('ab', 72, key=b'c') - txo_chan_a = tx_chan_a[0].outputs[0] - txo_chan_ab = tx_chan_ab[0].outputs[0] - advance(1, [tx_chan_a]) - advance(2, [tx_chan_ab]) - (r_ab, r_a) = search(order_by=['creation_height'], 
limit=2) - self.assertEqual("@foo#a", r_a['short_url']) - self.assertEqual("@foo#ab", r_ab['short_url']) - self.assertIsNone(r_a['canonical_url']) - self.assertIsNone(r_ab['canonical_url']) - self.assertEqual(0, r_a['claims_in_channel']) - self.assertEqual(0, r_ab['claims_in_channel']) - - tx_a = self.get_stream_with_claim_id_prefix('a', 2) - tx_ab = self.get_stream_with_claim_id_prefix('ab', 42) - tx_abc = self.get_stream_with_claim_id_prefix('abc', 65) - advance(3, [tx_a]) - advance(4, [tx_ab, tx_abc]) - (r_abc, r_ab, r_a) = search(order_by=['creation_height', 'tx_position'], limit=3) - self.assertEqual("foo#a", r_a['short_url']) - self.assertEqual("foo#ab", r_ab['short_url']) - self.assertEqual("foo#abc", r_abc['short_url']) - self.assertIsNone(r_a['canonical_url']) - self.assertIsNone(r_ab['canonical_url']) - self.assertIsNone(r_abc['canonical_url']) - - tx_a2 = self.get_stream_with_claim_id_prefix('a', 7, channel=txo_chan_a) - tx_ab2 = self.get_stream_with_claim_id_prefix('ab', 23, channel=txo_chan_a) - a2_claim = tx_a2[0].outputs[0] - ab2_claim = tx_ab2[0].outputs[0] - advance(6, [tx_a2]) - advance(7, [tx_ab2]) - (r_ab2, r_a2) = search(order_by=['creation_height'], limit=2) - self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url']) - self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url']) - self.assertEqual("@foo#a/foo#a", r_a2['canonical_url']) - self.assertEqual("@foo#a/foo#ab", r_ab2['canonical_url']) - self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel']) - - # change channel public key, invaliding stream claim signatures - advance(8, [self.get_channel_update(txo_chan_a, COIN, key=b'a')]) - (r_ab2, r_a2) = search(order_by=['creation_height'], limit=2) - self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url']) - self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url']) - self.assertIsNone(r_a2['canonical_url']) - self.assertIsNone(r_ab2['canonical_url']) - self.assertEqual(0, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel']) - - # reinstate previous channel public key (previous stream claim signatures become valid again) - channel_update = self.get_channel_update(txo_chan_a, COIN, key=b'c') - advance(9, [channel_update]) - (r_ab2, r_a2) = search(order_by=['creation_height'], limit=2) - self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url']) - self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url']) - self.assertEqual("@foo#a/foo#a", r_a2['canonical_url']) - self.assertEqual("@foo#a/foo#ab", r_ab2['canonical_url']) - self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel']) - self.assertEqual(0, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel']) - - # change channel of stream - self.assertEqual("@foo#a/foo#ab", search(claim_id=ab2_claim.claim_id, limit=1)[0]['canonical_url']) - tx_ab2 = self.get_stream_update(tx_ab2, COIN, txo_chan_ab) - advance(10, [tx_ab2]) - self.assertEqual("@foo#ab/foo#a", search(claim_id=ab2_claim.claim_id, limit=1)[0]['canonical_url']) - # TODO: currently there is a bug where stream leaving a channel does not update that channels claims count - self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel']) - # TODO: after bug is fixed remove test above and add test below - #self.assertEqual(1, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel']) - self.assertEqual(1, search(claim_id=txo_chan_ab.claim_id, 
limit=1)[0]['claims_in_channel']) - - # claim abandon updates claims_in_channel - advance(11, [self.get_abandon(tx_ab2)]) - self.assertEqual(0, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel']) - - # delete channel, invaliding stream claim signatures - advance(12, [self.get_abandon(channel_update)]) - (r_a2,) = search(order_by=['creation_height'], limit=1) - self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url']) - self.assertIsNone(r_a2['canonical_url']) - - def test_resolve_issue_2448(self): - advance = self.advance - - tx_chan_a = self.get_channel_with_claim_id_prefix('a', 1, key=b'c') - tx_chan_ab = self.get_channel_with_claim_id_prefix('ab', 72, key=b'c') - txo_chan_a = tx_chan_a[0].outputs[0] - txo_chan_ab = tx_chan_ab[0].outputs[0] - advance(1, [tx_chan_a]) - advance(2, [tx_chan_ab]) - - self.assertEqual(reader.resolve_url("@foo#a")['claim_hash'], txo_chan_a.claim_hash) - self.assertEqual(reader.resolve_url("@foo#ab")['claim_hash'], txo_chan_ab.claim_hash) - - # update increase last height change of channel - advance(9, [self.get_channel_update(txo_chan_a, COIN, key=b'c')]) - - # make sure that activation_height is used instead of height (issue #2448) - self.assertEqual(reader.resolve_url("@foo#a")['claim_hash'], txo_chan_a.claim_hash) - self.assertEqual(reader.resolve_url("@foo#ab")['claim_hash'], txo_chan_ab.claim_hash) - - def test_canonical_find_shortest_id(self): - new_hash = 'abcdef0123456789beef' - other0 = '1bcdef0123456789beef' - other1 = 'ab1def0123456789beef' - other2 = 'abc1ef0123456789beef' - other3 = 'abcdef0123456789bee1' - f = FindShortestID() - f.step(other0, new_hash) - self.assertEqual('#a', f.finalize()) - f.step(other1, new_hash) - self.assertEqual('#abc', f.finalize()) - f.step(other2, new_hash) - self.assertEqual('#abcd', f.finalize()) - f.step(other3, new_hash) - self.assertEqual('#abcdef0123456789beef', f.finalize()) - - -class TestTrending(TestSQLDB): - - def test_trending(self): - advance, state = self.advance, self.state - no_trend = self.get_stream('Claim A', COIN) - downwards = self.get_stream('Claim B', COIN) - up_small = self.get_stream('Claim C', COIN) - up_medium = self.get_stream('Claim D', COIN) - up_biggly = self.get_stream('Claim E', COIN) - claims = advance(1, [up_biggly, up_medium, up_small, no_trend, downwards]) - for window in range(1, 8): - advance(zscore.TRENDING_WINDOW * window, [ - self.get_support(downwards, (20-window)*COIN), - self.get_support(up_small, int(20+(window/10)*COIN)), - self.get_support(up_medium, (20+(window*(2 if window == 7 else 1)))*COIN), - self.get_support(up_biggly, (20+(window*(3 if window == 7 else 1)))*COIN), - ]) - results = search(order_by=['trending_local']) - self.assertEqual([c.claim_id for c in claims], [hexlify(c['claim_hash'][::-1]).decode() for c in results]) - self.assertEqual([10, 6, 2, 0, -2], [int(c['trending_local']) for c in results]) - self.assertEqual([53, 38, -32, 0, -6], [int(c['trending_global']) for c in results]) - self.assertEqual([4, 4, 2, 0, 1], [int(c['trending_group']) for c in results]) - self.assertEqual([53, 38, 2, 0, -6], [int(c['trending_mixed']) for c in results]) - - def test_edge(self): - problematic = self.get_stream('Problem', COIN) - self.advance(1, [problematic]) - self.advance(zscore.TRENDING_WINDOW, [self.get_support(problematic, 53000000000)]) - self.advance(zscore.TRENDING_WINDOW * 2, [self.get_support(problematic, 500000000)]) - - -@unittest.skip("filtering/blocking is applied during ES sync, this needs to be ported to integration 
test") -class TestContentBlocking(TestSQLDB): - - def test_blocking_and_filtering(self): - # content claims and channels - tx0 = self.get_channel('A Channel', COIN, '@channel1') - regular_channel = tx0[0].outputs[0] - tx1 = self.get_stream('Claim One', COIN, 'claim1') - tx2 = self.get_stream('Claim Two', COIN, 'claim2', regular_channel) - tx3 = self.get_stream('Claim Three', COIN, 'claim3') - self.advance(1, [tx0, tx1, tx2, tx3]) - claim1, claim2, claim3 = tx1[0].outputs[0], tx2[0].outputs[0], tx3[0].outputs[0] - - # block and filter channels - tx0 = self.get_channel('Blocking Channel', COIN, '@block') - tx1 = self.get_channel('Filtering Channel', COIN, '@filter') - blocking_channel = tx0[0].outputs[0] - filtering_channel = tx1[0].outputs[0] - self.sql.blocking_channel_hashes.add(blocking_channel.claim_hash) - self.sql.filtering_channel_hashes.add(filtering_channel.claim_hash) - self.advance(2, [tx0, tx1]) - self.assertEqual({}, dict(self.sql.blocked_streams)) - self.assertEqual({}, dict(self.sql.blocked_channels)) - self.assertEqual({}, dict(self.sql.filtered_streams)) - self.assertEqual({}, dict(self.sql.filtered_channels)) - - # nothing blocked - results, _ = reader.resolve([ - claim1.claim_name, claim2.claim_name, - claim3.claim_name, regular_channel.claim_name - ]) - self.assertEqual(claim1.claim_hash, results[0]['claim_hash']) - self.assertEqual(claim2.claim_hash, results[1]['claim_hash']) - self.assertEqual(claim3.claim_hash, results[2]['claim_hash']) - self.assertEqual(regular_channel.claim_hash, results[3]['claim_hash']) - - # nothing filtered - results, censor = censored_search() - self.assertEqual(6, len(results)) - self.assertEqual(0, censor.total) - self.assertEqual({}, censor.censored) - - # block claim reposted to blocking channel, also gets filtered - repost_tx1 = self.get_repost(claim1.claim_id, COIN, blocking_channel) - repost1 = repost_tx1[0].outputs[0] - self.advance(3, [repost_tx1]) - self.assertEqual( - {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.blocked_streams) - ) - self.assertEqual({}, dict(self.sql.blocked_channels)) - self.assertEqual( - {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.filtered_streams) - ) - self.assertEqual({}, dict(self.sql.filtered_channels)) - - # claim is blocked from results by direct repost - results, censor = censored_search(text='Claim') - self.assertEqual(2, len(results)) - self.assertEqual(claim2.claim_hash, results[0]['claim_hash']) - self.assertEqual(claim3.claim_hash, results[1]['claim_hash']) - self.assertEqual(1, censor.total) - self.assertEqual({blocking_channel.claim_hash: 1}, censor.censored) - results, _ = reader.resolve([claim1.claim_name]) - self.assertEqual( - f"Resolve of 'claim1' was censored by channel with claim id '{blocking_channel.claim_id}'.", - results[0].args[0] - ) - results, _ = reader.resolve([ - claim2.claim_name, regular_channel.claim_name # claim2 and channel still resolved - ]) - self.assertEqual(claim2.claim_hash, results[0]['claim_hash']) - self.assertEqual(regular_channel.claim_hash, results[1]['claim_hash']) - - # block claim indirectly by blocking its parent channel - repost_tx2 = self.get_repost(regular_channel.claim_id, COIN, blocking_channel) - repost2 = repost_tx2[0].outputs[0] - self.advance(4, [repost_tx2]) - self.assertEqual( - {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.blocked_streams) - ) - self.assertEqual( - {repost2.claim.repost.reference.claim_hash: 
blocking_channel.claim_hash}, - dict(self.sql.blocked_channels) - ) - self.assertEqual( - {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.filtered_streams) - ) - self.assertEqual( - {repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.filtered_channels) - ) - - # claim in blocked channel is filtered from search and can't resolve - results, censor = censored_search(text='Claim') - self.assertEqual(1, len(results)) - self.assertEqual(claim3.claim_hash, results[0]['claim_hash']) - self.assertEqual(2, censor.total) - self.assertEqual({blocking_channel.claim_hash: 2}, censor.censored) - results, _ = reader.resolve([ - claim2.claim_name, regular_channel.claim_name # claim2 and channel don't resolve - ]) - self.assertEqual( - f"Resolve of 'claim2' was censored by channel with claim id '{blocking_channel.claim_id}'.", - results[0].args[0] - ) - self.assertEqual( - f"Resolve of '@channel1' was censored by channel with claim id '{blocking_channel.claim_id}'.", - results[1].args[0] - ) - results, _ = reader.resolve([claim3.claim_name]) # claim3 still resolved - self.assertEqual(claim3.claim_hash, results[0]['claim_hash']) - - # filtered claim is only filtered and not blocked - repost_tx3 = self.get_repost(claim3.claim_id, COIN, filtering_channel) - repost3 = repost_tx3[0].outputs[0] - self.advance(5, [repost_tx3]) - self.assertEqual( - {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.blocked_streams) - ) - self.assertEqual( - {repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.blocked_channels) - ) - self.assertEqual( - {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash, - repost3.claim.repost.reference.claim_hash: filtering_channel.claim_hash}, - dict(self.sql.filtered_streams) - ) - self.assertEqual( - {repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash}, - dict(self.sql.filtered_channels) - ) - - # filtered claim doesn't return in search but is resolveable - results, censor = censored_search(text='Claim') - self.assertEqual(0, len(results)) - self.assertEqual(3, censor.total) - self.assertEqual({blocking_channel.claim_hash: 2, filtering_channel.claim_hash: 1}, censor.censored) - results, _ = reader.resolve([claim3.claim_name]) # claim3 still resolved - self.assertEqual(claim3.claim_hash, results[0]['claim_hash']) - - # abandon unblocks content - self.advance(6, [ - self.get_abandon(repost_tx1), - self.get_abandon(repost_tx2), - self.get_abandon(repost_tx3) - ]) - self.assertEqual({}, dict(self.sql.blocked_streams)) - self.assertEqual({}, dict(self.sql.blocked_channels)) - self.assertEqual({}, dict(self.sql.filtered_streams)) - self.assertEqual({}, dict(self.sql.filtered_channels)) - results, censor = censored_search(text='Claim') - self.assertEqual(3, len(results)) - self.assertEqual(0, censor.total) - results, censor = censored_search() - self.assertEqual(6, len(results)) - self.assertEqual(0, censor.total) - results, _ = reader.resolve([ - claim1.claim_name, claim2.claim_name, - claim3.claim_name, regular_channel.claim_name - ]) - self.assertEqual(claim1.claim_hash, results[0]['claim_hash']) - self.assertEqual(claim2.claim_hash, results[1]['claim_hash']) - self.assertEqual(claim3.claim_hash, results[2]['claim_hash']) - self.assertEqual(regular_channel.claim_hash, results[3]['claim_hash']) - - def test_pagination(self): - one, two, three, four, five, six, seven, filter_channel = self.advance(1, [ - 
-            self.get_stream('One', COIN),
-            self.get_stream('Two', COIN),
-            self.get_stream('Three', COIN),
-            self.get_stream('Four', COIN),
-            self.get_stream('Five', COIN),
-            self.get_stream('Six', COIN),
-            self.get_stream('Seven', COIN),
-            self.get_channel('Filtering Channel', COIN, '@filter'),
-        ])
-        self.sql.filtering_channel_hashes.add(filter_channel.claim_hash)
-
-        # nothing filtered
-        results, censor = censored_search(order_by='^height', offset=1, limit=3)
-        self.assertEqual(3, len(results))
-        self.assertEqual(
-            [two.claim_hash, three.claim_hash, four.claim_hash],
-            [r['claim_hash'] for r in results]
-        )
-        self.assertEqual(0, censor.total)
-
-        # content filtered
-        repost1, repost2 = self.advance(2, [
-            self.get_repost(one.claim_id, COIN, filter_channel),
-            self.get_repost(two.claim_id, COIN, filter_channel),
-        ])
-        results, censor = censored_search(order_by='^height', offset=1, limit=3)
-        self.assertEqual(3, len(results))
-        self.assertEqual(
-            [four.claim_hash, five.claim_hash, six.claim_hash],
-            [r['claim_hash'] for r in results]
-        )
-        self.assertEqual(2, censor.total)
-        self.assertEqual({filter_channel.claim_hash: 2}, censor.censored)
diff --git a/tox.ini b/tox.ini
index ede5973fa..8ad5e37a9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,11 +12,18 @@ setenv =
 commands =
     orchstr8 download
     blockchain: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.blockchain {posargs}
+    claims: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.claims {posargs}
+    takeovers: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.takeovers {posargs}
+    transactions: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.transactions {posargs}
     datanetwork: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.datanetwork {posargs}
     other: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.other {posargs}
-
-[testenv:blockchain_legacy_search]
+[testenv:claims_legacy_search]
 setenv =
     ENABLE_LEGACY_SEARCH=1
 commands =
-    coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.blockchain {posargs}
+    coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.claims {posargs}
+[testenv:takeovers_legacy_search]
+setenv =
+    ENABLE_LEGACY_SEARCH=1
+commands =
+    coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.takeovers {posargs}
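The new tests/unit/wallet/server/test_revertable.py above exercises RevertableOpStack, which stages put/delete operations against a backing key/value store and rejects ops that contradict it: deleting a value that is not stored, deleting with the wrong value, or putting on top of an already stored value, while a put followed by a delete of the same key/value simply cancels out. The sketch below is a minimal, self-contained illustration of those invariants only; it is not the lbry.wallet.server.db.revertable implementation, and the names MiniOpStack, IntegrityError and apply_to() are hypothetical stand-ins.

```python
# Minimal sketch of the staging rules that test_revertable.py asserts.
# This is NOT lbry.wallet.server.db.revertable; MiniOpStack, IntegrityError
# and apply_to() are hypothetical stand-ins used only for illustration.
from collections import defaultdict
from typing import Callable, Dict, List, Optional, Tuple

PUT, DELETE = True, False
Op = Tuple[bool, bytes, bytes]  # (is_put, key, value)


class IntegrityError(Exception):
    """An op contradicts the backing store or the ops already staged."""


class MiniOpStack:
    def __init__(self, get: Callable[[bytes], Optional[bytes]]):
        self._get = get  # reads the backing key/value store
        self._items: Dict[bytes, List[Op]] = defaultdict(list)

    def append_op(self, is_put: bool, key: bytes, value: bytes) -> None:
        ops = self._items[key]
        if ops and ops[-1] == (not is_put, key, value):
            ops.pop()  # a put and a delete of the same key/value cancel out
            return
        stored = self._get(key)
        if not ops and is_put and stored is not None:
            raise IntegrityError(f"put would overwrite stored value for {key!r}")
        if not ops and not is_put and stored != value:
            raise IntegrityError(f"cannot delete {key!r}: that value is not stored")
        ops.append((is_put, key, value))

    def __len__(self) -> int:
        return sum(len(ops) for ops in self._items.values())

    def apply_to(self, db: Dict[bytes, bytes]) -> None:
        """Flush the staged ops into a dict-backed store, then clear."""
        for ops in self._items.values():
            for is_put, key, value in ops:
                if is_put:
                    db[key] = value
                else:
                    db.pop(key)
        self._items.clear()


if __name__ == '__main__':
    fake_db: Dict[bytes, bytes] = {}
    stack = MiniOpStack(fake_db.get)
    stack.append_op(PUT, b'k1', b'v1')
    stack.append_op(DELETE, b'k1', b'v1')   # cancels the put
    assert len(stack) == 0
    stack.append_op(PUT, b'k1', b'v1')
    stack.apply_to(fake_db)
    assert fake_db == {b'k1': b'v1'}
    try:
        stack.append_op(PUT, b'k1', b'v2')  # k1 already stored: rejected
    except IntegrityError:
        pass
```

The tox.ini changes split the former blockchain integration target into separate claims, takeovers and transactions environments, with claims_legacy_search and takeovers_legacy_search variants that set ENABLE_LEGACY_SEARCH=1, so each group can be run on its own, for example with `tox -e claims` or `tox -e takeovers_legacy_search`.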