Compare commits
28 commits
master...rocksdb-hu

Commits (SHA1):
a586266831
bd284be69d
d6035c1ead
876a72f18d
b82dc8e45f
31144a490e
a2e7afa87f
6f6faef3dc
607d7d9cc7
3c7307a2f4
bbfe263591
7fe34ebb78
ff8c08b289
d7707d0053
a7d64de361
8a02796b37
4f4ecd64cc
e3a4dab6cb
03f888f787
d072b9f70b
8616fb96b1
ebec12522b
611ad5c655
e06c8e8303
776dea58c2
28be7d8993
24622103cf
2d8ed77806
39 changed files with 3223 additions and 2357 deletions
@@ -15,7 +15,6 @@ RUN apt-get update && \
     build-essential \
     automake libtool \
     pkg-config \
-    libleveldb-dev \
    python3.7 \
    python3-dev \
    python3-pip \
@@ -7,7 +7,7 @@ from lbry.error import ResolveCensoredError
 from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
 from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage
 if TYPE_CHECKING:
-    from lbry.wallet.server.leveldb import ResolveResult
+    from lbry.wallet.server.db.common import ResolveResult

 INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
 NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
@@ -285,6 +285,14 @@ class IntegrationTestCase(AsyncioTestCase):
             lambda e: e.tx.id == tx.id and e.address == address
         )

+    async def generate(self, blocks):
+        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
+        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
+        self.conductor.spv_node.server.synchronized.clear()
+        await self.blockchain.generate(blocks)
+        await prepare  # no guarantee that it didn't happen already, so start waiting from before calling generate
+        await self.conductor.spv_node.server.synchronized.wait()
+

 class FakeExchangeRateManager(ExchangeRateManager):
@@ -456,8 +464,8 @@ class CommandTestCase(IntegrationTestCase):
     async def confirm_tx(self, txid, ledger=None):
         """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
         await self.on_transaction_id(txid, ledger)
-        await self.generate(1)
-        await self.on_transaction_id(txid, ledger)
+        on_tx = self.on_transaction_id(txid, ledger)
+        await asyncio.wait([self.generate(1), on_tx], timeout=5)
         return txid

     async def on_transaction_dict(self, tx):
@@ -472,12 +480,6 @@ class CommandTestCase(IntegrationTestCase):
             addresses.add(txo['address'])
         return list(addresses)

-    async def generate(self, blocks):
-        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
-        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
-        await self.blockchain.generate(blocks)
-        await prepare  # no guarantee that it didn't happen already, so start waiting from before calling generate
-
     async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True):
         txid = await self.blockchain._cli_cmnd('claimname', name, value, amount)
         if confirm:
@@ -6,12 +6,13 @@ __node_url__ = (
 )
 __spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'

-from .wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
-from .manager import WalletManager
-from .network import Network
-from .ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
-from .account import Account, AddressManager, SingleKey, HierarchicalDeterministic, DeterministicChannelKeyManager
-from .transaction import Transaction, Output, Input
-from .script import OutputScript, InputScript
-from .database import SQLiteMixin, Database
-from .header import Headers
+from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
+from lbry.wallet.manager import WalletManager
+from lbry.wallet.network import Network
+from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
+from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \
+    DeterministicChannelKeyManager
+from lbry.wallet.transaction import Transaction, Output, Input
+from lbry.wallet.script import OutputScript, InputScript
+from lbry.wallet.database import SQLiteMixin, Database
+from lbry.wallet.header import Headers
@@ -16,18 +16,18 @@ from lbry.crypto.hash import hash160, double_sha256, sha256
 from lbry.crypto.base58 import Base58
 from lbry.utils import LRUCacheWithMetrics

-from .tasks import TaskGroup
-from .database import Database
-from .stream import StreamController
-from .dewies import dewies_to_lbc
-from .account import Account, AddressManager, SingleKey
-from .network import Network
-from .transaction import Transaction, Output
-from .header import Headers, UnvalidatedHeaders
-from .checkpoints import HASHES
-from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
-from .bip32 import PublicKey, PrivateKey
-from .coinselection import CoinSelector
+from lbry.wallet.tasks import TaskGroup
+from lbry.wallet.database import Database
+from lbry.wallet.stream import StreamController
+from lbry.wallet.dewies import dewies_to_lbc
+from lbry.wallet.account import Account, AddressManager, SingleKey
+from lbry.wallet.network import Network
+from lbry.wallet.transaction import Transaction, Output
+from lbry.wallet.header import Headers, UnvalidatedHeaders
+from lbry.wallet.checkpoints import HASHES
+from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
+from lbry.wallet.bip32 import PublicKey, PrivateKey
+from lbry.wallet.coinselection import CoinSelector

 log = logging.getLogger(__name__)
@@ -739,7 +739,7 @@ class Ledger(metaclass=LedgerRegistry):
         while timeout and (int(time.perf_counter()) - start) <= timeout:
             if await self._wait_round(tx, height, addresses):
                 return
-        raise asyncio.TimeoutError('Timed out waiting for transaction.')
+        raise asyncio.TimeoutError(f'Timed out waiting for transaction. {tx.id}')

     async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
         records = await self.db.get_addresses(address__in=addresses)
@@ -12,13 +12,13 @@ from typing import List, Type, MutableSequence, MutableMapping, Optional
 from lbry.error import KeyFeeAboveMaxAllowedError, WalletNotLoadedError
 from lbry.conf import Config, NOT_SET

-from .dewies import dewies_to_lbc
-from .account import Account
-from .ledger import Ledger, LedgerRegistry
-from .transaction import Transaction, Output
-from .database import Database
-from .wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
-from .rpc.jsonrpc import CodeMessageError
+from lbry.wallet.dewies import dewies_to_lbc
+from lbry.wallet.account import Account
+from lbry.wallet.ledger import Ledger, LedgerRegistry
+from lbry.wallet.transaction import Transaction, Output
+from lbry.wallet.database import Database
+from lbry.wallet.wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
+from lbry.wallet.rpc.jsonrpc import CodeMessageError

 if typing.TYPE_CHECKING:
     from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
@@ -1,5 +1,5 @@
 __hub_url__ = (
     "https://github.com/lbryio/hub/releases/download/v0.2021.12.18.1/hub"
 )
-from .node import Conductor
-from .service import ConductorService
+from lbry.wallet.orchstr8.node import Conductor
+from lbry.wallet.orchstr8.service import ConductorService
@@ -16,11 +16,13 @@ import urllib.request
 from uuid import uuid4

 import lbry
-from lbry.wallet.server.server import Server
 from lbry.wallet.server.env import Env
 from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
 from lbry.conf import KnownHubsList, Config
 from lbry.wallet.orchstr8 import __hub_url__
+from lbry.wallet.server.block_processor import BlockProcessor
+from lbry.wallet.server.chain_reader import BlockchainReaderServer
+from lbry.wallet.server.db.elasticsearch.sync import ElasticWriter

 log = logging.getLogger(__name__)
@@ -189,49 +191,49 @@ class SPVNode:
         self.coin_class = coin_class
         self.controller = None
         self.data_path = None
-        self.server = None
+        self.server: Optional[BlockchainReaderServer] = None
+        self.writer: Optional[BlockProcessor] = None
+        self.es_writer: Optional[ElasticWriter] = None
         self.hostname = 'localhost'
         self.port = 50001 + node_number  # avoid conflict with default daemon
         self.udp_port = self.port
         self.session_timeout = 600
-        self.rpc_port = '0'  # disabled by default
         self.stopped = False
         self.index_name = uuid4().hex

     async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
         self.data_path = tempfile.mkdtemp()
         conf = {
-            'DESCRIPTION': '',
-            'PAYMENT_ADDRESS': '',
-            'DAILY_FEE': '0',
-            'DB_DIRECTORY': self.data_path,
-            'DAEMON_URL': blockchain_node.rpc_url,
-            'REORG_LIMIT': '100',
-            'HOST': self.hostname,
-            'TCP_PORT': str(self.port),
-            'UDP_PORT': str(self.udp_port),
-            'SESSION_TIMEOUT': str(self.session_timeout),
-            'MAX_QUERY_WORKERS': '0',
-            'INDIVIDUAL_TAG_INDEXES': '',
-            'RPC_PORT': self.rpc_port,
-            'ES_INDEX_PREFIX': self.index_name,
-            'ES_MODE': 'writer',
+            'description': '',
+            'payment_address': '',
+            'daily_fee': '0',
+            'db_dir': self.data_path,
+            'daemon_url': blockchain_node.rpc_url,
+            'reorg_limit': 100,
+            'host': self.hostname,
+            'tcp_port': self.port,
+            'udp_port': self.udp_port,
+            'session_timeout': self.session_timeout,
+            'max_query_workers': 0,
+            'es_index_prefix': self.index_name,
         }
         if extraconf:
             conf.update(extraconf)
-        # TODO: don't use os.environ
-        os.environ.update(conf)
-        self.server = Server(Env(self.coin_class))
-        self.server.bp.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
-        await self.server.start()
+        env = Env(self.coin_class, **conf)
+        self.writer = BlockProcessor(env)
+        self.server = BlockchainReaderServer(env)
+        self.es_writer = ElasticWriter(env)
+        await self.writer.open()
+        await self.writer.start()
+        await asyncio.wait([self.server.start(), self.es_writer.start()])

     async def stop(self, cleanup=True):
         if self.stopped:
             return
         try:
-            await self.server.db.search_index.delete_index()
-            await self.server.db.search_index.stop()
+            await self.es_writer.stop(delete_index=True)
             await self.server.stop()
+            await self.writer.stop()
             self.stopped = True
         finally:
             cleanup and self.cleanup()
@@ -1,149 +1,31 @@
 import time
 import asyncio
 import typing
+import signal

 from bisect import bisect_right
-from struct import pack, unpack
+from struct import pack
 from concurrent.futures.thread import ThreadPoolExecutor
-from typing import Optional, List, Tuple, Set, DefaultDict, Dict, NamedTuple
+from typing import Optional, List, Tuple, Set, DefaultDict, Dict
 from prometheus_client import Gauge, Histogram
 from collections import defaultdict

 import lbry
-from lbry.schema.url import URL
 from lbry.schema.claim import Claim
 from lbry.wallet.ledger import Ledger, TestNetLedger, RegTestLedger
 from lbry.utils import LRUCache
 from lbry.wallet.transaction import OutputScript, Output, Transaction
 from lbry.wallet.server.tx import Tx, TxOutput, TxInput
-from lbry.wallet.server.daemon import DaemonError
-from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN
-from lbry.wallet.server.util import chunks, class_logger
+from lbry.wallet.server.hash import hash_to_hex_str
+from lbry.wallet.server.util import class_logger
 from lbry.crypto.hash import hash160
-from lbry.wallet.server.mempool import MemPool
 from lbry.wallet.server.db.prefixes import ACTIVATED_SUPPORT_TXO_TYPE, ACTIVATED_CLAIM_TXO_TYPE
 from lbry.wallet.server.db.prefixes import PendingActivationKey, PendingActivationValue, ClaimToTXOValue
-from lbry.wallet.server.udp import StatusServer
-from lbry.wallet.server.db.revertable import RevertableOpStack
+from lbry.wallet.server.prefetcher import Prefetcher
+from lbry.wallet.server.db.db import HubDB

 if typing.TYPE_CHECKING:
-    from lbry.wallet.server.leveldb import LevelDB
-
-
-class TrendingNotification(NamedTuple):
-    height: int
-    prev_amount: int
-    new_amount: int
-
-
-class Prefetcher:
-    """Prefetches blocks (in the forward direction only)."""
-
-    def __init__(self, daemon, coin, blocks_event):
-        self.logger = class_logger(__name__, self.__class__.__name__)
-        self.daemon = daemon
-        self.coin = coin
-        self.blocks_event = blocks_event
-        self.blocks = []
-        self.caught_up = False
-        # Access to fetched_height should be protected by the semaphore
-        self.fetched_height = None
-        self.semaphore = asyncio.Semaphore()
-        self.refill_event = asyncio.Event()
-        # The prefetched block cache size. The min cache size has
-        # little effect on sync time.
-        self.cache_size = 0
-        self.min_cache_size = 10 * 1024 * 1024
-        # This makes the first fetch be 10 blocks
-        self.ave_size = self.min_cache_size // 10
-        self.polling_delay = 5
-
-    async def main_loop(self, bp_height):
-        """Loop forever polling for more blocks."""
-        await self.reset_height(bp_height)
-        while True:
-            try:
-                # Sleep a while if there is nothing to prefetch
-                await self.refill_event.wait()
-                if not await self._prefetch_blocks():
-                    await asyncio.sleep(self.polling_delay)
-            except DaemonError as e:
-                self.logger.info(f'ignoring daemon error: {e}')
-
-    def get_prefetched_blocks(self):
-        """Called by block processor when it is processing queued blocks."""
-        blocks = self.blocks
-        self.blocks = []
-        self.cache_size = 0
-        self.refill_event.set()
-        return blocks
-
-    async def reset_height(self, height):
-        """Reset to prefetch blocks from the block processor's height.
-
-        Used in blockchain reorganisations. This coroutine can be
-        called asynchronously to the _prefetch_blocks coroutine so we
-        must synchronize with a semaphore.
-        """
-        async with self.semaphore:
-            self.blocks.clear()
-            self.cache_size = 0
-            self.fetched_height = height
-            self.refill_event.set()
-
-        daemon_height = await self.daemon.height()
-        behind = daemon_height - height
-        if behind > 0:
-            self.logger.info(f'catching up to daemon height {daemon_height:,d} '
-                             f'({behind:,d} blocks behind)')
-        else:
-            self.logger.info(f'caught up to daemon height {daemon_height:,d}')
-
-    async def _prefetch_blocks(self):
-        """Prefetch some blocks and put them on the queue.
-
-        Repeats until the queue is full or caught up.
-        """
-        daemon = self.daemon
-        daemon_height = await daemon.height()
-        async with self.semaphore:
-            while self.cache_size < self.min_cache_size:
-                # Try and catch up all blocks but limit to room in cache.
-                # Constrain fetch count to between 0 and 500 regardless;
-                # testnet can be lumpy.
-                cache_room = self.min_cache_size // self.ave_size
-                count = min(daemon_height - self.fetched_height, cache_room)
-                count = min(500, max(count, 0))
-                if not count:
-                    self.caught_up = True
-                    return False
-
-                first = self.fetched_height + 1
-                hex_hashes = await daemon.block_hex_hashes(first, count)
-                if self.caught_up:
-                    self.logger.info('new block height {:,d} hash {}'
-                                     .format(first + count-1, hex_hashes[-1]))
-                blocks = await daemon.raw_blocks(hex_hashes)
-
-                assert count == len(blocks)
-
-                # Special handling for genesis block
-                if first == 0:
-                    blocks[0] = self.coin.genesis_block(blocks[0])
-                    self.logger.info(f'verified genesis block with hash {hex_hashes[0]}')
-
-                # Update our recent average block size estimate
-                size = sum(len(block) for block in blocks)
-                if count >= 10:
-                    self.ave_size = size // count
-                else:
-                    self.ave_size = (size + (10 - count) * self.ave_size) // 10
-
-                self.blocks.extend(blocks)
-                self.cache_size += size
-                self.fetched_height += count
-                self.blocks_event.set()
-
-        self.refill_event.clear()
-        return True
+    from lbry.wallet.server.db.revertable import RevertableOpStack


 class ChainError(Exception):
@@ -198,15 +80,19 @@ class BlockProcessor:
         "reorg_count", "Number of reorgs", namespace=NAMESPACE
     )

-    def __init__(self, env, db: 'LevelDB', daemon, shutdown_event: asyncio.Event):
-        self.state_lock = asyncio.Lock()
+    def __init__(self, env: 'Env'):
+        self.cancellable_tasks = []

         self.env = env
-        self.db = db
-        self.daemon = daemon
+        self.state_lock = asyncio.Lock()
+        self.daemon = env.coin.DAEMON(env.coin, env.daemon_url)
         self._chain_executor = ThreadPoolExecutor(1, thread_name_prefix='block-processor')
-        self._sync_reader_executor = ThreadPoolExecutor(1, thread_name_prefix='hub-es-sync')
-        self.mempool = MemPool(env.coin, daemon, db, self.state_lock)
-        self.shutdown_event = shutdown_event
+        self.db = HubDB(
+            env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes,
+            max_open_files=env.db_max_open_files, blocking_channel_ids=env.blocking_channel_ids,
+            filtering_channel_ids=env.filtering_channel_ids, executor=self._chain_executor
+        )
+        self.shutdown_event = asyncio.Event()
         self.coin = env.coin
         if env.coin.NET == 'mainnet':
             self.ledger = Ledger
@@ -221,7 +107,7 @@ class BlockProcessor:
         self.tx_count = 0

         self.blocks_event = asyncio.Event()
-        self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
+        self.prefetcher = Prefetcher(self.daemon, env.coin, self.blocks_event)
         self.logger = class_logger(__name__, self.__class__.__name__)

         # Meta
@@ -231,14 +117,13 @@ class BlockProcessor:
         self.utxo_cache: Dict[Tuple[bytes, int], Tuple[bytes, int]] = {}

         # Claimtrie cache
-        self.db_op_stack: Optional[RevertableOpStack] = None
+        self.db_op_stack: Optional['RevertableOpStack'] = None

         # self.search_cache = {}
         self.resolve_cache = LRUCache(2**16)
         self.resolve_outputs_cache = LRUCache(2 ** 16)

         self.history_cache = {}
-        self.status_server = StatusServer()

         #################################
         # attributes used for calculating stake activations and takeovers per block
@@ -275,7 +160,6 @@ class BlockProcessor:

         self.removed_claims_to_send_es = set()  # cumulative changes across blocks to send ES
         self.touched_claims_to_send_es = set()
-        self.activation_info_to_send_es: DefaultDict[str, List[TrendingNotification]] = defaultdict(list)

         self.removed_claim_hashes: Set[bytes] = set()  # per block changes
         self.touched_claim_hashes: Set[bytes] = set()
@@ -297,18 +181,8 @@ class BlockProcessor:
         self.pending_transaction_num_mapping: Dict[bytes, int] = {}
         self.pending_transactions: Dict[int, bytes] = {}

-    async def claim_producer(self):
-        if self.db.db_height <= 1:
-            return
-
-        for claim_hash in self.removed_claims_to_send_es:
-            yield 'delete', claim_hash.hex()
-
-        to_update = await asyncio.get_event_loop().run_in_executor(
-            self._sync_reader_executor, self.db.claims_producer, self.touched_claims_to_send_es
-        )
-        for claim in to_update:
-            yield 'update', claim
+        self._stopping = False
+        self._ready_to_stop = asyncio.Event()

     async def run_in_thread_with_lock(self, func, *args):
         # Run in a thread to prevent blocking. Shielded so that
@@ -326,6 +200,30 @@ class BlockProcessor:
             return await asyncio.get_event_loop().run_in_executor(self._chain_executor, func, *args)
         return await asyncio.shield(run_in_thread())

+    async def check_mempool(self):
+        def fetch_mempool(mempool_prefix):
+            return {
+                k.tx_hash: v.raw_tx for (k, v) in mempool_prefix.iterate()
+            }
+
+        def update_mempool(mempool_prefix, to_put, to_delete):
+            for tx_hash, raw_tx in to_put:
+                mempool_prefix.stage_put((tx_hash,), (raw_tx,))
+            for tx_hash, raw_tx in to_delete.items():
+                mempool_prefix.stage_delete((tx_hash,), (raw_tx,))
+
+        current_mempool = await self.run_in_thread_with_lock(fetch_mempool, self.db.prefix_db.mempool_tx)
+
+        _to_put = []
+        for hh in await self.daemon.mempool_hashes():
+            tx_hash = bytes.fromhex(hh)[::-1]
+            if tx_hash in current_mempool:
+                current_mempool.pop(tx_hash)
+            else:
+                _to_put.append((tx_hash, bytes.fromhex(await self.daemon.getrawtransaction(hh))))
+
+        await self.run_in_thread_with_lock(update_mempool, self.db.prefix_db.mempool_tx, _to_put, current_mempool)
+
     async def check_and_advance_blocks(self, raw_blocks):
         """Process the list of raw blocks passed. Detects and handles
         reorgs.
@@ -348,29 +246,12 @@ class BlockProcessor:
                     await self.run_in_thread(self.advance_block, block)
                     await self.flush()

-                    self.logger.info("advanced to %i in %0.3fs", self.height, time.perf_counter() - start)
+                    self.logger.info("writer advanced to %i in %0.3fs", self.height, time.perf_counter() - start)
                     if self.height == self.coin.nExtendedClaimExpirationForkHeight:
                         self.logger.warning(
                             "applying extended claim expiration fork on claims accepted by, %i", self.height
                         )
                         await self.run_in_thread_with_lock(self.db.apply_expiration_extension_fork)
-                    if self.db.first_sync:
-                        self.db.search_index.clear_caches()
-                        self.touched_claims_to_send_es.clear()
-                        self.removed_claims_to_send_es.clear()
-                        self.activation_info_to_send_es.clear()
-                    # TODO: we shouldnt wait on the search index updating before advancing to the next block
-                    if not self.db.first_sync:
-                        await self.db.reload_blocking_filtering_streams()
-                        await self.db.search_index.claim_consumer(self.claim_producer())
-                        await self.db.search_index.apply_filters(self.db.blocked_streams, self.db.blocked_channels,
-                                                                 self.db.filtered_streams, self.db.filtered_channels)
-                        await self.db.search_index.update_trending_score(self.activation_info_to_send_es)
-                        await self._es_caught_up()
-                    self.db.search_index.clear_caches()
-                    self.touched_claims_to_send_es.clear()
-                    self.removed_claims_to_send_es.clear()
-                    self.activation_info_to_send_es.clear()
                     # print("******************\n")
             except:
                 self.logger.exception("advance blocks failed")
@@ -378,12 +259,9 @@ class BlockProcessor:
             processed_time = time.perf_counter() - total_start
             self.block_count_metric.set(self.height)
             self.block_update_time_metric.observe(processed_time)
-            self.status_server.set_height(self.db.fs_height, self.db.db_tip)
             if not self.db.first_sync:
                 s = '' if len(blocks) == 1 else 's'
                 self.logger.info('processed {:,d} block{} in {:.1f}s'.format(len(blocks), s, processed_time))
-            if self._caught_up_event.is_set():
-                await self.mempool.on_block(self.touched_hashXs, self.height)
             self.touched_hashXs.clear()
         elif hprevs[0] != chain[0]:
             min_start_height = max(self.height - self.coin.REORG_LIMIT, 0)
@@ -406,15 +284,6 @@ class BlockProcessor:

                 if self.env.cache_all_claim_txos:
                     await self.db._read_claim_txos()  # TODO: don't do this
-                for touched in self.touched_claims_to_send_es:
-                    if not self.db.get_claim_txo(touched):
-                        self.removed_claims_to_send_es.add(touched)
-                self.touched_claims_to_send_es.difference_update(self.removed_claims_to_send_es)
-                await self.db.search_index.claim_consumer(self.claim_producer())
-                self.db.search_index.clear_caches()
-                self.touched_claims_to_send_es.clear()
-                self.removed_claims_to_send_es.clear()
-                self.activation_info_to_send_es.clear()
                 await self.prefetcher.reset_height(self.height)
                 self.reorg_count_metric.inc()
             except:
@@ -438,7 +307,7 @@ class BlockProcessor:
         def flush():
             self.db.write_db_state()
             if save_undo:
-                self.db.prefix_db.commit(self.height)
+                self.db.prefix_db.commit(self.height, self.tip)
             else:
                 self.db.prefix_db.unsafe_commit()
             self.clear_after_advance_or_reorg()
@@ -749,8 +618,6 @@ class BlockProcessor:
             self.support_txo_to_claim.pop(support_txo_to_clear)
         self.support_txos_by_claim[claim_hash].clear()
         self.support_txos_by_claim.pop(claim_hash)
-        if claim_hash.hex() in self.activation_info_to_send_es:
-            self.activation_info_to_send_es.pop(claim_hash.hex())
         if normalized_name.startswith('@'):  # abandon a channel, invalidate signatures
             self._invalidate_channel_signatures(claim_hash)
@@ -1323,10 +1190,6 @@ class BlockProcessor:
             self.touched_claim_hashes.add(controlling.claim_hash)
             self.touched_claim_hashes.add(winning)

-    def _add_claim_activation_change_notification(self, claim_id: str, height: int, prev_amount: int,
-                                                  new_amount: int):
-        self.activation_info_to_send_es[claim_id].append(TrendingNotification(height, prev_amount, new_amount))
-
     def _get_cumulative_update_ops(self, height: int):
         # update the last takeover height for names with takeovers
         for name in self.taken_over_names:
@@ -1413,8 +1276,8 @@ class BlockProcessor:
                     or touched in self.pending_support_amount_change:
                 # exclude sending notifications for claims/supports that activated but
                 # weren't added/spent in this block
-                self._add_claim_activation_change_notification(
-                    touched.hex(), height, prev_effective_amount, new_effective_amount
+                self.db.prefix_db.trending_notification.stage_put(
+                    (height, touched), (prev_effective_amount, new_effective_amount)
                 )

         for channel_hash, count in self.pending_channel_counts.items():
@@ -1454,6 +1317,12 @@ class BlockProcessor:
         spent_claims = {}
         txos = Transaction(tx.raw).outputs

+        # clean up mempool, delete txs that were already in mempool/staged to be added
+        # leave txs in mempool that werent in the block
+        mempool_tx = self.db.prefix_db.mempool_tx.get_pending(tx_hash)
+        if mempool_tx:
+            self.db.prefix_db.mempool_tx.stage_delete((tx_hash,), mempool_tx)
+
         self.db.prefix_db.tx.stage_put(key_args=(tx_hash,), value_args=(tx.raw,))
         self.db.prefix_db.tx_num.stage_put(key_args=(tx_hash,), value_args=(tx_count,))
         self.db.prefix_db.tx_hash.stage_put(key_args=(tx_count,), value_args=(tx_hash,))
@@ -1513,6 +1382,8 @@ class BlockProcessor:
         # update effective amount and update sets of touched and deleted claims
         self._get_cumulative_update_ops(height)

+        self.db.prefix_db.touched_hashX.stage_put((height,), (list(sorted(self.touched_hashXs)),))
+
         self.db.prefix_db.tx_count.stage_put(key_args=(height,), value_args=(tx_count,))

         for hashX, new_history in self.hashXs_by_tx.items():
@@ -1534,16 +1405,6 @@ class BlockProcessor:
         self.db.headers.append(block.header)
         self.tip = self.coin.header_hash(block.header)

-        min_height = self.db.min_undo_height(self.db.db_height)
-        if min_height > 0:  # delete undos for blocks deep enough they can't be reorged
-            undo_to_delete = list(self.db.prefix_db.undo.iterate(start=(0,), stop=(min_height,)))
-            for (k, v) in undo_to_delete:
-                self.db.prefix_db.undo.stage_delete((k,), (v,))
-            touched_or_deleted_to_delete = list(self.db.prefix_db.touched_or_deleted.iterate(
-                start=(0,), stop=(min_height,))
-            )
-            for (k, v) in touched_or_deleted_to_delete:
-                self.db.prefix_db.touched_or_deleted.stage_delete(k, v)
-
         self.db.fs_height = self.height
         self.db.fs_tx_count = self.tx_count
@@ -1582,7 +1443,6 @@ class BlockProcessor:
         self.utxo_cache.clear()
        self.hashXs_by_tx.clear()
        self.history_cache.clear()
-        self.mempool.notified_mempool_txs.clear()
        self.removed_claim_hashes.clear()
        self.touched_claim_hashes.clear()
        self.pending_reposted.clear()
@@ -1594,6 +1454,7 @@ class BlockProcessor:
         self.pending_support_amount_change.clear()
         self.resolve_cache.clear()
         self.resolve_outputs_cache.clear()
+        self.touched_hashXs.clear()

     async def backup_block(self):
         assert len(self.db.prefix_db._op_stack) == 0
|
||||||
self.logger.info("backup block %i", self.height)
|
self.logger.info("backup block %i", self.height)
|
||||||
# Check and update self.tip
|
# Check and update self.tip
|
||||||
|
|
||||||
self.db.headers.pop()
|
|
||||||
self.db.tx_counts.pop()
|
self.db.tx_counts.pop()
|
||||||
|
reverted_block_hash = self.coin.header_hash(self.db.headers.pop())
|
||||||
self.tip = self.coin.header_hash(self.db.headers[-1])
|
self.tip = self.coin.header_hash(self.db.headers[-1])
|
||||||
if self.env.cache_all_tx_hashes:
|
if self.env.cache_all_tx_hashes:
|
||||||
while len(self.db.total_transactions) > self.db.tx_counts[-1]:
|
while len(self.db.total_transactions) > self.db.tx_counts[-1]:
|
||||||
|
@ -1647,7 +1508,7 @@ class BlockProcessor:
|
||||||
self.db.last_flush_tx_count = self.db.fs_tx_count
|
self.db.last_flush_tx_count = self.db.fs_tx_count
|
||||||
|
|
||||||
def rollback():
|
def rollback():
|
||||||
self.db.prefix_db.rollback(self.height + 1)
|
self.db.prefix_db.rollback(self.height + 1, reverted_block_hash)
|
||||||
self.db.es_sync_height = self.height
|
self.db.es_sync_height = self.height
|
||||||
self.db.write_db_state()
|
self.db.write_db_state()
|
||||||
self.db.prefix_db.unsafe_commit()
|
self.db.prefix_db.unsafe_commit()
|
||||||
|
@ -1701,32 +1562,38 @@ class BlockProcessor:
|
||||||
self.touched_hashXs.add(hashX)
|
self.touched_hashXs.add(hashX)
|
||||||
return hashX
|
return hashX
|
||||||
|
|
||||||
async def _process_prefetched_blocks(self):
|
async def process_blocks_and_mempool_forever(self):
|
||||||
"""Loop forever processing blocks as they arrive."""
|
"""Loop forever processing blocks as they arrive."""
|
||||||
while True:
|
try:
|
||||||
if self.height == self.daemon.cached_height():
|
while not self._stopping:
|
||||||
if not self._caught_up_event.is_set():
|
if self.height == self.daemon.cached_height():
|
||||||
await self._first_caught_up()
|
if not self._caught_up_event.is_set():
|
||||||
self._caught_up_event.set()
|
await self._first_caught_up()
|
||||||
await self.blocks_event.wait()
|
self._caught_up_event.set()
|
||||||
self.blocks_event.clear()
|
try:
|
||||||
blocks = self.prefetcher.get_prefetched_blocks()
|
await asyncio.wait_for(self.blocks_event.wait(), 0.1)
|
||||||
try:
|
except asyncio.TimeoutError:
|
||||||
await self.check_and_advance_blocks(blocks)
|
pass
|
||||||
except Exception:
|
self.blocks_event.clear()
|
||||||
self.logger.exception("error while processing txs")
|
blocks = self.prefetcher.get_prefetched_blocks()
|
||||||
raise
|
if self._stopping:
|
||||||
|
break
|
||||||
|
if not blocks:
|
||||||
|
try:
|
||||||
|
await self.check_mempool()
|
||||||
|
await self.run_in_thread_with_lock(self.db.prefix_db.unsafe_commit)
|
||||||
|
except Exception:
|
||||||
|
self.logger.exception("error while updating mempool txs")
|
||||||
|
raise
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
await self.check_and_advance_blocks(blocks)
|
||||||
|
except Exception:
|
||||||
|
self.logger.exception("error while processing txs")
|
||||||
|
raise
|
||||||
|
finally:
|
||||||
|
self._ready_to_stop.set()
|
||||||
|
|
||||||
async def _es_caught_up(self):
|
|
||||||
self.db.es_sync_height = self.height
|
|
||||||
|
|
||||||
def flush():
|
|
||||||
assert len(self.db.prefix_db._op_stack) == 0
|
|
||||||
self.db.write_db_state()
|
|
||||||
self.db.prefix_db.unsafe_commit()
|
|
||||||
self.db.assert_db_state()
|
|
||||||
|
|
||||||
await self.run_in_thread_with_lock(flush)
|
|
||||||
|
|
||||||
async def _first_caught_up(self):
|
async def _first_caught_up(self):
|
||||||
self.logger.info(f'caught up to height {self.height}')
|
self.logger.info(f'caught up to height {self.height}')
|
||||||
|
@@ -1747,6 +1614,13 @@ class BlockProcessor:
                              f'height {self.height:,d}, halting here.')
             self.shutdown_event.set()

+    async def open(self):
+        self.db.open_db()
+        self.height = self.db.db_height
+        self.tip = self.db.db_tip
+        self.tx_count = self.db.db_tx_count
+        await self.db.initialize_caches()
+
     async def fetch_and_process_blocks(self, caught_up_event):
         """Fetch, process and index blocks from the daemon.
@@ -1762,16 +1636,9 @@ class BlockProcessor:

         self._caught_up_event = caught_up_event
         try:
-            self.db.open_db()
-            self.height = self.db.db_height
-            self.tip = self.db.db_tip
-            self.tx_count = self.db.db_tx_count
-            self.status_server.set_height(self.db.fs_height, self.db.db_tip)
-            await self.db.initialize_caches()
-            await self.db.search_index.start()
             await asyncio.wait([
                 self.prefetcher.main_loop(self.height),
-                self._process_prefetched_blocks()
+                self.process_blocks_and_mempool_forever()
             ])
         except asyncio.CancelledError:
             raise
@@ -1779,9 +1646,49 @@ class BlockProcessor:
             self.logger.exception("Block processing failed!")
             raise
         finally:
-            self.status_server.stop()
             # Shut down block processing
             self.logger.info('closing the DB for a clean shutdown...')
-            self._sync_reader_executor.shutdown(wait=True)
             self._chain_executor.shutdown(wait=True)
             self.db.close()

+    async def start(self):
+        self._stopping = False
+        env = self.env
+        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
+        self.logger.info(f'software version: {lbry.__version__}')
+        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
+        self.logger.info(f'event loop policy: {env.loop_policy}')
+        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')
+
+        await self.daemon.height()
+
+        def _start_cancellable(run, *args):
+            _flag = asyncio.Event()
+            self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
+            return _flag.wait()
+
+        await _start_cancellable(self.fetch_and_process_blocks)
+
+    async def stop(self):
+        self._stopping = True
+        await self._ready_to_stop.wait()
+        for task in reversed(self.cancellable_tasks):
+            task.cancel()
+        await asyncio.wait(self.cancellable_tasks)
+        self.shutdown_event.set()
+        await self.daemon.close()
+
+    def run(self):
+        loop = asyncio.get_event_loop()
+
+        def __exit():
+            raise SystemExit()
+        try:
+            loop.add_signal_handler(signal.SIGINT, __exit)
+            loop.add_signal_handler(signal.SIGTERM, __exit)
+            loop.run_until_complete(self.start())
+            loop.run_until_complete(self.shutdown_event.wait())
+        except (SystemExit, KeyboardInterrupt):
+            pass
+        finally:
+            loop.run_until_complete(self.stop())
lbry/wallet/server/chain_reader.py (new file, 236 lines)
@ -0,0 +1,236 @@
|
||||||
|
import signal
|
||||||
|
import logging
|
||||||
|
import asyncio
|
||||||
|
from concurrent.futures.thread import ThreadPoolExecutor
|
||||||
|
import typing
|
||||||
|
|
||||||
|
import lbry
|
||||||
|
from lbry.wallet.server.mempool import MemPool
|
||||||
|
from lbry.wallet.server.db.prefixes import DBState
|
||||||
|
from lbry.wallet.server.udp import StatusServer
|
||||||
|
from lbry.wallet.server.db.db import HubDB
|
||||||
|
from lbry.wallet.server.db.elasticsearch.notifier import ElasticNotifierClientProtocol
|
||||||
|
from lbry.wallet.server.session import LBRYSessionManager
|
||||||
|
from lbry.prometheus import PrometheusServer
|
||||||
|
|
||||||
|
|
||||||
|
class BlockchainReader:
|
||||||
|
def __init__(self, env, secondary_name: str, thread_workers: int = 1, thread_prefix: str = 'blockchain-reader'):
|
||||||
|
self.env = env
|
||||||
|
self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)
|
||||||
|
self.shutdown_event = asyncio.Event()
|
||||||
|
self.cancellable_tasks = []
|
||||||
|
self._executor = ThreadPoolExecutor(thread_workers, thread_name_prefix=thread_prefix)
|
||||||
|
|
||||||
|
self.db = HubDB(
|
||||||
|
env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes,
|
||||||
|
secondary_name=secondary_name, max_open_files=-1, blocking_channel_ids=env.blocking_channel_ids,
|
||||||
|
filtering_channel_ids=env.filtering_channel_ids, executor=self._executor
|
||||||
|
)
|
||||||
|
self.last_state: typing.Optional[DBState] = None
|
||||||
|
self._refresh_interval = 0.1
|
||||||
|
self._lock = asyncio.Lock()
|
||||||
|
|
||||||
|
def _detect_changes(self):
|
||||||
|
try:
|
||||||
|
self.db.prefix_db.try_catch_up_with_primary()
|
||||||
|
except:
|
||||||
|
self.log.exception('failed to update secondary db')
|
||||||
|
raise
|
||||||
|
state = self.db.prefix_db.db_state.get()
|
||||||
|
if not state or state.height <= 0:
|
||||||
|
return
|
||||||
|
# if state and self.last_state and self.db.headers and self.last_state.tip == self.db.coin.header_hash(self.db.headers[-1]):
|
||||||
|
# return
|
||||||
|
if self.last_state and self.last_state.height > state.height:
|
||||||
|
self.log.warning("reorg detected, waiting until the writer has flushed the new blocks to advance")
|
||||||
|
return
|
||||||
|
last_height = 0 if not self.last_state else self.last_state.height
|
||||||
|
if self.last_state:
|
||||||
|
while True:
|
||||||
|
if self.db.headers[-1] == self.db.prefix_db.header.get(last_height, deserialize_value=False):
|
||||||
|
self.log.info("connects to block %i", last_height)
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
self.log.warning("disconnect block %i", last_height)
|
||||||
|
self.unwind()
|
||||||
|
last_height -= 1
|
||||||
|
self.db.read_db_state()
|
||||||
|
if not self.last_state or last_height < state.height:
|
||||||
|
for height in range(last_height + 1, state.height + 1):
|
||||||
|
self.log.info("advancing to %i", height)
|
||||||
|
self.advance(height)
|
||||||
|
self.clear_caches()
|
||||||
|
self.last_state = state
|
||||||
|
|
||||||
|
async def poll_for_changes(self):
|
||||||
|
await asyncio.get_event_loop().run_in_executor(self._executor, self._detect_changes)
|
||||||
|
|
||||||
|
async def refresh_blocks_forever(self, synchronized: asyncio.Event):
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
async with self._lock:
|
||||||
|
await self.poll_for_changes()
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
raise
|
||||||
|
except:
|
||||||
|
self.log.exception("blockchain reader main loop encountered an unexpected error")
|
||||||
|
raise
|
||||||
|
await asyncio.sleep(self._refresh_interval)
|
||||||
|
synchronized.set()
|
||||||
|
|
||||||
|
def clear_caches(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def advance(self, height: int):
|
||||||
|
tx_count = self.db.prefix_db.tx_count.get(height).tx_count
|
||||||
|
assert tx_count not in self.db.tx_counts, f'boom {tx_count} in {len(self.db.tx_counts)} tx counts'
|
||||||
|
assert len(self.db.tx_counts) == height, f"{len(self.db.tx_counts)} != {height}"
|
||||||
|
self.db.tx_counts.append(tx_count)
|
||||||
|
self.db.headers.append(self.db.prefix_db.header.get(height, deserialize_value=False))
|
||||||
|
|
||||||
|
def unwind(self):
|
||||||
|
self.db.tx_counts.pop()
|
||||||
|
self.db.headers.pop()
|
||||||
|
|
||||||
|
|
||||||
|
class BlockchainReaderServer(BlockchainReader):
|
||||||
|
def __init__(self, env):
|
||||||
|
super().__init__(env, 'lbry-reader', thread_workers=1, thread_prefix='hub-worker')
|
||||||
|
self.history_cache = {}
|
||||||
|
self.resolve_outputs_cache = {}
|
||||||
|
self.resolve_cache = {}
|
||||||
|
self.notifications_to_send = []
|
||||||
|
self.status_server = StatusServer()
|
||||||
|
self.daemon = env.coin.DAEMON(env.coin, env.daemon_url) # only needed for broadcasting txs
|
||||||
|
self.prometheus_server: typing.Optional[PrometheusServer] = None
|
||||||
|
self.mempool = MemPool(self.env.coin, self.db)
|
||||||
|
self.session_manager = LBRYSessionManager(
|
||||||
|
env, self.db, self.mempool, self.history_cache, self.resolve_cache,
|
||||||
|
self.resolve_outputs_cache, self.daemon,
|
||||||
|
self.shutdown_event,
|
||||||
|
on_available_callback=self.status_server.set_available,
|
||||||
|
on_unavailable_callback=self.status_server.set_unavailable
|
||||||
|
)
|
||||||
|
self.mempool.session_manager = self.session_manager
|
||||||
|
self.es_notifications = asyncio.Queue()
|
||||||
|
self.es_notification_client = ElasticNotifierClientProtocol(self.es_notifications)
|
||||||
|
self.synchronized = asyncio.Event()
|
||||||
|
self._es_height = None
|
||||||
|
self._es_block_hash = None
|
||||||
|
|
||||||
|
def clear_caches(self):
|
||||||
|
self.history_cache.clear()
|
||||||
|
self.resolve_outputs_cache.clear()
|
||||||
|
self.resolve_cache.clear()
|
||||||
|
        # self.clear_search_cache()
        # self.mempool.notified_mempool_txs.clear()

    def clear_search_cache(self):
        self.session_manager.search_index.clear_caches()

    def advance(self, height: int):
        super().advance(height)
        touched_hashXs = self.db.prefix_db.touched_hashX.get(height).touched_hashXs
        self.notifications_to_send.append((set(touched_hashXs), height))

    def _detect_changes(self):
        super()._detect_changes()
        self.mempool.raw_mempool.clear()
        self.mempool.raw_mempool.update(
            {k.tx_hash: v.raw_tx for k, v in self.db.prefix_db.mempool_tx.iterate()}
        )

    async def poll_for_changes(self):
        await super().poll_for_changes()
        self.status_server.set_height(self.db.fs_height, self.db.db_tip)
        if self.notifications_to_send:
            for (touched, height) in self.notifications_to_send:
                await self.mempool.on_block(touched, height)
                self.log.info("reader advanced to %i", height)
                if self._es_height == self.db.db_height:
                    self.synchronized.set()
        await self.mempool.refresh_hashes(self.db.db_height)
        self.notifications_to_send.clear()

    async def receive_es_notifications(self, synchronized: asyncio.Event):
        await asyncio.get_event_loop().create_connection(
            lambda: self.es_notification_client, '127.0.0.1', self.env.elastic_notifier_port
        )
        synchronized.set()
        try:
            while True:
                self._es_height, self._es_block_hash = await self.es_notifications.get()
                self.clear_search_cache()
                if self.last_state and self._es_block_hash == self.last_state.tip:
                    self.synchronized.set()
                    self.log.info("es and reader are in sync")
                else:
                    self.log.info("es and reader are not yet in sync %s vs %s", self._es_height, self.db.db_height)
        finally:
            self.es_notification_client.close()

    async def start(self):
        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.log.info(f'software version: {lbry.__version__}')
        self.log.info(f'supported protocol versions: {min_str}-{max_str}')
        self.log.info(f'event loop policy: {env.loop_policy}')
        self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks')
        await self.daemon.height()

        def _start_cancellable(run, *args):
            _flag = asyncio.Event()
            self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
            return _flag.wait()

        self.db.open_db()
        await self.db.initialize_caches()

        self.last_state = self.db.read_db_state()

        await self.start_prometheus()
        if self.env.udp_port:
            await self.status_server.start(
                0, bytes.fromhex(self.env.coin.GENESIS_HASH)[::-1], self.env.country,
                self.env.host, self.env.udp_port, self.env.allow_lan_udp
            )
        await _start_cancellable(self.receive_es_notifications)
        await _start_cancellable(self.refresh_blocks_forever)
        await self.session_manager.search_index.start()
        await _start_cancellable(self.session_manager.serve, self.mempool)

    async def stop(self):
        self.status_server.stop()
        async with self._lock:
            for task in reversed(self.cancellable_tasks):
                task.cancel()
            await asyncio.wait(self.cancellable_tasks)
        self.session_manager.search_index.stop()
        self.db.close()
        if self.prometheus_server:
            await self.prometheus_server.stop()
            self.prometheus_server = None
        await self.daemon.close()
        self._executor.shutdown(wait=True)
        self.shutdown_event.set()

    def run(self):
        loop = asyncio.get_event_loop()

        def __exit():
            raise SystemExit()
        try:
            loop.add_signal_handler(signal.SIGINT, __exit)
            loop.add_signal_handler(signal.SIGTERM, __exit)
            loop.run_until_complete(self.start())
            loop.run_until_complete(self.shutdown_event.wait())
        except (SystemExit, KeyboardInterrupt):
            pass
        finally:
            loop.run_until_complete(self.stop())

    async def start_prometheus(self):
        if not self.prometheus_server and self.env.prometheus_port:
            self.prometheus_server = PrometheusServer()
            await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port)
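The `_start_cancellable` helper above launches a coroutine as a cancellable background task and returns a waiter on the asyncio.Event that the task sets once it is ready. A minimal standalone sketch of that readiness-handshake pattern (the names here are illustrative, not taken from the diff):

    import asyncio

    async def worker(started: asyncio.Event):
        # ... open connections, warm caches ...
        started.set()                      # tell the caller we are ready
        await asyncio.Event().wait()       # then keep running until cancelled

    async def main():
        tasks = []

        def start_cancellable(run, *args):
            flag = asyncio.Event()
            tasks.append(asyncio.ensure_future(run(*args, flag)))
            return flag.wait()

        await start_cancellable(worker)    # resumes once worker() signalled readiness
        for t in reversed(tasks):
            t.cancel()
        await asyncio.gather(*tasks, return_exceptions=True)

    asyncio.run(main())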
@@ -2,33 +2,55 @@ import logging
 import traceback
 import argparse
 from lbry.wallet.server.env import Env
-from lbry.wallet.server.server import Server
+from lbry.wallet.server.block_processor import BlockProcessor
+from lbry.wallet.server.chain_reader import BlockchainReaderServer
+from lbry.wallet.server.db.elasticsearch.sync import ElasticWriter


-def get_argument_parser():
+def get_args_and_setup_logging(name):
     parser = argparse.ArgumentParser(
-        prog="lbry-hub"
+        prog=name
     )
     Env.contribute_to_arg_parser(parser)
-    return parser
-
-
-def main():
-    parser = get_argument_parser()
     args = parser.parse_args()
     logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
-    logging.info('lbry.server starting')
     logging.getLogger('aiohttp').setLevel(logging.WARNING)
     logging.getLogger('elasticsearch').setLevel(logging.WARNING)
+    return args
+
+
+def run_writer_forever():
+    args = get_args_and_setup_logging('lbry-hub-writer')
     try:
-        server = Server(Env.from_arg_parser(args))
+        block_processor = BlockProcessor(Env.from_arg_parser(args))
+        block_processor.run()
+    except Exception:
+        traceback.print_exc()
+        logging.critical('block processor terminated abnormally')
+    else:
+        logging.info('block processor terminated normally')
+
+
+def run_server_forever():
+    args = get_args_and_setup_logging('lbry-hub-server')
+    try:
+        server = BlockchainReaderServer(Env.from_arg_parser(args))
         server.run()
     except Exception:
         traceback.print_exc()
-        logging.critical('lbry.server terminated abnormally')
+        logging.critical('server terminated abnormally')
     else:
-        logging.info('lbry.server terminated normally')
+        logging.info('server terminated normally')


-if __name__ == "__main__":
-    main()
+def run_es_sync_forever():
+    args = get_args_and_setup_logging('lbry-hub-elastic-sync')
+    try:
+        server = ElasticWriter(Env.from_arg_parser(args))
+        server.run()
+    except Exception:
+        traceback.print_exc()
+        logging.critical('es writer terminated abnormally')
+    else:
+        logging.info('es writer terminated normally')
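The single lbry-hub entry point is split here into three long-running processes (block processor, reader server, ES sync). For illustration only, this snippet is not part of the diff, any one of them can be launched directly:

    # hypothetical manual launcher; deployments would normally expose these as console scripts
    if __name__ == "__main__":
        run_writer_forever()        # or run_server_forever() / run_es_sync_forever()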
@@ -12,9 +12,7 @@ from lbry.wallet.server.util import cachedproperty, subclasses
 from lbry.wallet.server.hash import Base58, hash160, double_sha256, hash_to_hex_str, HASHX_LEN
 from lbry.wallet.server.daemon import Daemon, LBCDaemon
 from lbry.wallet.server.script import ScriptPubKey, OpCodes
-from lbry.wallet.server.leveldb import LevelDB
 from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
-from lbry.wallet.server.block_processor import BlockProcessor


 Block = namedtuple("Block", "raw header transactions")
@@ -38,9 +36,7 @@ class Coin:
     SESSIONCLS = LBRYElectrumX
     DESERIALIZER = lib_tx.Deserializer
     DAEMON = Daemon
-    BLOCK_PROCESSOR = BlockProcessor
     SESSION_MANAGER = LBRYSessionManager
-    DB = LevelDB
     HEADER_VALUES = [
         'version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce'
     ]
@@ -243,7 +239,6 @@ class LBC(Coin):
     SESSIONCLS = LBRYElectrumX
     SESSION_MANAGER = LBRYSessionManager
     DESERIALIZER = DeserializerSegWit
-    DB = LevelDB
     NAME = "LBRY"
     SHORTNAME = "LBC"
     NET = "mainnet"
@@ -25,7 +25,7 @@ class DB_PREFIXES(enum.Enum):
     reposted_claim = b'W'

     undo = b'M'
-    claim_diff = b'Y'
+    touched_or_deleted = b'Y'

     tx = b'B'
     block_hash = b'C'
@@ -39,4 +39,7 @@ class DB_PREFIXES(enum.Enum):
     db_state = b's'
     channel_count = b'Z'
     support_amount = b'a'
-    block_txs = b'b'
+    block_tx = b'b'
+    trending_notifications = b'c'
+    mempool_tx = b'd'
+    touched_hashX = b'e'
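Each member of DB_PREFIXES is a one-byte column prefix; a row key on disk is that byte followed by the struct-packed key fields (see PrefixRow further down). A small sketch of that layout, assuming a 32-byte tx hash for the new mempool_tx column:

    import struct

    prefix = b'd'                                        # DB_PREFIXES.mempool_tx.value
    tx_hash = bytes(32)                                  # placeholder hash for illustration
    key = prefix + struct.Struct(b'>32s').pack(tx_hash)
    assert key[:1] == prefix and len(key) == 33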
@@ -1,4 +1,6 @@
 import typing
+from typing import Optional
+from lbry.error import ResolveCensoredError

 CLAIM_TYPES = {
     'stream': 1,
@@ -445,3 +447,31 @@ class ResolveResult(typing.NamedTuple):
     channel_hash: typing.Optional[bytes]
     reposted_claim_hash: typing.Optional[bytes]
     signature_valid: typing.Optional[bool]
+
+
+class TrendingNotification(typing.NamedTuple):
+    height: int
+    prev_amount: int
+    new_amount: int
+
+
+class UTXO(typing.NamedTuple):
+    tx_num: int
+    tx_pos: int
+    tx_hash: bytes
+    height: int
+    value: int
+
+
+OptionalResolveResultOrError = Optional[typing.Union[ResolveResult, ResolveCensoredError, LookupError, ValueError]]
+
+
+class ExpandedResolveResult(typing.NamedTuple):
+    stream: OptionalResolveResultOrError
+    channel: OptionalResolveResultOrError
+    repost: OptionalResolveResultOrError
+    reposted_channel: OptionalResolveResultOrError
+
+
+class DBError(Exception):
+    """Raised on general DB errors generally indicating corruption."""
File diff suppressed because it is too large.

lbry/wallet/server/db/elasticsearch/common.py (new file, 150 lines)
@@ -0,0 +1,150 @@
from decimal import Decimal
from typing import Iterable

from lbry.error import TooManyClaimSearchParametersError
from lbry.schema.tags import clean_tags
from lbry.schema.url import normalize_name
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
from lbry.wallet.server.db.elasticsearch.constants import REPLACEMENTS, FIELDS, TEXT_FIELDS, RANGE_FIELDS


def expand_query(**kwargs):
    if "amount_order" in kwargs:
        kwargs["limit"] = 1
        kwargs["order_by"] = "effective_amount"
        kwargs["offset"] = int(kwargs["amount_order"]) - 1
    if 'name' in kwargs:
        kwargs['name'] = normalize_name(kwargs.pop('name'))
    if kwargs.get('is_controlling') is False:
        kwargs.pop('is_controlling')
    query = {'must': [], 'must_not': []}
    collapse = None
    if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
        kwargs['fee_currency'] = kwargs['fee_currency'].upper()
    for key, value in kwargs.items():
        key = key.replace('claim.', '')
        many = key.endswith('__in') or isinstance(value, list)
        if many and len(value) > 2048:
            raise TooManyClaimSearchParametersError(key, 2048)
        if many:
            key = key.replace('__in', '')
            value = list(filter(None, value))
        if value is None or isinstance(value, list) and len(value) == 0:
            continue
        key = REPLACEMENTS.get(key, key)
        if key in FIELDS:
            partial_id = False
            if key == 'claim_type':
                if isinstance(value, str):
                    value = CLAIM_TYPES[value]
                else:
                    value = [CLAIM_TYPES[claim_type] for claim_type in value]
            elif key == 'stream_type':
                value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
            if key == '_id':
                if isinstance(value, Iterable):
                    value = [item[::-1].hex() for item in value]
                else:
                    value = value[::-1].hex()
            if not many and key in ('_id', 'claim_id') and len(value) < 20:
                partial_id = True
            if key in ('signature_valid', 'has_source'):
                continue  # handled later
            if key in TEXT_FIELDS:
                key += '.keyword'
            ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
            if partial_id:
                query['must'].append({"prefix": {"claim_id": value}})
            elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
                operator_length = 2 if value[:2] in ops else 1
                operator, value = value[:operator_length], value[operator_length:]
                if key == 'fee_amount':
                    value = str(Decimal(value)*1000)
                query['must'].append({"range": {key: {ops[operator]: value}}})
            elif many:
                query['must'].append({"terms": {key: value}})
            else:
                if key == 'fee_amount':
                    value = str(Decimal(value)*1000)
                query['must'].append({"term": {key: {"value": value}}})
        elif key == 'not_channel_ids':
            for channel_id in value:
                query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
                query['must_not'].append({"term": {'_id': channel_id}})
        elif key == 'channel_ids':
            query['must'].append({"terms": {'channel_id.keyword': value}})
        elif key == 'claim_ids':
            query['must'].append({"terms": {'claim_id.keyword': value}})
        elif key == 'media_types':
            query['must'].append({"terms": {'media_type.keyword': value}})
        elif key == 'any_languages':
            query['must'].append({"terms": {'languages': clean_tags(value)}})
        elif key == 'any_languages':
            query['must'].append({"terms": {'languages': value}})
        elif key == 'all_languages':
            query['must'].extend([{"term": {'languages': tag}} for tag in value])
        elif key == 'any_tags':
            query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
        elif key == 'all_tags':
            query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
        elif key == 'not_tags':
            query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
        elif key == 'not_claim_id':
            query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
        elif key == 'limit_claims_per_channel':
            collapse = ('channel_id.keyword', value)
    if kwargs.get('has_channel_signature'):
        query['must'].append({"exists": {"field": "signature"}})
        if 'signature_valid' in kwargs:
            query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
    elif 'signature_valid' in kwargs:
        query.setdefault('should', [])
        query["minimum_should_match"] = 1
        query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
        query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
    if 'has_source' in kwargs:
        query.setdefault('should', [])
        query["minimum_should_match"] = 1
        is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
        query['should'].append(
            {"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
        query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
        query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
    if kwargs.get('text'):
        query['must'].append(
            {"simple_query_string":
                 {"query": kwargs["text"], "fields": [
                     "claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
                 ]}})
    query = {
        "_source": {"excludes": ["description", "title"]},
        'query': {'bool': query},
        "sort": [],
    }
    if "limit" in kwargs:
        query["size"] = kwargs["limit"]
    if 'offset' in kwargs:
        query["from"] = kwargs["offset"]
    if 'order_by' in kwargs:
        if isinstance(kwargs["order_by"], str):
            kwargs["order_by"] = [kwargs["order_by"]]
        for value in kwargs['order_by']:
            if 'trending_group' in value:
                # fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
                continue
            is_asc = value.startswith('^')
            value = value[1:] if is_asc else value
            value = REPLACEMENTS.get(value, value)
            if value in TEXT_FIELDS:
                value += '.keyword'
            query['sort'].append({value: "asc" if is_asc else "desc"})
    if collapse:
        query["collapse"] = {
            "field": collapse[0],
            "inner_hits": {
                "name": collapse[0],
                "size": collapse[1],
                "sort": query["sort"]
            }
        }
    return query
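A usage sketch for expand_query (assuming the module above is importable from an installed checkout); the returned dict is a standard Elasticsearch bool query that can be passed as a search body:

    from lbry.wallet.server.db.elasticsearch.common import expand_query

    body = expand_query(name='@lbry', claim_type='channel', limit=5, order_by='^name')
    # body has roughly the shape {'_source': ..., 'query': {'bool': ...}, 'sort': [...], 'size': 5}
    # and could be used as: await es.search(index='claims', body=body)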
lbry/wallet/server/db/elasticsearch/fast_ar_trending.py (new file, 117 lines)
@@ -0,0 +1,117 @@
FAST_AR_TRENDING_SCRIPT = """
double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }

double logsumexp(double x, double y)
{
    double top;
    if(x > y)
        top = x;
    else
        top = y;
    double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
    return(result);
}

double logdiffexp(double big, double small)
{
    return big + Math.log(1.0 - Math.exp(small - big));
}

double squash(double x)
{
    if(x < 0.0)
        return -Math.log(1.0 - x);
    else
        return Math.log(x + 1.0);
}

double unsquash(double x)
{
    if(x < 0.0)
        return 1.0 - Math.exp(-x);
    else
        return Math.exp(x) - 1.0;
}

double log_to_squash(double x)
{
    return logsumexp(x, 0.0);
}

double squash_to_log(double x)
{
    //assert x > 0.0;
    return logdiffexp(x, 0.0);
}

double squashed_add(double x, double y)
{
    // squash(unsquash(x) + unsquash(y)) but avoiding overflow.
    // Cases where the signs are the same
    if (x < 0.0 && y < 0.0)
        return -logsumexp(-x, logdiffexp(-y, 0.0));
    if (x >= 0.0 && y >= 0.0)
        return logsumexp(x, logdiffexp(y, 0.0));
    // Where the signs differ
    if (x >= 0.0 && y < 0.0)
        if (Math.abs(x) >= Math.abs(y))
            return logsumexp(0.0, logdiffexp(x, -y));
        else
            return -logsumexp(0.0, logdiffexp(-y, x));
    if (x < 0.0 && y >= 0.0)
    {
        // Addition is commutative, hooray for new math
        return squashed_add(y, x);
    }
    return 0.0;
}

double squashed_multiply(double x, double y)
{
    // squash(unsquash(x)*unsquash(y)) but avoiding overflow.
    int sign;
    if(x*y >= 0.0)
        sign = 1;
    else
        sign = -1;
    return sign*logsumexp(squash_to_log(Math.abs(x))
            + squash_to_log(Math.abs(y)), 0.0);
}

// Squashed inflated units
double inflateUnits(int height) {
    double timescale = 576.0; // Half life of 400 = e-folding time of a day
                              // by coincidence, so may as well go with it
    return log_to_squash(height / timescale);
}

double spikePower(double newAmount) {
    if (newAmount < 50.0) {
        return(0.5);
    } else if (newAmount < 85.0) {
        return(newAmount / 100.0);
    } else {
        return(0.85);
    }
}

double spikeMass(double oldAmount, double newAmount) {
    double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
    double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
    double power = spikePower(newAmount);
    if (oldAmount > newAmount) {
        -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
    } else {
        Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
    }
}

for (i in params.src.changes) {
    double units = inflateUnits(i.height);
    if (ctx._source.trending_score == null) {
        ctx._source.trending_score = 0.0;
    }
    double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
    ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
}
"""
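For readers following the math, a rough Python translation of the squash/unsquash helpers used by the painless script above (illustrative only; the script remains the authoritative version):

    import math

    def squash(x: float) -> float:          # sign-preserving log compression
        return -math.log(1.0 - x) if x < 0.0 else math.log(x + 1.0)

    def unsquash(x: float) -> float:        # inverse of squash
        return 1.0 - math.exp(-x) if x < 0.0 else math.exp(x) - 1.0

    def soften_lbc(lbc: float) -> float:    # cube root, as in softenLBC
        return lbc ** (1.0 / 3.0)

    assert abs(unsquash(squash(123.0)) - 123.0) < 1e-9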
lbry/wallet/server/db/elasticsearch/notifier.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import struct
import typing
import asyncio
import logging


log = logging.getLogger(__name__)


class ElasticNotifierProtocol(asyncio.Protocol):
    """notifies the reader when ES has written updates"""

    def __init__(self, listeners):
        self._listeners = listeners
        self.transport: typing.Optional[asyncio.Transport] = None

    def connection_made(self, transport):
        self.transport = transport
        self._listeners.append(self)
        log.info("got es notifier connection")

    def connection_lost(self, exc) -> None:
        self._listeners.remove(self)
        self.transport = None

    def send_height(self, height: int, block_hash: bytes):
        log.info("notify es update '%s'", height)
        self.transport.write(struct.pack(b'>Q32s', height, block_hash))


class ElasticNotifierClientProtocol(asyncio.Protocol):
    """notifies the reader when ES has written updates"""

    def __init__(self, notifications: asyncio.Queue):
        self.notifications = notifications
        self.transport: typing.Optional[asyncio.Transport] = None

    def close(self):
        if self.transport and not self.transport.is_closing():
            self.transport.close()

    def connection_made(self, transport):
        self.transport = transport
        log.info("connected to es notifier")

    def connection_lost(self, exc) -> None:
        self.transport = None

    def data_received(self, data: bytes) -> None:
        try:
            height, block_hash = struct.unpack(b'>Q32s', data)
        except:
            log.exception("failed to decode %s", (data or b'').hex())
            raise
        self.notifications.put_nowait((height, block_hash))

@@ -1,138 +1,318 @@
(The previous module-level helpers, get_recent_claims, get_all_claims, make_es_index_and_run_sync and run_elastic_sync, which drove the old one-shot leveldb to ES sync, are removed; the file now implements the ElasticWriter service shown below.)

import os
import time
import signal
import json
import typing
from collections import defaultdict
import asyncio
import logging
from elasticsearch import AsyncElasticsearch, NotFoundError
from elasticsearch.helpers import async_streaming_bulk
from lbry.schema.result import Censor
from lbry.wallet.server.db.elasticsearch.search import IndexVersionMismatch
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS, INDEX_DEFAULT_SETTINGS
from lbry.wallet.server.db.elasticsearch.common import expand_query
from lbry.wallet.server.db.elasticsearch.notifier import ElasticNotifierProtocol
from lbry.wallet.server.db.elasticsearch.fast_ar_trending import FAST_AR_TRENDING_SCRIPT
from lbry.wallet.server.chain_reader import BlockchainReader
from lbry.wallet.server.db.revertable import RevertableOp
from lbry.wallet.server.db import DB_PREFIXES

log = logging.getLogger()


class ElasticWriter(BlockchainReader):
    VERSION = 1

    def __init__(self, env):
        super().__init__(env, 'lbry-elastic-writer', thread_workers=1, thread_prefix='lbry-elastic-writer')
        # self._refresh_interval = 0.1
        self._task = None
        self.index = self.env.es_index_prefix + 'claims'
        self._elastic_host = env.elastic_host
        self._elastic_port = env.elastic_port
        self.sync_timeout = 1800
        self.sync_client = AsyncElasticsearch(
            [{'host': self._elastic_host, 'port': self._elastic_port}], timeout=self.sync_timeout
        )
        self._es_info_path = os.path.join(env.db_dir, 'es_info')
        self._last_wrote_height = 0
        self._last_wrote_block_hash = None
        self._touched_claims = set()
        self._deleted_claims = set()
        self._removed_during_undo = set()
        self._trending = defaultdict(list)
        self._advanced = True
        self.synchronized = asyncio.Event()
        self._listeners: typing.List[ElasticNotifierProtocol] = []

    async def run_es_notifier(self, synchronized: asyncio.Event):
        server = await asyncio.get_event_loop().create_server(
            lambda: ElasticNotifierProtocol(self._listeners), '127.0.0.1', self.env.elastic_notifier_port
        )
        self.log.info("ES notifier server listening on TCP localhost:%i", self.env.elastic_notifier_port)
        synchronized.set()
        async with server:
            await server.serve_forever()

    def notify_es_notification_listeners(self, height: int, block_hash: bytes):
        for p in self._listeners:
            p.send_height(height, block_hash)
            self.log.info("notify listener %i", height)

    def _read_es_height(self):
        with open(self._es_info_path, 'r') as f:
            info = json.loads(f.read())
        self._last_wrote_height = int(info.get('height', 0))
        self._last_wrote_block_hash = info.get('block_hash', None)

    async def read_es_height(self):
        await asyncio.get_event_loop().run_in_executor(self._executor, self._read_es_height)

    def write_es_height(self, height: int, block_hash: str):
        with open(self._es_info_path, 'w') as f:
            f.write(json.dumps({'height': height, 'block_hash': block_hash}, indent=2))
        self._last_wrote_height = height
        self._last_wrote_block_hash = block_hash

    async def get_index_version(self) -> int:
        try:
            template = await self.sync_client.indices.get_template(self.index)
            return template[self.index]['version']
        except NotFoundError:
            return 0

    async def set_index_version(self, version):
        await self.sync_client.indices.put_template(
            self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
        )

    async def start_index(self) -> bool:
        if self.sync_client:
            return False
        hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
        self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
        while True:
            try:
                await self.sync_client.cluster.health(wait_for_status='yellow')
                break
            except ConnectionError:
                self.log.warning("Failed to connect to Elasticsearch. Waiting for it!")
                await asyncio.sleep(1)

        res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
        acked = res.get('acknowledged', False)
        if acked:
            await self.set_index_version(self.VERSION)
            return acked
        index_version = await self.get_index_version()
        if index_version != self.VERSION:
            self.log.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
            raise IndexVersionMismatch(index_version, self.VERSION)
        await self.sync_client.indices.refresh(self.index)
        return acked

    async def stop_index(self):
        if self.sync_client:
            await self.sync_client.close()
        self.sync_client = None

    def delete_index(self):
        return self.sync_client.indices.delete(self.index, ignore_unavailable=True)

    def update_filter_query(self, censor_type, blockdict, channels=False):
        blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
        if channels:
            update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
        else:
            update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
        key = 'channel_id' if channels else 'claim_id'
        update['script'] = {
            "source": f"ctx._source.censor_type={censor_type}; "
                      f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
            "lang": "painless",
            "params": blockdict
        }
        return update

    async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
        if filtered_streams:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
            await self.sync_client.indices.refresh(self.index)
        if filtered_channels:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
            await self.sync_client.indices.refresh(self.index)
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
            await self.sync_client.indices.refresh(self.index)
        if blocked_streams:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
            await self.sync_client.indices.refresh(self.index)
        if blocked_channels:
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
            await self.sync_client.indices.refresh(self.index)
            await self.sync_client.update_by_query(
                self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
            await self.sync_client.indices.refresh(self.index)

    async def _claim_producer(self):
        for deleted in self._deleted_claims:
            yield {
                '_index': self.index,
                '_op_type': 'delete',
                '_id': deleted.hex()
            }
        for touched in self._touched_claims:
            claim = self.db.claim_producer(touched)
            if claim:
                yield {
                    'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
                    '_id': claim['claim_id'],
                    '_index': self.index,
                    '_op_type': 'update',
                    'doc_as_upsert': True
                }
        for claim_hash, notifications in self._trending.items():
            yield {
                '_id': claim_hash.hex(),
                '_index': self.index,
                '_op_type': 'update',
                'script': {
                    'lang': 'painless',
                    'source': FAST_AR_TRENDING_SCRIPT,
                    'params': {'src': {
                        'changes': [
                            {
                                'height': notify_height,
                                'prev_amount': trending_v.previous_amount / 1E8,
                                'new_amount': trending_v.new_amount / 1E8,
                            } for (notify_height, trending_v) in notifications
                        ]
                    }}
                },
            }

    def advance(self, height: int):
        super().advance(height)
        touched_or_deleted = self.db.prefix_db.touched_or_deleted.get(height)
        for k, v in self.db.prefix_db.trending_notification.iterate((height,)):
            self._trending[k.claim_hash].append((k.height, v))
        if touched_or_deleted:
            readded_after_reorg = self._removed_during_undo.intersection(touched_or_deleted.touched_claims)
            self._deleted_claims.difference_update(readded_after_reorg)
            self._touched_claims.update(touched_or_deleted.touched_claims)
            self._deleted_claims.update(touched_or_deleted.deleted_claims)
            self._touched_claims.difference_update(self._deleted_claims)
            for to_del in touched_or_deleted.deleted_claims:
                if to_del in self._trending:
                    self._trending.pop(to_del)
            self.log.info("advanced to %i, %i touched %i to delete (%i %i)", height,
                          len(touched_or_deleted.touched_claims), len(touched_or_deleted.deleted_claims),
                          len(self._touched_claims), len(self._deleted_claims))
        self._advanced = True

    def unwind(self):
        self.db.tx_counts.pop()
        reverted_block_hash = self.db.coin.header_hash(self.db.headers.pop())
        packed = self.db.prefix_db.undo.get(len(self.db.tx_counts), reverted_block_hash)
        touched_or_deleted = None
        claims_to_delete = []
        # find and apply the touched_or_deleted items in the undos for the reverted blocks
        assert packed, f'missing undo information for block {len(self.db.tx_counts)}'
        while packed:
            op, packed = RevertableOp.unpack(packed)
            if op.is_delete and op.key.startswith(DB_PREFIXES.touched_or_deleted.value):
                assert touched_or_deleted is None, 'only should have one match'
                touched_or_deleted = self.db.prefix_db.touched_or_deleted.unpack_value(op.value)
            elif op.is_delete and op.key.startswith(DB_PREFIXES.claim_to_txo.value):
                v = self.db.prefix_db.claim_to_txo.unpack_value(op.value)
                if v.root_tx_num == v.tx_num and v.root_tx_num > self.db.tx_counts[-1]:
                    claims_to_delete.append(self.db.prefix_db.claim_to_txo.unpack_key(op.key).claim_hash)
        if touched_or_deleted:
            self._touched_claims.update(set(touched_or_deleted.deleted_claims).union(
                touched_or_deleted.touched_claims.difference(set(claims_to_delete))))
            self._deleted_claims.update(claims_to_delete)
            self._removed_during_undo.update(claims_to_delete)
        self._advanced = True
        self.log.warning("delete %i claim and upsert %i from reorg", len(self._deleted_claims), len(self._touched_claims))

    async def poll_for_changes(self):
        await super().poll_for_changes()
        cnt = 0
        success = 0
        if self._advanced:
            if self._touched_claims or self._deleted_claims or self._trending:
                async for ok, item in async_streaming_bulk(
                        self.sync_client, self._claim_producer(),
                        raise_on_error=False):
                    cnt += 1
                    if not ok:
                        self.log.warning("indexing failed for an item: %s", item)
                    else:
                        success += 1
                await self.sync_client.indices.refresh(self.index)
            self.write_es_height(self.db.db_height, self.db.db_tip[::-1].hex())
            self.log.info("Indexing block %i done. %i/%i successful", self._last_wrote_height, success, cnt)
            self._touched_claims.clear()
            self._deleted_claims.clear()
            self._removed_during_undo.clear()
            self._trending.clear()
            self._advanced = False
            self.synchronized.set()
            self.notify_es_notification_listeners(self._last_wrote_height, self.db.db_tip)

    @property
    def last_synced_height(self) -> int:
        return self._last_wrote_height

    async def start(self):
        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()

        def _start_cancellable(run, *args):
            _flag = asyncio.Event()
            self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
            return _flag.wait()

        self.db.open_db()
        await self.db.initialize_caches()
        await self.start_index()
        self.last_state = self.db.read_db_state()

        await _start_cancellable(self.run_es_notifier)
        await _start_cancellable(self.refresh_blocks_forever)

    async def stop(self, delete_index=False):
        async with self._lock:
            while self.cancellable_tasks:
                t = self.cancellable_tasks.pop()
                if not t.done():
                    t.cancel()
            if delete_index:
                await self.delete_index()
            await self.stop_index()
            self._executor.shutdown(wait=True)

    def run(self):
        loop = asyncio.get_event_loop()

        def __exit():
            raise SystemExit()
        try:
            loop.add_signal_handler(signal.SIGINT, __exit)
            loop.add_signal_handler(signal.SIGTERM, __exit)
            loop.run_until_complete(self.start())
            loop.run_until_complete(self.shutdown_event.wait())
        except (SystemExit, KeyboardInterrupt):
            pass
        finally:
            loop.run_until_complete(self.stop())

lbry/wallet/server/db/interface.py (new file, 141 lines)
@@ -0,0 +1,141 @@
import struct
import typing

import rocksdb
from typing import Optional
from lbry.wallet.server.db import DB_PREFIXES
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete


class PrefixDB:
    """
    Base class for a revertable rocksdb database (a rocksdb db where each set of applied changes can be undone)
    """
    UNDO_KEY_STRUCT = struct.Struct(b'>Q32s')
    PARTIAL_UNDO_KEY_STRUCT = struct.Struct(b'>Q')

    def __init__(self, path, max_open_files=64, secondary_path='', max_undo_depth: int = 200, unsafe_prefixes=None):
        column_family_options = {
            prefix.value: rocksdb.ColumnFamilyOptions() for prefix in DB_PREFIXES
        } if secondary_path else {}
        self.column_families: typing.Dict[bytes, 'rocksdb.ColumnFamilyHandle'] = {}
        self._db = rocksdb.DB(
            path, rocksdb.Options(
                create_if_missing=True, use_fsync=True, target_file_size_base=33554432,
                max_open_files=max_open_files if not secondary_path else -1
            ), secondary_name=secondary_path, column_families=column_family_options
        )
        for prefix in DB_PREFIXES:
            cf = self._db.get_column_family(prefix.value)
            if cf is None and not secondary_path:
                self._db.create_column_family(prefix.value, rocksdb.ColumnFamilyOptions())
                cf = self._db.get_column_family(prefix.value)
            self.column_families[prefix.value] = cf

        self._op_stack = RevertableOpStack(self.get, unsafe_prefixes=unsafe_prefixes)
        self._max_undo_depth = max_undo_depth

    def unsafe_commit(self):
        """
        Write staged changes to the database without keeping undo information
        Changes written cannot be undone
        """
        try:
            if not len(self._op_stack):
                return
            with self._db.write_batch(sync=True) as batch:
                batch_put = batch.put
                batch_delete = batch.delete
                get_column_family = self.column_families.__getitem__
                for staged_change in self._op_stack:
                    column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
                    if staged_change.is_put:
                        batch_put((column_family, staged_change.key), staged_change.value)
                    else:
                        batch_delete((column_family, staged_change.key))
        finally:
            self._op_stack.clear()

    def commit(self, height: int, block_hash: bytes):
        """
        Write changes for a block height to the database and keep undo information so that the changes can be reverted
        """
        undo_ops = self._op_stack.get_undo_ops()
        delete_undos = []
        if height > self._max_undo_depth:
            delete_undos.extend(self._db.iterator(
                start=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(0),
                iterate_upper_bound=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(height - self._max_undo_depth),
                include_value=False
            ))
        try:
            undo_c_f = self.column_families[DB_PREFIXES.undo.value]
            with self._db.write_batch(sync=True) as batch:
                batch_put = batch.put
                batch_delete = batch.delete
                get_column_family = self.column_families.__getitem__
                for staged_change in self._op_stack:
                    column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
                    if staged_change.is_put:
                        batch_put((column_family, staged_change.key), staged_change.value)
                    else:
                        batch_delete((column_family, staged_change.key))
                for undo_to_delete in delete_undos:
                    batch_delete((undo_c_f, undo_to_delete))
                batch_put((undo_c_f, DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash)), undo_ops)
        finally:
            self._op_stack.clear()

    def rollback(self, height: int, block_hash: bytes):
        """
        Revert changes for a block height
        """
        undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash)
        undo_c_f = self.column_families[DB_PREFIXES.undo.value]
        undo_info = self._db.get((undo_c_f, undo_key))
        self._op_stack.apply_packed_undo_ops(undo_info)
        try:
            with self._db.write_batch(sync=True) as batch:
                batch_put = batch.put
                batch_delete = batch.delete
                get_column_family = self.column_families.__getitem__
                for staged_change in self._op_stack:
                    column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
                    if staged_change.is_put:
                        batch_put((column_family, staged_change.key), staged_change.value)
                    else:
                        batch_delete((column_family, staged_change.key))
                # batch_delete(undo_key)
        finally:
            self._op_stack.clear()

    def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
        cf = self.column_families[key[:1]]
        return self._db.get((cf, key), fill_cache=fill_cache)

    def iterator(self, start: bytes, column_family: 'rocksdb.ColumnFamilyHandle' = None,
                 iterate_lower_bound: bytes = None, iterate_upper_bound: bytes = None,
                 reverse: bool = False, include_key: bool = True, include_value: bool = True,
                 fill_cache: bool = True, prefix_same_as_start: bool = True, auto_prefix_mode: bool = True):
        return self._db.iterator(
            start=start, column_family=column_family, iterate_lower_bound=iterate_lower_bound,
            iterate_upper_bound=iterate_upper_bound, reverse=reverse, include_key=include_key,
            include_value=include_value, fill_cache=fill_cache, prefix_same_as_start=prefix_same_as_start,
            auto_prefix_mode=auto_prefix_mode
        )

    def close(self):
        self._db.close()

    def try_catch_up_with_primary(self):
        self._db.try_catch_up_with_primary()

    @property
    def closed(self) -> bool:
        return self._db.is_closed

    def stage_raw_put(self, key: bytes, value: bytes):
        self._op_stack.append_op(RevertablePut(key, value))

    def stage_raw_delete(self, key: bytes, value: bytes):
        self._op_stack.append_op(RevertableDelete(key, value))
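A minimal usage sketch of the revertable commit/rollback cycle offered by PrefixDB (the path and key/value bytes are made up, and it assumes the rocksdb binding used by this branch is installed):

    from lbry.wallet.server.db import DB_PREFIXES
    from lbry.wallet.server.db.interface import PrefixDB

    db = PrefixDB('/tmp/example-rocksdb')                 # hypothetical path
    key = DB_PREFIXES.tx.value + b'\x00' * 32             # prefix byte + made-up packed key
    db.stage_raw_put(key, b'value')
    db.commit(1, b'\x11' * 32)                            # writes the batch and stores packed undo ops
    db.rollback(1, b'\x11' * 32)                          # replays the undo ops, deleting the put
    assert db.get(key) is None
    db.close()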
@ -4,9 +4,12 @@ import array
|
||||||
import base64
|
import base64
|
||||||
from typing import Union, Tuple, NamedTuple, Optional
|
from typing import Union, Tuple, NamedTuple, Optional
|
||||||
from lbry.wallet.server.db import DB_PREFIXES
|
from lbry.wallet.server.db import DB_PREFIXES
|
||||||
from lbry.wallet.server.db.db import KeyValueStorage, PrefixDB
|
from lbry.wallet.server.db.interface import PrefixDB
|
||||||
|
from lbry.wallet.server.db.common import TrendingNotification
|
||||||
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
|
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
|
||||||
from lbry.schema.url import normalize_name
|
from lbry.schema.url import normalize_name
|
||||||
|
if typing.TYPE_CHECKING:
|
||||||
|
import rocksdb
|
||||||
|
|
||||||
ACTIVATED_CLAIM_TXO_TYPE = 1
|
ACTIVATED_CLAIM_TXO_TYPE = 1
|
||||||
ACTIVATED_SUPPORT_TXO_TYPE = 2
|
ACTIVATED_SUPPORT_TXO_TYPE = 2
|
||||||
|
@ -38,21 +41,32 @@ class PrefixRow(metaclass=PrefixRowType):
|
||||||
value_struct: struct.Struct
|
value_struct: struct.Struct
|
||||||
key_part_lambdas = []
|
key_part_lambdas = []
|
||||||
|
|
||||||
def __init__(self, db: KeyValueStorage, op_stack: RevertableOpStack):
|
def __init__(self, db: 'rocksdb.DB', op_stack: RevertableOpStack):
|
||||||
self._db = db
|
self._db = db
|
||||||
self._op_stack = op_stack
|
self._op_stack = op_stack
|
||||||
|
self._column_family = self._db.get_column_family(self.prefix)
|
||||||
|
if not self._column_family.is_valid:
|
||||||
|
raise RuntimeError('column family is not valid')
|
||||||
|
|
||||||
def iterate(self, prefix=None, start=None, stop=None,
|
def iterate(self, prefix=None, start=None, stop=None, reverse: bool = False, include_key: bool = True,
|
||||||
reverse: bool = False, include_key: bool = True, include_value: bool = True,
|
include_value: bool = True, fill_cache: bool = True, deserialize_key: bool = True,
|
||||||
fill_cache: bool = True, deserialize_key: bool = True, deserialize_value: bool = True):
|
deserialize_value: bool = True):
|
||||||
if not prefix and not start and not stop:
|
if not prefix and not start and not stop:
|
||||||
prefix = ()
|
prefix = ()
|
||||||
if prefix is not None:
|
if prefix is not None:
|
||||||
prefix = self.pack_partial_key(*prefix)
|
prefix = self.pack_partial_key(*prefix)
|
||||||
if start is not None:
|
if stop is None:
|
||||||
start = self.pack_partial_key(*start)
|
try:
|
||||||
if stop is not None:
|
stop = (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix), byteorder='big')
|
||||||
stop = self.pack_partial_key(*stop)
|
except OverflowError:
|
||||||
|
stop = (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix) + 1, byteorder='big')
|
||||||
|
else:
|
||||||
|
stop = self.pack_partial_key(*stop)
|
||||||
|
else:
|
||||||
|
if start is not None:
|
||||||
|
start = self.pack_partial_key(*start)
|
||||||
|
if stop is not None:
|
||||||
|
stop = self.pack_partial_key(*stop)
|
||||||
|
|
||||||
if deserialize_key:
|
if deserialize_key:
|
||||||
key_getter = lambda k: self.unpack_key(k)
|
key_getter = lambda k: self.unpack_key(k)
|
||||||
|
@ -63,25 +77,27 @@ class PrefixRow(metaclass=PrefixRowType):
|
||||||
else:
|
else:
|
||||||
value_getter = lambda v: v
|
value_getter = lambda v: v
|
||||||
|
|
||||||
|
it = self._db.iterator(
|
||||||
|
start or prefix, self._column_family, iterate_lower_bound=None,
|
||||||
|
iterate_upper_bound=stop, reverse=reverse, include_key=include_key,
|
||||||
|
include_value=include_value, fill_cache=fill_cache, prefix_same_as_start=True
|
||||||
|
)
|
||||||
|
|
||||||
if include_key and include_value:
|
if include_key and include_value:
|
||||||
for k, v in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse,
|
for k, v in it:
|
||||||
fill_cache=fill_cache):
|
yield key_getter(k[1]), value_getter(v)
|
||||||
yield key_getter(k), value_getter(v)
|
|
||||||
elif include_key:
|
elif include_key:
|
||||||
for k in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_value=False,
|
for k in it:
|
||||||
fill_cache=fill_cache):
|
yield key_getter(k[1])
|
||||||
yield key_getter(k)
|
|
||||||
elif include_value:
|
elif include_value:
|
||||||
for v in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_key=False,
|
for v in it:
|
||||||
fill_cache=fill_cache):
|
|
||||||
yield value_getter(v)
|
yield value_getter(v)
|
||||||
else:
|
else:
|
||||||
for _ in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_key=False,
|
for _ in it:
|
||||||
include_value=False, fill_cache=fill_cache):
|
|
||||||
yield None
|
yield None
|
||||||
|
|
||||||
def get(self, *key_args, fill_cache=True, deserialize_value=True):
|
def get(self, *key_args, fill_cache=True, deserialize_value=True):
|
||||||
v = self._db.get(self.pack_key(*key_args), fill_cache=fill_cache)
|
v = self._db.get((self._column_family, self.pack_key(*key_args)), fill_cache=fill_cache)
|
||||||
if v:
|
if v:
|
||||||
return v if not deserialize_value else self.unpack_value(v)
|
return v if not deserialize_value else self.unpack_value(v)
|
||||||
|
|
||||||
|
@ -93,7 +109,7 @@ class PrefixRow(metaclass=PrefixRowType):
|
||||||
return last_op.value if not deserialize_value else self.unpack_value(last_op.value)
|
return last_op.value if not deserialize_value else self.unpack_value(last_op.value)
|
||||||
else: # it's a delete
|
else: # it's a delete
|
||||||
return
|
return
|
||||||
v = self._db.get(packed_key, fill_cache=fill_cache)
|
v = self._db.get((self._column_family, packed_key), fill_cache=fill_cache)
|
||||||
if v:
|
if v:
|
||||||
return v if not deserialize_value else self.unpack_value(v)
|
return v if not deserialize_value else self.unpack_value(v)
|
||||||
|
|
||||||
|
@ -117,7 +133,7 @@ class PrefixRow(metaclass=PrefixRowType):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def unpack_key(cls, key: bytes):
|
def unpack_key(cls, key: bytes):
|
||||||
assert key[:1] == cls.prefix
|
assert key[:1] == cls.prefix, f"prefix should be {cls.prefix}, got {key[:1]}"
|
||||||
return cls.key_struct.unpack(key[1:])
|
return cls.key_struct.unpack(key[1:])
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
|
@ -205,14 +221,14 @@ class TxHashValue(NamedTuple):
|
||||||
tx_hash: bytes
|
tx_hash: bytes
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})"
|
return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
|
||||||
|
|
||||||
|
|
||||||
class TxNumKey(NamedTuple):
|
class TxNumKey(NamedTuple):
|
||||||
tx_hash: bytes
|
tx_hash: bytes
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})"
|
return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
|
||||||
|
|
||||||
|
|
||||||
class TxNumValue(NamedTuple):
|
class TxNumValue(NamedTuple):
|
||||||
|
@ -223,14 +239,14 @@ class TxKey(NamedTuple):
|
||||||
tx_hash: bytes
|
tx_hash: bytes
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})"
|
return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
|
||||||
|
|
||||||
|
|
||||||
class TxValue(NamedTuple):
|
class TxValue(NamedTuple):
|
||||||
raw_tx: bytes
|
raw_tx: bytes
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx)})"
|
return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx).decode()})"
|
||||||
|
|
||||||
|
|
||||||
class BlockHeaderKey(NamedTuple):
|
class BlockHeaderKey(NamedTuple):
|
||||||
|
@@ -1103,24 +1119,28 @@ class RepostedPrefixRow(PrefixRow):
         return cls.pack_key(reposted_claim_hash, tx_num, position), cls.pack_value(claim_hash)


+class UndoKey(NamedTuple):
+    height: int
+    block_hash: bytes
+
+
 class UndoPrefixRow(PrefixRow):
     prefix = DB_PREFIXES.undo.value
-    key_struct = struct.Struct(b'>Q')
+    key_struct = struct.Struct(b'>Q32s')

     key_part_lambdas = [
         lambda: b'',
-        struct.Struct(b'>Q').pack
+        struct.Struct(b'>Q').pack,
+        struct.Struct(b'>Q32s').pack
     ]

     @classmethod
-    def pack_key(cls, height: int):
-        return super().pack_key(height)
+    def pack_key(cls, height: int, block_hash: bytes):
+        return super().pack_key(height, block_hash)

     @classmethod
-    def unpack_key(cls, key: bytes) -> int:
-        assert key[:1] == cls.prefix
-        height, = cls.key_struct.unpack(key[1:])
-        return height
+    def unpack_key(cls, key: bytes) -> UndoKey:
+        return UndoKey(*super().unpack_key(key))

     @classmethod
     def pack_value(cls, undo_ops: bytes) -> bytes:

@@ -1131,8 +1151,8 @@ class UndoPrefixRow(PrefixRow):
         return data

     @classmethod
-    def pack_item(cls, height: int, undo_ops: bytes):
-        return cls.pack_key(height), cls.pack_value(undo_ops)
+    def pack_item(cls, height: int, block_hash: bytes, undo_ops: bytes):
+        return cls.pack_key(height, block_hash), cls.pack_value(undo_ops)


 class BlockHashPrefixRow(PrefixRow):
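Note: a minimal sketch (illustrative values, not part of the diff) of the new undo key layout; binding undo data to a block hash as well as a height means entries for competing blocks at the same height can no longer collide:

    key = UndoPrefixRow.pack_key(1045, b'\x00' * 32)   # b'>Q32s': 8-byte height + 32-byte block hash
    assert UndoPrefixRow.unpack_key(key) == UndoKey(1045, b'\x00' * 32)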
@@ -1422,7 +1442,7 @@ class HashXHistoryPrefixRow(PrefixRow):


 class TouchedOrDeletedPrefixRow(PrefixRow):
-    prefix = DB_PREFIXES.claim_diff.value
+    prefix = DB_PREFIXES.touched_or_deleted.value
     key_struct = struct.Struct(b'>L')
     value_struct = struct.Struct(b'>LL')
     key_part_lambdas = [

@@ -1566,7 +1586,7 @@ class DBStatePrefixRow(PrefixRow):


 class BlockTxsPrefixRow(PrefixRow):
-    prefix = DB_PREFIXES.block_txs.value
+    prefix = DB_PREFIXES.block_tx.value
     key_struct = struct.Struct(b'>L')
     key_part_lambdas = [
         lambda: b'',
@@ -1595,41 +1615,139 @@ class BlockTxsPrefixRow(PrefixRow):
         return cls.pack_key(height), cls.pack_value(tx_hashes)


-class LevelDBStore(KeyValueStorage):
-    def __init__(self, path: str, cache_mb: int, max_open_files: int):
-        import plyvel
-        self.db = plyvel.DB(
-            path, create_if_missing=True, max_open_files=max_open_files,
-            lru_cache_size=cache_mb * 1024 * 1024, write_buffer_size=64 * 1024 * 1024,
-            max_file_size=1024 * 1024 * 64, bloom_filter_bits=32
-        )
-
-    def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
-        return self.db.get(key, fill_cache=fill_cache)
-
-    def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
-                 include_key=True, include_value=True, fill_cache=True):
-        return self.db.iterator(
-            reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop,
-            prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache
-        )
-
-    def write_batch(self, transaction: bool = False, sync: bool = False):
-        return self.db.write_batch(transaction=transaction, sync=sync)
-
-    def close(self):
-        return self.db.close()
-
-    @property
-    def closed(self) -> bool:
-        return self.db.closed
+class MempoolTxKey(NamedTuple):
+    tx_hash: bytes
+
+    def __str__(self):
+        return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
+
+
+class MempoolTxValue(NamedTuple):
+    raw_tx: bytes
+
+    def __str__(self):
+        return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx).decode()})"
+
+
+class MempoolTXPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.mempool_tx.value
+    key_struct = struct.Struct(b'>32s')
+
+    key_part_lambdas = [
+        lambda: b'',
+        struct.Struct(b'>32s').pack
+    ]
+
+    @classmethod
+    def pack_key(cls, tx_hash: bytes) -> bytes:
+        return super().pack_key(tx_hash)
+
+    @classmethod
+    def unpack_key(cls, tx_hash: bytes) -> MempoolTxKey:
+        return MempoolTxKey(*super().unpack_key(tx_hash))
+
+    @classmethod
+    def pack_value(cls, tx: bytes) -> bytes:
+        return tx
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> MempoolTxValue:
+        return MempoolTxValue(data)
+
+    @classmethod
+    def pack_item(cls, tx_hash: bytes, raw_tx: bytes):
+        return cls.pack_key(tx_hash), cls.pack_value(raw_tx)
+
+
+class TrendingNotificationKey(typing.NamedTuple):
+    height: int
+    claim_hash: bytes
+
+
+class TrendingNotificationValue(typing.NamedTuple):
+    previous_amount: int
+    new_amount: int
+
+
+class TrendingNotificationPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.trending_notifications.value
+    key_struct = struct.Struct(b'>L20s')
+    value_struct = struct.Struct(b'>QQ')
+    key_part_lambdas = [
+        lambda: b'',
+        struct.Struct(b'>L').pack,
+        struct.Struct(b'>L20s').pack
+    ]
+
+    @classmethod
+    def pack_key(cls, height: int, claim_hash: bytes):
+        return super().pack_key(height, claim_hash)
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> TrendingNotificationKey:
+        return TrendingNotificationKey(*super().unpack_key(key))
+
+    @classmethod
+    def pack_value(cls, previous_amount: int, new_amount: int) -> bytes:
+        return super().pack_value(previous_amount, new_amount)
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> TrendingNotificationValue:
+        return TrendingNotificationValue(*super().unpack_value(data))
+
+    @classmethod
+    def pack_item(cls, height, claim_hash, previous_amount, new_amount):
+        return cls.pack_key(height, claim_hash), cls.pack_value(previous_amount, new_amount)
+
+
+class TouchedHashXKey(NamedTuple):
+    height: int
+
+
+class TouchedHashXValue(NamedTuple):
+    touched_hashXs: typing.List[bytes]
+
+    def __str__(self):
+        return f"{self.__class__.__name__}(touched_hashXs=[{', '.join(map(lambda x: x.hex(), self.touched_hashXs))}])"
+
+
+class TouchedHashXPrefixRow(PrefixRow):
+    prefix = DB_PREFIXES.touched_hashX.value
+    key_struct = struct.Struct(b'>L')
+
+    key_part_lambdas = [
+        lambda: b'',
+        struct.Struct(b'>L').pack
+    ]
+
+    @classmethod
+    def pack_key(cls, height: int):
+        return super().pack_key(height)
+
+    @classmethod
+    def unpack_key(cls, key: bytes) -> TouchedHashXKey:
+        return TouchedHashXKey(*super().unpack_key(key))
+
+    @classmethod
+    def pack_value(cls, touched: typing.List[bytes]) -> bytes:
+        assert all(map(lambda item: len(item) == 11, touched))
+        return b''.join(touched)
+
+    @classmethod
+    def unpack_value(cls, data: bytes) -> TouchedHashXValue:
+        return TouchedHashXValue([data[idx*11:(idx*11)+11] for idx in range(len(data) // 11)])
+
+    @classmethod
+    def pack_item(cls, height: int, touched: typing.List[bytes]):
+        return cls.pack_key(height), cls.pack_value(touched)
 class HubDB(PrefixDB):
     def __init__(self, path: str, cache_mb: int = 128, reorg_limit: int = 200, max_open_files: int = 512,
-                 unsafe_prefixes: Optional[typing.Set[bytes]] = None):
-        db = LevelDBStore(path, cache_mb, max_open_files)
-        super().__init__(db, reorg_limit, unsafe_prefixes=unsafe_prefixes)
+                 secondary_path: str = '', unsafe_prefixes: Optional[typing.Set[bytes]] = None):
+        super().__init__(path, max_open_files=max_open_files, secondary_path=secondary_path,
+                         max_undo_depth=reorg_limit, unsafe_prefixes=unsafe_prefixes)
+        db = self._db
         self.claim_to_support = ClaimToSupportPrefixRow(db, self._op_stack)
         self.support_to_claim = SupportToClaimPrefixRow(db, self._op_stack)
         self.claim_to_txo = ClaimToTXOPrefixRow(db, self._op_stack)

@@ -1660,6 +1778,9 @@ class HubDB(PrefixDB):
         self.db_state = DBStatePrefixRow(db, self._op_stack)
         self.support_amount = SupportAmountPrefixRow(db, self._op_stack)
         self.block_txs = BlockTxsPrefixRow(db, self._op_stack)
+        self.mempool_tx = MempoolTXPrefixRow(db, self._op_stack)
+        self.trending_notification = TrendingNotificationPrefixRow(db, self._op_stack)
+        self.touched_hashX = TouchedHashXPrefixRow(db, self._op_stack)


 def auto_decode_item(key: bytes, value: bytes) -> Union[Tuple[NamedTuple, NamedTuple], Tuple[bytes, bytes]]:
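Note: a small sketch (the 11-byte strings below are placeholders for hashX values, which are 11 bytes long in this codebase) of the fixed-width packing used by the new TouchedHashXPrefixRow:

    touched = [b'a' * 11, b'b' * 11]
    packed = TouchedHashXPrefixRow.pack_value(touched)                         # 22 bytes, no delimiter
    assert TouchedHashXPrefixRow.unpack_value(packed).touched_hashXs == touched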
@@ -30,13 +30,14 @@ class Env:

     def __init__(self, coin=None, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None,
                  elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None, websocket_port=None,
-                 chain=None, es_index_prefix=None, es_mode=None, cache_MB=None, reorg_limit=None, tcp_port=None,
+                 chain=None, es_index_prefix=None, cache_MB=None, reorg_limit=None, tcp_port=None,
                  udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None, rpc_port=None,
                  prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None,
                  allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None,
                  payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None,
                  session_timeout=None, drop_client=None, description=None, daily_fee=None,
-                 database_query_timeout=None, db_max_open_files=512):
+                 database_query_timeout=None, db_max_open_files=512, elastic_notifier_port=None,
+                 blocking_channel_ids=None, filtering_channel_ids=None):
         self.logger = class_logger(__name__, self.__class__.__name__)

         self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
@@ -47,6 +48,8 @@ class Env:
         self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost')
         self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
         self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
+        self.elastic_notifier_port = elastic_notifier_port if elastic_notifier_port is not None else self.integer('ELASTIC_NOTIFIER_PORT', 19080)

         self.loop_policy = self.set_event_loop_policy(
             loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None)
         )

@@ -66,7 +69,6 @@ class Env:
         else:
             self.coin = LBCRegTest
         self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
-        self.es_mode = es_mode if es_mode is not None else self.default('ES_MODE', 'writer')
         self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
         self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
         # Server stuff
@@ -116,6 +118,10 @@ class Env:
         self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
             (float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)

+        # Filtering / Blocking
+        self.blocking_channel_ids = (blocking_channel_ids if blocking_channel_ids is not None else self.default('BLOCKING_CHANNEL_IDS', '')).split(' ')
+        self.filtering_channel_ids = (filtering_channel_ids if filtering_channel_ids is not None else self.default('FILTERING_CHANNEL_IDS', '')).split(' ')
+
     @classmethod
     def default(cls, envvar, default):
         return environ.get(envvar, default)
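Note: both new settings are parsed as space-separated claim id lists; a tiny illustration (placeholder ids, not a real configuration):

    raw = 'claimid1 claimid2'          # e.g. the BLOCKING_CHANNEL_IDS environment variable
    assert raw.split(' ') == ['claimid1', 'claimid2']
    # the empty-string default splits to [''], not []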
@@ -322,8 +328,6 @@ class Env:
                             help='elasticsearch host')
         parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
                             help='elasticsearch port')
-        parser.add_argument('--es_mode', default=cls.default('ES_MODE', 'writer'), type=str,
-                            choices=['reader', 'writer'])
         parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
         parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str,
                             choices=['default', 'uvloop'])

@@ -371,7 +375,7 @@ class Env:
             host=args.host, rpc_host=args.rpc_host, elastic_host=args.elastic_host, elastic_port=args.elastic_port,
             loop_policy=args.loop_policy, max_query_workers=args.max_query_workers, websocket_host=args.websocket_host,
             websocket_port=args.websocket_port, chain=args.chain, es_index_prefix=args.es_index_prefix,
-            es_mode=args.es_mode, cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
+            cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
             udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile,
             ssl_keyfile=args.ssl_keyfile, rpc_port=args.rpc_port, prometheus_port=args.prometheus_port,
             max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs,
(File diff suppressed because it is too large)

@@ -11,14 +11,13 @@ import itertools
 import time
 import attr
 import typing
-from typing import Set, Optional, Callable, Awaitable
 from collections import defaultdict
 from prometheus_client import Histogram
-from lbry.wallet.server.hash import hash_to_hex_str, hex_str_to_hash
-from lbry.wallet.server.util import class_logger, chunks
-from lbry.wallet.server.leveldb import UTXO
+from lbry.wallet.server.util import class_logger
 if typing.TYPE_CHECKING:
     from lbry.wallet.server.session import LBRYSessionManager
+    from wallet.server.db.db import LevelDB


 @attr.s(slots=True)
@@ -50,70 +49,99 @@ mempool_process_time_metric = Histogram(


 class MemPool:
-    def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0):
+    def __init__(self, coin, db: 'LevelDB', refresh_secs=1.0):
         self.coin = coin
-        self._daemon = daemon
         self._db = db
-        self._touched_mp = {}
-        self._touched_bp = {}
-        self._highest_block = -1

         self.logger = class_logger(__name__, self.__class__.__name__)
         self.txs = {}
-        self.hashXs = defaultdict(set)  # None can be a key
-        self.cached_compact_histogram = []
+        self.raw_mempool = {}
+        self.touched_hashXs: typing.DefaultDict[bytes, typing.Set[bytes]] = defaultdict(set)  # None can be a key
         self.refresh_secs = refresh_secs
-        self.log_status_secs = log_status_secs
-        # Prevents mempool refreshes during fee histogram calculation
-        self.lock = state_lock
-        self.wakeup = asyncio.Event()
         self.mempool_process_time_metric = mempool_process_time_metric
-        self.notified_mempool_txs = set()
-        self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None
+        self.session_manager: typing.Optional['LBRYSessionManager'] = None

-    async def _logging(self, synchronized_event):
-        """Print regular logs of mempool stats."""
-        self.logger.info('beginning processing of daemon mempool. '
-                         'This can take some time...')
+    async def refresh_hashes(self, height: int):
         start = time.perf_counter()
-        await synchronized_event.wait()
-        elapsed = time.perf_counter() - start
-        self.logger.info(f'synced in {elapsed:.2f}s')
-        while True:
-            self.logger.info(f'{len(self.txs):,d} txs '
-                             f'touching {len(self.hashXs):,d} addresses')
-            await asyncio.sleep(self.log_status_secs)
-            await synchronized_event.wait()
+        new_touched = await self._process_mempool()
+        await self.on_mempool(set(self.touched_hashXs), new_touched, height)
+        duration = time.perf_counter() - start
+        self.mempool_process_time_metric.observe(duration)

-    def _accept_transactions(self, tx_map, utxo_map, touched):
-        """Accept transactions in tx_map to the mempool if all their inputs
-        can be found in the existing mempool or a utxo_map from the
-        DB.
-
-        Returns an (unprocessed tx_map, unspent utxo_map) pair.
-        """
-        hashXs = self.hashXs
-        txs = self.txs
-
-        deferred = {}
-        unspent = set(utxo_map)
-        # Try to find all prevouts so we can accept the TX
-        for hash, tx in tx_map.items():
-            in_pairs = []
-            try:
-                for prevout in tx.prevouts:
-                    utxo = utxo_map.get(prevout)
-                    if not utxo:
-                        prev_hash, prev_index = prevout
-                        # Raises KeyError if prev_hash is not in txs
-                        utxo = txs[prev_hash].out_pairs[prev_index]
-                    in_pairs.append(utxo)
-            except KeyError:
-                deferred[hash] = tx
-                continue
-
-            # Spend the prevouts
-            unspent.difference_update(tx.prevouts)
+    async def _process_mempool(self) -> typing.Set[bytes]:  # returns list of new touched hashXs
+        # Re-sync with the new set of hashes
+
+        # hashXs = self.hashXs  # hashX: [tx_hash, ...]
+        touched_hashXs = set()
+
+        # Remove txs that aren't in mempool anymore
+        for tx_hash in set(self.txs).difference(self.raw_mempool.keys()):
+            tx = self.txs.pop(tx_hash)
+            tx_hashXs = {hashX for hashX, value in tx.in_pairs}.union({hashX for hashX, value in tx.out_pairs})
+            for hashX in tx_hashXs:
+                if hashX in self.touched_hashXs and tx_hash in self.touched_hashXs[hashX]:
+                    self.touched_hashXs[hashX].remove(tx_hash)
+                    if not self.touched_hashXs[hashX]:
+                        self.touched_hashXs.pop(hashX)
+            touched_hashXs.update(tx_hashXs)
+
+        tx_map = {}
+        for tx_hash, raw_tx in self.raw_mempool.items():
+            if tx_hash in self.txs:
+                continue
+            tx, tx_size = self.coin.DESERIALIZER(raw_tx).read_tx_and_vsize()
+            # Convert the inputs and outputs into (hashX, value) pairs
+            # Drop generation-like inputs from MemPoolTx.prevouts
+            txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
+                               for txin in tx.inputs
+                               if not txin.is_generation())
+            txout_pairs = tuple((self.coin.hashX_from_script(txout.pk_script), txout.value)
+                                for txout in tx.outputs)
+            tx_map[tx_hash] = MemPoolTx(txin_pairs, None, txout_pairs, 0, tx_size, raw_tx)
+
+        # Determine all prevouts not in the mempool, and fetch the
+        # UTXO information from the database. Failed prevout lookups
+        # return None - concurrent database updates happen - which is
+        # relied upon by _accept_transactions. Ignore prevouts that are
+        # generation-like.
+        # prevouts = tuple(prevout for tx in tx_map.values()
+        #                  for prevout in tx.prevouts
+        #                  if prevout[0] not in self.raw_mempool)
+        # utxos = await self._db.lookup_utxos(prevouts)
+        # utxo_map = dict(zip(prevouts, utxos))
+        # unspent = set(utxo_map)
+
+        for tx_hash, tx in tx_map.items():
+            in_pairs = []
+            for prevout in tx.prevouts:
+                # utxo = utxo_map.get(prevout)
+                # if not utxo:
+                prev_hash, prev_index = prevout
+                if prev_hash in self.txs:  # accepted mempool
+                    utxo = self.txs[prev_hash].out_pairs[prev_index]
+                elif prev_hash in tx_map:  # this set of changes
+                    utxo = tx_map[prev_hash].out_pairs[prev_index]
+                else:  # get it from the db
+                    prev_tx_num = self._db.prefix_db.tx_num.get(prev_hash)
+                    if not prev_tx_num:
+                        continue
+                    prev_tx_num = prev_tx_num.tx_num
+                    hashX_val = self._db.prefix_db.hashX_utxo.get(tx_hash[:4], prev_tx_num, prev_index)
+                    if not hashX_val:
+                        continue
+                    hashX = hashX_val.hashX
+                    utxo_value = self._db.prefix_db.utxo.get(hashX, prev_tx_num, prev_index)
+                    utxo = (hashX, utxo_value.amount)
+                    # if not prev_raw:
+                    #     print("derp", prev_hash[::-1].hex())
+                    #     print(self._db.get_tx_num(prev_hash))
+                    # prev_tx, prev_tx_size = self.coin.DESERIALIZER(prev_raw.raw_tx).read_tx_and_vsize()
+                    # prev_txo = prev_tx.outputs[prev_index]
+                    # utxo = (self.coin.hashX_from_script(prev_txo.pk_script), prev_txo.value)
+                in_pairs.append(utxo)
+
+            # # Spend the prevouts
+            # unspent.difference_update(tx.prevouts)

             # Save the in_pairs, compute the fee and accept the TX
             tx.in_pairs = tuple(in_pairs)
@@ -121,198 +149,26 @@ class MemPool:
             # because some in_parts would be missing
             tx.fee = max(0, (sum(v for _, v in tx.in_pairs) -
                              sum(v for _, v in tx.out_pairs)))
-            txs[hash] = tx
+            self.txs[tx_hash] = tx
+            # print(f"added {tx_hash[::-1].hex()} reader to mempool")

             for hashX, value in itertools.chain(tx.in_pairs, tx.out_pairs):
-                touched.add(hashX)
-                hashXs[hashX].add(hash)
-        return deferred, {prevout: utxo_map[prevout] for prevout in unspent}
+                self.touched_hashXs[hashX].add(tx_hash)
+                touched_hashXs.add(hashX)
+        # utxo_map = {prevout: utxo_map[prevout] for prevout in unspent}
+        return touched_hashXs

-    async def _mempool_loop(self, synchronized_event):
-        try:
-            return await self._refresh_hashes(synchronized_event)
-        except asyncio.CancelledError:
-            raise
-        except Exception as e:
-            self.logger.exception("MEMPOOL DIED")
-            raise e
-
-    async def _refresh_hashes(self, synchronized_event):
-        """Refresh our view of the daemon's mempool."""
-        while True:
-            start = time.perf_counter()
-            height = self._daemon.cached_height()
-            hex_hashes = await self._daemon.mempool_hashes()
-            if height != await self._daemon.height():
-                continue
-            hashes = {hex_str_to_hash(hh) for hh in hex_hashes}
-            async with self.lock:
-                new_hashes = hashes.difference(self.notified_mempool_txs)
-                touched = await self._process_mempool(hashes)
-                self.notified_mempool_txs.update(new_hashes)
-                new_touched = {
-                    touched_hashx for touched_hashx, txs in self.hashXs.items() if txs.intersection(new_hashes)
-                }
-            synchronized_event.set()
-            synchronized_event.clear()
-            await self.on_mempool(touched, new_touched, height)
-            duration = time.perf_counter() - start
-            self.mempool_process_time_metric.observe(duration)
-            try:
-                # we wait up to `refresh_secs` but go early if a broadcast happens (which triggers wakeup event)
-                await asyncio.wait_for(self.wakeup.wait(), timeout=self.refresh_secs)
-            except asyncio.TimeoutError:
-                pass
-            finally:
-                self.wakeup.clear()
-
-    async def _process_mempool(self, all_hashes):
-        # Re-sync with the new set of hashes
-        txs = self.txs
-
-        hashXs = self.hashXs  # hashX: [tx_hash, ...]
-        touched = set()
-
-        # First handle txs that have disappeared
-        for tx_hash in set(txs).difference(all_hashes):
-            tx = txs.pop(tx_hash)
-            tx_hashXs = {hashX for hashX, value in tx.in_pairs}
-            tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
-            for hashX in tx_hashXs:
-                hashXs[hashX].remove(tx_hash)
-                if not hashXs[hashX]:
-                    del hashXs[hashX]
-            touched.update(tx_hashXs)
-
-        # Process new transactions
-        new_hashes = list(all_hashes.difference(txs))
-        if new_hashes:
-            fetches = []
-            for hashes in chunks(new_hashes, 200):
-                fetches.append(self._fetch_and_accept(hashes, all_hashes, touched))
-            tx_map = {}
-            utxo_map = {}
-            for fetch in asyncio.as_completed(fetches):
-                deferred, unspent = await fetch
-                tx_map.update(deferred)
-                utxo_map.update(unspent)
-
-            prior_count = 0
-            # FIXME: this is not particularly efficient
-            while tx_map and len(tx_map) != prior_count:
-                prior_count = len(tx_map)
-                tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map, touched)
-
-            if tx_map:
-                self.logger.info(f'{len(tx_map)} txs dropped')
-
-        return touched
-
-    async def _fetch_and_accept(self, hashes, all_hashes, touched):
-        """Fetch a list of mempool transactions."""
-        raw_txs = await self._daemon.getrawtransactions((hash_to_hex_str(hash) for hash in hashes))
-
-        to_hashX = self.coin.hashX_from_script
-        deserializer = self.coin.DESERIALIZER
-
-        tx_map = {}
-        for hash, raw_tx in zip(hashes, raw_txs):
-            # The daemon may have evicted the tx from its
-            # mempool or it may have gotten in a block
-            if not raw_tx:
-                continue
-            tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
-            # Convert the inputs and outputs into (hashX, value) pairs
-            # Drop generation-like inputs from MemPoolTx.prevouts
-            txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
-                               for txin in tx.inputs
-                               if not txin.is_generation())
-            txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
-                                for txout in tx.outputs)
-            tx_map[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
-                                     0, tx_size, raw_tx)
-
-        # Determine all prevouts not in the mempool, and fetch the
-        # UTXO information from the database. Failed prevout lookups
-        # return None - concurrent database updates happen - which is
-        # relied upon by _accept_transactions. Ignore prevouts that are
-        # generation-like.
-        prevouts = tuple(prevout for tx in tx_map.values()
-                         for prevout in tx.prevouts
-                         if prevout[0] not in all_hashes)
-        utxos = await self._db.lookup_utxos(prevouts)
-        utxo_map = dict(zip(prevouts, utxos))
-
-        return self._accept_transactions(tx_map, utxo_map, touched)
-
-    #
-    # External interface
-    #
-
-    async def keep_synchronized(self, synchronized_event):
-        """Keep the mempool synchronized with the daemon."""
-        await asyncio.wait([
-            self._mempool_loop(synchronized_event),
-            # self._refresh_histogram(synchronized_event),
-            self._logging(synchronized_event)
-        ])
-
-    async def balance_delta(self, hashX):
-        """Return the unconfirmed amount in the mempool for hashX.
-
-        Can be positive or negative.
-        """
-        value = 0
-        if hashX in self.hashXs:
-            for hash in self.hashXs[hashX]:
-                tx = self.txs[hash]
-                value -= sum(v for h168, v in tx.in_pairs if h168 == hashX)
-                value += sum(v for h168, v in tx.out_pairs if h168 == hashX)
-        return value
-
-    def compact_fee_histogram(self):
-        """Return a compact fee histogram of the current mempool."""
-        return self.cached_compact_histogram
-
-    async def potential_spends(self, hashX):
-        """Return a set of (prev_hash, prev_idx) pairs from mempool
-        transactions that touch hashX.
-
-        None, some or all of these may be spends of the hashX, but all
-        actual spends of it (in the DB or mempool) will be included.
-        """
-        result = set()
-        for tx_hash in self.hashXs.get(hashX, ()):
-            tx = self.txs[tx_hash]
-            result.update(tx.prevouts)
-        return result
-
     def transaction_summaries(self, hashX):
         """Return a list of MemPoolTxSummary objects for the hashX."""
         result = []
-        for tx_hash in self.hashXs.get(hashX, ()):
+        for tx_hash in self.touched_hashXs.get(hashX, ()):
             tx = self.txs[tx_hash]
             has_ui = any(hash in self.txs for hash, idx in tx.prevouts)
             result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui))
         return result

-    async def unordered_UTXOs(self, hashX):
-        """Return an unordered list of UTXO named tuples from mempool
-        transactions that pay to hashX.
-
-        This does not consider if any other mempool transactions spend
-        the outputs.
-        """
-        utxos = []
-        for tx_hash in self.hashXs.get(hashX, ()):
-            tx = self.txs.get(tx_hash)
-            for pos, (hX, value) in enumerate(tx.out_pairs):
-                if hX == hashX:
-                    utxos.append(UTXO(-1, pos, tx_hash, 0, value))
-        return utxos
-
-    def get_mempool_height(self, tx_hash):
+    def get_mempool_height(self, tx_hash: bytes) -> int:
         # Height Progression
         # -2: not broadcast
         # -1: in mempool but has unconfirmed inputs
@@ -321,41 +177,57 @@ class MemPool:
         if tx_hash not in self.txs:
             return -2
         tx = self.txs[tx_hash]
-        unspent_inputs = sum(1 if hash in self.txs else 0 for hash, idx in tx.prevouts)
+        unspent_inputs = any(hash in self.raw_mempool for hash, idx in tx.prevouts)
         if unspent_inputs:
             return -1
         return 0

-    async def _maybe_notify(self, new_touched):
-        tmp, tbp = self._touched_mp, self._touched_bp
-        common = set(tmp).intersection(tbp)
-        if common:
-            height = max(common)
-        elif tmp and max(tmp) == self._highest_block:
-            height = self._highest_block
-        else:
-            # Either we are processing a block and waiting for it to
-            # come in, or we have not yet had a mempool update for the
-            # new block height
-            return
-        touched = tmp.pop(height)
-        for old in [h for h in tmp if h <= height]:
-            del tmp[old]
-        for old in [h for h in tbp if h <= height]:
-            touched.update(tbp.pop(old))
-        # print("notify", height, len(touched), len(new_touched))
-        await self.notify_sessions(height, touched, new_touched)
-
     async def start(self, height, session_manager: 'LBRYSessionManager'):
-        self._highest_block = height
         self.notify_sessions = session_manager._notify_sessions
-        await self.notify_sessions(height, set(), set())
+        await self._notify_sessions(height, set(), set())

     async def on_mempool(self, touched, new_touched, height):
-        self._touched_mp[height] = touched
-        await self._maybe_notify(new_touched)
+        await self._notify_sessions(height, touched, new_touched)

     async def on_block(self, touched, height):
-        self._touched_bp[height] = touched
-        self._highest_block = height
-        await self._maybe_notify(set())
+        await self._notify_sessions(height, touched, set())
+
+    async def _notify_sessions(self, height, touched, new_touched):
+        """Notify sessions about height changes and touched addresses."""
+        height_changed = height != self.session_manager.notified_height
+        if height_changed:
+            await self.session_manager._refresh_hsub_results(height)
+
+        if not self.session_manager.sessions:
+            return
+
+        if height_changed:
+            header_tasks = [
+                session.send_notification('blockchain.headers.subscribe', (self.session_manager.hsub_results[session.subscribe_headers_raw], ))
+                for session in self.session_manager.sessions.values() if session.subscribe_headers
+            ]
+            if header_tasks:
+                self.logger.info(f'notify {len(header_tasks)} sessions of new header')
+                asyncio.create_task(asyncio.wait(header_tasks))
+        for hashX in touched.intersection(self.session_manager.mempool_statuses.keys()):
+            self.session_manager.mempool_statuses.pop(hashX, None)
+        # self.bp._chain_executor
+        await asyncio.get_event_loop().run_in_executor(
+            None, touched.intersection_update, self.session_manager.hashx_subscriptions_by_session.keys()
+        )
+
+        if touched or new_touched or (height_changed and self.session_manager.mempool_statuses):
+            notified_hashxs = 0
+            session_hashxes_to_notify = defaultdict(list)
+            to_notify = touched if height_changed else new_touched
+
+            for hashX in to_notify:
+                if hashX not in self.session_manager.hashx_subscriptions_by_session:
+                    continue
+                for session_id in self.session_manager.hashx_subscriptions_by_session[hashX]:
+                    session_hashxes_to_notify[session_id].append(hashX)
+                    notified_hashxs += 1
+            for session_id, hashXes in session_hashxes_to_notify.items():
+                asyncio.create_task(self.session_manager.sessions[session_id].send_history_notifications(*hashXes))
+            if session_hashxes_to_notify:
+                self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses')
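Note: a hedged wiring sketch of the refactored flow (which component fills raw_mempool is not shown in this diff; the block-processor role below is an assumption):

    mempool = MemPool(env.coin, db)
    mempool.session_manager = session_manager    # must be set before notifications are sent
    # something (presumably the block processor) populates mempool.raw_mempool with {tx_hash: raw_tx}
    await mempool.refresh_hashes(height)         # re-syncs self.txs and notifies touched sessions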
119  lbry/wallet/server/prefetcher.py  Normal file

@@ -0,0 +1,119 @@
+import asyncio
+import typing
+
+from lbry.wallet.server.util import chunks, class_logger
+if typing.TYPE_CHECKING:
+    from lbry.wallet.server.daemon import LBCDaemon
+    from lbry.wallet.server.coin import Coin
+
+
+class Prefetcher:
+    """Prefetches blocks (in the forward direction only)."""
+
+    def __init__(self, daemon: 'LBCDaemon', coin: 'Coin', blocks_event: asyncio.Event):
+        self.logger = class_logger(__name__, self.__class__.__name__)
+        self.daemon = daemon
+        self.coin = coin
+        self.blocks_event = blocks_event
+        self.blocks = []
+        self.caught_up = False
+        # Access to fetched_height should be protected by the semaphore
+        self.fetched_height = None
+        self.semaphore = asyncio.Semaphore()
+        self.refill_event = asyncio.Event()
+        # The prefetched block cache size. The min cache size has
+        # little effect on sync time.
+        self.cache_size = 0
+        self.min_cache_size = 10 * 1024 * 1024
+        # This makes the first fetch be 10 blocks
+        self.ave_size = self.min_cache_size // 10
+        self.polling_delay = 0.5
+
+    async def main_loop(self, bp_height):
+        """Loop forever polling for more blocks."""
+        await self.reset_height(bp_height)
+        try:
+            while True:
+                # Sleep a while if there is nothing to prefetch
+                await self.refill_event.wait()
+                if not await self._prefetch_blocks():
+                    await asyncio.sleep(self.polling_delay)
+        finally:
+            self.logger.info("block pre-fetcher is shutting down")
+
+    def get_prefetched_blocks(self):
+        """Called by block processor when it is processing queued blocks."""
+        blocks = self.blocks
+        self.blocks = []
+        self.cache_size = 0
+        self.refill_event.set()
+        return blocks
+
+    async def reset_height(self, height):
+        """Reset to prefetch blocks from the block processor's height.
+
+        Used in blockchain reorganisations. This coroutine can be
+        called asynchronously to the _prefetch_blocks coroutine so we
+        must synchronize with a semaphore.
+        """
+        async with self.semaphore:
+            self.blocks.clear()
+            self.cache_size = 0
+            self.fetched_height = height
+            self.refill_event.set()
+
+        daemon_height = await self.daemon.height()
+        behind = daemon_height - height
+        if behind > 0:
+            self.logger.info(f'catching up to daemon height {daemon_height:,d} '
+                             f'({behind:,d} blocks behind)')
+        else:
+            self.logger.info(f'caught up to daemon height {daemon_height:,d}')
+
+    async def _prefetch_blocks(self):
+        """Prefetch some blocks and put them on the queue.
+
+        Repeats until the queue is full or caught up.
+        """
+        daemon = self.daemon
+        daemon_height = await daemon.height()
+        async with self.semaphore:
+            while self.cache_size < self.min_cache_size:
+                # Try and catch up all blocks but limit to room in cache.
+                # Constrain fetch count to between 0 and 500 regardless;
+                # testnet can be lumpy.
+                cache_room = self.min_cache_size // self.ave_size
+                count = min(daemon_height - self.fetched_height, cache_room)
+                count = min(500, max(count, 0))
+                if not count:
+                    self.caught_up = True
+                    return False
+
+                first = self.fetched_height + 1
+                hex_hashes = await daemon.block_hex_hashes(first, count)
+                if self.caught_up:
+                    self.logger.info('new block height {:,d} hash {}'
+                                     .format(first + count-1, hex_hashes[-1]))
+                blocks = await daemon.raw_blocks(hex_hashes)
+
+                assert count == len(blocks)
+
+                # Special handling for genesis block
+                if first == 0:
+                    blocks[0] = self.coin.genesis_block(blocks[0])
+                    self.logger.info(f'verified genesis block with hash {hex_hashes[0]}')
+
+                # Update our recent average block size estimate
+                size = sum(len(block) for block in blocks)
+                if count >= 10:
+                    self.ave_size = size // count
+                else:
+                    self.ave_size = (size + (10 - count) * self.ave_size) // 10
+
+                self.blocks.extend(blocks)
+                self.cache_size += size
+                self.fetched_height += count
+                self.blocks_event.set()
+
+        self.refill_event.clear()
+        return True
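Note: a hedged usage sketch (object names are illustrative; the daemon and coin come from the Coin.DAEMON machinery used elsewhere in the server):

    blocks_event = asyncio.Event()
    prefetcher = Prefetcher(daemon, coin, blocks_event)
    asyncio.create_task(prefetcher.main_loop(db_height))
    await blocks_event.wait()                        # set whenever new raw blocks were queued
    raw_blocks = prefetcher.get_prefetched_blocks()  # drains the cache and re-arms prefetching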
@@ -1,91 +0,0 @@
-import signal
-import logging
-import asyncio
-from concurrent.futures.thread import ThreadPoolExecutor
-import typing
-
-import lbry
-from lbry.wallet.server.mempool import MemPool
-from lbry.wallet.server.block_processor import BlockProcessor
-from lbry.wallet.server.leveldb import LevelDB
-from lbry.wallet.server.session import LBRYSessionManager
-from lbry.prometheus import PrometheusServer
-
-
-class Server:
-
-    def __init__(self, env):
-        self.env = env
-        self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)
-        self.shutdown_event = asyncio.Event()
-        self.cancellable_tasks = []
-
-        self.daemon = daemon = env.coin.DAEMON(env.coin, env.daemon_url)
-        self.db = db = LevelDB(env)
-        self.bp = bp = BlockProcessor(env, db, daemon, self.shutdown_event)
-        self.prometheus_server: typing.Optional[PrometheusServer] = None
-
-        self.session_mgr = LBRYSessionManager(
-            env, db, bp, daemon, self.shutdown_event
-        )
-        self._indexer_task = None
-
-    async def start(self):
-        env = self.env
-        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
-        self.log.info(f'software version: {lbry.__version__}')
-        self.log.info(f'supported protocol versions: {min_str}-{max_str}')
-        self.log.info(f'event loop policy: {env.loop_policy}')
-        self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks')
-
-        await self.daemon.height()
-
-        def _start_cancellable(run, *args):
-            _flag = asyncio.Event()
-            self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
-            return _flag.wait()
-
-        await self.start_prometheus()
-        if self.env.udp_port:
-            await self.bp.status_server.start(
-                0, bytes.fromhex(self.bp.coin.GENESIS_HASH)[::-1], self.env.country,
-                self.env.host, self.env.udp_port, self.env.allow_lan_udp
-            )
-        await _start_cancellable(self.bp.fetch_and_process_blocks)
-
-        await self.db.populate_header_merkle_cache()
-        await _start_cancellable(self.bp.mempool.keep_synchronized)
-        await _start_cancellable(self.session_mgr.serve, self.bp.mempool)
-
-    async def stop(self):
-        for task in reversed(self.cancellable_tasks):
-            task.cancel()
-        await asyncio.wait(self.cancellable_tasks)
-        if self.prometheus_server:
-            await self.prometheus_server.stop()
-            self.prometheus_server = None
-        self.shutdown_event.set()
-        await self.daemon.close()
-
-    def run(self):
-        loop = asyncio.get_event_loop()
-        executor = ThreadPoolExecutor(self.env.max_query_workers, thread_name_prefix='hub-worker')
-        loop.set_default_executor(executor)
-
-        def __exit():
-            raise SystemExit()
-        try:
-            loop.add_signal_handler(signal.SIGINT, __exit)
-            loop.add_signal_handler(signal.SIGTERM, __exit)
-            loop.run_until_complete(self.start())
-            loop.run_until_complete(self.shutdown_event.wait())
-        except (SystemExit, KeyboardInterrupt):
-            pass
-        finally:
-            loop.run_until_complete(self.stop())
-            executor.shutdown(True)
-
-    async def start_prometheus(self):
-        if not self.prometheus_server and self.env.prometheus_port:
-            self.prometheus_server = PrometheusServer()
-            await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port)
@@ -20,8 +20,7 @@ import lbry
 from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
 from lbry.build_info import BUILD, COMMIT_HASH, DOCKER_TAG
 from lbry.schema.result import Outputs
-from lbry.wallet.server.block_processor import BlockProcessor
-from lbry.wallet.server.leveldb import LevelDB
+from lbry.wallet.server.db.db import HubDB
 from lbry.wallet.server.websocket import AdminWebSocket
 from lbry.wallet.rpc.framing import NewlineFramer

@@ -34,9 +33,12 @@ from lbry.wallet.rpc import (
 from lbry.wallet.server import util
 from lbry.wallet.server.hash import sha256, hash_to_hex_str, hex_str_to_hash, HASHX_LEN, Base58Error
 from lbry.wallet.server.daemon import DaemonError
+from lbry.wallet.server.db.elasticsearch import SearchIndex

 if typing.TYPE_CHECKING:
     from lbry.wallet.server.env import Env
     from lbry.wallet.server.daemon import Daemon
+    from lbry.wallet.server.mempool import MemPool

 BAD_REQUEST = 1
 DAEMON_ERROR = 2
@ -170,31 +172,43 @@ class SessionManager:
|
||||||
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
|
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
|
||||||
)
|
)
|
||||||
|
|
||||||
def __init__(self, env: 'Env', db: LevelDB, bp: BlockProcessor, daemon: 'Daemon', shutdown_event: asyncio.Event):
|
def __init__(self, env: 'Env', db: HubDB, mempool: 'MemPool', history_cache, resolve_cache, resolve_outputs_cache,
|
||||||
|
daemon: 'Daemon', shutdown_event: asyncio.Event,
|
||||||
|
on_available_callback: typing.Callable[[], None], on_unavailable_callback: typing.Callable[[], None]):
|
||||||
env.max_send = max(350000, env.max_send)
|
env.max_send = max(350000, env.max_send)
|
||||||
self.env = env
|
self.env = env
|
||||||
self.db = db
|
self.db = db
|
||||||
self.bp = bp
|
self.on_available_callback = on_available_callback
|
||||||
|
self.on_unavailable_callback = on_unavailable_callback
|
||||||
self.daemon = daemon
|
self.daemon = daemon
|
||||||
self.mempool = bp.mempool
|
self.mempool = mempool
|
||||||
self.shutdown_event = shutdown_event
|
self.shutdown_event = shutdown_event
|
||||||
self.logger = util.class_logger(__name__, self.__class__.__name__)
|
self.logger = util.class_logger(__name__, self.__class__.__name__)
|
||||||
self.servers: typing.Dict[str, asyncio.AbstractServer] = {}
|
self.servers: typing.Dict[str, asyncio.AbstractServer] = {}
|
||||||
self.sessions: typing.Dict[int, 'SessionBase'] = {}
|
self.sessions: typing.Dict[int, 'LBRYElectrumX'] = {}
|
||||||
self.hashx_subscriptions_by_session: typing.DefaultDict[str, typing.Set[int]] = defaultdict(set)
|
self.hashx_subscriptions_by_session: typing.DefaultDict[str, typing.Set[int]] = defaultdict(set)
|
||||||
self.mempool_statuses = {}
|
self.mempool_statuses = {}
|
||||||
self.cur_group = SessionGroup(0)
|
self.cur_group = SessionGroup(0)
|
||||||
self.txs_sent = 0
|
self.txs_sent = 0
|
||||||
self.start_time = time.time()
|
self.start_time = time.time()
|
||||||
self.history_cache = self.bp.history_cache
|
self.history_cache = history_cache
|
||||||
|
self.resolve_cache = resolve_cache
|
||||||
|
self.resolve_outputs_cache = resolve_outputs_cache
|
||||||
self.notified_height: typing.Optional[int] = None
|
self.notified_height: typing.Optional[int] = None
|
||||||
# Cache some idea of room to avoid recounting on each subscription
|
# Cache some idea of room to avoid recounting on each subscription
|
||||||
self.subs_room = 0
|
self.subs_room = 0
|
||||||
|
|
||||||
self.session_event = Event()
|
self.session_event = Event()
|
||||||
|
|
||||||
|
# Search index
|
||||||
|
self.search_index = SearchIndex(
|
||||||
|
self.env.es_index_prefix, self.env.database_query_timeout,
|
||||||
|
elastic_host=env.elastic_host, elastic_port=env.elastic_port
|
||||||
|
)
|
||||||
|
|
||||||
async def _start_server(self, kind, *args, **kw_args):
|
async def _start_server(self, kind, *args, **kw_args):
|
||||||
loop = asyncio.get_event_loop()
|
loop = asyncio.get_event_loop()
|
||||||
|
|
||||||
if kind == 'RPC':
|
if kind == 'RPC':
|
||||||
protocol_class = LocalRPC
|
protocol_class = LocalRPC
|
||||||
else:
|
else:
|
||||||
|
@ -243,7 +257,7 @@ class SessionManager:
|
||||||
await self.session_event.wait()
|
await self.session_event.wait()
|
||||||
self.session_event.clear()
|
self.session_event.clear()
|
||||||
if not paused and len(self.sessions) >= max_sessions:
|
if not paused and len(self.sessions) >= max_sessions:
|
||||||
self.bp.status_server.set_unavailable()
|
self.on_unavailable_callback()
|
||||||
self.logger.info(f'maximum sessions {max_sessions:,d} '
|
self.logger.info(f'maximum sessions {max_sessions:,d} '
|
||||||
f'reached, stopping new connections until '
|
f'reached, stopping new connections until '
|
||||||
f'count drops to {low_watermark:,d}')
|
f'count drops to {low_watermark:,d}')
|
||||||
|
@ -252,7 +266,7 @@ class SessionManager:
|
||||||
# Start listening for incoming connections if paused and
|
# Start listening for incoming connections if paused and
|
||||||
# session count has fallen
|
# session count has fallen
|
||||||
if paused and len(self.sessions) <= low_watermark:
|
if paused and len(self.sessions) <= low_watermark:
|
||||||
self.bp.status_server.set_available()
|
self.on_available_callback()
|
||||||
self.logger.info('resuming listening for incoming connections')
|
self.logger.info('resuming listening for incoming connections')
|
||||||
await self._start_external_servers()
|
await self._start_external_servers()
|
||||||
paused = False
|
paused = False
|
||||||
|
@ -533,7 +547,7 @@ class SessionManager:
|
||||||
await self.start_other()
|
await self.start_other()
|
||||||
await self._start_external_servers()
|
await self._start_external_servers()
|
||||||
server_listening_event.set()
|
server_listening_event.set()
|
||||||
self.bp.status_server.set_available()
|
self.on_available_callback()
|
||||||
# Peer discovery should start after the external servers
|
# Peer discovery should start after the external servers
|
||||||
# because we connect to ourself
|
# because we connect to ourself
|
||||||
await asyncio.wait([
|
await asyncio.wait([
|
||||||
@@ -573,7 +587,7 @@ class SessionManager:
async def raw_header(self, height):
"""Return the binary header at the given height."""
try:
- return self.db.raw_header(height)
+ return await self.db.raw_header(height)
except IndexError:
raise RPCError(BAD_REQUEST, f'height {height:,d} '
'out of range') from None

@@ -628,8 +642,9 @@ class SessionManager:
for hashX in touched.intersection(self.mempool_statuses.keys()):
self.mempool_statuses.pop(hashX, None)

+ # self.bp._chain_executor
await asyncio.get_event_loop().run_in_executor(
- self.bp._chain_executor, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
+ None, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
)

if touched or new_touched or (height_changed and self.mempool_statuses):
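The hunk above keeps the potentially large set intersection off the event loop by handing it to run_in_executor; passing None selects the loop's default thread pool instead of the block processor's executor. A stand-alone illustration of the same pattern, with made-up data:

    import asyncio

    async def main():
        touched = {b'a', b'b', b'c'}
        subscriptions = {b'b': object(), b'c': object(), b'd': object()}
        loop = asyncio.get_event_loop()
        # intersection_update mutates `touched` in place; running it in the
        # default thread pool (executor=None) keeps the event loop responsive.
        await loop.run_in_executor(None, touched.intersection_update, subscriptions.keys())
        print(touched)  # only the hashXs that also have subscriptions remain

    asyncio.run(main())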
@@ -680,12 +695,12 @@ class SessionBase(RPCSession):
request_handlers: typing.Dict[str, typing.Callable] = {}
version = '0.5.7'

- def __init__(self, session_mgr, db, mempool, kind):
+ def __init__(self, session_manager: 'LBRYSessionManager', db: 'LevelDB', mempool: 'MemPool', kind: str):
connection = JSONRPCConnection(JSONRPCAutoDetect)
- self.env = session_mgr.env
+ self.env = session_manager.env
super().__init__(connection=connection)
self.logger = util.class_logger(__name__, self.__class__.__name__)
- self.session_mgr = session_mgr
+ self.session_manager = session_manager
self.db = db
self.mempool = mempool
self.kind = kind # 'RPC', 'TCP' etc.

@@ -693,7 +708,7 @@ class SessionBase(RPCSession):
self.anon_logs = self.env.anon_logs
self.txs_sent = 0
self.log_me = False
- self.daemon_request = self.session_mgr.daemon_request
+ self.daemon_request = self.session_manager.daemon_request
# Hijack the connection so we can log messages
self._receive_message_orig = self.connection.receive_message
self.connection.receive_message = self.receive_message

@@ -723,17 +738,17 @@ class SessionBase(RPCSession):
self.session_id = next(self.session_counter)
context = {'conn_id': f'{self.session_id}'}
self.logger = util.ConnectionLogger(self.logger, context)
- self.group = self.session_mgr.add_session(self)
+ self.group = self.session_manager.add_session(self)
- self.session_mgr.session_count_metric.labels(version=self.client_version).inc()
+ self.session_manager.session_count_metric.labels(version=self.client_version).inc()
peer_addr_str = self.peer_address_str()
self.logger.info(f'{self.kind} {peer_addr_str}, '
- f'{self.session_mgr.session_count():,d} total')
+ f'{self.session_manager.session_count():,d} total')

def connection_lost(self, exc):
"""Handle client disconnection."""
super().connection_lost(exc)
- self.session_mgr.remove_session(self)
+ self.session_manager.remove_session(self)
- self.session_mgr.session_count_metric.labels(version=self.client_version).dec()
+ self.session_manager.session_count_metric.labels(version=self.client_version).dec()
msg = ''
if not self._can_send.is_set():
msg += ' whilst paused'

@@ -757,7 +772,7 @@ class SessionBase(RPCSession):
"""Handle an incoming request. ElectrumX doesn't receive
notifications from client sessions.
"""
- self.session_mgr.request_count_metric.labels(method=request.method, version=self.client_version).inc()
+ self.session_manager.request_count_metric.labels(method=request.method, version=self.client_version).inc()
if isinstance(request, Request):
handler = self.request_handlers.get(request.method)
handler = partial(handler, self)

@@ -805,7 +820,7 @@ class LBRYElectrumX(SessionBase):
PROTOCOL_MIN = VERSION.PROTOCOL_MIN
PROTOCOL_MAX = VERSION.PROTOCOL_MAX
max_errors = math.inf # don't disconnect people for errors! let them happen...
- session_mgr: LBRYSessionManager
+ session_manager: LBRYSessionManager
version = lbry.__version__
cached_server_features = {}

@@ -816,17 +831,17 @@ class LBRYElectrumX(SessionBase):
'blockchain.block.get_header': cls.block_get_header,
'blockchain.estimatefee': cls.estimatefee,
'blockchain.relayfee': cls.relayfee,
- 'blockchain.scripthash.get_balance': cls.scripthash_get_balance,
+ # 'blockchain.scripthash.get_balance': cls.scripthash_get_balance,
'blockchain.scripthash.get_history': cls.scripthash_get_history,
'blockchain.scripthash.get_mempool': cls.scripthash_get_mempool,
- 'blockchain.scripthash.listunspent': cls.scripthash_listunspent,
+ # 'blockchain.scripthash.listunspent': cls.scripthash_listunspent,
'blockchain.scripthash.subscribe': cls.scripthash_subscribe,
'blockchain.transaction.broadcast': cls.transaction_broadcast,
'blockchain.transaction.get': cls.transaction_get,
'blockchain.transaction.get_batch': cls.transaction_get_batch,
'blockchain.transaction.info': cls.transaction_info,
'blockchain.transaction.get_merkle': cls.transaction_merkle,
- 'server.add_peer': cls.add_peer,
+ # 'server.add_peer': cls.add_peer,
'server.banner': cls.banner,
'server.payment_address': cls.payment_address,
'server.donation_address': cls.donation_address,

@@ -843,10 +858,10 @@ class LBRYElectrumX(SessionBase):
'blockchain.block.headers': cls.block_headers,
'server.ping': cls.ping,
'blockchain.headers.subscribe': cls.headers_subscribe_False,
- 'blockchain.address.get_balance': cls.address_get_balance,
+ # 'blockchain.address.get_balance': cls.address_get_balance,
'blockchain.address.get_history': cls.address_get_history,
'blockchain.address.get_mempool': cls.address_get_mempool,
- 'blockchain.address.listunspent': cls.address_listunspent,
+ # 'blockchain.address.listunspent': cls.address_listunspent,
'blockchain.address.subscribe': cls.address_subscribe,
'blockchain.address.unsubscribe': cls.address_unsubscribe,
})

@@ -865,9 +880,8 @@ class LBRYElectrumX(SessionBase):
self.sv_seen = False
self.protocol_tuple = self.PROTOCOL_MIN
self.protocol_string = None
- self.daemon = self.session_mgr.daemon
+ self.daemon = self.session_manager.daemon
- self.bp: BlockProcessor = self.session_mgr.bp
+ self.db: LevelDB = self.session_manager.db
- self.db: LevelDB = self.bp.db

@classmethod
def protocol_min_max_strings(cls):

@@ -916,7 +930,7 @@ class LBRYElectrumX(SessionBase):
else:
method = 'blockchain.address.subscribe'
start = time.perf_counter()
- db_history = await self.session_mgr.limited_history(hashX)
+ db_history = await self.session_manager.limited_history(hashX)
mempool = self.mempool.transaction_summaries(hashX)

status = ''.join(f'{hash_to_hex_str(tx_hash)}:'

@@ -930,24 +944,24 @@ class LBRYElectrumX(SessionBase):
else:
status = None
if mempool:
- self.session_mgr.mempool_statuses[hashX] = status
+ self.session_manager.mempool_statuses[hashX] = status
else:
- self.session_mgr.mempool_statuses.pop(hashX, None)
+ self.session_manager.mempool_statuses.pop(hashX, None)

- self.session_mgr.address_history_metric.observe(time.perf_counter() - start)
+ self.session_manager.address_history_metric.observe(time.perf_counter() - start)
notifications.append((method, (alias, status)))

start = time.perf_counter()
- self.session_mgr.notifications_in_flight_metric.inc()
+ self.session_manager.notifications_in_flight_metric.inc()
for method, args in notifications:
self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc()
try:
await self.send_notifications(
Batch([Notification(method, (alias, status)) for (method, (alias, status)) in notifications])
)
- self.session_mgr.notifications_sent_metric.observe(time.perf_counter() - start)
+ self.session_manager.notifications_sent_metric.observe(time.perf_counter() - start)
finally:
- self.session_mgr.notifications_in_flight_metric.dec()
+ self.session_manager.notifications_in_flight_metric.dec()

# def get_metrics_or_placeholder_for_api(self, query_name):
# """ Do not hold on to a reference to the metrics

@@ -955,7 +969,7 @@ class LBRYElectrumX(SessionBase):
# you may be working with a stale metrics object.
# """
# if self.env.track_metrics:
- # # return self.session_mgr.metrics.for_api(query_name)
+ # # return self.session_manager.metrics.for_api(query_name)
# else:
# return APICallMetrics(query_name)

@@ -965,17 +979,17 @@ class LBRYElectrumX(SessionBase):
# if isinstance(kwargs, dict):
# kwargs['release_time'] = format_release_time(kwargs.get('release_time'))
# try:
- # self.session_mgr.pending_query_metric.inc()
+ # self.session_manager.pending_query_metric.inc()
# return await self.db.search_index.session_query(query_name, kwargs)
# except ConnectionTimeout:
- # self.session_mgr.interrupt_count_metric.inc()
+ # self.session_manager.interrupt_count_metric.inc()
# raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
# finally:
- # self.session_mgr.pending_query_metric.dec()
+ # self.session_manager.pending_query_metric.dec()
- # self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
+ # self.session_manager.executor_time_metric.observe(time.perf_counter() - start)

async def mempool_compact_histogram(self):
- return self.mempool.compact_fee_histogram()
+ return [] #self.mempool.compact_fee_histogram()

async def claimtrie_search(self, **kwargs):
start = time.perf_counter()

@@ -987,16 +1001,16 @@ class LBRYElectrumX(SessionBase):
except ValueError:
pass
try:
- self.session_mgr.pending_query_metric.inc()
+ self.session_manager.pending_query_metric.inc()
if 'channel' in kwargs:
channel_url = kwargs.pop('channel')
_, channel_claim, _, _ = await self.db.resolve(channel_url)
if not channel_claim or isinstance(channel_claim, (ResolveCensoredError, LookupError, ValueError)):
return Outputs.to_base64([], [], 0, None, None)
kwargs['channel_id'] = channel_claim.claim_hash.hex()
- return await self.db.search_index.cached_search(kwargs)
+ return await self.session_manager.search_index.cached_search(kwargs)
except ConnectionTimeout:
- self.session_mgr.interrupt_count_metric.inc()
+ self.session_manager.interrupt_count_metric.inc()
raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
except TooManyClaimSearchParametersError as err:
await asyncio.sleep(2)

@@ -1004,25 +1018,25 @@ class LBRYElectrumX(SessionBase):
self.peer_address()[0], err.key, err.limit)
return RPCError(1, str(err))
finally:
- self.session_mgr.pending_query_metric.dec()
+ self.session_manager.pending_query_metric.dec()
- self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
+ self.session_manager.executor_time_metric.observe(time.perf_counter() - start)

async def _cached_resolve_url(self, url):
- if url not in self.bp.resolve_cache:
+ if url not in self.session_manager.resolve_cache:
- self.bp.resolve_cache[url] = await self.loop.run_in_executor(None, self.db._resolve, url)
+ self.session_manager.resolve_cache[url] = await self.loop.run_in_executor(None, self.db._resolve, url)
- return self.bp.resolve_cache[url]
+ return self.session_manager.resolve_cache[url]

async def claimtrie_resolve(self, *urls) -> str:
sorted_urls = tuple(sorted(urls))
- self.session_mgr.urls_to_resolve_count_metric.inc(len(sorted_urls))
+ self.session_manager.urls_to_resolve_count_metric.inc(len(sorted_urls))
try:
- if sorted_urls in self.bp.resolve_outputs_cache:
+ if sorted_urls in self.session_manager.resolve_outputs_cache:
- return self.bp.resolve_outputs_cache[sorted_urls]
+ return self.session_manager.resolve_outputs_cache[sorted_urls]
rows, extra = [], []
for url in urls:
- if url not in self.bp.resolve_cache:
+ if url not in self.session_manager.resolve_cache:
- self.bp.resolve_cache[url] = await self._cached_resolve_url(url)
+ self.session_manager.resolve_cache[url] = await self._cached_resolve_url(url)
- stream, channel, repost, reposted_channel = self.bp.resolve_cache[url]
+ stream, channel, repost, reposted_channel = self.session_manager.resolve_cache[url]
if isinstance(channel, ResolveCensoredError):
rows.append(channel)
extra.append(channel.censor_row)

@@ -1047,15 +1061,15 @@ class LBRYElectrumX(SessionBase):
if reposted_channel:
extra.append(reposted_channel)
await asyncio.sleep(0)
- self.bp.resolve_outputs_cache[sorted_urls] = result = await self.loop.run_in_executor(
+ self.session_manager.resolve_outputs_cache[sorted_urls] = result = await self.loop.run_in_executor(
None, Outputs.to_base64, rows, extra, 0, None, None
)
return result
finally:
- self.session_mgr.resolved_url_count_metric.inc(len(sorted_urls))
+ self.session_manager.resolved_url_count_metric.inc(len(sorted_urls))

async def get_server_height(self):
- return self.bp.height
+ return self.db.db_height

async def transaction_get_height(self, tx_hash):
self.assert_tx_hash(tx_hash)
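The resolve caches that used to hang off the block processor (bp.resolve_cache, bp.resolve_outputs_cache) now live on the session manager, and the lookup itself is still pushed to an executor so the event loop is never blocked. A stripped-down sketch of the same cache-then-offload pattern; slow_resolve and the cache dict here are stand-ins, not the real implementation:

    import asyncio

    resolve_cache = {}  # url -> resolved value, shared by all sessions

    def slow_resolve(url):
        # placeholder for the blocking database resolve
        return f"resolved:{url}"

    async def cached_resolve(url):
        if url not in resolve_cache:
            loop = asyncio.get_event_loop()
            # run the blocking lookup in the default thread pool, then memoize it
            resolve_cache[url] = await loop.run_in_executor(None, slow_resolve, url)
        return resolve_cache[url]

    async def main():
        print(await cached_resolve("lbry://example"))  # computed once
        print(await cached_resolve("lbry://example"))  # served from the cache

    asyncio.run(main())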
@@ -1088,7 +1102,7 @@ class LBRYElectrumX(SessionBase):

async def subscribe_headers_result(self):
"""The result of a header subscription or notification."""
- return self.session_mgr.hsub_results[self.subscribe_headers_raw]
+ return self.session_manager.hsub_results[self.subscribe_headers_raw]

async def _headers_subscribe(self, raw):
"""Subscribe to get headers of new blocks."""

@@ -1125,7 +1139,7 @@ class LBRYElectrumX(SessionBase):
# Note history is ordered and mempool unordered in electrum-server
# For mempool, height is -1 if it has unconfirmed inputs, otherwise 0

- db_history = await self.session_mgr.limited_history(hashX)
+ db_history = await self.session_manager.limited_history(hashX)
mempool = self.mempool.transaction_summaries(hashX)

status = ''.join(f'{hash_to_hex_str(tx_hash)}:'

@@ -1140,32 +1154,32 @@ class LBRYElectrumX(SessionBase):
status = None

if mempool:
- self.session_mgr.mempool_statuses[hashX] = status
+ self.session_manager.mempool_statuses[hashX] = status
else:
- self.session_mgr.mempool_statuses.pop(hashX, None)
+ self.session_manager.mempool_statuses.pop(hashX, None)
return status

- async def hashX_listunspent(self, hashX):
+ # async def hashX_listunspent(self, hashX):
- """Return the list of UTXOs of a script hash, including mempool
+ # """Return the list of UTXOs of a script hash, including mempool
- effects."""
+ # effects."""
- utxos = await self.db.all_utxos(hashX)
+ # utxos = await self.db.all_utxos(hashX)
- utxos = sorted(utxos)
+ # utxos = sorted(utxos)
- utxos.extend(await self.mempool.unordered_UTXOs(hashX))
+ # utxos.extend(await self.mempool.unordered_UTXOs(hashX))
- spends = await self.mempool.potential_spends(hashX)
+ # spends = await self.mempool.potential_spends(hashX)
-
+ #
- return [{'tx_hash': hash_to_hex_str(utxo.tx_hash),
+ # return [{'tx_hash': hash_to_hex_str(utxo.tx_hash),
- 'tx_pos': utxo.tx_pos,
+ # 'tx_pos': utxo.tx_pos,
- 'height': utxo.height, 'value': utxo.value}
+ # 'height': utxo.height, 'value': utxo.value}
- for utxo in utxos
+ # for utxo in utxos
- if (utxo.tx_hash, utxo.tx_pos) not in spends]
+ # if (utxo.tx_hash, utxo.tx_pos) not in spends]

async def hashX_subscribe(self, hashX, alias):
self.hashX_subs[hashX] = alias
- self.session_mgr.hashx_subscriptions_by_session[hashX].add(id(self))
+ self.session_manager.hashx_subscriptions_by_session[hashX].add(id(self))
return await self.address_status(hashX)

async def hashX_unsubscribe(self, hashX, alias):
- sessions = self.session_mgr.hashx_subscriptions_by_session[hashX]
+ sessions = self.session_manager.hashx_subscriptions_by_session[hashX]
sessions.remove(id(self))
if not sessions:
self.hashX_subs.pop(hashX, None)
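In the status hunks above, a subscription's status is built from the confirmed history plus mempool entries as a concatenation of txid:height pairs, and ElectrumX-style servers then report a digest of that string to the client. A hedged sketch of that idea; only the txid:height concatenation is visible in this diff, the separator and hashing details below are assumptions:

    from hashlib import sha256

    def address_status(history, mempool):
        """history and mempool are lists of (tx_hash_hex, height) tuples."""
        status = ''.join(f'{tx_hash}:{height}:' for tx_hash, height in history)
        status += ''.join(f'{tx_hash}:{height}:' for tx_hash, height in mempool)
        if not status:
            return None  # no history at all -> null status
        return sha256(status.encode()).hex()  # assumed digest step

    print(address_status([("ab" * 32, 207)], []))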
@@ -1177,10 +1191,10 @@ class LBRYElectrumX(SessionBase):
pass
raise RPCError(BAD_REQUEST, f'{address} is not a valid address')

- async def address_get_balance(self, address):
+ # async def address_get_balance(self, address):
- """Return the confirmed and unconfirmed balance of an address."""
+ # """Return the confirmed and unconfirmed balance of an address."""
- hashX = self.address_to_hashX(address)
+ # hashX = self.address_to_hashX(address)
- return await self.get_balance(hashX)
+ # return await self.get_balance(hashX)

async def address_get_history(self, address):
"""Return the confirmed and unconfirmed history of an address."""

@@ -1192,10 +1206,10 @@ class LBRYElectrumX(SessionBase):
hashX = self.address_to_hashX(address)
return self.unconfirmed_history(hashX)

- async def address_listunspent(self, address):
+ # async def address_listunspent(self, address):
- """Return the list of UTXOs of an address."""
+ # """Return the list of UTXOs of an address."""
- hashX = self.address_to_hashX(address)
+ # hashX = self.address_to_hashX(address)
- return await self.hashX_listunspent(hashX)
+ # return await self.hashX_listunspent(hashX)

async def address_subscribe(self, *addresses):
"""Subscribe to an address.

@@ -1216,16 +1230,16 @@ class LBRYElectrumX(SessionBase):
hashX = self.address_to_hashX(address)
return await self.hashX_unsubscribe(hashX, address)

- async def get_balance(self, hashX):
+ # async def get_balance(self, hashX):
- utxos = await self.db.all_utxos(hashX)
+ # utxos = await self.db.all_utxos(hashX)
- confirmed = sum(utxo.value for utxo in utxos)
+ # confirmed = sum(utxo.value for utxo in utxos)
- unconfirmed = await self.mempool.balance_delta(hashX)
+ # unconfirmed = await self.mempool.balance_delta(hashX)
- return {'confirmed': confirmed, 'unconfirmed': unconfirmed}
+ # return {'confirmed': confirmed, 'unconfirmed': unconfirmed}

- async def scripthash_get_balance(self, scripthash):
+ # async def scripthash_get_balance(self, scripthash):
- """Return the confirmed and unconfirmed balance of a scripthash."""
+ # """Return the confirmed and unconfirmed balance of a scripthash."""
- hashX = scripthash_to_hashX(scripthash)
+ # hashX = scripthash_to_hashX(scripthash)
- return await self.get_balance(hashX)
+ # return await self.get_balance(hashX)

def unconfirmed_history(self, hashX):
# Note unconfirmed history is unordered in electrum-server

@@ -1237,7 +1251,7 @@ class LBRYElectrumX(SessionBase):

async def confirmed_and_unconfirmed_history(self, hashX):
# Note history is ordered but unconfirmed is unordered in e-s
- history = await self.session_mgr.limited_history(hashX)
+ history = await self.session_manager.limited_history(hashX)
conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height}
for tx_hash, height in history]
return conf + self.unconfirmed_history(hashX)

@@ -1252,10 +1266,10 @@ class LBRYElectrumX(SessionBase):
hashX = scripthash_to_hashX(scripthash)
return self.unconfirmed_history(hashX)

- async def scripthash_listunspent(self, scripthash):
+ # async def scripthash_listunspent(self, scripthash):
- """Return the list of UTXOs of a scripthash."""
+ # """Return the list of UTXOs of a scripthash."""
- hashX = scripthash_to_hashX(scripthash)
+ # hashX = scripthash_to_hashX(scripthash)
- return await self.hashX_listunspent(hashX)
+ # return await self.hashX_listunspent(hashX)

async def scripthash_subscribe(self, scripthash):
"""Subscribe to a script hash.

@@ -1290,7 +1304,7 @@ class LBRYElectrumX(SessionBase):

max_size = self.MAX_CHUNK_SIZE
count = min(count, max_size)
- headers, count = self.db.read_headers(start_height, count)
+ headers, count = await self.db.read_headers(start_height, count)

if b64:
headers = self.db.encode_headers(start_height, count, headers)

@@ -1313,7 +1327,7 @@ class LBRYElectrumX(SessionBase):
index = non_negative_integer(index)
size = self.coin.CHUNK_SIZE
start_height = index * size
- headers, _ = self.db.read_headers(start_height, size)
+ headers, _ = await self.db.read_headers(start_height, size)
return headers.hex()

async def block_get_header(self, height):

@@ -1321,7 +1335,7 @@ class LBRYElectrumX(SessionBase):

height: the header's height"""
height = non_negative_integer(height)
- return await self.session_mgr.electrum_header(height)
+ return await self.session_manager.electrum_header(height)

def is_tor(self):
"""Try to detect if the connection is to a tor hidden service we are

@@ -1411,10 +1425,10 @@ class LBRYElectrumX(SessionBase):
self.close_after_send = True
raise RPCError(BAD_REQUEST, f'unsupported client: {client_name}')
if self.client_version != client_name[:17]:
- self.session_mgr.session_count_metric.labels(version=self.client_version).dec()
+ self.session_manager.session_count_metric.labels(version=self.client_version).dec()
self.client_version = client_name[:17]
- self.session_mgr.session_count_metric.labels(version=self.client_version).inc()
+ self.session_manager.session_count_metric.labels(version=self.client_version).inc()
- self.session_mgr.client_version_metric.labels(version=self.client_version).inc()
+ self.session_manager.client_version_metric.labels(version=self.client_version).inc()

# Find the highest common protocol version. Disconnect if
# that protocol version in unsupported.

@@ -1435,9 +1449,10 @@ class LBRYElectrumX(SessionBase):
raw_tx: the raw transaction as a hexadecimal string"""
# This returns errors as JSON RPC errors, as is natural
try:
- hex_hash = await self.session_mgr.broadcast_transaction(raw_tx)
+ hex_hash = await self.session_manager.broadcast_transaction(raw_tx)
self.txs_sent += 1
- self.mempool.wakeup.set()
+ # self.mempool.wakeup.set()
+ # await asyncio.sleep(0.5)
self.logger.info(f'sent tx: {hex_hash}')
return hex_hash
except DaemonError as e:

@@ -1451,7 +1466,7 @@ class LBRYElectrumX(SessionBase):
return (await self.transaction_get_batch(tx_hash))[tx_hash]

async def transaction_get_batch(self, *tx_hashes):
- self.session_mgr.tx_request_count_metric.inc(len(tx_hashes))
+ self.session_manager.tx_request_count_metric.inc(len(tx_hashes))
if len(tx_hashes) > 100:
raise RPCError(BAD_REQUEST, f'too many tx hashes in request: {len(tx_hashes)}')
for tx_hash in tx_hashes:

@@ -1490,8 +1505,7 @@ class LBRYElectrumX(SessionBase):
'block_height': block_height
}
await asyncio.sleep(0) # heavy call, give other tasks a chance
+ self.session_manager.tx_replied_count_metric.inc(len(tx_hashes))
-
- self.session_mgr.tx_replied_count_metric.inc(len(tx_hashes))
return batch_result

async def transaction_get(self, tx_hash, verbose=False):

@@ -98,7 +98,7 @@ class Deserializer:
TX_HASH_FN = staticmethod(double_sha256)

def __init__(self, binary, start=0):
- assert isinstance(binary, bytes)
+ assert isinstance(binary, bytes), f"type {type(binary)} is not 'bytes'"
self.binary = binary
self.binary_length = len(binary)
self.cursor = start

@@ -1,12 +1,12 @@
#!/bin/bash

- SNAPSHOT_HEIGHT="1049658"
+ SNAPSHOT_HEIGHT="1072108"

HUB_VOLUME_PATH="/var/lib/docker/volumes/${USER}_wallet_server"
ES_VOLUME_PATH="/var/lib/docker/volumes/${USER}_es01"

- SNAPSHOT_TAR_NAME="wallet_server_snapshot_${SNAPSHOT_HEIGHT}.tar"
+ SNAPSHOT_TAR_NAME="wallet_server_snapshot_${SNAPSHOT_HEIGHT}.tar.gz"
- ES_SNAPSHOT_TAR_NAME="es_snapshot_${SNAPSHOT_HEIGHT}.tar"
+ ES_SNAPSHOT_TAR_NAME="es_snapshot_${SNAPSHOT_HEIGHT}.tar.gz"

SNAPSHOT_URL="https://snapshots.lbry.com/hub/${SNAPSHOT_TAR_NAME}"
ES_SNAPSHOT_URL="https://snapshots.lbry.com/hub/${ES_SNAPSHOT_TAR_NAME}"

17 setup.py

@@ -7,9 +7,11 @@ BASE = os.path.dirname(__file__)
with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh:
long_description = fh.read()

- PLYVEL = []
- if sys.platform.startswith('linux'):
- PLYVEL.append('plyvel==1.3.0')
+ ROCKSDB = []
+ if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
+ ROCKSDB.append('lbry-rocksdb==0.8.2')

setup(
name=__name__,

@@ -28,9 +30,10 @@ setup(
entry_points={
'console_scripts': [
'lbrynet=lbry.extras.cli:main',
- 'lbry-hub=lbry.wallet.server.cli:main',
+ 'lbry-hub-writer=lbry.wallet.server.cli:run_writer_forever',
- 'orchstr8=lbry.wallet.orchstr8.cli:main',
+ 'lbry-hub-server=lbry.wallet.server.cli:run_server_forever',
- 'lbry-hub-elastic-sync=lbry.wallet.server.db.elasticsearch.sync:run_elastic_sync'
+ 'lbry-hub-elastic-sync=lbry.wallet.server.db.elasticsearch.sync:run_elastic_sync',
+ 'orchstr8=lbry.wallet.orchstr8.cli:main'
],
},
install_requires=[

@@ -57,7 +60,7 @@ setup(
'pylru==1.1.0',
'elasticsearch==7.10.1',
'grpcio==1.38.0'
- ] + PLYVEL,
+ ] + ROCKSDB,
extras_require={
'torrent': ['lbry-libtorrent'],
'lint': ['pylint==2.10.0'],
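The packaging hunks above swap the platform-gated LevelDB binding (plyvel) for lbry-rocksdb and split the hub into separate writer/server console scripts. Pulled out of the diff, the pattern of building a conditional requirement list and appending it to install_requires looks like this; a condensed illustration, not the full setup.py, and the package name below is a placeholder:

    import sys
    from setuptools import setup

    # Only Linux and macOS get the RocksDB binding; other platforms install without it.
    ROCKSDB = []
    if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
        ROCKSDB.append('lbry-rocksdb==0.8.2')

    setup(
        name='example-package',  # placeholder, not the real project metadata
        install_requires=[
            'elasticsearch==7.10.1',
            'grpcio==1.38.0',
        ] + ROCKSDB,
    )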
@@ -9,7 +9,7 @@ class BlockchainReorganizationTests(CommandTestCase):
VERBOSITY = logging.WARN

async def assertBlockHash(self, height):
- bp = self.conductor.spv_node.server.bp
+ bp = self.conductor.spv_node.writer

def get_txids():
return [

@@ -29,15 +29,16 @@ class BlockchainReorganizationTests(CommandTestCase):
self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are of order')

async def test_reorg(self):
- bp = self.conductor.spv_node.server.bp
+ bp = self.conductor.spv_node.writer
bp.reorg_count_metric.set(0)
# invalidate current block, move forward 2
height = 206
self.assertEqual(self.ledger.headers.height, height)
await self.assertBlockHash(height)
- await self.blockchain.invalidate_block((await self.ledger.headers.hash(206)).decode())
+ block_hash = (await self.ledger.headers.hash(206)).decode()
+ await self.blockchain.invalidate_block(block_hash)
await self.blockchain.generate(2)
- await self.ledger.on_header.where(lambda e: e.height == 207)
+ await asyncio.wait_for(self.on_header(207), 3.0)
self.assertEqual(self.ledger.headers.height, 207)
await self.assertBlockHash(206)
await self.assertBlockHash(207)
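The reorg tests now bound every header wait with asyncio.wait_for instead of waiting indefinitely on the ledger's header stream, so a stalled hub fails the test quickly rather than hanging. A self-contained illustration of the bounded-wait pattern; the event source here is simulated, and the on_header helper itself is defined elsewhere in the test suite:

    import asyncio

    async def wait_for_height(header_heights: asyncio.Queue, target: int):
        # keep consuming header events until the target height shows up
        while True:
            height = await header_heights.get()
            if height >= target:
                return height

    async def main():
        q = asyncio.Queue()
        for h in (206, 207):
            q.put_nowait(h)
        # equivalent in spirit to: await asyncio.wait_for(self.on_header(207), 3.0)
        print(await asyncio.wait_for(wait_for_height(q, 207), 3.0))

    asyncio.run(main())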
|
@ -46,14 +47,14 @@ class BlockchainReorganizationTests(CommandTestCase):
|
||||||
# invalidate current block, move forward 3
|
# invalidate current block, move forward 3
|
||||||
await self.blockchain.invalidate_block((await self.ledger.headers.hash(206)).decode())
|
await self.blockchain.invalidate_block((await self.ledger.headers.hash(206)).decode())
|
||||||
await self.blockchain.generate(3)
|
await self.blockchain.generate(3)
|
||||||
await self.ledger.on_header.where(lambda e: e.height == 208)
|
await asyncio.wait_for(self.on_header(208), 3.0)
|
||||||
self.assertEqual(self.ledger.headers.height, 208)
|
self.assertEqual(self.ledger.headers.height, 208)
|
||||||
await self.assertBlockHash(206)
|
await self.assertBlockHash(206)
|
||||||
await self.assertBlockHash(207)
|
await self.assertBlockHash(207)
|
||||||
await self.assertBlockHash(208)
|
await self.assertBlockHash(208)
|
||||||
self.assertEqual(2, bp.reorg_count_metric._samples()[0][2])
|
self.assertEqual(2, bp.reorg_count_metric._samples()[0][2])
|
||||||
await self.blockchain.generate(3)
|
await self.blockchain.generate(3)
|
||||||
await self.ledger.on_header.where(lambda e: e.height == 211)
|
await asyncio.wait_for(self.on_header(211), 3.0)
|
||||||
await self.assertBlockHash(209)
|
await self.assertBlockHash(209)
|
||||||
await self.assertBlockHash(210)
|
await self.assertBlockHash(210)
|
||||||
await self.assertBlockHash(211)
|
await self.assertBlockHash(211)
|
||||||
|
@ -62,7 +63,7 @@ class BlockchainReorganizationTests(CommandTestCase):
|
||||||
)
|
)
|
||||||
await self.ledger.wait(still_valid)
|
await self.ledger.wait(still_valid)
|
||||||
await self.blockchain.generate(1)
|
await self.blockchain.generate(1)
|
||||||
await self.ledger.on_header.where(lambda e: e.height == 212)
|
await asyncio.wait_for(self.on_header(212), 1.0)
|
||||||
claim_id = still_valid.outputs[0].claim_id
|
claim_id = still_valid.outputs[0].claim_id
|
||||||
c1 = (await self.resolve(f'still-valid#{claim_id}'))['claim_id']
|
c1 = (await self.resolve(f'still-valid#{claim_id}'))['claim_id']
|
||||||
c2 = (await self.resolve(f'still-valid#{claim_id[:2]}'))['claim_id']
|
c2 = (await self.resolve(f'still-valid#{claim_id[:2]}'))['claim_id']
|
||||||
|
@ -71,7 +72,7 @@ class BlockchainReorganizationTests(CommandTestCase):
|
||||||
|
|
||||||
abandon_tx = await self.daemon.jsonrpc_stream_abandon(claim_id=claim_id)
|
abandon_tx = await self.daemon.jsonrpc_stream_abandon(claim_id=claim_id)
|
||||||
await self.blockchain.generate(1)
|
await self.blockchain.generate(1)
|
||||||
await self.ledger.on_header.where(lambda e: e.height == 213)
|
await asyncio.wait_for(self.on_header(213), 1.0)
|
||||||
c1 = await self.resolve(f'still-valid#{still_valid.outputs[0].claim_id}')
|
c1 = await self.resolve(f'still-valid#{still_valid.outputs[0].claim_id}')
|
||||||
c2 = await self.daemon.jsonrpc_resolve([f'still-valid#{claim_id[:2]}'])
|
c2 = await self.daemon.jsonrpc_resolve([f'still-valid#{claim_id[:2]}'])
|
||||||
c3 = await self.daemon.jsonrpc_resolve([f'still-valid'])
|
c3 = await self.daemon.jsonrpc_resolve([f'still-valid'])
|
||||||
|
@ -114,9 +115,8 @@ class BlockchainReorganizationTests(CommandTestCase):
|
||||||
await self.blockchain.invalidate_block(invalidated_block_hash)
|
await self.blockchain.invalidate_block(invalidated_block_hash)
|
||||||
await self.blockchain.clear_mempool()
|
await self.blockchain.clear_mempool()
|
||||||
await self.blockchain.generate(2)
|
await self.blockchain.generate(2)
|
||||||
|
|
||||||
# wait for the client to catch up and verify the reorg
|
|
||||||
await asyncio.wait_for(self.on_header(209), 3.0)
|
await asyncio.wait_for(self.on_header(209), 3.0)
|
||||||
|
|
||||||
await self.assertBlockHash(207)
|
await self.assertBlockHash(207)
|
||||||
await self.assertBlockHash(208)
|
await self.assertBlockHash(208)
|
||||||
await self.assertBlockHash(209)
|
await self.assertBlockHash(209)
|
||||||
|
@ -142,9 +142,8 @@ class BlockchainReorganizationTests(CommandTestCase):
|
||||||
# broadcast the claim in a different block
|
# broadcast the claim in a different block
|
||||||
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
||||||
self.assertEqual(broadcast_tx.id, new_txid)
|
self.assertEqual(broadcast_tx.id, new_txid)
|
||||||
await self.blockchain.generate(1)
|
|
||||||
|
|
||||||
# wait for the client to catch up
|
await self.blockchain.generate(1)
|
||||||
await asyncio.wait_for(self.on_header(210), 1.0)
|
await asyncio.wait_for(self.on_header(210), 1.0)
|
||||||
|
|
||||||
# verify the claim is in the new block and that it is returned by claim_search
|
# verify the claim is in the new block and that it is returned by claim_search
|
||||||
|
@ -222,8 +221,6 @@ class BlockchainReorganizationTests(CommandTestCase):
|
||||||
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
||||||
self.assertEqual(broadcast_tx.id, new_txid)
|
self.assertEqual(broadcast_tx.id, new_txid)
|
||||||
await self.blockchain.generate(1)
|
await self.blockchain.generate(1)
|
||||||
|
|
||||||
# wait for the client to catch up
|
|
||||||
await asyncio.wait_for(self.on_header(210), 1.0)
|
await asyncio.wait_for(self.on_header(210), 1.0)
|
||||||
|
|
||||||
# verify the claim is in the new block and that it is returned by claim_search
|
# verify the claim is in the new block and that it is returned by claim_search
|
||||||
|
|
|
@ -86,7 +86,7 @@ class ReconnectTests(IntegrationTestCase):
|
||||||
await self.ledger.stop()
|
await self.ledger.stop()
|
||||||
initial_height = self.ledger.local_height_including_downloaded_height
|
initial_height = self.ledger.local_height_including_downloaded_height
|
||||||
await self.blockchain.generate(100)
|
await self.blockchain.generate(100)
|
||||||
while self.conductor.spv_node.server.session_mgr.notified_height < initial_height + 99: # off by 1
|
while self.conductor.spv_node.server.session_manager.notified_height < initial_height + 99: # off by 1
|
||||||
await asyncio.sleep(0.1)
|
await asyncio.sleep(0.1)
|
||||||
self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height)
|
self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height)
|
||||||
await self.ledger.headers.open()
|
await self.ledger.headers.open()
|
||||||
|
@ -169,7 +169,7 @@ class UDPServerFailDiscoveryTest(AsyncioTestCase):
|
||||||
self.addCleanup(conductor.stop_blockchain)
|
self.addCleanup(conductor.stop_blockchain)
|
||||||
await conductor.start_spv()
|
await conductor.start_spv()
|
||||||
self.addCleanup(conductor.stop_spv)
|
self.addCleanup(conductor.stop_spv)
|
||||||
self.assertFalse(conductor.spv_node.server.bp.status_server.is_running)
|
self.assertFalse(conductor.spv_node.server.reader.status_server.is_running)
|
||||||
await asyncio.wait_for(conductor.start_wallet(), timeout=5)
|
await asyncio.wait_for(conductor.start_wallet(), timeout=5)
|
||||||
self.addCleanup(conductor.stop_wallet)
|
self.addCleanup(conductor.stop_wallet)
|
||||||
self.assertTrue(conductor.wallet_node.ledger.network.is_connected)
|
self.assertTrue(conductor.wallet_node.ledger.network.is_connected)
|
||||||
|
|
|
@ -63,7 +63,7 @@ class SyncTests(IntegrationTestCase):
|
||||||
await self.assertBalance(account1, '1.0')
|
await self.assertBalance(account1, '1.0')
|
||||||
await self.assertBalance(account2, '1.0')
|
await self.assertBalance(account2, '1.0')
|
||||||
|
|
||||||
await self.blockchain.generate(1)
|
await self.generate(1)
|
||||||
|
|
||||||
# pay 0.01 from main node to receiving node, would have increased change addresses
|
# pay 0.01 from main node to receiving node, would have increased change addresses
|
||||||
address0 = (await account0.receiving.get_addresses())[0]
|
address0 = (await account0.receiving.get_addresses())[0]
|
||||||
|
@ -79,7 +79,7 @@ class SyncTests(IntegrationTestCase):
|
||||||
account1.ledger.wait(tx),
|
account1.ledger.wait(tx),
|
||||||
account2.ledger.wait(tx),
|
account2.ledger.wait(tx),
|
||||||
])
|
])
|
||||||
await self.blockchain.generate(1)
|
await self.generate(1)
|
||||||
await asyncio.wait([
|
await asyncio.wait([
|
||||||
account0.ledger.wait(tx),
|
account0.ledger.wait(tx),
|
||||||
account1.ledger.wait(tx),
|
account1.ledger.wait(tx),
|
||||||
|
@ -92,7 +92,7 @@ class SyncTests(IntegrationTestCase):
|
||||||
await self.assertBalance(account1, '0.989876')
|
await self.assertBalance(account1, '0.989876')
|
||||||
await self.assertBalance(account2, '0.989876')
|
await self.assertBalance(account2, '0.989876')
|
||||||
|
|
||||||
await self.blockchain.generate(1)
|
await self.generate(1)
|
||||||
|
|
||||||
# create a new mirror node and see if it syncs to same balance from scratch
|
# create a new mirror node and see if it syncs to same balance from scratch
|
||||||
node3 = await self.make_wallet_node(account1.seed)
|
node3 = await self.make_wallet_node(account1.seed)
|
||||||
|
|
|
@ -11,7 +11,7 @@ from lbry.wallet.dewies import dict_values_to_lbc
|
||||||
class WalletCommands(CommandTestCase):
|
class WalletCommands(CommandTestCase):
|
||||||
|
|
||||||
async def test_wallet_create_and_add_subscribe(self):
|
async def test_wallet_create_and_add_subscribe(self):
|
||||||
session = next(iter(self.conductor.spv_node.server.session_mgr.sessions.values()))
|
session = next(iter(self.conductor.spv_node.server.session_manager.sessions.values()))
|
||||||
self.assertEqual(len(session.hashX_subs), 27)
|
self.assertEqual(len(session.hashX_subs), 27)
|
||||||
wallet = await self.daemon.jsonrpc_wallet_create('foo', create_account=True, single_key=True)
|
wallet = await self.daemon.jsonrpc_wallet_create('foo', create_account=True, single_key=True)
|
||||||
self.assertEqual(len(session.hashX_subs), 28)
|
self.assertEqual(len(session.hashX_subs), 28)
|
||||||
|
|
|
@ -25,13 +25,13 @@ class TestSessions(IntegrationTestCase):
|
||||||
)
|
)
|
||||||
await session.create_connection()
|
await session.create_connection()
|
||||||
await session.send_request('server.banner', ())
|
await session.send_request('server.banner', ())
|
||||||
self.assertEqual(len(self.conductor.spv_node.server.session_mgr.sessions), 1)
|
self.assertEqual(len(self.conductor.spv_node.server.session_manager.sessions), 1)
|
||||||
self.assertFalse(session.is_closing())
|
self.assertFalse(session.is_closing())
|
||||||
await asyncio.sleep(1.1)
|
await asyncio.sleep(1.1)
|
||||||
with self.assertRaises(asyncio.TimeoutError):
|
with self.assertRaises(asyncio.TimeoutError):
|
||||||
await session.send_request('server.banner', ())
|
await session.send_request('server.banner', ())
|
||||||
self.assertTrue(session.is_closing())
|
self.assertTrue(session.is_closing())
|
||||||
self.assertEqual(len(self.conductor.spv_node.server.session_mgr.sessions), 0)
|
self.assertEqual(len(self.conductor.spv_node.server.session_manager.sessions), 0)
|
||||||
|
|
||||||
async def test_proper_version(self):
|
async def test_proper_version(self):
|
||||||
info = await self.ledger.network.get_server_features()
|
info = await self.ledger.network.get_server_features()
|
||||||
|
@ -186,7 +186,7 @@ class TestHubDiscovery(CommandTestCase):
|
||||||
self.daemon.ledger.network.client.server_address_and_port, ('127.0.0.1', kp_final_node.port)
|
self.daemon.ledger.network.client.server_address_and_port, ('127.0.0.1', kp_final_node.port)
|
||||||
)
|
)
|
||||||
|
|
||||||
kp_final_node.server.session_mgr._notify_peer('127.0.0.1:9988')
|
kp_final_node.server.session_manager._notify_peer('127.0.0.1:9988')
|
||||||
await self.daemon.ledger.network.on_hub.first
|
await self.daemon.ledger.network.on_hub.first
|
||||||
await asyncio.sleep(0.5) # wait for above event to be processed by other listeners
|
await asyncio.sleep(0.5) # wait for above event to be processed by other listeners
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
|
|
|
@ -23,7 +23,7 @@ class BaseResolveTestCase(CommandTestCase):
|
||||||
def assertMatchESClaim(self, claim_from_es, claim_from_db):
|
def assertMatchESClaim(self, claim_from_es, claim_from_db):
|
||||||
self.assertEqual(claim_from_es['claim_hash'][::-1].hex(), claim_from_db.claim_hash.hex())
|
self.assertEqual(claim_from_es['claim_hash'][::-1].hex(), claim_from_db.claim_hash.hex())
|
||||||
self.assertEqual(claim_from_es['claim_id'], claim_from_db.claim_hash.hex())
|
self.assertEqual(claim_from_es['claim_id'], claim_from_db.claim_hash.hex())
|
||||||
self.assertEqual(claim_from_es['activation_height'], claim_from_db.activation_height)
|
self.assertEqual(claim_from_es['activation_height'], claim_from_db.activation_height, f"es height: {claim_from_es['activation_height']}, rocksdb height: {claim_from_db.activation_height}")
|
||||||
self.assertEqual(claim_from_es['last_take_over_height'], claim_from_db.last_takeover_height)
|
self.assertEqual(claim_from_es['last_take_over_height'], claim_from_db.last_takeover_height)
|
||||||
self.assertEqual(claim_from_es['tx_id'], claim_from_db.tx_hash[::-1].hex())
|
self.assertEqual(claim_from_es['tx_id'], claim_from_db.tx_hash[::-1].hex())
|
||||||
self.assertEqual(claim_from_es['tx_nout'], claim_from_db.position)
|
self.assertEqual(claim_from_es['tx_nout'], claim_from_db.position)
|
||||||
|
@ -44,44 +44,44 @@ class BaseResolveTestCase(CommandTestCase):
|
||||||
if claim_id is None:
|
if claim_id is None:
|
||||||
self.assertIn('error', other)
|
self.assertIn('error', other)
|
||||||
self.assertEqual(other['error']['name'], 'NOT_FOUND')
|
self.assertEqual(other['error']['name'], 'NOT_FOUND')
|
||||||
claims_from_es = (await self.conductor.spv_node.server.bp.db.search_index.search(name=name))[0]
|
claims_from_es = (await self.conductor.spv_node.server.session_manager.search_index.search(name=name))[0]
|
||||||
claims_from_es = [c['claim_hash'][::-1].hex() for c in claims_from_es]
|
claims_from_es = [c['claim_hash'][::-1].hex() for c in claims_from_es]
|
||||||
self.assertNotIn(claim_id, claims_from_es)
|
self.assertNotIn(claim_id, claims_from_es)
|
||||||
else:
|
else:
|
||||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(claim_id=claim_id)
|
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id)
|
||||||
self.assertEqual(claim_id, other['claim_id'])
|
self.assertEqual(claim_id, other['claim_id'])
|
||||||
self.assertEqual(claim_id, claim_from_es[0][0]['claim_hash'][::-1].hex())
|
self.assertEqual(claim_id, claim_from_es[0][0]['claim_hash'][::-1].hex())
|
||||||
|
|
||||||
async def assertNoClaimForName(self, name: str):
|
async def assertNoClaimForName(self, name: str):
|
||||||
lbrycrd_winning = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
|
lbrycrd_winning = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
|
||||||
stream, channel, _, _ = await self.conductor.spv_node.server.bp.db.resolve(name)
|
stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name)
|
||||||
self.assertNotIn('claimId', lbrycrd_winning)
|
self.assertNotIn('claimId', lbrycrd_winning)
|
||||||
if stream is not None:
|
if stream is not None:
|
||||||
self.assertIsInstance(stream, LookupError)
|
self.assertIsInstance(stream, LookupError)
|
||||||
else:
|
else:
|
||||||
self.assertIsInstance(channel, LookupError)
|
self.assertIsInstance(channel, LookupError)
|
||||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(name=name)
|
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(name=name)
|
||||||
self.assertListEqual([], claim_from_es[0])
|
self.assertListEqual([], claim_from_es[0])
|
||||||
|
|
||||||
async def assertNoClaim(self, claim_id: str):
|
async def assertNoClaim(self, claim_id: str):
|
||||||
self.assertDictEqual(
|
self.assertDictEqual(
|
||||||
{}, json.loads(await self.blockchain._cli_cmnd('getclaimbyid', claim_id))
|
{}, json.loads(await self.blockchain._cli_cmnd('getclaimbyid', claim_id))
|
||||||
)
|
)
|
||||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(claim_id=claim_id)
|
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id)
|
||||||
self.assertListEqual([], claim_from_es[0])
|
self.assertListEqual([], claim_from_es[0])
|
||||||
claim = await self.conductor.spv_node.server.bp.db.fs_getclaimbyid(claim_id)
|
claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id)
|
||||||
self.assertIsNone(claim)
|
self.assertIsNone(claim)
|
||||||
|
|
||||||
async def assertMatchWinningClaim(self, name):
|
async def assertMatchWinningClaim(self, name):
|
||||||
expected = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
|
expected = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
|
||||||
stream, channel, _, _ = await self.conductor.spv_node.server.bp.db.resolve(name)
|
stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name)
|
||||||
claim = stream if stream else channel
|
claim = stream if stream else channel
|
||||||
await self._assertMatchClaim(expected, claim)
|
await self._assertMatchClaim(expected, claim)
|
||||||
return claim
|
return claim
|
||||||
|
|
||||||
async def _assertMatchClaim(self, expected, claim):
|
async def _assertMatchClaim(self, expected, claim):
|
||||||
self.assertMatchDBClaim(expected, claim)
|
self.assertMatchDBClaim(expected, claim)
|
||||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
|
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||||
claim_id=claim.claim_hash.hex()
|
claim_id=claim.claim_hash.hex()
|
||||||
)
|
)
|
||||||
self.assertEqual(len(claim_from_es[0]), 1)
|
self.assertEqual(len(claim_from_es[0]), 1)
|
||||||
@@ -90,7 +90,7 @@ class BaseResolveTestCase(CommandTestCase):

     async def assertMatchClaim(self, claim_id, is_active_in_lbrycrd=True):
         expected = json.loads(await self.blockchain._cli_cmnd('getclaimbyid', claim_id))
-        claim = await self.conductor.spv_node.server.bp.db.fs_getclaimbyid(claim_id)
+        claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id)
         if is_active_in_lbrycrd:
             if not expected:
                 self.assertIsNone(claim)
@@ -98,7 +98,7 @@ class BaseResolveTestCase(CommandTestCase):
             self.assertMatchDBClaim(expected, claim)
         else:
             self.assertDictEqual({}, expected)
-        claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
+        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
             claim_id=claim.claim_hash.hex()
         )
         self.assertEqual(len(claim_from_es[0]), 1)
@@ -116,7 +116,7 @@ class BaseResolveTestCase(CommandTestCase):

     def _check_supports(self, claim_id, lbrycrd_supports, es_support_amount, is_active_in_lbrycrd=True):
         total_amount = 0
-        db = self.conductor.spv_node.server.bp.db
+        db = self.conductor.spv_node.server.db

         for i, (tx_num, position, amount) in enumerate(db.get_supports(bytes.fromhex(claim_id))):
             total_amount += amount
@@ -131,7 +131,7 @@ class BaseResolveTestCase(CommandTestCase):
     async def assertMatchClaimsForName(self, name):
         expected = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name))

-        db = self.conductor.spv_node.server.bp.db
+        db = self.conductor.spv_node.server.db
         # self.assertEqual(len(expected['claims']), len(db_claims.claims))
         # self.assertEqual(expected['lastTakeoverHeight'], db_claims.lastTakeoverHeight)
         last_takeover = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))['lastTakeoverHeight']
@@ -143,7 +143,7 @@ class BaseResolveTestCase(CommandTestCase):
             claim = db._fs_get_claim_by_hash(claim_hash)
             self.assertMatchDBClaim(c, claim)

-            claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
+            claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
                 claim_id=c['claimId']
             )
             self.assertEqual(len(claim_from_es[0]), 1)
@@ -151,6 +151,17 @@ class BaseResolveTestCase(CommandTestCase):
             self.assertMatchESClaim(claim_from_es[0][0], claim)
             self._check_supports(c['claimId'], c['supports'], claim_from_es[0][0]['support_amount'])

+    async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int,
+                              non_winning_claims: List[ClaimStateValue]):
+        self.assertEqual(height, self.conductor.spv_node.server.db.db_height)
+        await self.assertMatchClaimIsWinning(name, winning_claim_id)
+        for non_winning in non_winning_claims:
+            claim = await self.assertMatchClaim(
+                non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd
+            )
+            self.assertEqual(non_winning.activation_height, claim.activation_height)
+            self.assertEqual(last_takeover_height, claim.last_takeover_height)
+

 class ResolveCommand(BaseResolveTestCase):
     async def test_colliding_short_id(self):
@@ -641,17 +652,6 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
     async def create_stream_claim(self, amount: str, name='derp') -> str:
         return (await self.stream_create(name, amount, allow_duplicate_name=True))['outputs'][0]['claim_id']

-    async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int,
-                              non_winning_claims: List[ClaimStateValue]):
-        self.assertEqual(height, self.conductor.spv_node.server.bp.db.db_height)
-        await self.assertMatchClaimIsWinning(name, winning_claim_id)
-        for non_winning in non_winning_claims:
-            claim = await self.assertMatchClaim(
-                non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd
-            )
-            self.assertEqual(non_winning.activation_height, claim.activation_height)
-            self.assertEqual(last_takeover_height, claim.last_takeover_height)
-
     async def test_delay_takeover_with_update(self):
         name = 'derp'
         first_claim_id = await self.create_stream_claim('0.2', name)
@@ -961,7 +961,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         )
         greater_than_or_equal_to_zero = [
             claim['claim_id'] for claim in (
-                await self.conductor.spv_node.server.bp.db.search_index.search(
+                await self.conductor.spv_node.server.session_manager.search_index.search(
                     channel_id=channel_id, fee_amount=">=0"
             ))[0]
         ]
@@ -969,7 +969,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         self.assertSetEqual(set(greater_than_or_equal_to_zero), {stream_with_no_fee, stream_with_fee})
         greater_than_zero = [
             claim['claim_id'] for claim in (
-                await self.conductor.spv_node.server.bp.db.search_index.search(
+                await self.conductor.spv_node.server.session_manager.search_index.search(
                     channel_id=channel_id, fee_amount=">0"
             ))[0]
         ]
@@ -977,7 +977,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         self.assertSetEqual(set(greater_than_zero), {stream_with_fee})
         equal_to_zero = [
             claim['claim_id'] for claim in (
-                await self.conductor.spv_node.server.bp.db.search_index.search(
+                await self.conductor.spv_node.server.session_manager.search_index.search(
                     channel_id=channel_id, fee_amount="<=0"
             ))[0]
         ]
@@ -995,7 +995,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         await self.blockchain.send_to_address(address, 400.0)
         await self.account.ledger.on_address.first
         await self.generate(100)
-        self.assertEqual(800, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(800, self.conductor.spv_node.server.db.db_height)

         # Block 801: Claim A for 10 LBC is accepted.
         # It is the first claim, so it immediately becomes active and controlling.
@@ -1007,10 +1007,10 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         # Its activation height is 1121 + min(4032, floor((1121-801) / 32)) = 1121 + 10 = 1131.
         # State: A(10) is controlling, B(20) is accepted.
         await self.generate(32 * 10 - 1)
-        self.assertEqual(1120, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1120, self.conductor.spv_node.server.db.db_height)
         claim_id_B = (await self.stream_create(name, '20.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
-        claim_B, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_B}")
+        claim_B, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_B}")
-        self.assertEqual(1121, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1121, self.conductor.spv_node.server.db.db_height)
         self.assertEqual(1131, claim_B.activation_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

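A note on the arithmetic used in the comments above: a newly accepted claim activates after a delay of min(4032, (accepted_height - last_takeover_height) // 32) blocks. A minimal Python sketch (hypothetical helper, not part of the test suite) that reproduces the heights quoted in the comments:

    # Hypothetical illustration of the activation delay rule quoted in the comments above.
    def activation_height(accepted_height: int, last_takeover_height: int) -> int:
        delay = min(4032, (accepted_height - last_takeover_height) // 32)
        return accepted_height + delay

    # Takeover at 801; claims accepted at 1121, 1123 and 1132 activate at:
    assert activation_height(1121, 801) == 1131
    assert activation_height(1123, 801) == 1133
    assert activation_height(1132, 801) == 1142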
@@ -1018,33 +1018,33 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         # Since it is a support for the controlling claim, it activates immediately.
         # State: A(10+14) is controlling, B(20) is accepted.
         await self.support_create(claim_id_A, bid='14.0')
-        self.assertEqual(1122, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1122, self.conductor.spv_node.server.db.db_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

         # Block 1123: Claim C for 50 LBC is accepted.
         # The activation height is 1123 + min(4032, floor((1123-801) / 32)) = 1123 + 10 = 1133.
         # State: A(10+14) is controlling, B(20) is accepted, C(50) is accepted.
         claim_id_C = (await self.stream_create(name, '50.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
-        self.assertEqual(1123, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1123, self.conductor.spv_node.server.db.db_height)
-        claim_C, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_C}")
+        claim_C, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_C}")
         self.assertEqual(1133, claim_C.activation_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

         await self.generate(7)
-        self.assertEqual(1130, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1130, self.conductor.spv_node.server.db.db_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)
         await self.generate(1)

         # Block 1131: Claim B activates. It has 20 LBC, while claim A has 24 LBC (10 original + 14 from support X). There is no takeover, and claim A remains controlling.
         # State: A(10+14) is controlling, B(20) is active, C(50) is accepted.
-        self.assertEqual(1131, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1131, self.conductor.spv_node.server.db.db_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

         # Block 1132: Claim D for 300 LBC is accepted. The activation height is 1132 + min(4032, floor((1132-801) / 32)) = 1132 + 10 = 1142.
         # State: A(10+14) is controlling, B(20) is active, C(50) is accepted, D(300) is accepted.
         claim_id_D = (await self.stream_create(name, '300.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
-        self.assertEqual(1132, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1132, self.conductor.spv_node.server.db.db_height)
-        claim_D, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_D}")
+        claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}")
         self.assertEqual(False, claim_D.is_controlling)
         self.assertEqual(801, claim_D.last_takeover_height)
         self.assertEqual(1142, claim_D.activation_height)
@@ -1053,8 +1053,8 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         # Block 1133: Claim C activates. It has 50 LBC, while claim A has 24 LBC, so a takeover is initiated. The takeover height for this name is set to 1133, and therefore the activation delay for all the claims becomes min(4032, floor((1133-1133) / 32)) = 0. All the claims become active. The totals for each claim are recalculated, and claim D becomes controlling because it has the highest total.
         # State: A(10+14) is active, B(20) is active, C(50) is active, D(300) is controlling
         await self.generate(1)
-        self.assertEqual(1133, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1133, self.conductor.spv_node.server.db.db_height)
-        claim_D, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_D}")
+        claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}")
         self.assertEqual(True, claim_D.is_controlling)
         self.assertEqual(1133, claim_D.last_takeover_height)
         self.assertEqual(1133, claim_D.activation_height)
@@ -1329,9 +1329,8 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         # abandon the support that causes the winning claim to have the highest staked
         tx = await self.daemon.jsonrpc_txo_spend(type='support', txid=controlling_support_tx.id)
         await self.generate(1)
+        await self.assertMatchClaim(second_claim_id, is_active_in_lbrycrd=False)
         await self.assertMatchClaimIsWinning(name, first_claim_id)
-        # await self.assertMatchClaim(second_claim_id)

         await self.generate(1)

         await self.assertMatchClaim(first_claim_id)
@@ -1407,12 +1406,12 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         second_claim_id = (await self.stream_create(name, '0.01', allow_duplicate_name=True))['outputs'][0]['claim_id']
         await self.assertNoClaim(second_claim_id)
         self.assertEqual(
-            len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 1
+            len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 1
         )
         await self.generate(1)
         await self.assertMatchClaim(second_claim_id)
         self.assertEqual(
-            len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 2
+            len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 2
         )

     async def test_abandon_controlling_same_block_as_new_claim(self):
@@ -1428,35 +1427,47 @@ class ResolveClaimTakeovers(BaseResolveTestCase):

     async def test_trending(self):
         async def get_trending_score(claim_id):
-            return (await self.conductor.spv_node.server.bp.db.search_index.search(
+            return (await self.conductor.spv_node.server.session_manager.search_index.search(
                 claim_id=claim_id
             ))[0][0]['trending_score']

         claim_id1 = (await self.stream_create('derp', '1.0'))['outputs'][0]['claim_id']
-        COIN = 1E8
+        COIN = int(1E8)

-        height = 99000
+        self.assertEqual(self.conductor.spv_node.writer.height, 207)
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
-            claim_id1, height, 0, 10 * COIN
+            (208, bytes.fromhex(claim_id1)), (0, 10 * COIN)
         )
         await self.generate(1)
-        self.assertEqual(172.64252836433135, await get_trending_score(claim_id1))
+        self.assertEqual(self.conductor.spv_node.writer.height, 208)
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
-            claim_id1, height + 1, 10 * COIN, 100 * COIN
+        self.assertEqual(1.7090807854206793, await get_trending_score(claim_id1))
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+            (209, bytes.fromhex(claim_id1)), (10 * COIN, 100 * COIN)
         )
         await self.generate(1)
-        self.assertEqual(173.45931832928875, await get_trending_score(claim_id1))
+        self.assertEqual(self.conductor.spv_node.writer.height, 209)
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
+        self.assertEqual(2.2437974397778886, await get_trending_score(claim_id1))
-            claim_id1, height + 100, 100 * COIN, 1000000 * COIN
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+            (309, bytes.fromhex(claim_id1)), (100 * COIN, 1000000 * COIN)
         )
-        await self.generate(1)
+        await self.generate(100)
-        self.assertEqual(176.65517070393514, await get_trending_score(claim_id1))
+        self.assertEqual(self.conductor.spv_node.writer.height, 309)
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
+        self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))
-            claim_id1, height + 200, 1000000 * COIN, 1 * COIN
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+            (409, bytes.fromhex(claim_id1)), (1000000 * COIN, 1 * COIN)
         )

+        await self.generate(99)
+        self.assertEqual(self.conductor.spv_node.writer.height, 408)
+        self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))

         await self.generate(1)
-        self.assertEqual(-174.951347102643, await get_trending_score(claim_id1))
+        self.assertEqual(self.conductor.spv_node.writer.height, 409)
-        search_results = (await self.conductor.spv_node.server.bp.db.search_index.search(claim_name="derp"))[0]
+        self.assertEqual(-3.4256156592205627, await get_trending_score(claim_id1))
+        search_results = (await self.conductor.spv_node.server.session_manager.search_index.search(claim_name="derp"))[0]
         self.assertEqual(1, len(search_results))
         self.assertListEqual([claim_id1], [c['claim_id'] for c in search_results])

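For readers of test_trending above: each staged trending_notification row is keyed by (height, claim_hash) and stores (previous_amount, new_amount); given the COIN = int(1E8) constant, the amounts appear to be in deweys (10^-8 LBC). A sketch of the schedule the test stages, under that reading (claim_id1 is the claim created in the test):

    COIN = int(1E8)  # matches the constant defined in the test above
    # height -> (previous_amount, new_amount), mirroring the stage_put calls above
    schedule = {
        208: (0 * COIN, 10 * COIN),
        209: (10 * COIN, 100 * COIN),
        309: (100 * COIN, 1000000 * COIN),
        409: (1000000 * COIN, 1 * COIN),
    }
    # A staged change only affects the trending score once self.generate(...) has
    # advanced the writer to that height; at height 408 the score is still unchanged.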
@@ -1465,22 +1476,31 @@ class ResolveAfterReorg(BaseResolveTestCase):
     async def reorg(self, start):
         blocks = self.ledger.headers.height - start
         self.blockchain.block_expected = start - 1

+        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
+        self.conductor.spv_node.server.synchronized.clear()

         # go back to start
         await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode())
         # go to previous + 1
-        await self.generate(blocks + 2)
+        await self.blockchain.generate(blocks + 2)

+        await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
+        await self.conductor.spv_node.server.synchronized.wait()
+        # await asyncio.wait_for(self.on_header(self.blockchain.block_expected), 30.0)

     async def assertBlockHash(self, height):
-        bp = self.conductor.spv_node.server.bp
+        reader_db = self.conductor.spv_node.server.db
         block_hash = await self.blockchain.get_block_hash(height)

         self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode())
-        self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex())
+        self.assertEqual(block_hash, (await reader_db.fs_block_hashes(height, 1))[0][::-1].hex())
         txids = [
-            tx_hash[::-1].hex() for tx_hash in bp.db.get_block_txs(height)
+            tx_hash[::-1].hex() for tx_hash in reader_db.get_block_txs(height)
         ]
-        txs = await bp.db.get_transactions_and_merkles(txids)
+        txs = await reader_db.get_transactions_and_merkles(txids)
-        block_txs = (await bp.daemon.deserialised_block(block_hash))['tx']
+        block_txs = (await self.conductor.spv_node.server.daemon.deserialised_block(block_hash))['tx']
         self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions')
         self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are of order')

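The updated reorg() helper above uses a clear-then-wait pattern: the server's synchronized event is cleared before blocks are invalidated and regenerated, and awaited afterwards, so a sync that completes early is not missed. A standalone sketch of that pattern (hypothetical names, not the test suite's API):

    import asyncio

    # Arm the event before triggering the work, await it after; if the work already
    # finished, wait() returns immediately instead of hanging.
    async def trigger_and_wait(synchronized: asyncio.Event, trigger):
        synchronized.clear()
        await trigger()  # e.g. ask lbrycrd to invalidate and regenerate blocks
        await synchronized.wait()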
@@ -1491,9 +1511,18 @@ class ResolveAfterReorg(BaseResolveTestCase):
         channel_id = self.get_claim_id(
             await self.channel_create(channel_name, '0.01')
         )
-        self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=207, name='@abc', winning_claim_id=channel_id, last_takeover_height=207,
+            non_winning_claims=[]
+        )

         await self.reorg(206)
-        self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=208, name='@abc', winning_claim_id=channel_id, last_takeover_height=207,
+            non_winning_claims=[]
+        )

         # await self.assertNoClaimForName(channel_name)
         # self.assertNotIn('error', await self.resolve(channel_name))
@@ -1502,16 +1531,29 @@ class ResolveAfterReorg(BaseResolveTestCase):
         stream_id = self.get_claim_id(
             await self.stream_create(stream_name, '0.01', channel_id=channel_id)
         )
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=209, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )
         await self.reorg(206)
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=210, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )

         await self.support_create(stream_id, '0.01')
-        self.assertNotIn('error', await self.resolve(stream_name))
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=211, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )
         await self.reorg(206)
         # self.assertNotIn('error', await self.resolve(stream_name))
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=212, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )

         await self.stream_abandon(stream_id)
         self.assertNotIn('error', await self.resolve(channel_name))
@@ -1553,7 +1595,6 @@ class ResolveAfterReorg(BaseResolveTestCase):
         await self.ledger.wait(broadcast_tx)
         await self.support_create(still_valid.outputs[0].claim_id, '0.01')

-        # await self.generate(1)
         await self.ledger.wait(broadcast_tx, self.blockchain.block_expected)
         self.assertEqual(self.ledger.headers.height, 208)
         await self.assertBlockHash(208)
@@ -1603,7 +1644,7 @@ class ResolveAfterReorg(BaseResolveTestCase):
         await self.blockchain.generate(1)

         # wait for the client to catch up
-        await asyncio.wait_for(self.on_header(210), 1.0)
+        await asyncio.wait_for(self.on_header(210), 3.0)

         # verify the claim is in the new block and that it is returned by claim_search
         republished = await self.resolve('hovercraft')
@@ -1653,7 +1694,7 @@ class ResolveAfterReorg(BaseResolveTestCase):
         await self.blockchain.generate(2)

         # wait for the client to catch up and verify the reorg
-        await asyncio.wait_for(self.on_header(209), 3.0)
+        await asyncio.wait_for(self.on_header(209), 30.0)
         await self.assertBlockHash(207)
         await self.assertBlockHash(208)
         await self.assertBlockHash(209)

@@ -23,7 +23,7 @@ class BasicTransactionTest(IntegrationTestCase):
         ))
         sendtxid1 = await self.blockchain.send_to_address(address1, 5)
         sendtxid2 = await self.blockchain.send_to_address(address2, 5)
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await notifications

         self.assertEqual(d2l(await self.account.get_balance()), '10.0')
@@ -57,7 +57,7 @@ class BasicTransactionTest(IntegrationTestCase):
         notifications = asyncio.create_task(asyncio.wait(
             [asyncio.ensure_future(self.ledger.wait(channel_tx)), asyncio.ensure_future(self.ledger.wait(stream_tx))]
         ))
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await notifications
         self.assertEqual(d2l(await self.account.get_balance()), '7.985786')
         self.assertEqual(d2l(await self.account.get_balance(include_claims=True)), '9.985786')
@@ -70,7 +70,7 @@ class BasicTransactionTest(IntegrationTestCase):
         await self.broadcast(abandon_tx)
         await notify
         notify = asyncio.create_task(self.ledger.wait(abandon_tx))
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await notify

         response = await self.ledger.resolve([], ['lbry://@bar/foo'])

@@ -9,9 +9,8 @@ from lbry.wallet.manager import WalletManager

-
 class BasicTransactionTests(IntegrationTestCase):

     async def test_variety_of_transactions_and_longish_history(self):
-        await self.blockchain.generate(300)
+        await self.generate(300)
         await self.assertBalance(self.account, '0.0')
         addresses = await self.account.receiving.get_addresses()

@@ -57,7 +56,7 @@ class BasicTransactionTests(IntegrationTestCase):
             for tx in await self.ledger.db.get_transactions(txid__in=[tx.id for tx in txs])
         ]))

-        await self.blockchain.generate(1)
+        await self.generate(1)
         await asyncio.wait([self.ledger.wait(tx) for tx in txs])
         await self.assertBalance(self.account, '199.99876')

@@ -74,7 +73,7 @@ class BasicTransactionTests(IntegrationTestCase):
         )
         await self.broadcast(tx)
         await self.ledger.wait(tx)
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await self.ledger.wait(tx)

         self.assertEqual(2, await self.account.get_utxo_count()) # 199 + change
@@ -92,7 +91,7 @@ class BasicTransactionTests(IntegrationTestCase):
             self.blockchain.send_to_address(address, 1.1) for address in addresses[:5]
         ))
         await asyncio.wait([self.on_transaction_id(txid) for txid in txids]) # mempool
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await asyncio.wait([self.on_transaction_id(txid) for txid in txids]) # confirmed
         await self.assertBalance(account1, '5.5')
         await self.assertBalance(account2, '0.0')
@@ -107,7 +106,7 @@ class BasicTransactionTests(IntegrationTestCase):
         )
         await self.broadcast(tx)
         await self.ledger.wait(tx) # mempool
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await self.ledger.wait(tx) # confirmed

         await self.assertBalance(account1, '3.499802')
@@ -121,7 +120,7 @@ class BasicTransactionTests(IntegrationTestCase):
         )
         await self.broadcast(tx)
         await self.ledger.wait(tx) # mempool
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await self.ledger.wait(tx) # confirmed

         tx = (await account1.get_transactions(include_is_my_input=True, include_is_my_output=True))[1]
@@ -133,11 +132,11 @@ class BasicTransactionTests(IntegrationTestCase):
         self.assertTrue(tx.outputs[1].is_internal_transfer)

     async def test_history_edge_cases(self):
-        await self.blockchain.generate(300)
+        await self.generate(300)
         await self.assertBalance(self.account, '0.0')
         address = await self.account.receiving.get_or_create_usable_address()
         # evil trick: mempool is unsorted on real life, but same order between python instances. reproduce it
-        original_summary = self.conductor.spv_node.server.bp.mempool.transaction_summaries
+        original_summary = self.conductor.spv_node.server.mempool.transaction_summaries

         def random_summary(*args, **kwargs):
             summary = original_summary(*args, **kwargs)
@@ -146,7 +145,7 @@ class BasicTransactionTests(IntegrationTestCase):
             while summary == ordered:
                 random.shuffle(summary)
             return summary
-        self.conductor.spv_node.server.bp.mempool.transaction_summaries = random_summary
+        self.conductor.spv_node.server.mempool.transaction_summaries = random_summary
         # 10 unconfirmed txs, all from blockchain wallet
         sends = [self.blockchain.send_to_address(address, 10) for _ in range(10)]
         # use batching to reduce issues with send_to_address on cli
@@ -199,7 +198,7 @@ class BasicTransactionTests(IntegrationTestCase):

     async def test_sqlite_coin_chooser(self):
         wallet_manager = WalletManager([self.wallet], {self.ledger.get_id(): self.ledger})
-        await self.blockchain.generate(300)
+        await self.generate(300)

         await self.assertBalance(self.account, '0.0')
         address = await self.account.receiving.get_or_create_usable_address()

@@ -126,28 +126,86 @@ class TestRevertablePrefixDB(unittest.TestCase):
         self.assertIsNone(self.db.claim_takeover.get(name))
         self.assertEqual(10000000, self.db.claim_takeover.get_pending(name).height)

-        self.db.commit(10000000)
+        self.db.commit(10000000, b'\x00' * 32)
         self.assertEqual(10000000, self.db.claim_takeover.get(name).height)

         self.db.claim_takeover.stage_delete((name,), (claim_hash1, takeover_height))
         self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 1))
         self.db.claim_takeover.stage_delete((name,), (claim_hash2, takeover_height + 1))
-        self.db.commit(10000001)
+        self.db.commit(10000001, b'\x01' * 32)
         self.assertIsNone(self.db.claim_takeover.get(name))
         self.db.claim_takeover.stage_put((name,), (claim_hash3, takeover_height + 2))
-        self.db.commit(10000002)
+        self.db.commit(10000002, b'\x02' * 32)
         self.assertEqual(10000002, self.db.claim_takeover.get(name).height)

         self.db.claim_takeover.stage_delete((name,), (claim_hash3, takeover_height + 2))
         self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 3))
-        self.db.commit(10000003)
+        self.db.commit(10000003, b'\x03' * 32)
         self.assertEqual(10000003, self.db.claim_takeover.get(name).height)

-        self.db.rollback(10000003)
+        self.db.rollback(10000003, b'\x03' * 32)
         self.assertEqual(10000002, self.db.claim_takeover.get(name).height)
-        self.db.rollback(10000002)
+        self.db.rollback(10000002, b'\x02' * 32)
         self.assertIsNone(self.db.claim_takeover.get(name))
-        self.db.rollback(10000001)
+        self.db.rollback(10000001, b'\x01' * 32)
         self.assertEqual(10000000, self.db.claim_takeover.get(name).height)
-        self.db.rollback(10000000)
+        self.db.rollback(10000000, b'\x00' * 32)
         self.assertIsNone(self.db.claim_takeover.get(name))

+    def test_hub_db_iterator(self):
+        name = 'derp'
+        claim_hash0 = 20 * b'\x00'
+        claim_hash1 = 20 * b'\x01'
+        claim_hash2 = 20 * b'\x02'
+        claim_hash3 = 20 * b'\x03'
+        overflow_value = 0xffffffff
+        self.db.claim_expiration.stage_put((99, 999, 0), (claim_hash0, name))
+        self.db.claim_expiration.stage_put((100, 1000, 0), (claim_hash1, name))
+        self.db.claim_expiration.stage_put((100, 1001, 0), (claim_hash2, name))
+        self.db.claim_expiration.stage_put((101, 1002, 0), (claim_hash3, name))
+        self.db.claim_expiration.stage_put((overflow_value - 1, 1003, 0), (claim_hash3, name))
+        self.db.claim_expiration.stage_put((overflow_value, 1004, 0), (claim_hash3, name))
+        self.db.tx_num.stage_put((b'\x00' * 32,), (101,))
+        self.db.claim_takeover.stage_put((name,), (claim_hash3, 101))
+        self.db.db_state.stage_put((), (b'n?\xcf\x12\x99\xd4\xec]y\xc3\xa4\xc9\x1dbJJ\xcf\x9e.\x17=\x95\xa1\xa0POgvihuV', 0, 1, b'VuhivgOP\xa0\xa1\x95=\x17.\x9e\xcfJJb\x1d\xc9\xa4\xc3y]\xec\xd4\x99\x12\xcf?n', 1, 0, 1, 7, 1, -1, -1, 0))
+        self.db.unsafe_commit()
+
+        state = self.db.db_state.get()
+        self.assertEqual(b'n?\xcf\x12\x99\xd4\xec]y\xc3\xa4\xc9\x1dbJJ\xcf\x9e.\x17=\x95\xa1\xa0POgvihuV', state.genesis)
+
+        self.assertListEqual(
+            [], list(self.db.claim_expiration.iterate(prefix=(98,)))
+        )
+        self.assertListEqual(
+            list(self.db.claim_expiration.iterate(start=(98,), stop=(99,))),
+            list(self.db.claim_expiration.iterate(prefix=(98,)))
+        )
+        self.assertListEqual(
+            list(self.db.claim_expiration.iterate(start=(99,), stop=(100,))),
+            list(self.db.claim_expiration.iterate(prefix=(99,)))
+        )
+        self.assertListEqual(
+            [
+                ((99, 999, 0), (claim_hash0, name)),
+            ], list(self.db.claim_expiration.iterate(prefix=(99,)))
+        )
+        self.assertListEqual(
+            [
+                ((100, 1000, 0), (claim_hash1, name)),
+                ((100, 1001, 0), (claim_hash2, name))
+            ], list(self.db.claim_expiration.iterate(prefix=(100,)))
+        )
+        self.assertListEqual(
+            list(self.db.claim_expiration.iterate(start=(100,), stop=(101,))),
+            list(self.db.claim_expiration.iterate(prefix=(100,)))
+        )
+        self.assertListEqual(
+            [
+                ((overflow_value - 1, 1003, 0), (claim_hash3, name))
+            ], list(self.db.claim_expiration.iterate(prefix=(overflow_value - 1,)))
+        )
+        self.assertListEqual(
+            [
+                ((overflow_value, 1004, 0), (claim_hash3, name))
+            ], list(self.db.claim_expiration.iterate(prefix=(overflow_value,)))
+        )
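The added test_hub_db_iterator exercises the prefix iterator: claim_expiration rows are keyed by (expiration_height, tx_num, position) with (claim_hash, name) values, and iterate(prefix=(height,)) is asserted to return the same rows as iterate(start=(height,), stop=(height + 1,)). A minimal sketch of that equivalence (hypothetical db handle, mirroring the calls in the test):

    # Hypothetical helper showing the equivalence the test asserts: a one-element
    # prefix behaves like a half-open [start, stop) range over the first key field.
    def expirations_at(db, height):
        by_prefix = list(db.claim_expiration.iterate(prefix=(height,)))
        by_range = list(db.claim_expiration.iterate(start=(height,), stop=(height + 1,)))
        assert by_prefix == by_range
        return by_prefix  # [((height, tx_num, position), (claim_hash, name)), ...]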