Compare commits
101 commits
master...rocksdb-hu

Commits (SHA1):
973ee4f08c, b83360f3e2, dac1b82ea7, 56f80cbcda, 28abd9c449, 8d86b0754c, bed3255b89, a17a31acf5, 1f815cf2d2, 07ee73b653,
7a9e8c6769, a2901e4331, 7f8268703c, 32d2208fd9, 704ec9e553, e0f7066163, 32b26c9fa5, 0d9d576436, e6c275f86e, 937adbf439,
6a5ff0636c, 888d47f88b, d7e50b269f, 46ce175481, 6b2d4175be, 16bfb8589b, a4bb4db8dd, de1e2d0e3b, a4880c1cf0, 28f25538a3,
83c8576b3f, 6fc909ea41, c17544d8ef, 7c46cc0805, fb4dc8342a, df91f4754a, 7d8bc38cb9, a319595f37, 31312af517, 95ec1f3af4,
b093aa3911, 7bd157ef17, 7f67cbfb40, 2ea48bc8c2, 8d42b375a0, c2acceaed5, a2db18010b, 545b7c33b1, 07d584133e, a10eb30771,
98e264f4cd, 083d6a3bc3, 94e87f99d8, fc4114621c, 63bd6f5792, 0c86ed924b, 4a93b906d7, b727d2815f, bad8ae7832, cffe895d22,
ca4fec272a, 66419f1aa6, a13735769b, 749f72a8c5, e5f124fe68, 88ed67a5b3, d5598462b6, 31c60e167a, fe04bfa10a, c15bedfb6d,
0ff62495c7, b4be712a50, faa43fc20e, f7deaa3303, d7ecde7040, e2a75758f8, f449cf61ab, 04db81e954, b49c9fd050, 3ff2bcf913,
556056c60d, 3c03fff380, b1441d4247, 81458b75e4, f0f8ef044b, 47305e7446, ba6b985d71, 49802b39cb, 46bcc5d725, 98f8fd0556,
c0ce27ccf3, cfae30a364, 53e3828965, 4f16f1c829, 478bd0510b, 499ee74dfc, d3da442727, 358fa21eaf, 20f35d02fa, 77e64ef028,
0a71e2ff91
57 changed files with 4337 additions and 3131 deletions
@@ -15,7 +15,6 @@ RUN apt-get update && \
      build-essential \
      automake libtool \
      pkg-config \
      libleveldb-dev \
      python3.7 \
      python3-dev \
      python3-pip \
docker/Dockerfile.wallet_server_deploy (new file, 48 lines)
@@ -0,0 +1,48 @@
# FROM debian:10-slim
FROM python:3.7.12-slim-buster

ARG user=lbry
ARG db_dir=/database
ARG projects_dir=/home/$user

ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT

RUN apt-get update && \
    apt-get -y --no-install-recommends install \
      wget \
      tar unzip \
      build-essential \
      automake libtool \
      pkg-config

RUN pip install uvloop
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir

COPY . $projects_dir
RUN chown -R $user:$user $projects_dir

USER $user
WORKDIR $projects_dir

RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf

# entry point
ARG host=0.0.0.0
ARG tcp_port=50001
ARG daemon_url=http://lbry:lbry@192.99.151.178:9245/
VOLUME $db_dir
ENV TCP_PORT=$tcp_port
ENV HOST=$host
ENV DAEMON_URL=$daemon_url
ENV DB_DIRECTORY=$db_dir
ENV MAX_SESSIONS=1000000000
ENV MAX_SEND=1000000000000000000
ENV EVENT_LOOP_POLICY=uvloop
COPY ./docker/wallet_server_entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]
@@ -20,6 +20,14 @@ if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
    rm "$filename"
fi

/home/lbry/.local/bin/lbry-hub-elastic-sync
echo 'starting server'
/home/lbry/.local/bin/lbry-hub "$@"
if [ -z "$HUB_COMMAND" ]; then
  echo "HUB_COMMAND env variable must be writer, reader, or es_sync"
  exit 1
fi

case "$HUB_COMMAND" in
  writer ) /home/lbry/.local/bin/lbry-hub-writer "$@" ;;
  reader ) /home/lbry/.local/bin/lbry-hub-server "$@" ;;
  es_sync ) /home/lbry/.local/bin/lbry-hub-elastic-sync ;;
  * ) echo "HUB_COMMAND env variable must be writer, reader, or es_sync" && exit 1 ;;
esac
@@ -7,7 +7,7 @@ from lbry.error import ResolveCensoredError
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage
if TYPE_CHECKING:
    from lbry.wallet.server.leveldb import ResolveResult
    from lbry.wallet.server.db.common import ResolveResult

INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
@@ -19,7 +19,7 @@ from lbry.conf import Config
from lbry.wallet.util import satoshis_to_coins
from lbry.wallet.dewies import lbc_to_dewies
from lbry.wallet.orchstr8 import Conductor
from lbry.wallet.orchstr8.node import BlockchainNode, WalletNode, HubNode
from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode, HubNode
from lbry.schema.claim import Claim

from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty

@@ -230,7 +230,7 @@ class IntegrationTestCase(AsyncioTestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conductor: Optional[Conductor] = None
        self.blockchain: Optional[BlockchainNode] = None
        self.blockchain: Optional[LBCWalletNode] = None
        self.hub: Optional[HubNode] = None
        self.wallet_node: Optional[WalletNode] = None
        self.manager: Optional[WalletManager] = None

@@ -240,15 +240,17 @@ class IntegrationTestCase(AsyncioTestCase):

    async def asyncSetUp(self):
        self.conductor = Conductor(seed=self.SEED)
        await self.conductor.start_blockchain()
        self.addCleanup(self.conductor.stop_blockchain)
        await self.conductor.start_lbcd()
        self.addCleanup(self.conductor.stop_lbcd)
        await self.conductor.start_lbcwallet()
        self.addCleanup(self.conductor.stop_lbcwallet)
        await self.conductor.start_spv()
        self.addCleanup(self.conductor.stop_spv)
        await self.conductor.start_wallet()
        self.addCleanup(self.conductor.stop_wallet)
        await self.conductor.start_hub()
        self.addCleanup(self.conductor.stop_hub)
        self.blockchain = self.conductor.blockchain_node
        self.blockchain = self.conductor.lbcwallet_node
        self.hub = self.conductor.hub_node
        self.wallet_node = self.conductor.wallet_node
        self.manager = self.wallet_node.manager

@@ -263,6 +265,13 @@ class IntegrationTestCase(AsyncioTestCase):
    def broadcast(self, tx):
        return self.ledger.broadcast(tx)

    async def broadcast_and_confirm(self, tx, ledger=None):
        ledger = ledger or self.ledger
        notifications = asyncio.create_task(ledger.wait(tx))
        await ledger.broadcast(tx)
        await notifications
        await self.generate_and_wait(1, [tx.id], ledger)

    async def on_header(self, height):
        if self.ledger.headers.height < height:
            await self.ledger.on_header.where(

@@ -270,11 +279,36 @@ class IntegrationTestCase(AsyncioTestCase):
            )
        return True

    def on_transaction_id(self, txid, ledger=None):
        return (ledger or self.ledger).on_transaction.where(
            lambda e: e.tx.id == txid
    async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None):
        tx_watch = []
        txid = None
        done = False
        watcher = (ledger or self.ledger).on_transaction.where(
            lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id)
        )

        txid = await self.blockchain.send_to_address(address, amount)
        done = txid in tx_watch
        await watcher

        await self.generate_and_wait(blocks_to_generate, [txid], ledger)
        return txid

    async def generate_and_wait(self, blocks_to_generate, txids, ledger=None):
        if blocks_to_generate > 0:
            watcher = (ledger or self.ledger).on_transaction.where(
                lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1]  # multi-statement lambda
            )
            self.conductor.spv_node.server.synchronized.clear()
            await self.blockchain.generate(blocks_to_generate)
            height = self.blockchain.block_expected
            await watcher
            while True:
                await self.conductor.spv_node.server.synchronized.wait()
                self.conductor.spv_node.server.synchronized.clear()
                if self.conductor.spv_node.server.db.db_height >= height:
                    break

    def on_address_update(self, address):
        return self.ledger.on_transaction.where(
            lambda e: e.address == address
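The send_to_address_and_wait / generate_and_wait helpers added above rely on side-effecting predicate lambdas: where() evaluates the predicate for every transaction event, and the tuple-plus-[-1] trick lets a single expression both record the observed txid and report whether every expected txid has been seen. A standalone sketch of that pattern (the _Event and make_predicate names are illustrative, not lbry APIs):

    # Standalone sketch of the "multi-statement lambda" predicate used above.
    class _Event:
        def __init__(self, tx_id):
            self.tx_id = tx_id

    def make_predicate(expected_txids):
        # The first tuple element runs list.remove() purely for its side effect
        # (it evaluates to None); indexing with [-1] makes the lambda return the
        # second element: "have all expected txids been observed yet?"
        return lambda e: (
            (e.tx_id in expected_txids and expected_txids.remove(e.tx_id)),
            len(expected_txids) <= 0
        )[-1]

    predicate = make_predicate(["aa", "bb"])
    assert predicate(_Event("aa")) is False  # one txid still outstanding
    assert predicate(_Event("bb")) is True   # all seen, so the watcher future resolves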
@@ -285,6 +319,19 @@ class IntegrationTestCase(AsyncioTestCase):
            lambda e: e.tx.id == tx.id and e.address == address
        )

    async def generate(self, blocks):
        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
        height = self.blockchain.block_expected
        self.conductor.spv_node.server.synchronized.clear()
        await self.blockchain.generate(blocks)
        await prepare  # no guarantee that it didn't happen already, so start waiting from before calling generate
        while True:
            await self.conductor.spv_node.server.synchronized.wait()
            self.conductor.spv_node.server.synchronized.clear()
            if self.conductor.spv_node.server.db.db_height >= height:
                break


class FakeExchangeRateManager(ExchangeRateManager):

@@ -345,20 +392,19 @@ class CommandTestCase(IntegrationTestCase):
        self.skip_libtorrent = True

    async def asyncSetUp(self):
        await super().asyncSetUp()

        logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
        logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
        logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
        logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)

        await super().asyncSetUp()

        self.daemon = await self.add_daemon(self.wallet_node)

        await self.account.ensure_address_gap()
        address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
        sendtxid = await self.blockchain.send_to_address(address, 10)
        await self.confirm_tx(sendtxid)
        await self.generate(5)
        await self.send_to_address_and_wait(address, 10, 6)

        server_tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, server_tmp_dir)

@@ -455,9 +501,14 @@ class CommandTestCase(IntegrationTestCase):

    async def confirm_tx(self, txid, ledger=None):
        """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
        await self.on_transaction_id(txid, ledger)
        await self.generate(1)
        await self.on_transaction_id(txid, ledger)
        # await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
        on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
        await asyncio.wait([self.generate(1), on_tx], timeout=5)

        # # actually, if it's in the mempool or in the block we're fine
        # await self.generate_and_wait(1, [txid], ledger=ledger)
        # return txid

        return txid

    async def on_transaction_dict(self, tx):

@@ -472,12 +523,6 @@ class CommandTestCase(IntegrationTestCase):
            addresses.add(txo['address'])
        return list(addresses)

    async def generate(self, blocks):
        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
        await self.blockchain.generate(blocks)
        await prepare  # no guarantee that it didn't happen already, so start waiting from before calling generate

    async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True):
        txid = await self.blockchain._cli_cmnd('claimname', name, value, amount)
        if confirm:

@@ -508,7 +553,7 @@ class CommandTestCase(IntegrationTestCase):
            return self.sout(tx)
        return tx

    async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None):
    async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
        account = (daemon or self.daemon).wallet_manager.default_account
        claim_address = await account.receiving.get_or_create_usable_address()
        claim = Claim()

@@ -518,7 +563,7 @@ class CommandTestCase(IntegrationTestCase):
            claim_address, [self.account], self.account
        )
        await tx.sign([self.account])
        await (daemon or self.daemon).broadcast_or_release(tx, False)
        await (daemon or self.daemon).broadcast_or_release(tx, blocking)
        return self.sout(tx)

    def create_upload_file(self, data, prefix=None, suffix=None):
@@ -1,17 +1,23 @@
__node_daemon__ = 'lbrycrdd'
__node_cli__ = 'lbrycrd-cli'
__node_bin__ = ''
__node_url__ = (
    'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.6/lbrycrd-linux-1746.zip'
__lbcd__ = 'lbcd'
__lbcctl__ = 'lbcctl'
__lbcwallet__ = 'lbcwallet'
__lbcd_url__ = (
    'https://github.com/lbryio/lbcd/releases/download/' +
    'v0.22.200-beta/lbcd_0.22.200-beta_TARGET_PLATFORM.tar.gz'
)
__lbcwallet_url__ = (
    'https://github.com/lbryio/lbcwallet/releases/download/' +
    'v0.13.100-alpha-rc2/lbcwallet_0.13.100-alpha-rc2_TARGET_PLATFORM.tar.gz'
)
__spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'

from .wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from .manager import WalletManager
from .network import Network
from .ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from .account import Account, AddressManager, SingleKey, HierarchicalDeterministic, DeterministicChannelKeyManager
from .transaction import Transaction, Output, Input
from .script import OutputScript, InputScript
from .database import SQLiteMixin, Database
from .header import Headers
from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from lbry.wallet.manager import WalletManager
from lbry.wallet.network import Network
from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \
    DeterministicChannelKeyManager
from lbry.wallet.transaction import Transaction, Output, Input
from lbry.wallet.script import OutputScript, InputScript
from lbry.wallet.database import SQLiteMixin, Database
from lbry.wallet.header import Headers
@@ -16,18 +16,18 @@ from lbry.crypto.hash import hash160, double_sha256, sha256
from lbry.crypto.base58 import Base58
from lbry.utils import LRUCacheWithMetrics

from .tasks import TaskGroup
from .database import Database
from .stream import StreamController
from .dewies import dewies_to_lbc
from .account import Account, AddressManager, SingleKey
from .network import Network
from .transaction import Transaction, Output
from .header import Headers, UnvalidatedHeaders
from .checkpoints import HASHES
from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from .bip32 import PublicKey, PrivateKey
from .coinselection import CoinSelector
from lbry.wallet.tasks import TaskGroup
from lbry.wallet.database import Database
from lbry.wallet.stream import StreamController
from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import Account, AddressManager, SingleKey
from lbry.wallet.network import Network
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.header import Headers, UnvalidatedHeaders
from lbry.wallet.checkpoints import HASHES
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from lbry.wallet.bip32 import PublicKey, PrivateKey
from lbry.wallet.coinselection import CoinSelector

log = logging.getLogger(__name__)

@@ -365,6 +365,10 @@ class Ledger(metaclass=LedgerRegistry):
        await self.db.close()
        await self.headers.close()

    async def tasks_are_done(self):
        await self._update_tasks.done.wait()
        await self._other_tasks.done.wait()

    @property
    def local_height_including_downloaded_height(self):
        return max(self.headers.height, self._download_height)

@@ -739,7 +743,7 @@ class Ledger(metaclass=LedgerRegistry):
        while timeout and (int(time.perf_counter()) - start) <= timeout:
            if await self._wait_round(tx, height, addresses):
                return
        raise asyncio.TimeoutError('Timed out waiting for transaction.')
        raise asyncio.TimeoutError(f'Timed out waiting for transaction. {tx.id}')

    async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
        records = await self.db.get_addresses(address__in=addresses)
@@ -12,13 +12,13 @@ from typing import List, Type, MutableSequence, MutableMapping, Optional
from lbry.error import KeyFeeAboveMaxAllowedError, WalletNotLoadedError
from lbry.conf import Config, NOT_SET

from .dewies import dewies_to_lbc
from .account import Account
from .ledger import Ledger, LedgerRegistry
from .transaction import Transaction, Output
from .database import Database
from .wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
from .rpc.jsonrpc import CodeMessageError
from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import Account
from lbry.wallet.ledger import Ledger, LedgerRegistry
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.database import Database
from lbry.wallet.wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
from lbry.wallet.rpc.jsonrpc import CodeMessageError

if typing.TYPE_CHECKING:
    from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
@@ -122,7 +122,7 @@ class ClientSession(BaseClientSession):
                await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
            except Exception as err:
                if isinstance(err, asyncio.CancelledError):
                    log.warning("closing connection to %s:%i", *self.server)
                    log.info("closing connection to %s:%i", *self.server)
                else:
                    log.exception("lost connection to spv")
            finally:

@@ -140,7 +140,7 @@ class ClientSession(BaseClientSession):
            controller.add(request.args)

    def connection_lost(self, exc):
        log.warning("Connection lost: %s:%d", *self.server)
        log.debug("Connection lost: %s:%d", *self.server)
        super().connection_lost(exc)
        self.response_time = None
        self.connection_latency = None

@@ -303,7 +303,7 @@ class Network:
                concurrency=self.config.get('concurrent_hub_requests', 30))
            try:
                await client.create_connection()
                log.warning("Connected to spv server %s:%i", host, port)
                log.info("Connected to spv server %s:%i", host, port)
                await client.ensure_server_version()
                return client
            except (asyncio.TimeoutError, ConnectionError, OSError, IncompatibleWalletServerError, RPCError):

@@ -357,7 +357,7 @@ class Network:
            self._keepalive_task = None
            self.client = None
            self.server_features = None
            log.warning("connection lost to %s", server_str)
            log.info("connection lost to %s", server_str)
        log.info("network loop finished")

    async def stop(self):
@@ -1,5 +1,5 @@
__hub_url__ = (
    "https://github.com/lbryio/hub/releases/download/v0.2022.01.21.1/hub"
)
from .node import Conductor
from .service import ConductorService
from lbry.wallet.orchstr8.node import Conductor
from lbry.wallet.orchstr8.service import ConductorService
@@ -5,7 +5,9 @@ import aiohttp

from lbry import wallet
from lbry.wallet.orchstr8.node import (
    Conductor, get_blockchain_node_from_ledger
    Conductor,
    get_lbcd_node_from_ledger,
    get_lbcwallet_node_from_ledger
)
from lbry.wallet.orchstr8.service import ConductorService

@@ -16,10 +18,11 @@ def get_argument_parser():
    )
    subparsers = parser.add_subparsers(dest='command', help='sub-command help')

    subparsers.add_parser("download", help="Download blockchain node binary.")
    subparsers.add_parser("download", help="Download lbcd and lbcwallet node binaries.")

    start = subparsers.add_parser("start", help="Start orchstr8 service.")
    start.add_argument("--blockchain", help="Hostname to start blockchain node.")
    start.add_argument("--lbcd", help="Hostname to start lbcd node.")
    start.add_argument("--lbcwallet", help="Hostname to start lbcwallet node.")
    start.add_argument("--spv", help="Hostname to start SPV server.")
    start.add_argument("--wallet", help="Hostname to start wallet daemon.")

@@ -47,7 +50,8 @@ def main():

    if command == 'download':
        logging.getLogger('blockchain').setLevel(logging.INFO)
        get_blockchain_node_from_ledger(wallet).ensure()
        get_lbcd_node_from_ledger(wallet).ensure()
        get_lbcwallet_node_from_ledger(wallet).ensure()

    elif command == 'generate':
        loop.run_until_complete(run_remote_command(

@@ -57,9 +61,12 @@ def main():
    elif command == 'start':

        conductor = Conductor()
        if getattr(args, 'blockchain', False):
            conductor.blockchain_node.hostname = args.blockchain
            loop.run_until_complete(conductor.start_blockchain())
        if getattr(args, 'lbcd', False):
            conductor.lbcd_node.hostname = args.lbcd
            loop.run_until_complete(conductor.start_lbcd())
        if getattr(args, 'lbcwallet', False):
            conductor.lbcwallet_node.hostname = args.lbcwallet
            loop.run_until_complete(conductor.start_lbcwallet())
        if getattr(args, 'spv', False):
            conductor.spv_node.hostname = args.spv
            loop.run_until_complete(conductor.start_spv())
@@ -1,4 +1,5 @@
import os
import signal
import json
import shutil
import asyncio

@@ -8,6 +9,7 @@ import logging
import tempfile
import subprocess
import importlib
import platform
from distutils.util import strtobool

from binascii import hexlify

@@ -16,11 +18,13 @@ import urllib.request
from uuid import uuid4

import lbry
from lbry.wallet.server.server import Server
from lbry.wallet.server.env import Env
from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
from lbry.conf import KnownHubsList, Config
from lbry.wallet.orchstr8 import __hub_url__
from lbry.wallet.server.block_processor import BlockProcessor
from lbry.wallet.server.chain_reader import BlockchainReaderServer
from lbry.wallet.server.db.elasticsearch.sync import ElasticWriter

log = logging.getLogger(__name__)

@@ -31,11 +35,18 @@ def get_spvserver_from_ledger(ledger_module):
    return getattr(spvserver_module, regtest_class_name)


def get_blockchain_node_from_ledger(ledger_module):
    return BlockchainNode(
        ledger_module.__node_url__,
        os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
        os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__)
def get_lbcd_node_from_ledger(ledger_module):
    return LBCDNode(
        ledger_module.__lbcd_url__,
        ledger_module.__lbcd__,
        ledger_module.__lbcctl__
    )

def get_lbcwallet_node_from_ledger(ledger_module):
    return LBCWalletNode(
        ledger_module.__lbcwallet_url__,
        ledger_module.__lbcwallet__,
        ledger_module.__lbcctl__
    )

@@ -45,51 +56,51 @@ class Conductor:
        self.manager_module = WalletManager
        self.spv_module = get_spvserver_from_ledger(lbry.wallet)

        self.blockchain_node = get_blockchain_node_from_ledger(lbry.wallet)
        self.lbcd_node = get_lbcd_node_from_ledger(lbry.wallet)
        self.lbcwallet_node = get_lbcwallet_node_from_ledger(lbry.wallet)
        self.spv_node = SPVNode(self.spv_module)
        self.wallet_node = WalletNode(
            self.manager_module, RegTestLedger, default_seed=seed
        )
        self.hub_node = HubNode(__hub_url__, "hub", self.spv_node)

        self.blockchain_started = False
        self.lbcd_started = False
        self.lbcwallet_started = False
        self.spv_started = False
        self.wallet_started = False
        self.hub_started = False

        self.log = log.getChild('conductor')

    async def start_blockchain(self):
        if not self.blockchain_started:
            asyncio.create_task(self.blockchain_node.start())
            await self.blockchain_node.running.wait()
            await self.blockchain_node.generate(200)
            self.blockchain_started = True
    async def start_lbcd(self):
        if not self.lbcd_started:
            await self.lbcd_node.start()
            self.lbcd_started = True

    async def stop_blockchain(self):
        if self.blockchain_started:
            await self.blockchain_node.stop(cleanup=True)
            self.blockchain_started = False
    async def stop_lbcd(self, cleanup=True):
        if self.lbcd_started:
            await self.lbcd_node.stop(cleanup)
            self.lbcd_started = False

    async def start_hub(self):
        if not self.hub_started:
            asyncio.create_task(self.hub_node.start())
            await self.blockchain_node.running.wait()
            await self.hub_node.start()
            await self.lbcwallet_node.running.wait()
            self.hub_started = True

    async def stop_hub(self):
    async def stop_hub(self, cleanup=True):
        if self.hub_started:
            await self.hub_node.stop(cleanup=True)
            await self.hub_node.stop(cleanup)
            self.hub_started = False

    async def start_spv(self):
        if not self.spv_started:
            await self.spv_node.start(self.blockchain_node)
            await self.spv_node.start(self.lbcwallet_node)
            self.spv_started = True

    async def stop_spv(self):
    async def stop_spv(self, cleanup=True):
        if self.spv_started:
            await self.spv_node.stop(cleanup=True)
            await self.spv_node.stop(cleanup)
            self.spv_started = False

    async def start_wallet(self):

@@ -97,21 +108,41 @@ class Conductor:
            await self.wallet_node.start(self.spv_node)
            self.wallet_started = True

    async def stop_wallet(self):
    async def stop_wallet(self, cleanup=True):
        if self.wallet_started:
            await self.wallet_node.stop(cleanup=True)
            await self.wallet_node.stop(cleanup)
            self.wallet_started = False

    async def start_lbcwallet(self, clean=True):
        if not self.lbcwallet_started:
            await self.lbcwallet_node.start()
            if clean:
                mining_addr = await self.lbcwallet_node.get_new_address()
                self.lbcwallet_node.mining_addr = mining_addr
                await self.lbcwallet_node.generate(200)
            # unlock the wallet for the next 1 hour
            await self.lbcwallet_node.wallet_passphrase("password", 3600)
            self.lbcwallet_started = True

    async def stop_lbcwallet(self, cleanup=True):
        if self.lbcwallet_started:
            await self.lbcwallet_node.stop(cleanup)
            self.lbcwallet_started = False

    async def start(self):
        await self.start_blockchain()
        await self.start_lbcd()
        await self.start_lbcwallet()
        await self.start_spv()
        await self.start_hub()
        await self.start_wallet()

    async def stop(self):
        all_the_stops = [
            self.stop_wallet,
            self.stop_hub,
            self.stop_spv,
            self.stop_blockchain
            self.stop_lbcwallet,
            self.stop_lbcd
        ]
        for stop in all_the_stops:
            try:

@@ -119,6 +150,12 @@ class Conductor:
            except Exception as e:
                log.exception('Exception raised while stopping services:', exc_info=e)

    async def clear_mempool(self):
        await self.stop_lbcwallet(cleanup=False)
        await self.stop_lbcd(cleanup=False)
        await self.start_lbcd()
        await self.start_lbcwallet(clean=False)


class WalletNode:
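Conductor.stop() above tears the services down in reverse start order and logs, rather than propagates, per-service failures, so one stuck service cannot leave the rest running. A minimal sketch of that teardown loop (the service names here are placeholders, not lbry APIs):

    # Sketch of the reverse-order, failure-tolerant teardown used by Conductor.stop().
    import asyncio
    import logging

    log = logging.getLogger("teardown-sketch")

    async def stop_all(stops):
        for stop in stops:
            try:
                await stop()
            except Exception as e:
                log.exception('Exception raised while stopping services:', exc_info=e)

    async def main():
        async def stop_wallet(): print("wallet stopped")
        async def stop_spv(): raise RuntimeError("spv refused to stop")
        async def stop_lbcd(): print("lbcd stopped")
        # stop_lbcd still runs even though stop_spv failed
        await stop_all([stop_wallet, stop_spv, stop_lbcd])

    asyncio.run(main())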
@@ -139,10 +176,11 @@ class WalletNode:

    async def start(self, spv_node: 'SPVNode', seed=None, connect=True, config=None):
        wallets_dir = os.path.join(self.data_path, 'wallets')
        os.mkdir(wallets_dir)
        wallet_file_name = os.path.join(wallets_dir, 'my_wallet.json')
        with open(wallet_file_name, 'w') as wallet_file:
            wallet_file.write('{"version": 1, "accounts": []}\n')
        if not os.path.isdir(wallets_dir):
            os.mkdir(wallets_dir)
            with open(wallet_file_name, 'w') as wallet_file:
                wallet_file.write('{"version": 1, "accounts": []}\n')
        self.manager = self.manager_class.from_config({
            'ledgers': {
                self.ledger_class.get_id(): {

@@ -186,53 +224,73 @@ class WalletNode:
class SPVNode:

    def __init__(self, coin_class, node_number=1):
        self.node_number = node_number
        self.coin_class = coin_class
        self.controller = None
        self.data_path = None
        self.server = None
        self.server: Optional[BlockchainReaderServer] = None
        self.writer: Optional[BlockProcessor] = None
        self.es_writer: Optional[ElasticWriter] = None
        self.hostname = 'localhost'
        self.port = 50001 + node_number  # avoid conflict with default daemon
        self.udp_port = self.port
        self.elastic_notifier_port = 19080 + node_number
        self.rpc_port = 8000 + node_number
        self.session_timeout = 600
        self.rpc_port = '0'  # disabled by default
        self.stopped = False
        self.stopped = True
        self.index_name = uuid4().hex

    async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
        self.data_path = tempfile.mkdtemp()
        conf = {
            'DESCRIPTION': '',
            'PAYMENT_ADDRESS': '',
            'DAILY_FEE': '0',
            'DB_DIRECTORY': self.data_path,
            'DAEMON_URL': blockchain_node.rpc_url,
            'REORG_LIMIT': '100',
            'HOST': self.hostname,
            'TCP_PORT': str(self.port),
            'UDP_PORT': str(self.udp_port),
            'SESSION_TIMEOUT': str(self.session_timeout),
            'MAX_QUERY_WORKERS': '0',
            'INDIVIDUAL_TAG_INDEXES': '',
            'RPC_PORT': self.rpc_port,
            'ES_INDEX_PREFIX': self.index_name,
            'ES_MODE': 'writer',
        }
        if extraconf:
            conf.update(extraconf)
        # TODO: don't use os.environ
        os.environ.update(conf)
        self.server = Server(Env(self.coin_class))
        self.server.bp.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
        await self.server.start()
    async def start(self, lbcwallet_node: 'LBCWalletNode', extraconf=None):
        if not self.stopped:
            log.warning("spv node is already running")
            return
        self.stopped = False
        try:
            self.data_path = tempfile.mkdtemp()
            conf = {
                'description': '',
                'payment_address': '',
                'daily_fee': '0',
                'db_dir': self.data_path,
                'daemon_url': lbcwallet_node.rpc_url,
                'reorg_limit': 100,
                'host': self.hostname,
                'tcp_port': self.port,
                'udp_port': self.udp_port,
                'rpc_port': self.rpc_port,
                'elastic_notifier_port': self.elastic_notifier_port,
                'session_timeout': self.session_timeout,
                'max_query_workers': 0,
                'es_index_prefix': self.index_name,
            }
            if extraconf:
                conf.update(extraconf)
            env = Env(self.coin_class, **conf)
            self.writer = BlockProcessor(env)
            self.server = BlockchainReaderServer(env)
            self.es_writer = ElasticWriter(env)
            await self.writer.open()
            await self.writer.start()
            await self.es_writer.start()
            await self.server.start()
        except Exception as e:
            self.stopped = True
            if not isinstance(e, asyncio.CancelledError):
                log.exception("failed to start spv node")
            raise e

    async def stop(self, cleanup=True):
        if self.stopped:
            log.warning("spv node is already stopped")
            return
        try:
            await self.server.db.search_index.delete_index()
            await self.server.db.search_index.stop()
            await self.server.stop()
            await self.es_writer.stop(delete_index=True)
            await self.writer.stop()
            self.stopped = True
        except Exception as e:
            log.exception("failed to stop spv node")
            raise e
        finally:
            cleanup and self.cleanup()
@@ -240,18 +298,19 @@ class SPVNode:
        shutil.rmtree(self.data_path, ignore_errors=True)


class BlockchainProcess(asyncio.SubprocessProtocol):
class LBCDProcess(asyncio.SubprocessProtocol):

    IGNORE_OUTPUT = [
        b'keypool keep',
        b'keypool reserve',
        b'keypool return',
        b'Block submitted',
    ]

    def __init__(self):
        self.ready = asyncio.Event()
        self.stopped = asyncio.Event()
        self.log = log.getChild('blockchain')
        self.log = log.getChild('lbcd')

    def pipe_data_received(self, fd, data):
        if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):

@@ -262,7 +321,7 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
        if b'Error:' in data:
            self.ready.set()
            raise SystemError(data.decode())
        if b'Done loading' in data:
        if b'RPCS: RPC server listening on' in data:
            self.ready.set()

    def process_exited(self):

@@ -270,39 +329,57 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
        self.ready.set()


class BlockchainNode:
class WalletProcess(asyncio.SubprocessProtocol):

    P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
    BECH32_ADDRESS = "bech32"
    IGNORE_OUTPUT = [
    ]

    def __init__(self):
        self.ready = asyncio.Event()
        self.stopped = asyncio.Event()
        self.log = log.getChild('lbcwallet')
        self.transport: Optional[asyncio.transports.SubprocessTransport] = None

    def pipe_data_received(self, fd, data):
        if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
            if b'Error:' in data:
                self.log.error(data.decode())
            else:
                self.log.info(data.decode())
        if b'Error:' in data:
            self.ready.set()
            raise SystemError(data.decode())
        if b'WLLT: Finished rescan' in data:
            self.ready.set()

    def process_exited(self):
        self.stopped.set()
        self.ready.set()


class LBCDNode:
    def __init__(self, url, daemon, cli):
        self.latest_release_url = url
        self.project_dir = os.path.dirname(os.path.dirname(__file__))
        self.bin_dir = os.path.join(self.project_dir, 'bin')
        self.daemon_bin = os.path.join(self.bin_dir, daemon)
        self.cli_bin = os.path.join(self.bin_dir, cli)
        self.log = log.getChild('blockchain')
        self.data_path = None
        self.log = log.getChild('lbcd')
        self.data_path = tempfile.mkdtemp()
        self.protocol = None
        self.transport = None
        self.block_expected = 0
        self.hostname = 'localhost'
        self.peerport = 9246 + 2  # avoid conflict with default peer port
        self.rpcport = 9245 + 2  # avoid conflict with default rpc port
        self.peerport = 29246
        self.rpcport = 29245
        self.rpcuser = 'rpcuser'
        self.rpcpassword = 'rpcpassword'
        self.stopped = False
        self.restart_ready = asyncio.Event()
        self.restart_ready.set()
        self.stopped = True
        self.running = asyncio.Event()

    @property
    def rpc_url(self):
        return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.rpcport}/'

    def is_expected_block(self, e: BlockHeightEvent):
        return self.block_expected == e.height

    @property
    def exists(self):
        return (
@@ -311,6 +388,12 @@ class BlockchainNode:
        )

    def download(self):
        uname = platform.uname()
        target_os = str.lower(uname.system)
        target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
        target_platform = target_os + '_' + target_arch
        self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)

        downloaded_file = os.path.join(
            self.bin_dir,
            self.latest_release_url[self.latest_release_url.rfind('/')+1:]
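download() above resolves the TARGET_PLATFORM placeholder embedded in the release URLs (such as __lbcd_url__ earlier in this diff) from the local OS name and CPU architecture. A standalone sketch of that substitution, reusing the lbcd URL quoted above:

    # Sketch of the TARGET_PLATFORM substitution performed in download().
    import platform

    url = ('https://github.com/lbryio/lbcd/releases/download/'
           'v0.22.200-beta/lbcd_0.22.200-beta_TARGET_PLATFORM.tar.gz')

    uname = platform.uname()
    target_os = uname.system.lower()                        # e.g. 'linux', 'darwin'
    target_arch = uname.machine.replace('x86_64', 'amd64')  # Go releases name it 'amd64'
    target_platform = target_os + '_' + target_arch         # e.g. 'linux_amd64'

    print(url.replace('TARGET_PLATFORM', target_platform))
    # on a typical x86-64 Linux host: .../lbcd_0.22.200-beta_linux_amd64.tar.gz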
@@ -344,72 +427,206 @@ class BlockchainNode:
        return self.exists or self.download()

    async def start(self):
        assert self.ensure()
        self.data_path = tempfile.mkdtemp()
        loop = asyncio.get_event_loop()
        asyncio.get_child_watcher().attach_loop(loop)
        command = [
            self.daemon_bin,
            f'-datadir={self.data_path}', '-printtoconsole', '-regtest', '-server', '-txindex',
            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}',
            f'-port={self.peerport}'
        ]
        self.log.info(' '.join(command))
        while not self.stopped:
            if self.running.is_set():
                await asyncio.sleep(1)
                continue
            await self.restart_ready.wait()
            try:
                self.transport, self.protocol = await loop.subprocess_exec(
                    BlockchainProcess, *command
                )
                await self.protocol.ready.wait()
                assert not self.protocol.stopped.is_set()
                self.running.set()
            except asyncio.CancelledError:
                self.running.clear()
                raise
            except Exception as e:
                self.running.clear()
                log.exception('failed to start lbrycrdd', exc_info=e)
        if not self.stopped:
            return
        self.stopped = False
        try:
            assert self.ensure()
            loop = asyncio.get_event_loop()
            asyncio.get_child_watcher().attach_loop(loop)
            command = [
                self.daemon_bin,
                '--notls',
                f'--datadir={self.data_path}',
                '--regtest', f'--listen=127.0.0.1:{self.peerport}', f'--rpclisten=127.0.0.1:{self.rpcport}',
                '--txindex', f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}'
            ]
            self.log.info(' '.join(command))
            self.transport, self.protocol = await loop.subprocess_exec(
                LBCDProcess, *command
            )
            await self.protocol.ready.wait()
            assert not self.protocol.stopped.is_set()
            self.running.set()
        except asyncio.CancelledError:
            self.running.clear()
            self.stopped = True
            raise
        except Exception as e:
            self.running.clear()
            self.stopped = True
            log.exception('failed to start lbcd', exc_info=e)
            raise

    async def stop(self, cleanup=True):
        if self.stopped:
            return
        try:
            if self.transport:
                self.transport.terminate()
                await self.protocol.stopped.wait()
                self.transport.close()
        except Exception as e:
            log.exception('failed to stop lbcd', exc_info=e)
            raise
        finally:
            self.log.info("Done shutting down " + self.daemon_bin)
            self.stopped = True
            if cleanup:
                self.cleanup()
            self.running.clear()

    def cleanup(self):
        assert self.stopped
        shutil.rmtree(self.data_path, ignore_errors=True)


class LBCWalletNode:
    P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
    BECH32_ADDRESS = "bech32"

    def __init__(self, url, lbcwallet, cli):
        self.latest_release_url = url
        self.project_dir = os.path.dirname(os.path.dirname(__file__))
        self.bin_dir = os.path.join(self.project_dir, 'bin')
        self.lbcwallet_bin = os.path.join(self.bin_dir, lbcwallet)
        self.cli_bin = os.path.join(self.bin_dir, cli)
        self.log = log.getChild('lbcwallet')
        self.protocol = None
        self.transport = None
        self.hostname = 'localhost'
        self.lbcd_rpcport = 29245
        self.lbcwallet_rpcport = 29244
        self.rpcuser = 'rpcuser'
        self.rpcpassword = 'rpcpassword'
        self.data_path = tempfile.mkdtemp()
        self.stopped = True
        self.running = asyncio.Event()
        self.block_expected = 0
        self.mining_addr = ''

    @property
    def rpc_url(self):
        # FIXME: somehow the hub/sdk doesn't learn the blocks through the Walet RPC port, why?
        # return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcwallet_rpcport}/'
        return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcd_rpcport}/'

    def is_expected_block(self, e: BlockHeightEvent):
        return self.block_expected == e.height

    @property
    def exists(self):
        return (
            os.path.exists(self.lbcwallet_bin)
        )

    def download(self):
        uname = platform.uname()
        target_os = str.lower(uname.system)
        target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
        target_platform = target_os + '_' + target_arch
        self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)

        downloaded_file = os.path.join(
            self.bin_dir,
            self.latest_release_url[self.latest_release_url.rfind('/')+1:]
        )

        if not os.path.exists(self.bin_dir):
            os.mkdir(self.bin_dir)

        if not os.path.exists(downloaded_file):
            self.log.info('Downloading: %s', self.latest_release_url)
            with urllib.request.urlopen(self.latest_release_url) as response:
                with open(downloaded_file, 'wb') as out_file:
                    shutil.copyfileobj(response, out_file)

            self.log.info('Extracting: %s', downloaded_file)

        if downloaded_file.endswith('.zip'):
            with zipfile.ZipFile(downloaded_file) as dotzip:
                dotzip.extractall(self.bin_dir)
                # zipfile bug https://bugs.python.org/issue15795
                os.chmod(self.lbcwallet_bin, 0o755)

        elif downloaded_file.endswith('.tar.gz'):
            with tarfile.open(downloaded_file) as tar:
                tar.extractall(self.bin_dir)

        return self.exists

    def ensure(self):
        return self.exists or self.download()

    async def start(self):
        assert self.ensure()
        loop = asyncio.get_event_loop()
        asyncio.get_child_watcher().attach_loop(loop)

        command = [
            self.lbcwallet_bin,
            '--noservertls', '--noclienttls',
            '--regtest',
            f'--rpcconnect=127.0.0.1:{self.lbcd_rpcport}', f'--rpclisten=127.0.0.1:{self.lbcwallet_rpcport}',
            '--createtemp', f'--appdata={self.data_path}',
            f'--username={self.rpcuser}', f'--password={self.rpcpassword}'
        ]
        self.log.info(' '.join(command))
        try:
            self.transport, self.protocol = await loop.subprocess_exec(
                WalletProcess, *command
            )
            self.protocol.transport = self.transport
            await self.protocol.ready.wait()
            assert not self.protocol.stopped.is_set()
            self.running.set()
            self.stopped = False
        except asyncio.CancelledError:
            self.running.clear()
            raise
        except Exception as e:
            self.running.clear()
            log.exception('failed to start lbcwallet', exc_info=e)

    def cleanup(self):
        assert self.stopped
        shutil.rmtree(self.data_path, ignore_errors=True)

    async def stop(self, cleanup=True):
        if self.stopped:
            return
        try:
            self.transport.terminate()
            await self.protocol.stopped.wait()
            self.transport.close()
        except Exception as e:
            log.exception('failed to stop lbcwallet', exc_info=e)
            raise
        finally:
            self.log.info("Done shutting down " + self.lbcwallet_bin)
            self.stopped = True
            if cleanup:
                self.cleanup()

    async def clear_mempool(self):
        self.restart_ready.clear()
        self.transport.terminate()
        await self.protocol.stopped.wait()
        self.transport.close()
        self.running.clear()
        os.remove(os.path.join(self.data_path, 'regtest', 'mempool.dat'))
        self.restart_ready.set()
        await self.running.wait()

    def cleanup(self):
        shutil.rmtree(self.data_path, ignore_errors=True)
        self.running.clear()

    async def _cli_cmnd(self, *args):
        cmnd_args = [
            self.cli_bin, f'-datadir={self.data_path}', '-regtest',
            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}'
            self.cli_bin,
            f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}', '--notls', '--regtest', '--wallet'
        ] + list(args)
        self.log.info(' '.join(cmnd_args))
        loop = asyncio.get_event_loop()
        asyncio.get_child_watcher().attach_loop(loop)
        process = await asyncio.create_subprocess_exec(
            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        out, _ = await process.communicate()
        out, err = await process.communicate()
        result = out.decode().strip()
        err = err.decode().strip()
        if len(result) <= 0 and err.startswith('-'):
            raise Exception(err)
        if err and 'creating a default config file' not in err:
            log.warning(err)
        self.log.info(result)
        if result.startswith('error code'):
            raise Exception(result)

@@ -417,7 +634,14 @@ class BlockchainNode:

    def generate(self, blocks):
        self.block_expected += blocks
        return self._cli_cmnd('generate', str(blocks))
        return self._cli_cmnd('generatetoaddress', str(blocks), self.mining_addr)

    def generate_to_address(self, blocks, addr):
        self.block_expected += blocks
        return self._cli_cmnd('generatetoaddress', str(blocks), addr)

    def wallet_passphrase(self, passphrase, timeout):
        return self._cli_cmnd('walletpassphrase', passphrase, str(timeout))

    def invalidate_block(self, blockhash):
        return self._cli_cmnd('invalidateblock', blockhash)

@@ -434,7 +658,7 @@ class BlockchainNode:
    def get_raw_change_address(self):
        return self._cli_cmnd('getrawchangeaddress')

    def get_new_address(self, address_type):
    def get_new_address(self, address_type='legacy'):
        return self._cli_cmnd('getnewaddress', "", address_type)

    async def get_balance(self):

@@ -450,7 +674,10 @@ class BlockchainNode:
        return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))

    async def sign_raw_transaction_with_wallet(self, tx):
        return json.loads(await self._cli_cmnd('signrawtransactionwithwallet', tx))['hex'].encode()
        # the "withwallet" portion should only come into play if we are doing segwit.
        # and "withwallet" doesn't exist on lbcd yet.
        result = await self._cli_cmnd('signrawtransaction', tx)
        return json.loads(result)['hex'].encode()

    def decode_raw_transaction(self, tx):
        return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())

@@ -460,12 +687,15 @@ class BlockchainNode:


class HubProcess(asyncio.SubprocessProtocol):
    def __init__(self):
        self.ready = asyncio.Event()
        self.stopped = asyncio.Event()
    def __init__(self, ready, stopped):
        self.ready = ready
        self.stopped = stopped
        self.log = log.getChild('hub')
        self.transport = None

    def pipe_data_received(self, fd, data):
        self.stopped.clear()
        self.ready.set()
        if self.log:
            self.log.info(data.decode())
        if b'error' in data.lower():

@@ -479,16 +709,26 @@ class HubProcess(asyncio.SubprocessProtocol):
            print(line)

    def process_exited(self):
        self.ready.clear()
        self.stopped.set()
        self.ready.set()

    async def stop(self):
        t = asyncio.create_task(self.stopped.wait())
        try:
            self.transport.send_signal(signal.SIGINT)
            await asyncio.wait_for(t, 3)
            # log.warning("stopped go hub")
        except asyncio.TimeoutError:
            if not t.done():
                t.cancel()
            self.transport.terminate()
            await self.stopped.wait()
            log.warning("terminated go hub")


class HubNode:

    def __init__(self, url, daemon, spv_node):
        self.spv_node = spv_node
        self.debug = False

        self.latest_release_url = url
        self.project_dir = os.path.dirname(os.path.dirname(__file__))
        self.bin_dir = os.path.join(self.project_dir, 'bin')
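The new HubProcess.stop() asks the hub to exit with SIGINT and only terminates the child if it has not stopped within three seconds. A minimal sketch of that escalation with a plain asyncio subprocess (the helper names here are illustrative, not lbry APIs):

    # Sketch of the "SIGINT first, terminate on timeout" shutdown used above.
    import asyncio
    import signal
    import sys

    async def stop_gracefully(proc: asyncio.subprocess.Process, grace: float = 3.0):
        proc.send_signal(signal.SIGINT)           # ask the child to exit cleanly
        try:
            await asyncio.wait_for(proc.wait(), grace)
        except asyncio.TimeoutError:
            proc.terminate()                      # escalate if it ignored SIGINT
            await proc.wait()

    async def main():
        proc = await asyncio.create_subprocess_exec(
            sys.executable, '-c', 'import time; time.sleep(60)')
        await stop_gracefully(proc)
        print('exited with', proc.returncode)

    asyncio.run(main())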
@@ -499,11 +739,13 @@ class HubNode:
        self.protocol = None
        self.hostname = 'localhost'
        self.rpcport = 50051  # avoid conflict with default rpc port
        self.stopped = False
        self.restart_ready = asyncio.Event()
        self.restart_ready.set()
        self._stopped = asyncio.Event()
        self.running = asyncio.Event()

    @property
    def stopped(self):
        return not self.running.is_set()

    @property
    def exists(self):
        return (

@@ -554,33 +796,24 @@ class HubNode:
            self.daemon_bin, 'serve', '--esindex', self.spv_node.index_name + 'claims', '--debug'
        ]
        self.log.info(' '.join(command))
        while not self.stopped:
            if self.running.is_set():
                await asyncio.sleep(1)
                continue
            await self.restart_ready.wait()
            try:
                if not self.debug:
                    self.transport, self.protocol = await loop.subprocess_exec(
                        HubProcess, *command
                    )
                    await self.protocol.ready.wait()
                    assert not self.protocol.stopped.is_set()
                self.running.set()
            except asyncio.CancelledError:
                self.running.clear()
                raise
            except Exception as e:
                self.running.clear()
                log.exception('failed to start hub', exc_info=e)
        self.protocol = HubProcess(self.running, self._stopped)
        try:
            self.transport, _ = await loop.subprocess_exec(
                lambda: self.protocol, *command
            )
            self.protocol.transport = self.transport
        except Exception as e:
            log.exception('failed to start go hub', exc_info=e)
            raise e
        await self.protocol.ready.wait()

    async def stop(self, cleanup=True):
        self.stopped = True
        try:
            if not self.debug:
                self.transport.terminate()
                await self.protocol.stopped.wait()
                self.transport.close()
            if self.protocol:
                await self.protocol.stop()
        except Exception as e:
            log.exception('failed to stop go hub', exc_info=e)
            raise e
        finally:
            if cleanup:
                self.cleanup()
@@ -61,8 +61,10 @@ class ConductorService:
        #set_logging(
        #    self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
        #)
        self.stack.blockchain_started or await self.stack.start_blockchain()
        self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
        self.stack.lbcd_started or await self.stack.start_lbcd()
        self.send_message({'type': 'service', 'name': 'lbcd', 'port': self.stack.lbcd_node.port})
        self.stack.lbcwallet_started or await self.stack.start_lbcwallet()
        self.send_message({'type': 'service', 'name': 'lbcwallet', 'port': self.stack.lbcwallet_node.port})
        self.stack.spv_started or await self.stack.start_spv()
        self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
        self.stack.wallet_started or await self.stack.start_wallet()

@@ -74,7 +76,7 @@ class ConductorService:
    async def generate(self, request):
        data = await request.post()
        blocks = data.get('blocks', 1)
        await self.stack.blockchain_node.generate(int(blocks))
        await self.stack.lbcwallet_node.generate(int(blocks))
        return json_response({'blocks': blocks})

    async def transfer(self, request):

@@ -85,11 +87,14 @@ class ConductorService:
        if not address:
            raise ValueError("No address was provided.")
        amount = data.get('amount', 1)
        txid = await self.stack.blockchain_node.send_to_address(address, amount)
        if self.stack.wallet_started:
            await self.stack.wallet_node.ledger.on_transaction.where(
                lambda e: e.tx.id == txid and e.address == address
            watcher = self.stack.wallet_node.ledger.on_transaction.where(
                lambda e: e.address == address  # and e.tx.id == txid -- might stall; see send_to_address_and_wait
            )
            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
            await watcher
        else:
            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
        return json_response({
            'address': address,
            'amount': amount,
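The transfer() change above removes a race: the on_transaction watcher is created before send_to_address runs, so a notification that fires immediately cannot be missed. A minimal asyncio sketch of that subscribe-before-trigger ordering (illustrative names only):

    # Sketch of "subscribe before triggering" as used in transfer() above.
    import asyncio

    async def main():
        got_notification = asyncio.Event()

        async def notifier():
            # stands in for the ledger pushing an on_transaction event
            got_notification.set()

        # create the watcher first ...
        watcher = asyncio.create_task(got_notification.wait())
        # ... then trigger the action that produces the notification
        await notifier()
        await watcher  # resolves even though the event fired immediately
        print("transfer observed")

    asyncio.run(main())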
@@ -98,7 +103,7 @@ class ConductorService:

    async def balance(self, _):
        return json_response({
            'balance': await self.stack.blockchain_node.get_balance()
            'balance': await self.stack.lbcwallet_node.get_balance()
        })

    async def log(self, request):

@@ -129,7 +134,7 @@ class ConductorService:
            'type': 'status',
            'height': self.stack.wallet_node.ledger.headers.height,
            'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
            'miner': await self.stack.blockchain_node.get_balance()
            'miner': await self.stack.lbcwallet_node.get_balance()
        })

    def send_message(self, msg):
@ -1,149 +1,32 @@
|
|||
import time
|
||||
import asyncio
|
||||
import typing
|
||||
import signal
|
||||
|
||||
from bisect import bisect_right
|
||||
from struct import pack, unpack
|
||||
from struct import pack
|
||||
from concurrent.futures.thread import ThreadPoolExecutor
|
||||
from typing import Optional, List, Tuple, Set, DefaultDict, Dict, NamedTuple
|
||||
from typing import Optional, List, Tuple, Set, DefaultDict, Dict
|
||||
from prometheus_client import Gauge, Histogram
|
||||
from collections import defaultdict
|
||||
|
||||
import lbry
|
||||
from lbry.schema.url import URL
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.wallet.ledger import Ledger, TestNetLedger, RegTestLedger
|
||||
from lbry.utils import LRUCache
|
||||
from lbry.wallet.transaction import OutputScript, Output, Transaction
|
||||
from lbry.wallet.rpc.jsonrpc import RPCError
|
||||
from lbry.wallet.server.tx import Tx, TxOutput, TxInput
|
||||
from lbry.wallet.server.daemon import DaemonError
|
||||
from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN
|
||||
from lbry.wallet.server.util import chunks, class_logger
|
||||
from lbry.wallet.server.hash import hash_to_hex_str
|
||||
from lbry.wallet.server.util import class_logger
|
||||
from lbry.crypto.hash import hash160
|
||||
from lbry.wallet.server.mempool import MemPool
|
||||
from lbry.wallet.server.db.prefixes import ACTIVATED_SUPPORT_TXO_TYPE, ACTIVATED_CLAIM_TXO_TYPE
|
||||
from lbry.wallet.server.db.prefixes import PendingActivationKey, PendingActivationValue, ClaimToTXOValue
|
||||
from lbry.wallet.server.udp import StatusServer
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack
|
||||
from lbry.wallet.server.prefetcher import Prefetcher
|
||||
from lbry.wallet.server.db.db import HubDB
|
||||
from lbry.wallet.transaction import OutputScript, Output, Transaction
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
|
||||
|
||||
class TrendingNotification(NamedTuple):
|
||||
height: int
|
||||
prev_amount: int
|
||||
new_amount: int
|
||||
|
||||
|
||||
class Prefetcher:
|
||||
"""Prefetches blocks (in the forward direction only)."""
|
||||
|
||||
def __init__(self, daemon, coin, blocks_event):
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.daemon = daemon
|
||||
self.coin = coin
|
||||
self.blocks_event = blocks_event
|
||||
self.blocks = []
|
||||
self.caught_up = False
|
||||
# Access to fetched_height should be protected by the semaphore
|
||||
self.fetched_height = None
|
||||
self.semaphore = asyncio.Semaphore()
|
||||
self.refill_event = asyncio.Event()
|
||||
# The prefetched block cache size. The min cache size has
|
||||
# little effect on sync time.
|
||||
self.cache_size = 0
|
||||
self.min_cache_size = 10 * 1024 * 1024
|
||||
# This makes the first fetch 10 blocks
|
||||
self.ave_size = self.min_cache_size // 10
|
||||
self.polling_delay = 5
|
||||
|
||||
async def main_loop(self, bp_height):
|
||||
"""Loop forever polling for more blocks."""
|
||||
await self.reset_height(bp_height)
|
||||
while True:
|
||||
try:
|
||||
# Sleep a while if there is nothing to prefetch
|
||||
await self.refill_event.wait()
|
||||
if not await self._prefetch_blocks():
|
||||
await asyncio.sleep(self.polling_delay)
|
||||
except DaemonError as e:
|
||||
self.logger.info(f'ignoring daemon error: {e}')
|
||||
|
||||
def get_prefetched_blocks(self):
|
||||
"""Called by block processor when it is processing queued blocks."""
|
||||
blocks = self.blocks
|
||||
self.blocks = []
|
||||
self.cache_size = 0
|
||||
self.refill_event.set()
|
||||
return blocks
|
||||
|
||||
async def reset_height(self, height):
|
||||
"""Reset to prefetch blocks from the block processor's height.
|
||||
|
||||
Used in blockchain reorganisations. This coroutine can be
|
||||
called asynchronously to the _prefetch_blocks coroutine so we
|
||||
must synchronize with a semaphore.
|
||||
"""
|
||||
async with self.semaphore:
|
||||
self.blocks.clear()
|
||||
self.cache_size = 0
|
||||
self.fetched_height = height
|
||||
self.refill_event.set()
|
||||
|
||||
daemon_height = await self.daemon.height()
|
||||
behind = daemon_height - height
|
||||
if behind > 0:
|
||||
self.logger.info(f'catching up to daemon height {daemon_height:,d} '
|
||||
f'({behind:,d} blocks behind)')
|
||||
else:
|
||||
self.logger.info(f'caught up to daemon height {daemon_height:,d}')
|
||||
|
||||
async def _prefetch_blocks(self):
|
||||
"""Prefetch some blocks and put them on the queue.
|
||||
|
||||
Repeats until the queue is full or caught up.
|
||||
"""
|
||||
daemon = self.daemon
|
||||
daemon_height = await daemon.height()
|
||||
async with self.semaphore:
|
||||
while self.cache_size < self.min_cache_size:
|
||||
# Try and catch up all blocks but limit to room in cache.
|
||||
# Constrain fetch count to between 0 and 500 regardless;
|
||||
# testnet can be lumpy.
|
||||
cache_room = self.min_cache_size // self.ave_size
|
||||
count = min(daemon_height - self.fetched_height, cache_room)
|
||||
count = min(500, max(count, 0))
|
||||
if not count:
|
||||
self.caught_up = True
|
||||
return False
|
||||
|
||||
first = self.fetched_height + 1
|
||||
hex_hashes = await daemon.block_hex_hashes(first, count)
|
||||
if self.caught_up:
|
||||
self.logger.info('new block height {:,d} hash {}'
|
||||
.format(first + count-1, hex_hashes[-1]))
|
||||
blocks = await daemon.raw_blocks(hex_hashes)
|
||||
|
||||
assert count == len(blocks)
|
||||
|
||||
# Special handling for genesis block
|
||||
if first == 0:
|
||||
blocks[0] = self.coin.genesis_block(blocks[0])
|
||||
self.logger.info(f'verified genesis block with hash {hex_hashes[0]}')
|
||||
|
||||
# Update our recent average block size estimate
|
||||
size = sum(len(block) for block in blocks)
|
||||
if count >= 10:
|
||||
self.ave_size = size // count
|
||||
else:
|
||||
self.ave_size = (size + (10 - count) * self.ave_size) // 10
|
||||
|
||||
self.blocks.extend(blocks)
|
||||
self.cache_size += size
|
||||
self.fetched_height += count
|
||||
self.blocks_event.set()
|
||||
|
||||
self.refill_event.clear()
|
||||
return True
|
||||
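The prefetcher sizes each fetch from a running estimate of the average block size: with at least 10 blocks in a batch it uses the plain batch average, otherwise it blends the batch into the previous estimate. A minimal standalone sketch of that update rule (function and variable names are illustrative, not part of the codebase):

# Sketch of the average-block-size estimate used to size the next prefetch.
MIN_CACHE_SIZE = 10 * 1024 * 1024  # 10 MB prefetch cache, as above

def update_ave_size(ave_size: int, block_sizes: list) -> int:
    count, size = len(block_sizes), sum(block_sizes)
    if count >= 10:
        return size // count  # enough samples: plain batch average
    # few samples: blend with the old estimate so one small batch
    # does not swing the next fetch size too far
    return (size + (10 - count) * ave_size) // 10

ave_size = MIN_CACHE_SIZE // 10                      # makes the first fetch ~10 blocks
ave_size = update_ave_size(ave_size, [750_000, 900_000, 1_200_000])
print(MIN_CACHE_SIZE // ave_size)                    # rough block count for the next fetch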
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack
|
||||
|
||||
|
||||
class ChainError(Exception):
|
||||
|
@ -175,7 +58,6 @@ class StagedClaimtrieItem(typing.NamedTuple):
|
|||
)
|
||||
|
||||
|
||||
NAMESPACE = "wallet_server"
|
||||
HISTOGRAM_BUCKETS = (
|
||||
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
|
||||
)
|
||||
|
@ -189,24 +71,28 @@ class BlockProcessor:
|
|||
"""
|
||||
|
||||
block_count_metric = Gauge(
|
||||
"block_count", "Number of processed blocks", namespace=NAMESPACE
|
||||
"block_count", "Number of processed blocks", namespace="block_processor"
|
||||
)
|
||||
block_update_time_metric = Histogram(
|
||||
"block_time", "Block update times", namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
|
||||
"block_time", "Block update times", namespace="block_processor", buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
reorg_count_metric = Gauge(
|
||||
"reorg_count", "Number of reorgs", namespace=NAMESPACE
|
||||
"reorg_count", "Number of reorgs", namespace="block_processor"
|
||||
)
|
||||
|
||||
def __init__(self, env, db: 'LevelDB', daemon, shutdown_event: asyncio.Event):
|
||||
self.state_lock = asyncio.Lock()
|
||||
def __init__(self, env: 'Env'):
|
||||
self.cancellable_tasks = []
|
||||
|
||||
self.env = env
|
||||
self.db = db
|
||||
self.daemon = daemon
|
||||
self.state_lock = asyncio.Lock()
|
||||
self.daemon = env.coin.DAEMON(env.coin, env.daemon_url)
|
||||
self._chain_executor = ThreadPoolExecutor(1, thread_name_prefix='block-processor')
|
||||
self._sync_reader_executor = ThreadPoolExecutor(1, thread_name_prefix='hub-es-sync')
|
||||
self.mempool = MemPool(env.coin, daemon, db, self.state_lock)
|
||||
self.shutdown_event = shutdown_event
|
||||
self.db = HubDB(
|
||||
env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes,
|
||||
max_open_files=env.db_max_open_files, blocking_channel_ids=env.blocking_channel_ids,
|
||||
filtering_channel_ids=env.filtering_channel_ids, executor=self._chain_executor
|
||||
)
|
||||
self.shutdown_event = asyncio.Event()
|
||||
self.coin = env.coin
|
||||
if env.coin.NET == 'mainnet':
|
||||
self.ledger = Ledger
|
||||
|
@ -214,6 +100,7 @@ class BlockProcessor:
|
|||
self.ledger = TestNetLedger
|
||||
else:
|
||||
self.ledger = RegTestLedger
|
||||
self.wait_for_blocks_duration = 0.1
|
||||
|
||||
self._caught_up_event: Optional[asyncio.Event] = None
|
||||
self.height = 0
|
||||
|
@ -221,7 +108,7 @@ class BlockProcessor:
|
|||
self.tx_count = 0
|
||||
|
||||
self.blocks_event = asyncio.Event()
|
||||
self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
|
||||
self.prefetcher = Prefetcher(self.daemon, env.coin, self.blocks_event)
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
|
||||
# Meta
|
||||
|
@ -231,14 +118,7 @@ class BlockProcessor:
|
|||
self.utxo_cache: Dict[Tuple[bytes, int], Tuple[bytes, int]] = {}
|
||||
|
||||
# Claimtrie cache
|
||||
self.db_op_stack: Optional[RevertableOpStack] = None
|
||||
|
||||
# self.search_cache = {}
|
||||
self.resolve_cache = LRUCache(2**16)
|
||||
self.resolve_outputs_cache = LRUCache(2 ** 16)
|
||||
|
||||
self.history_cache = {}
|
||||
self.status_server = StatusServer()
|
||||
self.db_op_stack: Optional['RevertableOpStack'] = None
|
||||
|
||||
#################################
|
||||
# attributes used for calculating stake activations and takeovers per block
|
||||
|
@ -275,7 +155,6 @@ class BlockProcessor:
|
|||
|
||||
self.removed_claims_to_send_es = set() # cumulative changes across blocks to send to ES
|
||||
self.touched_claims_to_send_es = set()
|
||||
self.activation_info_to_send_es: DefaultDict[str, List[TrendingNotification]] = defaultdict(list)
|
||||
|
||||
self.removed_claim_hashes: Set[bytes] = set() # per block changes
|
||||
self.touched_claim_hashes: Set[bytes] = set()
|
||||
|
@ -297,18 +176,8 @@ class BlockProcessor:
|
|||
self.pending_transaction_num_mapping: Dict[bytes, int] = {}
|
||||
self.pending_transactions: Dict[int, bytes] = {}
|
||||
|
||||
async def claim_producer(self):
|
||||
if self.db.db_height <= 1:
|
||||
return
|
||||
|
||||
for claim_hash in self.removed_claims_to_send_es:
|
||||
yield 'delete', claim_hash.hex()
|
||||
|
||||
to_update = await asyncio.get_event_loop().run_in_executor(
|
||||
self._sync_reader_executor, self.db.claims_producer, self.touched_claims_to_send_es
|
||||
)
|
||||
for claim in to_update:
|
||||
yield 'update', claim
|
||||
self._stopping = False
|
||||
self._ready_to_stop = asyncio.Event()
|
||||
|
||||
async def run_in_thread_with_lock(self, func, *args):
|
||||
# Run in a thread to prevent blocking. Shielded so that
|
||||
|
@ -326,6 +195,44 @@ class BlockProcessor:
|
|||
return await asyncio.get_event_loop().run_in_executor(self._chain_executor, func, *args)
|
||||
return await asyncio.shield(run_in_thread())
|
||||
|
||||
async def refresh_mempool(self):
|
||||
def fetch_mempool(mempool_prefix):
|
||||
return {
|
||||
k.tx_hash: v.raw_tx for (k, v) in mempool_prefix.iterate()
|
||||
}
|
||||
|
||||
def update_mempool(unsafe_commit, mempool_prefix, to_put, to_delete):
|
||||
for tx_hash, raw_tx in to_put:
|
||||
mempool_prefix.stage_put((tx_hash,), (raw_tx,))
|
||||
for tx_hash, raw_tx in to_delete.items():
|
||||
mempool_prefix.stage_delete((tx_hash,), (raw_tx,))
|
||||
unsafe_commit()
|
||||
|
||||
async with self.state_lock:
|
||||
current_mempool = await self.run_in_thread(fetch_mempool, self.db.prefix_db.mempool_tx)
|
||||
_to_put = []
|
||||
try:
|
||||
mempool_hashes = await self.daemon.mempool_hashes()
|
||||
except (TypeError, RPCError):
|
||||
self.logger.warning("failed to get mempool tx hashes, reorg underway?")
|
||||
return
|
||||
for hh in mempool_hashes:
|
||||
tx_hash = bytes.fromhex(hh)[::-1]
|
||||
if tx_hash in current_mempool:
|
||||
current_mempool.pop(tx_hash)
|
||||
else:
|
||||
try:
|
||||
_to_put.append((tx_hash, bytes.fromhex(await self.daemon.getrawtransaction(hh))))
|
||||
except (TypeError, RPCError):
|
||||
self.logger.warning("failed to get a mempool tx, reorg underway?")
|
||||
return
|
||||
if current_mempool:
|
||||
if bytes.fromhex(await self.daemon.getbestblockhash())[::-1] != self.coin.header_hash(self.db.headers[-1]):
|
||||
return
|
||||
await self.run_in_thread(
|
||||
update_mempool, self.db.prefix_db.unsafe_commit, self.db.prefix_db.mempool_tx, _to_put, current_mempool
|
||||
)
|
||||
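refresh_mempool above is essentially a set difference: daemon hashes (hex, reversed into internal byte order) that are already stored are dropped from the pending-delete set, unseen hashes are fetched and staged for insertion, and whatever is left in the stored set must have left the mempool and is staged for deletion. A small standalone sketch of that bookkeeping (names are illustrative):

# Sketch of the mempool diff performed by refresh_mempool.
# stored: tx_hash -> raw_tx already persisted; daemon_hashes: hashes reported now.
def diff_mempool(stored: dict, daemon_hashes: set):
    to_put = []                    # newly seen txs, raw bytes still to be fetched
    to_delete = dict(stored)       # assume everything goes away...
    for tx_hash in daemon_hashes:
        if tx_hash in to_delete:
            to_delete.pop(tx_hash) # ...unless the daemon still reports it
        else:
            to_put.append(tx_hash)
    return to_put, to_delete       # leftovers are txs that left the mempool

new, gone = diff_mempool({b'aa': b'raw1', b'bb': b'raw2'}, {b'bb', b'cc'})
assert new == [b'cc'] and list(gone) == [b'aa']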
|
||||
async def check_and_advance_blocks(self, raw_blocks):
|
||||
"""Process the list of raw blocks passed. Detects and handles
|
||||
reorgs.
|
||||
|
@ -344,46 +251,23 @@ class BlockProcessor:
|
|||
total_start = time.perf_counter()
|
||||
try:
|
||||
for block in blocks:
|
||||
if self._stopping:
|
||||
return
|
||||
start = time.perf_counter()
|
||||
await self.run_in_thread(self.advance_block, block)
|
||||
await self.flush()
|
||||
await self.run_in_thread_with_lock(self.advance_block, block)
|
||||
|
||||
self.logger.info("advanced to %i in %0.3fs", self.height, time.perf_counter() - start)
|
||||
self.logger.info("writer advanced to %i in %0.3fs", self.height, time.perf_counter() - start)
|
||||
if self.height == self.coin.nExtendedClaimExpirationForkHeight:
|
||||
self.logger.warning(
|
||||
"applying extended claim expiration fork on claims accepted by, %i", self.height
|
||||
)
|
||||
await self.run_in_thread_with_lock(self.db.apply_expiration_extension_fork)
|
||||
if self.db.first_sync:
|
||||
self.db.search_index.clear_caches()
|
||||
self.touched_claims_to_send_es.clear()
|
||||
self.removed_claims_to_send_es.clear()
|
||||
self.activation_info_to_send_es.clear()
|
||||
# TODO: we shouldn't wait on the search index updating before advancing to the next block
|
||||
if not self.db.first_sync:
|
||||
await self.db.reload_blocking_filtering_streams()
|
||||
await self.db.search_index.claim_consumer(self.claim_producer())
|
||||
await self.db.search_index.apply_filters(self.db.blocked_streams, self.db.blocked_channels,
|
||||
self.db.filtered_streams, self.db.filtered_channels)
|
||||
await self.db.search_index.update_trending_score(self.activation_info_to_send_es)
|
||||
await self._es_caught_up()
|
||||
self.db.search_index.clear_caches()
|
||||
self.touched_claims_to_send_es.clear()
|
||||
self.removed_claims_to_send_es.clear()
|
||||
self.activation_info_to_send_es.clear()
|
||||
# print("******************\n")
|
||||
except:
|
||||
self.logger.exception("advance blocks failed")
|
||||
raise
|
||||
processed_time = time.perf_counter() - total_start
|
||||
self.block_count_metric.set(self.height)
|
||||
self.block_update_time_metric.observe(processed_time)
|
||||
self.status_server.set_height(self.db.fs_height, self.db.db_tip)
|
||||
if not self.db.first_sync:
|
||||
s = '' if len(blocks) == 1 else 's'
|
||||
self.logger.info('processed {:,d} block{} in {:.1f}s'.format(len(blocks), s, processed_time))
|
||||
if self._caught_up_event.is_set():
|
||||
await self.mempool.on_block(self.touched_hashXs, self.height)
|
||||
self.touched_hashXs.clear()
|
||||
elif hprevs[0] != chain[0]:
|
||||
min_start_height = max(self.height - self.coin.REORG_LIMIT, 0)
|
||||
|
@ -401,20 +285,11 @@ class BlockProcessor:
|
|||
try:
|
||||
assert count > 0, count
|
||||
for _ in range(count):
|
||||
await self.backup_block()
|
||||
await self.run_in_thread_with_lock(self.backup_block)
|
||||
self.logger.info(f'backed up to height {self.height:,d}')
|
||||
|
||||
if self.env.cache_all_claim_txos:
|
||||
await self.db._read_claim_txos() # TODO: don't do this
|
||||
for touched in self.touched_claims_to_send_es:
|
||||
if not self.db.get_claim_txo(touched):
|
||||
self.removed_claims_to_send_es.add(touched)
|
||||
self.touched_claims_to_send_es.difference_update(self.removed_claims_to_send_es)
|
||||
await self.db.search_index.claim_consumer(self.claim_producer())
|
||||
self.db.search_index.clear_caches()
|
||||
self.touched_claims_to_send_es.clear()
|
||||
self.removed_claims_to_send_es.clear()
|
||||
self.activation_info_to_send_es.clear()
|
||||
await self.prefetcher.reset_height(self.height)
|
||||
self.reorg_count_metric.inc()
|
||||
except:
|
||||
|
@ -432,19 +307,6 @@ class BlockProcessor:
|
|||
'resetting the prefetcher')
|
||||
await self.prefetcher.reset_height(self.height)
|
||||
|
||||
async def flush(self):
|
||||
save_undo = (self.daemon.cached_height() - self.height) <= self.env.reorg_limit
|
||||
|
||||
def flush():
|
||||
self.db.write_db_state()
|
||||
if save_undo:
|
||||
self.db.prefix_db.commit(self.height)
|
||||
else:
|
||||
self.db.prefix_db.unsafe_commit()
|
||||
self.clear_after_advance_or_reorg()
|
||||
self.db.assert_db_state()
|
||||
await self.run_in_thread_with_lock(flush)
|
||||
|
||||
def _add_claim_or_update(self, height: int, txo: 'Output', tx_hash: bytes, tx_num: int, nout: int,
|
||||
spent_claims: typing.Dict[bytes, typing.Tuple[int, int, str]]):
|
||||
try:
|
||||
|
@ -749,8 +611,6 @@ class BlockProcessor:
|
|||
self.support_txo_to_claim.pop(support_txo_to_clear)
|
||||
self.support_txos_by_claim[claim_hash].clear()
|
||||
self.support_txos_by_claim.pop(claim_hash)
|
||||
if claim_hash.hex() in self.activation_info_to_send_es:
|
||||
self.activation_info_to_send_es.pop(claim_hash.hex())
|
||||
if normalized_name.startswith('@'): # abandon a channel, invalidate signatures
|
||||
self._invalidate_channel_signatures(claim_hash)
|
||||
|
||||
|
@ -1323,10 +1183,6 @@ class BlockProcessor:
|
|||
self.touched_claim_hashes.add(controlling.claim_hash)
|
||||
self.touched_claim_hashes.add(winning)
|
||||
|
||||
def _add_claim_activation_change_notification(self, claim_id: str, height: int, prev_amount: int,
|
||||
new_amount: int):
|
||||
self.activation_info_to_send_es[claim_id].append(TrendingNotification(height, prev_amount, new_amount))
|
||||
|
||||
def _get_cumulative_update_ops(self, height: int):
|
||||
# update the last takeover height for names with takeovers
|
||||
for name in self.taken_over_names:
|
||||
|
@ -1413,8 +1269,8 @@ class BlockProcessor:
|
|||
or touched in self.pending_support_amount_change:
|
||||
# exclude sending notifications for claims/supports that activated but
|
||||
# weren't added/spent in this block
|
||||
self._add_claim_activation_change_notification(
|
||||
touched.hex(), height, prev_effective_amount, new_effective_amount
|
||||
self.db.prefix_db.trending_notification.stage_put(
|
||||
(height, touched), (prev_effective_amount, new_effective_amount)
|
||||
)
|
||||
|
||||
for channel_hash, count in self.pending_channel_counts.items():
|
||||
|
@ -1454,6 +1310,12 @@ class BlockProcessor:
|
|||
spent_claims = {}
|
||||
txos = Transaction(tx.raw).outputs
|
||||
|
||||
# clean up mempool, delete txs that were already in mempool/staged to be added
|
||||
# leave txs in the mempool that weren't in the block
|
||||
mempool_tx = self.db.prefix_db.mempool_tx.get_pending(tx_hash)
|
||||
if mempool_tx:
|
||||
self.db.prefix_db.mempool_tx.stage_delete((tx_hash,), mempool_tx)
|
||||
|
||||
self.db.prefix_db.tx.stage_put(key_args=(tx_hash,), value_args=(tx.raw,))
|
||||
self.db.prefix_db.tx_num.stage_put(key_args=(tx_hash,), value_args=(tx_count,))
|
||||
self.db.prefix_db.tx_hash.stage_put(key_args=(tx_count,), value_args=(tx_hash,))
|
||||
|
@ -1513,6 +1375,8 @@ class BlockProcessor:
|
|||
# update effective amount and update sets of touched and deleted claims
|
||||
self._get_cumulative_update_ops(height)
|
||||
|
||||
self.db.prefix_db.touched_hashX.stage_put((height,), (list(sorted(self.touched_hashXs)),))
|
||||
|
||||
self.db.prefix_db.tx_count.stage_put(key_args=(height,), value_args=(tx_count,))
|
||||
|
||||
for hashX, new_history in self.hashXs_by_tx.items():
|
||||
|
@ -1534,17 +1398,6 @@ class BlockProcessor:
|
|||
self.db.headers.append(block.header)
|
||||
self.tip = self.coin.header_hash(block.header)
|
||||
|
||||
min_height = self.db.min_undo_height(self.db.db_height)
|
||||
if min_height > 0: # delete undos for blocks deep enough they can't be reorged
|
||||
undo_to_delete = list(self.db.prefix_db.undo.iterate(start=(0,), stop=(min_height,)))
|
||||
for (k, v) in undo_to_delete:
|
||||
self.db.prefix_db.undo.stage_delete((k,), (v,))
|
||||
touched_or_deleted_to_delete = list(self.db.prefix_db.touched_or_deleted.iterate(
|
||||
start=(0,), stop=(min_height,))
|
||||
)
|
||||
for (k, v) in touched_or_deleted_to_delete:
|
||||
self.db.prefix_db.touched_or_deleted.stage_delete(k, v)
|
||||
|
||||
self.db.fs_height = self.height
|
||||
self.db.fs_tx_count = self.tx_count
|
||||
self.db.hist_flush_count += 1
|
||||
|
@ -1559,6 +1412,16 @@ class BlockProcessor:
|
|||
self.db.last_flush = now
|
||||
self.db.write_db_state()
|
||||
|
||||
# flush the changes
|
||||
save_undo = (self.daemon.cached_height() - self.height) <= self.env.reorg_limit
|
||||
|
||||
if save_undo:
|
||||
self.db.prefix_db.commit(self.height, self.tip)
|
||||
else:
|
||||
self.db.prefix_db.unsafe_commit()
|
||||
self.clear_after_advance_or_reorg()
|
||||
self.db.assert_db_state()
|
||||
|
||||
def clear_after_advance_or_reorg(self):
|
||||
self.txo_to_claim.clear()
|
||||
self.claim_hash_to_txo.clear()
|
||||
|
@ -1581,8 +1444,6 @@ class BlockProcessor:
|
|||
self.claim_channels.clear()
|
||||
self.utxo_cache.clear()
|
||||
self.hashXs_by_tx.clear()
|
||||
self.history_cache.clear()
|
||||
self.mempool.notified_mempool_txs.clear()
|
||||
self.removed_claim_hashes.clear()
|
||||
self.touched_claim_hashes.clear()
|
||||
self.pending_reposted.clear()
|
||||
|
@ -1592,10 +1453,9 @@ class BlockProcessor:
|
|||
self.pending_transaction_num_mapping.clear()
|
||||
self.pending_transactions.clear()
|
||||
self.pending_support_amount_change.clear()
|
||||
self.resolve_cache.clear()
|
||||
self.resolve_outputs_cache.clear()
|
||||
self.touched_hashXs.clear()
|
||||
|
||||
async def backup_block(self):
|
||||
def backup_block(self):
|
||||
assert len(self.db.prefix_db._op_stack) == 0
|
||||
touched_and_deleted = self.db.prefix_db.touched_or_deleted.get(self.height)
|
||||
self.touched_claims_to_send_es.update(touched_and_deleted.touched_claims)
|
||||
|
@ -1606,8 +1466,8 @@ class BlockProcessor:
|
|||
self.logger.info("backup block %i", self.height)
|
||||
# Check and update self.tip
|
||||
|
||||
self.db.headers.pop()
|
||||
self.db.tx_counts.pop()
|
||||
reverted_block_hash = self.coin.header_hash(self.db.headers.pop())
|
||||
self.tip = self.coin.header_hash(self.db.headers[-1])
|
||||
if self.env.cache_all_tx_hashes:
|
||||
while len(self.db.total_transactions) > self.db.tx_counts[-1]:
|
||||
|
@ -1646,13 +1506,12 @@ class BlockProcessor:
|
|||
self.db.last_flush = now
|
||||
self.db.last_flush_tx_count = self.db.fs_tx_count
|
||||
|
||||
def rollback():
|
||||
self.db.prefix_db.rollback(self.height + 1)
|
||||
self.db.es_sync_height = self.height
|
||||
self.db.write_db_state()
|
||||
self.db.prefix_db.unsafe_commit()
|
||||
# rollback
|
||||
self.db.prefix_db.rollback(self.height + 1, reverted_block_hash)
|
||||
self.db.es_sync_height = self.height
|
||||
self.db.write_db_state()
|
||||
self.db.prefix_db.unsafe_commit()
|
||||
|
||||
await self.run_in_thread_with_lock(rollback)
|
||||
self.clear_after_advance_or_reorg()
|
||||
self.db.assert_db_state()
|
||||
|
||||
|
@ -1701,32 +1560,40 @@ class BlockProcessor:
|
|||
self.touched_hashXs.add(hashX)
|
||||
return hashX
|
||||
|
||||
async def _process_prefetched_blocks(self):
|
||||
async def process_blocks_and_mempool_forever(self):
|
||||
"""Loop forever processing blocks as they arrive."""
|
||||
while True:
|
||||
if self.height == self.daemon.cached_height():
|
||||
if not self._caught_up_event.is_set():
|
||||
await self._first_caught_up()
|
||||
self._caught_up_event.set()
|
||||
await self.blocks_event.wait()
|
||||
self.blocks_event.clear()
|
||||
blocks = self.prefetcher.get_prefetched_blocks()
|
||||
try:
|
||||
await self.check_and_advance_blocks(blocks)
|
||||
except Exception:
|
||||
self.logger.exception("error while processing txs")
|
||||
raise
|
||||
|
||||
async def _es_caught_up(self):
|
||||
self.db.es_sync_height = self.height
|
||||
|
||||
def flush():
|
||||
assert len(self.db.prefix_db._op_stack) == 0
|
||||
self.db.write_db_state()
|
||||
self.db.prefix_db.unsafe_commit()
|
||||
self.db.assert_db_state()
|
||||
|
||||
await self.run_in_thread_with_lock(flush)
|
||||
try:
|
||||
while not self._stopping:
|
||||
if self.height == self.daemon.cached_height():
|
||||
if not self._caught_up_event.is_set():
|
||||
await self._first_caught_up()
|
||||
self._caught_up_event.set()
|
||||
try:
|
||||
await asyncio.wait_for(self.blocks_event.wait(), self.wait_for_blocks_duration)
|
||||
except asyncio.TimeoutError:
|
||||
pass
|
||||
self.blocks_event.clear()
|
||||
blocks = self.prefetcher.get_prefetched_blocks()
|
||||
if self._stopping:
|
||||
break
|
||||
if not blocks:
|
||||
try:
|
||||
await self.refresh_mempool()
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception:
|
||||
self.logger.exception("error while updating mempool txs")
|
||||
raise
|
||||
else:
|
||||
try:
|
||||
await self.check_and_advance_blocks(blocks)
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception:
|
||||
self.logger.exception("error while processing txs")
|
||||
raise
|
||||
finally:
|
||||
self._ready_to_stop.set()
|
||||
|
||||
async def _first_caught_up(self):
|
||||
self.logger.info(f'caught up to height {self.height}')
|
||||
|
@ -1747,6 +1614,13 @@ class BlockProcessor:
|
|||
f'height {self.height:,d}, halting here.')
|
||||
self.shutdown_event.set()
|
||||
|
||||
async def open(self):
|
||||
self.db.open_db()
|
||||
self.height = self.db.db_height
|
||||
self.tip = self.db.db_tip
|
||||
self.tx_count = self.db.db_tx_count
|
||||
await self.db.initialize_caches()
|
||||
|
||||
async def fetch_and_process_blocks(self, caught_up_event):
|
||||
"""Fetch, process and index blocks from the daemon.
|
||||
|
||||
|
@ -1760,18 +1634,13 @@ class BlockProcessor:
|
|||
could be lost.
|
||||
"""
|
||||
|
||||
await self.open()
|
||||
|
||||
self._caught_up_event = caught_up_event
|
||||
try:
|
||||
self.db.open_db()
|
||||
self.height = self.db.db_height
|
||||
self.tip = self.db.db_tip
|
||||
self.tx_count = self.db.db_tx_count
|
||||
self.status_server.set_height(self.db.fs_height, self.db.db_tip)
|
||||
await self.db.initialize_caches()
|
||||
await self.db.search_index.start()
|
||||
await asyncio.wait([
|
||||
self.prefetcher.main_loop(self.height),
|
||||
self._process_prefetched_blocks()
|
||||
self.process_blocks_and_mempool_forever()
|
||||
])
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
|
@ -1779,9 +1648,50 @@ class BlockProcessor:
|
|||
self.logger.exception("Block processing failed!")
|
||||
raise
|
||||
finally:
|
||||
self.status_server.stop()
|
||||
# Shut down block processing
|
||||
self.logger.info('closing the DB for a clean shutdown...')
|
||||
self._sync_reader_executor.shutdown(wait=True)
|
||||
self._chain_executor.shutdown(wait=True)
|
||||
self.db.close()
|
||||
|
||||
async def start(self):
|
||||
self._stopping = False
|
||||
env = self.env
|
||||
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
|
||||
self.logger.info(f'software version: {lbry.__version__}')
|
||||
self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
|
||||
self.logger.info(f'event loop policy: {env.loop_policy}')
|
||||
self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')
|
||||
|
||||
await self.daemon.height()
|
||||
|
||||
def _start_cancellable(run, *args):
|
||||
_flag = asyncio.Event()
|
||||
self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
|
||||
return _flag.wait()
|
||||
|
||||
await _start_cancellable(self.fetch_and_process_blocks)
|
||||
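_start_cancellable hands each long-running coroutine a fresh asyncio.Event as its last argument and returns that event's wait(); the coroutine is expected to set the flag once it has finished starting up, so start() blocks until the component is ready while the task itself stays on cancellable_tasks for shutdown. A self-contained sketch of that handshake (service() and the names are illustrative):

# Sketch of the "started flag" handshake used by _start_cancellable.
import asyncio

async def service(started: asyncio.Event):
    # ... perform startup work here ...
    started.set()                    # tell the caller startup is done
    await asyncio.sleep(3600)        # then keep running until cancelled

async def main():
    tasks = []

    def start_cancellable(run, *args):
        flag = asyncio.Event()
        tasks.append(asyncio.ensure_future(run(*args, flag)))
        return flag.wait()

    await start_cancellable(service)  # returns as soon as service() set the flag
    for t in reversed(tasks):         # shutdown mirrors BlockProcessor.stop()
        t.cancel()
    await asyncio.wait(tasks)

asyncio.run(main())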
|
||||
async def stop(self):
|
||||
self._stopping = True
|
||||
await self._ready_to_stop.wait()
|
||||
for task in reversed(self.cancellable_tasks):
|
||||
task.cancel()
|
||||
await asyncio.wait(self.cancellable_tasks)
|
||||
self.shutdown_event.set()
|
||||
await self.daemon.close()
|
||||
|
||||
def run(self):
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.set_default_executor(self._chain_executor)
|
||||
|
||||
def __exit():
|
||||
raise SystemExit()
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
||||
loop.run_until_complete(self.start())
|
||||
loop.run_until_complete(self.shutdown_event.wait())
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
pass
|
||||
finally:
|
||||
loop.run_until_complete(self.stop())
|
||||
|
|
285
lbry/wallet/server/chain_reader.py
Normal file
|
@ -0,0 +1,285 @@
|
|||
import signal
|
||||
import logging
|
||||
import asyncio
|
||||
import typing
|
||||
from concurrent.futures.thread import ThreadPoolExecutor
|
||||
from prometheus_client import Gauge, Histogram
|
||||
import lbry
|
||||
from lbry.wallet.server.mempool import MemPool
|
||||
from lbry.wallet.server.db.prefixes import DBState
|
||||
from lbry.wallet.server.udp import StatusServer
|
||||
from lbry.wallet.server.db.db import HubDB
|
||||
from lbry.wallet.server.db.elasticsearch.notifier import ElasticNotifierClientProtocol
|
||||
from lbry.wallet.server.session import LBRYSessionManager
|
||||
from lbry.prometheus import PrometheusServer
|
||||
|
||||
|
||||
HISTOGRAM_BUCKETS = (
|
||||
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
|
||||
)
|
||||
|
||||
|
||||
class BlockchainReader:
|
||||
block_count_metric = Gauge(
|
||||
"block_count", "Number of processed blocks", namespace="blockchain_reader"
|
||||
)
|
||||
block_update_time_metric = Histogram(
|
||||
"block_time", "Block update times", namespace="blockchain_reader", buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
reorg_count_metric = Gauge(
|
||||
"reorg_count", "Number of reorgs", namespace="blockchain_reader"
|
||||
)
|
||||
|
||||
def __init__(self, env, secondary_name: str, thread_workers: int = 1, thread_prefix: str = 'blockchain-reader'):
|
||||
self.env = env
|
||||
self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)
|
||||
self.shutdown_event = asyncio.Event()
|
||||
self.cancellable_tasks = []
|
||||
self._thread_workers = thread_workers
|
||||
self._thread_prefix = thread_prefix
|
||||
self._executor = ThreadPoolExecutor(thread_workers, thread_name_prefix=thread_prefix)
|
||||
self.db = HubDB(
|
||||
env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes,
|
||||
secondary_name=secondary_name, max_open_files=-1, blocking_channel_ids=env.blocking_channel_ids,
|
||||
filtering_channel_ids=env.filtering_channel_ids, executor=self._executor
|
||||
)
|
||||
self.last_state: typing.Optional[DBState] = None
|
||||
self._refresh_interval = 0.1
|
||||
self._lock = asyncio.Lock()
|
||||
|
||||
def _detect_changes(self):
|
||||
try:
|
||||
self.db.prefix_db.try_catch_up_with_primary()
|
||||
except:
|
||||
self.log.exception('failed to update secondary db')
|
||||
raise
|
||||
state = self.db.prefix_db.db_state.get()
|
||||
if not state or state.height <= 0:
|
||||
return
|
||||
if self.last_state and self.last_state.height > state.height:
|
||||
self.log.warning("reorg detected, waiting until the writer has flushed the new blocks to advance")
|
||||
return
|
||||
last_height = 0 if not self.last_state else self.last_state.height
|
||||
rewound = False
|
||||
if self.last_state:
|
||||
while True:
|
||||
if self.db.headers[-1] == self.db.prefix_db.header.get(last_height, deserialize_value=False):
|
||||
self.log.debug("connects to block %i", last_height)
|
||||
break
|
||||
else:
|
||||
self.log.warning("disconnect block %i", last_height)
|
||||
self.unwind()
|
||||
rewound = True
|
||||
last_height -= 1
|
||||
if rewound:
|
||||
self.reorg_count_metric.inc()
|
||||
self.db.read_db_state()
|
||||
if not self.last_state or last_height < state.height:
|
||||
for height in range(last_height + 1, state.height + 1):
|
||||
self.log.info("advancing to %i", height)
|
||||
self.advance(height)
|
||||
self.clear_caches()
|
||||
self.last_state = state
|
||||
self.block_count_metric.set(self.last_state.height)
|
||||
self.db.blocked_streams, self.db.blocked_channels = self.db.get_streams_and_channels_reposted_by_channel_hashes(
|
||||
self.db.blocking_channel_hashes
|
||||
)
|
||||
self.db.filtered_streams, self.db.filtered_channels = self.db.get_streams_and_channels_reposted_by_channel_hashes(
|
||||
self.db.filtering_channel_hashes
|
||||
)
|
||||
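_detect_changes reconciles the reader's in-memory headers with the primary database by walking back from its cached tip until a header matches, unwinding each mismatching block, and then advancing forward to the primary's height. A tiny standalone analogue of that rewind-then-advance loop (plain lists stand in for the header storage):

# Toy analogue of the reader's rewind-then-advance reconciliation.
def reconcile(local_headers: list, primary_headers: list):
    height = len(local_headers) - 1
    unwound = 0
    while height >= 0 and local_headers[height] != primary_headers[height]:
        local_headers.pop()          # disconnect the mismatching block
        unwound += 1
        height -= 1
    local_headers.extend(primary_headers[height + 1:])   # replay up to the primary tip
    return unwound, len(local_headers) - 1

unwound, tip = reconcile([b'a', b'b', b'x'], [b'a', b'b', b'c', b'd'])
assert (unwound, tip) == (1, 3)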
|
||||
async def poll_for_changes(self):
|
||||
await asyncio.get_event_loop().run_in_executor(self._executor, self._detect_changes)
|
||||
|
||||
async def refresh_blocks_forever(self, synchronized: asyncio.Event):
|
||||
while True:
|
||||
try:
|
||||
async with self._lock:
|
||||
await self.poll_for_changes()
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except:
|
||||
self.log.exception("blockchain reader main loop encountered an unexpected error")
|
||||
raise
|
||||
await asyncio.sleep(self._refresh_interval)
|
||||
synchronized.set()
|
||||
|
||||
def clear_caches(self):
|
||||
pass
|
||||
|
||||
def advance(self, height: int):
|
||||
tx_count = self.db.prefix_db.tx_count.get(height).tx_count
|
||||
assert tx_count not in self.db.tx_counts, f'boom {tx_count} in {len(self.db.tx_counts)} tx counts'
|
||||
assert len(self.db.tx_counts) == height, f"{len(self.db.tx_counts)} != {height}"
|
||||
self.db.tx_counts.append(tx_count)
|
||||
self.db.headers.append(self.db.prefix_db.header.get(height, deserialize_value=False))
|
||||
|
||||
def unwind(self):
|
||||
self.db.tx_counts.pop()
|
||||
self.db.headers.pop()
|
||||
|
||||
async def start(self):
|
||||
if not self._executor:
|
||||
self._executor = ThreadPoolExecutor(self._thread_workers, thread_name_prefix=self._thread_prefix)
|
||||
self.db._executor = self._executor
|
||||
|
||||
|
||||
class BlockchainReaderServer(BlockchainReader):
|
||||
block_count_metric = Gauge(
|
||||
"block_count", "Number of processed blocks", namespace="wallet_server"
|
||||
)
|
||||
block_update_time_metric = Histogram(
|
||||
"block_time", "Block update times", namespace="wallet_server", buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
reorg_count_metric = Gauge(
|
||||
"reorg_count", "Number of reorgs", namespace="wallet_server"
|
||||
)
|
||||
|
||||
def __init__(self, env):
|
||||
super().__init__(env, 'lbry-reader', thread_workers=max(1, env.max_query_workers), thread_prefix='hub-worker')
|
||||
self.history_cache = {}
|
||||
self.resolve_outputs_cache = {}
|
||||
self.resolve_cache = {}
|
||||
self.notifications_to_send = []
|
||||
self.mempool_notifications = set()
|
||||
self.status_server = StatusServer()
|
||||
self.daemon = env.coin.DAEMON(env.coin, env.daemon_url) # only needed for broadcasting txs
|
||||
self.prometheus_server: typing.Optional[PrometheusServer] = None
|
||||
self.mempool = MemPool(self.env.coin, self.db)
|
||||
self.session_manager = LBRYSessionManager(
|
||||
env, self.db, self.mempool, self.history_cache, self.resolve_cache,
|
||||
self.resolve_outputs_cache, self.daemon,
|
||||
self.shutdown_event,
|
||||
on_available_callback=self.status_server.set_available,
|
||||
on_unavailable_callback=self.status_server.set_unavailable
|
||||
)
|
||||
self.mempool.session_manager = self.session_manager
|
||||
self.es_notifications = asyncio.Queue()
|
||||
self.es_notification_client = ElasticNotifierClientProtocol(self.es_notifications)
|
||||
self.synchronized = asyncio.Event()
|
||||
self._es_height = None
|
||||
self._es_block_hash = None
|
||||
|
||||
def clear_caches(self):
|
||||
self.history_cache.clear()
|
||||
self.resolve_outputs_cache.clear()
|
||||
self.resolve_cache.clear()
|
||||
# self.clear_search_cache()
|
||||
# self.mempool.notified_mempool_txs.clear()
|
||||
|
||||
def clear_search_cache(self):
|
||||
self.session_manager.search_index.clear_caches()
|
||||
|
||||
def advance(self, height: int):
|
||||
super().advance(height)
|
||||
touched_hashXs = self.db.prefix_db.touched_hashX.get(height).touched_hashXs
|
||||
self.notifications_to_send.append((set(touched_hashXs), height))
|
||||
|
||||
def _detect_changes(self):
|
||||
super()._detect_changes()
|
||||
self.mempool_notifications.update(self.mempool.refresh())
|
||||
|
||||
async def poll_for_changes(self):
|
||||
await super().poll_for_changes()
|
||||
if self.db.fs_height <= 0:
|
||||
return
|
||||
self.status_server.set_height(self.db.fs_height, self.db.db_tip)
|
||||
if self.notifications_to_send:
|
||||
for (touched, height) in self.notifications_to_send:
|
||||
await self.mempool.on_block(touched, height)
|
||||
self.log.info("reader advanced to %i", height)
|
||||
if self._es_height == self.db.db_height:
|
||||
self.synchronized.set()
|
||||
if self.mempool_notifications:
|
||||
await self.mempool.on_mempool(
|
||||
set(self.mempool.touched_hashXs), self.mempool_notifications, self.db.db_height
|
||||
)
|
||||
self.mempool_notifications.clear()
|
||||
self.notifications_to_send.clear()
|
||||
|
||||
async def receive_es_notifications(self, synchronized: asyncio.Event):
|
||||
await asyncio.get_event_loop().create_connection(
|
||||
lambda: self.es_notification_client, '127.0.0.1', self.env.elastic_notifier_port
|
||||
)
|
||||
synchronized.set()
|
||||
try:
|
||||
while True:
|
||||
self._es_height, self._es_block_hash = await self.es_notifications.get()
|
||||
self.clear_search_cache()
|
||||
if self.last_state and self._es_block_hash == self.last_state.tip:
|
||||
self.synchronized.set()
|
||||
self.log.info("es and reader are in sync at block %i", self.last_state.height)
|
||||
else:
|
||||
self.log.info("es and reader are not yet in sync (block %s vs %s)", self._es_height,
|
||||
self.db.db_height)
|
||||
finally:
|
||||
self.es_notification_client.close()
|
||||
|
||||
async def start(self):
|
||||
await super().start()
|
||||
env = self.env
|
||||
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
|
||||
self.log.info(f'software version: {lbry.__version__}')
|
||||
self.log.info(f'supported protocol versions: {min_str}-{max_str}')
|
||||
self.log.info(f'event loop policy: {env.loop_policy}')
|
||||
self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks')
|
||||
await self.daemon.height()
|
||||
|
||||
def _start_cancellable(run, *args):
|
||||
_flag = asyncio.Event()
|
||||
self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
|
||||
return _flag.wait()
|
||||
|
||||
self.db.open_db()
|
||||
await self.db.initialize_caches()
|
||||
|
||||
self.last_state = self.db.read_db_state()
|
||||
|
||||
await self.start_prometheus()
|
||||
if self.env.udp_port and int(self.env.udp_port):
|
||||
await self.status_server.start(
|
||||
0, bytes.fromhex(self.env.coin.GENESIS_HASH)[::-1], self.env.country,
|
||||
self.env.host, self.env.udp_port, self.env.allow_lan_udp
|
||||
)
|
||||
await _start_cancellable(self.receive_es_notifications)
|
||||
await _start_cancellable(self.refresh_blocks_forever)
|
||||
await self.session_manager.search_index.start()
|
||||
await _start_cancellable(self.session_manager.serve, self.mempool)
|
||||
|
||||
async def stop(self):
|
||||
await self.status_server.stop()
|
||||
async with self._lock:
|
||||
while self.cancellable_tasks:
|
||||
t = self.cancellable_tasks.pop()
|
||||
if not t.done():
|
||||
t.cancel()
|
||||
await self.session_manager.search_index.stop()
|
||||
self.db.close()
|
||||
if self.prometheus_server:
|
||||
await self.prometheus_server.stop()
|
||||
self.prometheus_server = None
|
||||
await self.daemon.close()
|
||||
self._executor.shutdown(wait=True)
|
||||
self._executor = None
|
||||
self.shutdown_event.set()
|
||||
|
||||
def run(self):
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.set_default_executor(self._executor)
|
||||
|
||||
def __exit():
|
||||
raise SystemExit()
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
||||
loop.run_until_complete(self.start())
|
||||
loop.run_until_complete(self.shutdown_event.wait())
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
pass
|
||||
finally:
|
||||
loop.run_until_complete(self.stop())
|
||||
|
||||
async def start_prometheus(self):
|
||||
if not self.prometheus_server and self.env.prometheus_port:
|
||||
self.prometheus_server = PrometheusServer()
|
||||
await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port)
|
|
@ -2,33 +2,63 @@ import logging
|
|||
import traceback
|
||||
import argparse
|
||||
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet.server.server import Server
|
||||
from lbry.wallet.server.block_processor import BlockProcessor
|
||||
from lbry.wallet.server.chain_reader import BlockchainReaderServer
|
||||
from lbry.wallet.server.db.elasticsearch.sync import ElasticWriter
|
||||
|
||||
|
||||
def get_argument_parser():
|
||||
def get_arg_parser(name):
|
||||
parser = argparse.ArgumentParser(
|
||||
prog="lbry-hub"
|
||||
prog=name
|
||||
)
|
||||
Env.contribute_to_arg_parser(parser)
|
||||
return parser
|
||||
|
||||
|
||||
def main():
|
||||
parser = get_argument_parser()
|
||||
args = parser.parse_args()
|
||||
def setup_logging():
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
|
||||
logging.info('lbry.server starting')
|
||||
logging.getLogger('aiohttp').setLevel(logging.WARNING)
|
||||
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
|
||||
|
||||
|
||||
def run_writer_forever():
|
||||
setup_logging()
|
||||
args = get_arg_parser('lbry-hub-writer').parse_args()
|
||||
try:
|
||||
server = Server(Env.from_arg_parser(args))
|
||||
block_processor = BlockProcessor(Env.from_arg_parser(args))
|
||||
block_processor.run()
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
logging.critical('block processor terminated abnormally')
|
||||
else:
|
||||
logging.info('block processor terminated normally')
|
||||
|
||||
|
||||
def run_server_forever():
|
||||
setup_logging()
|
||||
args = get_arg_parser('lbry-hub-server').parse_args()
|
||||
|
||||
try:
|
||||
server = BlockchainReaderServer(Env.from_arg_parser(args))
|
||||
server.run()
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
logging.critical('lbry.server terminated abnormally')
|
||||
logging.critical('server terminated abnormally')
|
||||
else:
|
||||
logging.info('lbry.server terminated normally')
|
||||
logging.info('server terminated normally')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
def run_es_sync_forever():
|
||||
setup_logging()
|
||||
parser = get_arg_parser('lbry-hub-elastic-sync')
|
||||
parser.add_argument('--reindex', type=bool, default=False)
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
server = ElasticWriter(Env.from_arg_parser(args))
|
||||
server.run(args.reindex)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
logging.critical('es writer terminated abnormally')
|
||||
else:
|
||||
logging.info('es writer terminated normally')
|
||||
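The three entry points are plain synchronous functions, which makes them easy to expose as console scripts. The command names below match the argparse prog values used above; the module path is only a guess based on the imports in this diff, so treat the snippet as a sketch rather than the project's actual packaging:

# Hypothetical console_scripts wiring for the entry points above.
# 'lbry.wallet.server.cli' is an assumed module path, not confirmed by this diff.
from setuptools import setup

setup(
    # ... name, version and the rest of the metadata elided ...
    entry_points={
        'console_scripts': [
            'lbry-hub-writer=lbry.wallet.server.cli:run_writer_forever',
            'lbry-hub-server=lbry.wallet.server.cli:run_server_forever',
            'lbry-hub-elastic-sync=lbry.wallet.server.cli:run_es_sync_forever',
        ],
    },
)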
|
|
|
@ -12,9 +12,7 @@ from lbry.wallet.server.util import cachedproperty, subclasses
|
|||
from lbry.wallet.server.hash import Base58, hash160, double_sha256, hash_to_hex_str, HASHX_LEN
|
||||
from lbry.wallet.server.daemon import Daemon, LBCDaemon
|
||||
from lbry.wallet.server.script import ScriptPubKey, OpCodes
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
|
||||
from lbry.wallet.server.block_processor import BlockProcessor
|
||||
|
||||
|
||||
Block = namedtuple("Block", "raw header transactions")
|
||||
|
@ -38,9 +36,7 @@ class Coin:
|
|||
SESSIONCLS = LBRYElectrumX
|
||||
DESERIALIZER = lib_tx.Deserializer
|
||||
DAEMON = Daemon
|
||||
BLOCK_PROCESSOR = BlockProcessor
|
||||
SESSION_MANAGER = LBRYSessionManager
|
||||
DB = LevelDB
|
||||
HEADER_VALUES = [
|
||||
'version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce'
|
||||
]
|
||||
|
@ -243,7 +239,6 @@ class LBC(Coin):
|
|||
SESSIONCLS = LBRYElectrumX
|
||||
SESSION_MANAGER = LBRYSessionManager
|
||||
DESERIALIZER = DeserializerSegWit
|
||||
DB = LevelDB
|
||||
NAME = "LBRY"
|
||||
SHORTNAME = "LBC"
|
||||
NET = "mainnet"
|
||||
|
|
|
@ -53,7 +53,7 @@ class Daemon:
|
|||
self.max_retry = max_retry
|
||||
self._height = None
|
||||
self.available_rpcs = {}
|
||||
self.connector = aiohttp.TCPConnector()
|
||||
self.connector = aiohttp.TCPConnector(ssl=False)
|
||||
self._block_hash_cache = LRUCacheWithMetrics(100000)
|
||||
self._block_cache = LRUCacheWithMetrics(2 ** 13, metric_name='block', namespace=NAMESPACE)
|
||||
|
||||
|
@ -250,14 +250,14 @@ class Daemon:
|
|||
async def deserialised_block(self, hex_hash):
|
||||
"""Return the deserialised block with the given hex hash."""
|
||||
if hex_hash not in self._block_cache:
|
||||
block = await self._send_single('getblock', (hex_hash, True))
|
||||
block = await self._send_single('getblock', (hex_hash, 1))
|
||||
self._block_cache[hex_hash] = block
|
||||
return block
|
||||
return self._block_cache[hex_hash]
|
||||
|
||||
async def raw_blocks(self, hex_hashes):
|
||||
"""Return the raw binary blocks with the given hex hashes."""
|
||||
params_iterable = ((h, False) for h in hex_hashes)
|
||||
params_iterable = ((h, 0) for h in hex_hashes)
|
||||
blocks = await self._send_vector('getblock', params_iterable)
|
||||
# Convert hex string to bytes
|
||||
return [hex_to_bytes(block) for block in blocks]
|
||||
|
@ -334,42 +334,12 @@ class LBCDaemon(Daemon):
|
|||
async def getrawtransaction(self, hex_hash, verbose=False):
|
||||
return await super().getrawtransaction(hex_hash=hex_hash, verbose=verbose)
|
||||
|
||||
@handles_errors
|
||||
async def getclaimbyid(self, claim_id):
|
||||
'''Given a claim id, retrieves claim information.'''
|
||||
return await self._send_single('getclaimbyid', (claim_id,))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsbyids(self, claim_ids):
|
||||
'''Given a list of claim ids, batches calls to retrieve claim information.'''
|
||||
return await self._send_vector('getclaimbyid', ((claim_id,) for claim_id in claim_ids))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsforname(self, name):
|
||||
'''Given a name, retrieves all claims matching that name.'''
|
||||
return await self._send_single('getclaimsforname', (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsfortx(self, txid):
|
||||
'''Given a txid, returns the claims it makes.'''
|
||||
return await self._send_single('getclaimsfortx', (txid,)) or []
|
||||
|
||||
@handles_errors
|
||||
async def getnameproof(self, name, block_hash=None):
|
||||
'''Given a name and optional block_hash, returns a name proof and winner, if any.'''
|
||||
return await self._send_single('getnameproof', (name, block_hash,) if block_hash else (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getvalueforname(self, name):
|
||||
'''Given a name, returns the winning claim value.'''
|
||||
return await self._send_single('getvalueforname', (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getnamesintrie(self):
|
||||
'''Returns the list of names currently in the claim trie.'''
|
||||
return await self._send_single('getnamesintrie')
|
||||
|
||||
@handles_errors
|
||||
async def claimname(self, name, hexvalue, amount):
|
||||
'''Claim a name, used for functional tests only.'''
|
||||
return await self._send_single('claimname', (name, hexvalue, float(amount)))
|
||||
async def getbestblockhash(self):
|
||||
'''Returns the block hash of the current chain tip.'''
|
||||
return await self._send_single('getbestblockhash')
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import enum
|
||||
from typing import NamedTuple, Optional
|
||||
|
||||
|
||||
@enum.unique
|
||||
|
@ -25,7 +26,7 @@ class DB_PREFIXES(enum.Enum):
|
|||
reposted_claim = b'W'
|
||||
|
||||
undo = b'M'
|
||||
claim_diff = b'Y'
|
||||
touched_or_deleted = b'Y'
|
||||
|
||||
tx = b'B'
|
||||
block_hash = b'C'
|
||||
|
@ -39,4 +40,10 @@ class DB_PREFIXES(enum.Enum):
|
|||
db_state = b's'
|
||||
channel_count = b'Z'
|
||||
support_amount = b'a'
|
||||
block_txs = b'b'
|
||||
block_tx = b'b'
|
||||
trending_notifications = b'c'
|
||||
mempool_tx = b'd'
|
||||
touched_hashX = b'e'
|
||||
|
||||
|
||||
COLUMN_SETTINGS = {} # this is updated by the PrefixRow metaclass
|
||||
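Every table in the prefix DB is distinguished by the single byte assigned here, so the values must never collide; @enum.unique (already applied to DB_PREFIXES above) enforces exactly that at import time. A trimmed stand-in showing the invariant (not the full enum):

# Trimmed stand-in illustrating the prefix-byte uniqueness invariant.
import enum

@enum.unique                      # raises ValueError if two names share a byte
class ExamplePrefixes(enum.Enum):
    undo = b'M'
    touched_or_deleted = b'Y'
    block_tx = b'b'
    trending_notifications = b'c'
    mempool_tx = b'd'
    touched_hashX = b'e'

assert len({p.value for p in ExamplePrefixes}) == len(ExamplePrefixes)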
|
|
|
@ -1,4 +1,6 @@
|
|||
import typing
|
||||
from typing import Optional
|
||||
from lbry.error import ResolveCensoredError
|
||||
|
||||
CLAIM_TYPES = {
|
||||
'stream': 1,
|
||||
|
@ -445,3 +447,31 @@ class ResolveResult(typing.NamedTuple):
|
|||
channel_hash: typing.Optional[bytes]
|
||||
reposted_claim_hash: typing.Optional[bytes]
|
||||
signature_valid: typing.Optional[bool]
|
||||
|
||||
|
||||
class TrendingNotification(typing.NamedTuple):
|
||||
height: int
|
||||
prev_amount: int
|
||||
new_amount: int
|
||||
|
||||
|
||||
class UTXO(typing.NamedTuple):
|
||||
tx_num: int
|
||||
tx_pos: int
|
||||
tx_hash: bytes
|
||||
height: int
|
||||
value: int
|
||||
|
||||
|
||||
OptionalResolveResultOrError = Optional[typing.Union[ResolveResult, ResolveCensoredError, LookupError, ValueError]]
|
||||
|
||||
|
||||
class ExpandedResolveResult(typing.NamedTuple):
|
||||
stream: OptionalResolveResultOrError
|
||||
channel: OptionalResolveResultOrError
|
||||
repost: OptionalResolveResultOrError
|
||||
reposted_channel: OptionalResolveResultOrError
|
||||
|
||||
|
||||
class DBError(Exception):
|
||||
"""Raised on general DB errors generally indicating corruption."""
|
||||
|
|
File diff suppressed because it is too large
150
lbry/wallet/server/db/elasticsearch/common.py
Normal file
|
@ -0,0 +1,150 @@
|
|||
from decimal import Decimal
|
||||
from typing import Iterable
|
||||
|
||||
from lbry.error import TooManyClaimSearchParametersError
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.url import normalize_name
|
||||
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
|
||||
from lbry.wallet.server.db.elasticsearch.constants import REPLACEMENTS, FIELDS, TEXT_FIELDS, RANGE_FIELDS
|
||||
|
||||
|
||||
def expand_query(**kwargs):
|
||||
if "amount_order" in kwargs:
|
||||
kwargs["limit"] = 1
|
||||
kwargs["order_by"] = "effective_amount"
|
||||
kwargs["offset"] = int(kwargs["amount_order"]) - 1
|
||||
if 'name' in kwargs:
|
||||
kwargs['name'] = normalize_name(kwargs.pop('name'))
|
||||
if kwargs.get('is_controlling') is False:
|
||||
kwargs.pop('is_controlling')
|
||||
query = {'must': [], 'must_not': []}
|
||||
collapse = None
|
||||
if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
|
||||
kwargs['fee_currency'] = kwargs['fee_currency'].upper()
|
||||
for key, value in kwargs.items():
|
||||
key = key.replace('claim.', '')
|
||||
many = key.endswith('__in') or isinstance(value, list)
|
||||
if many and len(value) > 2048:
|
||||
raise TooManyClaimSearchParametersError(key, 2048)
|
||||
if many:
|
||||
key = key.replace('__in', '')
|
||||
value = list(filter(None, value))
|
||||
if value is None or isinstance(value, list) and len(value) == 0:
|
||||
continue
|
||||
key = REPLACEMENTS.get(key, key)
|
||||
if key in FIELDS:
|
||||
partial_id = False
|
||||
if key == 'claim_type':
|
||||
if isinstance(value, str):
|
||||
value = CLAIM_TYPES[value]
|
||||
else:
|
||||
value = [CLAIM_TYPES[claim_type] for claim_type in value]
|
||||
elif key == 'stream_type':
|
||||
value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
|
||||
if key == '_id':
|
||||
if isinstance(value, Iterable):
|
||||
value = [item[::-1].hex() for item in value]
|
||||
else:
|
||||
value = value[::-1].hex()
|
||||
if not many and key in ('_id', 'claim_id') and len(value) < 20:
|
||||
partial_id = True
|
||||
if key in ('signature_valid', 'has_source'):
|
||||
continue # handled later
|
||||
if key in TEXT_FIELDS:
|
||||
key += '.keyword'
|
||||
ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
|
||||
if partial_id:
|
||||
query['must'].append({"prefix": {"claim_id": value}})
|
||||
elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
|
||||
operator_length = 2 if value[:2] in ops else 1
|
||||
operator, value = value[:operator_length], value[operator_length:]
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"range": {key: {ops[operator]: value}}})
|
||||
elif many:
|
||||
query['must'].append({"terms": {key: value}})
|
||||
else:
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"term": {key: {"value": value}}})
|
||||
elif key == 'not_channel_ids':
|
||||
for channel_id in value:
|
||||
query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
|
||||
query['must_not'].append({"term": {'_id': channel_id}})
|
||||
elif key == 'channel_ids':
|
||||
query['must'].append({"terms": {'channel_id.keyword': value}})
|
||||
elif key == 'claim_ids':
|
||||
query['must'].append({"terms": {'claim_id.keyword': value}})
|
||||
elif key == 'media_types':
|
||||
query['must'].append({"terms": {'media_type.keyword': value}})
|
||||
elif key == 'any_languages':
|
||||
query['must'].append({"terms": {'languages': clean_tags(value)}})
|
||||
elif key == 'any_languages':
|
||||
query['must'].append({"terms": {'languages': value}})
|
||||
elif key == 'all_languages':
|
||||
query['must'].extend([{"term": {'languages': tag}} for tag in value])
|
||||
elif key == 'any_tags':
|
||||
query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
|
||||
elif key == 'all_tags':
|
||||
query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_tags':
|
||||
query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_claim_id':
|
||||
query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
|
||||
elif key == 'limit_claims_per_channel':
|
||||
collapse = ('channel_id.keyword', value)
|
||||
if kwargs.get('has_channel_signature'):
|
||||
query['must'].append({"exists": {"field": "signature"}})
|
||||
if 'signature_valid' in kwargs:
|
||||
query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
elif 'signature_valid' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
|
||||
query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
if 'has_source' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
|
||||
query['should'].append(
|
||||
{"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
|
||||
if kwargs.get('text'):
|
||||
query['must'].append(
|
||||
{"simple_query_string":
|
||||
{"query": kwargs["text"], "fields": [
|
||||
"claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
|
||||
]}})
|
||||
query = {
|
||||
"_source": {"excludes": ["description", "title"]},
|
||||
'query': {'bool': query},
|
||||
"sort": [],
|
||||
}
|
||||
if "limit" in kwargs:
|
||||
query["size"] = kwargs["limit"]
|
||||
if 'offset' in kwargs:
|
||||
query["from"] = kwargs["offset"]
|
||||
if 'order_by' in kwargs:
|
||||
if isinstance(kwargs["order_by"], str):
|
||||
kwargs["order_by"] = [kwargs["order_by"]]
|
||||
for value in kwargs['order_by']:
|
||||
if 'trending_group' in value:
|
||||
# fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
|
||||
continue
|
||||
is_asc = value.startswith('^')
|
||||
value = value[1:] if is_asc else value
|
||||
value = REPLACEMENTS.get(value, value)
|
||||
if value in TEXT_FIELDS:
|
||||
value += '.keyword'
|
||||
query['sort'].append({value: "asc" if is_asc else "desc"})
|
||||
if collapse:
|
||||
query["collapse"] = {
|
||||
"field": collapse[0],
|
||||
"inner_hits": {
|
||||
"name": collapse[0],
|
||||
"size": collapse[1],
|
||||
"sort": query["sort"]
|
||||
}
|
||||
}
|
||||
return query
|
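As a usage illustration, a claim_search-style call flows through the branches above roughly like this; the claim_id value is a made-up partial id, and the exact field names in the output depend on REPLACEMENTS/TEXT_FIELDS defined elsewhere in the module:

# Illustrative call: the partial claim_id becomes a prefix match, the name is
# normalized, claim_type is mapped to its integer code and limit becomes "size".
q = expand_query(name='@lbry', claim_type='stream', claim_id='abc123', limit=5)
assert {"prefix": {"claim_id": "abc123"}} in q['query']['bool']['must']
assert q['size'] == 5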
117
lbry/wallet/server/db/elasticsearch/fast_ar_trending.py
Normal file
|
@ -0,0 +1,117 @@
|
|||
FAST_AR_TRENDING_SCRIPT = """
double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }

double logsumexp(double x, double y)
{
    double top;
    if(x > y)
        top = x;
    else
        top = y;
    double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
    return(result);
}

double logdiffexp(double big, double small)
{
    return big + Math.log(1.0 - Math.exp(small - big));
}

double squash(double x)
{
    if(x < 0.0)
        return -Math.log(1.0 - x);
    else
        return Math.log(x + 1.0);
}

double unsquash(double x)
{
    if(x < 0.0)
        return 1.0 - Math.exp(-x);
    else
        return Math.exp(x) - 1.0;
}

double log_to_squash(double x)
{
    return logsumexp(x, 0.0);
}

double squash_to_log(double x)
{
    //assert x > 0.0;
    return logdiffexp(x, 0.0);
}

double squashed_add(double x, double y)
{
    // squash(unsquash(x) + unsquash(y)) but avoiding overflow.
    // Cases where the signs are the same
    if (x < 0.0 && y < 0.0)
        return -logsumexp(-x, logdiffexp(-y, 0.0));
    if (x >= 0.0 && y >= 0.0)
        return logsumexp(x, logdiffexp(y, 0.0));
    // Where the signs differ
    if (x >= 0.0 && y < 0.0)
        if (Math.abs(x) >= Math.abs(y))
            return logsumexp(0.0, logdiffexp(x, -y));
        else
            return -logsumexp(0.0, logdiffexp(-y, x));
    if (x < 0.0 && y >= 0.0)
    {
        // Addition is commutative, hooray for new math
        return squashed_add(y, x);
    }
    return 0.0;
}

double squashed_multiply(double x, double y)
{
    // squash(unsquash(x)*unsquash(y)) but avoiding overflow.
    int sign;
    if(x*y >= 0.0)
        sign = 1;
    else
        sign = -1;
    return sign*logsumexp(squash_to_log(Math.abs(x))
            + squash_to_log(Math.abs(y)), 0.0);
}

// Squashed inflated units
double inflateUnits(int height) {
    double timescale = 576.0; // Half life of 400 = e-folding time of a day
                              // by coincidence, so may as well go with it
    return log_to_squash(height / timescale);
}

double spikePower(double newAmount) {
    if (newAmount < 50.0) {
        return(0.5);
    } else if (newAmount < 85.0) {
        return(newAmount / 100.0);
    } else {
        return(0.85);
    }
}

double spikeMass(double oldAmount, double newAmount) {
    double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
    double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
    double power = spikePower(newAmount);
    if (oldAmount > newAmount) {
        return -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
    } else {
        return Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
    }
}

for (i in params.src.changes) {
    double units = inflateUnits(i.height);
    if (ctx._source.trending_score == null) {
        ctx._source.trending_score = 0.0;
    }
    double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
    ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
}
"""
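The script keeps `trending_score` in a "squashed" log-like domain so repeated additions do not overflow. Below is a rough Python transcription of a few of its helpers, useful for sanity-checking the arithmetic outside Elasticsearch; it is an illustration only, and the Painless source above remains the authoritative version.

```python
import math

def soften_lbc(lbc: float) -> float:
    # cube root, same as softenLBC in the Painless script
    return lbc ** (1.0 / 3.0)

def squash(x: float) -> float:
    return -math.log(1.0 - x) if x < 0.0 else math.log(x + 1.0)

def unsquash(x: float) -> float:
    return 1.0 - math.exp(-x) if x < 0.0 else math.exp(x) - 1.0

def spike_power(new_amount: float) -> float:
    if new_amount < 50.0:
        return 0.5
    if new_amount < 85.0:
        return new_amount / 100.0
    return 0.85

def spike_mass(old_amount: float, new_amount: float) -> float:
    softened_change = soften_lbc(abs(new_amount - old_amount))
    change_in_softened = abs(soften_lbc(new_amount) - soften_lbc(old_amount))
    power = spike_power(new_amount)
    sign = -1.0 if old_amount > new_amount else 1.0
    return sign * (change_in_softened ** power) * (softened_change ** (1.0 - power))

# e.g. a claim going from 10 LBC to 110 LBC of support:
# spike_mass(10.0, 110.0) ≈ 2.9, which is then squashed and added to trending_score.
```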
lbry/wallet/server/db/elasticsearch/notifier.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import struct
import typing
import asyncio
import logging


log = logging.getLogger(__name__)


class ElasticNotifierProtocol(asyncio.Protocol):
    """notifies the reader when ES has written updates"""

    def __init__(self, listeners):
        self._listeners = listeners
        self.transport: typing.Optional[asyncio.Transport] = None

    def connection_made(self, transport):
        self.transport = transport
        self._listeners.append(self)
        log.info("got es notifier connection")

    def connection_lost(self, exc) -> None:
        self._listeners.remove(self)
        self.transport = None

    def send_height(self, height: int, block_hash: bytes):
        log.info("notify es update '%s'", height)
        self.transport.write(struct.pack(b'>Q32s', height, block_hash))


class ElasticNotifierClientProtocol(asyncio.Protocol):
    """notifies the reader when ES has written updates"""

    def __init__(self, notifications: asyncio.Queue):
        self.notifications = notifications
        self.transport: typing.Optional[asyncio.Transport] = None

    def close(self):
        if self.transport and not self.transport.is_closing():
            self.transport.close()

    def connection_made(self, transport):
        self.transport = transport
        log.info("connected to es notifier")

    def connection_lost(self, exc) -> None:
        self.transport = None

    def data_received(self, data: bytes) -> None:
        try:
            height, block_hash = struct.unpack(b'>Q32s', data)
        except:
            log.exception("failed to decode %s", (data or b'').hex())
            raise
        self.notifications.put_nowait((height, block_hash))
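A sketch of how a reader process might consume these height notifications follows. The loopback host and the 19080 default (the `ELASTIC_NOTIFIER_PORT` added later in this diff) are assumptions; `follow_es_height` itself is illustrative and not part of the branch.

```python
import asyncio

async def follow_es_height(port: int = 19080):
    notifications = asyncio.Queue()
    loop = asyncio.get_event_loop()
    # connect to the writer's notifier server on localhost
    transport, protocol = await loop.create_connection(
        lambda: ElasticNotifierClientProtocol(notifications), '127.0.0.1', port
    )
    try:
        while True:
            height, block_hash = await notifications.get()
            print(f"ES caught up to height {height} ({block_hash[::-1].hex()})")
    finally:
        protocol.close()
```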
lbry/wallet/server/db/elasticsearch/search.py
@@ -92,10 +92,11 @@ class SearchIndex:
         await self.sync_client.indices.refresh(self.index)
         return acked
 
-    def stop(self):
-        clients = [self.sync_client, self.search_client]
+    async def stop(self):
+        clients = [c for c in (self.sync_client, self.search_client) if c is not None]
         self.sync_client, self.search_client = None, None
-        return asyncio.ensure_future(asyncio.gather(*(client.close() for client in clients)))
+        if clients:
+            await asyncio.gather(*(client.close() for client in clients))
 
     def delete_index(self):
         return self.sync_client.indices.delete(self.index, ignore_unavailable=True)
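Previously `stop()` scheduled the client shutdown with `ensure_future` and would call `close()` on a client that might already be `None`; it is now a coroutine that filters out unset clients and is awaited directly. A hypothetical call site for the new shape:

```python
async def teardown(index: 'SearchIndex'):
    # waits for both ES clients (if any) to actually close before returning
    await index.stop()
```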
@ -1,138 +1,416 @@
|
|||
import os
|
||||
import argparse
|
||||
import time
|
||||
import signal
|
||||
import json
|
||||
import typing
|
||||
from collections import defaultdict
|
||||
import asyncio
|
||||
import logging
|
||||
from elasticsearch import AsyncElasticsearch
|
||||
from elasticsearch import AsyncElasticsearch, NotFoundError
|
||||
from elasticsearch.helpers import async_streaming_bulk
|
||||
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.db.elasticsearch.search import SearchIndex, IndexVersionMismatch
|
||||
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS
|
||||
from prometheus_client import Gauge, Histogram
|
||||
|
||||
from lbry.schema.result import Censor
|
||||
from lbry.wallet.server.db.elasticsearch.search import IndexVersionMismatch
|
||||
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS, INDEX_DEFAULT_SETTINGS
|
||||
from lbry.wallet.server.db.elasticsearch.common import expand_query
|
||||
from lbry.wallet.server.db.elasticsearch.notifier import ElasticNotifierProtocol
|
||||
from lbry.wallet.server.db.elasticsearch.fast_ar_trending import FAST_AR_TRENDING_SCRIPT
|
||||
from lbry.wallet.server.chain_reader import BlockchainReader, HISTOGRAM_BUCKETS
|
||||
from lbry.wallet.server.db.revertable import RevertableOp
|
||||
from lbry.wallet.server.db.common import TrendingNotification
|
||||
from lbry.wallet.server.db import DB_PREFIXES
|
||||
|
||||
|
||||
async def get_recent_claims(env, index_name='claims', db=None):
|
||||
log = logging.getLogger()
|
||||
need_open = db is None
|
||||
db = db or LevelDB(env)
|
||||
try:
|
||||
if need_open:
|
||||
db.open_db()
|
||||
if db.es_sync_height == db.db_height or db.db_height <= 0:
|
||||
return
|
||||
if need_open:
|
||||
await db.initialize_caches()
|
||||
log.info(f"catching up ES ({db.es_sync_height}) to leveldb height: {db.db_height}")
|
||||
cnt = 0
|
||||
touched_claims = set()
|
||||
deleted_claims = set()
|
||||
for height in range(db.es_sync_height, db.db_height + 1):
|
||||
touched_or_deleted = db.prefix_db.touched_or_deleted.get(height)
|
||||
touched_claims.update(touched_or_deleted.touched_claims)
|
||||
deleted_claims.update(touched_or_deleted.deleted_claims)
|
||||
touched_claims.difference_update(deleted_claims)
|
||||
|
||||
for deleted in deleted_claims:
|
||||
yield {
|
||||
'_index': index_name,
|
||||
'_op_type': 'delete',
|
||||
'_id': deleted.hex()
|
||||
}
|
||||
for touched in touched_claims:
|
||||
claim = db.claim_producer(touched)
|
||||
if claim:
|
||||
yield {
|
||||
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
|
||||
'_id': claim['claim_id'],
|
||||
'_index': index_name,
|
||||
'_op_type': 'update',
|
||||
'doc_as_upsert': True
|
||||
}
|
||||
cnt += 1
|
||||
else:
|
||||
logging.warning("could not sync claim %s", touched.hex())
|
||||
if cnt % 10000 == 0:
|
||||
logging.info("%i claims sent to ES", cnt)
|
||||
|
||||
db.es_sync_height = db.db_height
|
||||
db.write_db_state()
|
||||
db.prefix_db.unsafe_commit()
|
||||
db.assert_db_state()
|
||||
|
||||
logging.info("finished sending %i claims to ES, deleted %i", cnt, len(deleted_claims))
|
||||
finally:
|
||||
if need_open:
|
||||
db.close()
|
||||
log = logging.getLogger()
|
||||
|
||||
|
||||
async def get_all_claims(env, index_name='claims', db=None):
|
||||
need_open = db is None
|
||||
db = db or LevelDB(env)
|
||||
if need_open:
|
||||
db.open_db()
|
||||
await db.initialize_caches()
|
||||
logging.info("Fetching claims to send ES from leveldb")
|
||||
try:
|
||||
cnt = 0
|
||||
async for claim in db.all_claims_producer():
|
||||
yield {
|
||||
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
|
||||
'_id': claim['claim_id'],
|
||||
'_index': index_name,
|
||||
'_op_type': 'update',
|
||||
'doc_as_upsert': True
|
||||
}
|
||||
cnt += 1
|
||||
if cnt % 10000 == 0:
|
||||
logging.info("sent %i claims to ES", cnt)
|
||||
finally:
|
||||
if need_open:
|
||||
db.close()
|
||||
class ElasticWriter(BlockchainReader):
|
||||
VERSION = 1
|
||||
prometheus_namespace = ""
|
||||
block_count_metric = Gauge(
|
||||
"block_count", "Number of processed blocks", namespace="elastic_sync"
|
||||
)
|
||||
block_update_time_metric = Histogram(
|
||||
"block_time", "Block update times", namespace="elastic_sync", buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
reorg_count_metric = Gauge(
|
||||
"reorg_count", "Number of reorgs", namespace="elastic_sync"
|
||||
)
|
||||
|
||||
def __init__(self, env):
|
||||
super().__init__(env, 'lbry-elastic-writer', thread_workers=1, thread_prefix='lbry-elastic-writer')
|
||||
# self._refresh_interval = 0.1
|
||||
self._task = None
|
||||
self.index = self.env.es_index_prefix + 'claims'
|
||||
self._elastic_host = env.elastic_host
|
||||
self._elastic_port = env.elastic_port
|
||||
self.sync_timeout = 1800
|
||||
self.sync_client = None
|
||||
self._es_info_path = os.path.join(env.db_dir, 'es_info')
|
||||
self._last_wrote_height = 0
|
||||
self._last_wrote_block_hash = None
|
||||
|
||||
async def make_es_index_and_run_sync(env: Env, clients=32, force=False, db=None, index_name='claims'):
|
||||
index = SearchIndex(env.es_index_prefix, elastic_host=env.elastic_host, elastic_port=env.elastic_port)
|
||||
logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
|
||||
try:
|
||||
created = await index.start()
|
||||
except IndexVersionMismatch as err:
|
||||
logging.info(
|
||||
"dropping ES search index (version %s) for upgrade to version %s", err.got_version, err.expected_version
|
||||
self._touched_claims = set()
|
||||
self._deleted_claims = set()
|
||||
|
||||
self._removed_during_undo = set()
|
||||
|
||||
self._trending = defaultdict(list)
|
||||
self._advanced = True
|
||||
self.synchronized = asyncio.Event()
|
||||
self._listeners: typing.List[ElasticNotifierProtocol] = []
|
||||
|
||||
async def run_es_notifier(self, synchronized: asyncio.Event):
|
||||
server = await asyncio.get_event_loop().create_server(
|
||||
lambda: ElasticNotifierProtocol(self._listeners), '127.0.0.1', self.env.elastic_notifier_port
|
||||
)
|
||||
await index.delete_index()
|
||||
await index.stop()
|
||||
created = await index.start()
|
||||
finally:
|
||||
index.stop()
|
||||
self.log.info("ES notifier server listening on TCP localhost:%i", self.env.elastic_notifier_port)
|
||||
synchronized.set()
|
||||
async with server:
|
||||
await server.serve_forever()
|
||||
|
||||
es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
|
||||
if force or created:
|
||||
claim_generator = get_all_claims(env, index_name=index_name, db=db)
|
||||
else:
|
||||
claim_generator = get_recent_claims(env, index_name=index_name, db=db)
|
||||
try:
|
||||
async for ok, item in async_streaming_bulk(es, claim_generator, request_timeout=600, raise_on_error=False):
|
||||
if not ok:
|
||||
logging.warning("indexing failed for an item: %s", item)
|
||||
await es.indices.refresh(index=index_name)
|
||||
finally:
|
||||
await es.close()
|
||||
def notify_es_notification_listeners(self, height: int, block_hash: bytes):
|
||||
for p in self._listeners:
|
||||
p.send_height(height, block_hash)
|
||||
self.log.info("notify listener %i", height)
|
||||
|
||||
def _read_es_height(self):
|
||||
info = {}
|
||||
if os.path.exists(self._es_info_path):
|
||||
with open(self._es_info_path, 'r') as f:
|
||||
info.update(json.loads(f.read()))
|
||||
self._last_wrote_height = int(info.get('height', 0))
|
||||
self._last_wrote_block_hash = info.get('block_hash', None)
|
||||
|
||||
def run_elastic_sync():
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logging.getLogger('aiohttp').setLevel(logging.WARNING)
|
||||
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
|
||||
async def read_es_height(self):
|
||||
await asyncio.get_event_loop().run_in_executor(self._executor, self._read_es_height)
|
||||
|
||||
logging.info('lbry.server starting')
|
||||
parser = argparse.ArgumentParser(prog="lbry-hub-elastic-sync")
|
||||
parser.add_argument("-c", "--clients", type=int, default=32)
|
||||
parser.add_argument("-f", "--force", default=False, action='store_true')
|
||||
Env.contribute_to_arg_parser(parser)
|
||||
args = parser.parse_args()
|
||||
env = Env.from_arg_parser(args)
|
||||
def write_es_height(self, height: int, block_hash: str):
|
||||
with open(self._es_info_path, 'w') as f:
|
||||
f.write(json.dumps({'height': height, 'block_hash': block_hash}, indent=2))
|
||||
self._last_wrote_height = height
|
||||
self._last_wrote_block_hash = block_hash
|
||||
|
||||
if not os.path.exists(os.path.join(args.db_dir, 'lbry-leveldb')):
|
||||
logging.info("DB path doesnt exist, nothing to sync to ES")
|
||||
return
|
||||
async def get_index_version(self) -> int:
|
||||
try:
|
||||
template = await self.sync_client.indices.get_template(self.index)
|
||||
return template[self.index]['version']
|
||||
except NotFoundError:
|
||||
return 0
|
||||
|
||||
asyncio.run(make_es_index_and_run_sync(env, clients=args.clients, force=args.force))
|
||||
async def set_index_version(self, version):
|
||||
await self.sync_client.indices.put_template(
|
||||
self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
|
||||
)
|
||||
|
||||
async def start_index(self) -> bool:
|
||||
if self.sync_client:
|
||||
return False
|
||||
hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
|
||||
self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
|
||||
while True:
|
||||
try:
|
||||
await self.sync_client.cluster.health(wait_for_status='yellow')
|
||||
self.log.info("ES is ready to connect to")
|
||||
break
|
||||
except ConnectionError:
|
||||
self.log.warning("Failed to connect to Elasticsearch. Waiting for it!")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
index_version = await self.get_index_version()
|
||||
|
||||
res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
|
||||
acked = res.get('acknowledged', False)
|
||||
|
||||
if acked:
|
||||
await self.set_index_version(self.VERSION)
|
||||
return True
|
||||
elif index_version != self.VERSION:
|
||||
self.log.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
|
||||
raise IndexVersionMismatch(index_version, self.VERSION)
|
||||
else:
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
return False
|
||||
|
||||
async def stop_index(self):
|
||||
if self.sync_client:
|
||||
await self.sync_client.close()
|
||||
self.sync_client = None
|
||||
|
||||
async def delete_index(self):
|
||||
if self.sync_client:
|
||||
return await self.sync_client.indices.delete(self.index, ignore_unavailable=True)
|
||||
|
||||
def update_filter_query(self, censor_type, blockdict, channels=False):
|
||||
blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
|
||||
if channels:
|
||||
update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
else:
|
||||
update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
key = 'channel_id' if channels else 'claim_id'
|
||||
update['script'] = {
|
||||
"source": f"ctx._source.censor_type={censor_type}; "
|
||||
f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
|
||||
"lang": "painless",
|
||||
"params": blockdict
|
||||
}
|
||||
return update
|
||||
|
||||
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
|
||||
if filtered_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if filtered_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
|
||||
@staticmethod
|
||||
def _upsert_claim_query(index, claim):
|
||||
return {
|
||||
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
|
||||
'_id': claim['claim_id'],
|
||||
'_index': index,
|
||||
'_op_type': 'update',
|
||||
'doc_as_upsert': True
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _delete_claim_query(index, claim_hash: bytes):
|
||||
return {
|
||||
'_index': index,
|
||||
'_op_type': 'delete',
|
||||
'_id': claim_hash.hex()
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _update_trending_query(index, claim_hash, notifications):
|
||||
return {
|
||||
'_id': claim_hash.hex(),
|
||||
'_index': index,
|
||||
'_op_type': 'update',
|
||||
'script': {
|
||||
'lang': 'painless',
|
||||
'source': FAST_AR_TRENDING_SCRIPT,
|
||||
'params': {'src': {
|
||||
'changes': [
|
||||
{
|
||||
'height': notification.height,
|
||||
'prev_amount': notification.prev_amount / 1E8,
|
||||
'new_amount': notification.new_amount / 1E8,
|
||||
} for notification in notifications
|
||||
]
|
||||
}}
|
||||
},
|
||||
}
|
||||
|
||||
async def _claim_producer(self):
|
||||
for deleted in self._deleted_claims:
|
||||
yield self._delete_claim_query(self.index, deleted)
|
||||
for touched in self._touched_claims:
|
||||
claim = self.db.claim_producer(touched)
|
||||
if claim:
|
||||
yield self._upsert_claim_query(self.index, claim)
|
||||
for claim_hash, notifications in self._trending.items():
|
||||
yield self._update_trending_query(self.index, claim_hash, notifications)
|
||||
|
||||
def advance(self, height: int):
|
||||
super().advance(height)
|
||||
|
||||
touched_or_deleted = self.db.prefix_db.touched_or_deleted.get(height)
|
||||
for k, v in self.db.prefix_db.trending_notification.iterate((height,)):
|
||||
self._trending[k.claim_hash].append(TrendingNotification(k.height, v.previous_amount, v.new_amount))
|
||||
if touched_or_deleted:
|
||||
readded_after_reorg = self._removed_during_undo.intersection(touched_or_deleted.touched_claims)
|
||||
self._deleted_claims.difference_update(readded_after_reorg)
|
||||
self._touched_claims.update(touched_or_deleted.touched_claims)
|
||||
self._deleted_claims.update(touched_or_deleted.deleted_claims)
|
||||
self._touched_claims.difference_update(self._deleted_claims)
|
||||
for to_del in touched_or_deleted.deleted_claims:
|
||||
if to_del in self._trending:
|
||||
self._trending.pop(to_del)
|
||||
self._advanced = True
|
||||
|
||||
def unwind(self):
|
||||
self.db.tx_counts.pop()
|
||||
reverted_block_hash = self.db.coin.header_hash(self.db.headers.pop())
|
||||
packed = self.db.prefix_db.undo.get(len(self.db.tx_counts), reverted_block_hash)
|
||||
touched_or_deleted = None
|
||||
claims_to_delete = []
|
||||
# find and apply the touched_or_deleted items in the undos for the reverted blocks
|
||||
assert packed, f'missing undo information for block {len(self.db.tx_counts)}'
|
||||
while packed:
|
||||
op, packed = RevertableOp.unpack(packed)
|
||||
if op.is_delete and op.key.startswith(DB_PREFIXES.touched_or_deleted.value):
|
||||
assert touched_or_deleted is None, 'only should have one match'
|
||||
touched_or_deleted = self.db.prefix_db.touched_or_deleted.unpack_value(op.value)
|
||||
elif op.is_delete and op.key.startswith(DB_PREFIXES.claim_to_txo.value):
|
||||
v = self.db.prefix_db.claim_to_txo.unpack_value(op.value)
|
||||
if v.root_tx_num == v.tx_num and v.root_tx_num > self.db.tx_counts[-1]:
|
||||
claims_to_delete.append(self.db.prefix_db.claim_to_txo.unpack_key(op.key).claim_hash)
|
||||
if touched_or_deleted:
|
||||
self._touched_claims.update(set(touched_or_deleted.deleted_claims).union(
|
||||
touched_or_deleted.touched_claims.difference(set(claims_to_delete))))
|
||||
self._deleted_claims.update(claims_to_delete)
|
||||
self._removed_during_undo.update(claims_to_delete)
|
||||
self._advanced = True
|
||||
self.log.warning("delete %i claim and upsert %i from reorg", len(self._deleted_claims), len(self._touched_claims))
|
||||
|
||||
async def poll_for_changes(self):
|
||||
await super().poll_for_changes()
|
||||
cnt = 0
|
||||
success = 0
|
||||
if self._advanced:
|
||||
if self._touched_claims or self._deleted_claims or self._trending:
|
||||
async for ok, item in async_streaming_bulk(
|
||||
self.sync_client, self._claim_producer(),
|
||||
raise_on_error=False):
|
||||
cnt += 1
|
||||
if not ok:
|
||||
self.log.warning("indexing failed for an item: %s", item)
|
||||
else:
|
||||
success += 1
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.db.reload_blocking_filtering_streams()
|
||||
await self.apply_filters(
|
||||
self.db.blocked_streams, self.db.blocked_channels, self.db.filtered_streams,
|
||||
self.db.filtered_channels
|
||||
)
|
||||
self.write_es_height(self.db.db_height, self.db.db_tip[::-1].hex())
|
||||
self.log.info("Indexing block %i done. %i/%i successful", self._last_wrote_height, success, cnt)
|
||||
self._touched_claims.clear()
|
||||
self._deleted_claims.clear()
|
||||
self._removed_during_undo.clear()
|
||||
self._trending.clear()
|
||||
self._advanced = False
|
||||
self.synchronized.set()
|
||||
self.notify_es_notification_listeners(self._last_wrote_height, self.db.db_tip)
|
||||
|
||||
@property
|
||||
def last_synced_height(self) -> int:
|
||||
return self._last_wrote_height
|
||||
|
||||
async def start(self, reindex=False):
|
||||
await super().start()
|
||||
|
||||
def _start_cancellable(run, *args):
|
||||
_flag = asyncio.Event()
|
||||
self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
|
||||
return _flag.wait()
|
||||
|
||||
self.db.open_db()
|
||||
await self.db.initialize_caches()
|
||||
await self.read_es_height()
|
||||
await self.start_index()
|
||||
self.last_state = self.db.read_db_state()
|
||||
|
||||
await _start_cancellable(self.run_es_notifier)
|
||||
|
||||
if reindex or self._last_wrote_height == 0 and self.db.db_height > 0:
|
||||
if self._last_wrote_height == 0:
|
||||
self.log.info("running initial ES indexing of rocksdb at block height %i", self.db.db_height)
|
||||
else:
|
||||
self.log.info("reindex (last wrote: %i, db height: %i)", self._last_wrote_height, self.db.db_height)
|
||||
await self.reindex()
|
||||
await _start_cancellable(self.refresh_blocks_forever)
|
||||
|
||||
async def stop(self, delete_index=False):
|
||||
async with self._lock:
|
||||
while self.cancellable_tasks:
|
||||
t = self.cancellable_tasks.pop()
|
||||
if not t.done():
|
||||
t.cancel()
|
||||
if delete_index:
|
||||
await self.delete_index()
|
||||
await self.stop_index()
|
||||
self._executor.shutdown(wait=True)
|
||||
self._executor = None
|
||||
self.shutdown_event.set()
|
||||
|
||||
def run(self, reindex=False):
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.set_default_executor(self._executor)
|
||||
|
||||
def __exit():
|
||||
raise SystemExit()
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
||||
loop.run_until_complete(self.start(reindex=reindex))
|
||||
loop.run_until_complete(self.shutdown_event.wait())
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
pass
|
||||
finally:
|
||||
loop.run_until_complete(self.stop())
|
||||
|
||||
async def reindex(self):
|
||||
async with self._lock:
|
||||
self.log.info("reindexing %i claims (estimate)", self.db.prefix_db.claim_to_txo.estimate_num_keys())
|
||||
await self.delete_index()
|
||||
res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
|
||||
acked = res.get('acknowledged', False)
|
||||
if acked:
|
||||
await self.set_index_version(self.VERSION)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.write_es_height(0, self.env.coin.GENESIS_HASH)
|
||||
await self._sync_all_claims()
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.write_es_height(self.db.db_height, self.db.db_tip[::-1].hex())
|
||||
self.notify_es_notification_listeners(self.db.db_height, self.db.db_tip)
|
||||
self.log.info("finished reindexing")
|
||||
|
||||
async def _sync_all_claims(self, batch_size=100000):
|
||||
def load_historic_trending():
|
||||
notifications = self._trending
|
||||
for k, v in self.db.prefix_db.trending_notification.iterate():
|
||||
notifications[k.claim_hash].append(TrendingNotification(k.height, v.previous_amount, v.new_amount))
|
||||
|
||||
async def all_claims_producer():
|
||||
async for claim in self.db.all_claims_producer(batch_size=batch_size):
|
||||
yield self._upsert_claim_query(self.index, claim)
|
||||
claim_hash = bytes.fromhex(claim['claim_id'])
|
||||
if claim_hash in self._trending:
|
||||
yield self._update_trending_query(self.index, claim_hash, self._trending.pop(claim_hash))
|
||||
self._trending.clear()
|
||||
|
||||
self.log.info("loading about %i historic trending updates", self.db.prefix_db.trending_notification.estimate_num_keys())
|
||||
await asyncio.get_event_loop().run_in_executor(self._executor, load_historic_trending)
|
||||
self.log.info("loaded historic trending updates for %i claims", len(self._trending))
|
||||
|
||||
cnt = 0
|
||||
success = 0
|
||||
producer = all_claims_producer()
|
||||
|
||||
finished = False
|
||||
try:
|
||||
async for ok, item in async_streaming_bulk(self.sync_client, producer, raise_on_error=False):
|
||||
cnt += 1
|
||||
if not ok:
|
||||
self.log.warning("indexing failed for an item: %s", item)
|
||||
else:
|
||||
success += 1
|
||||
if cnt % batch_size == 0:
|
||||
self.log.info(f"indexed {success} claims")
|
||||
finished = True
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.log.info("indexed %i/%i claims", success, cnt)
|
||||
finally:
|
||||
if not finished:
|
||||
await producer.aclose()
|
||||
self.shutdown_event.set()
|
||||
|
|
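The `ElasticWriter` above streams three kinds of operations into `async_streaming_bulk`: claim upserts, claim deletions, and scripted trending updates, mirroring `_upsert_claim_query`, `_delete_claim_query` and `_update_trending_query`. The field values below are placeholders chosen only to show the shapes:

```python
upsert_op = {
    '_index': 'claims', '_op_type': 'update', '_id': 'deadbeef' * 5,
    'doc': {'claim_name': 'example', 'height': 1000000}, 'doc_as_upsert': True,
}
delete_op = {'_index': 'claims', '_op_type': 'delete', '_id': 'deadbeef' * 5}
trending_op = {
    '_index': 'claims', '_op_type': 'update', '_id': 'deadbeef' * 5,
    'script': {
        'lang': 'painless',
        'source': FAST_AR_TRENDING_SCRIPT,
        'params': {'src': {'changes': [
            {'height': 1000000, 'prev_amount': 1.0, 'new_amount': 11.0},
        ]}},
    },
}
```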
lbry/wallet/server/db/interface.py (new file, 145 lines)
@@ -0,0 +1,145 @@
import struct
|
||||
import typing
|
||||
|
||||
import rocksdb
|
||||
from typing import Optional
|
||||
from lbry.wallet.server.db import DB_PREFIXES, COLUMN_SETTINGS
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
|
||||
|
||||
|
||||
class BasePrefixDB:
|
||||
"""
|
||||
Base class for a revertable rocksdb database (a rocksdb db where each set of applied changes can be undone)
|
||||
"""
|
||||
UNDO_KEY_STRUCT = struct.Struct(b'>Q32s')
|
||||
PARTIAL_UNDO_KEY_STRUCT = struct.Struct(b'>Q')
|
||||
|
||||
def __init__(self, path, max_open_files=64, secondary_path='', max_undo_depth: int = 200, unsafe_prefixes=None):
|
||||
column_family_options = {}
|
||||
for prefix in DB_PREFIXES:
|
||||
settings = COLUMN_SETTINGS[prefix.value]
|
||||
column_family_options[prefix.value] = rocksdb.ColumnFamilyOptions()
|
||||
column_family_options[prefix.value].table_factory = rocksdb.BlockBasedTableFactory(
|
||||
block_cache=rocksdb.LRUCache(settings['cache_size']),
|
||||
)
|
||||
self.column_families: typing.Dict[bytes, 'rocksdb.ColumnFamilyHandle'] = {}
|
||||
options = rocksdb.Options(
|
||||
create_if_missing=True, use_fsync=False, target_file_size_base=33554432,
|
||||
max_open_files=max_open_files if not secondary_path else -1, create_missing_column_families=True
|
||||
)
|
||||
self._db = rocksdb.DB(
|
||||
path, options, secondary_name=secondary_path, column_families=column_family_options
|
||||
)
|
||||
for prefix in DB_PREFIXES:
|
||||
cf = self._db.get_column_family(prefix.value)
|
||||
if cf is None and not secondary_path:
|
||||
self._db.create_column_family(prefix.value, column_family_options[prefix.value])
|
||||
cf = self._db.get_column_family(prefix.value)
|
||||
self.column_families[prefix.value] = cf
|
||||
|
||||
self._op_stack = RevertableOpStack(self.get, unsafe_prefixes=unsafe_prefixes)
|
||||
self._max_undo_depth = max_undo_depth
|
||||
|
||||
def unsafe_commit(self):
|
||||
"""
|
||||
Write staged changes to the database without keeping undo information
|
||||
Changes written cannot be undone
|
||||
"""
|
||||
try:
|
||||
if not len(self._op_stack):
|
||||
return
|
||||
with self._db.write_batch(sync=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
get_column_family = self.column_families.__getitem__
|
||||
for staged_change in self._op_stack:
|
||||
column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
|
||||
if staged_change.is_put:
|
||||
batch_put((column_family, staged_change.key), staged_change.value)
|
||||
else:
|
||||
batch_delete((column_family, staged_change.key))
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def commit(self, height: int, block_hash: bytes):
|
||||
"""
|
||||
Write changes for a block height to the database and keep undo information so that the changes can be reverted
|
||||
"""
|
||||
undo_ops = self._op_stack.get_undo_ops()
|
||||
delete_undos = []
|
||||
if height > self._max_undo_depth:
|
||||
delete_undos.extend(self._db.iterator(
|
||||
start=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(0),
|
||||
iterate_upper_bound=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(height - self._max_undo_depth),
|
||||
include_value=False
|
||||
))
|
||||
try:
|
||||
undo_c_f = self.column_families[DB_PREFIXES.undo.value]
|
||||
with self._db.write_batch(sync=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
get_column_family = self.column_families.__getitem__
|
||||
for staged_change in self._op_stack:
|
||||
column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
|
||||
if staged_change.is_put:
|
||||
batch_put((column_family, staged_change.key), staged_change.value)
|
||||
else:
|
||||
batch_delete((column_family, staged_change.key))
|
||||
for undo_to_delete in delete_undos:
|
||||
batch_delete((undo_c_f, undo_to_delete))
|
||||
batch_put((undo_c_f, DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash)), undo_ops)
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def rollback(self, height: int, block_hash: bytes):
|
||||
"""
|
||||
Revert changes for a block height
|
||||
"""
|
||||
undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash)
|
||||
undo_c_f = self.column_families[DB_PREFIXES.undo.value]
|
||||
undo_info = self._db.get((undo_c_f, undo_key))
|
||||
self._op_stack.apply_packed_undo_ops(undo_info)
|
||||
try:
|
||||
with self._db.write_batch(sync=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
get_column_family = self.column_families.__getitem__
|
||||
for staged_change in self._op_stack:
|
||||
column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
|
||||
if staged_change.is_put:
|
||||
batch_put((column_family, staged_change.key), staged_change.value)
|
||||
else:
|
||||
batch_delete((column_family, staged_change.key))
|
||||
# batch_delete(undo_key)
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
|
||||
cf = self.column_families[key[:1]]
|
||||
return self._db.get((cf, key), fill_cache=fill_cache)
|
||||
|
||||
def iterator(self, start: bytes, column_family: 'rocksdb.ColumnFamilyHandle' = None,
|
||||
iterate_lower_bound: bytes = None, iterate_upper_bound: bytes = None,
|
||||
reverse: bool = False, include_key: bool = True, include_value: bool = True,
|
||||
fill_cache: bool = True, prefix_same_as_start: bool = False, auto_prefix_mode: bool = True):
|
||||
return self._db.iterator(
|
||||
start=start, column_family=column_family, iterate_lower_bound=iterate_lower_bound,
|
||||
iterate_upper_bound=iterate_upper_bound, reverse=reverse, include_key=include_key,
|
||||
include_value=include_value, fill_cache=fill_cache, prefix_same_as_start=prefix_same_as_start,
|
||||
auto_prefix_mode=auto_prefix_mode
|
||||
)
|
||||
|
||||
def close(self):
|
||||
self._db.close()
|
||||
|
||||
def try_catch_up_with_primary(self):
|
||||
self._db.try_catch_up_with_primary()
|
||||
|
||||
def stage_raw_put(self, key: bytes, value: bytes):
|
||||
self._op_stack.append_op(RevertablePut(key, value))
|
||||
|
||||
def stage_raw_delete(self, key: bytes, value: bytes):
|
||||
self._op_stack.append_op(RevertableDelete(key, value))
|
||||
|
||||
def estimate_num_keys(self, column_family: 'rocksdb.ColumnFamilyHandle' = None):
|
||||
return int(self._db.get_property(b'rocksdb.estimate-num-keys', column_family).decode())
|
|
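A minimal usage sketch of the revertable RocksDB wrapper defined above. It assumes the forked `rocksdb` bindings with column-family support used by this branch; the path and key/value bytes are placeholders, and `claim_to_txo` is used only because its prefix byte is known to be registered in `DB_PREFIXES` (any registered prefix works).

```python
from lbry.wallet.server.db import DB_PREFIXES
from lbry.wallet.server.db.interface import BasePrefixDB

db = BasePrefixDB('/tmp/example-rocksdb', max_undo_depth=200)
block_hash = bytes(32)  # placeholder 32-byte block hash

# stage a raw write and commit it for block 1, keeping undo information
db.stage_raw_put(DB_PREFIXES.claim_to_txo.value + b'example-key', b'example-value')
db.commit(height=1, block_hash=block_hash)

# ...later, revert everything written for block 1 (e.g. on a reorg)
db.rollback(height=1, block_hash=block_hash)

db.close()
```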
@ -2,9 +2,11 @@ import typing
|
|||
import struct
|
||||
import array
|
||||
import base64
|
||||
import rocksdb
|
||||
import rocksdb.interfaces
|
||||
from typing import Union, Tuple, NamedTuple, Optional
|
||||
from lbry.wallet.server.db import DB_PREFIXES
|
||||
from lbry.wallet.server.db.db import KeyValueStorage, PrefixDB
|
||||
from lbry.wallet.server.db import DB_PREFIXES, COLUMN_SETTINGS
|
||||
from lbry.wallet.server.db.interface import BasePrefixDB
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
|
||||
from lbry.schema.url import normalize_name
|
||||
|
||||
|
@ -29,6 +31,10 @@ class PrefixRowType(type):
|
|||
klass = super().__new__(cls, name, bases, kwargs)
|
||||
if name != "PrefixRow":
|
||||
ROW_TYPES[klass.prefix] = klass
|
||||
cache_size = klass.cache_size
|
||||
COLUMN_SETTINGS[klass.prefix] = {
|
||||
'cache_size': cache_size,
|
||||
}
|
||||
return klass
|
||||
|
||||
|
||||
|
@ -37,22 +43,34 @@ class PrefixRow(metaclass=PrefixRowType):
|
|||
key_struct: struct.Struct
|
||||
value_struct: struct.Struct
|
||||
key_part_lambdas = []
|
||||
cache_size: int = 1024 * 1024 * 64
|
||||
|
||||
def __init__(self, db: KeyValueStorage, op_stack: RevertableOpStack):
|
||||
def __init__(self, db: 'rocksdb.DB', op_stack: RevertableOpStack):
|
||||
self._db = db
|
||||
self._op_stack = op_stack
|
||||
self._column_family = self._db.get_column_family(self.prefix)
|
||||
if not self._column_family.is_valid:
|
||||
raise RuntimeError('column family is not valid')
|
||||
|
||||
def iterate(self, prefix=None, start=None, stop=None,
|
||||
reverse: bool = False, include_key: bool = True, include_value: bool = True,
|
||||
fill_cache: bool = True, deserialize_key: bool = True, deserialize_value: bool = True):
|
||||
def iterate(self, prefix=None, start=None, stop=None, reverse: bool = False, include_key: bool = True,
|
||||
include_value: bool = True, fill_cache: bool = True, deserialize_key: bool = True,
|
||||
deserialize_value: bool = True):
|
||||
if not prefix and not start and not stop:
|
||||
prefix = ()
|
||||
if prefix is not None:
|
||||
prefix = self.pack_partial_key(*prefix)
|
||||
if start is not None:
|
||||
start = self.pack_partial_key(*start)
|
||||
if stop is not None:
|
||||
stop = self.pack_partial_key(*stop)
|
||||
if stop is None:
|
||||
try:
|
||||
stop = (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix), byteorder='big')
|
||||
except OverflowError:
|
||||
stop = (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix) + 1, byteorder='big')
|
||||
else:
|
||||
stop = self.pack_partial_key(*stop)
|
||||
else:
|
||||
if start is not None:
|
||||
start = self.pack_partial_key(*start)
|
||||
if stop is not None:
|
||||
stop = self.pack_partial_key(*stop)
|
||||
|
||||
if deserialize_key:
|
||||
key_getter = lambda k: self.unpack_key(k)
|
||||
|
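The stop bound used in the new `iterate` derives an exclusive upper bound for a prefix scan by treating the prefix as a big-endian integer and adding one; a standalone sketch of that computation:

```python
def exclusive_upper_bound(prefix: bytes) -> bytes:
    try:
        return (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix), byteorder='big')
    except OverflowError:
        # the prefix was all 0xff bytes, so the bound needs one extra byte
        return (int.from_bytes(prefix, byteorder='big') + 1).to_bytes(len(prefix) + 1, byteorder='big')

assert exclusive_upper_bound(b'\x01\x02') == b'\x01\x03'
assert exclusive_upper_bound(b'\xff\xff') == b'\x01\x00\x00'
```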
@ -63,25 +81,27 @@ class PrefixRow(metaclass=PrefixRowType):
|
|||
else:
|
||||
value_getter = lambda v: v
|
||||
|
||||
it = self._db.iterator(
|
||||
start or prefix, self._column_family, iterate_lower_bound=(start or prefix),
|
||||
iterate_upper_bound=stop, reverse=reverse, include_key=include_key,
|
||||
include_value=include_value, fill_cache=fill_cache, prefix_same_as_start=False
|
||||
)
|
||||
|
||||
if include_key and include_value:
|
||||
for k, v in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse,
|
||||
fill_cache=fill_cache):
|
||||
yield key_getter(k), value_getter(v)
|
||||
for k, v in it:
|
||||
yield key_getter(k[1]), value_getter(v)
|
||||
elif include_key:
|
||||
for k in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_value=False,
|
||||
fill_cache=fill_cache):
|
||||
yield key_getter(k)
|
||||
for k in it:
|
||||
yield key_getter(k[1])
|
||||
elif include_value:
|
||||
for v in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_key=False,
|
||||
fill_cache=fill_cache):
|
||||
for v in it:
|
||||
yield value_getter(v)
|
||||
else:
|
||||
for _ in self._db.iterator(prefix=prefix, start=start, stop=stop, reverse=reverse, include_key=False,
|
||||
include_value=False, fill_cache=fill_cache):
|
||||
for _ in it:
|
||||
yield None
|
||||
|
||||
def get(self, *key_args, fill_cache=True, deserialize_value=True):
|
||||
v = self._db.get(self.pack_key(*key_args), fill_cache=fill_cache)
|
||||
v = self._db.get((self._column_family, self.pack_key(*key_args)), fill_cache=fill_cache)
|
||||
if v:
|
||||
return v if not deserialize_value else self.unpack_value(v)
|
||||
|
||||
|
@ -93,7 +113,7 @@ class PrefixRow(metaclass=PrefixRowType):
|
|||
return last_op.value if not deserialize_value else self.unpack_value(last_op.value)
|
||||
else: # it's a delete
|
||||
return
|
||||
v = self._db.get(packed_key, fill_cache=fill_cache)
|
||||
v = self._db.get((self._column_family, packed_key), fill_cache=fill_cache)
|
||||
if v:
|
||||
return v if not deserialize_value else self.unpack_value(v)
|
||||
|
||||
|
@ -117,7 +137,7 @@ class PrefixRow(metaclass=PrefixRowType):
|
|||
|
||||
@classmethod
|
||||
def unpack_key(cls, key: bytes):
|
||||
assert key[:1] == cls.prefix
|
||||
assert key[:1] == cls.prefix, f"prefix should be {cls.prefix}, got {key[:1]}"
|
||||
return cls.key_struct.unpack(key[1:])
|
||||
|
||||
@classmethod
|
||||
|
@ -128,6 +148,9 @@ class PrefixRow(metaclass=PrefixRowType):
|
|||
def unpack_item(cls, key: bytes, value: bytes):
|
||||
return cls.unpack_key(key), cls.unpack_value(value)
|
||||
|
||||
def estimate_num_keys(self) -> int:
|
||||
return int(self._db.get_property(b'rocksdb.estimate-num-keys', self._column_family).decode())
|
||||
|
||||
|
||||
class UTXOKey(NamedTuple):
|
||||
hashX: bytes
|
||||
|
@ -205,14 +228,14 @@ class TxHashValue(NamedTuple):
|
|||
tx_hash: bytes
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})"
|
||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
|
||||
|
||||
|
||||
class TxNumKey(NamedTuple):
|
||||
tx_hash: bytes
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})"
|
||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
|
||||
|
||||
|
||||
class TxNumValue(NamedTuple):
|
||||
|
@ -223,14 +246,14 @@ class TxKey(NamedTuple):
|
|||
tx_hash: bytes
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash.hex()})"
|
||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
|
||||
|
||||
|
||||
class TxValue(NamedTuple):
|
||||
raw_tx: bytes
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx)})"
|
||||
return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx).decode()})"
|
||||
|
||||
|
||||
class BlockHeaderKey(NamedTuple):
|
||||
|
@ -549,6 +572,7 @@ class ActiveAmountPrefixRow(PrefixRow):
|
|||
struct.Struct(b'>20sBLL').pack,
|
||||
struct.Struct(b'>20sBLLH').pack
|
||||
]
|
||||
cache_size = 1024 * 1024 * 128
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, claim_hash: bytes, txo_type: int, activation_height: int, tx_num: int, position: int):
|
||||
|
@ -579,6 +603,7 @@ class ClaimToTXOPrefixRow(PrefixRow):
|
|||
lambda: b'',
|
||||
struct.Struct(b'>20s').pack
|
||||
]
|
||||
cache_size = 1024 * 1024 * 128
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, claim_hash: bytes):
|
||||
|
@ -618,6 +643,7 @@ class TXOToClaimPrefixRow(PrefixRow):
|
|||
prefix = DB_PREFIXES.txo_to_claim.value
|
||||
key_struct = struct.Struct(b'>LH')
|
||||
value_struct = struct.Struct(b'>20s')
|
||||
cache_size = 1024 * 1024 * 128
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, tx_num: int, position: int):
|
||||
|
@ -1012,6 +1038,7 @@ class EffectiveAmountPrefixRow(PrefixRow):
|
|||
shortid_key_helper(b'>QL'),
|
||||
shortid_key_helper(b'>QLH'),
|
||||
]
|
||||
cache_size = 1024 * 1024 * 128
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, name: str, effective_amount: int, tx_num: int, position: int):
|
||||
|
@ -1042,6 +1069,7 @@ class EffectiveAmountPrefixRow(PrefixRow):
|
|||
|
||||
class RepostPrefixRow(PrefixRow):
|
||||
prefix = DB_PREFIXES.repost.value
|
||||
key_struct = struct.Struct(b'>20s')
|
||||
|
||||
key_part_lambdas = [
|
||||
lambda: b'',
|
||||
|
@ -1050,13 +1078,11 @@ class RepostPrefixRow(PrefixRow):
|
|||
|
||||
@classmethod
|
||||
def pack_key(cls, claim_hash: bytes):
|
||||
return cls.prefix + claim_hash
|
||||
return super().pack_key(claim_hash)
|
||||
|
||||
@classmethod
|
||||
def unpack_key(cls, key: bytes) -> RepostKey:
|
||||
assert key[:1] == cls.prefix
|
||||
assert len(key) == 21
|
||||
return RepostKey(key[1:])
|
||||
return RepostKey(*super().unpack_key(key))
|
||||
|
||||
@classmethod
|
||||
def pack_value(cls, reposted_claim_hash: bytes) -> bytes:
|
||||
|
@ -1103,24 +1129,28 @@ class RepostedPrefixRow(PrefixRow):
|
|||
return cls.pack_key(reposted_claim_hash, tx_num, position), cls.pack_value(claim_hash)
|
||||
|
||||
|
||||
class UndoKey(NamedTuple):
|
||||
height: int
|
||||
block_hash: bytes
|
||||
|
||||
|
||||
class UndoPrefixRow(PrefixRow):
|
||||
prefix = DB_PREFIXES.undo.value
|
||||
key_struct = struct.Struct(b'>Q')
|
||||
key_struct = struct.Struct(b'>Q32s')
|
||||
|
||||
key_part_lambdas = [
|
||||
lambda: b'',
|
||||
struct.Struct(b'>Q').pack
|
||||
struct.Struct(b'>Q').pack,
|
||||
struct.Struct(b'>Q32s').pack
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, height: int):
|
||||
return super().pack_key(height)
|
||||
def pack_key(cls, height: int, block_hash: bytes):
|
||||
return super().pack_key(height, block_hash)
|
||||
|
||||
@classmethod
|
||||
def unpack_key(cls, key: bytes) -> int:
|
||||
assert key[:1] == cls.prefix
|
||||
height, = cls.key_struct.unpack(key[1:])
|
||||
return height
|
||||
def unpack_key(cls, key: bytes) -> UndoKey:
|
||||
return UndoKey(*super().unpack_key(key))
|
||||
|
||||
@classmethod
|
||||
def pack_value(cls, undo_ops: bytes) -> bytes:
|
||||
|
@ -1131,8 +1161,8 @@ class UndoPrefixRow(PrefixRow):
|
|||
return data
|
||||
|
||||
@classmethod
|
||||
def pack_item(cls, height: int, undo_ops: bytes):
|
||||
return cls.pack_key(height), cls.pack_value(undo_ops)
|
||||
def pack_item(cls, height: int, block_hash: bytes, undo_ops: bytes):
|
||||
return cls.pack_key(height, block_hash), cls.pack_value(undo_ops)
|
||||
|
||||
|
||||
class BlockHashPrefixRow(PrefixRow):
|
||||
|
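With the undo key now carrying the block hash as well as the height, undo data for competing blocks at the same height no longer collide. A packing sketch, with a placeholder hash:

```python
import struct
from lbry.wallet.server.db import DB_PREFIXES

block_hash = bytes(32)  # placeholder
undo_key = DB_PREFIXES.undo.value + struct.Struct(b'>Q32s').pack(1000000, block_hash)
# a height-only partial key is still available for range scans over undo records:
undo_prefix_at_height = DB_PREFIXES.undo.value + struct.Struct(b'>Q').pack(1000000)
```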
@ -1422,7 +1452,7 @@ class HashXHistoryPrefixRow(PrefixRow):
|
|||
|
||||
|
||||
class TouchedOrDeletedPrefixRow(PrefixRow):
|
||||
prefix = DB_PREFIXES.claim_diff.value
|
||||
prefix = DB_PREFIXES.touched_or_deleted.value
|
||||
key_struct = struct.Struct(b'>L')
|
||||
value_struct = struct.Struct(b'>LL')
|
||||
key_part_lambdas = [
|
||||
|
@ -1566,7 +1596,7 @@ class DBStatePrefixRow(PrefixRow):
|
|||
|
||||
|
||||
class BlockTxsPrefixRow(PrefixRow):
|
||||
prefix = DB_PREFIXES.block_txs.value
|
||||
prefix = DB_PREFIXES.block_tx.value
|
||||
key_struct = struct.Struct(b'>L')
|
||||
key_part_lambdas = [
|
||||
lambda: b'',
|
||||
|
@ -1595,41 +1625,139 @@ class BlockTxsPrefixRow(PrefixRow):
|
|||
return cls.pack_key(height), cls.pack_value(tx_hashes)
|
||||
|
||||
|
||||
class LevelDBStore(KeyValueStorage):
|
||||
def __init__(self, path: str, cache_mb: int, max_open_files: int):
|
||||
import plyvel
|
||||
self.db = plyvel.DB(
|
||||
path, create_if_missing=True, max_open_files=max_open_files,
|
||||
lru_cache_size=cache_mb * 1024 * 1024, write_buffer_size=64 * 1024 * 1024,
|
||||
max_file_size=1024 * 1024 * 64, bloom_filter_bits=32
|
||||
)
|
||||
class MempoolTxKey(NamedTuple):
|
||||
tx_hash: bytes
|
||||
|
||||
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
|
||||
return self.db.get(key, fill_cache=fill_cache)
|
||||
|
||||
def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
|
||||
include_key=True, include_value=True, fill_cache=True):
|
||||
return self.db.iterator(
|
||||
reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop,
|
||||
prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache
|
||||
)
|
||||
|
||||
def write_batch(self, transaction: bool = False, sync: bool = False):
|
||||
return self.db.write_batch(transaction=transaction, sync=sync)
|
||||
|
||||
def close(self):
|
||||
return self.db.close()
|
||||
|
||||
@property
|
||||
def closed(self) -> bool:
|
||||
return self.db.closed
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(tx_hash={self.tx_hash[::-1].hex()})"
|
||||
|
||||
|
||||
class HubDB(PrefixDB):
|
||||
def __init__(self, path: str, cache_mb: int = 128, reorg_limit: int = 200, max_open_files: int = 512,
|
||||
unsafe_prefixes: Optional[typing.Set[bytes]] = None):
|
||||
db = LevelDBStore(path, cache_mb, max_open_files)
|
||||
super().__init__(db, reorg_limit, unsafe_prefixes=unsafe_prefixes)
|
||||
class MempoolTxValue(NamedTuple):
|
||||
raw_tx: bytes
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(raw_tx={base64.b64encode(self.raw_tx).decode()})"
|
||||
|
||||
|
||||
class MempoolTXPrefixRow(PrefixRow):
|
||||
prefix = DB_PREFIXES.mempool_tx.value
|
||||
key_struct = struct.Struct(b'>32s')
|
||||
|
||||
key_part_lambdas = [
|
||||
lambda: b'',
|
||||
struct.Struct(b'>32s').pack
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, tx_hash: bytes) -> bytes:
|
||||
return super().pack_key(tx_hash)
|
||||
|
||||
@classmethod
|
||||
def unpack_key(cls, tx_hash: bytes) -> MempoolTxKey:
|
||||
return MempoolTxKey(*super().unpack_key(tx_hash))
|
||||
|
||||
@classmethod
|
||||
def pack_value(cls, tx: bytes) -> bytes:
|
||||
return tx
|
||||
|
||||
@classmethod
|
||||
def unpack_value(cls, data: bytes) -> MempoolTxValue:
|
||||
return MempoolTxValue(data)
|
||||
|
||||
@classmethod
|
||||
def pack_item(cls, tx_hash: bytes, raw_tx: bytes):
|
||||
return cls.pack_key(tx_hash), cls.pack_value(raw_tx)
|
||||
|
||||
|
||||
class TrendingNotificationKey(typing.NamedTuple):
|
||||
height: int
|
||||
claim_hash: bytes
|
||||
|
||||
|
||||
class TrendingNotificationValue(typing.NamedTuple):
|
||||
previous_amount: int
|
||||
new_amount: int
|
||||
|
||||
|
||||
class TrendingNotificationPrefixRow(PrefixRow):
|
||||
prefix = DB_PREFIXES.trending_notifications.value
|
||||
key_struct = struct.Struct(b'>L20s')
|
||||
value_struct = struct.Struct(b'>QQ')
|
||||
key_part_lambdas = [
|
||||
lambda: b'',
|
||||
struct.Struct(b'>L').pack,
|
||||
struct.Struct(b'>L20s').pack
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, height: int, claim_hash: bytes):
|
||||
return super().pack_key(height, claim_hash)
|
||||
|
||||
@classmethod
|
||||
def unpack_key(cls, key: bytes) -> TrendingNotificationKey:
|
||||
return TrendingNotificationKey(*super().unpack_key(key))
|
||||
|
||||
@classmethod
|
||||
def pack_value(cls, previous_amount: int, new_amount: int) -> bytes:
|
||||
return super().pack_value(previous_amount, new_amount)
|
||||
|
||||
@classmethod
|
||||
def unpack_value(cls, data: bytes) -> TrendingNotificationValue:
|
||||
return TrendingNotificationValue(*super().unpack_value(data))
|
||||
|
||||
@classmethod
|
||||
def pack_item(cls, height, claim_hash, previous_amount, new_amount):
|
||||
return cls.pack_key(height, claim_hash), cls.pack_value(previous_amount, new_amount)
|
||||
|
||||
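A round-trip sketch for the new trending notification rows; the claim hash below is a placeholder. The amounts are stored as integers in dewies (1 LBC = 1e8), matching the `prev_amount / 1E8` conversion done by the ES writer.

```python
claim_hash = bytes(20)  # placeholder 20-byte claim hash
key, value = TrendingNotificationPrefixRow.pack_item(1000000, claim_hash, 0, 100_000_000)
unpacked_key = TrendingNotificationPrefixRow.unpack_key(key)      # TrendingNotificationKey(1000000, claim_hash)
unpacked_value = TrendingNotificationPrefixRow.unpack_value(value)  # TrendingNotificationValue(0, 100000000)
```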
|
||||
class TouchedHashXKey(NamedTuple):
|
||||
height: int
|
||||
|
||||
|
||||
class TouchedHashXValue(NamedTuple):
|
||||
touched_hashXs: typing.List[bytes]
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(touched_hashXs=[{', '.join(map(lambda x: x.hex(), self.touched_hashXs))}])"
|
||||
|
||||
|
||||
class TouchedHashXPrefixRow(PrefixRow):
|
||||
prefix = DB_PREFIXES.touched_hashX.value
|
||||
key_struct = struct.Struct(b'>L')
|
||||
|
||||
key_part_lambdas = [
|
||||
lambda: b'',
|
||||
struct.Struct(b'>L').pack
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def pack_key(cls, height: int):
|
||||
return super().pack_key(height)
|
||||
|
||||
@classmethod
|
||||
def unpack_key(cls, key: bytes) -> TouchedHashXKey:
|
||||
return TouchedHashXKey(*super().unpack_key(key))
|
||||
|
||||
@classmethod
|
||||
def pack_value(cls, touched: typing.List[bytes]) -> bytes:
|
||||
assert all(map(lambda item: len(item) == 11, touched))
|
||||
return b''.join(touched)
|
||||
|
||||
@classmethod
|
||||
def unpack_value(cls, data: bytes) -> TouchedHashXValue:
|
||||
return TouchedHashXValue([data[idx*11:(idx*11)+11] for idx in range(len(data) // 11)])
|
||||
|
||||
@classmethod
|
||||
def pack_item(cls, height: int, touched: typing.List[bytes]):
|
||||
return cls.pack_key(height), cls.pack_value(touched)
|
||||
|
||||
|
||||
class PrefixDB(BasePrefixDB):
|
||||
def __init__(self, path: str, cache_mb: int = 128, reorg_limit: int = 200, max_open_files: int = 64,
|
||||
secondary_path: str = '', unsafe_prefixes: Optional[typing.Set[bytes]] = None):
|
||||
super().__init__(path, max_open_files=max_open_files, secondary_path=secondary_path,
|
||||
max_undo_depth=reorg_limit, unsafe_prefixes=unsafe_prefixes)
|
||||
db = self._db
|
||||
self.claim_to_support = ClaimToSupportPrefixRow(db, self._op_stack)
|
||||
self.support_to_claim = SupportToClaimPrefixRow(db, self._op_stack)
|
||||
self.claim_to_txo = ClaimToTXOPrefixRow(db, self._op_stack)
|
||||
|
@ -1660,6 +1788,9 @@ class HubDB(PrefixDB):
|
|||
self.db_state = DBStatePrefixRow(db, self._op_stack)
|
||||
self.support_amount = SupportAmountPrefixRow(db, self._op_stack)
|
||||
self.block_txs = BlockTxsPrefixRow(db, self._op_stack)
|
||||
self.mempool_tx = MempoolTXPrefixRow(db, self._op_stack)
|
||||
self.trending_notification = TrendingNotificationPrefixRow(db, self._op_stack)
|
||||
self.touched_hashX = TouchedHashXPrefixRow(db, self._op_stack)
|
||||
|
||||
|
||||
def auto_decode_item(key: bytes, value: bytes) -> Union[Tuple[NamedTuple, NamedTuple], Tuple[bytes, bytes]]:
|
||||
|
|
|
@ -30,13 +30,14 @@ class Env:
|
|||
|
||||
def __init__(self, coin=None, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None,
|
||||
elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None, websocket_port=None,
|
||||
chain=None, es_index_prefix=None, es_mode=None, cache_MB=None, reorg_limit=None, tcp_port=None,
|
||||
chain=None, es_index_prefix=None, cache_MB=None, reorg_limit=None, tcp_port=None,
|
||||
udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None, rpc_port=None,
|
||||
prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None,
|
||||
allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None,
|
||||
payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None,
|
||||
session_timeout=None, drop_client=None, description=None, daily_fee=None,
|
||||
database_query_timeout=None, db_max_open_files=512):
|
||||
database_query_timeout=None, db_max_open_files=64, elastic_notifier_port=None,
|
||||
blocking_channel_ids=None, filtering_channel_ids=None, peer_hubs=None, peer_announce=None):
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
|
||||
self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
|
||||
|
@ -47,6 +48,8 @@ class Env:
|
|||
self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost')
|
||||
self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
|
||||
self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
|
||||
self.elastic_notifier_port = elastic_notifier_port if elastic_notifier_port is not None else self.integer('ELASTIC_NOTIFIER_PORT', 19080)
|
||||
|
||||
self.loop_policy = self.set_event_loop_policy(
|
||||
loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None)
|
||||
)
|
||||
|
@ -66,7 +69,6 @@ class Env:
|
|||
else:
|
||||
self.coin = LBCRegTest
|
||||
self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
|
||||
self.es_mode = es_mode if es_mode is not None else self.default('ES_MODE', 'writer')
|
||||
self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
|
||||
self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
|
||||
# Server stuff
|
||||
|
@ -89,8 +91,11 @@ class Env:
|
|||
self.country = country if country is not None else self.default('COUNTRY', 'US')
|
||||
# Peer discovery
|
||||
self.peer_discovery = self.peer_discovery_enum()
|
||||
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
|
||||
self.peer_hubs = self.extract_peer_hubs()
|
||||
self.peer_announce = peer_announce if peer_announce is not None else self.boolean('PEER_ANNOUNCE', True)
|
||||
if peer_hubs is not None:
|
||||
self.peer_hubs = [p.strip("") for p in peer_hubs.split(",")]
|
||||
else:
|
||||
self.peer_hubs = self.extract_peer_hubs()
|
||||
# self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
|
||||
# self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
|
||||
# The electrum client takes the empty string as unspecified
|
||||
|
@ -116,6 +121,10 @@ class Env:
|
|||
self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
|
||||
(float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)
|
||||
|
||||
# Filtering / Blocking
|
||||
self.blocking_channel_ids = blocking_channel_ids if blocking_channel_ids is not None else self.default('BLOCKING_CHANNEL_IDS', '').split(' ')
|
||||
self.filtering_channel_ids = filtering_channel_ids if filtering_channel_ids is not None else self.default('FILTERING_CHANNEL_IDS', '').split(' ')
|
||||
|
||||
@classmethod
|
||||
def default(cls, envvar, default):
|
||||
return environ.get(envvar, default)
|
||||
|
@ -294,8 +303,8 @@ class Env:
|
|||
parser.add_argument('--daemon_url',
|
||||
help='URL for rpc from lbrycrd, <rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>',
|
||||
default=cls.default('DAEMON_URL', None))
|
||||
parser.add_argument('--db_max_open_files', type=int, default=512,
|
||||
help='number of files leveldb can have open at a time')
|
||||
parser.add_argument('--db_max_open_files', type=int, default=64,
|
||||
help='number of files rocksdb can have open at a time')
|
||||
parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
|
||||
help='Interface for hub server to listen on')
|
||||
parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
|
||||
|
@ -322,8 +331,6 @@ class Env:
|
|||
help='elasticsearch host')
|
||||
parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
|
||||
help='elasticsearch port')
|
||||
parser.add_argument('--es_mode', default=cls.default('ES_MODE', 'writer'), type=str,
|
||||
choices=['reader', 'writer'])
|
||||
parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
|
||||
parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str,
|
||||
choices=['default', 'uvloop'])
|
||||
|
@ -364,6 +371,11 @@ class Env:
|
|||
parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
|
||||
help="elasticsearch query timeout")
|
||||
|
||||
parser.add_argument('--blocking_channel_ids', nargs='*', help='',
|
||||
default=cls.default('BLOCKING_CHANNEL_IDS', '').split(' '))
|
||||
parser.add_argument('--filtering_channel_ids', nargs='*', help='',
|
||||
default=cls.default('FILTERING_CHANNEL_IDS', '').split(' '))
|
||||
|
||||
@classmethod
|
||||
def from_arg_parser(cls, args):
|
||||
return cls(
|
||||
|
@ -371,7 +383,7 @@ class Env:
|
|||
host=args.host, rpc_host=args.rpc_host, elastic_host=args.elastic_host, elastic_port=args.elastic_port,
|
||||
loop_policy=args.loop_policy, max_query_workers=args.max_query_workers, websocket_host=args.websocket_host,
|
||||
websocket_port=args.websocket_port, chain=args.chain, es_index_prefix=args.es_index_prefix,
|
||||
es_mode=args.es_mode, cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
|
||||
cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
|
||||
udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile,
|
||||
ssl_keyfile=args.ssl_keyfile, rpc_port=args.rpc_port, prometheus_port=args.prometheus_port,
|
||||
max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs,
|
||||
|
@ -380,5 +392,6 @@ class Env:
|
|||
country=args.country, payment_address=args.payment_address, donation_address=args.donation_address,
|
||||
max_send=args.max_send, max_receive=args.max_receive, max_sessions=args.max_sessions,
|
||||
session_timeout=args.session_timeout, drop_client=args.drop_client, description=args.description,
|
||||
daily_fee=args.daily_fee, database_query_timeout=(args.query_timeout_ms / 1000)
|
||||
daily_fee=args.daily_fee, database_query_timeout=(args.query_timeout_ms / 1000),
|
||||
blocking_channel_ids=args.blocking_channel_ids, filtering_channel_ids=args.filtering_channel_ids
|
||||
)
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -11,14 +11,13 @@ import itertools
|
|||
import time
|
||||
import attr
|
||||
import typing
|
||||
from typing import Set, Optional, Callable, Awaitable
|
||||
from collections import defaultdict
|
||||
from prometheus_client import Histogram
|
||||
from lbry.wallet.server.hash import hash_to_hex_str, hex_str_to_hash
|
||||
from lbry.wallet.server.util import class_logger, chunks
|
||||
from lbry.wallet.server.leveldb import UTXO
|
||||
from lbry.wallet.server.util import class_logger
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.server.session import LBRYSessionManager
|
||||
from wallet.server.db.db import HubDB
|
||||
|
||||
|
||||
@attr.s(slots=True)
|
||||
|
@ -50,269 +49,100 @@ mempool_process_time_metric = Histogram(
|
|||
|
||||
|
||||
class MemPool:
|
||||
def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0):
|
||||
def __init__(self, coin, db: 'HubDB', refresh_secs=1.0):
|
||||
self.coin = coin
|
||||
self._daemon = daemon
|
||||
self._db = db
|
||||
self._touched_mp = {}
|
||||
self._touched_bp = {}
|
||||
self._highest_block = -1
|
||||
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.txs = {}
|
||||
self.hashXs = defaultdict(set) # None can be a key
|
||||
self.cached_compact_histogram = []
|
||||
self.raw_mempool = {}
|
||||
self.touched_hashXs: typing.DefaultDict[bytes, typing.Set[bytes]] = defaultdict(set) # None can be a key
|
||||
self.refresh_secs = refresh_secs
|
||||
self.log_status_secs = log_status_secs
|
||||
# Prevents mempool refreshes during fee histogram calculation
|
||||
self.lock = state_lock
|
||||
self.wakeup = asyncio.Event()
|
||||
self.mempool_process_time_metric = mempool_process_time_metric
|
||||
self.notified_mempool_txs = set()
|
||||
self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None
|
||||
self.session_manager: typing.Optional['LBRYSessionManager'] = None
|
||||
|
||||
async def _logging(self, synchronized_event):
|
||||
"""Print regular logs of mempool stats."""
|
||||
self.logger.info('beginning processing of daemon mempool. '
|
||||
'This can take some time...')
|
||||
start = time.perf_counter()
|
||||
await synchronized_event.wait()
|
||||
elapsed = time.perf_counter() - start
|
||||
self.logger.info(f'synced in {elapsed:.2f}s')
|
||||
while True:
|
||||
self.logger.info(f'{len(self.txs):,d} txs '
|
||||
f'touching {len(self.hashXs):,d} addresses')
|
||||
await asyncio.sleep(self.log_status_secs)
|
||||
await synchronized_event.wait()
|
||||
def refresh(self) -> typing.Set[bytes]: # returns list of new touched hashXs
|
||||
prefix_db = self._db.prefix_db
|
||||
new_mempool = {k.tx_hash: v.raw_tx for k, v in prefix_db.mempool_tx.iterate()} # TODO: make this more efficient
|
||||
self.raw_mempool.clear()
|
||||
self.raw_mempool.update(new_mempool)
|
||||
|
||||
def _accept_transactions(self, tx_map, utxo_map, touched):
|
||||
"""Accept transactions in tx_map to the mempool if all their inputs
|
||||
can be found in the existing mempool or a utxo_map from the
|
||||
DB.
|
||||
# hashXs = self.hashXs # hashX: [tx_hash, ...]
|
||||
touched_hashXs = set()
|
||||
|
||||
Returns an (unprocessed tx_map, unspent utxo_map) pair.
|
||||
"""
|
||||
hashXs = self.hashXs
|
||||
txs = self.txs
|
||||
|
||||
deferred = {}
|
||||
unspent = set(utxo_map)
|
||||
# Try to find all prevouts so we can accept the TX
|
||||
for hash, tx in tx_map.items():
|
||||
in_pairs = []
|
||||
try:
|
||||
for prevout in tx.prevouts:
|
||||
utxo = utxo_map.get(prevout)
|
||||
if not utxo:
|
||||
prev_hash, prev_index = prevout
|
||||
# Raises KeyError if prev_hash is not in txs
|
||||
utxo = txs[prev_hash].out_pairs[prev_index]
|
||||
in_pairs.append(utxo)
|
||||
except KeyError:
|
||||
deferred[hash] = tx
|
||||
continue
|
||||
|
||||
# Spend the prevouts
|
||||
unspent.difference_update(tx.prevouts)
|
||||
|
||||
# Save the in_pairs, compute the fee and accept the TX
|
||||
tx.in_pairs = tuple(in_pairs)
|
||||
# Avoid negative fees if dealing with generation-like transactions
|
||||
# because some in_parts would be missing
|
||||
tx.fee = max(0, (sum(v for _, v in tx.in_pairs) -
|
||||
sum(v for _, v in tx.out_pairs)))
|
||||
txs[hash] = tx
|
||||
|
||||
for hashX, value in itertools.chain(tx.in_pairs, tx.out_pairs):
|
||||
touched.add(hashX)
|
||||
hashXs[hashX].add(hash)
|
||||
|
||||
return deferred, {prevout: utxo_map[prevout] for prevout in unspent}
|
||||
|
||||
async def _mempool_loop(self, synchronized_event):
|
||||
try:
|
||||
return await self._refresh_hashes(synchronized_event)
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception as e:
|
||||
self.logger.exception("MEMPOOL DIED")
|
||||
raise e
|
||||
|
||||
async def _refresh_hashes(self, synchronized_event):
|
||||
"""Refresh our view of the daemon's mempool."""
|
||||
while True:
|
||||
start = time.perf_counter()
|
||||
height = self._daemon.cached_height()
|
||||
hex_hashes = await self._daemon.mempool_hashes()
|
||||
if height != await self._daemon.height():
|
||||
continue
|
||||
hashes = {hex_str_to_hash(hh) for hh in hex_hashes}
|
||||
async with self.lock:
|
||||
new_hashes = hashes.difference(self.notified_mempool_txs)
|
||||
touched = await self._process_mempool(hashes)
|
||||
self.notified_mempool_txs.update(new_hashes)
|
||||
new_touched = {
|
||||
touched_hashx for touched_hashx, txs in self.hashXs.items() if txs.intersection(new_hashes)
|
||||
}
|
||||
synchronized_event.set()
|
||||
synchronized_event.clear()
|
||||
await self.on_mempool(touched, new_touched, height)
|
||||
duration = time.perf_counter() - start
|
||||
self.mempool_process_time_metric.observe(duration)
|
||||
try:
|
||||
# we wait up to `refresh_secs` but go early if a broadcast happens (which triggers wakeup event)
|
||||
await asyncio.wait_for(self.wakeup.wait(), timeout=self.refresh_secs)
|
||||
except asyncio.TimeoutError:
|
||||
pass
|
||||
finally:
|
||||
self.wakeup.clear()
|
||||
|
||||
async def _process_mempool(self, all_hashes):
|
||||
# Re-sync with the new set of hashes
|
||||
txs = self.txs
|
||||
|
||||
hashXs = self.hashXs # hashX: [tx_hash, ...]
|
||||
touched = set()
|
||||
|
||||
# First handle txs that have disappeared
|
||||
for tx_hash in set(txs).difference(all_hashes):
|
||||
tx = txs.pop(tx_hash)
|
||||
tx_hashXs = {hashX for hashX, value in tx.in_pairs}
|
||||
tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
|
||||
# Remove txs that aren't in mempool anymore
|
||||
for tx_hash in set(self.txs).difference(self.raw_mempool.keys()):
|
||||
tx = self.txs.pop(tx_hash)
|
||||
tx_hashXs = {hashX for hashX, value in tx.in_pairs}.union({hashX for hashX, value in tx.out_pairs})
|
||||
for hashX in tx_hashXs:
|
||||
hashXs[hashX].remove(tx_hash)
|
||||
if not hashXs[hashX]:
|
||||
del hashXs[hashX]
|
||||
touched.update(tx_hashXs)
|
||||
|
||||
# Process new transactions
|
||||
new_hashes = list(all_hashes.difference(txs))
|
||||
if new_hashes:
|
||||
fetches = []
|
||||
for hashes in chunks(new_hashes, 200):
|
||||
fetches.append(self._fetch_and_accept(hashes, all_hashes, touched))
|
||||
tx_map = {}
|
||||
utxo_map = {}
|
||||
for fetch in asyncio.as_completed(fetches):
|
||||
deferred, unspent = await fetch
|
||||
tx_map.update(deferred)
|
||||
utxo_map.update(unspent)
|
||||
|
||||
prior_count = 0
|
||||
# FIXME: this is not particularly efficient
|
||||
while tx_map and len(tx_map) != prior_count:
|
||||
prior_count = len(tx_map)
|
||||
tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map, touched)
|
||||
|
||||
if tx_map:
|
||||
self.logger.info(f'{len(tx_map)} txs dropped')
|
||||
|
||||
return touched
|
||||
|
||||
async def _fetch_and_accept(self, hashes, all_hashes, touched):
|
||||
"""Fetch a list of mempool transactions."""
|
||||
raw_txs = await self._daemon.getrawtransactions((hash_to_hex_str(hash) for hash in hashes))
|
||||
|
||||
to_hashX = self.coin.hashX_from_script
|
||||
deserializer = self.coin.DESERIALIZER
|
||||
if hashX in self.touched_hashXs and tx_hash in self.touched_hashXs[hashX]:
|
||||
self.touched_hashXs[hashX].remove(tx_hash)
|
||||
if not self.touched_hashXs[hashX]:
|
||||
self.touched_hashXs.pop(hashX)
|
||||
touched_hashXs.update(tx_hashXs)
|
||||
|
||||
# Re-sync with the new set of hashes
|
||||
tx_map = {}
|
||||
for hash, raw_tx in zip(hashes, raw_txs):
|
||||
# The daemon may have evicted the tx from its
|
||||
# mempool or it may have gotten in a block
|
||||
if not raw_tx:
|
||||
for tx_hash, raw_tx in self.raw_mempool.items():
|
||||
if tx_hash in self.txs:
|
||||
continue
|
||||
tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
|
||||
tx, tx_size = self.coin.DESERIALIZER(raw_tx).read_tx_and_vsize()
|
||||
# Convert the inputs and outputs into (hashX, value) pairs
|
||||
# Drop generation-like inputs from MemPoolTx.prevouts
|
||||
txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
|
||||
for txin in tx.inputs
|
||||
if not txin.is_generation())
|
||||
txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
|
||||
txout_pairs = tuple((self.coin.hashX_from_script(txout.pk_script), txout.value)
|
||||
for txout in tx.outputs)
|
||||
tx_map[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
|
||||
0, tx_size, raw_tx)
|
||||
|
||||
# Determine all prevouts not in the mempool, and fetch the
|
||||
# UTXO information from the database. Failed prevout lookups
|
||||
# return None - concurrent database updates happen - which is
|
||||
# relied upon by _accept_transactions. Ignore prevouts that are
|
||||
# generation-like.
|
||||
prevouts = tuple(prevout for tx in tx_map.values()
|
||||
for prevout in tx.prevouts
|
||||
if prevout[0] not in all_hashes)
|
||||
utxos = await self._db.lookup_utxos(prevouts)
|
||||
utxo_map = dict(zip(prevouts, utxos))
|
||||
tx_map[tx_hash] = MemPoolTx(None, txin_pairs, txout_pairs, 0, tx_size, raw_tx)
|
||||
|
||||
return self._accept_transactions(tx_map, utxo_map, touched)
|
||||
for tx_hash, tx in tx_map.items():
|
||||
prevouts = []
|
||||
# Look up the prevouts
|
||||
for prev_hash, prev_index in tx.in_pairs:
|
||||
if prev_hash in self.txs: # accepted mempool
|
||||
utxo = self.txs[prev_hash].out_pairs[prev_index]
|
||||
elif prev_hash in tx_map: # this set of changes
|
||||
utxo = tx_map[prev_hash].out_pairs[prev_index]
|
||||
else: # get it from the db
|
||||
prev_tx_num = prefix_db.tx_num.get(prev_hash)
|
||||
if not prev_tx_num:
|
||||
continue
|
||||
prev_tx_num = prev_tx_num.tx_num
|
||||
hashX_val = prefix_db.hashX_utxo.get(prev_hash[:4], prev_tx_num, prev_index)
|
||||
if not hashX_val:
|
||||
continue
|
||||
hashX = hashX_val.hashX
|
||||
utxo_value = prefix_db.utxo.get(hashX, prev_tx_num, prev_index)
|
||||
utxo = (hashX, utxo_value.amount)
|
||||
prevouts.append(utxo)
|
||||
|
||||
#
|
||||
# External interface
|
||||
#
|
||||
# Save the prevouts, compute the fee and accept the TX
|
||||
tx.prevouts = tuple(prevouts)
|
||||
# Avoid negative fees if dealing with generation-like transactions
|
||||
# because some in_parts would be missing
|
||||
tx.fee = max(0, (sum(v for _, v in tx.prevouts) -
|
||||
sum(v for _, v in tx.out_pairs)))
|
||||
self.txs[tx_hash] = tx
|
||||
# print(f"added {tx_hash[::-1].hex()} reader to mempool")
|
||||
|
||||
async def keep_synchronized(self, synchronized_event):
|
||||
"""Keep the mempool synchronized with the daemon."""
|
||||
await asyncio.wait([
|
||||
self._mempool_loop(synchronized_event),
|
||||
# self._refresh_histogram(synchronized_event),
|
||||
self._logging(synchronized_event)
|
||||
])
|
||||
|
||||
async def balance_delta(self, hashX):
|
||||
"""Return the unconfirmed amount in the mempool for hashX.
|
||||
|
||||
Can be positive or negative.
|
||||
"""
|
||||
value = 0
|
||||
if hashX in self.hashXs:
|
||||
for hash in self.hashXs[hashX]:
|
||||
tx = self.txs[hash]
|
||||
value -= sum(v for h168, v in tx.in_pairs if h168 == hashX)
|
||||
value += sum(v for h168, v in tx.out_pairs if h168 == hashX)
|
||||
return value
|
||||
|
||||
def compact_fee_histogram(self):
|
||||
"""Return a compact fee histogram of the current mempool."""
|
||||
return self.cached_compact_histogram
|
||||
|
||||
async def potential_spends(self, hashX):
|
||||
"""Return a set of (prev_hash, prev_idx) pairs from mempool
|
||||
transactions that touch hashX.
|
||||
|
||||
None, some or all of these may be spends of the hashX, but all
|
||||
actual spends of it (in the DB or mempool) will be included.
|
||||
"""
|
||||
result = set()
|
||||
for tx_hash in self.hashXs.get(hashX, ()):
|
||||
tx = self.txs[tx_hash]
|
||||
result.update(tx.prevouts)
|
||||
return result
|
||||
for hashX, value in itertools.chain(tx.prevouts, tx.out_pairs):
|
||||
self.touched_hashXs[hashX].add(tx_hash)
|
||||
touched_hashXs.add(hashX)
|
||||
return touched_hashXs
|
||||
|
||||
def transaction_summaries(self, hashX):
|
||||
"""Return a list of MemPoolTxSummary objects for the hashX."""
|
||||
result = []
|
||||
for tx_hash in self.hashXs.get(hashX, ()):
|
||||
for tx_hash in self.touched_hashXs.get(hashX, ()):
|
||||
if tx_hash not in self.txs:
|
||||
continue # the tx hash for the touched address is an input that isn't in mempool anymore
|
||||
tx = self.txs[tx_hash]
|
||||
has_ui = any(hash in self.txs for hash, idx in tx.prevouts)
|
||||
has_ui = any(hash in self.txs for hash, idx in tx.in_pairs)
|
||||
result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui))
|
||||
return result
|
||||
|
||||
async def unordered_UTXOs(self, hashX):
|
||||
"""Return an unordered list of UTXO named tuples from mempool
|
||||
transactions that pay to hashX.
|
||||
|
||||
This does not consider if any other mempool transactions spend
|
||||
the outputs.
|
||||
"""
|
||||
utxos = []
|
||||
for tx_hash in self.hashXs.get(hashX, ()):
|
||||
tx = self.txs.get(tx_hash)
|
||||
for pos, (hX, value) in enumerate(tx.out_pairs):
|
||||
if hX == hashX:
|
||||
utxos.append(UTXO(-1, pos, tx_hash, 0, value))
|
||||
return utxos
|
||||
|
||||
def get_mempool_height(self, tx_hash):
|
||||
def get_mempool_height(self, tx_hash: bytes) -> int:
|
||||
# Height Progression
|
||||
# -2: not broadcast
|
||||
# -1: in mempool but has unconfirmed inputs
|
||||
|
@ -321,41 +151,57 @@ class MemPool:
|
|||
if tx_hash not in self.txs:
|
||||
return -2
|
||||
tx = self.txs[tx_hash]
|
||||
unspent_inputs = sum(1 if hash in self.txs else 0 for hash, idx in tx.prevouts)
|
||||
unspent_inputs = any(hash in self.raw_mempool for hash, idx in tx.in_pairs)
|
||||
if unspent_inputs:
|
||||
return -1
|
||||
return 0
|
||||
|
||||
async def _maybe_notify(self, new_touched):
|
||||
tmp, tbp = self._touched_mp, self._touched_bp
|
||||
common = set(tmp).intersection(tbp)
|
||||
if common:
|
||||
height = max(common)
|
||||
elif tmp and max(tmp) == self._highest_block:
|
||||
height = self._highest_block
|
||||
else:
|
||||
# Either we are processing a block and waiting for it to
|
||||
# come in, or we have not yet had a mempool update for the
|
||||
# new block height
|
||||
return
|
||||
touched = tmp.pop(height)
|
||||
for old in [h for h in tmp if h <= height]:
|
||||
del tmp[old]
|
||||
for old in [h for h in tbp if h <= height]:
|
||||
touched.update(tbp.pop(old))
|
||||
# print("notify", height, len(touched), len(new_touched))
|
||||
await self.notify_sessions(height, touched, new_touched)
|
||||
|
||||
async def start(self, height, session_manager: 'LBRYSessionManager'):
|
||||
self._highest_block = height
|
||||
self.notify_sessions = session_manager._notify_sessions
|
||||
await self.notify_sessions(height, set(), set())
|
||||
self.session_manager = session_manager
|
||||
await self._notify_sessions(height, set(), set())
|
||||
|
||||
async def on_mempool(self, touched, new_touched, height):
|
||||
self._touched_mp[height] = touched
|
||||
await self._maybe_notify(new_touched)
|
||||
await self._notify_sessions(height, touched, new_touched)
|
||||
|
||||
async def on_block(self, touched, height):
|
||||
self._touched_bp[height] = touched
|
||||
self._highest_block = height
|
||||
await self._maybe_notify(set())
|
||||
await self._notify_sessions(height, touched, set())
|
||||
|
||||
async def _notify_sessions(self, height, touched, new_touched):
|
||||
"""Notify sessions about height changes and touched addresses."""
|
||||
height_changed = height != self.session_manager.notified_height
|
||||
if height_changed:
|
||||
await self.session_manager._refresh_hsub_results(height)
|
||||
|
||||
if not self.session_manager.sessions:
|
||||
return
|
||||
|
||||
if height_changed:
|
||||
header_tasks = [
|
||||
session.send_notification('blockchain.headers.subscribe', (self.session_manager.hsub_results[session.subscribe_headers_raw], ))
|
||||
for session in self.session_manager.sessions.values() if session.subscribe_headers
|
||||
]
|
||||
if header_tasks:
|
||||
self.logger.info(f'notify {len(header_tasks)} sessions of new header')
|
||||
asyncio.create_task(asyncio.wait(header_tasks))
|
||||
for hashX in touched.intersection(self.session_manager.mempool_statuses.keys()):
|
||||
self.session_manager.mempool_statuses.pop(hashX, None)
|
||||
# self.bp._chain_executor
|
||||
await asyncio.get_event_loop().run_in_executor(
|
||||
self._db._executor, touched.intersection_update, self.session_manager.hashx_subscriptions_by_session.keys()
|
||||
)
|
||||
|
||||
if touched or new_touched or (height_changed and self.session_manager.mempool_statuses):
|
||||
notified_hashxs = 0
|
||||
session_hashxes_to_notify = defaultdict(list)
|
||||
to_notify = touched if height_changed else new_touched
|
||||
|
||||
for hashX in to_notify:
|
||||
if hashX not in self.session_manager.hashx_subscriptions_by_session:
|
||||
continue
|
||||
for session_id in self.session_manager.hashx_subscriptions_by_session[hashX]:
|
||||
session_hashxes_to_notify[session_id].append(hashX)
|
||||
notified_hashxs += 1
|
||||
for session_id, hashXes in session_hashxes_to_notify.items():
|
||||
asyncio.create_task(self.session_manager.sessions[session_id].send_history_notifications(*hashXes))
|
||||
if session_hashxes_to_notify:
|
||||
self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses')
|
||||
|
|
|
@ -1,135 +0,0 @@
|
|||
import time
|
||||
import math
|
||||
from typing import Tuple
|
||||
|
||||
|
||||
def calculate_elapsed(start) -> int:
|
||||
return int((time.perf_counter() - start) * 1000)
|
||||
|
||||
|
||||
def calculate_avg_percentiles(data) -> Tuple[int, int, int, int, int, int, int, int]:
|
||||
if not data:
|
||||
return 0, 0, 0, 0, 0, 0, 0, 0
|
||||
data.sort()
|
||||
size = len(data)
|
||||
return (
|
||||
int(sum(data) / size),
|
||||
data[0],
|
||||
data[math.ceil(size * .05) - 1],
|
||||
data[math.ceil(size * .25) - 1],
|
||||
data[math.ceil(size * .50) - 1],
|
||||
data[math.ceil(size * .75) - 1],
|
||||
data[math.ceil(size * .95) - 1],
|
||||
data[-1]
|
||||
)
|
||||
|
||||
|
||||
def remove_select_list(sql) -> str:
|
||||
return sql[sql.index('FROM'):]
|
||||
|
||||
|
||||
class APICallMetrics:
|
||||
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
# total requests received
|
||||
self.receive_count = 0
|
||||
self.cache_response_count = 0
|
||||
|
||||
# millisecond timings for query based responses
|
||||
self.query_response_times = []
|
||||
self.query_intrp_times = []
|
||||
self.query_error_times = []
|
||||
|
||||
self.query_python_times = []
|
||||
self.query_wait_times = []
|
||||
self.query_sql_times = [] # aggregate total of multiple SQL calls made per request
|
||||
|
||||
self.individual_sql_times = [] # every SQL query run on server
|
||||
|
||||
# actual queries
|
||||
self.errored_queries = set()
|
||||
self.interrupted_queries = set()
|
||||
|
||||
def to_json(self):
|
||||
return {
|
||||
# total requests received
|
||||
"receive_count": self.receive_count,
|
||||
# sum of these is total responses made
|
||||
"cache_response_count": self.cache_response_count,
|
||||
"query_response_count": len(self.query_response_times),
|
||||
"intrp_response_count": len(self.query_intrp_times),
|
||||
"error_response_count": len(self.query_error_times),
|
||||
# millisecond timings for non-cache responses
|
||||
"response": calculate_avg_percentiles(self.query_response_times),
|
||||
"interrupt": calculate_avg_percentiles(self.query_intrp_times),
|
||||
"error": calculate_avg_percentiles(self.query_error_times),
|
||||
# response, interrupt and error each also report the python, wait and sql stats:
|
||||
"python": calculate_avg_percentiles(self.query_python_times),
|
||||
"wait": calculate_avg_percentiles(self.query_wait_times),
|
||||
"sql": calculate_avg_percentiles(self.query_sql_times),
|
||||
# extended timings for individual sql executions
|
||||
"individual_sql": calculate_avg_percentiles(self.individual_sql_times),
|
||||
"individual_sql_count": len(self.individual_sql_times),
|
||||
# actual queries
|
||||
"errored_queries": list(self.errored_queries),
|
||||
"interrupted_queries": list(self.interrupted_queries),
|
||||
}
|
||||
|
||||
def start(self):
|
||||
self.receive_count += 1
|
||||
|
||||
def cache_response(self):
|
||||
self.cache_response_count += 1
|
||||
|
||||
def _add_query_timings(self, request_total_time, metrics):
|
||||
if metrics and 'execute_query' in metrics:
|
||||
sub_process_total = metrics[self.name][0]['total']
|
||||
individual_query_times = [f['total'] for f in metrics['execute_query']]
|
||||
aggregated_query_time = sum(individual_query_times)
|
||||
self.individual_sql_times.extend(individual_query_times)
|
||||
self.query_sql_times.append(aggregated_query_time)
|
||||
self.query_python_times.append(sub_process_total - aggregated_query_time)
|
||||
self.query_wait_times.append(request_total_time - sub_process_total)
|
||||
|
||||
@staticmethod
|
||||
def _add_queries(query_set, metrics):
|
||||
if metrics and 'execute_query' in metrics:
|
||||
for execute_query in metrics['execute_query']:
|
||||
if 'sql' in execute_query:
|
||||
query_set.add(remove_select_list(execute_query['sql']))
|
||||
|
||||
def query_response(self, start, metrics):
|
||||
self.query_response_times.append(calculate_elapsed(start))
|
||||
self._add_query_timings(self.query_response_times[-1], metrics)
|
||||
|
||||
def query_interrupt(self, start, metrics):
|
||||
self.query_intrp_times.append(calculate_elapsed(start))
|
||||
self._add_queries(self.interrupted_queries, metrics)
|
||||
self._add_query_timings(self.query_intrp_times[-1], metrics)
|
||||
|
||||
def query_error(self, start, metrics):
|
||||
self.query_error_times.append(calculate_elapsed(start))
|
||||
self._add_queries(self.errored_queries, metrics)
|
||||
self._add_query_timings(self.query_error_times[-1], metrics)
|
||||
|
||||
|
||||
class ServerLoadData:
|
||||
|
||||
def __init__(self):
|
||||
self._apis = {}
|
||||
|
||||
def for_api(self, name) -> APICallMetrics:
|
||||
if name not in self._apis:
|
||||
self._apis[name] = APICallMetrics(name)
|
||||
return self._apis[name]
|
||||
|
||||
def to_json_and_reset(self, status):
|
||||
try:
|
||||
return {
|
||||
'api': {name: api.to_json() for name, api in self._apis.items()},
|
||||
'status': status
|
||||
}
|
||||
finally:
|
||||
self._apis = {}
|
119
lbry/wallet/server/prefetcher.py
Normal file
119
lbry/wallet/server/prefetcher.py
Normal file
|
@ -0,0 +1,119 @@
|
|||
import asyncio
|
||||
import typing
|
||||
|
||||
from lbry.wallet.server.util import chunks, class_logger
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.server.daemon import LBCDaemon
|
||||
from lbry.wallet.server.coin import Coin
|
||||
|
||||
|
||||
class Prefetcher:
|
||||
"""Prefetches blocks (in the forward direction only)."""
|
||||
|
||||
def __init__(self, daemon: 'LBCDaemon', coin: 'Coin', blocks_event: asyncio.Event):
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.daemon = daemon
|
||||
self.coin = coin
|
||||
self.blocks_event = blocks_event
|
||||
self.blocks = []
|
||||
self.caught_up = False
|
||||
# Access to fetched_height should be protected by the semaphore
|
||||
self.fetched_height = None
|
||||
self.semaphore = asyncio.Semaphore()
|
||||
self.refill_event = asyncio.Event()
|
||||
# The prefetched block cache size. The min cache size has
|
||||
# little effect on sync time.
|
||||
self.cache_size = 0
|
||||
self.min_cache_size = 10 * 1024 * 1024
|
||||
# This makes the first fetch be 10 blocks
|
||||
self.ave_size = self.min_cache_size // 10
|
||||
self.polling_delay = 0.5
|
||||
|
||||
async def main_loop(self, bp_height):
|
||||
"""Loop forever polling for more blocks."""
|
||||
await self.reset_height(bp_height)
|
||||
try:
|
||||
while True:
|
||||
# Sleep a while if there is nothing to prefetch
|
||||
await self.refill_event.wait()
|
||||
if not await self._prefetch_blocks():
|
||||
await asyncio.sleep(self.polling_delay)
|
||||
finally:
|
||||
self.logger.info("block pre-fetcher is shutting down")
|
||||
|
||||
def get_prefetched_blocks(self):
|
||||
"""Called by block processor when it is processing queued blocks."""
|
||||
blocks = self.blocks
|
||||
self.blocks = []
|
||||
self.cache_size = 0
|
||||
self.refill_event.set()
|
||||
return blocks
|
||||
|
||||
async def reset_height(self, height):
|
||||
"""Reset to prefetch blocks from the block processor's height.
|
||||
|
||||
Used in blockchain reorganisations. This coroutine can be
|
||||
called asynchronously to the _prefetch_blocks coroutine so we
|
||||
must synchronize with a semaphore.
|
||||
"""
|
||||
async with self.semaphore:
|
||||
self.blocks.clear()
|
||||
self.cache_size = 0
|
||||
self.fetched_height = height
|
||||
self.refill_event.set()
|
||||
|
||||
daemon_height = await self.daemon.height()
|
||||
behind = daemon_height - height
|
||||
if behind > 0:
|
||||
self.logger.info(f'catching up to daemon height {daemon_height:,d} '
|
||||
f'({behind:,d} blocks behind)')
|
||||
else:
|
||||
self.logger.info(f'caught up to daemon height {daemon_height:,d}')
|
||||
|
||||
async def _prefetch_blocks(self):
|
||||
"""Prefetch some blocks and put them on the queue.
|
||||
|
||||
Repeats until the queue is full or caught up.
|
||||
"""
|
||||
daemon = self.daemon
|
||||
daemon_height = await daemon.height()
|
||||
async with self.semaphore:
|
||||
while self.cache_size < self.min_cache_size:
|
||||
# Try and catch up all blocks but limit to room in cache.
|
||||
# Constrain fetch count to between 0 and 500 regardless;
|
||||
# testnet can be lumpy.
|
||||
cache_room = self.min_cache_size // self.ave_size
|
||||
count = min(daemon_height - self.fetched_height, cache_room)
|
||||
count = min(500, max(count, 0))
|
||||
if not count:
|
||||
self.caught_up = True
|
||||
return False
|
||||
|
||||
first = self.fetched_height + 1
|
||||
hex_hashes = await daemon.block_hex_hashes(first, count)
|
||||
if self.caught_up:
|
||||
self.logger.info('new block height {:,d} hash {}'
|
||||
.format(first + count-1, hex_hashes[-1]))
|
||||
blocks = await daemon.raw_blocks(hex_hashes)
|
||||
|
||||
assert count == len(blocks)
|
||||
|
||||
# Special handling for genesis block
|
||||
if first == 0:
|
||||
blocks[0] = self.coin.genesis_block(blocks[0])
|
||||
self.logger.info(f'verified genesis block with hash {hex_hashes[0]}')
|
||||
|
||||
# Update our recent average block size estimate
|
||||
size = sum(len(block) for block in blocks)
|
||||
if count >= 10:
|
||||
self.ave_size = size // count
|
||||
else:
|
||||
self.ave_size = (size + (10 - count) * self.ave_size) // 10
|
||||
|
||||
self.blocks.extend(blocks)
|
||||
self.cache_size += size
|
||||
self.fetched_height += count
|
||||
self.blocks_event.set()
|
||||
|
||||
self.refill_event.clear()
|
||||
return True
|
|
@ -1,91 +0,0 @@
|
|||
import signal
|
||||
import logging
|
||||
import asyncio
|
||||
from concurrent.futures.thread import ThreadPoolExecutor
|
||||
import typing
|
||||
|
||||
import lbry
|
||||
from lbry.wallet.server.mempool import MemPool
|
||||
from lbry.wallet.server.block_processor import BlockProcessor
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.session import LBRYSessionManager
|
||||
from lbry.prometheus import PrometheusServer
|
||||
|
||||
|
||||
class Server:
|
||||
|
||||
def __init__(self, env):
|
||||
self.env = env
|
||||
self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)
|
||||
self.shutdown_event = asyncio.Event()
|
||||
self.cancellable_tasks = []
|
||||
|
||||
self.daemon = daemon = env.coin.DAEMON(env.coin, env.daemon_url)
|
||||
self.db = db = LevelDB(env)
|
||||
self.bp = bp = BlockProcessor(env, db, daemon, self.shutdown_event)
|
||||
self.prometheus_server: typing.Optional[PrometheusServer] = None
|
||||
|
||||
self.session_mgr = LBRYSessionManager(
|
||||
env, db, bp, daemon, self.shutdown_event
|
||||
)
|
||||
self._indexer_task = None
|
||||
|
||||
async def start(self):
|
||||
env = self.env
|
||||
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
|
||||
self.log.info(f'software version: {lbry.__version__}')
|
||||
self.log.info(f'supported protocol versions: {min_str}-{max_str}')
|
||||
self.log.info(f'event loop policy: {env.loop_policy}')
|
||||
self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks')
|
||||
|
||||
await self.daemon.height()
|
||||
|
||||
def _start_cancellable(run, *args):
|
||||
_flag = asyncio.Event()
|
||||
self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
|
||||
return _flag.wait()
|
||||
|
||||
await self.start_prometheus()
|
||||
if self.env.udp_port:
|
||||
await self.bp.status_server.start(
|
||||
0, bytes.fromhex(self.bp.coin.GENESIS_HASH)[::-1], self.env.country,
|
||||
self.env.host, self.env.udp_port, self.env.allow_lan_udp
|
||||
)
|
||||
await _start_cancellable(self.bp.fetch_and_process_blocks)
|
||||
|
||||
await self.db.populate_header_merkle_cache()
|
||||
await _start_cancellable(self.bp.mempool.keep_synchronized)
|
||||
await _start_cancellable(self.session_mgr.serve, self.bp.mempool)
|
||||
|
||||
async def stop(self):
|
||||
for task in reversed(self.cancellable_tasks):
|
||||
task.cancel()
|
||||
await asyncio.wait(self.cancellable_tasks)
|
||||
if self.prometheus_server:
|
||||
await self.prometheus_server.stop()
|
||||
self.prometheus_server = None
|
||||
self.shutdown_event.set()
|
||||
await self.daemon.close()
|
||||
|
||||
def run(self):
|
||||
loop = asyncio.get_event_loop()
|
||||
executor = ThreadPoolExecutor(self.env.max_query_workers, thread_name_prefix='hub-worker')
|
||||
loop.set_default_executor(executor)
|
||||
|
||||
def __exit():
|
||||
raise SystemExit()
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
||||
loop.run_until_complete(self.start())
|
||||
loop.run_until_complete(self.shutdown_event.wait())
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
pass
|
||||
finally:
|
||||
loop.run_until_complete(self.stop())
|
||||
executor.shutdown(True)
|
||||
|
||||
async def start_prometheus(self):
|
||||
if not self.prometheus_server and self.env.prometheus_port:
|
||||
self.prometheus_server = PrometheusServer()
|
||||
await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port)
|
|
@ -8,7 +8,7 @@ import asyncio
|
|||
import logging
|
||||
import itertools
|
||||
import collections
|
||||
|
||||
from bisect import bisect_right
|
||||
from asyncio import Event, sleep
|
||||
from collections import defaultdict
|
||||
from functools import partial
|
||||
|
@ -20,8 +20,7 @@ import lbry
|
|||
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
|
||||
from lbry.build_info import BUILD, COMMIT_HASH, DOCKER_TAG
|
||||
from lbry.schema.result import Outputs
|
||||
from lbry.wallet.server.block_processor import BlockProcessor
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.db.db import HubDB
|
||||
from lbry.wallet.server.websocket import AdminWebSocket
|
||||
from lbry.wallet.rpc.framing import NewlineFramer
|
||||
|
||||
|
@ -34,9 +33,12 @@ from lbry.wallet.rpc import (
|
|||
from lbry.wallet.server import util
|
||||
from lbry.wallet.server.hash import sha256, hash_to_hex_str, hex_str_to_hash, HASHX_LEN, Base58Error
|
||||
from lbry.wallet.server.daemon import DaemonError
|
||||
from lbry.wallet.server.db.elasticsearch import SearchIndex
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet.server.daemon import Daemon
|
||||
from lbry.wallet.server.mempool import MemPool
|
||||
|
||||
BAD_REQUEST = 1
|
||||
DAEMON_ERROR = 2
|
||||
|
@ -170,31 +172,43 @@ class SessionManager:
|
|||
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
|
||||
def __init__(self, env: 'Env', db: LevelDB, bp: BlockProcessor, daemon: 'Daemon', shutdown_event: asyncio.Event):
|
||||
def __init__(self, env: 'Env', db: HubDB, mempool: 'MemPool', history_cache, resolve_cache, resolve_outputs_cache,
|
||||
daemon: 'Daemon', shutdown_event: asyncio.Event,
|
||||
on_available_callback: typing.Callable[[], None], on_unavailable_callback: typing.Callable[[], None]):
|
||||
env.max_send = max(350000, env.max_send)
|
||||
self.env = env
|
||||
self.db = db
|
||||
self.bp = bp
|
||||
self.on_available_callback = on_available_callback
|
||||
self.on_unavailable_callback = on_unavailable_callback
|
||||
self.daemon = daemon
|
||||
self.mempool = bp.mempool
|
||||
self.mempool = mempool
|
||||
self.shutdown_event = shutdown_event
|
||||
self.logger = util.class_logger(__name__, self.__class__.__name__)
|
||||
self.servers: typing.Dict[str, asyncio.AbstractServer] = {}
|
||||
self.sessions: typing.Dict[int, 'SessionBase'] = {}
|
||||
self.sessions: typing.Dict[int, 'LBRYElectrumX'] = {}
|
||||
self.hashx_subscriptions_by_session: typing.DefaultDict[str, typing.Set[int]] = defaultdict(set)
|
||||
self.mempool_statuses = {}
|
||||
self.cur_group = SessionGroup(0)
|
||||
self.txs_sent = 0
|
||||
self.start_time = time.time()
|
||||
self.history_cache = self.bp.history_cache
|
||||
self.history_cache = history_cache
|
||||
self.resolve_cache = resolve_cache
|
||||
self.resolve_outputs_cache = resolve_outputs_cache
|
||||
self.notified_height: typing.Optional[int] = None
|
||||
# Cache some idea of room to avoid recounting on each subscription
|
||||
self.subs_room = 0
|
||||
|
||||
self.session_event = Event()
|
||||
|
||||
# Search index
|
||||
self.search_index = SearchIndex(
|
||||
self.env.es_index_prefix, self.env.database_query_timeout,
|
||||
elastic_host=env.elastic_host, elastic_port=env.elastic_port
|
||||
)
|
||||
|
||||
async def _start_server(self, kind, *args, **kw_args):
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
if kind == 'RPC':
|
||||
protocol_class = LocalRPC
|
||||
else:
|
||||
|
@ -243,7 +257,7 @@ class SessionManager:
|
|||
await self.session_event.wait()
|
||||
self.session_event.clear()
|
||||
if not paused and len(self.sessions) >= max_sessions:
|
||||
self.bp.status_server.set_unavailable()
|
||||
self.on_unavailable_callback()
|
||||
self.logger.info(f'maximum sessions {max_sessions:,d} '
|
||||
f'reached, stopping new connections until '
|
||||
f'count drops to {low_watermark:,d}')
|
||||
|
@ -252,7 +266,7 @@ class SessionManager:
|
|||
# Start listening for incoming connections if paused and
|
||||
# session count has fallen
|
||||
if paused and len(self.sessions) <= low_watermark:
|
||||
self.bp.status_server.set_available()
|
||||
self.on_available_callback()
|
||||
self.logger.info('resuming listening for incoming connections')
|
||||
await self._start_external_servers()
|
||||
paused = False
|
||||
|
@ -533,7 +547,7 @@ class SessionManager:
|
|||
await self.start_other()
|
||||
await self._start_external_servers()
|
||||
server_listening_event.set()
|
||||
self.bp.status_server.set_available()
|
||||
self.on_available_callback()
|
||||
# Peer discovery should start after the external servers
|
||||
# because we connect to ourself
|
||||
await asyncio.wait([
|
||||
|
@ -546,7 +560,7 @@ class SessionManager:
|
|||
raise err
|
||||
finally:
|
||||
await self._close_servers(list(self.servers.keys()))
|
||||
log.warning("disconnect %i sessions", len(self.sessions))
|
||||
log.info("disconnect %i sessions", len(self.sessions))
|
||||
if self.sessions:
|
||||
await asyncio.wait([
|
||||
session.close(force_after=1) for session in self.sessions.values()
|
||||
|
@ -573,7 +587,7 @@ class SessionManager:
|
|||
async def raw_header(self, height):
|
||||
"""Return the binary header at the given height."""
|
||||
try:
|
||||
return self.db.raw_header(height)
|
||||
return await self.db.raw_header(height)
|
||||
except IndexError:
|
||||
raise RPCError(BAD_REQUEST, f'height {height:,d} '
|
||||
'out of range') from None
|
||||
|
@ -608,46 +622,6 @@ class SessionManager:
|
|||
self.logger.info(f'notify {len(notify_tasks)} sessions of new peers')
|
||||
asyncio.create_task(asyncio.wait(notify_tasks))
|
||||
|
||||
async def _notify_sessions(self, height, touched, new_touched):
|
||||
"""Notify sessions about height changes and touched addresses."""
|
||||
height_changed = height != self.notified_height
|
||||
if height_changed:
|
||||
await self._refresh_hsub_results(height)
|
||||
|
||||
if not self.sessions:
|
||||
return
|
||||
|
||||
if height_changed:
|
||||
header_tasks = [
|
||||
session.send_notification('blockchain.headers.subscribe', (self.hsub_results[session.subscribe_headers_raw], ))
|
||||
for session in self.sessions.values() if session.subscribe_headers
|
||||
]
|
||||
if header_tasks:
|
||||
self.logger.info(f'notify {len(header_tasks)} sessions of new header')
|
||||
asyncio.create_task(asyncio.wait(header_tasks))
|
||||
for hashX in touched.intersection(self.mempool_statuses.keys()):
|
||||
self.mempool_statuses.pop(hashX, None)
|
||||
|
||||
await asyncio.get_event_loop().run_in_executor(
|
||||
self.bp._chain_executor, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
|
||||
)
|
||||
|
||||
if touched or new_touched or (height_changed and self.mempool_statuses):
|
||||
notified_hashxs = 0
|
||||
session_hashxes_to_notify = defaultdict(list)
|
||||
to_notify = touched if height_changed else new_touched
|
||||
|
||||
for hashX in to_notify:
|
||||
if hashX not in self.hashx_subscriptions_by_session:
|
||||
continue
|
||||
for session_id in self.hashx_subscriptions_by_session[hashX]:
|
||||
session_hashxes_to_notify[session_id].append(hashX)
|
||||
notified_hashxs += 1
|
||||
for session_id, hashXes in session_hashxes_to_notify.items():
|
||||
asyncio.create_task(self.sessions[session_id].send_history_notifications(*hashXes))
|
||||
if session_hashxes_to_notify:
|
||||
self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses')
|
||||
|
||||
def add_session(self, session):
|
||||
self.sessions[id(session)] = session
|
||||
self.session_event.set()
|
||||
|
@ -680,12 +654,12 @@ class SessionBase(RPCSession):
|
|||
request_handlers: typing.Dict[str, typing.Callable] = {}
|
||||
version = '0.5.7'
|
||||
|
||||
def __init__(self, session_mgr, db, mempool, kind):
|
||||
def __init__(self, session_manager: 'LBRYSessionManager', db: 'HubDB', mempool: 'MemPool', kind: str):
|
||||
connection = JSONRPCConnection(JSONRPCAutoDetect)
|
||||
self.env = session_mgr.env
|
||||
self.env = session_manager.env
|
||||
super().__init__(connection=connection)
|
||||
self.logger = util.class_logger(__name__, self.__class__.__name__)
|
||||
self.session_mgr = session_mgr
|
||||
self.session_manager = session_manager
|
||||
self.db = db
|
||||
self.mempool = mempool
|
||||
self.kind = kind # 'RPC', 'TCP' etc.
|
||||
|
@ -693,7 +667,7 @@ class SessionBase(RPCSession):
|
|||
self.anon_logs = self.env.anon_logs
|
||||
self.txs_sent = 0
|
||||
self.log_me = False
|
||||
self.daemon_request = self.session_mgr.daemon_request
|
||||
self.daemon_request = self.session_manager.daemon_request
|
||||
# Hijack the connection so we can log messages
|
||||
self._receive_message_orig = self.connection.receive_message
|
||||
self.connection.receive_message = self.receive_message
|
||||
|
@ -723,17 +697,17 @@ class SessionBase(RPCSession):
|
|||
self.session_id = next(self.session_counter)
|
||||
context = {'conn_id': f'{self.session_id}'}
|
||||
self.logger = util.ConnectionLogger(self.logger, context)
|
||||
self.group = self.session_mgr.add_session(self)
|
||||
self.session_mgr.session_count_metric.labels(version=self.client_version).inc()
|
||||
self.group = self.session_manager.add_session(self)
|
||||
self.session_manager.session_count_metric.labels(version=self.client_version).inc()
|
||||
peer_addr_str = self.peer_address_str()
|
||||
self.logger.info(f'{self.kind} {peer_addr_str}, '
|
||||
f'{self.session_mgr.session_count():,d} total')
|
||||
f'{self.session_manager.session_count():,d} total')
|
||||
|
||||
def connection_lost(self, exc):
|
||||
"""Handle client disconnection."""
|
||||
super().connection_lost(exc)
|
||||
self.session_mgr.remove_session(self)
|
||||
self.session_mgr.session_count_metric.labels(version=self.client_version).dec()
|
||||
self.session_manager.remove_session(self)
|
||||
self.session_manager.session_count_metric.labels(version=self.client_version).dec()
|
||||
msg = ''
|
||||
if not self._can_send.is_set():
|
||||
msg += ' whilst paused'
|
||||
|
@ -757,7 +731,7 @@ class SessionBase(RPCSession):
|
|||
"""Handle an incoming request. ElectrumX doesn't receive
|
||||
notifications from client sessions.
|
||||
"""
|
||||
self.session_mgr.request_count_metric.labels(method=request.method, version=self.client_version).inc()
|
||||
self.session_manager.request_count_metric.labels(method=request.method, version=self.client_version).inc()
|
||||
if isinstance(request, Request):
|
||||
handler = self.request_handlers.get(request.method)
|
||||
handler = partial(handler, self)
|
||||
|
@ -805,7 +779,7 @@ class LBRYElectrumX(SessionBase):
|
|||
PROTOCOL_MIN = VERSION.PROTOCOL_MIN
|
||||
PROTOCOL_MAX = VERSION.PROTOCOL_MAX
|
||||
max_errors = math.inf # don't disconnect people for errors! let them happen...
|
||||
session_mgr: LBRYSessionManager
|
||||
session_manager: LBRYSessionManager
|
||||
version = lbry.__version__
|
||||
cached_server_features = {}
|
||||
|
||||
|
@ -816,17 +790,17 @@ class LBRYElectrumX(SessionBase):
|
|||
'blockchain.block.get_header': cls.block_get_header,
|
||||
'blockchain.estimatefee': cls.estimatefee,
|
||||
'blockchain.relayfee': cls.relayfee,
|
||||
'blockchain.scripthash.get_balance': cls.scripthash_get_balance,
|
||||
# 'blockchain.scripthash.get_balance': cls.scripthash_get_balance,
|
||||
'blockchain.scripthash.get_history': cls.scripthash_get_history,
|
||||
'blockchain.scripthash.get_mempool': cls.scripthash_get_mempool,
|
||||
'blockchain.scripthash.listunspent': cls.scripthash_listunspent,
|
||||
# 'blockchain.scripthash.listunspent': cls.scripthash_listunspent,
|
||||
'blockchain.scripthash.subscribe': cls.scripthash_subscribe,
|
||||
'blockchain.transaction.broadcast': cls.transaction_broadcast,
|
||||
'blockchain.transaction.get': cls.transaction_get,
|
||||
'blockchain.transaction.get_batch': cls.transaction_get_batch,
|
||||
'blockchain.transaction.info': cls.transaction_info,
|
||||
'blockchain.transaction.get_merkle': cls.transaction_merkle,
|
||||
'server.add_peer': cls.add_peer,
|
||||
# 'server.add_peer': cls.add_peer,
|
||||
'server.banner': cls.banner,
|
||||
'server.payment_address': cls.payment_address,
|
||||
'server.donation_address': cls.donation_address,
|
||||
|
@ -843,10 +817,10 @@ class LBRYElectrumX(SessionBase):
|
|||
'blockchain.block.headers': cls.block_headers,
|
||||
'server.ping': cls.ping,
|
||||
'blockchain.headers.subscribe': cls.headers_subscribe_False,
|
||||
'blockchain.address.get_balance': cls.address_get_balance,
|
||||
# 'blockchain.address.get_balance': cls.address_get_balance,
|
||||
'blockchain.address.get_history': cls.address_get_history,
|
||||
'blockchain.address.get_mempool': cls.address_get_mempool,
|
||||
'blockchain.address.listunspent': cls.address_listunspent,
|
||||
# 'blockchain.address.listunspent': cls.address_listunspent,
|
||||
'blockchain.address.subscribe': cls.address_subscribe,
|
||||
'blockchain.address.unsubscribe': cls.address_unsubscribe,
|
||||
})
|
||||
|
@ -865,9 +839,8 @@ class LBRYElectrumX(SessionBase):
|
|||
self.sv_seen = False
|
||||
self.protocol_tuple = self.PROTOCOL_MIN
|
||||
self.protocol_string = None
|
||||
self.daemon = self.session_mgr.daemon
|
||||
self.bp: BlockProcessor = self.session_mgr.bp
|
||||
self.db: LevelDB = self.bp.db
|
||||
self.daemon = self.session_manager.daemon
|
||||
self.db: HubDB = self.session_manager.db
|
||||
|
||||
@classmethod
|
||||
def protocol_min_max_strings(cls):
|
||||
|
@ -907,6 +880,20 @@ class LBRYElectrumX(SessionBase):
|
|||
def sub_count(self):
|
||||
return len(self.hashX_subs)
|
||||
|
||||
async def get_hashX_status(self, hashX: bytes):
|
||||
mempool_history = self.mempool.transaction_summaries(hashX)
|
||||
history = ''.join(f'{hash_to_hex_str(tx_hash)}:'
|
||||
f'{height:d}:'
|
||||
for tx_hash, height in await self.session_manager.limited_history(hashX))
|
||||
history += ''.join(f'{hash_to_hex_str(tx.hash)}:'
|
||||
f'{-tx.has_unconfirmed_inputs:d}:'
|
||||
for tx in mempool_history)
|
||||
if history:
|
||||
status = sha256(history.encode()).hex()
|
||||
else:
|
||||
status = None
|
||||
return history, status, len(mempool_history) > 0
|
||||
|
||||
async def send_history_notifications(self, *hashXes: typing.Iterable[bytes]):
|
||||
notifications = []
|
||||
for hashX in hashXes:
|
||||
|
@ -916,38 +903,26 @@ class LBRYElectrumX(SessionBase):
|
|||
else:
|
||||
method = 'blockchain.address.subscribe'
|
||||
start = time.perf_counter()
|
||||
db_history = await self.session_mgr.limited_history(hashX)
|
||||
mempool = self.mempool.transaction_summaries(hashX)
|
||||
|
||||
status = ''.join(f'{hash_to_hex_str(tx_hash)}:'
|
||||
f'{height:d}:'
|
||||
for tx_hash, height in db_history)
|
||||
status += ''.join(f'{hash_to_hex_str(tx.hash)}:'
|
||||
f'{-tx.has_unconfirmed_inputs:d}:'
|
||||
for tx in mempool)
|
||||
if status:
|
||||
status = sha256(status.encode()).hex()
|
||||
history, status, mempool_status = await self.get_hashX_status(hashX)
|
||||
if mempool_status:
|
||||
self.session_manager.mempool_statuses[hashX] = status
|
||||
else:
|
||||
status = None
|
||||
if mempool:
|
||||
self.session_mgr.mempool_statuses[hashX] = status
|
||||
else:
|
||||
self.session_mgr.mempool_statuses.pop(hashX, None)
|
||||
self.session_manager.mempool_statuses.pop(hashX, None)
|
||||
|
||||
self.session_mgr.address_history_metric.observe(time.perf_counter() - start)
|
||||
self.session_manager.address_history_metric.observe(time.perf_counter() - start)
|
||||
notifications.append((method, (alias, status)))
|
||||
|
||||
start = time.perf_counter()
|
||||
self.session_mgr.notifications_in_flight_metric.inc()
|
||||
self.session_manager.notifications_in_flight_metric.inc()
|
||||
for method, args in notifications:
|
||||
self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc()
|
||||
try:
|
||||
await self.send_notifications(
|
||||
Batch([Notification(method, (alias, status)) for (method, (alias, status)) in notifications])
|
||||
)
|
||||
self.session_mgr.notifications_sent_metric.observe(time.perf_counter() - start)
|
||||
self.session_manager.notifications_sent_metric.observe(time.perf_counter() - start)
|
||||
finally:
|
||||
self.session_mgr.notifications_in_flight_metric.dec()
|
||||
self.session_manager.notifications_in_flight_metric.dec()
|
||||
|
||||
# def get_metrics_or_placeholder_for_api(self, query_name):
|
||||
# """ Do not hold on to a reference to the metrics
|
||||
|
@ -955,7 +930,7 @@ class LBRYElectrumX(SessionBase):
|
|||
# you may be working with a stale metrics object.
|
||||
# """
|
||||
# if self.env.track_metrics:
|
||||
# # return self.session_mgr.metrics.for_api(query_name)
|
||||
# # return self.session_manager.metrics.for_api(query_name)
|
||||
# else:
|
||||
# return APICallMetrics(query_name)
|
||||
|
||||
|
@ -965,17 +940,17 @@ class LBRYElectrumX(SessionBase):
|
|||
# if isinstance(kwargs, dict):
|
||||
# kwargs['release_time'] = format_release_time(kwargs.get('release_time'))
|
||||
# try:
|
||||
# self.session_mgr.pending_query_metric.inc()
|
||||
# self.session_manager.pending_query_metric.inc()
|
||||
# return await self.db.search_index.session_query(query_name, kwargs)
|
||||
# except ConnectionTimeout:
|
||||
# self.session_mgr.interrupt_count_metric.inc()
|
||||
# self.session_manager.interrupt_count_metric.inc()
|
||||
# raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
|
||||
# finally:
|
||||
# self.session_mgr.pending_query_metric.dec()
|
||||
# self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
|
||||
# self.session_manager.pending_query_metric.dec()
|
||||
# self.session_manager.executor_time_metric.observe(time.perf_counter() - start)
|
||||
|
||||
async def mempool_compact_histogram(self):
|
||||
return self.mempool.compact_fee_histogram()
|
||||
return [] #self.mempool.compact_fee_histogram()
|
||||
|
||||
async def claimtrie_search(self, **kwargs):
|
||||
start = time.perf_counter()
|
||||
|
@ -987,16 +962,16 @@ class LBRYElectrumX(SessionBase):
|
|||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
self.session_mgr.pending_query_metric.inc()
|
||||
self.session_manager.pending_query_metric.inc()
|
||||
if 'channel' in kwargs:
|
||||
channel_url = kwargs.pop('channel')
|
||||
_, channel_claim, _, _ = await self.db.resolve(channel_url)
|
||||
if not channel_claim or isinstance(channel_claim, (ResolveCensoredError, LookupError, ValueError)):
|
||||
return Outputs.to_base64([], [], 0, None, None)
|
||||
kwargs['channel_id'] = channel_claim.claim_hash.hex()
|
||||
return await self.db.search_index.cached_search(kwargs)
|
||||
return await self.session_manager.search_index.cached_search(kwargs)
|
||||
except ConnectionTimeout:
|
||||
self.session_mgr.interrupt_count_metric.inc()
|
||||
self.session_manager.interrupt_count_metric.inc()
|
||||
raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
|
||||
except TooManyClaimSearchParametersError as err:
|
||||
await asyncio.sleep(2)
|
||||
|
@ -1004,25 +979,25 @@ class LBRYElectrumX(SessionBase):
|
|||
self.peer_address()[0], err.key, err.limit)
|
||||
return RPCError(1, str(err))
|
||||
finally:
|
||||
self.session_mgr.pending_query_metric.dec()
|
||||
self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
|
||||
self.session_manager.pending_query_metric.dec()
|
||||
self.session_manager.executor_time_metric.observe(time.perf_counter() - start)
|
||||
|
||||
async def _cached_resolve_url(self, url):
|
||||
if url not in self.bp.resolve_cache:
|
||||
self.bp.resolve_cache[url] = await self.loop.run_in_executor(None, self.db._resolve, url)
|
||||
return self.bp.resolve_cache[url]
|
||||
if url not in self.session_manager.resolve_cache:
|
||||
self.session_manager.resolve_cache[url] = await self.loop.run_in_executor(self.db._executor, self.db._resolve, url)
|
||||
return self.session_manager.resolve_cache[url]
|
||||
|
||||
async def claimtrie_resolve(self, *urls) -> str:
|
||||
sorted_urls = tuple(sorted(urls))
|
||||
self.session_mgr.urls_to_resolve_count_metric.inc(len(sorted_urls))
|
||||
self.session_manager.urls_to_resolve_count_metric.inc(len(sorted_urls))
|
||||
try:
|
||||
if sorted_urls in self.bp.resolve_outputs_cache:
|
||||
return self.bp.resolve_outputs_cache[sorted_urls]
|
||||
if sorted_urls in self.session_manager.resolve_outputs_cache:
|
||||
return self.session_manager.resolve_outputs_cache[sorted_urls]
|
||||
rows, extra = [], []
|
||||
for url in urls:
|
||||
if url not in self.bp.resolve_cache:
|
||||
self.bp.resolve_cache[url] = await self._cached_resolve_url(url)
|
||||
stream, channel, repost, reposted_channel = self.bp.resolve_cache[url]
|
||||
if url not in self.session_manager.resolve_cache:
|
||||
self.session_manager.resolve_cache[url] = await self._cached_resolve_url(url)
|
||||
stream, channel, repost, reposted_channel = self.session_manager.resolve_cache[url]
|
||||
if isinstance(channel, ResolveCensoredError):
|
||||
rows.append(channel)
|
||||
extra.append(channel.censor_row)
|
||||
|
@ -1047,25 +1022,26 @@ class LBRYElectrumX(SessionBase):
|
|||
if reposted_channel:
|
||||
extra.append(reposted_channel)
|
||||
await asyncio.sleep(0)
|
||||
self.bp.resolve_outputs_cache[sorted_urls] = result = await self.loop.run_in_executor(
|
||||
self.session_manager.resolve_outputs_cache[sorted_urls] = result = await self.loop.run_in_executor(
|
||||
None, Outputs.to_base64, rows, extra, 0, None, None
|
||||
)
|
||||
return result
|
||||
finally:
|
||||
self.session_mgr.resolved_url_count_metric.inc(len(sorted_urls))
|
||||
self.session_manager.resolved_url_count_metric.inc(len(sorted_urls))
|
||||
|
||||
async def get_server_height(self):
|
||||
return self.bp.height
|
||||
return self.db.db_height
|
||||
|
||||
async def transaction_get_height(self, tx_hash):
|
||||
self.assert_tx_hash(tx_hash)
|
||||
transaction_info = await self.daemon.getrawtransaction(tx_hash, True)
|
||||
if transaction_info and 'hex' in transaction_info and 'confirmations' in transaction_info:
|
||||
# an unconfirmed transaction from lbrycrdd will not have a 'confirmations' field
|
||||
return (self.db.db_height - transaction_info['confirmations']) + 1
|
||||
elif transaction_info and 'hex' in transaction_info:
|
||||
return -1
|
||||
return None
|
||||
|
||||
def get_height():
|
||||
v = self.db.prefix_db.tx_num.get(tx_hash)
|
||||
if v:
|
||||
return bisect_right(self.db.tx_counts, v.tx_num)
|
||||
return self.mempool.get_mempool_height(tx_hash)
|
||||
|
||||
return await asyncio.get_event_loop().run_in_executor(self.db._executor, get_height)
|
||||
|
||||
async def claimtrie_getclaimbyid(self, claim_id):
|
||||
rows = []
|
||||
|
@@ -1088,7 +1064,7 @@ class LBRYElectrumX(SessionBase):

     async def subscribe_headers_result(self):
         """The result of a header subscription or notification."""
-        return self.session_mgr.hsub_results[self.subscribe_headers_raw]
+        return self.session_manager.hsub_results[self.subscribe_headers_raw]

     async def _headers_subscribe(self, raw):
         """Subscribe to get headers of new blocks."""
@@ -1124,48 +1100,34 @@ class LBRYElectrumX(SessionBase):
         """
         # Note history is ordered and mempool unordered in electrum-server
         # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0
-
-        db_history = await self.session_mgr.limited_history(hashX)
-        mempool = self.mempool.transaction_summaries(hashX)
-
-        status = ''.join(f'{hash_to_hex_str(tx_hash)}:'
-                         f'{height:d}:'
-                         for tx_hash, height in db_history)
-        status += ''.join(f'{hash_to_hex_str(tx.hash)}:'
-                          f'{-tx.has_unconfirmed_inputs:d}:'
-                          for tx in mempool)
-        if status:
-            status = sha256(status.encode()).hex()
+        _, status, has_mempool_history = await self.get_hashX_status(hashX)
+        if has_mempool_history:
+            self.session_manager.mempool_statuses[hashX] = status
         else:
-            status = None
-
-        if mempool:
-            self.session_mgr.mempool_statuses[hashX] = status
-        else:
-            self.session_mgr.mempool_statuses.pop(hashX, None)
+            self.session_manager.mempool_statuses.pop(hashX, None)
         return status

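Whichever side computes it, the status value returned here follows the electrum protocol convention that the removed code spelled out inline: confirmed history and then mempool entries are flattened into a "txid:height:" string and hashed with sha256, with an empty history mapping to None. A small sketch of that convention (input shapes are illustrative):

    from hashlib import sha256

    def electrum_status(confirmed, mempool):
        # confirmed: [(tx_hash_hex, height)], mempool: [(tx_hash_hex, has_unconfirmed_inputs)]
        status = ''.join(f'{tx_hash}:{height:d}:' for tx_hash, height in confirmed)
        # mempool entries use height -1 when they spend unconfirmed inputs, otherwise 0
        status += ''.join(f'{tx_hash}:{-int(unconf):d}:' for tx_hash, unconf in mempool)
        return sha256(status.encode()).hex() if status else None

    print(electrum_status([('aa' * 32, 207)], [('bb' * 32, False)]))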
async def hashX_listunspent(self, hashX):
|
||||
"""Return the list of UTXOs of a script hash, including mempool
|
||||
effects."""
|
||||
utxos = await self.db.all_utxos(hashX)
|
||||
utxos = sorted(utxos)
|
||||
utxos.extend(await self.mempool.unordered_UTXOs(hashX))
|
||||
spends = await self.mempool.potential_spends(hashX)
|
||||
|
||||
return [{'tx_hash': hash_to_hex_str(utxo.tx_hash),
|
||||
'tx_pos': utxo.tx_pos,
|
||||
'height': utxo.height, 'value': utxo.value}
|
||||
for utxo in utxos
|
||||
if (utxo.tx_hash, utxo.tx_pos) not in spends]
|
||||
# async def hashX_listunspent(self, hashX):
|
||||
# """Return the list of UTXOs of a script hash, including mempool
|
||||
# effects."""
|
||||
# utxos = await self.db.all_utxos(hashX)
|
||||
# utxos = sorted(utxos)
|
||||
# utxos.extend(await self.mempool.unordered_UTXOs(hashX))
|
||||
# spends = await self.mempool.potential_spends(hashX)
|
||||
#
|
||||
# return [{'tx_hash': hash_to_hex_str(utxo.tx_hash),
|
||||
# 'tx_pos': utxo.tx_pos,
|
||||
# 'height': utxo.height, 'value': utxo.value}
|
||||
# for utxo in utxos
|
||||
# if (utxo.tx_hash, utxo.tx_pos) not in spends]
|
||||
|
||||
async def hashX_subscribe(self, hashX, alias):
|
||||
self.hashX_subs[hashX] = alias
|
||||
self.session_mgr.hashx_subscriptions_by_session[hashX].add(id(self))
|
||||
self.session_manager.hashx_subscriptions_by_session[hashX].add(id(self))
|
||||
return await self.address_status(hashX)
|
||||
|
||||
async def hashX_unsubscribe(self, hashX, alias):
|
||||
sessions = self.session_mgr.hashx_subscriptions_by_session[hashX]
|
||||
sessions = self.session_manager.hashx_subscriptions_by_session[hashX]
|
||||
sessions.remove(id(self))
|
||||
if not sessions:
|
||||
self.hashX_subs.pop(hashX, None)
|
||||
|
@ -1177,10 +1139,10 @@ class LBRYElectrumX(SessionBase):
|
|||
pass
|
||||
raise RPCError(BAD_REQUEST, f'{address} is not a valid address')
|
||||
|
||||
async def address_get_balance(self, address):
|
||||
"""Return the confirmed and unconfirmed balance of an address."""
|
||||
hashX = self.address_to_hashX(address)
|
||||
return await self.get_balance(hashX)
|
||||
# async def address_get_balance(self, address):
|
||||
# """Return the confirmed and unconfirmed balance of an address."""
|
||||
# hashX = self.address_to_hashX(address)
|
||||
# return await self.get_balance(hashX)
|
||||
|
||||
async def address_get_history(self, address):
|
||||
"""Return the confirmed and unconfirmed history of an address."""
|
||||
|
@ -1192,10 +1154,10 @@ class LBRYElectrumX(SessionBase):
|
|||
hashX = self.address_to_hashX(address)
|
||||
return self.unconfirmed_history(hashX)
|
||||
|
||||
async def address_listunspent(self, address):
|
||||
"""Return the list of UTXOs of an address."""
|
||||
hashX = self.address_to_hashX(address)
|
||||
return await self.hashX_listunspent(hashX)
|
||||
# async def address_listunspent(self, address):
|
||||
# """Return the list of UTXOs of an address."""
|
||||
# hashX = self.address_to_hashX(address)
|
||||
# return await self.hashX_listunspent(hashX)
|
||||
|
||||
async def address_subscribe(self, *addresses):
|
||||
"""Subscribe to an address.
|
||||
|
@ -1216,16 +1178,16 @@ class LBRYElectrumX(SessionBase):
|
|||
hashX = self.address_to_hashX(address)
|
||||
return await self.hashX_unsubscribe(hashX, address)
|
||||
|
||||
async def get_balance(self, hashX):
|
||||
utxos = await self.db.all_utxos(hashX)
|
||||
confirmed = sum(utxo.value for utxo in utxos)
|
||||
unconfirmed = await self.mempool.balance_delta(hashX)
|
||||
return {'confirmed': confirmed, 'unconfirmed': unconfirmed}
|
||||
# async def get_balance(self, hashX):
|
||||
# utxos = await self.db.all_utxos(hashX)
|
||||
# confirmed = sum(utxo.value for utxo in utxos)
|
||||
# unconfirmed = await self.mempool.balance_delta(hashX)
|
||||
# return {'confirmed': confirmed, 'unconfirmed': unconfirmed}
|
||||
|
||||
async def scripthash_get_balance(self, scripthash):
|
||||
"""Return the confirmed and unconfirmed balance of a scripthash."""
|
||||
hashX = scripthash_to_hashX(scripthash)
|
||||
return await self.get_balance(hashX)
|
||||
# async def scripthash_get_balance(self, scripthash):
|
||||
# """Return the confirmed and unconfirmed balance of a scripthash."""
|
||||
# hashX = scripthash_to_hashX(scripthash)
|
||||
# return await self.get_balance(hashX)
|
||||
|
||||
def unconfirmed_history(self, hashX):
|
||||
# Note unconfirmed history is unordered in electrum-server
|
||||
|
@ -1237,7 +1199,7 @@ class LBRYElectrumX(SessionBase):
|
|||
|
||||
async def confirmed_and_unconfirmed_history(self, hashX):
|
||||
# Note history is ordered but unconfirmed is unordered in e-s
|
||||
history = await self.session_mgr.limited_history(hashX)
|
||||
history = await self.session_manager.limited_history(hashX)
|
||||
conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height}
|
||||
for tx_hash, height in history]
|
||||
return conf + self.unconfirmed_history(hashX)
|
||||
|
@ -1252,10 +1214,10 @@ class LBRYElectrumX(SessionBase):
|
|||
hashX = scripthash_to_hashX(scripthash)
|
||||
return self.unconfirmed_history(hashX)
|
||||
|
||||
async def scripthash_listunspent(self, scripthash):
|
||||
"""Return the list of UTXOs of a scripthash."""
|
||||
hashX = scripthash_to_hashX(scripthash)
|
||||
return await self.hashX_listunspent(hashX)
|
||||
# async def scripthash_listunspent(self, scripthash):
|
||||
# """Return the list of UTXOs of a scripthash."""
|
||||
# hashX = scripthash_to_hashX(scripthash)
|
||||
# return await self.hashX_listunspent(hashX)
|
||||
|
||||
async def scripthash_subscribe(self, scripthash):
|
||||
"""Subscribe to a script hash.
|
||||
|
@ -1290,7 +1252,7 @@ class LBRYElectrumX(SessionBase):
|
|||
|
||||
max_size = self.MAX_CHUNK_SIZE
|
||||
count = min(count, max_size)
|
||||
headers, count = self.db.read_headers(start_height, count)
|
||||
headers, count = await self.db.read_headers(start_height, count)
|
||||
|
||||
if b64:
|
||||
headers = self.db.encode_headers(start_height, count, headers)
|
||||
|
@ -1313,7 +1275,7 @@ class LBRYElectrumX(SessionBase):
|
|||
index = non_negative_integer(index)
|
||||
size = self.coin.CHUNK_SIZE
|
||||
start_height = index * size
|
||||
headers, _ = self.db.read_headers(start_height, size)
|
||||
headers, _ = await self.db.read_headers(start_height, size)
|
||||
return headers.hex()
|
||||
|
||||
async def block_get_header(self, height):
|
||||
|
@ -1321,7 +1283,7 @@ class LBRYElectrumX(SessionBase):
|
|||
|
||||
height: the header's height"""
|
||||
height = non_negative_integer(height)
|
||||
return await self.session_mgr.electrum_header(height)
|
||||
return await self.session_manager.electrum_header(height)
|
||||
|
||||
def is_tor(self):
|
||||
"""Try to detect if the connection is to a tor hidden service we are
|
||||
|
@ -1411,10 +1373,10 @@ class LBRYElectrumX(SessionBase):
|
|||
self.close_after_send = True
|
||||
raise RPCError(BAD_REQUEST, f'unsupported client: {client_name}')
|
||||
if self.client_version != client_name[:17]:
|
||||
self.session_mgr.session_count_metric.labels(version=self.client_version).dec()
|
||||
self.session_manager.session_count_metric.labels(version=self.client_version).dec()
|
||||
self.client_version = client_name[:17]
|
||||
self.session_mgr.session_count_metric.labels(version=self.client_version).inc()
|
||||
self.session_mgr.client_version_metric.labels(version=self.client_version).inc()
|
||||
self.session_manager.session_count_metric.labels(version=self.client_version).inc()
|
||||
self.session_manager.client_version_metric.labels(version=self.client_version).inc()
|
||||
|
||||
# Find the highest common protocol version. Disconnect if
|
||||
# that protocol version in unsupported.
|
||||
|
@ -1435,9 +1397,10 @@ class LBRYElectrumX(SessionBase):
|
|||
raw_tx: the raw transaction as a hexadecimal string"""
|
||||
# This returns errors as JSON RPC errors, as is natural
|
||||
try:
|
||||
hex_hash = await self.session_mgr.broadcast_transaction(raw_tx)
|
||||
hex_hash = await self.session_manager.broadcast_transaction(raw_tx)
|
||||
self.txs_sent += 1
|
||||
self.mempool.wakeup.set()
|
||||
# self.mempool.wakeup.set()
|
||||
# await asyncio.sleep(0.5)
|
||||
self.logger.info(f'sent tx: {hex_hash}')
|
||||
return hex_hash
|
||||
except DaemonError as e:
|
||||
|
@ -1451,7 +1414,7 @@ class LBRYElectrumX(SessionBase):
|
|||
return (await self.transaction_get_batch(tx_hash))[tx_hash]
|
||||
|
||||
async def transaction_get_batch(self, *tx_hashes):
|
||||
self.session_mgr.tx_request_count_metric.inc(len(tx_hashes))
|
||||
self.session_manager.tx_request_count_metric.inc(len(tx_hashes))
|
||||
if len(tx_hashes) > 100:
|
||||
raise RPCError(BAD_REQUEST, f'too many tx hashes in request: {len(tx_hashes)}')
|
||||
for tx_hash in tx_hashes:
|
||||
|
@ -1467,7 +1430,7 @@ class LBRYElectrumX(SessionBase):
|
|||
if mempool_tx:
|
||||
raw_tx, block_hash = mempool_tx.raw_tx.hex(), None
|
||||
else:
|
||||
tx_info = await self.daemon_request('getrawtransaction', tx_hash, True)
|
||||
tx_info = await self.daemon_request('getrawtransaction', tx_hash, 1)
|
||||
raw_tx = tx_info['hex']
|
||||
block_hash = tx_info.get('blockhash')
|
||||
if block_hash:
|
||||
|
@ -1490,8 +1453,7 @@ class LBRYElectrumX(SessionBase):
|
|||
'block_height': block_height
|
||||
}
|
||||
await asyncio.sleep(0) # heavy call, give other tasks a chance
|
||||
|
||||
self.session_mgr.tx_replied_count_metric.inc(len(tx_hashes))
|
||||
self.session_manager.tx_replied_count_metric.inc(len(tx_hashes))
|
||||
return batch_result
|
||||
|
||||
async def transaction_get(self, tx_hash, verbose=False):
|
||||
|
@ -1504,7 +1466,7 @@ class LBRYElectrumX(SessionBase):
|
|||
if verbose not in (True, False):
|
||||
raise RPCError(BAD_REQUEST, f'"verbose" must be a boolean')
|
||||
|
||||
return await self.daemon_request('getrawtransaction', tx_hash, verbose)
|
||||
return await self.daemon_request('getrawtransaction', tx_hash, int(verbose))
|
||||
|
||||
def _get_merkle_branch(self, tx_hashes, tx_pos):
|
||||
"""Return a merkle branch to a transaction.
|
||||

@@ -98,7 +98,7 @@ class Deserializer:
     TX_HASH_FN = staticmethod(double_sha256)

     def __init__(self, binary, start=0):
-        assert isinstance(binary, bytes)
+        assert isinstance(binary, bytes), f"type {type(binary)} is not 'bytes'"
         self.binary = binary
         self.binary_length = len(binary)
         self.cursor = start

@@ -110,6 +110,7 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):
         self._min_delay = 1 / throttle_reqs_per_sec
         self._allow_localhost = allow_localhost
         self._allow_lan = allow_lan
+        self.closed = asyncio.Event()

     def update_cached_response(self):
         self._left_cache, self._right_cache = SPVPong.make_sans_source_address(

@@ -160,13 +161,16 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):

     def connection_made(self, transport) -> None:
         self.transport = transport
+        self.closed.clear()

     def connection_lost(self, exc: Optional[Exception]) -> None:
         self.transport = None
+        self.closed.set()

-    def close(self):
+    async def close(self):
         if self.transport:
             self.transport.close()
+            await self.closed.wait()


 class StatusServer:

@@ -184,9 +188,9 @@ class StatusServer:
         await loop.create_datagram_endpoint(lambda: self._protocol, (interface, port))
         log.info("started udp status server on %s:%i", interface, port)

-    def stop(self):
+    async def stop(self):
         if self.is_running:
-            self._protocol.close()
+            await self._protocol.close()
         self._protocol = None

     @property
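The close()/stop() change is a common asyncio pattern: connection_lost is the only reliable signal that a datagram transport has finished shutting down, so setting an asyncio.Event there lets an async close() wait for the teardown instead of returning while the socket may still be live. A stripped-down sketch of the same pattern (addresses are placeholders):

    import asyncio

    class GracefulUDP(asyncio.DatagramProtocol):
        def __init__(self):
            self.transport = None
            self.closed = asyncio.Event()

        def connection_made(self, transport):
            self.transport = transport
            self.closed.clear()

        def connection_lost(self, exc):
            self.transport = None
            self.closed.set()               # the transport is really gone now

        async def close(self):
            if self.transport:
                self.transport.close()      # ask the loop to tear the transport down...
                await self.closed.wait()    # ...and wait until connection_lost confirms it

    async def main():
        loop = asyncio.get_running_loop()
        protocol = GracefulUDP()
        await loop.create_datagram_endpoint(lambda: protocol, local_addr=('127.0.0.1', 0))
        await protocol.close()

    asyncio.run(main())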

@@ -40,22 +40,17 @@ def checkrecord(record, expected_winner, expected_claim):


 async def checkcontrolling(daemon: Daemon, db: SQLDB):
-    records, claim_ids, names, futs = [], [], [], []
+    records, names, futs = [], [], []
     for record in db.get_claims('claimtrie.claim_hash as is_controlling, claim.*', is_controlling=True):
         records.append(record)
         claim_id = hex_reverted(record['claim_hash'])
-        claim_ids.append((claim_id,))
-        names.append((record['normalized'],))
+        names.append((record['normalized'], (claim_id,), "", True))  # last parameter is IncludeValues
         if len(names) > 50000:
-            futs.append(daemon._send_vector('getvalueforname', names[:]))
-            futs.append(daemon._send_vector('getclaimbyid', claim_ids[:]))
+            futs.append(daemon._send_vector('getclaimsfornamebyid', names))
             names.clear()
-            claim_ids.clear()
     if names:
-        futs.append(daemon._send_vector('getvalueforname', names[:]))
-        futs.append(daemon._send_vector('getclaimbyid', claim_ids[:]))
+        futs.append(daemon._send_vector('getclaimsfornamebyid', names))
         names.clear()
-        claim_ids.clear()

     while futs:
         winners, claims = futs.pop(0), futs.pop(0)
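The rewritten helper keeps the same accumulate-and-flush batching shape: buffer request tuples, fire a vectored RPC every 50,000 entries, and flush whatever is left at the end, the step that is easiest to forget. A generic sketch of that shape with a made-up send function:

    async def send_in_batches(items, send_vector, batch_size=50000):
        # send_vector is assumed to take a list of tuples and return an awaitable
        futs, batch = [], []
        for item in items:
            batch.append(item)
            if len(batch) >= batch_size:
                futs.append(send_vector(batch[:]))   # copy before clearing
                batch.clear()
        if batch:                                    # do not drop the final partial batch
            futs.append(send_vector(batch[:]))
        return futs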

@@ -1,12 +1,12 @@
 #!/bin/bash

-SNAPSHOT_HEIGHT="1049658"
+SNAPSHOT_HEIGHT="1072108"

 HUB_VOLUME_PATH="/var/lib/docker/volumes/${USER}_wallet_server"
 ES_VOLUME_PATH="/var/lib/docker/volumes/${USER}_es01"

-SNAPSHOT_TAR_NAME="wallet_server_snapshot_${SNAPSHOT_HEIGHT}.tar"
-ES_SNAPSHOT_TAR_NAME="es_snapshot_${SNAPSHOT_HEIGHT}.tar"
+SNAPSHOT_TAR_NAME="wallet_server_snapshot_${SNAPSHOT_HEIGHT}.tar.gz"
+ES_SNAPSHOT_TAR_NAME="es_snapshot_${SNAPSHOT_HEIGHT}.tar.gz"

 SNAPSHOT_URL="https://snapshots.lbry.com/hub/${SNAPSHOT_TAR_NAME}"
 ES_SNAPSHOT_URL="https://snapshots.lbry.com/hub/${ES_SNAPSHOT_TAR_NAME}"

setup.py (17 lines changed)

@@ -7,9 +7,11 @@ BASE = os.path.dirname(__file__)
 with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh:
     long_description = fh.read()

-PLYVEL = []
-if sys.platform.startswith('linux'):
-    PLYVEL.append('plyvel==1.3.0')
+ROCKSDB = []
+if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
+    ROCKSDB.append('lbry-rocksdb==0.8.2')


 setup(
     name=__name__,

@@ -28,9 +30,10 @@ setup(
     entry_points={
         'console_scripts': [
             'lbrynet=lbry.extras.cli:main',
-            'lbry-hub=lbry.wallet.server.cli:main',
-            'orchstr8=lbry.wallet.orchstr8.cli:main',
-            'lbry-hub-elastic-sync=lbry.wallet.server.db.elasticsearch.sync:run_elastic_sync'
+            'lbry-hub-writer=lbry.wallet.server.cli:run_writer_forever',
+            'lbry-hub-server=lbry.wallet.server.cli:run_server_forever',
+            'lbry-hub-elastic-sync=lbry.wallet.server.cli:run_es_sync_forever',
+            'orchstr8=lbry.wallet.orchstr8.cli:main'
         ],
     },
     install_requires=[

@@ -58,7 +61,7 @@ setup(
         'elasticsearch==7.10.1',
         'grpcio==1.38.0',
         'filetype==1.0.9'
-    ] + PLYVEL,
+    ] + ROCKSDB,
     extras_require={
         'torrent': ['lbry-libtorrent'],
         'lint': ['pylint==2.10.0'],

test_rocksdb.py (new executable file, 47 lines)

@@ -0,0 +1,47 @@
+#! python
+
+import os
+import shutil
+import rocksdb
+import tempfile
+import logging
+
+log = logging.getLogger()
+log.addHandler(logging.StreamHandler())
+log.setLevel(logging.INFO)
+
+def _main(db_loc):
+    opts = rocksdb.Options(create_if_missing=True)
+    db = rocksdb.DB(os.path.join(db_loc, "test"), opts)
+    secondary_location = os.path.join(db_loc, "secondary")
+    secondary = rocksdb.DB(
+        os.path.join(db_loc, "test"),
+        rocksdb.Options(create_if_missing=True, max_open_files=-1),
+        secondary_name=secondary_location
+    )
+    try:
+        assert secondary.get(b"a") is None
+        db.put(b"a", b"b")
+        assert db.get(b"a") == b"b"
+        assert secondary.get(b"a") is None
+
+        secondary.try_catch_up_with_primary()
+        assert secondary.get(b"a") == b"b"
+    finally:
+        secondary.close()
+        db.close()
+
+
+def main():
+    db_dir = tempfile.mkdtemp()
+    try:
+        _main(db_dir)
+        log.info("rocksdb %s (%s) works!", rocksdb.__version__, rocksdb.ROCKSDB_VERSION)
+    except:
+        log.exception("boom")
+    finally:
+        shutil.rmtree(db_dir)
+
+
+if __name__ == "__main__":
+    main()
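This smoke test exercises the capability the branch is built around: a RocksDB secondary instance opens the primary's data directory read-only and pulls in new writes on demand via try_catch_up_with_primary(), which is what lets reader processes serve queries against a database another process is writing. A hedged sketch of how a reader loop might poll for new data (paths, key, and interval are illustrative, not the hub's actual code; only calls shown in the test above are used):

    import asyncio
    import rocksdb  # the lbry-rocksdb binding pinned in setup.py

    async def follow_primary(db_path, secondary_path, poll_interval=1.0):
        reader = rocksdb.DB(
            db_path,
            rocksdb.Options(create_if_missing=False, max_open_files=-1),
            secondary_name=secondary_path
        )
        try:
            while True:
                reader.try_catch_up_with_primary()   # absorb whatever the writer committed since last poll
                print(reader.get(b"some-key"))       # hypothetical key, for illustration
                await asyncio.sleep(poll_interval)
        finally:
            reader.close()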
@@ -103,7 +103,7 @@ class AccountManagement(CommandTestCase):
         second_account = await self.daemon.jsonrpc_account_create('second account')

         tx = await self.daemon.jsonrpc_account_send(
-            '0.05', await self.daemon.jsonrpc_address_unused(account_id=second_account.id)
+            '0.05', await self.daemon.jsonrpc_address_unused(account_id=second_account.id), blocking=True
         )
         await self.confirm_tx(tx.id)
         await self.assertOutputAmount(['0.05', '9.949876'], utxo_list())

@@ -9,7 +9,7 @@ class BlockchainReorganizationTests(CommandTestCase):
     VERBOSITY = logging.WARN

     async def assertBlockHash(self, height):
-        bp = self.conductor.spv_node.server.bp
+        bp = self.conductor.spv_node.writer

         def get_txids():
             return [

@@ -29,15 +29,16 @@ class BlockchainReorganizationTests(CommandTestCase):
         self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are of order')

     async def test_reorg(self):
-        bp = self.conductor.spv_node.server.bp
+        bp = self.conductor.spv_node.writer
         bp.reorg_count_metric.set(0)
         # invalidate current block, move forward 2
         height = 206
         self.assertEqual(self.ledger.headers.height, height)
         await self.assertBlockHash(height)
-        await self.blockchain.invalidate_block((await self.ledger.headers.hash(206)).decode())
+        block_hash = (await self.ledger.headers.hash(206)).decode()
+        await self.blockchain.invalidate_block(block_hash)
         await self.blockchain.generate(2)
-        await self.ledger.on_header.where(lambda e: e.height == 207)
+        await asyncio.wait_for(self.on_header(207), 3.0)
         self.assertEqual(self.ledger.headers.height, 207)
         await self.assertBlockHash(206)
         await self.assertBlockHash(207)
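The recurring change from awaiting self.ledger.on_header.where(...) to asyncio.wait_for(self.on_header(n), timeout) swaps an unbounded wait for one that fails fast when the expected header never arrives. The mechanism is plain asyncio: wrap any awaitable in wait_for and a hang becomes a TimeoutError. A tiny self-contained illustration:

    import asyncio

    async def wait_for_height(header_queue, target):
        # stand-in for "wait until a header at `target` height is observed"
        while True:
            height = await header_queue.get()
            if height >= target:
                return height

    async def main():
        headers = asyncio.Queue()
        await headers.put(207)
        print(await asyncio.wait_for(wait_for_height(headers, 207), timeout=3.0))  # returns at once
        try:
            await asyncio.wait_for(wait_for_height(headers, 208), timeout=0.1)     # nothing arrives
        except asyncio.TimeoutError:
            print("timed out waiting for height 208")

    asyncio.run(main())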
@ -46,14 +47,14 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
# invalidate current block, move forward 3
|
||||
await self.blockchain.invalidate_block((await self.ledger.headers.hash(206)).decode())
|
||||
await self.blockchain.generate(3)
|
||||
await self.ledger.on_header.where(lambda e: e.height == 208)
|
||||
await asyncio.wait_for(self.on_header(208), 3.0)
|
||||
self.assertEqual(self.ledger.headers.height, 208)
|
||||
await self.assertBlockHash(206)
|
||||
await self.assertBlockHash(207)
|
||||
await self.assertBlockHash(208)
|
||||
self.assertEqual(2, bp.reorg_count_metric._samples()[0][2])
|
||||
await self.blockchain.generate(3)
|
||||
await self.ledger.on_header.where(lambda e: e.height == 211)
|
||||
await asyncio.wait_for(self.on_header(211), 3.0)
|
||||
await self.assertBlockHash(209)
|
||||
await self.assertBlockHash(210)
|
||||
await self.assertBlockHash(211)
|
||||
|
@ -62,7 +63,7 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
)
|
||||
await self.ledger.wait(still_valid)
|
||||
await self.blockchain.generate(1)
|
||||
await self.ledger.on_header.where(lambda e: e.height == 212)
|
||||
await asyncio.wait_for(self.on_header(212), 1.0)
|
||||
claim_id = still_valid.outputs[0].claim_id
|
||||
c1 = (await self.resolve(f'still-valid#{claim_id}'))['claim_id']
|
||||
c2 = (await self.resolve(f'still-valid#{claim_id[:2]}'))['claim_id']
|
||||
|
@ -71,7 +72,7 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
|
||||
abandon_tx = await self.daemon.jsonrpc_stream_abandon(claim_id=claim_id)
|
||||
await self.blockchain.generate(1)
|
||||
await self.ledger.on_header.where(lambda e: e.height == 213)
|
||||
await asyncio.wait_for(self.on_header(213), 1.0)
|
||||
c1 = await self.resolve(f'still-valid#{still_valid.outputs[0].claim_id}')
|
||||
c2 = await self.daemon.jsonrpc_resolve([f'still-valid#{claim_id[:2]}'])
|
||||
c3 = await self.daemon.jsonrpc_resolve([f'still-valid'])
|
||||
|
@ -112,11 +113,10 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
|
||||
# reorg the last block dropping our claim tx
|
||||
await self.blockchain.invalidate_block(invalidated_block_hash)
|
||||
await self.blockchain.clear_mempool()
|
||||
await self.conductor.clear_mempool()
|
||||
await self.blockchain.generate(2)
|
||||
|
||||
# wait for the client to catch up and verify the reorg
|
||||
await asyncio.wait_for(self.on_header(209), 3.0)
|
||||
|
||||
await self.assertBlockHash(207)
|
||||
await self.assertBlockHash(208)
|
||||
await self.assertBlockHash(209)
|
||||
|
@ -142,9 +142,8 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
# broadcast the claim in a different block
|
||||
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
||||
self.assertEqual(broadcast_tx.id, new_txid)
|
||||
await self.blockchain.generate(1)
|
||||
|
||||
# wait for the client to catch up
|
||||
await self.blockchain.generate(1)
|
||||
await asyncio.wait_for(self.on_header(210), 1.0)
|
||||
|
||||
# verify the claim is in the new block and that it is returned by claim_search
|
||||
|
@ -191,7 +190,7 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
|
||||
# reorg the last block dropping our claim tx
|
||||
await self.blockchain.invalidate_block(invalidated_block_hash)
|
||||
await self.blockchain.clear_mempool()
|
||||
await self.conductor.clear_mempool()
|
||||
await self.blockchain.generate(2)
|
||||
|
||||
# wait for the client to catch up and verify the reorg
|
||||
|
@ -222,8 +221,6 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
||||
self.assertEqual(broadcast_tx.id, new_txid)
|
||||
await self.blockchain.generate(1)
|
||||
|
||||
# wait for the client to catch up
|
||||
await asyncio.wait_for(self.on_header(210), 1.0)
|
||||
|
||||
# verify the claim is in the new block and that it is returned by claim_search
|
||||
|
|
|
@ -37,6 +37,12 @@ class NetworkTests(IntegrationTestCase):
|
|||
}, await self.ledger.network.get_server_features())
|
||||
# await self.conductor.spv_node.stop()
|
||||
payment_address, donation_address = await self.account.get_addresses(limit=2)
|
||||
|
||||
original_address = self.conductor.spv_node.server.env.payment_address
|
||||
original_donation_address = self.conductor.spv_node.server.env.donation_address
|
||||
original_description = self.conductor.spv_node.server.env.description
|
||||
original_daily_fee = self.conductor.spv_node.server.env.daily_fee
|
||||
|
||||
self.conductor.spv_node.server.env.payment_address = payment_address
|
||||
self.conductor.spv_node.server.env.donation_address = donation_address
|
||||
self.conductor.spv_node.server.env.description = 'Fastest server in the west.'
|
||||
|
@ -61,6 +67,13 @@ class NetworkTests(IntegrationTestCase):
|
|||
'trending_algorithm': 'fast_ar',
|
||||
}, await self.ledger.network.get_server_features())
|
||||
|
||||
# cleanup the changes since the attributes are set on the class
|
||||
self.conductor.spv_node.server.env.payment_address = original_address
|
||||
self.conductor.spv_node.server.env.donation_address = original_donation_address
|
||||
self.conductor.spv_node.server.env.description = original_description
|
||||
self.conductor.spv_node.server.env.daily_fee = original_daily_fee
|
||||
LBRYElectrumX.set_server_features(self.conductor.spv_node.server.env)
|
||||
|
||||
|
||||
class ReconnectTests(IntegrationTestCase):
|
||||
|
||||
|
@ -86,7 +99,7 @@ class ReconnectTests(IntegrationTestCase):
|
|||
await self.ledger.stop()
|
||||
initial_height = self.ledger.local_height_including_downloaded_height
|
||||
await self.blockchain.generate(100)
|
||||
while self.conductor.spv_node.server.session_mgr.notified_height < initial_height + 99: # off by 1
|
||||
while self.conductor.spv_node.server.session_manager.notified_height < initial_height + 99: # off by 1
|
||||
await asyncio.sleep(0.1)
|
||||
self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height)
|
||||
await self.ledger.headers.open()
|
||||
|
@ -101,12 +114,7 @@ class ReconnectTests(IntegrationTestCase):
|
|||
self.ledger.network.client.transport.close()
|
||||
self.assertFalse(self.ledger.network.is_connected)
|
||||
await self.ledger.resolve([], 'derp')
|
||||
sendtxid = await self.blockchain.send_to_address(address1, 1.1337)
|
||||
# await self.ledger.resolve([], 'derp')
|
||||
# self.assertTrue(self.ledger.network.is_connected)
|
||||
await asyncio.wait_for(self.on_transaction_id(sendtxid), 10.0) # mempool
|
||||
await self.blockchain.generate(1)
|
||||
await self.on_transaction_id(sendtxid) # confirmed
|
||||
sendtxid = await self.send_to_address_and_wait(address1, 1.1337, 1)
|
||||
self.assertLess(self.ledger.network.client.response_time, 1) # response time properly set lower, we are fine
|
||||
|
||||
await self.assertBalance(self.account, '1.1337')
|
||||
|
@ -135,7 +143,7 @@ class ReconnectTests(IntegrationTestCase):
|
|||
await self.conductor.spv_node.stop()
|
||||
self.assertFalse(self.ledger.network.is_connected)
|
||||
await asyncio.sleep(0.2) # let it retry and fail once
|
||||
await self.conductor.spv_node.start(self.conductor.blockchain_node)
|
||||
await self.conductor.spv_node.start(self.conductor.lbcwallet_node)
|
||||
await self.ledger.network.on_connected.first
|
||||
self.assertTrue(self.ledger.network.is_connected)
|
||||
|
||||
|
@@ -161,15 +169,16 @@ class ReconnectTests(IntegrationTestCase):


 class UDPServerFailDiscoveryTest(AsyncioTestCase):

     async def test_wallet_connects_despite_lack_of_udp(self):
         conductor = Conductor()
         conductor.spv_node.udp_port = '0'
-        await conductor.start_blockchain()
-        self.addCleanup(conductor.stop_blockchain)
+        await conductor.start_lbcd()
+        self.addCleanup(conductor.stop_lbcd)
+        await conductor.start_lbcwallet()
+        self.addCleanup(conductor.stop_lbcwallet)
         await conductor.start_spv()
         self.addCleanup(conductor.stop_spv)
-        self.assertFalse(conductor.spv_node.server.bp.status_server.is_running)
+        self.assertFalse(conductor.spv_node.server.status_server.is_running)
         await asyncio.wait_for(conductor.start_wallet(), timeout=5)
         self.addCleanup(conductor.stop_wallet)
         self.assertTrue(conductor.wallet_node.ledger.network.is_connected)
|
|
@ -174,8 +174,7 @@ class PurchaseCommandTests(CommandTestCase):
|
|||
self.merchant_address = await self.account.receiving.get_or_create_usable_address()
|
||||
daemon2 = await self.add_daemon()
|
||||
address2 = await daemon2.wallet_manager.default_account.receiving.get_or_create_usable_address()
|
||||
sendtxid = await self.blockchain.send_to_address(address2, 2)
|
||||
await self.confirm_tx(sendtxid, daemon2.ledger)
|
||||
await self.send_to_address_and_wait(address2, 2, 1, ledger=daemon2.ledger)
|
||||
|
||||
stream = await self.priced_stream('a', '1.0')
|
||||
await self.assertBalance(self.account, '9.987893')
|
||||
|
|
|
@ -63,7 +63,7 @@ class SyncTests(IntegrationTestCase):
|
|||
await self.assertBalance(account1, '1.0')
|
||||
await self.assertBalance(account2, '1.0')
|
||||
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
|
||||
# pay 0.01 from main node to receiving node, would have increased change addresses
|
||||
address0 = (await account0.receiving.get_addresses())[0]
|
||||
|
@ -79,7 +79,7 @@ class SyncTests(IntegrationTestCase):
|
|||
account1.ledger.wait(tx),
|
||||
account2.ledger.wait(tx),
|
||||
])
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
await asyncio.wait([
|
||||
account0.ledger.wait(tx),
|
||||
account1.ledger.wait(tx),
|
||||
|
@ -92,7 +92,7 @@ class SyncTests(IntegrationTestCase):
|
|||
await self.assertBalance(account1, '0.989876')
|
||||
await self.assertBalance(account2, '0.989876')
|
||||
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
|
||||
# create a new mirror node and see if it syncs to same balance from scratch
|
||||
node3 = await self.make_wallet_node(account1.seed)
|
||||
|
|
|
@ -11,7 +11,7 @@ from lbry.wallet.dewies import dict_values_to_lbc
|
|||
class WalletCommands(CommandTestCase):
|
||||
|
||||
async def test_wallet_create_and_add_subscribe(self):
|
||||
session = next(iter(self.conductor.spv_node.server.session_mgr.sessions.values()))
|
||||
session = next(iter(self.conductor.spv_node.server.session_manager.sessions.values()))
|
||||
self.assertEqual(len(session.hashX_subs), 27)
|
||||
wallet = await self.daemon.jsonrpc_wallet_create('foo', create_account=True, single_key=True)
|
||||
self.assertEqual(len(session.hashX_subs), 28)
|
||||
|
@ -23,7 +23,7 @@ class WalletCommands(CommandTestCase):
|
|||
async def test_wallet_syncing_status(self):
|
||||
address = await self.daemon.jsonrpc_address_unused()
|
||||
self.assertFalse(self.daemon.jsonrpc_wallet_status()['is_syncing'])
|
||||
await self.blockchain.send_to_address(address, 1)
|
||||
await self.send_to_address_and_wait(address, 1)
|
||||
await self.ledger._update_tasks.started.wait()
|
||||
self.assertTrue(self.daemon.jsonrpc_wallet_status()['is_syncing'])
|
||||
await self.ledger._update_tasks.done.wait()
|
||||
|
@ -47,9 +47,9 @@ class WalletCommands(CommandTestCase):
|
|||
status = await self.daemon.jsonrpc_status()
|
||||
self.assertEqual(len(status['wallet']['servers']), 1)
|
||||
self.assertEqual(status['wallet']['servers'][0]['port'], 50002)
|
||||
await self.conductor.spv_node.stop(True)
|
||||
await self.conductor.spv_node.stop()
|
||||
self.conductor.spv_node.port = 54320
|
||||
await self.conductor.spv_node.start(self.conductor.blockchain_node)
|
||||
await self.conductor.spv_node.start(self.conductor.lbcwallet_node)
|
||||
status = await self.daemon.jsonrpc_status()
|
||||
self.assertEqual(len(status['wallet']['servers']), 0)
|
||||
self.daemon.jsonrpc_settings_set('lbryum_servers', ['localhost:54320'])
|
||||
|
@ -59,23 +59,22 @@ class WalletCommands(CommandTestCase):
|
|||
self.assertEqual(status['wallet']['servers'][0]['port'], 54320)
|
||||
|
||||
async def test_sending_to_scripthash_address(self):
|
||||
self.assertEqual(await self.blockchain.get_balance(), '95.99973580')
|
||||
bal = await self.blockchain.get_balance()
|
||||
await self.assertBalance(self.account, '10.0')
|
||||
p2sh_address1 = await self.blockchain.get_new_address(self.blockchain.P2SH_SEGWIT_ADDRESS)
|
||||
tx = await self.account_send('2.0', p2sh_address1)
|
||||
self.assertEqual(tx['outputs'][0]['address'], p2sh_address1)
|
||||
self.assertEqual(await self.blockchain.get_balance(), '98.99973580') # +1 lbc for confirm block
|
||||
self.assertEqual(await self.blockchain.get_balance(), str(float(bal)+3)) # +1 lbc for confirm block
|
||||
await self.assertBalance(self.account, '7.999877')
|
||||
await self.wallet_send('3.0', p2sh_address1)
|
||||
self.assertEqual(await self.blockchain.get_balance(), '102.99973580') # +1 lbc for confirm block
|
||||
self.assertEqual(await self.blockchain.get_balance(), str(float(bal)+7)) # +1 lbc for confirm block
|
||||
await self.assertBalance(self.account, '4.999754')
|
||||
|
||||
async def test_balance_caching(self):
|
||||
account2 = await self.daemon.jsonrpc_account_create("Tip-er")
|
||||
address2 = await self.daemon.jsonrpc_address_unused(account2.id)
|
||||
sendtxid = await self.blockchain.send_to_address(address2, 10)
|
||||
await self.confirm_tx(sendtxid)
|
||||
await self.generate(1)
|
||||
await self.send_to_address_and_wait(address2, 10, 2)
|
||||
await self.ledger.tasks_are_done() # don't mess with the query count while we need it
|
||||
|
||||
wallet_balance = self.daemon.jsonrpc_wallet_balance
|
||||
ledger = self.ledger
|
||||
|
@ -90,14 +89,16 @@ class WalletCommands(CommandTestCase):
|
|||
self.assertIsNone(ledger._balance_cache.get(self.account.id))
|
||||
|
||||
query_count += 2
|
||||
self.assertEqual(await wallet_balance(), expected)
|
||||
balance = await wallet_balance()
|
||||
self.assertEqual(self.ledger.db.db.query_count, query_count)
|
||||
self.assertEqual(balance, expected)
|
||||
self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(self.account.id))['total'], '10.0')
|
||||
self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(account2.id))['total'], '10.0')
|
||||
|
||||
# calling again uses cache
|
||||
self.assertEqual(await wallet_balance(), expected)
|
||||
balance = await wallet_balance()
|
||||
self.assertEqual(self.ledger.db.db.query_count, query_count)
|
||||
self.assertEqual(balance, expected)
|
||||
self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(self.account.id))['total'], '10.0')
|
||||
self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(account2.id))['total'], '10.0')
|
||||
|
||||
|
@ -123,8 +124,7 @@ class WalletCommands(CommandTestCase):
|
|||
wallet2 = await self.daemon.jsonrpc_wallet_create('foo', create_account=True)
|
||||
account3 = wallet2.default_account
|
||||
address3 = await self.daemon.jsonrpc_address_unused(account3.id, wallet2.id)
|
||||
await self.confirm_tx(await self.blockchain.send_to_address(address3, 1))
|
||||
await self.generate(1)
|
||||
await self.send_to_address_and_wait(address3, 1, 1)
|
||||
|
||||
account_balance = self.daemon.jsonrpc_account_balance
|
||||
wallet_balance = self.daemon.jsonrpc_wallet_balance
|
||||
|
@ -154,7 +154,7 @@ class WalletCommands(CommandTestCase):
|
|||
address2 = await self.daemon.jsonrpc_address_unused(account2.id)
|
||||
|
||||
# send lbc to someone else
|
||||
tx = await self.daemon.jsonrpc_account_send('1.0', address2)
|
||||
tx = await self.daemon.jsonrpc_account_send('1.0', address2, blocking=True)
|
||||
await self.confirm_tx(tx.id)
|
||||
self.assertEqual(await account_balance(), {
|
||||
'total': '8.97741',
|
||||
|
@ -187,7 +187,7 @@ class WalletCommands(CommandTestCase):
|
|||
})
|
||||
|
||||
# tip claimed
|
||||
tx = await self.daemon.jsonrpc_support_abandon(txid=support1['txid'], nout=0)
|
||||
tx = await self.daemon.jsonrpc_support_abandon(txid=support1['txid'], nout=0, blocking=True)
|
||||
await self.confirm_tx(tx.id)
|
||||
self.assertEqual(await account_balance(), {
|
||||
'total': '9.277303',
|
||||
|
@ -238,8 +238,7 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
|
|||
"carbon smart garage balance margin twelve"
|
||||
)
|
||||
address = (await self.daemon2.wallet_manager.default_account.receiving.get_addresses(limit=1, only_usable=True))[0]
|
||||
sendtxid = await self.blockchain.send_to_address(address, 1)
|
||||
await self.confirm_tx(sendtxid, self.daemon2.ledger)
|
||||
await self.send_to_address_and_wait(address, 1, 1, ledger=self.daemon2.ledger)
|
||||
|
||||
def assertWalletEncrypted(self, wallet_path, encrypted):
|
||||
with open(wallet_path) as opened:
|
||||
|
@ -294,7 +293,7 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
|
|||
'3056301006072a8648ce3d020106052b8104000a034200049ae7283f3f6723e0a1'
|
||||
'66b7e19e1d1167f6dc5f4af61b4a58066a0d2a8bed2b35c66bccb4ec3eba316b16'
|
||||
'a97a6d6a4a8effd29d748901bb9789352519cd00b13d'
|
||||
), self.daemon2)
|
||||
), self.daemon2, blocking=True)
|
||||
await self.confirm_tx(channel['txid'], self.daemon2.ledger)
|
||||
|
||||
# both daemons will have the channel but only one has the cert so far
|
||||
|
|
|
@ -5,7 +5,6 @@ import lbry.wallet
|
|||
from lbry.error import ServerPaymentFeeAboveMaxAllowedError
|
||||
from lbry.wallet.network import ClientSession
|
||||
from lbry.wallet.rpc import RPCError
|
||||
from lbry.wallet.server.db.elasticsearch.sync import make_es_index_and_run_sync
|
||||
from lbry.wallet.server.session import LBRYElectrumX
|
||||
from lbry.testcase import IntegrationTestCase, CommandTestCase
|
||||
from lbry.wallet.orchstr8.node import SPVNode
|
||||
|
@ -25,13 +24,13 @@ class TestSessions(IntegrationTestCase):
|
|||
)
|
||||
await session.create_connection()
|
||||
await session.send_request('server.banner', ())
|
||||
self.assertEqual(len(self.conductor.spv_node.server.session_mgr.sessions), 1)
|
||||
self.assertEqual(len(self.conductor.spv_node.server.session_manager.sessions), 1)
|
||||
self.assertFalse(session.is_closing())
|
||||
await asyncio.sleep(1.1)
|
||||
with self.assertRaises(asyncio.TimeoutError):
|
||||
await session.send_request('server.banner', ())
|
||||
self.assertTrue(session.is_closing())
|
||||
self.assertEqual(len(self.conductor.spv_node.server.session_mgr.sessions), 0)
|
||||
self.assertEqual(len(self.conductor.spv_node.server.session_manager.sessions), 0)
|
||||
|
||||
async def test_proper_version(self):
|
||||
info = await self.ledger.network.get_server_features()
|
||||
|
@ -46,7 +45,7 @@ class TestSessions(IntegrationTestCase):
|
|||
|
||||
|
||||
class TestUsagePayment(CommandTestCase):
|
||||
async def _test_single_server_payment(self):
|
||||
async def test_single_server_payment(self):
|
||||
wallet_pay_service = self.daemon.component_manager.get_component('wallet_server_payments')
|
||||
wallet_pay_service.payment_period = 1
|
||||
# only starts with a positive max key fee
|
||||
|
@ -64,7 +63,7 @@ class TestUsagePayment(CommandTestCase):
|
|||
self.assertEqual(history, [])
|
||||
|
||||
node = SPVNode(self.conductor.spv_module, node_number=2)
|
||||
await node.start(self.blockchain, extraconf={"PAYMENT_ADDRESS": address, "DAILY_FEE": "1.1"})
|
||||
await node.start(self.blockchain, extraconf={"payment_address": address, "daily_fee": "1.1"})
|
||||
self.addCleanup(node.stop)
|
||||
self.daemon.jsonrpc_settings_set('lbryum_servers', [f"{node.hostname}:{node.port}"])
|
||||
await self.daemon.jsonrpc_wallet_reconnect()
|
||||
|
@ -90,56 +89,77 @@ class TestUsagePayment(CommandTestCase):
|
|||
|
||||
class TestESSync(CommandTestCase):
|
||||
async def test_es_sync_utility(self):
|
||||
es_writer = self.conductor.spv_node.es_writer
|
||||
server_search_client = self.conductor.spv_node.server.session_manager.search_index
|
||||
|
||||
for i in range(10):
|
||||
await self.stream_create(f"stream{i}", bid='0.001')
|
||||
await self.generate(1)
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
db = self.conductor.spv_node.server.db
|
||||
env = self.conductor.spv_node.server.env
|
||||
|
||||
await db.search_index.delete_index()
|
||||
db.search_index.clear_caches()
|
||||
self.assertEqual(0, len(await self.claim_search(order_by=['height'])))
|
||||
await db.search_index.stop()
|
||||
|
||||
async def resync():
|
||||
await db.search_index.start()
|
||||
db.search_index.clear_caches()
|
||||
await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
# delete the index and verify nothing is returned by claim search
|
||||
await es_writer.delete_index()
|
||||
server_search_client.clear_caches()
|
||||
self.assertEqual(0, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
await resync()
|
||||
|
||||
# this time we will test a migration from unversioned to v1
|
||||
await db.search_index.sync_client.indices.delete_template(db.search_index.index)
|
||||
await db.search_index.stop()
|
||||
|
||||
await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
|
||||
await db.search_index.start()
|
||||
|
||||
await resync()
|
||||
# reindex, 10 claims should be returned
|
||||
await es_writer.reindex()
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
server_search_client.clear_caches()
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
# reindex again, this should not appear to do anything but will delete and reinsert the same 10 claims
|
||||
await es_writer.reindex()
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
server_search_client.clear_caches()
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
# delete the index again and stop the writer, upon starting it the writer should reindex automatically
|
||||
await es_writer.stop(delete_index=True)
|
||||
server_search_client.clear_caches()
|
||||
self.assertEqual(0, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
await es_writer.start(reindex=True)
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
# stop the es writer and advance the chain by 1, adding a new claim. upon resuming the es writer, it should
|
||||
# add the new claim
|
||||
await es_writer.stop()
|
||||
await self.stream_create(f"stream11", bid='0.001', confirm=False)
|
||||
generate_block_task = asyncio.create_task(self.generate(1))
|
||||
await es_writer.start()
|
||||
await generate_block_task
|
||||
self.assertEqual(11, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
|
||||
# # this time we will test a migration from unversioned to v1
|
||||
# await db.search_index.sync_client.indices.delete_template(db.search_index.index)
|
||||
# await db.search_index.stop()
|
||||
#
|
||||
# await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
|
||||
# await db.search_index.start()
|
||||
#
|
||||
# await es_writer.reindex()
|
||||
# self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
|
||||
class TestHubDiscovery(CommandTestCase):
|
||||
|
||||
async def test_hub_discovery(self):
|
||||
us_final_node = SPVNode(self.conductor.spv_module, node_number=2)
|
||||
await us_final_node.start(self.blockchain, extraconf={"COUNTRY": "US"})
|
||||
await us_final_node.start(self.blockchain, extraconf={"country": "US"})
|
||||
self.addCleanup(us_final_node.stop)
|
||||
final_node_host = f"{us_final_node.hostname}:{us_final_node.port}"
|
||||
|
||||
kp_final_node = SPVNode(self.conductor.spv_module, node_number=3)
|
||||
await kp_final_node.start(self.blockchain, extraconf={"COUNTRY": "KP"})
|
||||
await kp_final_node.start(self.blockchain, extraconf={"country": "KP"})
|
||||
self.addCleanup(kp_final_node.stop)
|
||||
kp_final_node_host = f"{kp_final_node.hostname}:{kp_final_node.port}"
|
||||
|
||||
relay_node = SPVNode(self.conductor.spv_module, node_number=4)
|
||||
await relay_node.start(self.blockchain, extraconf={
|
||||
"COUNTRY": "FR",
|
||||
"PEER_HUBS": ",".join([kp_final_node_host, final_node_host])
|
||||
"country": "FR",
|
||||
"peer_hubs": ",".join([kp_final_node_host, final_node_host])
|
||||
})
|
||||
relay_node_host = f"{relay_node.hostname}:{relay_node.port}"
|
||||
self.addCleanup(relay_node.stop)
|
||||
|
@ -186,7 +206,7 @@ class TestHubDiscovery(CommandTestCase):
|
|||
self.daemon.ledger.network.client.server_address_and_port, ('127.0.0.1', kp_final_node.port)
|
||||
)
|
||||
|
||||
kp_final_node.server.session_mgr._notify_peer('127.0.0.1:9988')
|
||||
kp_final_node.server.session_manager._notify_peer('127.0.0.1:9988')
|
||||
await self.daemon.ledger.network.on_hub.first
|
||||
await asyncio.sleep(0.5) # wait for above event to be processed by other listeners
|
||||
self.assertEqual(
|
||||

@@ -125,18 +125,6 @@ class ClaimSearchCommand(ClaimTestCase):
         with self.assertRaises(ConnectionResetError):
             await self.claim_search(claim_ids=claim_ids)

-    async def test_claim_search_as_reader_server(self):
-        node2 = SPVNode(self.conductor.spv_module, node_number=2)
-        current_prefix = self.conductor.spv_node.server.bp.env.es_index_prefix
-        await node2.start(self.blockchain, extraconf={'ES_MODE': 'reader', 'ES_INDEX_PREFIX': current_prefix})
-        self.addCleanup(node2.stop)
-        self.ledger.network.config['default_servers'] = [(node2.hostname, node2.port)]
-        await self.ledger.stop()
-        await self.ledger.start()
-        channel2 = await self.channel_create('@abc', '0.1', allow_duplicate_name=True)
-        await asyncio.sleep(1)  # fixme: find a way to block on the writer
-        await self.assertFindsClaims([channel2], name='@abc')
-
     async def test_basic_claim_search(self):
         await self.create_channel()
         channel_txo = self.channel['outputs'][0]
@ -494,8 +482,7 @@ class ClaimSearchCommand(ClaimTestCase):
|
|||
tx = await Transaction.claim_create(
|
||||
'unknown', b'{"sources":{"lbry_sd_hash":""}}', 1, address, [self.account], self.account)
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
await self.broadcast_and_confirm(tx)
|
||||
|
||||
octet = await self.stream_create()
|
||||
video = await self.stream_create('chrome', file_path=self.video_file_name)
|
||||
|
@ -1226,7 +1213,7 @@ class ChannelCommands(CommandTestCase):
|
|||
data_to_sign = "CAFEBABE"
|
||||
# claim new name
|
||||
await self.channel_create('@someotherchan')
|
||||
channel_tx = await self.daemon.jsonrpc_channel_create('@signer', '0.1')
|
||||
channel_tx = await self.daemon.jsonrpc_channel_create('@signer', '0.1', blocking=True)
|
||||
await self.confirm_tx(channel_tx.id)
|
||||
channel = channel_tx.outputs[0]
|
||||
signature1 = await self.out(self.daemon.jsonrpc_channel_sign(channel_name='@signer', hexdata=data_to_sign))
|
||||
|
@ -1373,7 +1360,7 @@ class StreamCommands(ClaimTestCase):
|
|||
self.assertEqual('8.989893', (await self.daemon.jsonrpc_account_balance())['available'])
|
||||
|
||||
result = await self.out(self.daemon.jsonrpc_account_send(
|
||||
'5.0', await self.daemon.jsonrpc_address_unused(account2_id)
|
||||
'5.0', await self.daemon.jsonrpc_address_unused(account2_id), blocking=True
|
||||
))
|
||||
await self.confirm_tx(result['txid'])
|
||||
|
||||
|
@ -1514,9 +1501,11 @@ class StreamCommands(ClaimTestCase):
|
|||
await self.channel_create('@filtering', '0.1')
|
||||
)
|
||||
self.conductor.spv_node.server.db.filtering_channel_hashes.add(bytes.fromhex(filtering_channel_id))
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_streams))
|
||||
self.conductor.spv_node.es_writer.db.filtering_channel_hashes.add(bytes.fromhex(filtering_channel_id))
|
||||
|
||||
self.assertEqual(0, len(self.conductor.spv_node.es_writer.db.filtered_streams))
|
||||
await self.stream_repost(bad_content_id, 'filter1', '0.1', channel_name='@filtering')
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_streams))
|
||||
self.assertEqual(1, len(self.conductor.spv_node.es_writer.db.filtered_streams))
|
||||
|
||||
# search for filtered content directly
|
||||
result = await self.out(self.daemon.jsonrpc_claim_search(name='bad_content'))
|
||||
|
@ -1560,16 +1549,17 @@ class StreamCommands(ClaimTestCase):
|
|||
)
|
||||
# test setting from env vars and starting from scratch
|
||||
await self.conductor.spv_node.stop(False)
|
||||
await self.conductor.spv_node.start(self.conductor.blockchain_node,
|
||||
extraconf={'BLOCKING_CHANNEL_IDS': blocking_channel_id,
|
||||
'FILTERING_CHANNEL_IDS': filtering_channel_id})
|
||||
await self.conductor.spv_node.start(self.conductor.lbcwallet_node,
|
||||
extraconf={'blocking_channel_ids': [blocking_channel_id],
|
||||
'filtering_channel_ids': [filtering_channel_id]})
|
||||
await self.daemon.wallet_manager.reset()
|
||||
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_streams))
|
||||
self.assertEqual(0, len(self.conductor.spv_node.es_writer.db.blocked_streams))
|
||||
await self.stream_repost(bad_content_id, 'block1', '0.1', channel_name='@blocking')
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_streams))
|
||||
self.assertEqual(1, len(self.conductor.spv_node.es_writer.db.blocked_streams))
|
||||
|
||||
# blocked content is not resolveable
|
||||
print((await self.resolve('lbry://@some_channel/bad_content')))
|
||||
error = (await self.resolve('lbry://@some_channel/bad_content'))['error']
|
||||
self.assertEqual(error['name'], 'BLOCKED')
|
||||
self.assertTrue(error['text'].startswith("Resolve of 'lbry://@some_channel/bad_content' was censored"))
|
||||
|
@ -2177,7 +2167,7 @@ class SupportCommands(CommandTestCase):
|
|||
tip = await self.out(
|
||||
self.daemon.jsonrpc_support_create(
|
||||
claim_id, '1.0', True, account_id=account2.id, wallet_id='wallet2',
|
||||
funding_account_ids=[account2.id])
|
||||
funding_account_ids=[account2.id], blocking=True)
|
||||
)
|
||||
await self.confirm_tx(tip['txid'])
|
||||
|
||||
|
@ -2209,7 +2199,7 @@ class SupportCommands(CommandTestCase):
|
|||
support = await self.out(
|
||||
self.daemon.jsonrpc_support_create(
|
||||
claim_id, '2.0', False, account_id=account2.id, wallet_id='wallet2',
|
||||
funding_account_ids=[account2.id])
|
||||
funding_account_ids=[account2.id], blocking=True)
|
||||
)
|
||||
await self.confirm_tx(support['txid'])
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
import unittest
|
||||
from unittest import skipIf
|
||||
import asyncio
|
||||
import os
|
||||
|
@ -36,8 +37,7 @@ class FileCommands(CommandTestCase):
|
|||
tx_to_update.outputs[0], claim, 1, address, [self.account], self.account
|
||||
)
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
await self.broadcast_and_confirm(tx)
|
||||
self.client_session = self.daemon.file_manager.source_managers['torrent'].torrent_session
|
||||
self.client_session._session.add_dht_node(('localhost', 4040))
|
||||
self.client_session.wait_start = False # fixme: this is super slow on tests
|
||||
|
@ -216,6 +216,7 @@ class FileCommands(CommandTestCase):
|
|||
await self.wait_files_to_complete()
|
||||
self.assertNotEqual(first_path, second_path)
|
||||
|
||||
@unittest.SkipTest # FIXME: claimname/updateclaim is gone. #3480 wip, unblock #3479"
|
||||
async def test_file_list_updated_metadata_on_resolve(self):
|
||||
await self.stream_create('foo', '0.01')
|
||||
txo = (await self.daemon.resolve(self.wallet.accounts, ['lbry://foo']))['lbry://foo']
|
||||
|
@ -504,8 +505,7 @@ class FileCommands(CommandTestCase):
|
|||
tx.outputs[0].claim.stream.fee.address_bytes = b''
|
||||
tx.outputs[0].script.generate()
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
await self.broadcast_and_confirm(tx)
|
||||
|
||||
async def __raw_value_update_no_fee_amount(self, tx, claim_address):
|
||||
tx = await self.daemon.jsonrpc_stream_update(
|
||||
|
@ -515,8 +515,7 @@ class FileCommands(CommandTestCase):
|
|||
tx.outputs[0].claim.stream.fee.message.ClearField('amount')
|
||||
tx.outputs[0].script.generate()
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
await self.broadcast_and_confirm(tx)
|
||||
|
||||
|
||||
class DiskSpaceManagement(CommandTestCase):
|
||||
|
|
|
@ -80,7 +80,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
|
|||
|
||||
# After some soul searching Chris decides that his story needs more
|
||||
# heart and a better ending. He takes down the story and begins the rewrite.
|
||||
abandon = await self.out(self.daemon.jsonrpc_stream_abandon(claim_id, blocking=False))
|
||||
abandon = await self.out(self.daemon.jsonrpc_stream_abandon(claim_id, blocking=True))
|
||||
self.assertEqual(abandon['inputs'][0]['claim_id'], claim_id)
|
||||
await self.confirm_tx(abandon['txid'])
|
||||
|
||||
|
@ -103,7 +103,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
|
|||
# 1 LBC to which Chris readily obliges
|
||||
ramsey_account_id = (await self.out(self.daemon.jsonrpc_account_create("Ramsey")))['id']
|
||||
ramsey_address = await self.daemon.jsonrpc_address_unused(ramsey_account_id)
|
||||
result = await self.out(self.daemon.jsonrpc_account_send('1.0', ramsey_address))
|
||||
result = await self.out(self.daemon.jsonrpc_account_send('1.0', ramsey_address, blocking=True))
|
||||
self.assertIn("txid", result)
|
||||
await self.confirm_tx(result['txid'])
|
||||
|
||||
|
@ -133,7 +133,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
|
|||
# And voila, and bravo and encore! His Best Friend Ramsey read the story and immediately knew this was a hit
|
||||
# Now to keep this claim winning on the lbry blockchain he immediately supports the claim
|
||||
tx = await self.out(self.daemon.jsonrpc_support_create(
|
||||
claim_id2, '0.2', account_id=ramsey_account_id
|
||||
claim_id2, '0.2', account_id=ramsey_account_id, blocking=True
|
||||
))
|
||||
await self.confirm_tx(tx['txid'])
|
||||
|
||||
|
@ -147,7 +147,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
|
|||
# Now he also wanted to support the original creator of the Award Winning Novel
|
||||
# So he quickly decides to send a tip to him
|
||||
tx = await self.out(
|
||||
self.daemon.jsonrpc_support_create(claim_id2, '0.3', tip=True, account_id=ramsey_account_id)
|
||||
self.daemon.jsonrpc_support_create(claim_id2, '0.3', tip=True, account_id=ramsey_account_id, blocking=True)
|
||||
)
|
||||
await self.confirm_tx(tx['txid'])
|
||||
|
||||
|
@ -158,7 +158,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
|
|||
await self.generate(5)
|
||||
|
||||
# Seeing the ravishing success of his novel Chris adds support to his claim too
|
||||
tx = await self.out(self.daemon.jsonrpc_support_create(claim_id2, '0.4'))
|
||||
tx = await self.out(self.daemon.jsonrpc_support_create(claim_id2, '0.4', blocking=True))
|
||||
await self.confirm_tx(tx['txid'])
|
||||
|
||||
# And check if his support showed up
|
||||
|
@ -183,7 +183,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
|
|||
|
||||
# But sadly Ramsey wasn't so pleased. It was hard for him to tell Chris...
|
||||
# Chris, though a bit heartbroken, abandoned the claim for now, but instantly started working on new hit lyrics
|
||||
abandon = await self.out(self.daemon.jsonrpc_stream_abandon(txid=tx['txid'], nout=0, blocking=False))
|
||||
abandon = await self.out(self.daemon.jsonrpc_stream_abandon(txid=tx['txid'], nout=0, blocking=True))
|
||||
self.assertTrue(abandon['inputs'][0]['txid'], tx['txid'])
|
||||
await self.confirm_tx(abandon['txid'])
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import asyncio
|
||||
import json
|
||||
import hashlib
|
||||
import sys
|
||||
from bisect import bisect_right
|
||||
from binascii import hexlify, unhexlify
|
||||
from collections import defaultdict
|
||||
|
@ -23,7 +24,7 @@ class BaseResolveTestCase(CommandTestCase):
|
|||
def assertMatchESClaim(self, claim_from_es, claim_from_db):
|
||||
self.assertEqual(claim_from_es['claim_hash'][::-1].hex(), claim_from_db.claim_hash.hex())
|
||||
self.assertEqual(claim_from_es['claim_id'], claim_from_db.claim_hash.hex())
|
||||
self.assertEqual(claim_from_es['activation_height'], claim_from_db.activation_height)
|
||||
self.assertEqual(claim_from_es['activation_height'], claim_from_db.activation_height, f"es height: {claim_from_es['activation_height']}, rocksdb height: {claim_from_db.activation_height}")
|
||||
self.assertEqual(claim_from_es['last_take_over_height'], claim_from_db.last_takeover_height)
|
||||
self.assertEqual(claim_from_es['tx_id'], claim_from_db.tx_hash[::-1].hex())
|
||||
self.assertEqual(claim_from_es['tx_nout'], claim_from_db.position)
|
||||
|
@ -31,125 +32,151 @@ class BaseResolveTestCase(CommandTestCase):
|
|||
self.assertEqual(claim_from_es['effective_amount'], claim_from_db.effective_amount)
|
||||
|
||||
def assertMatchDBClaim(self, expected, claim):
|
||||
self.assertEqual(expected['claimId'], claim.claim_hash.hex())
|
||||
self.assertEqual(expected['validAtHeight'], claim.activation_height)
|
||||
self.assertEqual(expected['lastTakeoverHeight'], claim.last_takeover_height)
|
||||
self.assertEqual(expected['txId'], claim.tx_hash[::-1].hex())
|
||||
self.assertEqual(expected['claimid'], claim.claim_hash.hex())
|
||||
self.assertEqual(expected['validatheight'], claim.activation_height)
|
||||
self.assertEqual(expected['lasttakeoverheight'], claim.last_takeover_height)
|
||||
self.assertEqual(expected['txid'], claim.tx_hash[::-1].hex())
|
||||
self.assertEqual(expected['n'], claim.position)
|
||||
self.assertEqual(expected['amount'], claim.amount)
|
||||
self.assertEqual(expected['effectiveAmount'], claim.effective_amount)
|
||||
self.assertEqual(expected['effectiveamount'], claim.effective_amount)
|
||||
|
||||
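# Note on the expected keys checked above: the lbrycrd JSON fields move from
# camelCase ('claimId', 'validAtHeight', 'lastTakeoverHeight', 'txId',
# 'effectiveAmount') to all-lowercase ('claimid', 'validatheight', ...),
# presumably matching the casing of the node RPC these tests now run against
# (see the switch from blockchain_node to lbcwallet_node further down).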
async def assertResolvesToClaimId(self, name, claim_id):
|
||||
other = await self.resolve(name)
|
||||
if claim_id is None:
|
||||
self.assertIn('error', other)
|
||||
self.assertEqual(other['error']['name'], 'NOT_FOUND')
|
||||
claims_from_es = (await self.conductor.spv_node.server.bp.db.search_index.search(name=name))[0]
|
||||
claims_from_es = (await self.conductor.spv_node.server.session_manager.search_index.search(name=name))[0]
|
||||
claims_from_es = [c['claim_hash'][::-1].hex() for c in claims_from_es]
|
||||
self.assertNotIn(claim_id, claims_from_es)
|
||||
else:
|
||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(claim_id=claim_id)
|
||||
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id)
|
||||
self.assertEqual(claim_id, other['claim_id'])
|
||||
self.assertEqual(claim_id, claim_from_es[0][0]['claim_hash'][::-1].hex())
|
||||
|
||||
async def assertNoClaimForName(self, name: str):
|
||||
lbrycrd_winning = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
|
||||
stream, channel, _, _ = await self.conductor.spv_node.server.bp.db.resolve(name)
|
||||
self.assertNotIn('claimId', lbrycrd_winning)
|
||||
lbrycrd_winning = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name))
|
||||
stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name)
|
||||
if 'claims' in lbrycrd_winning and lbrycrd_winning['claims'] is not None:
|
||||
self.assertEqual(len(lbrycrd_winning['claims']), 0)
|
||||
if stream is not None:
|
||||
self.assertIsInstance(stream, LookupError)
|
||||
else:
|
||||
self.assertIsInstance(channel, LookupError)
|
||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(name=name)
|
||||
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(name=name)
|
||||
self.assertListEqual([], claim_from_es[0])
|
||||
|
||||
async def assertNoClaim(self, claim_id: str):
|
||||
self.assertDictEqual(
|
||||
{}, json.loads(await self.blockchain._cli_cmnd('getclaimbyid', claim_id))
|
||||
)
|
||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(claim_id=claim_id)
|
||||
async def assertNoClaim(self, name: str, claim_id: str):
|
||||
expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebyid', name, '["' + claim_id + '"]'))
|
||||
if 'claims' in expected and expected['claims'] is not None:
|
||||
# ensure that if we do have the matching claim, it is not active
|
||||
self.assertEqual(expected['claims'][0]['effectiveamount'], 0)
|
||||
|
||||
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id)
|
||||
self.assertListEqual([], claim_from_es[0])
|
||||
claim = await self.conductor.spv_node.server.bp.db.fs_getclaimbyid(claim_id)
|
||||
claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id)
|
||||
self.assertIsNone(claim)
|
||||
|
||||
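# The '["' + claim_id + '"]' concatenation above builds the one-element JSON
# array argument passed to getclaimsfornamebyid; since json is already imported
# in this module, json.dumps([claim_id]) produces the same string without the
# manual quoting, e.g.:
#
#     expected = json.loads(await self.blockchain._cli_cmnd(
#         'getclaimsfornamebyid', name, json.dumps([claim_id])))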
async def assertMatchWinningClaim(self, name):
|
||||
expected = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
|
||||
stream, channel, _, _ = await self.conductor.spv_node.server.bp.db.resolve(name)
|
||||
expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebybid', name, "[0]"))
|
||||
stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name)
|
||||
claim = stream if stream else channel
|
||||
await self._assertMatchClaim(expected, claim)
|
||||
expected['claims'][0]['lasttakeoverheight'] = expected['lasttakeoverheight']
|
||||
await self._assertMatchClaim(expected['claims'][0], claim)
|
||||
return claim
|
||||
|
||||
async def _assertMatchClaim(self, expected, claim):
|
||||
self.assertMatchDBClaim(expected, claim)
|
||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
|
||||
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||
claim_id=claim.claim_hash.hex()
|
||||
)
|
||||
self.assertEqual(len(claim_from_es[0]), 1)
|
||||
self.assertMatchESClaim(claim_from_es[0][0], claim)
|
||||
self._check_supports(claim.claim_hash.hex(), expected['supports'], claim_from_es[0][0]['support_amount'])
|
||||
self._check_supports(claim.claim_hash.hex(), expected.get('supports', []),
|
||||
claim_from_es[0][0]['support_amount'])
|
||||
|
||||
async def assertMatchClaim(self, claim_id, is_active_in_lbrycrd=True):
|
||||
expected = json.loads(await self.blockchain._cli_cmnd('getclaimbyid', claim_id))
|
||||
claim = await self.conductor.spv_node.server.bp.db.fs_getclaimbyid(claim_id)
|
||||
if is_active_in_lbrycrd:
|
||||
if not expected:
|
||||
self.assertIsNone(claim)
|
||||
return
|
||||
self.assertMatchDBClaim(expected, claim)
|
||||
else:
|
||||
self.assertDictEqual({}, expected)
|
||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
|
||||
async def assertMatchClaim(self, name, claim_id, is_active_in_lbrycrd=True):
|
||||
claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id)
|
||||
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||
claim_id=claim.claim_hash.hex()
|
||||
)
|
||||
self.assertEqual(len(claim_from_es[0]), 1)
|
||||
self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), claim.claim_hash.hex())
|
||||
self.assertMatchESClaim(claim_from_es[0][0], claim)
|
||||
self._check_supports(
|
||||
claim.claim_hash.hex(), expected.get('supports', []), claim_from_es[0][0]['support_amount'],
|
||||
is_active_in_lbrycrd
|
||||
)
|
||||
|
||||
expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebyid', name, '["' + claim_id + '"]'))
|
||||
if is_active_in_lbrycrd:
|
||||
if not expected:
|
||||
self.assertIsNone(claim)
|
||||
return
|
||||
expected['claims'][0]['lasttakeoverheight'] = expected['lasttakeoverheight']
|
||||
self.assertMatchDBClaim(expected['claims'][0], claim)
|
||||
self._check_supports(claim.claim_hash.hex(), expected['claims'][0].get('supports', []),
|
||||
claim_from_es[0][0]['support_amount'])
|
||||
else:
|
||||
if 'claims' in expected and expected['claims'] is not None:
|
||||
# ensure that if we do have the matching claim, it is not active
|
||||
self.assertEqual(expected['claims'][0]['effectiveamount'], 0)
|
||||
return claim
|
||||
|
||||
async def assertMatchClaimIsWinning(self, name, claim_id):
|
||||
self.assertEqual(claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex())
|
||||
await self.assertMatchClaimsForName(name)
|
||||
|
||||
def _check_supports(self, claim_id, lbrycrd_supports, es_support_amount, is_active_in_lbrycrd=True):
|
||||
total_amount = 0
|
||||
db = self.conductor.spv_node.server.bp.db
|
||||
def _check_supports(self, claim_id, lbrycrd_supports, es_support_amount):
|
||||
total_lbrycrd_amount = 0.0
|
||||
total_es_amount = 0.0
|
||||
active_es_amount = 0.0
|
||||
db = self.conductor.spv_node.server.db
|
||||
es_supports = db.get_supports(bytes.fromhex(claim_id))
|
||||
|
||||
for i, (tx_num, position, amount) in enumerate(db.get_supports(bytes.fromhex(claim_id))):
|
||||
total_amount += amount
|
||||
if is_active_in_lbrycrd:
|
||||
support = lbrycrd_supports[i]
|
||||
self.assertEqual(support['txId'], db.prefix_db.tx_hash.get(tx_num, deserialize_value=False)[::-1].hex())
|
||||
self.assertEqual(support['n'], position)
|
||||
self.assertEqual(support['height'], bisect_right(db.tx_counts, tx_num))
|
||||
self.assertEqual(support['validAtHeight'], db.get_activation(tx_num, position, is_support=True))
|
||||
self.assertEqual(total_amount, es_support_amount, f"lbrycrd support amount: {total_amount} vs es: {es_support_amount}")
|
||||
# we're only concerned about active supports here, and they should match
|
||||
self.assertTrue(len(es_supports) >= len(lbrycrd_supports))
|
||||
|
||||
for i, (tx_num, position, amount) in enumerate(es_supports):
|
||||
total_es_amount += amount
|
||||
valid_height = db.get_activation(tx_num, position, is_support=True)
|
||||
if valid_height > db.db_height:
|
||||
continue
|
||||
active_es_amount += amount
|
||||
txid = db.prefix_db.tx_hash.get(tx_num, deserialize_value=False)[::-1].hex()
|
||||
support = next(filter(lambda s: s['txid'] == txid and s['n'] == position, lbrycrd_supports))
|
||||
total_lbrycrd_amount += support['amount']
|
||||
self.assertEqual(support['height'], bisect_right(db.tx_counts, tx_num))
|
||||
self.assertEqual(support['validatheight'], valid_height)
|
||||
|
||||
self.assertEqual(total_es_amount, es_support_amount)
|
||||
self.assertEqual(active_es_amount, total_lbrycrd_amount)
|
||||
|
||||
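# Restating the invariant _check_supports now enforces: every support known to
# the hub's db counts toward total_es_amount, which must equal the
# support_amount reported by the search index, while only supports whose
# activation height is at or below db_height are matched (by txid and nout)
# against lbrycrd's list, and those two active sums must agree as well.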
async def assertMatchClaimsForName(self, name):
|
||||
expected = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name))
|
||||
|
||||
db = self.conductor.spv_node.server.bp.db
|
||||
# self.assertEqual(len(expected['claims']), len(db_claims.claims))
|
||||
# self.assertEqual(expected['lastTakeoverHeight'], db_claims.lastTakeoverHeight)
|
||||
last_takeover = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))['lastTakeoverHeight']
|
||||
expected = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name, "", "true"))
|
||||
db = self.conductor.spv_node.server.db
|
||||
|
||||
for c in expected['claims']:
|
||||
c['lastTakeoverHeight'] = last_takeover
|
||||
claim_id = c['claimId']
|
||||
c['lasttakeoverheight'] = expected['lasttakeoverheight']
|
||||
claim_id = c['claimid']
|
||||
claim_hash = bytes.fromhex(claim_id)
|
||||
claim = db._fs_get_claim_by_hash(claim_hash)
|
||||
self.assertMatchDBClaim(c, claim)
|
||||
|
||||
claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
|
||||
claim_id=c['claimId']
|
||||
claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||
claim_id=claim_id
|
||||
)
|
||||
self.assertEqual(len(claim_from_es[0]), 1)
|
||||
self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), c['claimId'])
|
||||
self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), claim_id)
|
||||
self.assertMatchESClaim(claim_from_es[0][0], claim)
|
||||
self._check_supports(c['claimId'], c['supports'], claim_from_es[0][0]['support_amount'])
|
||||
self._check_supports(claim_id, c.get('supports', []),
|
||||
claim_from_es[0][0]['support_amount'])
|
||||
|
||||
async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int,
|
||||
non_winning_claims: List[ClaimStateValue]):
|
||||
self.assertEqual(height, self.conductor.spv_node.server.db.db_height)
|
||||
await self.assertMatchClaimIsWinning(name, winning_claim_id)
|
||||
for non_winning in non_winning_claims:
|
||||
claim = await self.assertMatchClaim(
|
||||
name, non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd
|
||||
)
|
||||
self.assertEqual(non_winning.activation_height, claim.activation_height)
|
||||
self.assertEqual(last_takeover_height, claim.last_takeover_height)
class ResolveCommand(BaseResolveTestCase):
|
||||
|
@ -261,19 +288,20 @@ class ResolveCommand(BaseResolveTestCase):
|
|||
tx_details = await self.blockchain.get_raw_transaction(claim['txid'])
|
||||
self.assertEqual(claim['confirmations'], json.loads(tx_details)['confirmations'])
|
||||
|
||||
# FIXME: claimname/updateclaim is gone. #3480 wip, unblock #3479
|
||||
# resolve handles invalid data
|
||||
await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1")
|
||||
await self.generate(1)
|
||||
response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish"))
|
||||
self.assertSetEqual({'lbry://gibberish'}, set(response))
|
||||
claim = response['lbry://gibberish']
|
||||
self.assertEqual(claim['name'], 'gibberish')
|
||||
self.assertNotIn('value', claim)
|
||||
# await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1")
|
||||
# await self.generate(1)
|
||||
# response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish"))
|
||||
# self.assertSetEqual({'lbry://gibberish'}, set(response))
|
||||
# claim = response['lbry://gibberish']
|
||||
# self.assertEqual(claim['name'], 'gibberish')
|
||||
# self.assertNotIn('value', claim)
|
||||
|
||||
# resolve retries
|
||||
await self.conductor.spv_node.stop()
|
||||
resolve_task = asyncio.create_task(self.resolve('foo'))
|
||||
await self.conductor.spv_node.start(self.conductor.blockchain_node)
|
||||
await self.conductor.spv_node.start(self.conductor.lbcwallet_node)
|
||||
self.assertIsNotNone((await resolve_task)['claim_id'])
|
||||
|
||||
async def test_winning_by_effective_amount(self):
|
||||
|
@ -443,16 +471,16 @@ class ResolveCommand(BaseResolveTestCase):
|
|||
self.assertEqual(one, claim6['name'])
|
||||
|
||||
async def test_resolve_old_claim(self):
|
||||
channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0')
|
||||
channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0', blocking=True)
|
||||
await self.confirm_tx(channel.id)
|
||||
address = channel.outputs[0].get_address(self.account.ledger)
|
||||
claim = generate_signed_legacy(address, channel.outputs[0])
|
||||
tx = await Transaction.claim_create('example', claim.SerializeToString(), 1, address, [self.account], self.account)
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
await self.broadcast_and_confirm(tx)
|
||||
|
||||
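# broadcast_and_confirm stands in for the broadcast + confirm_tx pair it
# replaces here; a minimal sketch of what such a helper is assumed to do:
#
#     async def broadcast_and_confirm(self, tx):
#         await self.broadcast(tx)
#         await self.confirm_tx(tx.id)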
response = await self.resolve('@olds/example')
|
||||
self.assertTrue('is_channel_signature_valid' in response, str(response))
|
||||
self.assertTrue(response['is_channel_signature_valid'])
|
||||
|
||||
claim.publisherSignature.signature = bytes(reversed(claim.publisherSignature.signature))
|
||||
|
@ -460,8 +488,7 @@ class ResolveCommand(BaseResolveTestCase):
|
|||
'bad_example', claim.SerializeToString(), 1, address, [self.account], self.account
|
||||
)
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
await self.broadcast_and_confirm(tx)
|
||||
|
||||
response = await self.resolve('bad_example')
|
||||
self.assertFalse(response['is_channel_signature_valid'])
|
||||
|
@ -643,10 +670,10 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
|
||||
async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int,
|
||||
non_winning_claims: List[ClaimStateValue]):
|
||||
self.assertEqual(height, self.conductor.spv_node.server.bp.db.db_height)
|
||||
self.assertEqual(height, self.conductor.spv_node.server.db.db_height)
|
||||
await self.assertMatchClaimIsWinning(name, winning_claim_id)
|
||||
for non_winning in non_winning_claims:
|
||||
claim = await self.assertMatchClaim(
|
||||
claim = await self.assertMatchClaim(name,
|
||||
non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd
|
||||
)
|
||||
self.assertEqual(non_winning.activation_height, claim.activation_height)
|
||||
|
@ -961,7 +988,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
)
|
||||
greater_than_or_equal_to_zero = [
|
||||
claim['claim_id'] for claim in (
|
||||
await self.conductor.spv_node.server.bp.db.search_index.search(
|
||||
await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||
channel_id=channel_id, fee_amount=">=0"
|
||||
))[0]
|
||||
]
|
||||
|
@ -969,7 +996,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
self.assertSetEqual(set(greater_than_or_equal_to_zero), {stream_with_no_fee, stream_with_fee})
|
||||
greater_than_zero = [
|
||||
claim['claim_id'] for claim in (
|
||||
await self.conductor.spv_node.server.bp.db.search_index.search(
|
||||
await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||
channel_id=channel_id, fee_amount=">0"
|
||||
))[0]
|
||||
]
|
||||
|
@ -977,7 +1004,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
self.assertSetEqual(set(greater_than_zero), {stream_with_fee})
|
||||
equal_to_zero = [
|
||||
claim['claim_id'] for claim in (
|
||||
await self.conductor.spv_node.server.bp.db.search_index.search(
|
||||
await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||
channel_id=channel_id, fee_amount="<=0"
|
||||
))[0]
|
||||
]
|
||||
|
@ -992,10 +1019,10 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
name = 'test'
|
||||
await self.generate(494)
|
||||
address = (await self.account.receiving.get_addresses(True))[0]
|
||||
await self.blockchain.send_to_address(address, 400.0)
|
||||
await self.send_to_address_and_wait(address, 400.0)
|
||||
await self.account.ledger.on_address.first
|
||||
await self.generate(100)
|
||||
self.assertEqual(800, self.conductor.spv_node.server.bp.db.db_height)
|
||||
self.assertEqual(800, self.conductor.spv_node.server.db.db_height)
|
||||
|
||||
# Block 801: Claim A for 10 LBC is accepted.
|
||||
# It is the first claim, so it immediately becomes active and controlling.
|
||||
|
@ -1007,10 +1034,10 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
# Its activation height is 1121 + min(4032, floor((1121-801) / 32)) = 1121 + 10 = 1131.
|
||||
# State: A(10) is controlling, B(20) is accepted.
|
||||
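# A quick, self-contained check of the delay arithmetic quoted in these
# comments, assuming the constants they use (a 32-block divisor and a
# 4032-block cap on the delay); claim letters refer to this walkthrough.
def activation_height(accepted: int, last_takeover: int) -> int:
    return accepted + min(4032, (accepted - last_takeover) // 32)

assert activation_height(1121, 801) == 1131  # claim B
assert activation_height(1123, 801) == 1133  # claim C
assert activation_height(1132, 801) == 1142  # claim D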
await self.generate(32 * 10 - 1)
|
||||
self.assertEqual(1120, self.conductor.spv_node.server.bp.db.db_height)
|
||||
self.assertEqual(1120, self.conductor.spv_node.server.db.db_height)
|
||||
claim_id_B = (await self.stream_create(name, '20.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
|
||||
claim_B, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_B}")
|
||||
self.assertEqual(1121, self.conductor.spv_node.server.bp.db.db_height)
|
||||
claim_B, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_B}")
|
||||
self.assertEqual(1121, self.conductor.spv_node.server.db.db_height)
|
||||
self.assertEqual(1131, claim_B.activation_height)
|
||||
await self.assertMatchClaimIsWinning(name, claim_id_A)
|
||||
|
||||
|
@ -1018,33 +1045,33 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
# Since it is a support for the controlling claim, it activates immediately.
|
||||
# State: A(10+14) is controlling, B(20) is accepted.
|
||||
await self.support_create(claim_id_A, bid='14.0')
|
||||
self.assertEqual(1122, self.conductor.spv_node.server.bp.db.db_height)
|
||||
self.assertEqual(1122, self.conductor.spv_node.server.db.db_height)
|
||||
await self.assertMatchClaimIsWinning(name, claim_id_A)
|
||||
|
||||
# Block 1123: Claim C for 50 LBC is accepted.
|
||||
# The activation height is 1123 + min(4032, floor((1123-801) / 32)) = 1123 + 10 = 1133.
|
||||
# State: A(10+14) is controlling, B(20) is accepted, C(50) is accepted.
|
||||
claim_id_C = (await self.stream_create(name, '50.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
|
||||
self.assertEqual(1123, self.conductor.spv_node.server.bp.db.db_height)
|
||||
claim_C, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_C}")
|
||||
self.assertEqual(1123, self.conductor.spv_node.server.db.db_height)
|
||||
claim_C, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_C}")
|
||||
self.assertEqual(1133, claim_C.activation_height)
|
||||
await self.assertMatchClaimIsWinning(name, claim_id_A)
|
||||
|
||||
await self.generate(7)
|
||||
self.assertEqual(1130, self.conductor.spv_node.server.bp.db.db_height)
|
||||
self.assertEqual(1130, self.conductor.spv_node.server.db.db_height)
|
||||
await self.assertMatchClaimIsWinning(name, claim_id_A)
|
||||
await self.generate(1)
|
||||
|
||||
# Block 1131: Claim B activates. It has 20 LBC, while claim A has 24 LBC (10 original + 14 from support X). There is no takeover, and claim A remains controlling.
|
||||
# State: A(10+14) is controlling, B(20) is active, C(50) is accepted.
|
||||
self.assertEqual(1131, self.conductor.spv_node.server.bp.db.db_height)
|
||||
self.assertEqual(1131, self.conductor.spv_node.server.db.db_height)
|
||||
await self.assertMatchClaimIsWinning(name, claim_id_A)
|
||||
|
||||
# Block 1132: Claim D for 300 LBC is accepted. The activation height is 1132 + min(4032, floor((1132-801) / 32)) = 1132 + 10 = 1142.
|
||||
# State: A(10+14) is controlling, B(20) is active, C(50) is accepted, D(300) is accepted.
|
||||
claim_id_D = (await self.stream_create(name, '300.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
|
||||
self.assertEqual(1132, self.conductor.spv_node.server.bp.db.db_height)
|
||||
claim_D, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_D}")
|
||||
self.assertEqual(1132, self.conductor.spv_node.server.db.db_height)
|
||||
claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}")
|
||||
self.assertEqual(False, claim_D.is_controlling)
|
||||
self.assertEqual(801, claim_D.last_takeover_height)
|
||||
self.assertEqual(1142, claim_D.activation_height)
|
||||
|
@ -1053,8 +1080,8 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
# Block 1133: Claim C activates. It has 50 LBC, while claim A has 24 LBC, so a takeover is initiated. The takeover height for this name is set to 1133, and therefore the activation delay for all the claims becomes min(4032, floor((1133-1133) / 32)) = 0. All the claims become active. The totals for each claim are recalculated, and claim D becomes controlling because it has the highest total.
|
||||
# State: A(10+14) is active, B(20) is active, C(50) is active, D(300) is controlling
|
||||
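# The takeover decision above, written out: totals are the claim amount plus
# its active supports, the delay collapses to zero once the takeover starts,
# and whichever claim then holds the highest total becomes controlling.
totals = {'A': 10 + 14, 'B': 20, 'C': 50, 'D': 300}
assert min(4032, (1133 - 1133) // 32) == 0   # every pending claim activates at 1133
assert max(totals, key=totals.get) == 'D'    # D ends up controlling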
await self.generate(1)
|
||||
self.assertEqual(1133, self.conductor.spv_node.server.bp.db.db_height)
|
||||
claim_D, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_D}")
|
||||
self.assertEqual(1133, self.conductor.spv_node.server.db.db_height)
|
||||
claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}")
|
||||
self.assertEqual(True, claim_D.is_controlling)
|
||||
self.assertEqual(1133, claim_D.last_takeover_height)
|
||||
self.assertEqual(1133, claim_D.activation_height)
|
||||
|
@ -1327,15 +1354,15 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
await self.generate(8)
|
||||
await self.assertMatchClaimIsWinning(name, first_claim_id)
|
||||
# abandon the support that gives the winning claim the highest staked amount
|
||||
tx = await self.daemon.jsonrpc_txo_spend(type='support', txid=controlling_support_tx.id)
|
||||
tx = await self.daemon.jsonrpc_txo_spend(type='support', txid=controlling_support_tx.id, blocking=True)
|
||||
await self.generate(1)
|
||||
await self.assertMatchClaimIsWinning(name, first_claim_id)
|
||||
# await self.assertMatchClaim(second_claim_id)
|
||||
|
||||
await self.assertNameState(538, name, first_claim_id, last_takeover_height=207, non_winning_claims=[
|
||||
ClaimStateValue(second_claim_id, activation_height=539, active_in_lbrycrd=False)
|
||||
])
|
||||
await self.generate(1)
|
||||
|
||||
await self.assertMatchClaim(first_claim_id)
|
||||
await self.assertMatchClaimIsWinning(name, second_claim_id)
|
||||
await self.assertNameState(539, name, second_claim_id, last_takeover_height=539, non_winning_claims=[
|
||||
ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True)
|
||||
])
|
||||
|
||||
async def test_remove_controlling_support(self):
|
||||
name = 'derp'
|
||||
|
@ -1405,14 +1432,14 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
await self.generate(32)
|
||||
|
||||
second_claim_id = (await self.stream_create(name, '0.01', allow_duplicate_name=True))['outputs'][0]['claim_id']
|
||||
await self.assertNoClaim(second_claim_id)
|
||||
await self.assertNoClaim(name, second_claim_id)
|
||||
self.assertEqual(
|
||||
len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 1
|
||||
len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 1
|
||||
)
|
||||
await self.generate(1)
|
||||
await self.assertMatchClaim(second_claim_id)
|
||||
await self.assertMatchClaim(name, second_claim_id)
|
||||
self.assertEqual(
|
||||
len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 2
|
||||
len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 2
|
||||
)
|
||||
|
||||
async def test_abandon_controlling_same_block_as_new_claim(self):
|
||||
|
@ -1428,35 +1455,47 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
|
|||
|
||||
async def test_trending(self):
|
||||
async def get_trending_score(claim_id):
|
||||
return (await self.conductor.spv_node.server.bp.db.search_index.search(
|
||||
return (await self.conductor.spv_node.server.session_manager.search_index.search(
|
||||
claim_id=claim_id
|
||||
))[0][0]['trending_score']
|
||||
|
||||
claim_id1 = (await self.stream_create('derp', '1.0'))['outputs'][0]['claim_id']
|
||||
COIN = 1E8
|
||||
COIN = int(1E8)
|
||||
|
||||
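# What the rewritten half of this test does, assuming the key/value shape that
# is staged just below: trending notifications are written straight into the
# writer's prefix DB as (height, claim_hash) -> (previous_amount, new_amount)
# rows, a block is mined, and the recomputed trending_score for the claim is
# read back from the search index. COIN becomes an int so the staged amounts
# stay integral.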
height = 99000
|
||||
self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
|
||||
claim_id1, height, 0, 10 * COIN
|
||||
self.assertEqual(self.conductor.spv_node.writer.height, 207)
|
||||
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
|
||||
(208, bytes.fromhex(claim_id1)), (0, 10 * COIN)
|
||||
)
|
||||
await self.generate(1)
|
||||
self.assertEqual(172.64252836433135, await get_trending_score(claim_id1))
|
||||
self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
|
||||
claim_id1, height + 1, 10 * COIN, 100 * COIN
|
||||
self.assertEqual(self.conductor.spv_node.writer.height, 208)
|
||||
|
||||
self.assertEqual(1.7090807854206793, await get_trending_score(claim_id1))
|
||||
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
|
||||
(209, bytes.fromhex(claim_id1)), (10 * COIN, 100 * COIN)
|
||||
)
|
||||
await self.generate(1)
|
||||
self.assertEqual(173.45931832928875, await get_trending_score(claim_id1))
|
||||
self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
|
||||
claim_id1, height + 100, 100 * COIN, 1000000 * COIN
|
||||
self.assertEqual(self.conductor.spv_node.writer.height, 209)
|
||||
self.assertEqual(2.2437974397778886, await get_trending_score(claim_id1))
|
||||
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
|
||||
(309, bytes.fromhex(claim_id1)), (100 * COIN, 1000000 * COIN)
|
||||
)
|
||||
await self.generate(1)
|
||||
self.assertEqual(176.65517070393514, await get_trending_score(claim_id1))
|
||||
self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
|
||||
claim_id1, height + 200, 1000000 * COIN, 1 * COIN
|
||||
await self.generate(100)
|
||||
self.assertEqual(self.conductor.spv_node.writer.height, 309)
|
||||
self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))
|
||||
|
||||
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
|
||||
(409, bytes.fromhex(claim_id1)), (1000000 * COIN, 1 * COIN)
|
||||
)
|
||||
|
||||
await self.generate(99)
|
||||
self.assertEqual(self.conductor.spv_node.writer.height, 408)
|
||||
self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))
|
||||
|
||||
await self.generate(1)
|
||||
self.assertEqual(-174.951347102643, await get_trending_score(claim_id1))
|
||||
search_results = (await self.conductor.spv_node.server.bp.db.search_index.search(claim_name="derp"))[0]
|
||||
self.assertEqual(self.conductor.spv_node.writer.height, 409)
|
||||
|
||||
self.assertEqual(-3.4256156592205627, await get_trending_score(claim_id1))
|
||||
search_results = (await self.conductor.spv_node.server.session_manager.search_index.search(claim_name="derp"))[0]
|
||||
self.assertEqual(1, len(search_results))
|
||||
self.assertListEqual([claim_id1], [c['claim_id'] for c in search_results])
|
||||
|
||||
|
@ -1465,22 +1504,31 @@ class ResolveAfterReorg(BaseResolveTestCase):
|
|||
async def reorg(self, start):
|
||||
blocks = self.ledger.headers.height - start
|
||||
self.blockchain.block_expected = start - 1
|
||||
|
||||
|
||||
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
|
||||
self.conductor.spv_node.server.synchronized.clear()  # so the wait below blocks until the server re-syncs after the reorg
|
||||
|
||||
# go back to start
|
||||
await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode())
|
||||
# go to previous + 1
|
||||
await self.generate(blocks + 2)
|
||||
await self.blockchain.generate(blocks + 2)
|
||||
|
||||
await prepare  # no guarantee it hasn't already happened, so the wait was started before calling generate
|
||||
await self.conductor.spv_node.server.synchronized.wait()  # server has caught back up to the new chain
|
||||
# await asyncio.wait_for(self.on_header(self.blockchain.block_expected), 30.0)
|
||||
|
||||
async def assertBlockHash(self, height):
|
||||
bp = self.conductor.spv_node.server.bp
|
||||
reader_db = self.conductor.spv_node.server.db
|
||||
block_hash = await self.blockchain.get_block_hash(height)
|
||||
|
||||
self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode())
|
||||
self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex())
|
||||
self.assertEqual(block_hash, (await reader_db.fs_block_hashes(height, 1))[0][::-1].hex())
|
||||
txids = [
|
||||
tx_hash[::-1].hex() for tx_hash in bp.db.get_block_txs(height)
|
||||
tx_hash[::-1].hex() for tx_hash in reader_db.get_block_txs(height)
|
||||
]
|
||||
txs = await bp.db.get_transactions_and_merkles(txids)
|
||||
block_txs = (await bp.daemon.deserialised_block(block_hash))['tx']
|
||||
txs = await reader_db.get_transactions_and_merkles(txids)
|
||||
block_txs = (await self.conductor.spv_node.server.daemon.deserialised_block(block_hash))['tx']
|
||||
self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions')
|
||||
self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are out of order')
|
||||
|
||||
|
@ -1491,9 +1539,18 @@ class ResolveAfterReorg(BaseResolveTestCase):
|
|||
channel_id = self.get_claim_id(
|
||||
await self.channel_create(channel_name, '0.01')
|
||||
)
|
||||
self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex())
|
||||
|
||||
await self.assertNameState(
|
||||
height=207, name='@abc', winning_claim_id=channel_id, last_takeover_height=207,
|
||||
non_winning_claims=[]
|
||||
)
|
||||
|
||||
await self.reorg(206)
|
||||
self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex())
|
||||
|
||||
await self.assertNameState(
|
||||
height=208, name='@abc', winning_claim_id=channel_id, last_takeover_height=207,
|
||||
non_winning_claims=[]
|
||||
)
|
||||
|
||||
# await self.assertNoClaimForName(channel_name)
|
||||
# self.assertNotIn('error', await self.resolve(channel_name))
|
||||
|
@ -1502,16 +1559,29 @@ class ResolveAfterReorg(BaseResolveTestCase):
|
|||
stream_id = self.get_claim_id(
|
||||
await self.stream_create(stream_name, '0.01', channel_id=channel_id)
|
||||
)
|
||||
self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
|
||||
|
||||
await self.assertNameState(
|
||||
height=209, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
|
||||
non_winning_claims=[]
|
||||
)
|
||||
await self.reorg(206)
|
||||
self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
|
||||
await self.assertNameState(
|
||||
height=210, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
|
||||
non_winning_claims=[]
|
||||
)
|
||||
|
||||
await self.support_create(stream_id, '0.01')
|
||||
self.assertNotIn('error', await self.resolve(stream_name))
|
||||
self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
|
||||
|
||||
await self.assertNameState(
|
||||
height=211, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
|
||||
non_winning_claims=[]
|
||||
)
|
||||
await self.reorg(206)
|
||||
# self.assertNotIn('error', await self.resolve(stream_name))
|
||||
self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
|
||||
await self.assertNameState(
|
||||
height=212, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
|
||||
non_winning_claims=[]
|
||||
)
|
||||
|
||||
await self.stream_abandon(stream_id)
|
||||
self.assertNotIn('error', await self.resolve(channel_name))
|
||||
|
@ -1553,7 +1623,6 @@ class ResolveAfterReorg(BaseResolveTestCase):
|
|||
await self.ledger.wait(broadcast_tx)
|
||||
await self.support_create(still_valid.outputs[0].claim_id, '0.01')
|
||||
|
||||
# await self.generate(1)
|
||||
await self.ledger.wait(broadcast_tx, self.blockchain.block_expected)
|
||||
self.assertEqual(self.ledger.headers.height, 208)
|
||||
await self.assertBlockHash(208)
|
||||
|
@ -1570,7 +1639,7 @@ class ResolveAfterReorg(BaseResolveTestCase):
|
|||
|
||||
# reorg the last block dropping our claim tx
|
||||
await self.blockchain.invalidate_block(invalidated_block_hash)
|
||||
await self.blockchain.clear_mempool()
|
||||
await self.conductor.clear_mempool()
|
||||
await self.blockchain.generate(2)
|
||||
|
||||
# wait for the client to catch up and verify the reorg
|
||||
|
@ -1603,7 +1672,7 @@ class ResolveAfterReorg(BaseResolveTestCase):
|
|||
await self.blockchain.generate(1)
|
||||
|
||||
# wait for the client to catch up
|
||||
await asyncio.wait_for(self.on_header(210), 1.0)
|
||||
await asyncio.wait_for(self.on_header(210), 3.0)
|
||||
|
||||
# verify the claim is in the new block and that it is returned by claim_search
|
||||
republished = await self.resolve('hovercraft')
|
||||
|
@ -1649,11 +1718,11 @@ class ResolveAfterReorg(BaseResolveTestCase):
|
|||
|
||||
# reorg the last block dropping our claim tx
|
||||
await self.blockchain.invalidate_block(invalidated_block_hash)
|
||||
await self.blockchain.clear_mempool()
|
||||
await self.conductor.clear_mempool()
|
||||
await self.blockchain.generate(2)
|
||||
|
||||
# wait for the client to catch up and verify the reorg
|
||||
await asyncio.wait_for(self.on_header(209), 3.0)
|
||||
await asyncio.wait_for(self.on_header(209), 30.0)
|
||||
await self.assertBlockHash(207)
|
||||
await self.assertBlockHash(208)
|
||||
await self.assertBlockHash(209)
|
||||
|
|
|
@ -21,9 +21,8 @@ class BasicTransactionTest(IntegrationTestCase):
|
|||
[asyncio.ensure_future(self.on_address_update(address1)),
|
||||
asyncio.ensure_future(self.on_address_update(address2))]
|
||||
))
|
||||
sendtxid1 = await self.blockchain.send_to_address(address1, 5)
|
||||
sendtxid2 = await self.blockchain.send_to_address(address2, 5)
|
||||
await self.blockchain.generate(1)
|
||||
await self.send_to_address_and_wait(address1, 5)
|
||||
await self.send_to_address_and_wait(address2, 5, 1)
|
||||
await notifications
|
||||
|
||||
self.assertEqual(d2l(await self.account.get_balance()), '10.0')
|
||||
|
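# These tests now lean on send_to_address_and_wait instead of calling
# blockchain.send_to_address and waiting by hand. A rough sketch of what the
# helper is assumed to do (the real one lives in lbry.testcase; the optional
# third argument seen above is taken to be the number of blocks to mine):
#
#     async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0):
#         txid = await self.blockchain.send_to_address(address, amount)
#         await self.on_transaction_id(txid)  # tx seen in the mempool
#         if blocks_to_generate:
#             await self.generate_and_wait(blocks_to_generate, [txid])
#         return txid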
@ -57,7 +56,7 @@ class BasicTransactionTest(IntegrationTestCase):
|
|||
notifications = asyncio.create_task(asyncio.wait(
|
||||
[asyncio.ensure_future(self.ledger.wait(channel_tx)), asyncio.ensure_future(self.ledger.wait(stream_tx))]
|
||||
))
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
await notifications
|
||||
self.assertEqual(d2l(await self.account.get_balance()), '7.985786')
|
||||
self.assertEqual(d2l(await self.account.get_balance(include_claims=True)), '9.985786')
|
||||
|
@ -70,7 +69,7 @@ class BasicTransactionTest(IntegrationTestCase):
|
|||
await self.broadcast(abandon_tx)
|
||||
await notify
|
||||
notify = asyncio.create_task(self.ledger.wait(abandon_tx))
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
await notify
|
||||
|
||||
response = await self.ledger.resolve([], ['lbry://@bar/foo'])
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import unittest
|
||||
|
||||
from lbry.testcase import CommandTestCase
|
||||
|
||||
|
||||
|
@ -17,7 +19,7 @@ class TransactionCommandsTestCase(CommandTestCase):
|
|||
async def test_transaction_show(self):
|
||||
# local tx
|
||||
result = await self.out(self.daemon.jsonrpc_account_send(
|
||||
'5.0', await self.daemon.jsonrpc_address_unused(self.account.id)
|
||||
'5.0', await self.daemon.jsonrpc_address_unused(self.account.id), blocking=True
|
||||
))
|
||||
await self.confirm_tx(result['txid'])
|
||||
tx = await self.daemon.jsonrpc_transaction_show(result['txid'])
|
||||
|
@ -38,10 +40,9 @@ class TransactionCommandsTestCase(CommandTestCase):
|
|||
self.assertFalse(result['success'])
|
||||
|
||||
async def test_utxo_release(self):
|
||||
sendtxid = await self.blockchain.send_to_address(
|
||||
await self.account.receiving.get_or_create_usable_address(), 1
|
||||
await self.send_to_address_and_wait(
|
||||
await self.account.receiving.get_or_create_usable_address(), 1, 1
|
||||
)
|
||||
await self.confirm_tx(sendtxid)
|
||||
await self.assertBalance(self.account, '11.0')
|
||||
await self.ledger.reserve_outputs(await self.account.get_utxos())
|
||||
await self.assertBalance(self.account, '0.0')
|
||||
|
@ -51,6 +52,7 @@ class TransactionCommandsTestCase(CommandTestCase):
|
|||
|
||||
class TestSegwit(CommandTestCase):
|
||||
|
||||
@unittest.SkipTest
|
||||
async def test_segwit(self):
|
||||
p2sh_address1 = await self.blockchain.get_new_address(self.blockchain.P2SH_SEGWIT_ADDRESS)
|
||||
p2sh_address2 = await self.blockchain.get_new_address(self.blockchain.P2SH_SEGWIT_ADDRESS)
|
||||
|
@ -64,14 +66,13 @@ class TestSegwit(CommandTestCase):
|
|||
p2sh_txid2 = await self.blockchain.send_to_address(p2sh_address2, '1.0')
|
||||
bech32_txid1 = await self.blockchain.send_to_address(bech32_address1, '1.0')
|
||||
bech32_txid2 = await self.blockchain.send_to_address(bech32_address2, '1.0')
|
||||
|
||||
await self.generate(1)
|
||||
|
||||
# P2SH & BECH32 can pay to P2SH address
|
||||
tx = await self.blockchain.create_raw_transaction([
|
||||
{"txid": p2sh_txid1, "vout": 0},
|
||||
{"txid": bech32_txid1, "vout": 0},
|
||||
], [{p2sh_address3: '1.9'}]
|
||||
], {p2sh_address3: 1.9}
|
||||
)
|
||||
tx = await self.blockchain.sign_raw_transaction_with_wallet(tx)
|
||||
p2sh_txid3 = await self.blockchain.send_raw_transaction(tx)
|
||||
|
@ -82,7 +83,7 @@ class TestSegwit(CommandTestCase):
|
|||
tx = await self.blockchain.create_raw_transaction([
|
||||
{"txid": p2sh_txid2, "vout": 0},
|
||||
{"txid": bech32_txid2, "vout": 0},
|
||||
], [{bech32_address3: '1.9'}]
|
||||
], {bech32_address3: 1.9}
|
||||
)
|
||||
tx = await self.blockchain.sign_raw_transaction_with_wallet(tx)
|
||||
bech32_txid3 = await self.blockchain.send_raw_transaction(tx)
|
||||
|
@ -94,12 +95,9 @@ class TestSegwit(CommandTestCase):
|
|||
tx = await self.blockchain.create_raw_transaction([
|
||||
{"txid": p2sh_txid3, "vout": 0},
|
||||
{"txid": bech32_txid3, "vout": 0},
|
||||
], [{address: '3.5'}]
|
||||
], {address: 3.5}
|
||||
)
|
||||
tx = await self.blockchain.sign_raw_transaction_with_wallet(tx)
|
||||
txid = await self.blockchain.send_raw_transaction(tx)
|
||||
await self.on_transaction_id(txid)
|
||||
await self.generate(1)
|
||||
await self.on_transaction_id(txid)
|
||||
|
||||
await self.generate_and_wait(1, [txid])
|
||||
await self.assertBalance(self.account, '13.5')
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import asyncio
|
||||
import random
|
||||
from itertools import chain
|
||||
|
||||
import lbry.wallet.rpc.jsonrpc
|
||||
from lbry.wallet.transaction import Transaction, Output, Input
|
||||
from lbry.testcase import IntegrationTestCase
|
||||
from lbry.wallet.util import satoshis_to_coins, coins_to_satoshis
|
||||
|
@ -9,9 +9,8 @@ from lbry.wallet.manager import WalletManager
|
|||
|
||||
|
||||
class BasicTransactionTests(IntegrationTestCase):
|
||||
|
||||
async def test_variety_of_transactions_and_longish_history(self):
|
||||
await self.blockchain.generate(300)
|
||||
await self.generate(300)
|
||||
await self.assertBalance(self.account, '0.0')
|
||||
addresses = await self.account.receiving.get_addresses()
|
||||
|
||||
|
@ -19,10 +18,10 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
# to the 10th receiving address for a total of 30 UTXOs on the entire account
|
||||
for i in range(10):
|
||||
notification = asyncio.ensure_future(self.on_address_update(addresses[i]))
|
||||
txid = await self.blockchain.send_to_address(addresses[i], 10)
|
||||
_ = await self.send_to_address_and_wait(addresses[i], 10)
|
||||
await notification
|
||||
notification = asyncio.ensure_future(self.on_address_update(addresses[9]))
|
||||
txid = await self.blockchain.send_to_address(addresses[9], 10)
|
||||
_ = await self.send_to_address_and_wait(addresses[9], 10)
|
||||
await notification
|
||||
|
||||
# use batching to reduce issues with send_to_address on cli
|
||||
|
@ -57,7 +56,7 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
for tx in await self.ledger.db.get_transactions(txid__in=[tx.id for tx in txs])
|
||||
]))
|
||||
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
await asyncio.wait([self.ledger.wait(tx) for tx in txs])
|
||||
await self.assertBalance(self.account, '199.99876')
|
||||
|
||||
|
@ -74,7 +73,7 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
)
|
||||
await self.broadcast(tx)
|
||||
await self.ledger.wait(tx)
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
await self.ledger.wait(tx)
|
||||
|
||||
self.assertEqual(2, await self.account.get_utxo_count()) # 199 + change
|
||||
|
@ -88,12 +87,10 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
await self.assertBalance(account2, '0.0')
|
||||
|
||||
addresses = await account1.receiving.get_addresses()
|
||||
txids = await asyncio.gather(*(
|
||||
self.blockchain.send_to_address(address, 1.1) for address in addresses[:5]
|
||||
))
|
||||
await asyncio.wait([self.on_transaction_id(txid) for txid in txids]) # mempool
|
||||
await self.blockchain.generate(1)
|
||||
await asyncio.wait([self.on_transaction_id(txid) for txid in txids]) # confirmed
|
||||
txids = []
|
||||
for address in addresses[:5]:
|
||||
txids.append(await self.send_to_address_and_wait(address, 1.1))
|
||||
await self.generate_and_wait(1, txids)
|
||||
await self.assertBalance(account1, '5.5')
|
||||
await self.assertBalance(account2, '0.0')
|
||||
|
||||
|
@ -107,7 +104,7 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
)
|
||||
await self.broadcast(tx)
|
||||
await self.ledger.wait(tx) # mempool
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
await self.ledger.wait(tx) # confirmed
|
||||
|
||||
await self.assertBalance(account1, '3.499802')
|
||||
|
@ -121,7 +118,7 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
)
|
||||
await self.broadcast(tx)
|
||||
await self.ledger.wait(tx) # mempool
|
||||
await self.blockchain.generate(1)
|
||||
await self.generate(1)
|
||||
await self.ledger.wait(tx) # confirmed
|
||||
|
||||
tx = (await account1.get_transactions(include_is_my_input=True, include_is_my_output=True))[1]
|
||||
|
@ -133,11 +130,11 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
self.assertTrue(tx.outputs[1].is_internal_transfer)
|
||||
|
||||
async def test_history_edge_cases(self):
|
||||
await self.blockchain.generate(300)
|
||||
await self.generate(300)
|
||||
await self.assertBalance(self.account, '0.0')
|
||||
address = await self.account.receiving.get_or_create_usable_address()
|
||||
# evil trick: the mempool is unsorted in real life, but has the same order between python instances. reproduce it
|
||||
original_summary = self.conductor.spv_node.server.bp.mempool.transaction_summaries
|
||||
original_summary = self.conductor.spv_node.server.mempool.transaction_summaries
|
||||
|
||||
def random_summary(*args, **kwargs):
|
||||
summary = original_summary(*args, **kwargs)
|
||||
|
@ -146,13 +143,10 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
while summary == ordered:
|
||||
random.shuffle(summary)
|
||||
return summary
|
||||
self.conductor.spv_node.server.bp.mempool.transaction_summaries = random_summary
|
||||
self.conductor.spv_node.server.mempool.transaction_summaries = random_summary
|
||||
# 10 unconfirmed txs, all from blockchain wallet
|
||||
sends = [self.blockchain.send_to_address(address, 10) for _ in range(10)]
|
||||
# use batching to reduce issues with send_to_address on cli
|
||||
for batch in range(0, len(sends), 10):
|
||||
txids = await asyncio.gather(*sends[batch:batch + 10])
|
||||
await asyncio.wait([self.on_transaction_id(txid) for txid in txids])
|
||||
for i in range(10):
|
||||
await self.send_to_address_and_wait(address, 10)
|
||||
remote_status = await self.ledger.network.subscribe_address(address)
|
||||
self.assertTrue(await self.ledger.update_history(address, remote_status))
|
||||
# 20 unconfirmed txs, 10 from blockchain, 10 from local to local
|
||||
|
@ -170,8 +164,7 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
remote_status = await self.ledger.network.subscribe_address(address)
|
||||
self.assertTrue(await self.ledger.update_history(address, remote_status))
|
||||
# server history grows unordered
|
||||
txid = await self.blockchain.send_to_address(address, 1)
|
||||
await self.on_transaction_id(txid)
|
||||
await self.send_to_address_and_wait(address, 1)
|
||||
self.assertTrue(await self.ledger.update_history(address, remote_status))
|
||||
self.assertEqual(21, len((await self.ledger.get_local_status_and_history(address))[1]))
|
||||
self.assertEqual(0, len(self.ledger._known_addresses_out_of_sync))
|
||||
|
@ -195,37 +188,37 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
self.ledger, 2000000000000, [self.account], set_reserved=False, return_insufficient_funds=True
|
||||
)
|
||||
got_amounts = [estimator.effective_amount for estimator in spendable]
|
||||
self.assertListEqual(amounts, got_amounts)
|
||||
self.assertListEqual(sorted(amounts), sorted(got_amounts))
|
||||
|
||||
async def test_sqlite_coin_chooser(self):
|
||||
wallet_manager = WalletManager([self.wallet], {self.ledger.get_id(): self.ledger})
|
||||
await self.blockchain.generate(300)
|
||||
await self.generate(300)
|
||||
|
||||
await self.assertBalance(self.account, '0.0')
|
||||
address = await self.account.receiving.get_or_create_usable_address()
|
||||
other_account = self.wallet.generate_account(self.ledger)
|
||||
other_address = await other_account.receiving.get_or_create_usable_address()
|
||||
self.ledger.coin_selection_strategy = 'sqlite'
|
||||
await self.ledger.subscribe_account(self.account)
|
||||
await self.ledger.subscribe_account(other_account)
|
||||
|
||||
accepted = asyncio.ensure_future(self.on_address_update(address))
|
||||
txid = await self.blockchain.send_to_address(address, 1.0)
|
||||
_ = await self.send_to_address_and_wait(address, 1.0)
|
||||
await accepted
|
||||
|
||||
accepted = asyncio.ensure_future(self.on_address_update(address))
|
||||
txid = await self.blockchain.send_to_address(address, 1.0)
|
||||
_ = await self.send_to_address_and_wait(address, 1.0)
|
||||
await accepted
|
||||
|
||||
accepted = asyncio.ensure_future(self.on_address_update(address))
|
||||
txid = await self.blockchain.send_to_address(address, 3.0)
|
||||
_ = await self.send_to_address_and_wait(address, 3.0)
|
||||
await accepted
|
||||
|
||||
accepted = asyncio.ensure_future(self.on_address_update(address))
|
||||
txid = await self.blockchain.send_to_address(address, 5.0)
|
||||
_ = await self.send_to_address_and_wait(address, 5.0)
|
||||
await accepted
|
||||
|
||||
accepted = asyncio.ensure_future(self.on_address_update(address))
|
||||
txid = await self.blockchain.send_to_address(address, 10.0)
|
||||
_ = await self.send_to_address_and_wait(address, 10.0)
|
||||
await accepted
|
||||
|
||||
await self.assertBalance(self.account, '20.0')
|
||||
|
@ -266,6 +259,12 @@ class BasicTransactionTests(IntegrationTestCase):
|
|||
async def broadcast(tx):
|
||||
try:
|
||||
return await real_broadcast(tx)
|
||||
except lbry.wallet.rpc.jsonrpc.RPCError as err:
|
||||
# this is expected in tests where we try to double spend.
|
||||
if 'the transaction was rejected by network rules.' in str(err):
|
||||
pass
|
||||
else:
|
||||
raise err
|
||||
finally:
|
||||
e.set()
|
||||
|
||||
|
|
|
@ -1,60 +0,0 @@
|
|||
import time
|
||||
import unittest
|
||||
from lbry.wallet.server.metrics import ServerLoadData, calculate_avg_percentiles
|
||||
|
||||
|
||||
class TestPercentileCalculation(unittest.TestCase):
|
||||
|
||||
def test_calculate_percentiles(self):
|
||||
self.assertEqual(calculate_avg_percentiles([]), (0, 0, 0, 0, 0, 0, 0, 0))
|
||||
self.assertEqual(calculate_avg_percentiles([1]), (1, 1, 1, 1, 1, 1, 1, 1))
|
||||
self.assertEqual(calculate_avg_percentiles([1, 2]), (1, 1, 1, 1, 1, 2, 2, 2))
|
||||
self.assertEqual(calculate_avg_percentiles([1, 2, 3]), (2, 1, 1, 1, 2, 3, 3, 3))
|
||||
self.assertEqual(calculate_avg_percentiles([4, 1, 2, 3]), (2, 1, 1, 1, 2, 3, 4, 4))
|
||||
self.assertEqual(calculate_avg_percentiles([1, 2, 3, 4, 5, 6]), (3, 1, 1, 2, 3, 5, 6, 6))
|
||||
self.assertEqual(calculate_avg_percentiles(
|
||||
list(range(1, 101))), (50, 1, 5, 25, 50, 75, 95, 100))
|
||||
|
||||
|
||||
class TestCollectingMetrics(unittest.TestCase):
|
||||
|
||||
def test_happy_path(self):
|
||||
self.maxDiff = None
|
||||
load = ServerLoadData()
|
||||
search = load.for_api('search')
|
||||
self.assertEqual(search.name, 'search')
|
||||
search.start()
|
||||
search.cache_response()
|
||||
search.cache_response()
|
||||
metrics = {
|
||||
'search': [{'total': 40}],
|
||||
'execute_query': [
|
||||
{'total': 20},
|
||||
{'total': 10}
|
||||
]
|
||||
}
|
||||
for x in range(5):
|
||||
search.query_response(time.perf_counter() - 0.055 + 0.001*x, metrics)
|
||||
metrics['execute_query'][0]['total'] = 10
|
||||
metrics['execute_query'][0]['sql'] = "select lots, of, stuff FROM claim where something=1"
|
||||
search.query_interrupt(time.perf_counter() - 0.050, metrics)
|
||||
search.query_error(time.perf_counter() - 0.050, metrics)
|
||||
search.query_error(time.perf_counter() - 0.052, {})
|
||||
self.assertEqual(load.to_json_and_reset({}), {'status': {}, 'api': {'search': {
|
||||
"receive_count": 1,
|
||||
"cache_response_count": 2,
|
||||
"query_response_count": 5,
|
||||
"intrp_response_count": 1,
|
||||
"error_response_count": 2,
|
||||
"response": (53, 51, 51, 52, 53, 54, 55, 55),
|
||||
"interrupt": (50, 50, 50, 50, 50, 50, 50, 50),
|
||||
"error": (51, 50, 50, 50, 50, 52, 52, 52),
|
||||
"python": (12, 10, 10, 10, 10, 20, 20, 20),
|
||||
"wait": (12, 10, 10, 10, 12, 14, 15, 15),
|
||||
"sql": (27, 20, 20, 20, 30, 30, 30, 30),
|
||||
"individual_sql": (13, 10, 10, 10, 10, 20, 20, 20),
|
||||
"individual_sql_count": 14,
|
||||
"errored_queries": ['FROM claim where something=1'],
|
||||
"interrupted_queries": ['FROM claim where something=1'],
|
||||
}}})
|
||||
self.assertEqual(load.to_json_and_reset({}), {'status': {}, 'api': {}})
|
|
@ -2,7 +2,7 @@ import unittest
|
|||
import tempfile
|
||||
import shutil
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertableDelete, RevertablePut, OpStackIntegrity
|
||||
from lbry.wallet.server.db.prefixes import ClaimToTXOPrefixRow, HubDB
|
||||
from lbry.wallet.server.db.prefixes import ClaimToTXOPrefixRow, PrefixDB
|
||||
|
||||
|
||||
class TestRevertableOpStack(unittest.TestCase):
|
||||
|
@ -107,7 +107,7 @@ class TestRevertableOpStack(unittest.TestCase):
|
|||
class TestRevertablePrefixDB(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.tmp_dir = tempfile.mkdtemp()
|
||||
self.db = HubDB(self.tmp_dir, cache_mb=1, max_open_files=32)
|
||||
self.db = PrefixDB(self.tmp_dir, cache_mb=1, max_open_files=32)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
self.db.close()
|
||||
|
@ -126,28 +126,112 @@ class TestRevertablePrefixDB(unittest.TestCase):
|
|||
self.assertIsNone(self.db.claim_takeover.get(name))
|
||||
self.assertEqual(10000000, self.db.claim_takeover.get_pending(name).height)
|
||||
|
||||
self.db.commit(10000000)
|
||||
self.db.commit(10000000, b'\x00' * 32)
|
||||
self.assertEqual(10000000, self.db.claim_takeover.get(name).height)
|
||||
|
||||
self.db.claim_takeover.stage_delete((name,), (claim_hash1, takeover_height))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 1))
|
||||
self.db.claim_takeover.stage_delete((name,), (claim_hash2, takeover_height + 1))
|
||||
self.db.commit(10000001)
|
||||
self.db.commit(10000001, b'\x01' * 32)
|
||||
self.assertIsNone(self.db.claim_takeover.get(name))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash3, takeover_height + 2))
|
||||
self.db.commit(10000002)
|
||||
self.db.commit(10000002, b'\x02' * 32)
|
||||
self.assertEqual(10000002, self.db.claim_takeover.get(name).height)
|
||||
|
||||
self.db.claim_takeover.stage_delete((name,), (claim_hash3, takeover_height + 2))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 3))
|
||||
self.db.commit(10000003)
|
||||
self.db.commit(10000003, b'\x03' * 32)
|
||||
self.assertEqual(10000003, self.db.claim_takeover.get(name).height)
|
||||
|
||||
self.db.rollback(10000003)
|
||||
self.db.rollback(10000003, b'\x03' * 32)
|
||||
self.assertEqual(10000002, self.db.claim_takeover.get(name).height)
|
||||
self.db.rollback(10000002)
|
||||
self.db.rollback(10000002, b'\x02' * 32)
|
||||
self.assertIsNone(self.db.claim_takeover.get(name))
|
||||
self.db.rollback(10000001)
|
||||
self.db.rollback(10000001, b'\x01' * 32)
|
||||
self.assertEqual(10000000, self.db.claim_takeover.get(name).height)
|
||||
self.db.rollback(10000000)
|
||||
self.db.rollback(10000000, b'\x00' * 32)
|
||||
self.assertIsNone(self.db.claim_takeover.get(name))
|
||||
|
||||
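# A minimal sketch of the commit/rollback round trip the assertions above
# exercise, using the same PrefixDB constructor as setUp; the names db, name,
# claim_hash, height and block_hash below are purely illustrative.
import tempfile
from lbry.wallet.server.db.prefixes import PrefixDB

db = PrefixDB(tempfile.mkdtemp(), cache_mb=1, max_open_files=32)
name, claim_hash = 'derp', b'\x01' * 20
height, block_hash = 10000000, b'\x00' * 32
db.claim_takeover.stage_put((name,), (claim_hash, height))
db.commit(height, block_hash)            # apply the staged ops for this block
assert db.claim_takeover.get(name).height == height
db.rollback(height, block_hash)          # undo exactly that block's changes
assert db.claim_takeover.get(name) is None
db.close()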
def test_hub_db_iterator(self):
|
||||
name = 'derp'
|
||||
claim_hash0 = 20 * b'\x00'
|
||||
claim_hash1 = 20 * b'\x01'
|
||||
claim_hash2 = 20 * b'\x02'
|
||||
claim_hash3 = 20 * b'\x03'
|
||||
overflow_value = 0xffffffff
|
||||
self.db.claim_expiration.stage_put((99, 999, 0), (claim_hash0, name))
|
||||
self.db.claim_expiration.stage_put((100, 1000, 0), (claim_hash1, name))
|
||||
self.db.claim_expiration.stage_put((100, 1001, 0), (claim_hash2, name))
|
||||
self.db.claim_expiration.stage_put((101, 1002, 0), (claim_hash3, name))
|
||||
self.db.claim_expiration.stage_put((overflow_value - 1, 1003, 0), (claim_hash3, name))
|
||||
self.db.claim_expiration.stage_put((overflow_value, 1004, 0), (claim_hash3, name))
|
||||
self.db.tx_num.stage_put((b'\x00' * 32,), (101,))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash3, 101))
|
||||
self.db.db_state.stage_put((), (b'n?\xcf\x12\x99\xd4\xec]y\xc3\xa4\xc9\x1dbJJ\xcf\x9e.\x17=\x95\xa1\xa0POgvihuV', 0, 1, b'VuhivgOP\xa0\xa1\x95=\x17.\x9e\xcfJJb\x1d\xc9\xa4\xc3y]\xec\xd4\x99\x12\xcf?n', 1, 0, 1, 7, 1, -1, -1, 0))
|
||||
self.db.unsafe_commit()
|
||||
|
||||
state = self.db.db_state.get()
|
||||
self.assertEqual(b'n?\xcf\x12\x99\xd4\xec]y\xc3\xa4\xc9\x1dbJJ\xcf\x9e.\x17=\x95\xa1\xa0POgvihuV', state.genesis)
|
||||
|
||||
self.assertListEqual(
|
||||
[], list(self.db.claim_expiration.iterate(prefix=(98,)))
|
||||
)
|
||||
self.assertListEqual(
|
||||
list(self.db.claim_expiration.iterate(start=(98,), stop=(99,))),
|
||||
list(self.db.claim_expiration.iterate(prefix=(98,)))
|
||||
)
|
||||
self.assertListEqual(
|
||||
list(self.db.claim_expiration.iterate(start=(99,), stop=(100,))),
|
||||
list(self.db.claim_expiration.iterate(prefix=(99,)))
|
||||
)
|
||||
self.assertListEqual(
|
||||
[
|
||||
((99, 999, 0), (claim_hash0, name)),
|
||||
], list(self.db.claim_expiration.iterate(prefix=(99,)))
|
||||
)
|
||||
self.assertListEqual(
|
||||
[
|
||||
((100, 1000, 0), (claim_hash1, name)),
|
||||
((100, 1001, 0), (claim_hash2, name))
|
||||
], list(self.db.claim_expiration.iterate(prefix=(100,)))
|
||||
)
|
||||
self.assertListEqual(
|
||||
list(self.db.claim_expiration.iterate(start=(100,), stop=(101,))),
|
||||
list(self.db.claim_expiration.iterate(prefix=(100,)))
|
||||
)
|
||||
self.assertListEqual(
|
||||
[
|
||||
((overflow_value - 1, 1003, 0), (claim_hash3, name))
|
||||
], list(self.db.claim_expiration.iterate(prefix=(overflow_value - 1,)))
|
||||
)
|
||||
self.assertListEqual(
|
||||
[
|
||||
((overflow_value, 1004, 0), (claim_hash3, name))
|
||||
], list(self.db.claim_expiration.iterate(prefix=(overflow_value,)))
|
||||
)
|
||||
|
||||
def test_hub_db_iterator_start_stop(self):
|
||||
tx_num = 101
|
||||
for x in range(255):
|
||||
claim_hash = 20 * chr(x).encode()
|
||||
self.db.active_amount.stage_put((claim_hash, 1, 200, tx_num, 1), (100000,))
|
||||
self.db.active_amount.stage_put((claim_hash, 1, 201, tx_num + 1, 1), (200000,))
|
||||
self.db.active_amount.stage_put((claim_hash, 1, 202, tx_num + 2, 1), (300000,))
|
||||
tx_num += 3
|
||||
self.db.unsafe_commit()
|
||||
|
||||
def get_active_amount_as_of_height(claim_hash: bytes, height: int) -> int:
|
||||
# iterate this claim's activation rows newest-first, bounded at `height`;
# the first row found carries the most recently activated amount (0 if none is active yet)
for v in self.db.active_amount.iterate(
|
||||
start=(claim_hash, 1, 0), stop=(claim_hash, 1, height + 1),
|
||||
include_key=False, reverse=True):
|
||||
return v.amount
|
||||
return 0
|
||||
|
||||
for x in range(255):
|
||||
claim_hash = 20 * chr(x).encode()
|
||||
self.assertEqual(300000, get_active_amount_as_of_height(claim_hash, 300))
|
||||
self.assertEqual(300000, get_active_amount_as_of_height(claim_hash, 203))
|
||||
self.assertEqual(300000, get_active_amount_as_of_height(claim_hash, 202))
|
||||
self.assertEqual(200000, get_active_amount_as_of_height(claim_hash, 201))
|
||||
self.assertEqual(100000, get_active_amount_as_of_height(claim_hash, 200))
|
||||
self.assertEqual(0, get_active_amount_as_of_height(claim_hash, 199))
|
||||