forked from LBRYCommunity/lbry-sdk
Merge pull request #3588 from lbryio/scribe
move `lbry.wallet.server` to new project called `scribe`, switch from using `lbrycrd` to `lbcd` in integration tests
This commit is contained in: c3e524cb8b
60 changed files with 1097 additions and 12279 deletions
@@ -53,8 +53,7 @@ from lbry.extras.daemon.security import ensure_request_allowed
from lbry.file_analysis import VideoFileAnalyzer
from lbry.schema.claim import Claim
from lbry.schema.url import URL, normalize_name
from lbry.wallet.server.db.elasticsearch.constants import RANGE_FIELDS, REPLACEMENTS
MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}

if typing.TYPE_CHECKING:
    from lbry.blob.blob_manager import BlobManager

@@ -67,6 +66,29 @@ if typing.TYPE_CHECKING:

log = logging.getLogger(__name__)

RANGE_FIELDS = {
    'height', 'creation_height', 'activation_height', 'expiration_height',
    'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
    'tx_position', 'repost_count', 'limit_claims_per_channel',
    'amount', 'effective_amount', 'support_amount',
    'trending_score', 'censor_type', 'tx_num'
}
MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}
REPLACEMENTS = {
    'claim_name': 'normalized_name',
    'name': 'normalized_name',
    'txid': 'tx_id',
    'nout': 'tx_nout',
    'trending_group': 'trending_score',
    'trending_mixed': 'trending_score',
    'trending_global': 'trending_score',
    'trending_local': 'trending_score',
    'reposted': 'repost_count',
    'stream_types': 'stream_type',
    'media_types': 'media_type',
    'valid_channel_signature': 'is_signature_valid'
}


def is_transactional_function(name):
    for action in ('create', 'update', 'abandon', 'send', 'fund'):
@@ -1,13 +1,11 @@
import base64
from typing import List, TYPE_CHECKING, Union, Optional
from typing import List, Union, Optional, NamedTuple
from binascii import hexlify
from itertools import chain

from lbry.error import ResolveCensoredError
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage
if TYPE_CHECKING:
    from lbry.wallet.server.leveldb import ResolveResult

INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)

@@ -24,6 +22,31 @@ def set_reference(reference, claim_hash, rows):
        return


class ResolveResult(NamedTuple):
    name: str
    normalized_name: str
    claim_hash: bytes
    tx_num: int
    position: int
    tx_hash: bytes
    height: int
    amount: int
    short_url: str
    is_controlling: bool
    canonical_url: str
    creation_height: int
    activation_height: int
    expiration_height: int
    effective_amount: int
    support_amount: int
    reposted: int
    last_takeover_height: Optional[int]
    claims_in_channel: Optional[int]
    channel_hash: Optional[bytes]
    reposted_claim_hash: Optional[bytes]
    signature_valid: Optional[bool]


class Censor:

    NOT_CENSORED = 0
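`ResolveResult` was previously imported from `lbry.wallet.server.leveldb`; with that code moved out to scribe, the SDK now carries its own `NamedTuple` definition. A minimal sketch of constructing one, with every field value invented purely for illustration:

    # hedged sketch; all values below are made up for illustration
    result = ResolveResult(
        name='Example', normalized_name='example', claim_hash=b'\x00' * 20,
        tx_num=1, position=0, tx_hash=b'\x00' * 32, height=100, amount=1000,
        short_url='example#1', is_controlling=True, canonical_url='example#1',
        creation_height=100, activation_height=100, expiration_height=2162,
        effective_amount=1000, support_amount=0, reposted=0,
        last_takeover_height=None, claims_in_channel=None, channel_hash=None,
        reposted_claim_hash=None, signature_valid=None,
    )
    assert result.normalized_name == 'example'  # fields are read-only attributes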
@@ -19,7 +19,7 @@ from lbry.conf import Config
from lbry.wallet.util import satoshis_to_coins
from lbry.wallet.dewies import lbc_to_dewies
from lbry.wallet.orchstr8 import Conductor
from lbry.wallet.orchstr8.node import BlockchainNode, WalletNode, HubNode
from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode, HubNode
from lbry.schema.claim import Claim

from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty

@@ -236,7 +236,7 @@ class IntegrationTestCase(AsyncioTestCase):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.conductor: Optional[Conductor] = None
        self.blockchain: Optional[BlockchainNode] = None
        self.blockchain: Optional[LBCWalletNode] = None
        self.hub: Optional[HubNode] = None
        self.wallet_node: Optional[WalletNode] = None
        self.manager: Optional[WalletManager] = None

@@ -246,15 +246,17 @@ class IntegrationTestCase(AsyncioTestCase):

    async def asyncSetUp(self):
        self.conductor = Conductor(seed=self.SEED)
        await self.conductor.start_blockchain()
        self.addCleanup(self.conductor.stop_blockchain)
        await self.conductor.start_lbcd()
        self.addCleanup(self.conductor.stop_lbcd)
        await self.conductor.start_lbcwallet()
        self.addCleanup(self.conductor.stop_lbcwallet)
        await self.conductor.start_spv()
        self.addCleanup(self.conductor.stop_spv)
        await self.conductor.start_wallet()
        self.addCleanup(self.conductor.stop_wallet)
        await self.conductor.start_hub()
        self.addCleanup(self.conductor.stop_hub)
        self.blockchain = self.conductor.blockchain_node
        self.blockchain = self.conductor.lbcwallet_node
        self.hub = self.conductor.hub_node
        self.wallet_node = self.conductor.wallet_node
        self.manager = self.wallet_node.manager

@@ -269,6 +271,13 @@ class IntegrationTestCase(AsyncioTestCase):
    def broadcast(self, tx):
        return self.ledger.broadcast(tx)

    async def broadcast_and_confirm(self, tx, ledger=None):
        ledger = ledger or self.ledger
        notifications = asyncio.create_task(ledger.wait(tx))
        await ledger.broadcast(tx)
        await notifications
        await self.generate_and_wait(1, [tx.id], ledger)

    async def on_header(self, height):
        if self.ledger.headers.height < height:
            await self.ledger.on_header.where(
@@ -276,11 +285,36 @@ class IntegrationTestCase(AsyncioTestCase):
        )
        return True

    def on_transaction_id(self, txid, ledger=None):
        return (ledger or self.ledger).on_transaction.where(
            lambda e: e.tx.id == txid
    async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None):
        tx_watch = []
        txid = None
        done = False
        watcher = (ledger or self.ledger).on_transaction.where(
            lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id)
        )

        txid = await self.blockchain.send_to_address(address, amount)
        done = txid in tx_watch
        await watcher

        await self.generate_and_wait(blocks_to_generate, [txid], ledger)
        return txid

    async def generate_and_wait(self, blocks_to_generate, txids, ledger=None):
        if blocks_to_generate > 0:
            watcher = (ledger or self.ledger).on_transaction.where(
                lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1]  # multi-statement lambda
            )
            self.conductor.spv_node.server.synchronized.clear()
            await self.blockchain.generate(blocks_to_generate)
            height = self.blockchain.block_expected
            await watcher
            while True:
                await self.conductor.spv_node.server.synchronized.wait()
                self.conductor.spv_node.server.synchronized.clear()
                if self.conductor.spv_node.server.db.db_height >= height:
                    break

    def on_address_update(self, address):
        return self.ledger.on_transaction.where(
            lambda e: e.address == address
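The predicate passed to `on_transaction.where` in `generate_and_wait` packs a side effect and a boolean into one lambda: the tuple's first element removes a seen txid from the pending list, and the `[-1]` index returns the second element as the actual test. A standalone sketch of the trick:

    # distilled version of the multi-statement lambda used above
    txids = ['a', 'b']
    seen = lambda tx_id: ((tx_id in txids and txids.remove(tx_id)), len(txids) <= 0)[-1]
    assert seen('a') is False  # 'a' consumed, 'b' still pending
    assert seen('b') is True   # every expected txid has been seen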
@@ -291,6 +325,19 @@ class IntegrationTestCase(AsyncioTestCase):
            lambda e: e.tx.id == tx.id and e.address == address
        )

    async def generate(self, blocks):
        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
        height = self.blockchain.block_expected
        self.conductor.spv_node.server.synchronized.clear()
        await self.blockchain.generate(blocks)
        await prepare  # no guarantee that it didn't happen already, so start waiting from before calling generate
        while True:
            await self.conductor.spv_node.server.synchronized.wait()
            self.conductor.spv_node.server.synchronized.clear()
            if self.conductor.spv_node.server.db.db_height >= height:
                break


class FakeExchangeRateManager(ExchangeRateManager):

@@ -351,20 +398,19 @@ class CommandTestCase(IntegrationTestCase):
        self.skip_libtorrent = True

    async def asyncSetUp(self):
        await super().asyncSetUp()

        logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
        logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
        logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
        logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)

        await super().asyncSetUp()

        self.daemon = await self.add_daemon(self.wallet_node)

        await self.account.ensure_address_gap()
        address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
        sendtxid = await self.blockchain.send_to_address(address, 10)
        await self.confirm_tx(sendtxid)
        await self.generate(5)
        await self.send_to_address_and_wait(address, 10, 6)

        server_tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, server_tmp_dir)
@@ -461,9 +507,14 @@ class CommandTestCase(IntegrationTestCase):

    async def confirm_tx(self, txid, ledger=None):
        """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
        await self.on_transaction_id(txid, ledger)
        await self.generate(1)
        await self.on_transaction_id(txid, ledger)
        # await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
        on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
        await asyncio.wait([self.generate(1), on_tx], timeout=5)

        # # actually, if it's in the mempool or in the block we're fine
        # await self.generate_and_wait(1, [txid], ledger=ledger)
        # return txid

        return txid

    async def on_transaction_dict(self, tx):

@@ -478,12 +529,6 @@ class CommandTestCase(IntegrationTestCase):
            addresses.add(txo['address'])
        return list(addresses)

    async def generate(self, blocks):
        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
        await self.blockchain.generate(blocks)
        await prepare  # no guarantee that it didn't happen already, so start waiting from before calling generate

    async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True):
        txid = await self.blockchain._cli_cmnd('claimname', name, value, amount)
        if confirm:

@@ -514,7 +559,7 @@ class CommandTestCase(IntegrationTestCase):
            return self.sout(tx)
        return tx

    async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None):
    async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
        account = (daemon or self.daemon).wallet_manager.default_account
        claim_address = await account.receiving.get_or_create_usable_address()
        claim = Claim()

@@ -524,7 +569,7 @@ class CommandTestCase(IntegrationTestCase):
            claim_address, [self.account], self.account
        )
        await tx.sign([self.account])
        await (daemon or self.daemon).broadcast_or_release(tx, False)
        await (daemon or self.daemon).broadcast_or_release(tx, blocking)
        return self.sout(tx)

    def create_upload_file(self, data, prefix=None, suffix=None):
@@ -405,7 +405,7 @@ async def fallback_get_external_ip():  # used if spv servers can't be used for i

async def _get_external_ip(default_servers) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
    # used if upnp is disabled or non-functioning
    from lbry.wallet.server.udp import SPVStatusClientProtocol  # pylint: disable=C0415
    from lbry.wallet.udp import SPVStatusClientProtocol  # pylint: disable=C0415

    hostname_to_ip = {}
    ip_to_hostnames = collections.defaultdict(list)
@@ -1,17 +1,23 @@
__node_daemon__ = 'lbrycrdd'
__node_cli__ = 'lbrycrd-cli'
__node_bin__ = ''
__node_url__ = (
    'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.6/lbrycrd-linux-1746.zip'
__lbcd__ = 'lbcd'
__lbcctl__ = 'lbcctl'
__lbcwallet__ = 'lbcwallet'
__lbcd_url__ = (
    'https://github.com/lbryio/lbcd/releases/download/' +
    'v0.22.200-beta/lbcd_0.22.200-beta_TARGET_PLATFORM.tar.gz'
)
__lbcwallet_url__ = (
    'https://github.com/lbryio/lbcwallet/releases/download/' +
    'v0.13.100-alpha-rc2/lbcwallet_0.13.100-alpha-rc2_TARGET_PLATFORM.tar.gz'
)
__spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'

from .wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from .manager import WalletManager
from .network import Network
from .ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from .account import Account, AddressManager, SingleKey, HierarchicalDeterministic, DeterministicChannelKeyManager
from .transaction import Transaction, Output, Input
from .script import OutputScript, InputScript
from .database import SQLiteMixin, Database
from .header import Headers
from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
from lbry.wallet.manager import WalletManager
from lbry.wallet.network import Network
from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \
    DeterministicChannelKeyManager
from lbry.wallet.transaction import Transaction, Output, Input
from lbry.wallet.script import OutputScript, InputScript
from lbry.wallet.database import SQLiteMixin, Database
from lbry.wallet.header import Headers
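The `TARGET_PLATFORM` placeholder in both release URLs is substituted at download time from `platform.uname()` (see the node classes' `download()` methods later in this diff). A minimal sketch of that substitution:

    import platform

    uname = platform.uname()
    target_os = uname.system.lower()                        # e.g. 'linux' or 'darwin'
    target_arch = uname.machine.replace('x86_64', 'amd64')  # release assets use 'amd64'
    url = __lbcd_url__.replace('TARGET_PLATFORM', f'{target_os}_{target_arch}')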
@@ -16,18 +16,18 @@ from lbry.crypto.hash import hash160, double_sha256, sha256
from lbry.crypto.base58 import Base58
from lbry.utils import LRUCacheWithMetrics

from .tasks import TaskGroup
from .database import Database
from .stream import StreamController
from .dewies import dewies_to_lbc
from .account import Account, AddressManager, SingleKey
from .network import Network
from .transaction import Transaction, Output
from .header import Headers, UnvalidatedHeaders
from .checkpoints import HASHES
from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from .bip32 import PublicKey, PrivateKey
from .coinselection import CoinSelector
from lbry.wallet.tasks import TaskGroup
from lbry.wallet.database import Database
from lbry.wallet.stream import StreamController
from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import Account, AddressManager, SingleKey
from lbry.wallet.network import Network
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.header import Headers, UnvalidatedHeaders
from lbry.wallet.checkpoints import HASHES
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from lbry.wallet.bip32 import PublicKey, PrivateKey
from lbry.wallet.coinselection import CoinSelector

log = logging.getLogger(__name__)

@@ -365,6 +365,10 @@ class Ledger(metaclass=LedgerRegistry):
        await self.db.close()
        await self.headers.close()

    async def tasks_are_done(self):
        await self._update_tasks.done.wait()
        await self._other_tasks.done.wait()

    @property
    def local_height_including_downloaded_height(self):
        return max(self.headers.height, self._download_height)

@@ -739,7 +743,7 @@ class Ledger(metaclass=LedgerRegistry):
        while timeout and (int(time.perf_counter()) - start) <= timeout:
            if await self._wait_round(tx, height, addresses):
                return
        raise asyncio.TimeoutError('Timed out waiting for transaction.')
        raise asyncio.TimeoutError(f'Timed out waiting for transaction. {tx.id}')

    async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
        records = await self.db.get_addresses(address__in=addresses)

@@ -782,7 +786,7 @@ class Ledger(metaclass=LedgerRegistry):
        if hub_server:
            outputs = Outputs.from_grpc(encoded_outputs)
        else:
            outputs = Outputs.from_base64(encoded_outputs or b'')  # TODO: why is the server returning None?
            outputs = Outputs.from_base64(encoded_outputs or '')  # TODO: why is the server returning None?
        txs: List[Transaction] = []
        if len(outputs.txs) > 0:
            async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
@@ -12,13 +12,13 @@ from typing import List, Type, MutableSequence, MutableMapping, Optional
from lbry.error import KeyFeeAboveMaxAllowedError, WalletNotLoadedError
from lbry.conf import Config, NOT_SET

from .dewies import dewies_to_lbc
from .account import Account
from .ledger import Ledger, LedgerRegistry
from .transaction import Transaction, Output
from .database import Database
from .wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
from .rpc.jsonrpc import CodeMessageError
from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import Account
from lbry.wallet.ledger import Ledger, LedgerRegistry
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.database import Database
from lbry.wallet.wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
from lbry.wallet.rpc.jsonrpc import CodeMessageError

if typing.TYPE_CHECKING:
    from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
@@ -16,7 +16,7 @@ from lbry.utils import resolve_host
from lbry.error import IncompatibleWalletServerError
from lbry.wallet.rpc import RPCSession as BaseClientSession, Connector, RPCError, ProtocolError
from lbry.wallet.stream import StreamController
from lbry.wallet.server.udp import SPVStatusClientProtocol, SPVPong
from lbry.wallet.udp import SPVStatusClientProtocol, SPVPong
from lbry.conf import KnownHubsList

log = logging.getLogger(__name__)

@@ -122,7 +122,7 @@ class ClientSession(BaseClientSession):
            await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
        except Exception as err:
            if isinstance(err, asyncio.CancelledError):
                log.warning("closing connection to %s:%i", *self.server)
                log.info("closing connection to %s:%i", *self.server)
            else:
                log.exception("lost connection to spv")
        finally:

@@ -140,7 +140,7 @@ class ClientSession(BaseClientSession):
        controller.add(request.args)

    def connection_lost(self, exc):
        log.warning("Connection lost: %s:%d", *self.server)
        log.debug("Connection lost: %s:%d", *self.server)
        super().connection_lost(exc)
        self.response_time = None
        self.connection_latency = None

@@ -303,7 +303,7 @@ class Network:
            concurrency=self.config.get('concurrent_hub_requests', 30))
        try:
            await client.create_connection()
            log.warning("Connected to spv server %s:%i", host, port)
            log.info("Connected to spv server %s:%i", host, port)
            await client.ensure_server_version()
            return client
        except (asyncio.TimeoutError, ConnectionError, OSError, IncompatibleWalletServerError, RPCError):

@@ -357,7 +357,7 @@ class Network:
        self._keepalive_task = None
        self.client = None
        self.server_features = None
        log.warning("connection lost to %s", server_str)
        log.info("connection lost to %s", server_str)
        log.info("network loop finished")

    async def stop(self):
@@ -1,5 +1,5 @@
__hub_url__ = (
    "https://github.com/lbryio/hub/releases/download/v0.2022.01.21.1/hub"
)
from .node import Conductor
from .service import ConductorService
from lbry.wallet.orchstr8.node import Conductor
from lbry.wallet.orchstr8.service import ConductorService
@@ -5,7 +5,9 @@ import aiohttp

from lbry import wallet
from lbry.wallet.orchstr8.node import (
    Conductor, get_blockchain_node_from_ledger
    Conductor,
    get_lbcd_node_from_ledger,
    get_lbcwallet_node_from_ledger
)
from lbry.wallet.orchstr8.service import ConductorService

@@ -16,10 +18,11 @@ def get_argument_parser():
    )
    subparsers = parser.add_subparsers(dest='command', help='sub-command help')

    subparsers.add_parser("download", help="Download blockchain node binary.")
    subparsers.add_parser("download", help="Download lbcd and lbcwallet node binaries.")

    start = subparsers.add_parser("start", help="Start orchstr8 service.")
    start.add_argument("--blockchain", help="Hostname to start blockchain node.")
    start.add_argument("--lbcd", help="Hostname to start lbcd node.")
    start.add_argument("--lbcwallet", help="Hostname to start lbcwallet node.")
    start.add_argument("--spv", help="Hostname to start SPV server.")
    start.add_argument("--wallet", help="Hostname to start wallet daemon.")

@@ -47,7 +50,8 @@ def main():

    if command == 'download':
        logging.getLogger('blockchain').setLevel(logging.INFO)
        get_blockchain_node_from_ledger(wallet).ensure()
        get_lbcd_node_from_ledger(wallet).ensure()
        get_lbcwallet_node_from_ledger(wallet).ensure()

    elif command == 'generate':
        loop.run_until_complete(run_remote_command(

@@ -57,9 +61,12 @@ def main():
    elif command == 'start':

        conductor = Conductor()
        if getattr(args, 'blockchain', False):
            conductor.blockchain_node.hostname = args.blockchain
            loop.run_until_complete(conductor.start_blockchain())
        if getattr(args, 'lbcd', False):
            conductor.lbcd_node.hostname = args.lbcd
            loop.run_until_complete(conductor.start_lbcd())
        if getattr(args, 'lbcwallet', False):
            conductor.lbcwallet_node.hostname = args.lbcwallet
            loop.run_until_complete(conductor.start_lbcwallet())
        if getattr(args, 'spv', False):
            conductor.spv_node.hostname = args.spv
            loop.run_until_complete(conductor.start_spv())
@@ -1,4 +1,6 @@
# pylint: disable=import-error
import os
import signal
import json
import shutil
import asyncio

@@ -7,7 +9,7 @@ import tarfile
import logging
import tempfile
import subprocess
import importlib
import platform
from distutils.util import strtobool

from binascii import hexlify

@@ -15,9 +17,15 @@ from typing import Type, Optional
import urllib.request
from uuid import uuid4

try:
    from scribe.env import Env
    from scribe.hub.service import HubServerService
    from scribe.elasticsearch.service import ElasticSyncService
    from scribe.blockchain.service import BlockchainProcessorService
except ImportError:
    pass

import lbry
from lbry.wallet.server.server import Server
from lbry.wallet.server.env import Env
from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
from lbry.conf import KnownHubsList, Config
from lbry.wallet.orchstr8 import __hub_url__
@@ -25,17 +33,19 @@ from lbry.wallet.orchstr8 import __hub_url__

log = logging.getLogger(__name__)


def get_spvserver_from_ledger(ledger_module):
    spvserver_path, regtest_class_name = ledger_module.__spvserver__.rsplit('.', 1)
    spvserver_module = importlib.import_module(spvserver_path)
    return getattr(spvserver_module, regtest_class_name)
def get_lbcd_node_from_ledger(ledger_module):
    return LBCDNode(
        ledger_module.__lbcd_url__,
        ledger_module.__lbcd__,
        ledger_module.__lbcctl__
    )


def get_blockchain_node_from_ledger(ledger_module):
    return BlockchainNode(
        ledger_module.__node_url__,
        os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
        os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__)
def get_lbcwallet_node_from_ledger(ledger_module):
    return LBCWalletNode(
        ledger_module.__lbcwallet_url__,
        ledger_module.__lbcwallet__,
        ledger_module.__lbcctl__
    )
@@ -43,53 +53,51 @@ class Conductor:

    def __init__(self, seed=None):
        self.manager_module = WalletManager
        self.spv_module = get_spvserver_from_ledger(lbry.wallet)

        self.blockchain_node = get_blockchain_node_from_ledger(lbry.wallet)
        self.spv_node = SPVNode(self.spv_module)
        self.lbcd_node = get_lbcd_node_from_ledger(lbry.wallet)
        self.lbcwallet_node = get_lbcwallet_node_from_ledger(lbry.wallet)
        self.spv_node = SPVNode()
        self.wallet_node = WalletNode(
            self.manager_module, RegTestLedger, default_seed=seed
        )
        self.hub_node = HubNode(__hub_url__, "hub", self.spv_node)

        self.blockchain_started = False
        self.lbcd_started = False
        self.lbcwallet_started = False
        self.spv_started = False
        self.wallet_started = False
        self.hub_started = False

        self.log = log.getChild('conductor')

    async def start_blockchain(self):
        if not self.blockchain_started:
            asyncio.create_task(self.blockchain_node.start())
            await self.blockchain_node.running.wait()
            await self.blockchain_node.generate(200)
            self.blockchain_started = True
    async def start_lbcd(self):
        if not self.lbcd_started:
            await self.lbcd_node.start()
            self.lbcd_started = True

    async def stop_blockchain(self):
        if self.blockchain_started:
            await self.blockchain_node.stop(cleanup=True)
            self.blockchain_started = False
    async def stop_lbcd(self, cleanup=True):
        if self.lbcd_started:
            await self.lbcd_node.stop(cleanup)
            self.lbcd_started = False

    async def start_hub(self):
        if not self.hub_started:
            asyncio.create_task(self.hub_node.start())
            await self.blockchain_node.running.wait()
            await self.hub_node.start()
            await self.lbcwallet_node.running.wait()
            self.hub_started = True

    async def stop_hub(self):
    async def stop_hub(self, cleanup=True):
        if self.hub_started:
            await self.hub_node.stop(cleanup=True)
            await self.hub_node.stop(cleanup)
            self.hub_started = False

    async def start_spv(self):
        if not self.spv_started:
            await self.spv_node.start(self.blockchain_node)
            await self.spv_node.start(self.lbcwallet_node)
            self.spv_started = True

    async def stop_spv(self):
    async def stop_spv(self, cleanup=True):
        if self.spv_started:
            await self.spv_node.stop(cleanup=True)
            await self.spv_node.stop(cleanup)
            self.spv_started = False

    async def start_wallet(self):
@@ -97,21 +105,41 @@ class Conductor:
            await self.wallet_node.start(self.spv_node)
            self.wallet_started = True

    async def stop_wallet(self):
    async def stop_wallet(self, cleanup=True):
        if self.wallet_started:
            await self.wallet_node.stop(cleanup=True)
            await self.wallet_node.stop(cleanup)
            self.wallet_started = False

    async def start_lbcwallet(self, clean=True):
        if not self.lbcwallet_started:
            await self.lbcwallet_node.start()
            if clean:
                mining_addr = await self.lbcwallet_node.get_new_address()
                self.lbcwallet_node.mining_addr = mining_addr
                await self.lbcwallet_node.generate(200)
            # unlock the wallet for the next 1 hour
            await self.lbcwallet_node.wallet_passphrase("password", 3600)
            self.lbcwallet_started = True

    async def stop_lbcwallet(self, cleanup=True):
        if self.lbcwallet_started:
            await self.lbcwallet_node.stop(cleanup)
            self.lbcwallet_started = False

    async def start(self):
        await self.start_blockchain()
        await self.start_lbcd()
        await self.start_lbcwallet()
        await self.start_spv()
        await self.start_hub()
        await self.start_wallet()

    async def stop(self):
        all_the_stops = [
            self.stop_wallet,
            self.stop_hub,
            self.stop_spv,
            self.stop_blockchain
            self.stop_lbcwallet,
            self.stop_lbcd
        ]
        for stop in all_the_stops:
            try:

@@ -119,6 +147,12 @@ class Conductor:
            except Exception as e:
                log.exception('Exception raised while stopping services:', exc_info=e)

    async def clear_mempool(self):
        await self.stop_lbcwallet(cleanup=False)
        await self.stop_lbcd(cleanup=False)
        await self.start_lbcd()
        await self.start_lbcwallet(clean=False)


class WalletNode:

@@ -139,10 +173,11 @@ class WalletNode:

    async def start(self, spv_node: 'SPVNode', seed=None, connect=True, config=None):
        wallets_dir = os.path.join(self.data_path, 'wallets')
        os.mkdir(wallets_dir)
        wallet_file_name = os.path.join(wallets_dir, 'my_wallet.json')
        with open(wallet_file_name, 'w') as wallet_file:
            wallet_file.write('{"version": 1, "accounts": []}\n')
        if not os.path.isdir(wallets_dir):
            os.mkdir(wallets_dir)
            with open(wallet_file_name, 'w') as wallet_file:
                wallet_file.write('{"version": 1, "accounts": []}\n')
        self.manager = self.manager_class.from_config({
            'ledgers': {
                self.ledger_class.get_id(): {
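Taken together, a hedged sketch of driving the reworked conductor outside the test suite, using only the method names introduced above:

    import asyncio
    from lbry.wallet.orchstr8 import Conductor

    async def demo():
        conductor = Conductor()
        await conductor.start()              # lbcd -> lbcwallet -> spv -> hub -> wallet
        try:
            await conductor.clear_mempool()  # restarts lbcd/lbcwallet without wiping data dirs
        finally:
            await conductor.stop()           # services stopped in reverse dependency order

    asyncio.run(demo())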
@@ -184,55 +219,72 @@ class WalletNode:


class SPVNode:

    def __init__(self, coin_class, node_number=1):
        self.coin_class = coin_class
    def __init__(self, node_number=1):
        self.node_number = node_number
        self.controller = None
        self.data_path = None
        self.server = None
        self.server: Optional[HubServerService] = None
        self.writer: Optional[BlockchainProcessorService] = None
        self.es_writer: Optional[ElasticSyncService] = None
        self.hostname = 'localhost'
        self.port = 50001 + node_number  # avoid conflict with default daemon
        self.udp_port = self.port
        self.elastic_notifier_port = 19080 + node_number
        self.session_timeout = 600
        self.rpc_port = '0'  # disabled by default
        self.stopped = False
        self.stopped = True
        self.index_name = uuid4().hex

    async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
        self.data_path = tempfile.mkdtemp()
        conf = {
            'DESCRIPTION': '',
            'PAYMENT_ADDRESS': '',
            'DAILY_FEE': '0',
            'DB_DIRECTORY': self.data_path,
            'DAEMON_URL': blockchain_node.rpc_url,
            'REORG_LIMIT': '100',
            'HOST': self.hostname,
            'TCP_PORT': str(self.port),
            'UDP_PORT': str(self.udp_port),
            'SESSION_TIMEOUT': str(self.session_timeout),
            'MAX_QUERY_WORKERS': '0',
            'INDIVIDUAL_TAG_INDEXES': '',
            'RPC_PORT': self.rpc_port,
            'ES_INDEX_PREFIX': self.index_name,
            'ES_MODE': 'writer',
        }
        if extraconf:
            conf.update(extraconf)
        # TODO: don't use os.environ
        os.environ.update(conf)
        self.server = Server(Env(self.coin_class))
        self.server.bp.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
        await self.server.start()
    async def start(self, lbcwallet_node: 'LBCWalletNode', extraconf=None):
        if not self.stopped:
            log.warning("spv node is already running")
            return
        self.stopped = False
        try:
            self.data_path = tempfile.mkdtemp()
            conf = {
                'description': '',
                'payment_address': '',
                'daily_fee': '0',
                'db_dir': self.data_path,
                'daemon_url': lbcwallet_node.rpc_url,
                'reorg_limit': 100,
                'host': self.hostname,
                'tcp_port': self.port,
                'udp_port': self.udp_port,
                'elastic_notifier_port': self.elastic_notifier_port,
                'session_timeout': self.session_timeout,
                'max_query_workers': 0,
                'es_index_prefix': self.index_name,
                'chain': 'regtest'
            }
            if extraconf:
                conf.update(extraconf)
            env = Env(**conf)
            self.writer = BlockchainProcessorService(env)
            self.server = HubServerService(env)
            self.es_writer = ElasticSyncService(env)
            await self.writer.start()
            await self.es_writer.start()
            await self.server.start()
        except Exception as e:
            self.stopped = True
            if not isinstance(e, asyncio.CancelledError):
                log.exception("failed to start spv node")
            raise e

    async def stop(self, cleanup=True):
        if self.stopped:
            log.warning("spv node is already stopped")
            return
        try:
            await self.server.db.search_index.delete_index()
            await self.server.db.search_index.stop()
            await self.server.stop()
            await self.es_writer.delete_index()
            await self.es_writer.stop()
            await self.writer.stop()
            self.stopped = True
        except Exception as e:
            log.exception("failed to stop spv node")
            raise e
        finally:
            cleanup and self.cleanup()
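Where the old SPV node ran one in-process `Server` configured through `os.environ`, the scribe version is three cooperating services built from a single `Env`. A hedged sketch of standing the same stack up directly, using only the scribe imports and `Env` keywords shown in this diff (it assumes `Env`'s remaining settings have usable defaults):

    import asyncio
    from scribe.env import Env
    from scribe.blockchain.service import BlockchainProcessorService
    from scribe.elasticsearch.service import ElasticSyncService
    from scribe.hub.service import HubServerService

    async def run_hub_stack(daemon_url: str, db_dir: str):
        # keyword names taken from the conf dict in SPVNode.start above
        env = Env(db_dir=db_dir, daemon_url=daemon_url, chain='regtest',
                  max_query_workers=0, es_index_prefix='example')
        writer = BlockchainProcessorService(env)  # follows blocks from lbcd
        es_writer = ElasticSyncService(env)       # mirrors claims into elasticsearch
        server = HubServerService(env)            # serves wallet clients
        for service in (writer, es_writer, server):
            await service.start()                 # same order as SPVNode.start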
@@ -240,18 +292,19 @@ class SPVNode:
        shutil.rmtree(self.data_path, ignore_errors=True)


class BlockchainProcess(asyncio.SubprocessProtocol):
class LBCDProcess(asyncio.SubprocessProtocol):

    IGNORE_OUTPUT = [
        b'keypool keep',
        b'keypool reserve',
        b'keypool return',
        b'Block submitted',
    ]

    def __init__(self):
        self.ready = asyncio.Event()
        self.stopped = asyncio.Event()
        self.log = log.getChild('blockchain')
        self.log = log.getChild('lbcd')

    def pipe_data_received(self, fd, data):
        if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):

@@ -262,7 +315,7 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
        if b'Error:' in data:
            self.ready.set()
            raise SystemError(data.decode())
        if b'Done loading' in data:
        if b'RPCS: RPC server listening on' in data:
            self.ready.set()

    def process_exited(self):

@@ -270,39 +323,57 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
        self.ready.set()


class BlockchainNode:
class WalletProcess(asyncio.SubprocessProtocol):

    P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
    BECH32_ADDRESS = "bech32"
    IGNORE_OUTPUT = [
    ]

    def __init__(self):
        self.ready = asyncio.Event()
        self.stopped = asyncio.Event()
        self.log = log.getChild('lbcwallet')
        self.transport: Optional[asyncio.transports.SubprocessTransport] = None

    def pipe_data_received(self, fd, data):
        if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
            if b'Error:' in data:
                self.log.error(data.decode())
            else:
                self.log.info(data.decode())
        if b'Error:' in data:
            self.ready.set()
            raise SystemError(data.decode())
        if b'WLLT: Finished rescan' in data:
            self.ready.set()

    def process_exited(self):
        self.stopped.set()
        self.ready.set()


class LBCDNode:
    def __init__(self, url, daemon, cli):
        self.latest_release_url = url
        self.project_dir = os.path.dirname(os.path.dirname(__file__))
        self.bin_dir = os.path.join(self.project_dir, 'bin')
        self.daemon_bin = os.path.join(self.bin_dir, daemon)
        self.cli_bin = os.path.join(self.bin_dir, cli)
        self.log = log.getChild('blockchain')
        self.data_path = None
        self.log = log.getChild('lbcd')
        self.data_path = tempfile.mkdtemp()
        self.protocol = None
        self.transport = None
        self.block_expected = 0
        self.hostname = 'localhost'
        self.peerport = 9246 + 2  # avoid conflict with default peer port
        self.rpcport = 9245 + 2  # avoid conflict with default rpc port
        self.peerport = 29246
        self.rpcport = 29245
        self.rpcuser = 'rpcuser'
        self.rpcpassword = 'rpcpassword'
        self.stopped = False
        self.restart_ready = asyncio.Event()
        self.restart_ready.set()
        self.stopped = True
        self.running = asyncio.Event()

    @property
    def rpc_url(self):
        return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.rpcport}/'

    def is_expected_block(self, e: BlockHeightEvent):
        return self.block_expected == e.height

    @property
    def exists(self):
        return (
@@ -311,6 +382,12 @@ class BlockchainNode:
        )

    def download(self):
        uname = platform.uname()
        target_os = str.lower(uname.system)
        target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
        target_platform = target_os + '_' + target_arch
        self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)

        downloaded_file = os.path.join(
            self.bin_dir,
            self.latest_release_url[self.latest_release_url.rfind('/')+1:]
@@ -344,72 +421,206 @@ class BlockchainNode:
        return self.exists or self.download()

    async def start(self):
        assert self.ensure()
        self.data_path = tempfile.mkdtemp()
        loop = asyncio.get_event_loop()
        asyncio.get_child_watcher().attach_loop(loop)
        command = [
            self.daemon_bin,
            f'-datadir={self.data_path}', '-printtoconsole', '-regtest', '-server', '-txindex',
            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}',
            f'-port={self.peerport}'
        ]
        self.log.info(' '.join(command))
        while not self.stopped:
            if self.running.is_set():
                await asyncio.sleep(1)
                continue
            await self.restart_ready.wait()
            try:
                self.transport, self.protocol = await loop.subprocess_exec(
                    BlockchainProcess, *command
                )
                await self.protocol.ready.wait()
                assert not self.protocol.stopped.is_set()
                self.running.set()
            except asyncio.CancelledError:
                self.running.clear()
                raise
            except Exception as e:
                self.running.clear()
                log.exception('failed to start lbrycrdd', exc_info=e)
        if not self.stopped:
            return
        self.stopped = False
        try:
            assert self.ensure()
            loop = asyncio.get_event_loop()
            asyncio.get_child_watcher().attach_loop(loop)
            command = [
                self.daemon_bin,
                '--notls',
                f'--datadir={self.data_path}',
                '--regtest', f'--listen=127.0.0.1:{self.peerport}', f'--rpclisten=127.0.0.1:{self.rpcport}',
                '--txindex', f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}'
            ]
            self.log.info(' '.join(command))
            self.transport, self.protocol = await loop.subprocess_exec(
                LBCDProcess, *command
            )
            await self.protocol.ready.wait()
            assert not self.protocol.stopped.is_set()
            self.running.set()
        except asyncio.CancelledError:
            self.running.clear()
            self.stopped = True
            raise
        except Exception as e:
            self.running.clear()
            self.stopped = True
            log.exception('failed to start lbcd', exc_info=e)
            raise

    async def stop(self, cleanup=True):
        if self.stopped:
            return
        try:
            if self.transport:
                self.transport.terminate()
                await self.protocol.stopped.wait()
                self.transport.close()
        except Exception as e:
            log.exception('failed to stop lbcd', exc_info=e)
            raise
        finally:
            self.log.info("Done shutting down " + self.daemon_bin)
            self.stopped = True
            if cleanup:
                self.cleanup()
            self.running.clear()

    def cleanup(self):
        assert self.stopped
        shutil.rmtree(self.data_path, ignore_errors=True)

class LBCWalletNode:
    P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
    BECH32_ADDRESS = "bech32"

    def __init__(self, url, lbcwallet, cli):
        self.latest_release_url = url
        self.project_dir = os.path.dirname(os.path.dirname(__file__))
        self.bin_dir = os.path.join(self.project_dir, 'bin')
        self.lbcwallet_bin = os.path.join(self.bin_dir, lbcwallet)
        self.cli_bin = os.path.join(self.bin_dir, cli)
        self.log = log.getChild('lbcwallet')
        self.protocol = None
        self.transport = None
        self.hostname = 'localhost'
        self.lbcd_rpcport = 29245
        self.lbcwallet_rpcport = 29244
        self.rpcuser = 'rpcuser'
        self.rpcpassword = 'rpcpassword'
        self.data_path = tempfile.mkdtemp()
        self.stopped = True
        self.running = asyncio.Event()
        self.block_expected = 0
        self.mining_addr = ''

    @property
    def rpc_url(self):
        # FIXME: somehow the hub/sdk doesn't learn the blocks through the Wallet RPC port, why?
        # return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcwallet_rpcport}/'
        return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcd_rpcport}/'

    def is_expected_block(self, e: BlockHeightEvent):
        return self.block_expected == e.height

    @property
    def exists(self):
        return (
            os.path.exists(self.lbcwallet_bin)
        )

    def download(self):
        uname = platform.uname()
        target_os = str.lower(uname.system)
        target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
        target_platform = target_os + '_' + target_arch
        self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)

        downloaded_file = os.path.join(
            self.bin_dir,
            self.latest_release_url[self.latest_release_url.rfind('/')+1:]
        )

        if not os.path.exists(self.bin_dir):
            os.mkdir(self.bin_dir)

        if not os.path.exists(downloaded_file):
            self.log.info('Downloading: %s', self.latest_release_url)
            with urllib.request.urlopen(self.latest_release_url) as response:
                with open(downloaded_file, 'wb') as out_file:
                    shutil.copyfileobj(response, out_file)

        self.log.info('Extracting: %s', downloaded_file)

        if downloaded_file.endswith('.zip'):
            with zipfile.ZipFile(downloaded_file) as dotzip:
                dotzip.extractall(self.bin_dir)
                # zipfile bug https://bugs.python.org/issue15795
                os.chmod(self.lbcwallet_bin, 0o755)

        elif downloaded_file.endswith('.tar.gz'):
            with tarfile.open(downloaded_file) as tar:
                tar.extractall(self.bin_dir)

        return self.exists

    def ensure(self):
        return self.exists or self.download()
    async def start(self):
        assert self.ensure()
        loop = asyncio.get_event_loop()
        asyncio.get_child_watcher().attach_loop(loop)

        command = [
            self.lbcwallet_bin,
            '--noservertls', '--noclienttls',
            '--regtest',
            f'--rpcconnect=127.0.0.1:{self.lbcd_rpcport}', f'--rpclisten=127.0.0.1:{self.lbcwallet_rpcport}',
            '--createtemp', f'--appdata={self.data_path}',
            f'--username={self.rpcuser}', f'--password={self.rpcpassword}'
        ]
        self.log.info(' '.join(command))
        try:
            self.transport, self.protocol = await loop.subprocess_exec(
                WalletProcess, *command
            )
            self.protocol.transport = self.transport
            await self.protocol.ready.wait()
            assert not self.protocol.stopped.is_set()
            self.running.set()
            self.stopped = False
        except asyncio.CancelledError:
            self.running.clear()
            raise
        except Exception as e:
            self.running.clear()
            log.exception('failed to start lbcwallet', exc_info=e)

    def cleanup(self):
        assert self.stopped
        shutil.rmtree(self.data_path, ignore_errors=True)

    async def stop(self, cleanup=True):
        if self.stopped:
            return
        try:
            self.transport.terminate()
            await self.protocol.stopped.wait()
            self.transport.close()
        except Exception as e:
            log.exception('failed to stop lbcwallet', exc_info=e)
            raise
        finally:
            self.log.info("Done shutting down " + self.lbcwallet_bin)
            self.stopped = True
            if cleanup:
                self.cleanup()

    async def clear_mempool(self):
        self.restart_ready.clear()
        self.transport.terminate()
        await self.protocol.stopped.wait()
        self.transport.close()
        self.running.clear()
        os.remove(os.path.join(self.data_path, 'regtest', 'mempool.dat'))
        self.restart_ready.set()
        await self.running.wait()

    def cleanup(self):
        shutil.rmtree(self.data_path, ignore_errors=True)
        self.running.clear()

    async def _cli_cmnd(self, *args):
        cmnd_args = [
            self.cli_bin, f'-datadir={self.data_path}', '-regtest',
            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}'
            self.cli_bin,
            f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}', '--notls', '--regtest', '--wallet'
        ] + list(args)
        self.log.info(' '.join(cmnd_args))
        loop = asyncio.get_event_loop()
        asyncio.get_child_watcher().attach_loop(loop)
        process = await asyncio.create_subprocess_exec(
            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        out, _ = await process.communicate()
        out, err = await process.communicate()
        result = out.decode().strip()
        err = err.decode().strip()
        if len(result) <= 0 and err.startswith('-'):
            raise Exception(err)
        if err and 'creating a default config file' not in err:
            log.warning(err)
        self.log.info(result)
        if result.startswith('error code'):
            raise Exception(result)
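For reference, a standalone sketch of shelling out to `lbcctl` the way the reworked `_cli_cmnd` does; the binary path and rpc credentials are placeholders, and the "usage errors start with '-'" convention is inferred from the error check above:

    import asyncio
    import subprocess

    async def lbcctl(*args):
        cmd = ['./bin/lbcctl', '--rpcuser=rpcuser', '--rpcpass=rpcpassword',
               '--notls', '--regtest', '--wallet', *args]
        proc = await asyncio.create_subprocess_exec(
            *cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        out, err = await proc.communicate()
        if not out and err.decode().strip().startswith('-'):
            raise Exception(err.decode().strip())  # usage/flag errors
        return out.decode().strip()

    # e.g. txid = asyncio.run(lbcctl('sendtoaddress', address, '10'))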
@@ -417,7 +628,14 @@ class BlockchainNode:

    def generate(self, blocks):
        self.block_expected += blocks
        return self._cli_cmnd('generate', str(blocks))
        return self._cli_cmnd('generatetoaddress', str(blocks), self.mining_addr)

    def generate_to_address(self, blocks, addr):
        self.block_expected += blocks
        return self._cli_cmnd('generatetoaddress', str(blocks), addr)

    def wallet_passphrase(self, passphrase, timeout):
        return self._cli_cmnd('walletpassphrase', passphrase, str(timeout))

    def invalidate_block(self, blockhash):
        return self._cli_cmnd('invalidateblock', blockhash)

@@ -434,7 +652,7 @@ class BlockchainNode:
    def get_raw_change_address(self):
        return self._cli_cmnd('getrawchangeaddress')

    def get_new_address(self, address_type):
    def get_new_address(self, address_type='legacy'):
        return self._cli_cmnd('getnewaddress', "", address_type)

    async def get_balance(self):

@@ -450,7 +668,10 @@ class BlockchainNode:
        return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))

    async def sign_raw_transaction_with_wallet(self, tx):
        return json.loads(await self._cli_cmnd('signrawtransactionwithwallet', tx))['hex'].encode()
        # the "withwallet" portion should only come into play if we are doing segwit.
        # and "withwallet" doesn't exist on lbcd yet.
        result = await self._cli_cmnd('signrawtransaction', tx)
        return json.loads(result)['hex'].encode()

    def decode_raw_transaction(self, tx):
        return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())
@@ -460,12 +681,15 @@ class BlockchainNode:


class HubProcess(asyncio.SubprocessProtocol):
    def __init__(self):
        self.ready = asyncio.Event()
        self.stopped = asyncio.Event()
    def __init__(self, ready, stopped):
        self.ready = ready
        self.stopped = stopped
        self.log = log.getChild('hub')
        self.transport = None

    def pipe_data_received(self, fd, data):
        self.stopped.clear()
        self.ready.set()
        if self.log:
            self.log.info(data.decode())
        if b'error' in data.lower():

@@ -479,16 +703,26 @@ class HubProcess(asyncio.SubprocessProtocol):
            print(line)

    def process_exited(self):
        self.ready.clear()
        self.stopped.set()
        self.ready.set()

    async def stop(self):
        t = asyncio.create_task(self.stopped.wait())
        try:
            self.transport.send_signal(signal.SIGINT)
            await asyncio.wait_for(t, 3)
            # log.warning("stopped go hub")
        except asyncio.TimeoutError:
            if not t.done():
                t.cancel()
            self.transport.terminate()
            await self.stopped.wait()
            log.warning("terminated go hub")


class HubNode:

    def __init__(self, url, daemon, spv_node):
        self.spv_node = spv_node
        self.debug = False

        self.latest_release_url = url
        self.project_dir = os.path.dirname(os.path.dirname(__file__))
        self.bin_dir = os.path.join(self.project_dir, 'bin')
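The new `HubProcess.stop` above tries a polite SIGINT first and only terminates after a grace period; the same pattern distilled for any asyncio subprocess transport:

    import asyncio
    import signal

    async def graceful_stop(transport, stopped: asyncio.Event, grace=3):
        waiter = asyncio.create_task(stopped.wait())
        try:
            transport.send_signal(signal.SIGINT)  # ask the process to exit cleanly
            await asyncio.wait_for(waiter, grace)
        except asyncio.TimeoutError:
            if not waiter.done():
                waiter.cancel()
            transport.terminate()                 # force-kill after the grace period
            await stopped.wait()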
@@ -499,11 +733,13 @@ class HubNode:
        self.protocol = None
        self.hostname = 'localhost'
        self.rpcport = 50051  # avoid conflict with default rpc port
        self.stopped = False
        self.restart_ready = asyncio.Event()
        self.restart_ready.set()
        self._stopped = asyncio.Event()
        self.running = asyncio.Event()

    @property
    def stopped(self):
        return not self.running.is_set()

    @property
    def exists(self):
        return (

@@ -554,33 +790,24 @@ class HubNode:
            self.daemon_bin, 'serve', '--esindex', self.spv_node.index_name + 'claims', '--debug'
        ]
        self.log.info(' '.join(command))
        while not self.stopped:
            if self.running.is_set():
                await asyncio.sleep(1)
                continue
            await self.restart_ready.wait()
            try:
                if not self.debug:
                    self.transport, self.protocol = await loop.subprocess_exec(
                        HubProcess, *command
                    )
                    await self.protocol.ready.wait()
                    assert not self.protocol.stopped.is_set()
                self.running.set()
            except asyncio.CancelledError:
                self.running.clear()
                raise
            except Exception as e:
                self.running.clear()
                log.exception('failed to start hub', exc_info=e)
        self.protocol = HubProcess(self.running, self._stopped)
        try:
            self.transport, _ = await loop.subprocess_exec(
                lambda: self.protocol, *command
            )
            self.protocol.transport = self.transport
        except Exception as e:
            log.exception('failed to start go hub', exc_info=e)
            raise e
        await self.protocol.ready.wait()

    async def stop(self, cleanup=True):
        self.stopped = True
        try:
            if not self.debug:
                self.transport.terminate()
                await self.protocol.stopped.wait()
                self.transport.close()
            if self.protocol:
                await self.protocol.stop()
        except Exception as e:
            log.exception('failed to stop go hub', exc_info=e)
            raise e
        finally:
            if cleanup:
                self.cleanup()
@@ -61,8 +61,10 @@ class ConductorService:
        #set_logging(
        #    self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
        #)
        self.stack.blockchain_started or await self.stack.start_blockchain()
        self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
        self.stack.lbcd_started or await self.stack.start_lbcd()
        self.send_message({'type': 'service', 'name': 'lbcd', 'port': self.stack.lbcd_node.port})
        self.stack.lbcwallet_started or await self.stack.start_lbcwallet()
        self.send_message({'type': 'service', 'name': 'lbcwallet', 'port': self.stack.lbcwallet_node.port})
        self.stack.spv_started or await self.stack.start_spv()
        self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
        self.stack.wallet_started or await self.stack.start_wallet()

@@ -74,7 +76,7 @@ class ConductorService:
    async def generate(self, request):
        data = await request.post()
        blocks = data.get('blocks', 1)
        await self.stack.blockchain_node.generate(int(blocks))
        await self.stack.lbcwallet_node.generate(int(blocks))
        return json_response({'blocks': blocks})

    async def transfer(self, request):

@@ -85,11 +87,14 @@ class ConductorService:
        if not address:
            raise ValueError("No address was provided.")
        amount = data.get('amount', 1)
        txid = await self.stack.blockchain_node.send_to_address(address, amount)
        if self.stack.wallet_started:
            await self.stack.wallet_node.ledger.on_transaction.where(
                lambda e: e.tx.id == txid and e.address == address
            watcher = self.stack.wallet_node.ledger.on_transaction.where(
                lambda e: e.address == address  # and e.tx.id == txid -- might stall; see send_to_address_and_wait
            )
            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
            await watcher
        else:
            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
        return json_response({
            'address': address,
            'amount': amount,

@@ -98,7 +103,7 @@ class ConductorService:

    async def balance(self, _):
        return json_response({
            'balance': await self.stack.blockchain_node.get_balance()
            'balance': await self.stack.lbcwallet_node.get_balance()
        })

    async def log(self, request):

@@ -129,7 +134,7 @@ class ConductorService:
            'type': 'status',
            'height': self.stack.wallet_node.ledger.headers.height,
            'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
            'miner': await self.stack.blockchain_node.get_balance()
            'miner': await self.stack.lbcwallet_node.get_balance()
        })

    def send_message(self, msg):
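The reordering in `transfer()` matters: the event watcher is created before `send_to_address`, so a notification that fires immediately cannot be missed. Distilled (`ledger` and `node` stand in for the wallet ledger and lbcwallet node):

    # subscribe first, then act; awaiting afterwards is safe
    watcher = ledger.on_transaction.where(lambda e: e.address == address)
    txid = await node.send_to_address(address, amount)  # may notify right away
    await watcher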
(File diff suppressed because it is too large.)
@@ -1,34 +0,0 @@
import logging
import traceback
import argparse
from lbry.wallet.server.env import Env
from lbry.wallet.server.server import Server


def get_argument_parser():
    parser = argparse.ArgumentParser(
        prog="lbry-hub"
    )
    Env.contribute_to_arg_parser(parser)
    return parser


def main():
    parser = get_argument_parser()
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
    logging.info('lbry.server starting')
    logging.getLogger('aiohttp').setLevel(logging.WARNING)
    logging.getLogger('elasticsearch').setLevel(logging.WARNING)
    try:
        server = Server(Env.from_arg_parser(args))
        server.run()
    except Exception:
        traceback.print_exc()
        logging.critical('lbry.server terminated abnormally')
    else:
        logging.info('lbry.server terminated normally')


if __name__ == "__main__":
    main()
@ -1,386 +0,0 @@
import re
import struct
from typing import List
from hashlib import sha256
from decimal import Decimal
from collections import namedtuple

import lbry.wallet.server.tx as lib_tx
from lbry.wallet.script import OutputScript, OP_CLAIM_NAME, OP_UPDATE_CLAIM, OP_SUPPORT_CLAIM
from lbry.wallet.server.tx import DeserializerSegWit
from lbry.wallet.server.util import cachedproperty, subclasses
from lbry.wallet.server.hash import Base58, hash160, double_sha256, hash_to_hex_str, HASHX_LEN
from lbry.wallet.server.daemon import Daemon, LBCDaemon
from lbry.wallet.server.script import ScriptPubKey, OpCodes
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
from lbry.wallet.server.block_processor import BlockProcessor


Block = namedtuple("Block", "raw header transactions")
OP_RETURN = OpCodes.OP_RETURN


class CoinError(Exception):
    """Exception raised for coin-related errors."""


class Coin:
    """Base class of coin hierarchy."""

    REORG_LIMIT = 200
    # Not sure if these are coin-specific
    RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?')
    VALUE_PER_COIN = 100000000
    CHUNK_SIZE = 2016
    BASIC_HEADER_SIZE = 80
    STATIC_BLOCK_HEADERS = True
    SESSIONCLS = LBRYElectrumX
    DESERIALIZER = lib_tx.Deserializer
    DAEMON = Daemon
    BLOCK_PROCESSOR = BlockProcessor
    SESSION_MANAGER = LBRYSessionManager
    DB = LevelDB
    HEADER_VALUES = [
        'version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce'
    ]
    HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from
    MEMPOOL_HISTOGRAM_REFRESH_SECS = 500
    XPUB_VERBYTES = bytes('????', 'utf-8')
    XPRV_VERBYTES = bytes('????', 'utf-8')
    ENCODE_CHECK = Base58.encode_check
    DECODE_CHECK = Base58.decode_check
    # Peer discovery
    PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
    PEERS: List[str] = []

    @classmethod
    def lookup_coin_class(cls, name, net):
        """Return a coin class given name and network.

        Raise an exception if unrecognised."""
        req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
        for coin in subclasses(Coin):
            if (coin.NAME.lower() == name.lower() and
                    coin.NET.lower() == net.lower()):
                coin_req_attrs = req_attrs.copy()
                missing = [attr for attr in coin_req_attrs
                           if not hasattr(coin, attr)]
                if missing:
                    raise CoinError(f'coin {name} missing {missing} attributes')
                return coin
        raise CoinError(f'unknown coin {name} and network {net} combination')
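For example, a sketch of how a coin class is resolved at startup (using the classes defined further down in this file):

    coin = Coin.lookup_coin_class('LBRY', 'regtest')
    assert coin is LBCRegTest  # NAME is inherited from LBC; NET distinguishes it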
    @classmethod
    def sanitize_url(cls, url):
        # Remove surrounding ws and trailing /s
        url = url.strip().rstrip('/')
        match = cls.RPC_URL_REGEX.match(url)
        if not match:
            raise CoinError(f'invalid daemon URL: "{url}"')
        if match.groups()[1] is None:
            url += f':{cls.RPC_PORT:d}'
        if not url.startswith('http://') and not url.startswith('https://'):
            url = 'http://' + url
        return url + '/'
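A sketch of what sanitize_url produces for LBC (RPC_PORT is 9245 on the mainnet class defined below):

    LBC.sanitize_url(' rpcuser:rpcpass@127.0.0.1 ')
    # -> 'http://rpcuser:rpcpass@127.0.0.1:9245/'  (scheme and port filled in, trailing slash added)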
    @classmethod
    def genesis_block(cls, block):
        """Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        """
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_hex_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')

        return header + bytes(1)

    @classmethod
    def hashX_from_script(cls, script):
        """Returns a hashX from a script, or None if the script is provably
        unspendable so the output can be dropped.
        """
        if script and script[0] == OP_RETURN:
            return None
        return sha256(script).digest()[:HASHX_LEN]

    @staticmethod
    def lookup_xverbytes(verbytes):
        """Return a (is_xpub, coin_class) pair given xpub/xprv verbytes."""
        # Order means BTC testnet will override NMC testnet
        for coin in subclasses(Coin):
            if verbytes == coin.XPUB_VERBYTES:
                return True, coin
            if verbytes == coin.XPRV_VERBYTES:
                return False, coin
        raise CoinError('version bytes unrecognised')

    @classmethod
    def address_to_hashX(cls, address):
        """Return a hashX given a coin address."""
        return cls.hashX_from_script(cls.pay_to_address_script(address))

    @classmethod
    def P2PKH_address_from_hash160(cls, hash160):
        """Return a P2PKH address given the hash160 of a public key."""
        assert len(hash160) == 20
        return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)

    @classmethod
    def P2PKH_address_from_pubkey(cls, pubkey):
        """Return a coin address given a public key."""
        return cls.P2PKH_address_from_hash160(hash160(pubkey))

    @classmethod
    def P2SH_address_from_hash160(cls, hash160):
        """Return a coin address given a hash160."""
        assert len(hash160) == 20
        # P2SH_VERBYTES is a bytes object here, so slice to keep the bytes type
        return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[:1] + hash160)

    @classmethod
    def hash160_to_P2PKH_script(cls, hash160):
        return ScriptPubKey.P2PKH_script(hash160)

    @classmethod
    def hash160_to_P2PKH_hashX(cls, hash160):
        return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160))

    @classmethod
    def pay_to_address_script(cls, address):
        """Return a pubkey script that pays to a pubkey hash.

        Pass the address (either P2PKH or P2SH) in base58 form.
        """
        raw = cls.DECODE_CHECK(address)

        # Require version byte(s) plus hash160.
        verbyte = -1
        verlen = len(raw) - 20
        if verlen > 0:
            verbyte, hash160 = raw[:verlen], raw[verlen:]

        if verbyte == cls.P2PKH_VERBYTE:
            return cls.hash160_to_P2PKH_script(hash160)
        if verbyte in cls.P2SH_VERBYTES:
            return ScriptPubKey.P2SH_script(hash160)

        raise CoinError(f'invalid address: {address}')

    @classmethod
    def privkey_WIF(cls, privkey_bytes, compressed):
        """Return the private key encoded in Wallet Import Format."""
        payload = bytearray(cls.WIF_BYTE) + privkey_bytes
        if compressed:
            payload.append(0x01)
        return cls.ENCODE_CHECK(payload)

    @classmethod
    def header_hash(cls, header):
        """Given a header, return its hash."""
        return double_sha256(header)

    @classmethod
    def header_prevhash(cls, header):
        """Given a header, return the previous block hash."""
        return header[4:36]

    @classmethod
    def static_header_offset(cls, height):
        """Given a header height return its offset in the headers file.

        If header sizes change at some point, this is the only code
        that needs updating."""
        assert cls.STATIC_BLOCK_HEADERS
        return height * cls.BASIC_HEADER_SIZE

    @classmethod
    def static_header_len(cls, height):
        """Given a header height return its length."""
        return (cls.static_header_offset(height + 1)
                - cls.static_header_offset(height))

    @classmethod
    def block_header(cls, block, height):
        """Returns the block header given a block and its height."""
        return block[:cls.static_header_len(height)]

    @classmethod
    def block(cls, raw_block, height):
        """Return a Block namedtuple given a raw block and its height."""
        header = cls.block_header(raw_block, height)
        txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
        return Block(raw_block, header, txs)

    @classmethod
    def transaction(cls, raw_tx: bytes):
        """Return a deserialized transaction given its raw bytes."""
        return cls.DESERIALIZER(raw_tx).read_tx()

    @classmethod
    def decimal_value(cls, value):
        """Return the number of standard coin units as a Decimal given a
        quantity of smallest units.

        For example 1 BTC is returned for 100 million satoshis.
        """
        return Decimal(value) / cls.VALUE_PER_COIN

    @classmethod
    def electrum_header(cls, header, height):
        h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header)))
        # Add the height that is not present in the header itself
        h['block_height'] = height
        # Convert bytes to str
        h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash'])
        h['merkle_root'] = hash_to_hex_str(h['merkle_root'])
        return h

class LBC(Coin):
    DAEMON = LBCDaemon
    SESSIONCLS = LBRYElectrumX
    SESSION_MANAGER = LBRYSessionManager
    DESERIALIZER = DeserializerSegWit
    DB = LevelDB
    NAME = "LBRY"
    SHORTNAME = "LBC"
    NET = "mainnet"
    BASIC_HEADER_SIZE = 112
    CHUNK_SIZE = 96
    XPUB_VERBYTES = bytes.fromhex("0488b21e")
    XPRV_VERBYTES = bytes.fromhex("0488ade4")
    P2PKH_VERBYTE = bytes.fromhex("55")
    P2SH_VERBYTES = bytes.fromhex("7A")
    WIF_BYTE = bytes.fromhex("1C")
    GENESIS_HASH = ('9c89283ba0f3227f6c03b70216b9f665'
                    'f0118d5e0fa729cedf4fb34d6a34f463')
    TX_COUNT = 2716936
    TX_COUNT_HEIGHT = 329554
    TX_PER_BLOCK = 1
    RPC_PORT = 9245
    REORG_LIMIT = 200

    nOriginalClaimExpirationTime = 262974
    nExtendedClaimExpirationTime = 2102400
    nExtendedClaimExpirationForkHeight = 400155
    nNormalizedNameForkHeight = 539940  # targeting 21 March 2019
    nMinTakeoverWorkaroundHeight = 496850
    nMaxTakeoverWorkaroundHeight = 658300  # targeting 30 Oct 2019
    nWitnessForkHeight = 680770  # targeting 11 Dec 2019
    nAllClaimsInMerkleForkHeight = 658310  # targeting 30 Oct 2019
    proportionalDelayFactor = 32
    maxTakeoverDelay = 4032

    PEERS = [
    ]

    @classmethod
    def genesis_block(cls, block):
        '''Check the Genesis block is the right one for this coin.

        Return the full block (unlike the base class, the coinbase is kept).
        '''
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_hex_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')

        return block

    @classmethod
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[100:112])
        return {
            'version': version,
            'prev_block_hash': hash_to_hex_str(header[4:36]),
            'merkle_root': hash_to_hex_str(header[36:68]),
            'claim_trie_root': hash_to_hex_str(header[68:100]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'block_height': height,
        }

    @cachedproperty
    def address_handlers(self):
        return ScriptPubKey.PayToHandlers(
            address=self.P2PKH_address_from_hash160,
            script_hash=self.P2SH_address_from_hash160,
            pubkey=self.P2PKH_address_from_pubkey,
            unspendable=lambda: None,
            strange=self.claim_address_handler,
        )

    @classmethod
    def address_from_script(cls, script):
        '''Given a pk_script, return the address it pays to, or None.'''
        return ScriptPubKey.pay_to(cls.address_handlers, script)

    @classmethod
    def claim_address_handler(cls, script):
        '''Parse a claim script and return the address it pays to, or None.'''
        output = OutputScript(script)
        if output.is_pay_pubkey_hash:
            return cls.P2PKH_address_from_hash160(output.values['pubkey_hash'])
        if output.is_pay_script_hash:
            return cls.P2SH_address_from_hash160(output.values['script_hash'])
        if output.is_pay_pubkey:
            return cls.P2PKH_address_from_pubkey(output.values['pubkey'])
        if output.is_return_data:
            return None
        return None

    @classmethod
    def hashX_from_script(cls, script):
        '''
        Overrides electrumx hashX from script by extracting addresses from claim scripts.
        '''
        if script and script[0] == OpCodes.OP_RETURN or not script:
            return None
        if script[0] in [
            OP_CLAIM_NAME,
            OP_UPDATE_CLAIM,
            OP_SUPPORT_CLAIM,
        ]:
            return cls.address_to_hashX(cls.claim_address_handler(script))
        else:
            return sha256(script).digest()[:HASHX_LEN]

    @classmethod
    def get_expiration_height(cls, last_updated_height: int, extended: bool = False) -> int:
        if extended:
            return last_updated_height + cls.nExtendedClaimExpirationTime
        if last_updated_height < cls.nExtendedClaimExpirationForkHeight:
            return last_updated_height + cls.nOriginalClaimExpirationTime
        return last_updated_height + cls.nExtendedClaimExpirationTime

    @classmethod
    def get_delay_for_name(cls, blocks_of_continuous_ownership: int) -> int:
        return min(blocks_of_continuous_ownership // cls.proportionalDelayFactor, cls.maxTakeoverDelay)
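A sketch of the takeover-delay curve these constants produce (one block of delay per 32 blocks of continuous ownership, capped at 4032):

    LBC.get_delay_for_name(64)         # -> 2
    LBC.get_delay_for_name(1_000_000)  # -> 4032 (capped at maxTakeoverDelay)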

class LBCRegTest(LBC):
    NET = "regtest"
    GENESIS_HASH = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
    XPUB_VERBYTES = bytes.fromhex('043587cf')
    XPRV_VERBYTES = bytes.fromhex('04358394')
    P2PKH_VERBYTE = bytes.fromhex("6f")
    P2SH_VERBYTES = bytes.fromhex("c4")

    nOriginalClaimExpirationTime = 500
    nExtendedClaimExpirationTime = 600
    nExtendedClaimExpirationForkHeight = 800
    nNormalizedNameForkHeight = 250
    nMinTakeoverWorkaroundHeight = -1
    nMaxTakeoverWorkaroundHeight = -1
    nWitnessForkHeight = 150
    nAllClaimsInMerkleForkHeight = 350


class LBCTestNet(LBCRegTest):
    NET = "testnet"
    GENESIS_HASH = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
@ -1,375 +0,0 @@
import asyncio
import itertools
import json
import time
from functools import wraps

import aiohttp
from prometheus_client import Gauge, Histogram
from lbry.utils import LRUCacheWithMetrics
from lbry.wallet.rpc.jsonrpc import RPCError
from lbry.wallet.server.util import hex_to_bytes, class_logger
from lbry.wallet.rpc import JSONRPC


class DaemonError(Exception):
    """Raised when the daemon returns an error in its results."""


class WarmingUpError(Exception):
    """Internal - when the daemon is warming up."""


class WorkQueueFullError(Exception):
    """Internal - when the daemon's work queue is full."""


NAMESPACE = "wallet_server"


class Daemon:
    """Handles connections to a daemon at the given URL."""

    WARMING_UP = -28
    id_counter = itertools.count()

    lbrycrd_request_time_metric = Histogram(
        "lbrycrd_request", "lbrycrd requests count", namespace=NAMESPACE, labelnames=("method",)
    )
    lbrycrd_pending_count_metric = Gauge(
        "lbrycrd_pending_count", "Number of lbrycrd rpcs that are in flight", namespace=NAMESPACE,
        labelnames=("method",)
    )

    def __init__(self, coin, url, max_workqueue=10, init_retry=0.25,
                 max_retry=4.0):
        self.coin = coin
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.set_url(url)
        # Limit concurrent RPC calls to this number.
        # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
        self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
        self.init_retry = init_retry
        self.max_retry = max_retry
        self._height = None
        self.available_rpcs = {}
        self.connector = aiohttp.TCPConnector()
        self._block_hash_cache = LRUCacheWithMetrics(100000)
        self._block_cache = LRUCacheWithMetrics(2 ** 13, metric_name='block', namespace=NAMESPACE)

    async def close(self):
        if self.connector:
            await self.connector.close()
            self.connector = None

    def set_url(self, url):
        """Set the URLs to the given comma-separated list, and switch to the first one."""
        urls = url.split(',')
        urls = [self.coin.sanitize_url(url) for url in urls]
        for n, url in enumerate(urls):
            status = '' if n else ' (current)'
            logged_url = self.logged_url(url)
            self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
        self.url_index = 0
        self.urls = urls

    def current_url(self):
        """Returns the current daemon URL."""
        return self.urls[self.url_index]

    def logged_url(self, url=None):
        """The host and port part, for logging."""
        url = url or self.current_url()
        return url[url.rindex('@') + 1:]

    def failover(self):
        """Call to fail-over to the next daemon URL.

        Returns False if there is only one, otherwise True.
        """
        if len(self.urls) > 1:
            self.url_index = (self.url_index + 1) % len(self.urls)
            self.logger.info(f'failing over to {self.logged_url()}')
            return True
        return False
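A sketch of fail-over configuration (host names are illustrative): passing a comma-separated URL list makes set_url() register every daemon, and failover() rotates to the next one after repeated errors.

    daemon = Daemon(coin=LBC, url='rpcuser:rpcpass@node-a:9245,rpcuser:rpcpass@node-b:9245')
    daemon.current_url()  # 'http://rpcuser:rpcpass@node-a:9245/'
    daemon.failover()     # True - switches to node-b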
    def client_session(self):
        """An aiohttp client session."""
        return aiohttp.ClientSession(connector=self.connector, connector_owner=False)

    async def _send_data(self, data):
        if not self.connector:
            raise asyncio.CancelledError('Tried to send request during shutdown.')
        async with self.workqueue_semaphore:
            async with self.client_session() as session:
                async with session.post(self.current_url(), data=data) as resp:
                    kind = resp.headers.get('Content-Type', None)
                    if kind == 'application/json':
                        return await resp.json()
                    # bitcoind's HTTP protocol "handling" is a bad joke
                    text = await resp.text()
                    if 'Work queue depth exceeded' in text:
                        raise WorkQueueFullError
                    text = text.strip() or resp.reason
                    self.logger.error(text)
                    raise DaemonError(text)

    async def _send(self, payload, processor):
        """Send a payload to be converted to JSON.

        Handles temporary connection issues. Daemon response errors
        are raised via DaemonError.
        """

        def log_error(error):
            nonlocal last_error_log, retry
            now = time.time()
            if now - last_error_log > 60:
                last_error_log = now
                self.logger.error(f'{error} Retrying occasionally...')
            if retry == self.max_retry and self.failover():
                retry = 0

        on_good_message = None
        last_error_log = 0
        data = json.dumps(payload)
        retry = self.init_retry
        methods = tuple(
            [payload['method']] if isinstance(payload, dict) else [request['method'] for request in payload]
        )
        while True:
            try:
                for method in methods:
                    self.lbrycrd_pending_count_metric.labels(method=method).inc()
                result = await self._send_data(data)
                result = processor(result)
                if on_good_message:
                    self.logger.info(on_good_message)
                return result
            except asyncio.TimeoutError:
                log_error('timeout error.')
            except aiohttp.ServerDisconnectedError:
                log_error('disconnected.')
                on_good_message = 'connection restored'
            except aiohttp.ClientConnectionError:
                log_error('connection problem - is your daemon running?')
                on_good_message = 'connection restored'
            except aiohttp.ClientError as e:
                log_error(f'daemon error: {e}')
                on_good_message = 'running normally'
            except WarmingUpError:
                log_error('starting up checking blocks.')
                on_good_message = 'running normally'
            except WorkQueueFullError:
                log_error('work queue full.')
                on_good_message = 'running normally'
            finally:
                for method in methods:
                    self.lbrycrd_pending_count_metric.labels(method=method).dec()
            await asyncio.sleep(retry)
            retry = max(min(self.max_retry, retry * 2), self.init_retry)
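The retry delay used by _send() doubles after each failure and is clamped to [init_retry, max_retry]; with the defaults the sleep sequence works out to:

    retry, seq = 0.25, []
    for _ in range(6):
        seq.append(retry)
        retry = max(min(4.0, retry * 2), 0.25)
    # seq == [0.25, 0.5, 1.0, 2.0, 4.0, 4.0]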
    async def _send_single(self, method, params=None):
        """Send a single request to the daemon."""

        start = time.perf_counter()

        def processor(result):
            err = result['error']
            if not err:
                return result['result']
            if err.get('code') == self.WARMING_UP:
                raise WarmingUpError
            raise DaemonError(err)

        payload = {'method': method, 'id': next(self.id_counter)}
        if params:
            payload['params'] = params
        result = await self._send(payload, processor)
        self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
        return result

    async def _send_vector(self, method, params_iterable, replace_errs=False):
        """Send several requests of the same method.

        The result will be an array of the same length as params_iterable.
        If replace_errs is true, any item with an error is returned as None,
        otherwise an exception is raised."""

        start = time.perf_counter()

        def processor(result):
            errs = [item['error'] for item in result if item['error']]
            if any(err.get('code') == self.WARMING_UP for err in errs):
                raise WarmingUpError
            if not errs or replace_errs:
                return [item['result'] for item in result]
            raise DaemonError(errs)

        payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
                   for p in params_iterable]
        result = []
        if payload:
            result = await self._send(payload, processor)
            self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
        return result

    async def _is_rpc_available(self, method):
        """Return whether given RPC method is available in the daemon.

        Results are cached and the daemon will generally not be queried with
        the same method more than once."""
        available = self.available_rpcs.get(method)
        if available is None:
            available = True
            try:
                await self._send_single(method)
            except DaemonError as e:
                err = e.args[0]
                error_code = err.get("code")
                available = error_code != JSONRPC.METHOD_NOT_FOUND
            self.available_rpcs[method] = available
        return available

    async def block_hex_hashes(self, first, count):
        """Return the hex hashes of count blocks starting at height first."""
        if first + count < (self.cached_height() or 0) - 200:
            return await self._cached_block_hex_hashes(first, count)
        params_iterable = ((h, ) for h in range(first, first + count))
        return await self._send_vector('getblockhash', params_iterable)

    async def _cached_block_hex_hashes(self, first, count):
        """Return the hex hashes of count blocks starting at height first."""
        cached = self._block_hash_cache.get((first, count))
        if cached:
            return cached
        params_iterable = ((h, ) for h in range(first, first + count))
        self._block_hash_cache[(first, count)] = await self._send_vector('getblockhash', params_iterable)
        return self._block_hash_cache[(first, count)]

    async def deserialised_block(self, hex_hash):
        """Return the deserialised block with the given hex hash."""
        if hex_hash not in self._block_cache:
            block = await self._send_single('getblock', (hex_hash, True))
            self._block_cache[hex_hash] = block
            return block
        return self._block_cache[hex_hash]

    async def raw_blocks(self, hex_hashes):
        """Return the raw binary blocks with the given hex hashes."""
        params_iterable = ((h, False) for h in hex_hashes)
        blocks = await self._send_vector('getblock', params_iterable)
        # Convert hex string to bytes
        return [hex_to_bytes(block) for block in blocks]

    async def mempool_hashes(self):
        """Update our record of the daemon's mempool hashes."""
        return await self._send_single('getrawmempool')

    async def estimatefee(self, block_count):
        """Return the fee estimate for the block count. Units are whole
        currency units per KB, e.g. 0.00000995, or -1 if no estimate
        is available.
        """
        args = (block_count, )
        if await self._is_rpc_available('estimatesmartfee'):
            estimate = await self._send_single('estimatesmartfee', args)
            return estimate.get('feerate', -1)
        return await self._send_single('estimatefee', args)

    async def getnetworkinfo(self):
        """Return the result of the 'getnetworkinfo' RPC call."""
        return await self._send_single('getnetworkinfo')

    async def relayfee(self):
        """The minimum fee a low-priority tx must pay in order to be accepted
        to the daemon's memory pool."""
        network_info = await self.getnetworkinfo()
        return network_info['relayfee']

    async def getrawtransaction(self, hex_hash, verbose=False):
        """Return the serialized raw transaction with the given hash."""
        # Cast to int because some coin daemons are old and require it
        return await self._send_single('getrawtransaction',
                                       (hex_hash, int(verbose)))

    async def getrawtransactions(self, hex_hashes, replace_errs=True):
        """Return the serialized raw transactions with the given hashes.

        Replaces errors with None by default."""
        params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
        txs = await self._send_vector('getrawtransaction', params_iterable,
                                      replace_errs=replace_errs)
        # Convert hex strings to bytes
        return [hex_to_bytes(tx) if tx else None for tx in txs]

    async def broadcast_transaction(self, raw_tx):
        """Broadcast a transaction to the network."""
        return await self._send_single('sendrawtransaction', (raw_tx, ))

    async def height(self):
        """Query the daemon for its current height."""
        self._height = await self._send_single('getblockcount')
        return self._height

    def cached_height(self):
        """Return the cached daemon height.

        If the daemon has not been queried yet this returns None."""
        return self._height

def handles_errors(decorated_function):
    @wraps(decorated_function)
    async def wrapper(*args, **kwargs):
        try:
            return await decorated_function(*args, **kwargs)
        except DaemonError as daemon_error:
            raise RPCError(1, daemon_error.args[0])
    return wrapper


class LBCDaemon(Daemon):
    @handles_errors
    async def getrawtransaction(self, hex_hash, verbose=False):
        return await super().getrawtransaction(hex_hash=hex_hash, verbose=verbose)

    @handles_errors
    async def getclaimbyid(self, claim_id):
        '''Given a claim id, retrieves claim information.'''
        return await self._send_single('getclaimbyid', (claim_id,))

    @handles_errors
    async def getclaimsbyids(self, claim_ids):
        '''Given a list of claim ids, batches calls to retrieve claim information.'''
        return await self._send_vector('getclaimbyid', ((claim_id,) for claim_id in claim_ids))

    @handles_errors
    async def getclaimsforname(self, name):
        '''Given a name, retrieves all claims matching that name.'''
        return await self._send_single('getclaimsforname', (name,))

    @handles_errors
    async def getclaimsfortx(self, txid):
        '''Given a txid, returns the claims it makes.'''
        return await self._send_single('getclaimsfortx', (txid,)) or []

    @handles_errors
    async def getnameproof(self, name, block_hash=None):
        '''Given a name and optional block_hash, returns a name proof and winner, if any.'''
        return await self._send_single('getnameproof', (name, block_hash,) if block_hash else (name,))

    @handles_errors
    async def getvalueforname(self, name):
        '''Given a name, returns the winning claim value.'''
        return await self._send_single('getvalueforname', (name,))

    @handles_errors
    async def getnamesintrie(self):
        '''Returns the list of all names in the claim trie.'''
        return await self._send_single('getnamesintrie')

    @handles_errors
    async def claimname(self, name, hexvalue, amount):
        '''Claim a name, used for functional tests only.'''
        return await self._send_single('claimname', (name, hexvalue, float(amount)))
@ -1,42 +0,0 @@
import enum


@enum.unique
class DB_PREFIXES(enum.Enum):
    claim_to_support = b'K'
    support_to_claim = b'L'

    claim_to_txo = b'E'
    txo_to_claim = b'G'

    claim_to_channel = b'I'
    channel_to_claim = b'J'

    claim_short_id_prefix = b'F'
    effective_amount = b'D'
    claim_expiration = b'O'

    claim_takeover = b'P'
    pending_activation = b'Q'
    activated_claim_and_support = b'R'
    active_amount = b'S'

    repost = b'V'
    reposted_claim = b'W'

    undo = b'M'
    claim_diff = b'Y'

    tx = b'B'
    block_hash = b'C'
    header = b'H'
    tx_num = b'N'
    tx_count = b'T'
    tx_hash = b'X'
    utxo = b'u'
    hashx_utxo = b'h'
    hashx_history = b'x'
    db_state = b's'
    channel_count = b'Z'
    support_amount = b'a'
    block_txs = b'b'
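Each enum value is the one-byte namespace prepended to every key in that column family; a raw key is the prefix byte followed by the packed key fields. A sketch (the 20-byte hash is a placeholder):

    claim_hash = bytes(20)  # hypothetical claim hash
    key = DB_PREFIXES.claim_to_txo.value + claim_hash
    assert key[:1] == b'E'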
@ -1,447 +0,0 @@
import typing

CLAIM_TYPES = {
    'stream': 1,
    'channel': 2,
    'repost': 3,
    'collection': 4,
}

STREAM_TYPES = {
    'video': 1,
    'audio': 2,
    'image': 3,
    'document': 4,
    'binary': 5,
    'model': 6,
}
# 9/21/2020
MOST_USED_TAGS = {
    "gaming", "people & blogs", "entertainment", "music", "pop culture", "education",
    "technology", "blockchain", "news", "funny", "science & technology", "learning",
    "gameplay", "news & politics", "comedy", "bitcoin", "beliefs", "nature",
    "art", "economics", "film & animation", "lets play", "games", "sports",
    "howto & style", "game", "cryptocurrency", "playstation 4", "automotive", "crypto",
    "mature", "sony interactive entertainment", "walkthrough", "tutorial", "video game", "weapons",
    "playthrough", "pc", "anime", "how to", "btc", "fun",
    "ethereum", "food", "travel & events", "minecraft", "science", "autos & vehicles",
    "play", "politics", "commentary", "twitch", "ps4live", "love",
    "ps4", "nonprofits & activism", "ps4share", "fortnite", "xbox", "porn",
    "video games", "trump", "español", "money", "music video", "nintendo",
    "movie", "coronavirus", "donald trump", "steam", "trailer", "android",
    "podcast", "xbox one", "survival", "audio", "linux", "travel",
    "funny moments", "litecoin", "animation", "gamer", "lets", "playstation",
    "bitcoin news", "history", "xxx", "fox news", "dance", "god",
    "adventure", "liberal", "2020", "horror", "government", "freedom",
    "reaction", "meme", "photography", "truth", "health", "lbry",
    "family", "online", "eth", "crypto news", "diy", "trading",
    "gold", "memes", "world", "space", "lol", "covid-19",
    "rpg", "humor", "democrat", "film", "call of duty", "tech",
    "religion", "conspiracy", "rap", "cnn", "hangoutsonair", "unboxing",
    "fiction", "conservative", "cars", "hoa", "epic", "programming",
    "progressive", "cryptocurrency news", "classical", "jesus", "movies", "book",
    "ps3", "republican", "fitness", "books", "multiplayer", "animals",
    "pokemon", "bitcoin price", "facebook", "sharefactory", "criptomonedas", "cod",
    "bible", "business", "stream", "comics", "how", "fail",
    "nsfw", "new music", "satire", "pets & animals", "computer", "classical music",
    "indie", "musica", "msnbc", "fps", "mod", "sport",
    "sony", "ripple", "auto", "rock", "marvel", "complete",
    "mining", "political", "mobile", "pubg", "hip hop", "flat earth",
    "xbox 360", "reviews", "vlogging", "latest news", "hack", "tarot",
    "iphone", "media", "cute", "christian", "free speech", "trap",
    "war", "remix", "ios", "xrp", "spirituality", "song",
    "league of legends", "cat"
}

MATURE_TAGS = [
    'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
]


def normalize_tag(tag):
    return tag.replace(" ", "_").replace("&", "and").replace("-", "_")


COMMON_TAGS = {
    tag: normalize_tag(tag) for tag in list(MOST_USED_TAGS)
}
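Examples of the normalization above:

    normalize_tag('science & technology')  # -> 'science_and_technology'
    normalize_tag('news & politics')       # -> 'news_and_politics'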

INDEXED_LANGUAGES = [
    'none', 'en', 'aa', 'ab', 'ae', 'af', 'ak', 'am', 'an', 'ar', 'as', 'av',
    'ay', 'az', 'ba', 'be', 'bg', 'bh', 'bi', 'bm', 'bn', 'bo', 'br', 'bs',
    'ca', 'ce', 'ch', 'co', 'cr', 'cs', 'cu', 'cv', 'cy', 'da', 'de', 'dv',
    'dz', 'ee', 'el', 'eo', 'es', 'et', 'eu', 'fa', 'ff', 'fi', 'fj', 'fo',
    'fr', 'fy', 'ga', 'gd', 'gl', 'gn', 'gu', 'gv', 'ha', 'he', 'hi', 'ho',
    'hr', 'ht', 'hu', 'hy', 'hz', 'ia', 'id', 'ie', 'ig', 'ii', 'ik', 'io',
    'is', 'it', 'iu', 'ja', 'jv', 'ka', 'kg', 'ki', 'kj', 'kk', 'kl', 'km',
    'kn', 'ko', 'kr', 'ks', 'ku', 'kv', 'kw', 'ky', 'la', 'lb', 'lg', 'li',
    'ln', 'lo', 'lt', 'lu', 'lv', 'mg', 'mh', 'mi', 'mk', 'ml', 'mn', 'mr',
    'ms', 'mt', 'my', 'na', 'nb', 'nd', 'ne', 'ng', 'nl', 'nn', 'no', 'nr',
    'nv', 'ny', 'oc', 'oj', 'om', 'or', 'os', 'pa', 'pi', 'pl', 'ps', 'pt',
    'qu', 'rm', 'rn', 'ro', 'ru', 'rw', 'sa', 'sc', 'sd', 'se', 'sg', 'si',
    'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'sr', 'ss', 'st', 'su', 'sv', 'sw',
    'ta', 'te', 'tg', 'th', 'ti', 'tk', 'tl', 'tn', 'to', 'tr', 'ts', 'tt',
    'tw', 'ty', 'ug', 'uk', 'ur', 'uz', 've', 'vi', 'vo', 'wa', 'wo', 'xh',
    'yi', 'yo', 'za', 'zh', 'zu'
]


class ResolveResult(typing.NamedTuple):
    name: str
    normalized_name: str
    claim_hash: bytes
    tx_num: int
    position: int
    tx_hash: bytes
    height: int
    amount: int
    short_url: str
    is_controlling: bool
    canonical_url: str
    creation_height: int
    activation_height: int
    expiration_height: int
    effective_amount: int
    support_amount: int
    reposted: int
    last_takeover_height: typing.Optional[int]
    claims_in_channel: typing.Optional[int]
    channel_hash: typing.Optional[bytes]
    reposted_claim_hash: typing.Optional[bytes]
    signature_valid: typing.Optional[bool]
@ -1,119 +0,0 @@
import struct
from typing import Optional
from lbry.wallet.server.db import DB_PREFIXES
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete


class KeyValueStorage:
    def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
        raise NotImplementedError()

    def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
                 include_key=True, include_value=True, fill_cache=True):
        raise NotImplementedError()

    def write_batch(self, transaction: bool = False):
        raise NotImplementedError()

    def close(self):
        raise NotImplementedError()

    @property
    def closed(self) -> bool:
        raise NotImplementedError()

class PrefixDB:
    UNDO_KEY_STRUCT = struct.Struct(b'>Q')

    def __init__(self, db: KeyValueStorage, max_undo_depth: int = 200, unsafe_prefixes=None):
        self._db = db
        self._op_stack = RevertableOpStack(db.get, unsafe_prefixes=unsafe_prefixes)
        self._max_undo_depth = max_undo_depth

    def unsafe_commit(self):
        """
        Write staged changes to the database without keeping undo information
        Changes written cannot be undone
        """
        try:
            with self._db.write_batch(transaction=True) as batch:
                batch_put = batch.put
                batch_delete = batch.delete
                for staged_change in self._op_stack:
                    if staged_change.is_put:
                        batch_put(staged_change.key, staged_change.value)
                    else:
                        batch_delete(staged_change.key)
        finally:
            self._op_stack.clear()

    def commit(self, height: int):
        """
        Write changes for a block height to the database and keep undo information so that the changes can be reverted
        """
        undo_ops = self._op_stack.get_undo_ops()
        delete_undos = []
        if height > self._max_undo_depth:
            delete_undos.extend(self._db.iterator(
                start=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(0),
                stop=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height - self._max_undo_depth),
                include_value=False
            ))
        try:
            with self._db.write_batch(transaction=True) as batch:
                batch_put = batch.put
                batch_delete = batch.delete
                for staged_change in self._op_stack:
                    if staged_change.is_put:
                        batch_put(staged_change.key, staged_change.value)
                    else:
                        batch_delete(staged_change.key)
                for undo_to_delete in delete_undos:
                    batch_delete(undo_to_delete)
                batch_put(DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height), undo_ops)
        finally:
            self._op_stack.clear()

    def rollback(self, height: int):
        """
        Revert changes for a block height
        """
        undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height)
        self._op_stack.apply_packed_undo_ops(self._db.get(undo_key))
        try:
            with self._db.write_batch(transaction=True) as batch:
                batch_put = batch.put
                batch_delete = batch.delete
                for staged_change in self._op_stack:
                    if staged_change.is_put:
                        batch_put(staged_change.key, staged_change.value)
                    else:
                        batch_delete(staged_change.key)
                batch_delete(undo_key)
        finally:
            self._op_stack.clear()

    def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
        return self._db.get(key, fill_cache=fill_cache)

    def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
                 include_key=True, include_value=True, fill_cache=True):
        return self._db.iterator(
            reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop,
            prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache
        )

    def close(self):
        if not self._db.closed:
            self._db.close()

    @property
    def closed(self):
        return self._db.closed

    def stage_raw_put(self, key: bytes, value: bytes):
        self._op_stack.append_op(RevertablePut(key, value))

    def stage_raw_delete(self, key: bytes, value: bytes):
        self._op_stack.append_op(RevertableDelete(key, value))
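A minimal commit/rollback round trip, assuming `storage` is some concrete KeyValueStorage implementation (hypothetical here):

    db = PrefixDB(storage)
    key = DB_PREFIXES.claim_to_txo.value + bytes(20)
    db.stage_raw_put(key, b'value')
    db.commit(height=1000)    # writes the row plus packed undo ops for height 1000
    assert db.get(key) == b'value'
    db.rollback(height=1000)  # replays the undo ops and deletes the undo record
    assert db.get(key) is None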
@ -1 +0,0 @@
from .search import SearchIndex
@ -1,100 +0,0 @@
INDEX_DEFAULT_SETTINGS = {
    "settings":
        {"analysis":
            {"analyzer": {
                "default": {"tokenizer": "whitespace", "filter": ["lowercase", "porter_stem"]}}},
         "index":
            {"refresh_interval": -1,
             "number_of_shards": 1,
             "number_of_replicas": 0,
             "sort": {
                 "field": ["trending_score", "release_time"],
                 "order": ["desc", "desc"]
             }}
         },
    "mappings": {
        "properties": {
            "claim_id": {
                "fields": {
                    "keyword": {
                        "ignore_above": 256,
                        "type": "keyword"
                    }
                },
                "type": "text",
                "index_prefixes": {
                    "min_chars": 1,
                    "max_chars": 10
                }
            },
            "sd_hash": {
                "fields": {
                    "keyword": {
                        "ignore_above": 96,
                        "type": "keyword"
                    }
                },
                "type": "text",
                "index_prefixes": {
                    "min_chars": 1,
                    "max_chars": 4
                }
            },
            "height": {"type": "integer"},
            "claim_type": {"type": "byte"},
            "censor_type": {"type": "byte"},
            "trending_score": {"type": "double"},
            "release_time": {"type": "long"}
        }
    }
}

FIELDS = {
    '_id',
    'claim_id', 'claim_type', 'claim_name', 'normalized_name',
    'tx_id', 'tx_nout', 'tx_position',
    'short_url', 'canonical_url',
    'is_controlling', 'last_take_over_height',
    'public_key_bytes', 'public_key_id', 'claims_in_channel',
    'channel_id', 'signature', 'signature_digest', 'is_signature_valid',
    'amount', 'effective_amount', 'support_amount',
    'fee_amount', 'fee_currency',
    'height', 'creation_height', 'activation_height', 'expiration_height',
    'stream_type', 'media_type', 'censor_type',
    'title', 'author', 'description',
    'timestamp', 'creation_timestamp',
    'duration', 'release_time',
    'tags', 'languages', 'has_source', 'reposted_claim_type',
    'reposted_claim_id', 'repost_count', 'sd_hash',
    'trending_score', 'tx_num'
}

TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'description', 'claim_id', 'censoring_channel_id',
               'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature',
               'claim_name', 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id',
               'tags', 'sd_hash'}

RANGE_FIELDS = {
    'height', 'creation_height', 'activation_height', 'expiration_height',
    'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
    'tx_position', 'repost_count', 'limit_claims_per_channel',
    'amount', 'effective_amount', 'support_amount',
    'trending_score', 'censor_type', 'tx_num'
}

ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS

REPLACEMENTS = {
    'claim_name': 'normalized_name',
    'name': 'normalized_name',
    'txid': 'tx_id',
    'nout': 'tx_nout',
    'trending_group': 'trending_score',
    'trending_mixed': 'trending_score',
    'trending_global': 'trending_score',
    'trending_local': 'trending_score',
    'reposted': 'repost_count',
    'stream_types': 'stream_type',
    'media_types': 'media_type',
    'valid_channel_signature': 'is_signature_valid'
}
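REPLACEMENTS maps legacy claim_search parameter names onto the index's field names; a sketch of how a kwargs dict would be rewritten before query expansion:

    kwargs = {'name': 'lbry', 'reposted': '>10'}
    kwargs = {REPLACEMENTS.get(key, key): value for key, value in kwargs.items()}
    # -> {'normalized_name': 'lbry', 'repost_count': '>10'}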
@ -1,726 +0,0 @@
import time
import asyncio
import struct
from binascii import unhexlify
from collections import Counter, deque
from decimal import Decimal
from operator import itemgetter
from typing import Optional, List, Iterable, Union

from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError
from elasticsearch.helpers import async_streaming_bulk
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
from lbry.schema.result import Outputs, Censor
from lbry.schema.tags import clean_tags
from lbry.schema.url import URL, normalize_name
from lbry.utils import LRUCache
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \
    RANGE_FIELDS, ALL_FIELDS
from lbry.wallet.server.util import class_logger
from lbry.wallet.server.db.common import ResolveResult


class ChannelResolution(str):
    @classmethod
    def lookup_error(cls, url):
        return LookupError(f'Could not find channel in "{url}".')


class StreamResolution(str):
    @classmethod
    def lookup_error(cls, url):
        return LookupError(f'Could not find claim at "{url}".')


class IndexVersionMismatch(Exception):
    def __init__(self, got_version, expected_version):
        self.got_version = got_version
        self.expected_version = expected_version


class SearchIndex:
    VERSION = 1

    def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200):
        self.search_timeout = search_timeout
        self.sync_timeout = 600  # won't hit that 99% of the time, but can hit on a fresh import
        self.search_client: Optional[AsyncElasticsearch] = None
        self.sync_client: Optional[AsyncElasticsearch] = None
        self.index = index_prefix + 'claims'
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.claim_cache = LRUCache(2 ** 15)
        self.search_cache = LRUCache(2 ** 17)
        self._elastic_host = elastic_host
        self._elastic_port = elastic_port

    async def get_index_version(self) -> int:
        try:
            template = await self.sync_client.indices.get_template(self.index)
            return template[self.index]['version']
        except NotFoundError:
            return 0

    async def set_index_version(self, version):
        await self.sync_client.indices.put_template(
            self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
        )

    async def start(self) -> bool:
        if self.sync_client:
            return False
        hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
        self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
        self.search_client = AsyncElasticsearch(hosts, timeout=self.search_timeout)
        while True:
            try:
                await self.sync_client.cluster.health(wait_for_status='yellow')
                break
            except ConnectionError:
                self.logger.warning("Failed to connect to Elasticsearch. Waiting for it!")
                await asyncio.sleep(1)

        res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
        acked = res.get('acknowledged', False)
        if acked:
            await self.set_index_version(self.VERSION)
            return acked
        index_version = await self.get_index_version()
        if index_version != self.VERSION:
            self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
            raise IndexVersionMismatch(index_version, self.VERSION)
        await self.sync_client.indices.refresh(self.index)
        return acked

    def stop(self):
        clients = [self.sync_client, self.search_client]
        self.sync_client, self.search_client = None, None
        return asyncio.ensure_future(asyncio.gather(*(client.close() for client in clients)))
    def delete_index(self):
        return self.sync_client.indices.delete(self.index, ignore_unavailable=True)

    async def _consume_claim_producer(self, claim_producer):
        count = 0
        async for op, doc in claim_producer:
            if op == 'delete':
                yield {
                    '_index': self.index,
                    '_op_type': 'delete',
                    '_id': doc
                }
            else:
                yield {
                    'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS},
                    '_id': doc['claim_id'],
                    '_index': self.index,
                    '_op_type': 'update',
                    'doc_as_upsert': True
                }
            count += 1
            if count % 100 == 0:
                self.logger.info("Indexing in progress, %d claims.", count)
        if count:
            self.logger.info("Indexing done for %d claims.", count)
        else:
            self.logger.debug("Indexing done for %d claims.", count)

    async def claim_consumer(self, claim_producer):
        touched = set()
        async for ok, item in async_streaming_bulk(self.sync_client, self._consume_claim_producer(claim_producer),
                                                   raise_on_error=False):
            if not ok:
                self.logger.warning("indexing failed for an item: %s", item)
            else:
                item = item.popitem()[1]
                touched.add(item['_id'])
        await self.sync_client.indices.refresh(self.index)
        self.logger.debug("Indexing done.")

    def update_filter_query(self, censor_type, blockdict, channels=False):
        blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
        if channels:
            update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
        else:
            update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
        key = 'channel_id' if channels else 'claim_id'
        update['script'] = {
            "source": f"ctx._source.censor_type={censor_type}; "
                      f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
            "lang": "painless",
            "params": blockdict
        }
        return update
    async def update_trending_score(self, params):
        update_trending_score_script = """
        double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }

        double logsumexp(double x, double y)
        {
            double top;
            if(x > y)
                top = x;
            else
                top = y;
            double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
            return(result);
        }

        double logdiffexp(double big, double small)
        {
            return big + Math.log(1.0 - Math.exp(small - big));
        }

        double squash(double x)
        {
            if(x < 0.0)
                return -Math.log(1.0 - x);
            else
                return Math.log(x + 1.0);
        }

        double unsquash(double x)
        {
            if(x < 0.0)
                return 1.0 - Math.exp(-x);
            else
                return Math.exp(x) - 1.0;
        }

        double log_to_squash(double x)
        {
            return logsumexp(x, 0.0);
        }

        double squash_to_log(double x)
        {
            //assert x > 0.0;
            return logdiffexp(x, 0.0);
        }

        double squashed_add(double x, double y)
        {
            // squash(unsquash(x) + unsquash(y)) but avoiding overflow.
            // Cases where the signs are the same
            if (x < 0.0 && y < 0.0)
                return -logsumexp(-x, logdiffexp(-y, 0.0));
            if (x >= 0.0 && y >= 0.0)
                return logsumexp(x, logdiffexp(y, 0.0));
            // Where the signs differ
            if (x >= 0.0 && y < 0.0)
                if (Math.abs(x) >= Math.abs(y))
                    return logsumexp(0.0, logdiffexp(x, -y));
                else
                    return -logsumexp(0.0, logdiffexp(-y, x));
            if (x < 0.0 && y >= 0.0)
            {
                // Addition is commutative, hooray for new math
                return squashed_add(y, x);
            }
            return 0.0;
        }

        double squashed_multiply(double x, double y)
        {
            // squash(unsquash(x)*unsquash(y)) but avoiding overflow.
            int sign;
            if(x*y >= 0.0)
                sign = 1;
            else
                sign = -1;
            return sign*logsumexp(squash_to_log(Math.abs(x))
                + squash_to_log(Math.abs(y)), 0.0);
        }

        // Squashed inflated units
        double inflateUnits(int height) {
            double timescale = 576.0; // Half life of 400 = e-folding time of a day
                                      // by coincidence, so may as well go with it
            return log_to_squash(height / timescale);
        }

        double spikePower(double newAmount) {
            if (newAmount < 50.0) {
                return(0.5);
            } else if (newAmount < 85.0) {
                return(newAmount / 100.0);
            } else {
                return(0.85);
            }
        }

        double spikeMass(double oldAmount, double newAmount) {
            double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
            double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
            double power = spikePower(newAmount);
            if (oldAmount > newAmount) {
                -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
            } else {
                Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
            }
        }
        for (i in params.src.changes) {
            double units = inflateUnits(i.height);
            if (ctx._source.trending_score == null) {
                ctx._source.trending_score = 0.0;
            }
            double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
            ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
        }
        """
        start = time.perf_counter()

        def producer():
            for claim_id, claim_updates in params.items():
                yield {
                    '_id': claim_id,
                    '_index': self.index,
                    '_op_type': 'update',
                    'script': {
                        'lang': 'painless',
                        'source': update_trending_score_script,
                        'params': {'src': {
                            'changes': [
                                {
                                    'height': p.height,
                                    'prev_amount': p.prev_amount / 1E8,
                                    'new_amount': p.new_amount / 1E8,
                                } for p in claim_updates
                            ]
                        }}
                    },
                }
        if not params:
            return
        async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False):
            if not ok:
                self.logger.warning("updating trending failed for an item: %s", item)
        await self.sync_client.indices.refresh(self.index)
        self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000))
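For intuition, a plain-Python sketch of the squash/unsquash transform the Painless script applies to trending scores (the server only ever runs the Painless version):

    import math

    def squash(x: float) -> float:
        return -math.log(1.0 - x) if x < 0.0 else math.log(x + 1.0)

    def unsquash(x: float) -> float:
        return 1.0 - math.exp(-x) if x < 0.0 else math.exp(x) - 1.0

    assert abs(unsquash(squash(123.0)) - 123.0) < 1e-9  # round-trips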
||||
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
|
||||
if filtered_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if filtered_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.clear_caches()
|
||||
|
||||
def clear_caches(self):
|
||||
self.search_cache.clear()
|
||||
self.claim_cache.clear()
|
||||
|
||||

    async def cached_search(self, kwargs):
        total_referenced = []
        cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
        if cache_item.result is not None:
            return cache_item.result
        async with cache_item.lock:
            if cache_item.result:
                return cache_item.result
            censor = Censor(Censor.SEARCH)
            if kwargs.get('no_totals'):
                response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
            else:
                response, offset, total = await self.search(**kwargs)
            censor.apply(response)
            total_referenced.extend(response)

            if censor.censored:
                response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
                total_referenced.extend(response)
            response = [
                ResolveResult(
                    name=r['claim_name'],
                    normalized_name=r['normalized_name'],
                    claim_hash=r['claim_hash'],
                    tx_num=r['tx_num'],
                    position=r['tx_nout'],
                    tx_hash=r['tx_hash'],
                    height=r['height'],
                    amount=r['amount'],
                    short_url=r['short_url'],
                    is_controlling=r['is_controlling'],
                    canonical_url=r['canonical_url'],
                    creation_height=r['creation_height'],
                    activation_height=r['activation_height'],
                    expiration_height=r['expiration_height'],
                    effective_amount=r['effective_amount'],
                    support_amount=r['support_amount'],
                    last_takeover_height=r['last_take_over_height'],
                    claims_in_channel=r['claims_in_channel'],
                    channel_hash=r['channel_hash'],
                    reposted_claim_hash=r['reposted_claim_hash'],
                    reposted=r['reposted'],
                    signature_valid=r['signature_valid']
                ) for r in response
            ]
            extra = [
                ResolveResult(
                    name=r['claim_name'],
                    normalized_name=r['normalized_name'],
                    claim_hash=r['claim_hash'],
                    tx_num=r['tx_num'],
                    position=r['tx_nout'],
                    tx_hash=r['tx_hash'],
                    height=r['height'],
                    amount=r['amount'],
                    short_url=r['short_url'],
                    is_controlling=r['is_controlling'],
                    canonical_url=r['canonical_url'],
                    creation_height=r['creation_height'],
                    activation_height=r['activation_height'],
                    expiration_height=r['expiration_height'],
                    effective_amount=r['effective_amount'],
                    support_amount=r['support_amount'],
                    last_takeover_height=r['last_take_over_height'],
                    claims_in_channel=r['claims_in_channel'],
                    channel_hash=r['channel_hash'],
                    reposted_claim_hash=r['reposted_claim_hash'],
                    reposted=r['reposted'],
                    signature_valid=r['signature_valid']
                ) for r in await self._get_referenced_rows(total_referenced)
            ]
            result = Outputs.to_base64(
                response, extra, offset, total, censor
            )
            cache_item.result = result
            return result
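
    # Note: cached_search double-checks the cache -- once before taking the
    # per-key lock (fast path) and again after acquiring it -- so identical
    # concurrent searches compute the result only once.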

    async def get_many(self, *claim_ids):
        await self.populate_claim_cache(*claim_ids)
        return filter(None, map(self.claim_cache.get, claim_ids))

    async def populate_claim_cache(self, *claim_ids):
        missing = [claim_id for claim_id in claim_ids if self.claim_cache.get(claim_id) is None]
        if missing:
            results = await self.search_client.mget(
                index=self.index, body={"ids": missing}
            )
            for result in expand_result(filter(lambda doc: doc['found'], results["docs"])):
                self.claim_cache.set(result['claim_id'], result)

    async def search(self, **kwargs):
        try:
            return await self.search_ahead(**kwargs)
        except NotFoundError:
            return [], 0, 0
        # return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)

    async def search_ahead(self, **kwargs):
        # 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return
        per_channel_per_page = kwargs.pop('limit_claims_per_channel', 0) or 0
        remove_duplicates = kwargs.pop('remove_duplicates', False)
        page_size = kwargs.pop('limit', 10)
        offset = kwargs.pop('offset', 0)
        kwargs['limit'] = 1000
        cache_item = ResultCacheItem.from_cache(f"ahead{per_channel_per_page}{kwargs}", self.search_cache)
        if cache_item.result is not None:
            reordered_hits = cache_item.result
        else:
            async with cache_item.lock:
                if cache_item.result:
                    reordered_hits = cache_item.result
                else:
                    query = expand_query(**kwargs)
                    search_hits = deque((await self.search_client.search(
                        query, index=self.index, track_total_hits=False,
                        _source_includes=['_id', 'channel_id', 'reposted_claim_id', 'creation_height']
                    ))['hits']['hits'])
                    if remove_duplicates:
                        search_hits = self.__remove_duplicates(search_hits)
                    if per_channel_per_page > 0:
                        reordered_hits = self.__search_ahead(search_hits, page_size, per_channel_per_page)
                    else:
                        reordered_hits = [(hit['_id'], hit['_source']['channel_id']) for hit in search_hits]
                    cache_item.result = reordered_hits
        result = list(await self.get_many(*(claim_id for claim_id, _ in reordered_hits[offset:(offset + page_size)])))
        return result, 0, len(reordered_hits)

    def __remove_duplicates(self, search_hits: deque) -> deque:
        known_ids = {}  # claim_id -> (creation_height, hit_id), where hit_id is either reposted claim id or original
        dropped = set()
        for hit in search_hits:
            hit_height, hit_id = hit['_source']['creation_height'], hit['_source']['reposted_claim_id'] or hit['_id']
            if hit_id not in known_ids:
                known_ids[hit_id] = (hit_height, hit['_id'])
            else:
                previous_height, previous_id = known_ids[hit_id]
                if hit_height < previous_height:
                    known_ids[hit_id] = (hit_height, hit['_id'])
                    dropped.add(previous_id)
                else:
                    dropped.add(hit['_id'])
        return deque(hit for hit in search_hits if hit['_id'] not in dropped)
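
    # Example: if an original claim and a repost of it both match, they share
    # the same hit_id (the reposted claim id), so only the hit with the lowest
    # creation_height survives; the newer duplicate is dropped.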

    def __search_ahead(self, search_hits: list, page_size: int, per_channel_per_page: int):
        reordered_hits = []
        channel_counters = Counter()
        next_page_hits_maybe_check_later = deque()
        while search_hits or next_page_hits_maybe_check_later:
            if reordered_hits and len(reordered_hits) % page_size == 0:
                channel_counters.clear()
            elif not reordered_hits:
                pass
            else:
                break  # means last page was incomplete and we are left with bad replacements
            for _ in range(len(next_page_hits_maybe_check_later)):
                claim_id, channel_id = next_page_hits_maybe_check_later.popleft()
                if per_channel_per_page > 0 and channel_counters[channel_id] < per_channel_per_page:
                    reordered_hits.append((claim_id, channel_id))
                    channel_counters[channel_id] += 1
                else:
                    next_page_hits_maybe_check_later.append((claim_id, channel_id))
            while search_hits:
                hit = search_hits.popleft()
                hit_id, hit_channel_id = hit['_id'], hit['_source']['channel_id']
                if hit_channel_id is None or per_channel_per_page <= 0:
                    reordered_hits.append((hit_id, hit_channel_id))
                elif channel_counters[hit_channel_id] < per_channel_per_page:
                    reordered_hits.append((hit_id, hit_channel_id))
                    channel_counters[hit_channel_id] += 1
                    if len(reordered_hits) % page_size == 0:
                        break
                else:
                    next_page_hits_maybe_check_later.append((hit_id, hit_channel_id))
        return reordered_hits
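
    # Example: with page_size=20 and per_channel_per_page=2, at most two
    # claims from any one channel land on a page; surplus hits wait in
    # next_page_hits_maybe_check_later and are reconsidered when the counters
    # reset at the next page boundary.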

    async def _get_referenced_rows(self, txo_rows: List[dict]):
        txo_rows = [row for row in txo_rows if isinstance(row, dict)]
        referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows)))
        referenced_ids |= set(filter(None, (row['channel_id'] for row in txo_rows)))
        referenced_ids |= set(filter(None, (row['censoring_channel_id'] for row in txo_rows)))

        referenced_txos = []
        if referenced_ids:
            referenced_txos.extend(await self.get_many(*referenced_ids))
            referenced_ids = set(filter(None, (row['channel_id'] for row in referenced_txos)))

        if referenced_ids:
            referenced_txos.extend(await self.get_many(*referenced_ids))

        return referenced_txos


def expand_query(**kwargs):
    if "amount_order" in kwargs:
        kwargs["limit"] = 1
        kwargs["order_by"] = "effective_amount"
        kwargs["offset"] = int(kwargs["amount_order"]) - 1
    if 'name' in kwargs:
        kwargs['name'] = normalize_name(kwargs.pop('name'))
    if kwargs.get('is_controlling') is False:
        kwargs.pop('is_controlling')
    query = {'must': [], 'must_not': []}
    collapse = None
    if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
        kwargs['fee_currency'] = kwargs['fee_currency'].upper()
    for key, value in kwargs.items():
        key = key.replace('claim.', '')
        many = key.endswith('__in') or isinstance(value, list)
        if many and len(value) > 2048:
            raise TooManyClaimSearchParametersError(key, 2048)
        if many:
            key = key.replace('__in', '')
            value = list(filter(None, value))
        if value is None or isinstance(value, list) and len(value) == 0:
            continue
        key = REPLACEMENTS.get(key, key)
        if key in FIELDS:
            partial_id = False
            if key == 'claim_type':
                if isinstance(value, str):
                    value = CLAIM_TYPES[value]
                else:
                    value = [CLAIM_TYPES[claim_type] for claim_type in value]
            elif key == 'stream_type':
                value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
            if key == '_id':
                if isinstance(value, Iterable):
                    value = [item[::-1].hex() for item in value]
                else:
                    value = value[::-1].hex()
            if not many and key in ('_id', 'claim_id', 'sd_hash') and len(value) < 20:
                partial_id = True
            if key in ('signature_valid', 'has_source'):
                continue  # handled later
            if key in TEXT_FIELDS:
                key += '.keyword'
            ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
            if partial_id:
                query['must'].append({"prefix": {key: value}})
            elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
                operator_length = 2 if value[:2] in ops else 1
                operator, value = value[:operator_length], value[operator_length:]
                if key == 'fee_amount':
                    value = str(Decimal(value)*1000)
                query['must'].append({"range": {key: {ops[operator]: value}}})
            elif key in RANGE_FIELDS and isinstance(value, list) and all(v[0] in ops for v in value):
                range_constraints = []
                for v in value:
                    operator_length = 2 if v[:2] in ops else 1
                    operator, stripped_op_v = v[:operator_length], v[operator_length:]
                    if key == 'fee_amount':
                        stripped_op_v = str(Decimal(stripped_op_v)*1000)
                    range_constraints.append((operator, stripped_op_v))
                query['must'].append({"range": {key: {ops[operator]: v for operator, v in range_constraints}}})
            elif many:
                query['must'].append({"terms": {key: value}})
            else:
                if key == 'fee_amount':
                    value = str(Decimal(value)*1000)
                query['must'].append({"term": {key: {"value": value}}})
        elif key == 'not_channel_ids':
            for channel_id in value:
                query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
                query['must_not'].append({"term": {'_id': channel_id}})
        elif key == 'channel_ids':
            query['must'].append({"terms": {'channel_id.keyword': value}})
        elif key == 'claim_ids':
            query['must'].append({"terms": {'claim_id.keyword': value}})
        elif key == 'media_types':
            query['must'].append({"terms": {'media_type.keyword': value}})
        elif key == 'any_languages':
            query['must'].append({"terms": {'languages': clean_tags(value)}})
        elif key == 'all_languages':
            query['must'].extend([{"term": {'languages': tag}} for tag in value])
        elif key == 'any_tags':
            query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
        elif key == 'all_tags':
            query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
        elif key == 'not_tags':
            query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
        elif key == 'not_claim_id':
            query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
        elif key == 'limit_claims_per_channel':
            collapse = ('channel_id.keyword', value)
    if kwargs.get('has_channel_signature'):
        query['must'].append({"exists": {"field": "signature"}})
        if 'signature_valid' in kwargs:
            query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
    elif 'signature_valid' in kwargs:
        query.setdefault('should', [])
        query["minimum_should_match"] = 1
        query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
        query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
    if 'has_source' in kwargs:
        query.setdefault('should', [])
        query["minimum_should_match"] = 1
        is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
        query['should'].append(
            {"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
        query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
        query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
    if kwargs.get('text'):
        query['must'].append(
            {"simple_query_string":
                 {"query": kwargs["text"], "fields": [
                     "claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
                 ]}})
    query = {
        "_source": {"excludes": ["description", "title"]},
        'query': {'bool': query},
        "sort": [],
    }
    if "limit" in kwargs:
        query["size"] = kwargs["limit"]
    if 'offset' in kwargs:
        query["from"] = kwargs["offset"]
    if 'order_by' in kwargs:
        if isinstance(kwargs["order_by"], str):
            kwargs["order_by"] = [kwargs["order_by"]]
        for value in kwargs['order_by']:
            if 'trending_group' in value:
                # fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
                continue
            is_asc = value.startswith('^')
            value = value[1:] if is_asc else value
            value = REPLACEMENTS.get(value, value)
            if value in TEXT_FIELDS:
                value += '.keyword'
            query['sort'].append({value: "asc" if is_asc else "desc"})
    if collapse:
        query["collapse"] = {
            "field": collapse[0],
            "inner_hits": {
                "name": collapse[0],
                "size": collapse[1],
                "sort": query["sort"]
            }
        }
    return query
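
# Illustrative example (not executed): expand_query(channel_ids=['abc'],
# order_by=['^height'], limit=10) produces a body shaped like
# {'_source': {'excludes': ['description', 'title']},
#  'query': {'bool': {'must': [{'terms': {'channel_id.keyword': ['abc']}}],
#                     'must_not': []}},
#  'sort': [{'height': 'asc'}], 'size': 10}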


def expand_result(results):
    inner_hits = []
    expanded = []
    for result in results:
        if result.get("inner_hits"):
            for _, inner_hit in result["inner_hits"].items():
                inner_hits.extend(inner_hit["hits"]["hits"])
            continue
        result = result['_source']
        result['claim_hash'] = unhexlify(result['claim_id'])[::-1]
        if result['reposted_claim_id']:
            result['reposted_claim_hash'] = unhexlify(result['reposted_claim_id'])[::-1]
        else:
            result['reposted_claim_hash'] = None
        result['channel_hash'] = unhexlify(result['channel_id'])[::-1] if result['channel_id'] else None
        result['txo_hash'] = unhexlify(result['tx_id'])[::-1] + struct.pack('<I', result['tx_nout'])
        result['tx_hash'] = unhexlify(result['tx_id'])[::-1]
        result['reposted'] = result.pop('repost_count')
        result['signature_valid'] = result.pop('is_signature_valid')
        # result['normalized'] = result.pop('normalized_name')
        # if result['censoring_channel_hash']:
        #     result['censoring_channel_hash'] = unhexlify(result['censoring_channel_hash'])[::-1]
        expanded.append(result)
    if inner_hits:
        return expand_result(inner_hits)
    return expanded


class ResultCacheItem:
    __slots__ = '_result', 'lock', 'has_result'

    def __init__(self):
        self.has_result = asyncio.Event()
        self.lock = asyncio.Lock()
        self._result = None

    @property
    def result(self) -> str:
        return self._result

    @result.setter
    def result(self, result: str):
        self._result = result
        if result is not None:
            self.has_result.set()

    @classmethod
    def from_cache(cls, cache_key, cache):
        cache_item = cache.get(cache_key)
        if cache_item is None:
            cache_item = cache[cache_key] = ResultCacheItem()
        return cache_item

@ -1,138 +0,0 @@
import os
import argparse
import asyncio
import logging
from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk
from lbry.wallet.server.env import Env
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.db.elasticsearch.search import SearchIndex, IndexVersionMismatch
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS


async def get_recent_claims(env, index_name='claims', db=None):
    log = logging.getLogger()
    need_open = db is None
    db = db or LevelDB(env)
    try:
        if need_open:
            db.open_db()
        if db.es_sync_height == db.db_height or db.db_height <= 0:
            return
        if need_open:
            await db.initialize_caches()
        log.info(f"catching up ES ({db.es_sync_height}) to leveldb height: {db.db_height}")
        cnt = 0
        touched_claims = set()
        deleted_claims = set()
        for height in range(db.es_sync_height, db.db_height + 1):
            touched_or_deleted = db.prefix_db.touched_or_deleted.get(height)
            touched_claims.update(touched_or_deleted.touched_claims)
            deleted_claims.update(touched_or_deleted.deleted_claims)
        touched_claims.difference_update(deleted_claims)

        for deleted in deleted_claims:
            yield {
                '_index': index_name,
                '_op_type': 'delete',
                '_id': deleted.hex()
            }
        for touched in touched_claims:
            claim = db.claim_producer(touched)
            if claim:
                yield {
                    'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
                    '_id': claim['claim_id'],
                    '_index': index_name,
                    '_op_type': 'update',
                    'doc_as_upsert': True
                }
                cnt += 1
            else:
                logging.warning("could not sync claim %s", touched.hex())
            if cnt % 10000 == 0:
                logging.info("%i claims sent to ES", cnt)

        db.es_sync_height = db.db_height
        db.write_db_state()
        db.prefix_db.unsafe_commit()
        db.assert_db_state()

        logging.info("finished sending %i claims to ES, deleted %i", cnt, len(deleted_claims))
    finally:
        if need_open:
            db.close()


async def get_all_claims(env, index_name='claims', db=None):
    need_open = db is None
    db = db or LevelDB(env)
    if need_open:
        db.open_db()
        await db.initialize_caches()
    logging.info("Fetching claims to send ES from leveldb")
    try:
        cnt = 0
        async for claim in db.all_claims_producer():
            yield {
                'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
                '_id': claim['claim_id'],
                '_index': index_name,
                '_op_type': 'update',
                'doc_as_upsert': True
            }
            cnt += 1
            if cnt % 10000 == 0:
                logging.info("sent %i claims to ES", cnt)
    finally:
        if need_open:
            db.close()


async def make_es_index_and_run_sync(env: Env, clients=32, force=False, db=None, index_name='claims'):
    index = SearchIndex(env.es_index_prefix, elastic_host=env.elastic_host, elastic_port=env.elastic_port)
    logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
    try:
        created = await index.start()
    except IndexVersionMismatch as err:
        logging.info(
            "dropping ES search index (version %s) for upgrade to version %s", err.got_version, err.expected_version
        )
        await index.delete_index()
        await index.stop()
        created = await index.start()
    finally:
        await index.stop()

    es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
    if force or created:
        claim_generator = get_all_claims(env, index_name=index_name, db=db)
    else:
        claim_generator = get_recent_claims(env, index_name=index_name, db=db)
    try:
        async for ok, item in async_streaming_bulk(es, claim_generator, request_timeout=600, raise_on_error=False):
            if not ok:
                logging.warning("indexing failed for an item: %s", item)
        await es.indices.refresh(index=index_name)
    finally:
        await es.close()


def run_elastic_sync():
    logging.basicConfig(level=logging.INFO)
    logging.getLogger('aiohttp').setLevel(logging.WARNING)
    logging.getLogger('elasticsearch').setLevel(logging.WARNING)

    logging.info('lbry.server starting')
    parser = argparse.ArgumentParser(prog="lbry-hub-elastic-sync")
    parser.add_argument("-c", "--clients", type=int, default=32)
    parser.add_argument("-f", "--force", default=False, action='store_true')
    Env.contribute_to_arg_parser(parser)
    args = parser.parse_args()
    env = Env.from_arg_parser(args)

    if not os.path.exists(os.path.join(args.db_dir, 'lbry-leveldb')):
        logging.info("DB path doesn't exist, nothing to sync to ES")
        return

    asyncio.run(make_es_index_and_run_sync(env, clients=args.clients, force=args.force))
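
# Typical invocation (illustrative): lbry-hub-elastic-sync --db_dir /path/to/db -f
# re-indexes every claim via get_all_claims(); without -f, only blocks since
# es_sync_height are replayed through get_recent_claims().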

File diff suppressed because it is too large

@ -1,175 +0,0 @@
import struct
import logging
from string import printable
from collections import defaultdict
from typing import Tuple, Iterable, Callable, Optional
from lbry.wallet.server.db import DB_PREFIXES

_OP_STRUCT = struct.Struct('>BLL')
log = logging.getLogger()


class RevertableOp:
    __slots__ = [
        'key',
        'value',
    ]
    is_put = 0

    def __init__(self, key: bytes, value: bytes):
        self.key = key
        self.value = value

    @property
    def is_delete(self) -> bool:
        return not self.is_put

    def invert(self) -> 'RevertableOp':
        raise NotImplementedError()

    def pack(self) -> bytes:
        """
        Serialize to bytes
        """
        return struct.pack(
            f'>BLL{len(self.key)}s{len(self.value)}s', int(self.is_put), len(self.key), len(self.value), self.key,
            self.value
        )

    @classmethod
    def unpack(cls, packed: bytes) -> Tuple['RevertableOp', bytes]:
        """
        Deserialize from bytes

        :param packed: bytes containing at least one packed revertable op
        :return: tuple of the deserialized op (a put or a delete) and the remaining serialized bytes
        """
        is_put, key_len, val_len = _OP_STRUCT.unpack(packed[:9])
        key = packed[9:9 + key_len]
        value = packed[9 + key_len:9 + key_len + val_len]
        if is_put == 1:
            return RevertablePut(key, value), packed[9 + key_len + val_len:]
        return RevertableDelete(key, value), packed[9 + key_len + val_len:]
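
    # Serialized layout (big-endian, per _OP_STRUCT '>BLL'): a 1-byte is_put
    # flag, a 4-byte key length, a 4-byte value length, then the raw key and
    # value bytes. Ops are simply concatenated, which is why unpack() also
    # returns the unconsumed remainder.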

    def __eq__(self, other: 'RevertableOp') -> bool:
        return (self.is_put, self.key, self.value) == (other.is_put, other.key, other.value)

    def __repr__(self) -> str:
        return str(self)

    def __str__(self) -> str:
        from lbry.wallet.server.db.prefixes import auto_decode_item
        k, v = auto_decode_item(self.key, self.value)
        key = ''.join(c if c in printable else '.' for c in str(k))
        val = ''.join(c if c in printable else '.' for c in str(v))
        return f"{'PUT' if self.is_put else 'DELETE'} {DB_PREFIXES(self.key[:1]).name}: {key} | {val}"


class RevertableDelete(RevertableOp):
    def invert(self):
        return RevertablePut(self.key, self.value)


class RevertablePut(RevertableOp):
    is_put = True

    def invert(self):
        return RevertableDelete(self.key, self.value)


class OpStackIntegrity(Exception):
    pass


class RevertableOpStack:
    def __init__(self, get_fn: Callable[[bytes], Optional[bytes]], unsafe_prefixes=None):
        """
        This represents a sequence of revertable puts and deletes to a key-value database that checks for integrity
        violations when applying the puts and deletes. The integrity checks assure that keys that do not exist
        are not deleted, and that when keys are deleted the current value is correctly known so that the delete
        may be undone. When putting values, the integrity checks assure that existing values are not overwritten
        without first being deleted. Updates are performed by applying a delete op for the old value and a put op
        for the new value.

        :param get_fn: getter function from an object implementing `KeyValueStorage`
        :param unsafe_prefixes: optional set of prefixes to ignore integrity errors for, violations are still logged
        """
        self._get = get_fn
        self._items = defaultdict(list)
        self._unsafe_prefixes = unsafe_prefixes or set()

    def append_op(self, op: RevertableOp):
        """
        Apply a put or delete op, checking that it introduces no integrity errors
        """

        inverted = op.invert()
        if self._items[op.key] and inverted == self._items[op.key][-1]:
            self._items[op.key].pop()  # if the new op is the inverse of the last op, we can safely null both
            return
        elif self._items[op.key] and self._items[op.key][-1] == op:  # duplicate of last op
            return  # raise an error?
        stored_val = self._get(op.key)
        has_stored_val = stored_val is not None
        delete_stored_op = None if not has_stored_val else RevertableDelete(op.key, stored_val)
        will_delete_existing_stored = False if delete_stored_op is None else (delete_stored_op in self._items[op.key])
        try:
            if op.is_put and has_stored_val and not will_delete_existing_stored:
                raise OpStackIntegrity(
                    f"db op tries to add on top of existing key without deleting first: {op}"
                )
            elif op.is_delete and has_stored_val and stored_val != op.value and not will_delete_existing_stored:
                # there is a value and we're not deleting it in this op
                # check that a delete for the stored value is in the stack
                raise OpStackIntegrity(f"db op tries to delete with incorrect existing value {op}")
            elif op.is_delete and not has_stored_val:
                raise OpStackIntegrity(f"db op tries to delete nonexistent key: {op}")
            elif op.is_delete and stored_val != op.value:
                raise OpStackIntegrity(f"db op tries to delete with incorrect value: {op}")
        except OpStackIntegrity as err:
            if op.key[:1] in self._unsafe_prefixes:
                log.debug(f"skipping over integrity error: {err}")
            else:
                raise err
        self._items[op.key].append(op)

    def extend_ops(self, ops: Iterable[RevertableOp]):
        """
        Apply a sequence of put or delete ops, checking that they introduce no integrity errors
        """
        for op in ops:
            self.append_op(op)

    def clear(self):
        self._items.clear()

    def __len__(self):
        return sum(map(len, self._items.values()))

    def __iter__(self):
        for key, ops in self._items.items():
            for op in ops:
                yield op

    def __reversed__(self):
        for key, ops in self._items.items():
            for op in reversed(ops):
                yield op

    def get_undo_ops(self) -> bytes:
        """
        Get the serialized bytes to undo all of the changes made by the pending ops
        """
        return b''.join(op.invert().pack() for op in reversed(self))
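
    # Example (illustrative): if the pending ops are [RevertablePut(k, v)] on
    # a previously absent key, get_undo_ops() serializes [RevertableDelete(k, v)];
    # replaying those bytes through apply_packed_undo_ops() restores the prior
    # state, since each op is inverted and applied in reverse order.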

    def apply_packed_undo_ops(self, packed: bytes):
        """
        Unpack and apply a sequence of undo ops from serialized undo bytes
        """
        while packed:
            op, packed = RevertableOp.unpack(packed)
            self.append_op(op)

    def get_last_op_for_key(self, key: bytes) -> Optional[RevertableOp]:
        if key in self._items and self._items[key]:
            return self._items[key][-1]

@ -1,384 +0,0 @@
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.

import math
import re
import resource
from os import environ
from collections import namedtuple
from ipaddress import ip_address

from lbry.wallet.server.util import class_logger
from lbry.wallet.server.coin import Coin, LBC, LBCTestNet, LBCRegTest
import lbry.wallet.server.util as lib_util


NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')


class Env:

    # Peer discovery
    PD_OFF, PD_SELF, PD_ON = range(3)

    class Error(Exception):
        pass

    def __init__(self, coin=None, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None,
                 elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None, websocket_port=None,
                 chain=None, es_index_prefix=None, es_mode=None, cache_MB=None, reorg_limit=None, tcp_port=None,
                 udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None, rpc_port=None,
                 prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None,
                 allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None,
                 payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None,
                 session_timeout=None, drop_client=None, description=None, daily_fee=None,
                 database_query_timeout=None, db_max_open_files=512):
        self.logger = class_logger(__name__, self.__class__.__name__)

        self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
        self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL')
        self.db_max_open_files = db_max_open_files

        self.host = host if host is not None else self.default('HOST', 'localhost')
        self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost')
        self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
        self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
        self.loop_policy = self.set_event_loop_policy(
            loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None)
        )
        self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
        self.max_query_workers = max_query_workers if max_query_workers is not None else self.integer('MAX_QUERY_WORKERS', 4)
        self.websocket_host = websocket_host if websocket_host is not None else self.default('WEBSOCKET_HOST', self.host)
        self.websocket_port = websocket_port if websocket_port is not None else self.integer('WEBSOCKET_PORT', None)
        if coin is not None:
            assert issubclass(coin, Coin)
            self.coin = coin
        else:
            chain = chain if chain is not None else self.default('NET', 'mainnet').strip().lower()
            if chain == 'mainnet':
                self.coin = LBC
            elif chain == 'testnet':
                self.coin = LBCTestNet
            else:
                self.coin = LBCRegTest
        self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
        self.es_mode = es_mode if es_mode is not None else self.default('ES_MODE', 'writer')
        self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
        self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
        # Server stuff
        self.tcp_port = tcp_port if tcp_port is not None else self.integer('TCP_PORT', None)
        self.udp_port = udp_port if udp_port is not None else self.integer('UDP_PORT', self.tcp_port)
        self.ssl_port = ssl_port if ssl_port is not None else self.integer('SSL_PORT', None)
        if self.ssl_port:
            self.ssl_certfile = ssl_certfile if ssl_certfile is not None else self.required('SSL_CERTFILE')
            self.ssl_keyfile = ssl_keyfile if ssl_keyfile is not None else self.required('SSL_KEYFILE')
        self.rpc_port = rpc_port if rpc_port is not None else self.integer('RPC_PORT', 8000)
        self.prometheus_port = prometheus_port if prometheus_port is not None else self.integer('PROMETHEUS_PORT', 0)
        self.max_subscriptions = max_subscriptions if max_subscriptions is not None else self.integer('MAX_SUBSCRIPTIONS', 10000)
        self.banner_file = banner_file if banner_file is not None else self.default('BANNER_FILE', None)
        # self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
        self.anon_logs = anon_logs if anon_logs is not None else self.boolean('ANON_LOGS', False)
        self.log_sessions = log_sessions if log_sessions is not None else self.integer('LOG_SESSIONS', 3600)
        self.allow_lan_udp = allow_lan_udp if allow_lan_udp is not None else self.boolean('ALLOW_LAN_UDP', False)
        self.cache_all_tx_hashes = cache_all_tx_hashes if cache_all_tx_hashes is not None else self.boolean('CACHE_ALL_TX_HASHES', False)
        self.cache_all_claim_txos = cache_all_claim_txos if cache_all_claim_txos is not None else self.boolean('CACHE_ALL_CLAIM_TXOS', False)
        self.country = country if country is not None else self.default('COUNTRY', 'US')
        # Peer discovery
        self.peer_discovery = self.peer_discovery_enum()
        self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
        self.peer_hubs = self.extract_peer_hubs()
        # self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
        # self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
        # The electrum client takes the empty string as unspecified
        self.payment_address = payment_address if payment_address is not None else self.default('PAYMENT_ADDRESS', '')
        self.donation_address = donation_address if donation_address is not None else self.default('DONATION_ADDRESS', '')
        # Server limits to help prevent DoS
        self.max_send = max_send if max_send is not None else self.integer('MAX_SEND', 1000000)
        self.max_receive = max_receive if max_receive is not None else self.integer('MAX_RECEIVE', 1000000)
        # self.max_subs = self.integer('MAX_SUBS', 250000)
        self.max_sessions = max_sessions if max_sessions is not None else self.sane_max_sessions()
        # self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
        self.session_timeout = session_timeout if session_timeout is not None else self.integer('SESSION_TIMEOUT', 600)
        self.drop_client = drop_client if drop_client is not None else self.custom("DROP_CLIENT", None, re.compile)
        self.description = description if description is not None else self.default('DESCRIPTION', '')
        self.daily_fee = daily_fee if daily_fee is not None else self.string_amount('DAILY_FEE', '0')

        # Identities
        clearnet_identity = self.clearnet_identity()
        tor_identity = self.tor_identity(clearnet_identity)
        self.identities = [identity
                           for identity in (clearnet_identity, tor_identity)
                           if identity is not None]
        self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
            (float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)

    @classmethod
    def default(cls, envvar, default):
        return environ.get(envvar, default)

    @classmethod
    def boolean(cls, envvar, default):
        default = 'Yes' if default else ''
        return bool(cls.default(envvar, default).strip())
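
    # Note: boolean() treats any non-empty value as true, so e.g. ANON_LOGS=no
    # still enables the flag; unset the variable (or set it empty) to disable.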

    @classmethod
    def required(cls, envvar):
        value = environ.get(envvar)
        if value is None:
            raise cls.Error(f'required envvar {envvar} not set')
        return value

    @classmethod
    def string_amount(cls, envvar, default):
        value = environ.get(envvar, default)
        amount_pattern = re.compile(r"[0-9]{0,10}(\.[0-9]{1,8})?")
        if len(value) > 0 and not amount_pattern.fullmatch(value):
            raise cls.Error(f'{value} is not a valid amount for {envvar}')
        return value

    @classmethod
    def integer(cls, envvar, default):
        value = environ.get(envvar)
        if value is None:
            return default
        try:
            return int(value)
        except Exception:
            raise cls.Error(f'cannot convert envvar {envvar} value {value} to an integer')

    @classmethod
    def custom(cls, envvar, default, parse):
        value = environ.get(envvar)
        if value is None:
            return default
        try:
            return parse(value)
        except Exception as e:
            raise cls.Error(f'cannot parse envvar {envvar} value {value}') from e

    @classmethod
    def obsolete(cls, envvars):
        bad = [envvar for envvar in envvars if environ.get(envvar)]
        if bad:
            raise cls.Error(f'remove obsolete environment variables {bad}')

    @classmethod
    def set_event_loop_policy(cls, policy_name: str = None):
        if not policy_name or policy_name == 'default':
            import asyncio
            return asyncio.get_event_loop_policy()
        elif policy_name == 'uvloop':
            import uvloop
            import asyncio
            loop_policy = uvloop.EventLoopPolicy()
            asyncio.set_event_loop_policy(loop_policy)
            return loop_policy
        raise cls.Error(f'unknown event loop policy "{policy_name}"')

    def cs_host(self, *, for_rpc):
        """Returns the 'host' argument to pass to asyncio's create_server
        call. The result can be a single host name string, a list of
        host name strings, or an empty string to bind to all interfaces.

        If rpc is True the host to use for the RPC server is returned.
        Otherwise the host to use for SSL/TCP servers is returned.
        """
        host = self.rpc_host if for_rpc else self.host
        result = [part.strip() for part in host.split(',')]
        if len(result) == 1:
            result = result[0]
        # An empty result indicates all interfaces, which we do not
        # permit for an RPC server.
        if for_rpc and not result:
            result = 'localhost'
        if result == 'localhost':
            # 'localhost' resolves to ::1 (ipv6) on many systems, which fails on default setup of
            # docker, using 127.0.0.1 instead forces ipv4
            result = '127.0.0.1'
        return result

    def sane_max_sessions(self):
        """Return the maximum number of sessions to permit. Normally this
        is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
        downwards if running with a small open file rlimit."""
        env_value = self.integer('MAX_SESSIONS', 1000)
        nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
        # We give the DB 250 files; allow ElectrumX 100 for itself
        value = max(0, min(env_value, nofile_limit - 350))
        if value < env_value:
            self.logger.warning(f'lowered maximum sessions from {env_value:,d} to {value:,d} '
                                f'because your open file limit is {nofile_limit:,d}')
        return value
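
    # Example: with `ulimit -n 1024` the session cap becomes
    # min(MAX_SESSIONS, 1024 - 350) = 674, keeping 350 descriptors in reserve
    # for the database and the server itself.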

    def clearnet_identity(self):
        host = self.default('REPORT_HOST', None)
        if host is None:
            return None
        try:
            ip = ip_address(host)
        except ValueError:
            bad = (not lib_util.is_valid_hostname(host)
                   or host.lower() == 'localhost')
        else:
            bad = (ip.is_multicast or ip.is_unspecified
                   or (ip.is_private and self.peer_announce))
        if bad:
            raise self.Error(f'"{host}" is not a valid REPORT_HOST')
        tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
        ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
        if tcp_port == ssl_port:
            raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
                             f'both resolve to {tcp_port}')
        return NetIdentity(
            host,
            tcp_port,
            ssl_port,
            ''
        )

    def tor_identity(self, clearnet):
        host = self.default('REPORT_HOST_TOR', None)
        if host is None:
            return None
        if not host.endswith('.onion'):
            raise self.Error(f'tor host "{host}" must end with ".onion"')

        def port(port_kind):
            """Returns the clearnet identity port, if any and not zero,
            otherwise the listening port."""
            result = 0
            if clearnet:
                result = getattr(clearnet, port_kind)
            return result or getattr(self, port_kind)

        tcp_port = self.integer('REPORT_TCP_PORT_TOR',
                                port('tcp_port')) or None
        ssl_port = self.integer('REPORT_SSL_PORT_TOR',
                                port('ssl_port')) or None
        if tcp_port == ssl_port:
            raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
                             f'both resolve to {tcp_port}')

        return NetIdentity(
            host,
            tcp_port,
            ssl_port,
            '_tor',
        )

    def hosts_dict(self):
        return {identity.host: {'tcp_port': identity.tcp_port,
                                'ssl_port': identity.ssl_port}
                for identity in self.identities}

    def peer_discovery_enum(self):
        pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
        if pd in ('off', ''):
            return self.PD_OFF
        elif pd == 'self':
            return self.PD_SELF
        else:
            return self.PD_ON

    def extract_peer_hubs(self):
        return [hub.strip() for hub in self.default('PEER_HUBS', '').split(',') if hub.strip()]

    @classmethod
    def contribute_to_arg_parser(cls, parser):
        parser.add_argument('--db_dir', type=str, help='path of the directory containing lbry-leveldb',
                            default=cls.default('DB_DIRECTORY', None))
        parser.add_argument('--daemon_url',
                            help='URL for rpc from lbrycrd, <rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>',
                            default=cls.default('DAEMON_URL', None))
        parser.add_argument('--db_max_open_files', type=int, default=512,
                            help='number of files leveldb can have open at a time')
        parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
                            help='Interface for hub server to listen on')
        parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
                            help='TCP port to listen on for hub server')
        parser.add_argument('--udp_port', type=int, default=cls.integer('UDP_PORT', 50001),
                            help='UDP port to listen on for hub server')
        parser.add_argument('--rpc_host', default=cls.default('RPC_HOST', 'localhost'), type=str,
                            help='Listening interface for admin rpc')
        parser.add_argument('--rpc_port', default=cls.integer('RPC_PORT', 8000), type=int,
                            help='Listening port for admin rpc')
        parser.add_argument('--websocket_host', default=cls.default('WEBSOCKET_HOST', 'localhost'), type=str,
                            help='Listening interface for websocket')
        parser.add_argument('--websocket_port', default=cls.integer('WEBSOCKET_PORT', None), type=int,
                            help='Listening port for websocket')

        parser.add_argument('--ssl_port', default=cls.integer('SSL_PORT', None), type=int,
                            help='SSL port to listen on for hub server')
        parser.add_argument('--ssl_certfile', default=cls.default('SSL_CERTFILE', None), type=str,
                            help='Path to SSL cert file')
        parser.add_argument('--ssl_keyfile', default=cls.default('SSL_KEYFILE', None), type=str,
                            help='Path to SSL key file')
        parser.add_argument('--reorg_limit', default=cls.integer('REORG_LIMIT', 200), type=int, help='Max reorg depth')
        parser.add_argument('--elastic_host', default=cls.default('ELASTIC_HOST', 'localhost'), type=str,
                            help='elasticsearch host')
        parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
                            help='elasticsearch port')
        parser.add_argument('--es_mode', default=cls.default('ES_MODE', 'writer'), type=str,
                            choices=['reader', 'writer'])
        parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
        parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str,
                            choices=['default', 'uvloop'])
        parser.add_argument('--max_query_workers', type=int, default=cls.integer('MAX_QUERY_WORKERS', 4),
                            help='number of threads used by the request handler to read the database')
        parser.add_argument('--cache_MB', type=int, default=cls.integer('CACHE_MB', 1024),
                            help='size of the leveldb lru cache, in megabytes')
        parser.add_argument('--cache_all_tx_hashes', type=bool,
                            help='Load all tx hashes into memory. This will make address subscriptions and sync, '
                                 'resolve, transaction fetching, and block sync all faster at the expense of higher '
                                 'memory usage')
        parser.add_argument('--cache_all_claim_txos', type=bool,
                            help='Load all claim txos into memory. This will make address subscriptions and sync, '
                                 'resolve, transaction fetching, and block sync all faster at the expense of higher '
                                 'memory usage')
        parser.add_argument('--prometheus_port', type=int, default=cls.integer('PROMETHEUS_PORT', 0),
                            help='port for hub prometheus metrics to listen on, disabled by default')
        parser.add_argument('--max_subscriptions', type=int, default=cls.integer('MAX_SUBSCRIPTIONS', 10000),
                            help='max subscriptions per connection')
        parser.add_argument('--banner_file', type=str, default=cls.default('BANNER_FILE', None),
                            help='path to file containing banner text')
        parser.add_argument('--anon_logs', type=bool, default=cls.boolean('ANON_LOGS', False),
                            help="don't log ip addresses")
        parser.add_argument('--allow_lan_udp', type=bool, default=cls.boolean('ALLOW_LAN_UDP', False),
                            help='reply to hub UDP ping messages from LAN ip addresses')
        parser.add_argument('--country', type=str, default=cls.default('COUNTRY', 'US'), help='')
        parser.add_argument('--max_send', type=int, default=cls.default('MAX_SEND', 1000000), help='')
        parser.add_argument('--max_receive', type=int, default=cls.default('MAX_RECEIVE', 1000000), help='')
        parser.add_argument('--max_sessions', type=int, default=cls.default('MAX_SESSIONS', 1000), help='')
        parser.add_argument('--session_timeout', type=int, default=cls.default('SESSION_TIMEOUT', 600), help='')
        parser.add_argument('--drop_client', type=str, default=cls.default('DROP_CLIENT', None), help='')
        parser.add_argument('--description', type=str, default=cls.default('DESCRIPTION', ''), help='')
        parser.add_argument('--daily_fee', type=float, default=cls.default('DAILY_FEE', 0.0), help='')
        parser.add_argument('--payment_address', type=str, default=cls.default('PAYMENT_ADDRESS', ''), help='')
        parser.add_argument('--donation_address', type=str, default=cls.default('DONATION_ADDRESS', ''), help='')
        parser.add_argument('--chain', type=str, default=cls.default('NET', 'mainnet'),
                            help="Which chain to use, default is mainnet")
        parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
                            help="elasticsearch query timeout")

    @classmethod
    def from_arg_parser(cls, args):
        return cls(
            db_dir=args.db_dir, daemon_url=args.daemon_url, db_max_open_files=args.db_max_open_files,
            host=args.host, rpc_host=args.rpc_host, elastic_host=args.elastic_host, elastic_port=args.elastic_port,
            loop_policy=args.loop_policy, max_query_workers=args.max_query_workers, websocket_host=args.websocket_host,
            websocket_port=args.websocket_port, chain=args.chain, es_index_prefix=args.es_index_prefix,
            es_mode=args.es_mode, cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
            udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile,
            ssl_keyfile=args.ssl_keyfile, rpc_port=args.rpc_port, prometheus_port=args.prometheus_port,
            max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs,
            log_sessions=None, allow_lan_udp=args.allow_lan_udp,
            cache_all_tx_hashes=args.cache_all_tx_hashes, cache_all_claim_txos=args.cache_all_claim_txos,
            country=args.country, payment_address=args.payment_address, donation_address=args.donation_address,
            max_send=args.max_send, max_receive=args.max_receive, max_sessions=args.max_sessions,
            session_timeout=args.session_timeout, drop_client=args.drop_client, description=args.description,
            daily_fee=args.daily_fee, database_query_timeout=(args.query_timeout_ms / 1000)
        )

@ -1,160 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""Cryptographic hash functions and related classes."""


import hashlib
import hmac

from lbry.wallet.server.util import bytes_to_int, int_to_bytes, hex_to_bytes

_sha256 = hashlib.sha256
_sha512 = hashlib.sha512
_new_hash = hashlib.new
_new_hmac = hmac.new
HASHX_LEN = 11
CLAIM_HASH_LEN = 20


def sha256(x):
    """Simple wrapper of hashlib sha256."""
    return _sha256(x).digest()


def ripemd160(x):
    """Simple wrapper of hashlib ripemd160."""
    h = _new_hash('ripemd160')
    h.update(x)
    return h.digest()


def double_sha256(x):
    """SHA-256 of SHA-256, as used extensively in bitcoin."""
    return sha256(sha256(x))


def hmac_sha512(key, msg):
    """Use SHA-512 to provide an HMAC."""
    return _new_hmac(key, msg, _sha512).digest()


def hash160(x):
    """RIPEMD-160 of SHA-256.

    Used to make bitcoin addresses from pubkeys."""
    return ripemd160(sha256(x))


def hash_to_hex_str(x: bytes) -> str:
    """Convert a big-endian binary hash to displayed hex string.

    Display form of a binary hash is reversed and converted to hex.
    """
    return x[::-1].hex()


def hex_str_to_hash(x: str) -> bytes:
    """Convert a displayed hex string to a binary hash."""
    return hex_to_bytes(x)[::-1]


class Base58Error(Exception):
    """Exception used for Base58 errors."""


class Base58:
    """Class providing base 58 functionality."""

    chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    assert len(chars) == 58
    cmap = {c: n for n, c in enumerate(chars)}

    @staticmethod
    def char_value(c):
        val = Base58.cmap.get(c)
        if val is None:
            raise Base58Error(f'invalid base 58 character "{c}"')
        return val

    @staticmethod
    def decode(txt):
        """Decodes txt into a big-endian bytearray."""
        if not isinstance(txt, str):
            raise TypeError('a string is required')

        if not txt:
            raise Base58Error('string cannot be empty')

        value = 0
        for c in txt:
            value = value * 58 + Base58.char_value(c)

        result = int_to_bytes(value)

        # Prepend leading zero bytes if necessary
        count = 0
        for c in txt:
            if c != '1':
                break
            count += 1
        if count:
            result = bytes(count) + result

        return result

    @staticmethod
    def encode(be_bytes):
        """Converts a big-endian bytearray into a base58 string."""
        value = bytes_to_int(be_bytes)

        txt = ''
        while value:
            value, mod = divmod(value, 58)
            txt += Base58.chars[mod]

        for byte in be_bytes:
            if byte != 0:
                break
            txt += '1'

        return txt[::-1]
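
    # Example: Base58.encode(b'\x00\x00\x01') == '112' -- the integer value 1
    # encodes to '2' and each leading zero byte contributes a leading '1'.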

    @staticmethod
    def decode_check(txt, *, hash_fn=double_sha256):
        """Decodes a Base58Check-encoded string to a payload. The version
        prefixes it."""
        be_bytes = Base58.decode(txt)
        result, check = be_bytes[:-4], be_bytes[-4:]
        if check != hash_fn(result)[:4]:
            raise Base58Error(f'invalid base 58 checksum for {txt}')
        return result

    @staticmethod
    def encode_check(payload, *, hash_fn=double_sha256):
        """Encodes a payload bytearray (which includes the version byte(s))
        into a Base58Check string."""
        be_bytes = payload + hash_fn(payload)[:4]
        return Base58.encode(be_bytes)

File diff suppressed because it is too large

@ -1,361 +0,0 @@
# Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.

"""Mempool handling."""
import asyncio
import itertools
import time
import attr
import typing
from typing import Set, Optional, Callable, Awaitable
from collections import defaultdict
from prometheus_client import Histogram
from lbry.wallet.server.hash import hash_to_hex_str, hex_str_to_hash
from lbry.wallet.server.util import class_logger, chunks
from lbry.wallet.server.leveldb import UTXO
if typing.TYPE_CHECKING:
    from lbry.wallet.server.session import LBRYSessionManager


@attr.s(slots=True)
class MemPoolTx:
    prevouts = attr.ib()
    # A pair is a (hashX, value) tuple
    in_pairs = attr.ib()
    out_pairs = attr.ib()
    fee = attr.ib()
    size = attr.ib()
    raw_tx = attr.ib()


@attr.s(slots=True)
class MemPoolTxSummary:
    hash = attr.ib()
    fee = attr.ib()
    has_unconfirmed_inputs = attr.ib()


NAMESPACE = "wallet_server"
HISTOGRAM_BUCKETS = (
    .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
)
mempool_process_time_metric = Histogram(
    "processed_mempool", "Time to process mempool and notify touched addresses",
    namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
)


class MemPool:
    def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0):
        self.coin = coin
        self._daemon = daemon
        self._db = db
        self._touched_mp = {}
        self._touched_bp = {}
        self._highest_block = -1

        self.logger = class_logger(__name__, self.__class__.__name__)
        self.txs = {}
        self.hashXs = defaultdict(set)  # None can be a key
        self.cached_compact_histogram = []
        self.refresh_secs = refresh_secs
        self.log_status_secs = log_status_secs
        # Prevents mempool refreshes during fee histogram calculation
        self.lock = state_lock
        self.wakeup = asyncio.Event()
        self.mempool_process_time_metric = mempool_process_time_metric
        self.notified_mempool_txs = set()
        self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None

    async def _logging(self, synchronized_event):
        """Print regular logs of mempool stats."""
        self.logger.info('beginning processing of daemon mempool. '
                         'This can take some time...')
        start = time.perf_counter()
        await synchronized_event.wait()
        elapsed = time.perf_counter() - start
        self.logger.info(f'synced in {elapsed:.2f}s')
        while True:
            self.logger.info(f'{len(self.txs):,d} txs '
                             f'touching {len(self.hashXs):,d} addresses')
            await asyncio.sleep(self.log_status_secs)
            await synchronized_event.wait()

    def _accept_transactions(self, tx_map, utxo_map, touched):
        """Accept transactions in tx_map to the mempool if all their inputs
        can be found in the existing mempool or a utxo_map from the
        DB.

        Returns an (unprocessed tx_map, unspent utxo_map) pair.
        """
        hashXs = self.hashXs
        txs = self.txs

        deferred = {}
        unspent = set(utxo_map)
        # Try to find all prevouts so we can accept the TX
        for hash, tx in tx_map.items():
            in_pairs = []
            try:
                for prevout in tx.prevouts:
                    utxo = utxo_map.get(prevout)
                    if not utxo:
                        prev_hash, prev_index = prevout
                        # Raises KeyError if prev_hash is not in txs
                        utxo = txs[prev_hash].out_pairs[prev_index]
                    in_pairs.append(utxo)
            except KeyError:
                deferred[hash] = tx
                continue

            # Spend the prevouts
            unspent.difference_update(tx.prevouts)

            # Save the in_pairs, compute the fee and accept the TX
            tx.in_pairs = tuple(in_pairs)
|
||||
# Avoid negative fees if dealing with generation-like transactions
|
||||
# because some in_parts would be missing
|
||||
tx.fee = max(0, (sum(v for _, v in tx.in_pairs) -
|
||||
sum(v for _, v in tx.out_pairs)))
|
||||
txs[hash] = tx
|
||||
|
||||
for hashX, value in itertools.chain(tx.in_pairs, tx.out_pairs):
|
||||
touched.add(hashX)
|
||||
hashXs[hashX].add(hash)
|
||||
|
||||
return deferred, {prevout: utxo_map[prevout] for prevout in unspent}
|
||||
|
||||
async def _mempool_loop(self, synchronized_event):
|
||||
try:
|
||||
return await self._refresh_hashes(synchronized_event)
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception as e:
|
||||
self.logger.exception("MEMPOOL DIED")
|
||||
raise e
|
||||
|
||||
async def _refresh_hashes(self, synchronized_event):
|
||||
"""Refresh our view of the daemon's mempool."""
|
||||
while True:
|
||||
start = time.perf_counter()
|
||||
height = self._daemon.cached_height()
|
||||
hex_hashes = await self._daemon.mempool_hashes()
|
||||
if height != await self._daemon.height():
|
||||
continue
|
||||
hashes = {hex_str_to_hash(hh) for hh in hex_hashes}
|
||||
async with self.lock:
|
||||
new_hashes = hashes.difference(self.notified_mempool_txs)
|
||||
touched = await self._process_mempool(hashes)
|
||||
self.notified_mempool_txs.update(new_hashes)
|
||||
new_touched = {
|
||||
touched_hashx for touched_hashx, txs in self.hashXs.items() if txs.intersection(new_hashes)
|
||||
}
|
||||
synchronized_event.set()
|
||||
synchronized_event.clear()
|
||||
await self.on_mempool(touched, new_touched, height)
|
||||
duration = time.perf_counter() - start
|
||||
self.mempool_process_time_metric.observe(duration)
|
||||
try:
|
||||
# we wait up to `refresh_secs` but go early if a broadcast happens (which triggers wakeup event)
|
||||
await asyncio.wait_for(self.wakeup.wait(), timeout=self.refresh_secs)
|
||||
except asyncio.TimeoutError:
|
||||
pass
|
||||
finally:
|
||||
self.wakeup.clear()
|
||||
|
||||
async def _process_mempool(self, all_hashes):
|
||||
# Re-sync with the new set of hashes
|
||||
txs = self.txs
|
||||
|
||||
hashXs = self.hashXs # hashX: [tx_hash, ...]
|
||||
touched = set()
|
||||
|
||||
# First handle txs that have disappeared
|
||||
for tx_hash in set(txs).difference(all_hashes):
|
||||
tx = txs.pop(tx_hash)
|
||||
tx_hashXs = {hashX for hashX, value in tx.in_pairs}
|
||||
tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
|
||||
for hashX in tx_hashXs:
|
||||
hashXs[hashX].remove(tx_hash)
|
||||
if not hashXs[hashX]:
|
||||
del hashXs[hashX]
|
||||
touched.update(tx_hashXs)
|
||||
|
||||
# Process new transactions
|
||||
new_hashes = list(all_hashes.difference(txs))
|
||||
if new_hashes:
|
||||
fetches = []
|
||||
for hashes in chunks(new_hashes, 200):
|
||||
fetches.append(self._fetch_and_accept(hashes, all_hashes, touched))
|
||||
tx_map = {}
|
||||
utxo_map = {}
|
||||
for fetch in asyncio.as_completed(fetches):
|
||||
deferred, unspent = await fetch
|
||||
tx_map.update(deferred)
|
||||
utxo_map.update(unspent)
|
||||
|
||||
prior_count = 0
|
||||
# FIXME: this is not particularly efficient
|
||||
while tx_map and len(tx_map) != prior_count:
|
||||
prior_count = len(tx_map)
|
||||
tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map, touched)
|
||||
|
||||
if tx_map:
|
||||
self.logger.info(f'{len(tx_map)} txs dropped')
|
||||
|
||||
return touched
|
||||
|
||||
async def _fetch_and_accept(self, hashes, all_hashes, touched):
|
||||
"""Fetch a list of mempool transactions."""
|
||||
raw_txs = await self._daemon.getrawtransactions((hash_to_hex_str(hash) for hash in hashes))
|
||||
|
||||
to_hashX = self.coin.hashX_from_script
|
||||
deserializer = self.coin.DESERIALIZER
|
||||
|
||||
tx_map = {}
|
||||
for hash, raw_tx in zip(hashes, raw_txs):
|
||||
# The daemon may have evicted the tx from its
|
||||
# mempool or it may have gotten in a block
|
||||
if not raw_tx:
|
||||
continue
|
||||
tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
|
||||
# Convert the inputs and outputs into (hashX, value) pairs
|
||||
# Drop generation-like inputs from MemPoolTx.prevouts
|
||||
txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
|
||||
for txin in tx.inputs
|
||||
if not txin.is_generation())
|
||||
txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
|
||||
for txout in tx.outputs)
|
||||
tx_map[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
|
||||
0, tx_size, raw_tx)
|
||||
|
||||
# Determine all prevouts not in the mempool, and fetch the
|
||||
# UTXO information from the database. Failed prevout lookups
|
||||
# return None - concurrent database updates happen - which is
|
||||
# relied upon by _accept_transactions. Ignore prevouts that are
|
||||
# generation-like.
|
||||
prevouts = tuple(prevout for tx in tx_map.values()
|
||||
for prevout in tx.prevouts
|
||||
if prevout[0] not in all_hashes)
|
||||
utxos = await self._db.lookup_utxos(prevouts)
|
||||
utxo_map = dict(zip(prevouts, utxos))
|
||||
|
||||
return self._accept_transactions(tx_map, utxo_map, touched)
|
||||
|
||||
#
|
||||
# External interface
|
||||
#
|
||||
|
||||
async def keep_synchronized(self, synchronized_event):
|
||||
"""Keep the mempool synchronized with the daemon."""
|
||||
await asyncio.wait([
|
||||
self._mempool_loop(synchronized_event),
|
||||
# self._refresh_histogram(synchronized_event),
|
||||
self._logging(synchronized_event)
|
||||
])
|
||||
|
||||
async def balance_delta(self, hashX):
|
||||
"""Return the unconfirmed amount in the mempool for hashX.
|
||||
|
||||
Can be positive or negative.
|
||||
"""
|
||||
value = 0
|
||||
if hashX in self.hashXs:
|
||||
for hash in self.hashXs[hashX]:
|
||||
tx = self.txs[hash]
|
||||
value -= sum(v for h168, v in tx.in_pairs if h168 == hashX)
|
||||
value += sum(v for h168, v in tx.out_pairs if h168 == hashX)
|
||||
return value
|
||||
|
||||
def compact_fee_histogram(self):
|
||||
"""Return a compact fee histogram of the current mempool."""
|
||||
return self.cached_compact_histogram
|
||||
|
||||
async def potential_spends(self, hashX):
|
||||
"""Return a set of (prev_hash, prev_idx) pairs from mempool
|
||||
transactions that touch hashX.
|
||||
|
||||
None, some or all of these may be spends of the hashX, but all
|
||||
actual spends of it (in the DB or mempool) will be included.
|
||||
"""
|
||||
result = set()
|
||||
for tx_hash in self.hashXs.get(hashX, ()):
|
||||
tx = self.txs[tx_hash]
|
||||
result.update(tx.prevouts)
|
||||
return result
|
||||
|
||||
def transaction_summaries(self, hashX):
|
||||
"""Return a list of MemPoolTxSummary objects for the hashX."""
|
||||
result = []
|
||||
for tx_hash in self.hashXs.get(hashX, ()):
|
||||
tx = self.txs[tx_hash]
|
||||
has_ui = any(hash in self.txs for hash, idx in tx.prevouts)
|
||||
result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui))
|
||||
return result
|
||||
|
||||
async def unordered_UTXOs(self, hashX):
|
||||
"""Return an unordered list of UTXO named tuples from mempool
|
||||
transactions that pay to hashX.
|
||||
|
||||
This does not consider if any other mempool transactions spend
|
||||
the outputs.
|
||||
"""
|
||||
utxos = []
|
||||
for tx_hash in self.hashXs.get(hashX, ()):
|
||||
tx = self.txs.get(tx_hash)
|
||||
for pos, (hX, value) in enumerate(tx.out_pairs):
|
||||
if hX == hashX:
|
||||
utxos.append(UTXO(-1, pos, tx_hash, 0, value))
|
||||
return utxos
|
||||
|
||||
def get_mempool_height(self, tx_hash):
|
||||
# Height Progression
|
||||
# -2: not broadcast
|
||||
# -1: in mempool but has unconfirmed inputs
|
||||
# 0: in mempool and all inputs confirmed
|
||||
# +num: confirmed in a specific block (height)
|
||||
if tx_hash not in self.txs:
|
||||
return -2
|
||||
tx = self.txs[tx_hash]
|
||||
unspent_inputs = sum(1 if hash in self.txs else 0 for hash, idx in tx.prevouts)
|
||||
if unspent_inputs:
|
||||
return -1
|
||||
return 0
|
||||
|
||||
async def _maybe_notify(self, new_touched):
|
||||
tmp, tbp = self._touched_mp, self._touched_bp
|
||||
common = set(tmp).intersection(tbp)
|
||||
if common:
|
||||
height = max(common)
|
||||
elif tmp and max(tmp) == self._highest_block:
|
||||
height = self._highest_block
|
||||
else:
|
||||
# Either we are processing a block and waiting for it to
|
||||
# come in, or we have not yet had a mempool update for the
|
||||
# new block height
|
||||
return
|
||||
touched = tmp.pop(height)
|
||||
for old in [h for h in tmp if h <= height]:
|
||||
del tmp[old]
|
||||
for old in [h for h in tbp if h <= height]:
|
||||
touched.update(tbp.pop(old))
|
||||
# print("notify", height, len(touched), len(new_touched))
|
||||
await self.notify_sessions(height, touched, new_touched)
|
||||
|
||||
async def start(self, height, session_manager: 'LBRYSessionManager'):
|
||||
self._highest_block = height
|
||||
self.notify_sessions = session_manager._notify_sessions
|
||||
await self.notify_sessions(height, set(), set())
|
||||
|
||||
async def on_mempool(self, touched, new_touched, height):
|
||||
self._touched_mp[height] = touched
|
||||
await self._maybe_notify(new_touched)
|
||||
|
||||
async def on_block(self, touched, height):
|
||||
self._touched_bp[height] = touched
|
||||
self._highest_block = height
|
||||
await self._maybe_notify(set())
|
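# The two rules at the heart of _accept_transactions() and
# transaction_summaries() above, shown in isolation (hypothetical values;
# hashXs and tx hashes abbreviated):
mempool_txs = {b'\xaa' * 32}                       # tx hashes currently in mempool

in_pairs = [(b'hashX1', 5000), (b'hashX2', 3000)]  # (hashX, value) spent
out_pairs = [(b'hashX3', 7500)]                    # (hashX, value) created
# fee is clamped at zero so generation-like txs with missing inputs
# never report a negative fee
fee = max(0, sum(v for _, v in in_pairs) - sum(v for _, v in out_pairs))
assert fee == 500

prevouts = [(b'\xaa' * 32, 0), (b'\xbb' * 32, 1)]  # (prev_hash, prev_idx)
# an input is unconfirmed when its funding tx is itself still in the mempool
has_unconfirmed_inputs = any(h in mempool_txs for h, _ in prevouts)
assert has_unconfirmed_inputs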
@@ -1,258 +0,0 @@
# Copyright (c) 2018, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.

"""Merkle trees, branches, proofs and roots."""

from asyncio import Event
from math import ceil, log

from lbry.wallet.server.hash import double_sha256


class Merkle:
    """Perform merkle tree calculations on binary hashes using a given hash
    function.

    If the hash count is not even, the final hash is repeated when
    calculating the next merkle layer up the tree.
    """

    def __init__(self, hash_func=double_sha256):
        self.hash_func = hash_func

    @staticmethod
    def tree_depth(hash_count):
        return Merkle.branch_length(hash_count) + 1

    @staticmethod
    def branch_length(hash_count):
        """Return the length of a merkle branch given the number of hashes."""
        if not isinstance(hash_count, int):
            raise TypeError('hash_count must be an integer')
        if hash_count < 1:
            raise ValueError('hash_count must be at least 1')
        return ceil(log(hash_count, 2))

    @staticmethod
    def branch_and_root(hashes, index, length=None, hash_func=double_sha256):
        """Return a (merkle branch, merkle_root) pair given hashes, and the
        index of one of those hashes.
        """
        hashes = list(hashes)
        if not isinstance(index, int):
            raise TypeError('index must be an integer')
        # This also asserts hashes is not empty
        if not 0 <= index < len(hashes):
            raise ValueError(f"index '{index}/{len(hashes)}' out of range")
        natural_length = Merkle.branch_length(len(hashes))
        if length is None:
            length = natural_length
        else:
            if not isinstance(length, int):
                raise TypeError('length must be an integer')
            if length < natural_length:
                raise ValueError('length out of range')

        branch = []
        for _ in range(length):
            if len(hashes) & 1:
                hashes.append(hashes[-1])
            branch.append(hashes[index ^ 1])
            index >>= 1
            hashes = [hash_func(hashes[n] + hashes[n + 1])
                      for n in range(0, len(hashes), 2)]

        return branch, hashes[0]

    @staticmethod
    def root(hashes, length=None):
        """Return the merkle root of a non-empty iterable of binary hashes."""
        branch, root = Merkle.branch_and_root(hashes, 0, length)
        return root

    # @staticmethod
    # def root_from_proof(hash, branch, index, hash_func=double_sha256):
    #     """Return the merkle root given a hash, a merkle branch to it, and
    #     its index in the hashes array.
    #
    #     branch is an iterable sorted deepest to shallowest.  If the
    #     returned root is the expected value then the merkle proof is
    #     verified.
    #
    #     The caller should have confirmed the length of the branch with
    #     branch_length().  Unfortunately this is not easily done for
    #     bitcoin transactions as the number of transactions in a block
    #     is unknown to an SPV client.
    #     """
    #     for elt in branch:
    #         if index & 1:
    #             hash = hash_func(elt + hash)
    #         else:
    #             hash = hash_func(hash + elt)
    #         index >>= 1
    #     if index:
    #         raise ValueError('index out of range for branch')
    #     return hash

    @staticmethod
    def level(hashes, depth_higher):
        """Return a level of the merkle tree of hashes the given depth
        higher than the bottom row of the original tree."""
        size = 1 << depth_higher
        root = Merkle.root
        return [root(hashes[n: n + size], depth_higher)
                for n in range(0, len(hashes), size)]

    @staticmethod
    def branch_and_root_from_level(level, leaf_hashes, index,
                                   depth_higher):
        """Return a (merkle branch, merkle_root) pair when a merkle-tree has a
        level cached.

        To maximally reduce the amount of data hashed in computing a
        merkle branch, cache a tree of depth N at level N // 2.

        level is a list of hashes in the middle of the tree (returned
        by level())

        leaf_hashes are the leaves needed to calculate a partial branch
        up to level.

        depth_higher is how much higher level is than the leaves of the tree

        index is the index in the full list of hashes of the hash whose
        merkle branch we want.
        """
        if not isinstance(level, list):
            raise TypeError("level must be a list")
        if not isinstance(leaf_hashes, list):
            raise TypeError("leaf_hashes must be a list")
        leaf_index = (index >> depth_higher) << depth_higher
        leaf_branch, leaf_root = Merkle.branch_and_root(
            leaf_hashes, index - leaf_index, depth_higher)
        index >>= depth_higher
        level_branch, root = Merkle.branch_and_root(level, index)
        # Check last so that we know index is in-range
        if leaf_root != level[index]:
            raise ValueError('leaf hashes inconsistent with level')
        return leaf_branch + level_branch, root


class MerkleCache:
    """A cache to calculate merkle branches efficiently."""

    def __init__(self, merkle, source_func):
        """Initialise a cache of hashes taken from source_func:

           async def source_func(index, count):
              ...
        """
        self.merkle = merkle
        self.source_func = source_func
        self.length = 0
        self.depth_higher = 0
        self.initialized = Event()

    def _segment_length(self):
        return 1 << self.depth_higher

    def _leaf_start(self, index):
        """Given a level's depth higher and a hash index, return the leaf
        index and leaf hash count needed to calculate a merkle branch.
        """
        depth_higher = self.depth_higher
        return (index >> depth_higher) << depth_higher

    def _level(self, hashes):
        return self.merkle.level(hashes, self.depth_higher)

    async def _extend_to(self, length):
        """Extend the length of the cache if necessary."""
        if length <= self.length:
            return
        # Start from the beginning of any final partial segment.
        # Retain the value of depth_higher; in practice this is fine
        start = self._leaf_start(self.length)
        hashes = await self.source_func(start, length - start)
        self.level[start >> self.depth_higher:] = self._level(hashes)
        self.length = length

    async def _level_for(self, length):
        """Return the cached level for a truncation of the hashes to
        the given length."""
        if length == self.length:
            return self.level
        level = self.level[:length >> self.depth_higher]
        leaf_start = self._leaf_start(length)
        count = min(self._segment_length(), length - leaf_start)
        hashes = await self.source_func(leaf_start, count)
        level += self._level(hashes)
        return level

    async def initialize(self, length):
        """Call to initialize the cache to a source of given length."""
        self.length = length
        self.depth_higher = self.merkle.tree_depth(length) // 2
        self.level = self._level(await self.source_func(0, length))
        self.initialized.set()

    def truncate(self, length):
        """Truncate the cache so it covers no more than length underlying
        hashes."""
        if not isinstance(length, int):
            raise TypeError('length must be an integer')
        if length <= 0:
            raise ValueError('length must be positive')
        if length >= self.length:
            return
        length = self._leaf_start(length)
        self.length = length
        self.level[length >> self.depth_higher:] = []

    async def branch_and_root(self, length, index):
        """Return a merkle branch and root.  Length is the number of
        hashes used to calculate the merkle root, index is the position
        of the hash to calculate the branch of.

        index must be less than length, which must be at least 1."""
        if not isinstance(length, int):
            raise TypeError('length must be an integer')
        if not isinstance(index, int):
            raise TypeError('index must be an integer')
        if length <= 0:
            raise ValueError('length must be positive')
        if index >= length:
            raise ValueError('index must be less than length')
        await self.initialized.wait()
        await self._extend_to(length)
        leaf_start = self._leaf_start(index)
        count = min(self._segment_length(), length - leaf_start)
        leaf_hashes = await self.source_func(leaf_start, count)
        if length < self._segment_length():
            return self.merkle.branch_and_root(leaf_hashes, index)
        level = await self._level_for(length)
        return self.merkle.branch_and_root_from_level(
            level, leaf_hashes, index, self.depth_higher)
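# A runnable sketch of the proof logic above: a compact branch_and_root
# (mirroring the class method at its natural length) paired with the
# commented-out root_from_proof, with hashlib standing in for this
# module's double_sha256.
import hashlib

def _dsha(x: bytes) -> bytes:
    return hashlib.sha256(hashlib.sha256(x).digest()).digest()

def branch_and_root(hashes, index):
    hashes = list(hashes)
    branch = []
    while len(hashes) > 1:
        if len(hashes) & 1:
            hashes.append(hashes[-1])     # odd count: repeat the final hash
        branch.append(hashes[index ^ 1])  # the sibling joins the proof
        index >>= 1
        hashes = [_dsha(hashes[n] + hashes[n + 1])
                  for n in range(0, len(hashes), 2)]
    return branch, hashes[0]

def root_from_proof(h, branch, index):
    for elt in branch:
        h = _dsha(elt + h) if index & 1 else _dsha(h + elt)
        index >>= 1
    if index:
        raise ValueError('index out of range for branch')
    return h

leaves = [_dsha(bytes([i])) for i in range(5)]
branch, root = branch_and_root(leaves, 3)
assert root_from_proof(leaves[3], branch, 3) == root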
@@ -1,135 +0,0 @@
import time
import math
from typing import Tuple


def calculate_elapsed(start) -> int:
    return int((time.perf_counter() - start) * 1000)


def calculate_avg_percentiles(data) -> Tuple[int, int, int, int, int, int, int, int]:
    if not data:
        return 0, 0, 0, 0, 0, 0, 0, 0
    data.sort()
    size = len(data)
    return (
        int(sum(data) / size),
        data[0],
        data[math.ceil(size * .05) - 1],
        data[math.ceil(size * .25) - 1],
        data[math.ceil(size * .50) - 1],
        data[math.ceil(size * .75) - 1],
        data[math.ceil(size * .95) - 1],
        data[-1]
    )


def remove_select_list(sql) -> str:
    return sql[sql.index('FROM'):]


class APICallMetrics:

    def __init__(self, name):
        self.name = name

        # total requests received
        self.receive_count = 0
        self.cache_response_count = 0

        # millisecond timings for query based responses
        self.query_response_times = []
        self.query_intrp_times = []
        self.query_error_times = []

        self.query_python_times = []
        self.query_wait_times = []
        self.query_sql_times = []  # aggregate total of multiple SQL calls made per request

        self.individual_sql_times = []  # every SQL query run on server

        # actual queries
        self.errored_queries = set()
        self.interrupted_queries = set()

    def to_json(self):
        return {
            # total requests received
            "receive_count": self.receive_count,
            # sum of these is total responses made
            "cache_response_count": self.cache_response_count,
            "query_response_count": len(self.query_response_times),
            "intrp_response_count": len(self.query_intrp_times),
            "error_response_count": len(self.query_error_times),
            # millisecond timings for non-cache responses
            "response": calculate_avg_percentiles(self.query_response_times),
            "interrupt": calculate_avg_percentiles(self.query_intrp_times),
            "error": calculate_avg_percentiles(self.query_error_times),
            # response, interrupt and error each also report the python, wait and sql stats:
            "python": calculate_avg_percentiles(self.query_python_times),
            "wait": calculate_avg_percentiles(self.query_wait_times),
            "sql": calculate_avg_percentiles(self.query_sql_times),
            # extended timings for individual sql executions
            "individual_sql": calculate_avg_percentiles(self.individual_sql_times),
            "individual_sql_count": len(self.individual_sql_times),
            # actual queries
            "errored_queries": list(self.errored_queries),
            "interrupted_queries": list(self.interrupted_queries),
        }

    def start(self):
        self.receive_count += 1

    def cache_response(self):
        self.cache_response_count += 1

    def _add_query_timings(self, request_total_time, metrics):
        if metrics and 'execute_query' in metrics:
            sub_process_total = metrics[self.name][0]['total']
            individual_query_times = [f['total'] for f in metrics['execute_query']]
            aggregated_query_time = sum(individual_query_times)
            self.individual_sql_times.extend(individual_query_times)
            self.query_sql_times.append(aggregated_query_time)
            self.query_python_times.append(sub_process_total - aggregated_query_time)
            self.query_wait_times.append(request_total_time - sub_process_total)

    @staticmethod
    def _add_queries(query_set, metrics):
        if metrics and 'execute_query' in metrics:
            for execute_query in metrics['execute_query']:
                if 'sql' in execute_query:
                    query_set.add(remove_select_list(execute_query['sql']))

    def query_response(self, start, metrics):
        self.query_response_times.append(calculate_elapsed(start))
        self._add_query_timings(self.query_response_times[-1], metrics)

    def query_interrupt(self, start, metrics):
        self.query_intrp_times.append(calculate_elapsed(start))
        self._add_queries(self.interrupted_queries, metrics)
        self._add_query_timings(self.query_intrp_times[-1], metrics)

    def query_error(self, start, metrics):
        self.query_error_times.append(calculate_elapsed(start))
        self._add_queries(self.errored_queries, metrics)
        self._add_query_timings(self.query_error_times[-1], metrics)


class ServerLoadData:

    def __init__(self):
        self._apis = {}

    def for_api(self, name) -> APICallMetrics:
        if name not in self._apis:
            self._apis[name] = APICallMetrics(name)
        return self._apis[name]

    def to_json_and_reset(self, status):
        try:
            return {
                'api': {name: api.to_json() for name, api in self._apis.items()},
                'status': status
            }
        finally:
            self._apis = {}
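# The percentile tuple layout produced by calculate_avg_percentiles() above,
# checked against a known 1..100 ms sample: (avg, min, p5, p25, p50, p75,
# p95, max), all in integer milliseconds.
import math

timings = list(range(1, 101))
size = len(timings)
stats = (
    int(sum(timings) / size),            # avg
    timings[0],                          # min
    timings[math.ceil(size * .05) - 1],  # p5
    timings[math.ceil(size * .25) - 1],  # p25
    timings[math.ceil(size * .50) - 1],  # p50
    timings[math.ceil(size * .75) - 1],  # p75
    timings[math.ceil(size * .95) - 1],  # p95
    timings[-1],                         # max
)
assert stats == (50, 1, 5, 25, 50, 75, 95, 100)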
@@ -1,289 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.

"""Script-related classes and functions."""


from collections import namedtuple

from lbry.wallet.server.util import unpack_le_uint16_from, unpack_le_uint32_from, \
    pack_le_uint16, pack_le_uint32


class EnumError(Exception):
    pass


class Enumeration:

    def __init__(self, name, enumList):
        self.__doc__ = name

        lookup = {}
        reverseLookup = {}
        i = 0
        uniqueNames = set()
        uniqueValues = set()
        for x in enumList:
            if isinstance(x, tuple):
                x, i = x
            if not isinstance(x, str):
                raise EnumError(f"enum name {x} not a string")
            if not isinstance(i, int):
                raise EnumError(f"enum value {i} not an integer")
            if x in uniqueNames:
                raise EnumError(f"enum name {x} not unique")
            if i in uniqueValues:
                raise EnumError(f"enum value {i} not unique")
            uniqueNames.add(x)
            uniqueValues.add(i)
            lookup[x] = i
            reverseLookup[i] = x
            i = i + 1
        self.lookup = lookup
        self.reverseLookup = reverseLookup

    def __getattr__(self, attr):
        result = self.lookup.get(attr)
        if result is None:
            raise AttributeError(f'enumeration has no member {attr}')
        return result

    def whatis(self, value):
        return self.reverseLookup[value]


class ScriptError(Exception):
    """Exception used for script errors."""


class PubKeyError(Exception):
    """Exception used for invalid public keys."""
    # Referenced by validate_pubkey() below but never defined in the
    # original file; defined here so those raises can succeed.


OpCodes = Enumeration("Opcodes", [
    ("OP_0", 0), ("OP_PUSHDATA1", 76),
    "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE",
    "OP_RESERVED",
    "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7", "OP_8",
    "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
    "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF",
    "OP_ELSE", "OP_ENDIF", "OP_VERIFY", "OP_RETURN",
    "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP",
    "OP_2OVER", "OP_2ROT", "OP_2SWAP", "OP_IFDUP", "OP_DEPTH", "OP_DROP",
    "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
    "OP_SWAP", "OP_TUCK",
    "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE",
    "OP_INVERT", "OP_AND", "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY",
    "OP_RESERVED1", "OP_RESERVED2",
    "OP_1ADD", "OP_1SUB", "OP_2MUL", "OP_2DIV", "OP_NEGATE", "OP_ABS",
    "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV", "OP_MOD",
    "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR", "OP_NUMEQUAL",
    "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN", "OP_GREATERTHAN",
    "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
    "OP_WITHIN",
    "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160", "OP_HASH256",
    "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
    "OP_CHECKMULTISIGVERIFY",
    "OP_NOP1",
    "OP_CHECKLOCKTIMEVERIFY", "OP_CHECKSEQUENCEVERIFY"
])


# Paranoia to make it hard to create bad scripts
assert OpCodes.OP_DUP == 0x76
assert OpCodes.OP_HASH160 == 0xa9
assert OpCodes.OP_EQUAL == 0x87
assert OpCodes.OP_EQUALVERIFY == 0x88
assert OpCodes.OP_CHECKSIG == 0xac
assert OpCodes.OP_CHECKMULTISIG == 0xae


def _match_ops(ops, pattern):
    if len(ops) != len(pattern):
        return False
    for op, pop in zip(ops, pattern):
        if pop != op:
            # -1 means 'data push', whose op is an (op, data) tuple
            if pop == -1 and isinstance(op, tuple):
                continue
            return False

    return True


class ScriptPubKey:
    """A class for handling a tx output script that gives conditions
    necessary for spending.
    """

    TO_ADDRESS_OPS = [OpCodes.OP_DUP, OpCodes.OP_HASH160, -1,
                      OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]
    TO_P2SH_OPS = [OpCodes.OP_HASH160, -1, OpCodes.OP_EQUAL]
    TO_PUBKEY_OPS = [-1, OpCodes.OP_CHECKSIG]

    PayToHandlers = namedtuple('PayToHandlers', 'address script_hash pubkey '
                                                'unspendable strange')

    @classmethod
    def pay_to(cls, handlers, script):
        """Parse a script, invoke the appropriate handler and
        return the result.

        One of the following handlers is invoked:
           handlers.address(hash160)
           handlers.script_hash(hash160)
           handlers.pubkey(pubkey)
           handlers.unspendable()
           handlers.strange(script)
        """
        try:
            ops = Script.get_ops(script)
        except ScriptError:
            return handlers.unspendable()

        match = _match_ops

        if match(ops, cls.TO_ADDRESS_OPS):
            return handlers.address(ops[2][-1])
        if match(ops, cls.TO_P2SH_OPS):
            return handlers.script_hash(ops[1][-1])
        if match(ops, cls.TO_PUBKEY_OPS):
            return handlers.pubkey(ops[0][-1])
        if ops and ops[0] == OpCodes.OP_RETURN:
            return handlers.unspendable()
        return handlers.strange(script)

    @classmethod
    def P2SH_script(cls, hash160):
        return (bytes([OpCodes.OP_HASH160])
                + Script.push_data(hash160)
                + bytes([OpCodes.OP_EQUAL]))

    @classmethod
    def P2PKH_script(cls, hash160):
        return (bytes([OpCodes.OP_DUP, OpCodes.OP_HASH160])
                + Script.push_data(hash160)
                + bytes([OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]))

    @classmethod
    def validate_pubkey(cls, pubkey, req_compressed=False):
        if isinstance(pubkey, (bytes, bytearray)):
            if len(pubkey) == 33 and pubkey[0] in (2, 3):
                return  # Compressed
            if len(pubkey) == 65 and pubkey[0] == 4:
                if not req_compressed:
                    return
                raise PubKeyError('uncompressed pubkeys are invalid')
        raise PubKeyError(f'invalid pubkey {pubkey}')

    @classmethod
    def pubkey_script(cls, pubkey):
        cls.validate_pubkey(pubkey)
        return Script.push_data(pubkey) + bytes([OpCodes.OP_CHECKSIG])

    @classmethod
    def multisig_script(cls, m, pubkeys):
        """Returns the script for a pay-to-multisig transaction."""
        n = len(pubkeys)
        if not 1 <= m <= n <= 15:
            raise ScriptError(f'{m:d} of {n:d} multisig script not possible')
        for pubkey in pubkeys:
            cls.validate_pubkey(pubkey, req_compressed=True)
        # See https://bitcoin.org/en/developer-guide
        # 2 of 3 is: OP_2 pubkey1 pubkey2 pubkey3 OP_3 OP_CHECKMULTISIG
        # (Fixed to use the OpCodes/Script names defined in this module;
        # the original referenced undefined bare names OP_1, OP_CHECK_MULTISIG
        # and cls.push_data.)
        return (bytes([OpCodes.OP_1 + m - 1])
                + b''.join(Script.push_data(pubkey) for pubkey in pubkeys)
                + bytes([OpCodes.OP_1 + n - 1, OpCodes.OP_CHECKMULTISIG]))


class Script:

    @classmethod
    def get_ops(cls, script):
        ops = []

        # The unpacks or script[n] below throw on truncated scripts
        try:
            n = 0
            while n < len(script):
                op = script[n]
                n += 1

                if op <= OpCodes.OP_PUSHDATA4:
                    # Raw bytes follow
                    if op < OpCodes.OP_PUSHDATA1:
                        dlen = op
                    elif op == OpCodes.OP_PUSHDATA1:
                        dlen = script[n]
                        n += 1
                    elif op == OpCodes.OP_PUSHDATA2:
                        dlen, = unpack_le_uint16_from(script[n: n + 2])
                        n += 2
                    else:
                        dlen, = unpack_le_uint32_from(script[n: n + 4])
                        n += 4
                    if n + dlen > len(script):
                        raise IndexError
                    op = (op, script[n:n + dlen])
                    n += dlen

                ops.append(op)
        except Exception:
            # Truncated script; e.g. tx_hash
            # ebc9fa1196a59e192352d76c0f6e73167046b9d37b8302b6bb6968dfd279b767
            raise ScriptError('truncated script')

        return ops

    @classmethod
    def push_data(cls, data):
        """Returns the opcodes to push the data on the stack."""
        assert isinstance(data, (bytes, bytearray))

        n = len(data)
        if n < OpCodes.OP_PUSHDATA1:
            return bytes([n]) + data
        if n < 256:
            return bytes([OpCodes.OP_PUSHDATA1, n]) + data
        if n < 65536:
            return bytes([OpCodes.OP_PUSHDATA2]) + pack_le_uint16(n) + data
        return bytes([OpCodes.OP_PUSHDATA4]) + pack_le_uint32(n) + data

    @classmethod
    def opcode_name(cls, opcode):
        if OpCodes.OP_0 < opcode < OpCodes.OP_PUSHDATA1:
            return f'OP_{opcode:d}'
        try:
            return OpCodes.whatis(opcode)
        except KeyError:
            return f'OP_UNKNOWN:{opcode:d}'

    @classmethod
    def dump(cls, script):
        # get_ops() returns data pushes as (opcode, data) tuples; unpack
        # them here (the original unpacked two parallel lists, which
        # get_ops() never returned).
        for op in cls.get_ops(script):
            op, data = op if isinstance(op, tuple) else (op, None)
            name = cls.opcode_name(op)
            if data is None:
                print(name)
            else:
                print(f'{name} {data.hex()} ({len(data):d} bytes)')
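# What get_ops()/push_data() produce for the standard P2PKH template above,
# as a self-contained sketch: opcode values are inlined (in the module they
# come from OpCodes) and the hash160 is a hypothetical all-zero value.
OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG = 0x76, 0xa9, 0x88, 0xac

h160 = bytes(20)
script = (bytes([OP_DUP, OP_HASH160])
          + bytes([len(h160)]) + h160  # direct push: length byte < OP_PUSHDATA1
          + bytes([OP_EQUALVERIFY, OP_CHECKSIG]))
assert len(script) == 25

# Script.get_ops(script) would yield
#   [OP_DUP, OP_HASH160, (0x14, h160), OP_EQUALVERIFY, OP_CHECKSIG]
# and _match_ops() matches the (op, data) tuple against the -1 wildcard in
# ScriptPubKey.TO_ADDRESS_OPS, so pay_to() dispatches to handlers.address.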
@@ -1,91 +0,0 @@
import signal
import logging
import asyncio
from concurrent.futures.thread import ThreadPoolExecutor
import typing

import lbry
from lbry.wallet.server.mempool import MemPool
from lbry.wallet.server.block_processor import BlockProcessor
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.session import LBRYSessionManager
from lbry.prometheus import PrometheusServer


class Server:

    def __init__(self, env):
        self.env = env
        self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)
        self.shutdown_event = asyncio.Event()
        self.cancellable_tasks = []

        self.daemon = daemon = env.coin.DAEMON(env.coin, env.daemon_url)
        self.db = db = LevelDB(env)
        self.bp = bp = BlockProcessor(env, db, daemon, self.shutdown_event)
        self.prometheus_server: typing.Optional[PrometheusServer] = None

        self.session_mgr = LBRYSessionManager(
            env, db, bp, daemon, self.shutdown_event
        )
        self._indexer_task = None

    async def start(self):
        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.log.info(f'software version: {lbry.__version__}')
        self.log.info(f'supported protocol versions: {min_str}-{max_str}')
        self.log.info(f'event loop policy: {env.loop_policy}')
        self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        await self.daemon.height()

        def _start_cancellable(run, *args):
            _flag = asyncio.Event()
            self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
            return _flag.wait()

        await self.start_prometheus()
        if self.env.udp_port:
            await self.bp.status_server.start(
                0, bytes.fromhex(self.bp.coin.GENESIS_HASH)[::-1], self.env.country,
                self.env.host, self.env.udp_port, self.env.allow_lan_udp
            )
        await _start_cancellable(self.bp.fetch_and_process_blocks)

        await self.db.populate_header_merkle_cache()
        await _start_cancellable(self.bp.mempool.keep_synchronized)
        await _start_cancellable(self.session_mgr.serve, self.bp.mempool)

    async def stop(self):
        for task in reversed(self.cancellable_tasks):
            task.cancel()
        await asyncio.wait(self.cancellable_tasks)
        if self.prometheus_server:
            await self.prometheus_server.stop()
            self.prometheus_server = None
        self.shutdown_event.set()
        await self.daemon.close()

    def run(self):
        loop = asyncio.get_event_loop()
        executor = ThreadPoolExecutor(self.env.max_query_workers, thread_name_prefix='hub-worker')
        loop.set_default_executor(executor)

        def __exit():
            raise SystemExit()
        try:
            loop.add_signal_handler(signal.SIGINT, __exit)
            loop.add_signal_handler(signal.SIGTERM, __exit)
            loop.run_until_complete(self.start())
            loop.run_until_complete(self.shutdown_event.wait())
        except (SystemExit, KeyboardInterrupt):
            pass
        finally:
            loop.run_until_complete(self.stop())
            executor.shutdown(True)

    async def start_prometheus(self):
        if not self.prometheus_server and self.env.prometheus_port:
            self.prometheus_server = PrometheusServer()
            await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port)
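# The _start_cancellable pattern above, in isolation: each service coroutine
# receives an asyncio.Event, sets it once it is up, and start() awaits only
# that signal while the task keeps running. A minimal runnable sketch with
# hypothetical names:
import asyncio

async def service(name, started: asyncio.Event):
    started.set()                  # signal readiness; the caller resumes here
    while True:
        await asyncio.sleep(3600)  # stand-in for the real service loop

async def main():
    tasks = []

    def start_cancellable(run, *args):
        flag = asyncio.Event()
        tasks.append(asyncio.ensure_future(run(*args, flag)))
        return flag.wait()

    await start_cancellable(service, 'block processor')
    # shutdown mirrors Server.stop(): cancel in reverse start order
    for task in reversed(tasks):
        task.cancel()
    await asyncio.wait(tasks)

asyncio.run(main())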
File diff suppressed because it is too large
@@ -1,626 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.

"""Transaction-related classes and functions."""
import typing
from collections import namedtuple

from lbry.wallet.server.hash import sha256, double_sha256, hash_to_hex_str
from lbry.wallet.server.script import OpCodes
from lbry.wallet.server.util import (
    unpack_le_int32_from, unpack_le_int64_from, unpack_le_uint16_from,
    unpack_le_uint32_from, unpack_le_uint64_from, pack_le_int32, pack_varint,
    pack_le_uint32, pack_le_int64, pack_varbytes,
)

ZERO = bytes(32)
MINUS_1 = 4294967295


class Tx(typing.NamedTuple):
    version: int
    inputs: typing.List['TxInput']
    outputs: typing.List['TxOutput']
    locktime: int
    raw: bytes


class TxInput(typing.NamedTuple):
    """Class representing a transaction input."""
    prev_hash: bytes
    prev_idx: int
    script: bytes
    sequence: int

    def __str__(self):
        script = self.script.hex()
        prev_hash = hash_to_hex_str(self.prev_hash)
        return (f"Input({prev_hash}, {self.prev_idx:d}, script={script}, sequence={self.sequence:d})")

    def is_generation(self):
        """Test if an input is generation/coinbase like"""
        return self.prev_idx == MINUS_1 and self.prev_hash == ZERO

    def serialize(self):
        return b''.join((
            self.prev_hash,
            pack_le_uint32(self.prev_idx),
            pack_varbytes(self.script),
            pack_le_uint32(self.sequence),
        ))


class TxOutput(typing.NamedTuple):
    value: int
    pk_script: bytes

    def serialize(self):
        return b''.join((
            pack_le_int64(self.value),
            pack_varbytes(self.pk_script),
        ))


class Deserializer:
    """Deserializes blocks into transactions.

    External entry points are read_tx(), read_tx_and_hash(),
    read_tx_and_vsize() and read_block().

    This code is performance sensitive as it is executed 100s of
    millions of times during sync.
    """

    TX_HASH_FN = staticmethod(double_sha256)

    def __init__(self, binary, start=0):
        assert isinstance(binary, bytes)
        self.binary = binary
        self.binary_length = len(binary)
        self.cursor = start
        self.flags = 0

    def read_tx(self):
        """Return a deserialized transaction."""
        start = self.cursor
        return Tx(
            self._read_le_int32(),   # version
            self._read_inputs(),     # inputs
            self._read_outputs(),    # outputs
            self._read_le_uint32(),  # locktime
            self.binary[start:self.cursor],
        )

    def read_tx_and_hash(self):
        """Return a (deserialized TX, tx_hash) pair.

        The hash needs to be reversed for human display; for efficiency
        we process it in the natural serialized order.
        """
        start = self.cursor
        return self.read_tx(), self.TX_HASH_FN(self.binary[start:self.cursor])

    def read_tx_and_vsize(self):
        """Return a (deserialized TX, vsize) pair."""
        return self.read_tx(), self.binary_length

    def read_tx_block(self):
        """Returns a list of (deserialized_tx, tx_hash) pairs."""
        read = self.read_tx_and_hash
        # Some coins have excess data beyond the end of the transactions
        return [read() for _ in range(self._read_varint())]

    def _read_inputs(self):
        read_input = self._read_input
        return [read_input() for i in range(self._read_varint())]

    def _read_input(self):
        return TxInput(
            self._read_nbytes(32),   # prev_hash
            self._read_le_uint32(),  # prev_idx
            self._read_varbytes(),   # script
            self._read_le_uint32()   # sequence
        )

    def _read_outputs(self):
        read_output = self._read_output
        return [read_output() for i in range(self._read_varint())]

    def _read_output(self):
        return TxOutput(
            self._read_le_int64(),  # value
            self._read_varbytes(),  # pk_script
        )

    def _read_byte(self):
        cursor = self.cursor
        self.cursor += 1
        return self.binary[cursor]

    def _read_nbytes(self, n):
        cursor = self.cursor
        self.cursor = end = cursor + n
        assert self.binary_length >= end
        return self.binary[cursor:end]

    def _read_varbytes(self):
        return self._read_nbytes(self._read_varint())

    def _read_varint(self):
        n = self.binary[self.cursor]
        self.cursor += 1
        if n < 253:
            return n
        if n == 253:
            return self._read_le_uint16()
        if n == 254:
            return self._read_le_uint32()
        return self._read_le_uint64()

    def _read_le_int32(self):
        result, = unpack_le_int32_from(self.binary, self.cursor)
        self.cursor += 4
        return result

    def _read_le_int64(self):
        result, = unpack_le_int64_from(self.binary, self.cursor)
        self.cursor += 8
        return result

    def _read_le_uint16(self):
        result, = unpack_le_uint16_from(self.binary, self.cursor)
        self.cursor += 2
        return result

    def _read_le_uint32(self):
        result, = unpack_le_uint32_from(self.binary, self.cursor)
        self.cursor += 4
        return result

    def _read_le_uint64(self):
        result, = unpack_le_uint64_from(self.binary, self.cursor)
        self.cursor += 8
        return result


class TxSegWit(namedtuple("Tx", "version marker flag inputs outputs "
                                "witness locktime raw")):
    """Class representing a SegWit transaction."""


class DeserializerSegWit(Deserializer):

    # https://bitcoincore.org/en/segwit_wallet_dev/#transaction-serialization

    def _read_witness(self, fields):
        read_witness_field = self._read_witness_field
        return [read_witness_field() for i in range(fields)]

    def _read_witness_field(self):
        read_varbytes = self._read_varbytes
        return [read_varbytes() for i in range(self._read_varint())]

    def _read_tx_parts(self):
        """Return a (deserialized TX, tx_hash, vsize) tuple."""
        start = self.cursor
        marker = self.binary[self.cursor + 4]
        if marker:
            tx = super().read_tx()
            tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor])
            return tx, tx_hash, self.binary_length

        # Ugh, this is nasty.
        version = self._read_le_int32()
        orig_ser = self.binary[start:self.cursor]

        marker = self._read_byte()
        flag = self._read_byte()

        start = self.cursor
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        orig_ser += self.binary[start:self.cursor]

        base_size = self.cursor - start
        witness = self._read_witness(len(inputs))

        start = self.cursor
        locktime = self._read_le_uint32()
        orig_ser += self.binary[start:self.cursor]
        vsize = (3 * base_size + self.binary_length) // 4

        return TxSegWit(version, marker, flag, inputs, outputs, witness,
                        locktime, orig_ser), self.TX_HASH_FN(orig_ser), vsize

    def read_tx(self):
        return self._read_tx_parts()[0]

    def read_tx_and_hash(self):
        tx, tx_hash, vsize = self._read_tx_parts()
        return tx, tx_hash

    def read_tx_and_vsize(self):
        tx, tx_hash, vsize = self._read_tx_parts()
        return tx, vsize


class DeserializerAuxPow(Deserializer):
    VERSION_AUXPOW = (1 << 8)

    def read_header(self, height, static_header_size):
        """Return the AuxPow block header bytes"""
        start = self.cursor
        version = self._read_le_uint32()
        if version & self.VERSION_AUXPOW:
            # We are going to calculate the block size then read it as bytes
            self.cursor = start
            self.cursor += static_header_size  # Block normal header
            self.read_tx()  # AuxPow transaction
            self.cursor += 32  # Parent block hash
            merkle_size = self._read_varint()
            self.cursor += 32 * merkle_size  # Merkle branch
            self.cursor += 4  # Index
            merkle_size = self._read_varint()
            self.cursor += 32 * merkle_size  # Chain merkle branch
            self.cursor += 4  # Chain index
            self.cursor += 80  # Parent block header
            header_end = self.cursor
        else:
            header_end = static_header_size
        self.cursor = start
        return self._read_nbytes(header_end)


class DeserializerAuxPowSegWit(DeserializerSegWit, DeserializerAuxPow):
    pass


class DeserializerEquihash(Deserializer):
    def read_header(self, height, static_header_size):
        """Return the block header bytes"""
        start = self.cursor
        # We are going to calculate the block size then read it as bytes
        self.cursor += static_header_size
        solution_size = self._read_varint()
        self.cursor += solution_size
        header_end = self.cursor
        self.cursor = start
        return self._read_nbytes(header_end)


class DeserializerEquihashSegWit(DeserializerSegWit, DeserializerEquihash):
    pass


class TxJoinSplit(namedtuple("Tx", "version inputs outputs locktime")):
    """Class representing a JoinSplit transaction."""


class DeserializerZcash(DeserializerEquihash):
    def read_tx(self):
        header = self._read_le_uint32()
        overwintered = ((header >> 31) == 1)
        if overwintered:
            version = header & 0x7fffffff
            self.cursor += 4  # versionGroupId
        else:
            version = header

        is_overwinter_v3 = version == 3
        is_sapling_v4 = version == 4

        base_tx = TxJoinSplit(
            version,
            self._read_inputs(),    # inputs
            self._read_outputs(),   # outputs
            self._read_le_uint32()  # locktime
        )

        if is_overwinter_v3 or is_sapling_v4:
            self.cursor += 4  # expiryHeight

        has_shielded = False
        if is_sapling_v4:
            self.cursor += 8  # valueBalance
            shielded_spend_size = self._read_varint()
            self.cursor += shielded_spend_size * 384  # vShieldedSpend
            shielded_output_size = self._read_varint()
            self.cursor += shielded_output_size * 948  # vShieldedOutput
            has_shielded = shielded_spend_size > 0 or shielded_output_size > 0

        if base_tx.version >= 2:
            joinsplit_size = self._read_varint()
            if joinsplit_size > 0:
                joinsplit_desc_len = 1506 + (192 if is_sapling_v4 else 296)
                # JSDescription
                self.cursor += joinsplit_size * joinsplit_desc_len
                self.cursor += 32  # joinSplitPubKey
                self.cursor += 64  # joinSplitSig

        if is_sapling_v4 and has_shielded:
            self.cursor += 64  # bindingSig

        return base_tx


class TxTime(namedtuple("Tx", "version time inputs outputs locktime")):
    """Class representing transaction that has a time field."""


class DeserializerTxTime(Deserializer):
    def read_tx(self):
        return TxTime(
            self._read_le_int32(),   # version
            self._read_le_uint32(),  # time
            self._read_inputs(),     # inputs
            self._read_outputs(),    # outputs
            self._read_le_uint32(),  # locktime
        )


class DeserializerReddcoin(Deserializer):
    def read_tx(self):
        version = self._read_le_int32()
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        locktime = self._read_le_uint32()
        if version > 1:
            time = self._read_le_uint32()
        else:
            time = 0

        return TxTime(version, time, inputs, outputs, locktime)


class DeserializerTxTimeAuxPow(DeserializerTxTime):
    VERSION_AUXPOW = (1 << 8)

    def is_merged_block(self):
        start = self.cursor
        self.cursor = 0
        version = self._read_le_uint32()
        self.cursor = start
        if version & self.VERSION_AUXPOW:
            return True
        return False

    def read_header(self, height, static_header_size):
        """Return the AuxPow block header bytes"""
        start = self.cursor
        version = self._read_le_uint32()
        if version & self.VERSION_AUXPOW:
            # We are going to calculate the block size then read it as bytes
            self.cursor = start
            self.cursor += static_header_size  # Block normal header
            self.read_tx()  # AuxPow transaction
            self.cursor += 32  # Parent block hash
            merkle_size = self._read_varint()
            self.cursor += 32 * merkle_size  # Merkle branch
            self.cursor += 4  # Index
            merkle_size = self._read_varint()
            self.cursor += 32 * merkle_size  # Chain merkle branch
            self.cursor += 4  # Chain index
            self.cursor += 80  # Parent block header
            header_end = self.cursor
        else:
            header_end = static_header_size
        self.cursor = start
        return self._read_nbytes(header_end)


class DeserializerBitcoinAtom(DeserializerSegWit):
    FORK_BLOCK_HEIGHT = 505888

    def read_header(self, height, static_header_size):
        """Return the block header bytes"""
        header_len = static_header_size
        if height >= self.FORK_BLOCK_HEIGHT:
            header_len += 4  # flags
        return self._read_nbytes(header_len)


class DeserializerGroestlcoin(DeserializerSegWit):
    TX_HASH_FN = staticmethod(sha256)


class TxInputTokenPay(TxInput):
    """Class representing a TokenPay transaction input."""

    OP_ANON_MARKER = 0xb9
    # 2byte marker (cpubkey + sigc + sigr)
    MIN_ANON_IN_SIZE = 2 + (33 + 32 + 32)

    def _is_anon_input(self):
        return (len(self.script) >= self.MIN_ANON_IN_SIZE and
                self.script[0] == OpCodes.OP_RETURN and
                self.script[1] == self.OP_ANON_MARKER)

    def is_generation(self):
        # Transactions coming in from stealth addresses are seen by
        # the blockchain as newly minted coins. The reverse, where coins
        # are sent TO a stealth address, are seen by the blockchain as
        # a coin burn.
        if self._is_anon_input():
            return True
        return super().is_generation()


class TxInputTokenPayStealth(
        namedtuple("TxInput", "keyimage ringsize script sequence")):
    """Class representing a TokenPay stealth transaction input."""

    def __str__(self):
        script = self.script.hex()
        keyimage = bytes(self.keyimage).hex()
        return (f"Input({keyimage}, {self.ringsize[1]:d}, script={script}, sequence={self.sequence:d})")

    def is_generation(self):
        return True

    def serialize(self):
        return b''.join((
            self.keyimage,
            self.ringsize,
            pack_varbytes(self.script),
            pack_le_uint32(self.sequence),
        ))


class DeserializerTokenPay(DeserializerTxTime):

    def _read_input(self):
        txin = TxInputTokenPay(
            self._read_nbytes(32),   # prev_hash
            self._read_le_uint32(),  # prev_idx
            self._read_varbytes(),   # script
            self._read_le_uint32(),  # sequence
        )
        if txin._is_anon_input():
            # Not sure if this is actually needed, and seems
            # extra work for no immediate benefit, but it at
            # least correctly represents a stealth input
            raw = txin.serialize()
            deserializer = Deserializer(raw)
            txin = TxInputTokenPayStealth(
                deserializer._read_nbytes(33),  # keyimage
                deserializer._read_nbytes(3),   # ringsize
                deserializer._read_varbytes(),  # script
                deserializer._read_le_uint32()  # sequence
            )
        return txin


# Decred
class TxInputDcr(namedtuple("TxInput", "prev_hash prev_idx tree sequence")):
    """Class representing a Decred transaction input."""

    def __str__(self):
        prev_hash = hash_to_hex_str(self.prev_hash)
        return (f"Input({prev_hash}, {self.prev_idx:d}, tree={self.tree}, sequence={self.sequence:d})")

    def is_generation(self):
        """Test if an input is generation/coinbase like"""
        return self.prev_idx == MINUS_1 and self.prev_hash == ZERO


class TxOutputDcr(namedtuple("TxOutput", "value version pk_script")):
    """Class representing a Decred transaction output."""
    pass


class TxDcr(namedtuple("Tx", "version inputs outputs locktime expiry "
                             "witness")):
    """Class representing a Decred transaction."""


class DeserializerDecred(Deserializer):
    @staticmethod
    def blake256(data):
        from blake256.blake256 import blake_hash
        return blake_hash(data)
|
||||
|
||||
@staticmethod
|
||||
def blake256d(data):
|
||||
from blake256.blake256 import blake_hash
|
||||
return blake_hash(blake_hash(data))
|
||||
|
||||
def read_tx(self):
|
||||
return self._read_tx_parts(produce_hash=False)[0]
|
||||
|
||||
def read_tx_and_hash(self):
|
||||
tx, tx_hash, vsize = self._read_tx_parts()
|
||||
return tx, tx_hash
|
||||
|
||||
def read_tx_and_vsize(self):
|
||||
tx, tx_hash, vsize = self._read_tx_parts(produce_hash=False)
|
||||
return tx, vsize
|
||||
|
||||
def read_tx_block(self):
|
||||
"""Returns a list of (deserialized_tx, tx_hash) pairs."""
|
||||
read = self.read_tx_and_hash
|
||||
txs = [read() for _ in range(self._read_varint())]
|
||||
stxs = [read() for _ in range(self._read_varint())]
|
||||
return txs + stxs
|
||||
|
||||
def read_tx_tree(self):
|
||||
"""Returns a list of deserialized_tx without tx hashes."""
|
||||
read_tx = self.read_tx
|
||||
return [read_tx() for _ in range(self._read_varint())]
|
||||
|
||||
def _read_input(self):
|
||||
return TxInputDcr(
|
||||
self._read_nbytes(32), # prev_hash
|
||||
self._read_le_uint32(), # prev_idx
|
||||
self._read_byte(), # tree
|
||||
self._read_le_uint32(), # sequence
|
||||
)
|
||||
|
||||
def _read_output(self):
|
||||
return TxOutputDcr(
|
||||
self._read_le_int64(), # value
|
||||
self._read_le_uint16(), # version
|
||||
self._read_varbytes(), # pk_script
|
||||
)
|
||||
|
||||
def _read_witness(self, fields):
|
||||
read_witness_field = self._read_witness_field
|
||||
assert fields == self._read_varint()
|
||||
return [read_witness_field() for _ in range(fields)]
|
||||
|
||||
def _read_witness_field(self):
|
||||
value_in = self._read_le_int64()
|
||||
block_height = self._read_le_uint32()
|
||||
block_index = self._read_le_uint32()
|
||||
script = self._read_varbytes()
|
||||
return value_in, block_height, block_index, script
|
||||
|
||||
def _read_tx_parts(self, produce_hash=True):
|
||||
start = self.cursor
|
||||
version = self._read_le_int32()
|
||||
inputs = self._read_inputs()
|
||||
outputs = self._read_outputs()
|
||||
locktime = self._read_le_uint32()
|
||||
expiry = self._read_le_uint32()
|
||||
end_prefix = self.cursor
|
||||
witness = self._read_witness(len(inputs))
|
||||
|
||||
if produce_hash:
|
||||
# TxSerializeNoWitness << 16 == 0x10000
|
||||
no_witness_header = pack_le_uint32(0x10000 | (version & 0xffff))
|
||||
prefix_tx = no_witness_header + self.binary[start+4:end_prefix]
|
||||
tx_hash = self.blake256(prefix_tx)
|
||||
else:
|
||||
tx_hash = None
|
||||
|
||||
return TxDcr(
|
||||
version,
|
||||
inputs,
|
||||
outputs,
|
||||
locktime,
|
||||
expiry,
|
||||
witness
|
||||
), tx_hash, self.cursor - start
|
|
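# Side note on the hashing above (an illustrative, standalone sketch, not part
# of the diff): Decred's "no witness" serialization header keeps the tx version
# in the low 16 bits and the serialization type (TxSerializeNoWitness == 1) in
# the high 16 bits, which is why the prefix hash swaps in a masked header.
import struct

def no_witness_header(version: int) -> bytes:
    return struct.pack('<I', 0x10000 | (version & 0xffff))

assert no_witness_header(1) == b'\x01\x00\x01\x00'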
@ -1,361 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.

"""Miscellaneous utility classes and functions."""


import array
import inspect
from ipaddress import ip_address
import logging
import re
import sys
from collections import Container, Mapping
from struct import pack, Struct

# Logging utilities


class ConnectionLogger(logging.LoggerAdapter):
    """Prepends a connection identifier to a logging message."""
    def process(self, msg, kwargs):
        conn_id = self.extra.get('conn_id', 'unknown')
        return f'[{conn_id}] {msg}', kwargs


class CompactFormatter(logging.Formatter):
    """Strips the module from the logger name to leave the class only."""
    def format(self, record):
        record.name = record.name.rpartition('.')[-1]
        return super().format(record)


def make_logger(name, *, handler, level):
    """Return the root ElectrumX logger."""
    logger = logging.getLogger(name)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.propagate = False
    return logger


def class_logger(path, classname):
    """Return a hierarchical logger for a class."""
    return logging.getLogger(path).getChild(classname)


# Method decorator.  To be used for calculations that will always
# deliver the same result.  The method cannot take any arguments
# and should be accessed as an attribute.
class cachedproperty:

    def __init__(self, f):
        self.f = f

    def __get__(self, obj, type):
        obj = obj or type
        value = self.f(obj)
        setattr(obj, self.f.__name__, value)
        return value
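# A brief usage sketch of cachedproperty (the _Example class is hypothetical,
# added for illustration): the wrapped method runs once, then the stored
# attribute shadows the non-data descriptor on every later lookup.
class _Example:
    calls = 0

    @cachedproperty
    def answer(self):
        _Example.calls += 1
        return 6 * 7

_e = _Example()
assert _e.answer == 42 and _e.answer == 42  # second access reads the attribute
assert _Example.calls == 1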
def formatted_time(t, sep=' '):
    """Return a number of seconds as a string in days, hours, mins and
    maybe secs."""
    t = int(t)
    fmts = (('{:d}d', 86400), ('{:02d}h', 3600), ('{:02d}m', 60))
    parts = []
    for fmt, n in fmts:
        val = t // n
        if parts or val:
            parts.append(fmt.format(val))
        t %= n
    if len(parts) < 3:
        parts.append(f'{t:02d}s')
    return sep.join(parts)
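# Expected behavior of formatted_time, as a couple of illustrative checks:
# seconds are dropped once three larger units are present.
assert formatted_time(90061) == '1d 01h 01m'
assert formatted_time(59) == '59s'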
def deep_getsizeof(obj):
    """Find the memory footprint of a Python object.

    Based on code from code.tutsplus.com: http://goo.gl/fZ0DXK

    This is a recursive function that drills down a Python object graph
    like a dictionary holding nested dictionaries with lists of lists
    and tuples and sets.

    The sys.getsizeof function does a shallow size of only.  It counts each
    object inside a container as pointer only regardless of how big it
    really is.
    """

    ids = set()

    def size(o):
        if id(o) in ids:
            return 0

        r = sys.getsizeof(o)
        ids.add(id(o))

        if isinstance(o, (str, bytes, bytearray, array.array)):
            return r

        if isinstance(o, Mapping):
            return r + sum(size(k) + size(v) for k, v in o.items())

        if isinstance(o, Container):
            return r + sum(size(x) for x in o)

        return r

    return size(obj)
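# Illustration (note this module imports Container and Mapping straight from
# collections, so it requires Python < 3.10): a shared sub-object is counted
# once, so one list referenced twice measures smaller than two distinct lists
# of equal size.
_shared = ['x' * 100]
assert deep_getsizeof({'a': _shared, 'b': _shared}) < \
    deep_getsizeof({'a': ['x' * 100], 'b': ['y' * 100]})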
def subclasses(base_class, strict=True):
    """Return a list of subclasses of base_class in its module."""
    def select(obj):
        return (inspect.isclass(obj) and issubclass(obj, base_class) and
                (not strict or obj != base_class))

    pairs = inspect.getmembers(sys.modules[base_class.__module__], select)
    return [pair[1] for pair in pairs]


def chunks(items, size):
    """Break up items, an iterable, into chunks of length size."""
    for i in range(0, len(items), size):
        yield items[i: i + size]


def resolve_limit(limit):
    if limit is None:
        return -1
    assert isinstance(limit, int) and limit >= 0
    return limit


def bytes_to_int(be_bytes):
    """Interprets a big-endian sequence of bytes as an integer"""
    return int.from_bytes(be_bytes, 'big')


def int_to_bytes(value):
    """Converts an integer to a big-endian sequence of bytes"""
    return value.to_bytes((value.bit_length() + 7) // 8, 'big')


def increment_byte_string(bs):
    """Return the lexicographically next byte string of the same length.

    Return None if there is none (when the input is all 0xff bytes)."""
    for n in range(1, len(bs) + 1):
        if bs[-n] != 0xff:
            return bs[:-n] + bytes([bs[-n] + 1]) + bytes(n - 1)
    return None
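# Behavior sketch for increment_byte_string: trailing 0xff bytes roll over
# to 0x00, and a string of all 0xff bytes has no same-length successor.
assert increment_byte_string(b'\x00\x01') == b'\x00\x02'
assert increment_byte_string(b'\x00\xff') == b'\x01\x00'
assert increment_byte_string(b'\xff\xff') is None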
class LogicalFile:
    """A logical binary file split across several separate files on disk."""

    def __init__(self, prefix, digits, file_size):
        digit_fmt = f'{{:0{digits:d}d}}'
        self.filename_fmt = prefix + digit_fmt
        self.file_size = file_size

    def read(self, start, size=-1):
        """Read up to size bytes from the virtual file, starting at offset
        start, and return them.

        If size is -1 all bytes are read."""
        parts = []
        while size != 0:
            try:
                with self.open_file(start, False) as f:
                    part = f.read(size)
                if not part:
                    break
            except FileNotFoundError:
                break
            parts.append(part)
            start += len(part)
            if size > 0:
                size -= len(part)
        return b''.join(parts)

    def write(self, start, b):
        """Write the bytes-like object, b, to the underlying virtual file."""
        while b:
            size = min(len(b), self.file_size - (start % self.file_size))
            with self.open_file(start, True) as f:
                f.write(b if size == len(b) else b[:size])
            b = b[size:]
            start += size

    def open_file(self, start, create):
        """Open the virtual file and seek to start.  Return a file handle.
        Raise FileNotFoundError if the file does not exist and create
        is False.
        """
        file_num, offset = divmod(start, self.file_size)
        filename = self.filename_fmt.format(file_num)
        f = open_file(filename, create)
        f.seek(offset)
        return f


def open_file(filename, create=False):
    """Open the file name.  Return its handle."""
    try:
        return open(filename, 'rb+')
    except FileNotFoundError:
        if create:
            return open(filename, 'wb+')
        raise


def open_truncate(filename):
    """Open the file name.  Return its handle."""
    return open(filename, 'wb+')
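# A minimal round-trip through LogicalFile (illustrative; the temp-dir prefix
# is hypothetical, and this relies on LogicalFile and open_file above): forty
# bytes written at offset 0 with a 16-byte physical file size span three
# on-disk files, and reads cross the file boundaries transparently.
import os
import tempfile

_d = tempfile.mkdtemp()
_lf = LogicalFile(os.path.join(_d, 'chunk'), digits=4, file_size=16)
_lf.write(0, b'A' * 40)              # creates chunk0000..chunk0002
assert _lf.read(8, 16) == b'A' * 16  # read spans chunk0000 and chunk0001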
def address_string(address):
    """Return an address as a correctly formatted string."""
    fmt = '{}:{:d}'
    host, port = address
    try:
        host = ip_address(host)
    except ValueError:
        pass
    else:
        if host.version == 6:
            fmt = '[{}]:{:d}'
    return fmt.format(host, port)
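# Formatting sketch: IPv6 literals get bracketed, anything else is host:port.
assert address_string(('::1', 50001)) == '[::1]:50001'
assert address_string(('example.com', 50001)) == 'example.com:50001'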
# See http://stackoverflow.com/questions/2532053/validate-a-hostname-string
# Note underscores are valid in domain names, but strictly invalid in host
# names.  We ignore that distinction.


SEGMENT_REGEX = re.compile("(?!-)[A-Z_\\d-]{1,63}(?<!-)$", re.IGNORECASE)


def is_valid_hostname(hostname):
    if len(hostname) > 255:
        return False
    # strip exactly one dot from the right, if present
    if hostname and hostname[-1] == ".":
        hostname = hostname[:-1]
    return all(SEGMENT_REGEX.match(x) for x in hostname.split("."))
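# Quick checks against the validator above (underscores pass, per the note):
assert is_valid_hostname("spv1.lbry.com")
assert is_valid_hostname("my_host.example.")   # one trailing dot is stripped
assert not is_valid_hostname("-bad-.example")  # segments may not start/end with '-'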
def protocol_tuple(s):
    """Converts a protocol version number, such as "1.0" to a tuple (1, 0).

    If the version number is bad, (0, ) indicating version 0 is returned."""
    try:
        return tuple(int(part) for part in s.split('.'))
    except Exception:
        return (0, )


def version_string(ptuple):
    """Convert a version tuple such as (1, 2) to "1.2".
    There is always at least one dot, so (1, ) becomes "1.0"."""
    while len(ptuple) < 2:
        ptuple += (0, )
    return '.'.join(str(p) for p in ptuple)


def protocol_version(client_req, min_tuple, max_tuple):
    """Given a client's protocol version string, return a pair of
    protocol tuples:

        (negotiated version, client min request)

    If the request is unsupported, the negotiated protocol tuple is
    None.
    """
    if client_req is None:
        client_min = client_max = min_tuple
    else:
        if isinstance(client_req, list) and len(client_req) == 2:
            client_min, client_max = client_req
        else:
            client_min = client_max = client_req
        client_min = protocol_tuple(client_min)
        client_max = protocol_tuple(client_max)

    result = min(client_max, max_tuple)
    if result < max(client_min, min_tuple) or result == (0, ):
        result = None

    return result, client_min
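# Negotiation sketch: a client advertising ["1.1", "1.4"] against a server
# supporting (1, 2) through (1, 5) settles on the highest mutually supported
# version, while a request entirely below the range negotiates to None.
assert protocol_version(["1.1", "1.4"], (1, 2), (1, 5)) == ((1, 4), (1, 1))
assert protocol_version("0.9", (1, 2), (1, 5))[0] is None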
struct_le_i = Struct('<i')
struct_le_q = Struct('<q')
struct_le_H = Struct('<H')
struct_le_I = Struct('<I')
struct_le_Q = Struct('<Q')
struct_be_H = Struct('>H')
struct_be_I = Struct('>I')
structB = Struct('B')

unpack_le_int32_from = struct_le_i.unpack_from
unpack_le_int64_from = struct_le_q.unpack_from
unpack_le_uint16_from = struct_le_H.unpack_from
unpack_le_uint32_from = struct_le_I.unpack_from
unpack_le_uint64_from = struct_le_Q.unpack_from
unpack_be_uint16_from = struct_be_H.unpack_from
unpack_be_uint32_from = struct_be_I.unpack_from

unpack_be_uint64 = lambda x: int.from_bytes(x, byteorder='big')

pack_le_int32 = struct_le_i.pack
pack_le_int64 = struct_le_q.pack
pack_le_uint16 = struct_le_H.pack
pack_le_uint32 = struct_le_I.pack
pack_be_uint64 = lambda x: x.to_bytes(8, byteorder='big')
pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big')
pack_be_uint32 = struct_be_I.pack
pack_byte = structB.pack

hex_to_bytes = bytes.fromhex


def pack_varint(n):
    if n < 253:
        return pack_byte(n)
    if n < 65536:
        return pack_byte(253) + pack_le_uint16(n)
    if n < 4294967296:
        return pack_byte(254) + pack_le_uint32(n)
    return pack_byte(255) + pack_le_uint64(n)


def pack_varbytes(data):
    return pack_varint(len(data)) + data
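# Encoding sketch for the Bitcoin-style varints above: values below 253 fit in
# a single byte, then 0xfd/0xfe/0xff prefix 2-, 4- and 8-byte little-endian
# forms, and pack_varbytes length-prefixes a payload.
assert pack_varint(0xfc) == b'\xfc'
assert pack_varint(0xfd) == b'\xfd\xfd\x00'
assert pack_varint(0x10000) == b'\xfe\x00\x00\x01\x00'
assert pack_varbytes(b'abc') == b'\x03abc'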
@ -1,3 +0,0 @@
# need this to avoid circular import
PROTOCOL_MIN = (0, 54, 0)
PROTOCOL_MAX = (0, 199, 0)
@ -1,55 +0,0 @@
import asyncio
from weakref import WeakSet

from aiohttp.web import Application, AppRunner, WebSocketResponse, TCPSite
from aiohttp.http_websocket import WSMsgType, WSCloseCode


class AdminWebSocket:

    def __init__(self, manager):
        self.manager = manager
        self.app = Application()
        self.app['websockets'] = WeakSet()
        self.app.router.add_get('/', self.on_connect)
        self.app.on_shutdown.append(self.on_shutdown)
        self.runner = AppRunner(self.app)

    async def on_status(self, _):
        if not self.app['websockets']:
            return
        self.send_message({
            'type': 'status',
            'height': self.manager.daemon.cached_height(),
        })

    def send_message(self, msg):
        for web_socket in self.app['websockets']:
            asyncio.create_task(web_socket.send_json(msg))

    async def start(self):
        await self.runner.setup()
        await TCPSite(self.runner, self.manager.env.websocket_host, self.manager.env.websocket_port).start()

    async def stop(self):
        await self.runner.cleanup()

    async def on_connect(self, request):
        web_socket = WebSocketResponse()
        await web_socket.prepare(request)
        self.app['websockets'].add(web_socket)
        try:
            async for msg in web_socket:
                if msg.type == WSMsgType.TEXT:
                    await self.on_status(None)
                elif msg.type == WSMsgType.ERROR:
                    print('web socket connection closed with exception %s' %
                          web_socket.exception())
        finally:
            self.app['websockets'].discard(web_socket)
        return web_socket

    @staticmethod
    async def on_shutdown(app):
        for web_socket in set(app['websockets']):
            await web_socket.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown')
@ -23,7 +23,7 @@ class SPVPing(NamedTuple):
    pad_bytes: bytes

    def encode(self):
        return struct.pack(b'!lB64s', *self)
        return struct.pack(b'!lB64s', *self)  # pylint: disable=not-an-iterable

    @staticmethod
    def make() -> bytes:

@ -49,7 +49,7 @@ class SPVPong(NamedTuple):
    country: int

    def encode(self):
        return struct.pack(PONG_ENCODING, *self)
        return struct.pack(PONG_ENCODING, *self)  # pylint: disable=not-an-iterable

    @staticmethod
    def encode_address(address: str):

@ -110,6 +110,7 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):
        self._min_delay = 1 / throttle_reqs_per_sec
        self._allow_localhost = allow_localhost
        self._allow_lan = allow_lan
        self.closed = asyncio.Event()

    def update_cached_response(self):
        self._left_cache, self._right_cache = SPVPong.make_sans_source_address(

@ -160,13 +161,16 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):

    def connection_made(self, transport) -> None:
        self.transport = transport
        self.closed.clear()

    def connection_lost(self, exc: Optional[Exception]) -> None:
        self.transport = None
        self.closed.set()

    def close(self):
    async def close(self):
        if self.transport:
            self.transport.close()
        await self.closed.wait()


class StatusServer:

@ -184,9 +188,9 @@ class StatusServer:
        await loop.create_datagram_endpoint(lambda: self._protocol, (interface, port))
        log.info("started udp status server on %s:%i", interface, port)

    def stop(self):
    async def stop(self):
        if self.is_running:
            self._protocol.close()
            await self._protocol.close()
            self._protocol = None

    @property
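# A standalone sketch of the shutdown pattern introduced above (EchoProtocol
# is a hypothetical stand-in): transport.close() only schedules
# connection_lost(), so an asyncio.Event lets callers await actual teardown.
import asyncio

class EchoProtocol(asyncio.DatagramProtocol):
    def __init__(self):
        self.closed = asyncio.Event()
        self.transport = None

    def connection_made(self, transport):
        self.transport = transport
        self.closed.clear()

    def connection_lost(self, exc):
        self.transport = None
        self.closed.set()

    async def close(self):
        if self.transport:
            self.transport.close()
        await self.closed.wait()  # resolves once connection_lost() fires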
@ -40,22 +40,17 @@ def checkrecord(record, expected_winner, expected_claim):


async def checkcontrolling(daemon: Daemon, db: SQLDB):
    records, claim_ids, names, futs = [], [], [], []
    records, names, futs = [], [], []
    for record in db.get_claims('claimtrie.claim_hash as is_controlling, claim.*', is_controlling=True):
        records.append(record)
        claim_id = hex_reverted(record['claim_hash'])
        claim_ids.append((claim_id,))
        names.append((record['normalized'],))
        names.append((record['normalized'], (claim_id,), "", True))  # last parameter is IncludeValues
        if len(names) > 50000:
            futs.append(daemon._send_vector('getvalueforname', names[:]))
            futs.append(daemon._send_vector('getclaimbyid', claim_ids[:]))
            futs.append(daemon._send_vector('getclaimsfornamebyid', names))
            names.clear()
            claim_ids.clear()
    if names:
        futs.append(daemon._send_vector('getvalueforname', names[:]))
        futs.append(daemon._send_vector('getclaimbyid', claim_ids[:]))
        futs.append(daemon._send_vector('getclaimsfornamebyid', names))
        names.clear()
        claim_ids.clear()

    while futs:
        winners, claims = futs.pop(0), futs.pop(0)
@ -1,12 +1,12 @@
#!/bin/bash

SNAPSHOT_HEIGHT="1049658"
SNAPSHOT_HEIGHT="1072108"

HUB_VOLUME_PATH="/var/lib/docker/volumes/${USER}_wallet_server"
ES_VOLUME_PATH="/var/lib/docker/volumes/${USER}_es01"

SNAPSHOT_TAR_NAME="wallet_server_snapshot_${SNAPSHOT_HEIGHT}.tar"
ES_SNAPSHOT_TAR_NAME="es_snapshot_${SNAPSHOT_HEIGHT}.tar"
SNAPSHOT_TAR_NAME="wallet_server_snapshot_${SNAPSHOT_HEIGHT}.tar.gz"
ES_SNAPSHOT_TAR_NAME="es_snapshot_${SNAPSHOT_HEIGHT}.tar.gz"

SNAPSHOT_URL="https://snapshots.lbry.com/hub/${SNAPSHOT_TAR_NAME}"
ES_SNAPSHOT_URL="https://snapshots.lbry.com/hub/${ES_SNAPSHOT_TAR_NAME}"
setup.py
@ -7,9 +7,11 @@ BASE = os.path.dirname(__file__)
with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh:
    long_description = fh.read()

PLYVEL = []
if sys.platform.startswith('linux'):
    PLYVEL.append('plyvel==1.3.0')

ROCKSDB = []
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
    ROCKSDB.append('lbry-rocksdb==0.8.2')


setup(
    name=__name__,

@ -28,16 +30,14 @@ setup(
    entry_points={
        'console_scripts': [
            'lbrynet=lbry.extras.cli:main',
            'lbry-hub=lbry.wallet.server.cli:main',
            'orchstr8=lbry.wallet.orchstr8.cli:main',
            'lbry-hub-elastic-sync=lbry.wallet.server.db.elasticsearch.sync:run_elastic_sync'
            'orchstr8=lbry.wallet.orchstr8.cli:main'
        ],
    },
    install_requires=[
        'aiohttp==3.5.4',
        'aiohttp==3.7.4',
        'aioupnp==0.0.18',
        'appdirs==1.4.3',
        'certifi>=2018.11.29',
        'certifi>=2021.10.08',
        'colorama==0.3.7',
        'distro==1.4.0',
        'base58==1.0.0',

@ -49,7 +49,7 @@ setup(
        'ecdsa==0.13.3',
        'pyyaml==5.3.1',
        'docopt==0.6.2',
        'hachoir',
        'hachoir==3.1.2',
        'multidict==4.6.1',
        'coincurve==15.0.0',
        'pbkdf2==1.3',

@ -57,12 +57,13 @@ setup(
        'pylru==1.1.0',
        'elasticsearch==7.10.1',
        'grpcio==1.38.0',
        'filetype==1.0.9'
    ] + PLYVEL,
        'filetype==1.0.9',
    ] + ROCKSDB,
    extras_require={
        'torrent': ['lbry-libtorrent'],
        'lint': ['pylint==2.10.0'],
        'test': ['coverage'],
        'scribe': ['scribe @ git+https://github.com/lbryio/scribe.git'],
    },
    classifiers=[
        'Framework :: AsyncIO',
@ -103,7 +103,7 @@ class AccountManagement(CommandTestCase):
        second_account = await self.daemon.jsonrpc_account_create('second account')

        tx = await self.daemon.jsonrpc_account_send(
            '0.05', await self.daemon.jsonrpc_address_unused(account_id=second_account.id)
            '0.05', await self.daemon.jsonrpc_address_unused(account_id=second_account.id), blocking=True
        )
        await self.confirm_tx(tx.id)
        await self.assertOutputAmount(['0.05', '9.949876'], utxo_list())
@ -9,7 +9,7 @@ class BlockchainReorganizationTests(CommandTestCase):
    VERBOSITY = logging.WARN

    async def assertBlockHash(self, height):
        bp = self.conductor.spv_node.server.bp
        bp = self.conductor.spv_node.writer

        def get_txids():
            return [

@ -29,15 +29,16 @@ class BlockchainReorganizationTests(CommandTestCase):
        self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are out of order')

    async def test_reorg(self):
        bp = self.conductor.spv_node.server.bp
        bp = self.conductor.spv_node.writer
        bp.reorg_count_metric.set(0)
        # invalidate current block, move forward 2
        height = 206
        self.assertEqual(self.ledger.headers.height, height)
        await self.assertBlockHash(height)
        await self.blockchain.invalidate_block((await self.ledger.headers.hash(206)).decode())
        block_hash = (await self.ledger.headers.hash(206)).decode()
        await self.blockchain.invalidate_block(block_hash)
        await self.blockchain.generate(2)
        await self.ledger.on_header.where(lambda e: e.height == 207)
        await asyncio.wait_for(self.on_header(207), 3.0)
        self.assertEqual(self.ledger.headers.height, 207)
        await self.assertBlockHash(206)
        await self.assertBlockHash(207)

@ -46,14 +47,14 @@ class BlockchainReorganizationTests(CommandTestCase):
        # invalidate current block, move forward 3
        await self.blockchain.invalidate_block((await self.ledger.headers.hash(206)).decode())
        await self.blockchain.generate(3)
        await self.ledger.on_header.where(lambda e: e.height == 208)
        await asyncio.wait_for(self.on_header(208), 3.0)
        self.assertEqual(self.ledger.headers.height, 208)
        await self.assertBlockHash(206)
        await self.assertBlockHash(207)
        await self.assertBlockHash(208)
        self.assertEqual(2, bp.reorg_count_metric._samples()[0][2])
        await self.blockchain.generate(3)
        await self.ledger.on_header.where(lambda e: e.height == 211)
        await asyncio.wait_for(self.on_header(211), 3.0)
        await self.assertBlockHash(209)
        await self.assertBlockHash(210)
        await self.assertBlockHash(211)

@ -62,7 +63,7 @@ class BlockchainReorganizationTests(CommandTestCase):
        )
        await self.ledger.wait(still_valid)
        await self.blockchain.generate(1)
        await self.ledger.on_header.where(lambda e: e.height == 212)
        await asyncio.wait_for(self.on_header(212), 1.0)
        claim_id = still_valid.outputs[0].claim_id
        c1 = (await self.resolve(f'still-valid#{claim_id}'))['claim_id']
        c2 = (await self.resolve(f'still-valid#{claim_id[:2]}'))['claim_id']

@ -71,7 +72,7 @@ class BlockchainReorganizationTests(CommandTestCase):

        abandon_tx = await self.daemon.jsonrpc_stream_abandon(claim_id=claim_id)
        await self.blockchain.generate(1)
        await self.ledger.on_header.where(lambda e: e.height == 213)
        await asyncio.wait_for(self.on_header(213), 1.0)
        c1 = await self.resolve(f'still-valid#{still_valid.outputs[0].claim_id}')
        c2 = await self.daemon.jsonrpc_resolve([f'still-valid#{claim_id[:2]}'])
        c3 = await self.daemon.jsonrpc_resolve([f'still-valid'])

@ -112,11 +113,10 @@ class BlockchainReorganizationTests(CommandTestCase):

        # reorg the last block dropping our claim tx
        await self.blockchain.invalidate_block(invalidated_block_hash)
        await self.blockchain.clear_mempool()
        await self.conductor.clear_mempool()
        await self.blockchain.generate(2)

        # wait for the client to catch up and verify the reorg
        await asyncio.wait_for(self.on_header(209), 3.0)

        await self.assertBlockHash(207)
        await self.assertBlockHash(208)
        await self.assertBlockHash(209)

@ -142,9 +142,8 @@ class BlockchainReorganizationTests(CommandTestCase):
        # broadcast the claim in a different block
        new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
        self.assertEqual(broadcast_tx.id, new_txid)
        await self.blockchain.generate(1)

        # wait for the client to catch up
        await self.blockchain.generate(1)
        await asyncio.wait_for(self.on_header(210), 1.0)

        # verify the claim is in the new block and that it is returned by claim_search

@ -191,7 +190,7 @@ class BlockchainReorganizationTests(CommandTestCase):

        # reorg the last block dropping our claim tx
        await self.blockchain.invalidate_block(invalidated_block_hash)
        await self.blockchain.clear_mempool()
        await self.conductor.clear_mempool()
        await self.blockchain.generate(2)

        # wait for the client to catch up and verify the reorg

@ -222,8 +221,6 @@ class BlockchainReorganizationTests(CommandTestCase):
        new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
        self.assertEqual(broadcast_tx.id, new_txid)
        await self.blockchain.generate(1)

        # wait for the client to catch up
        await asyncio.wait_for(self.on_header(210), 1.0)

        # verify the claim is in the new block and that it is returned by claim_search
@ -1,13 +1,16 @@
import asyncio
import scribe

import lbry
from unittest.mock import Mock

from scribe.blockchain.network import LBCRegTest
from scribe.hub.udp import StatusServer
from scribe.hub.session import LBRYElectrumX

from lbry.wallet.network import Network
from lbry.wallet.orchstr8 import Conductor
from lbry.wallet.orchstr8.node import SPVNode
from lbry.wallet.rpc import RPCSession
from lbry.wallet.server.udp import StatusServer
from lbry.testcase import IntegrationTestCase, AsyncioTestCase
from lbry.conf import Config


@ -22,7 +25,7 @@ class NetworkTests(IntegrationTestCase):

    async def test_server_features(self):
        self.assertDictEqual({
            'genesis_hash': self.conductor.spv_node.coin_class.GENESIS_HASH,
            'genesis_hash': LBCRegTest.GENESIS_HASH,
            'hash_function': 'sha256',
            'hosts': {},
            'protocol_max': '0.199.0',

@ -32,22 +35,27 @@ class NetworkTests(IntegrationTestCase):
            'payment_address': '',
            'donation_address': '',
            'daily_fee': '0',
            'server_version': lbry.__version__,
            'server_version': scribe.__version__,
            'trending_algorithm': 'fast_ar',
        }, await self.ledger.network.get_server_features())
        # await self.conductor.spv_node.stop()
        payment_address, donation_address = await self.account.get_addresses(limit=2)

        original_address = self.conductor.spv_node.server.env.payment_address
        original_donation_address = self.conductor.spv_node.server.env.donation_address
        original_description = self.conductor.spv_node.server.env.description
        original_daily_fee = self.conductor.spv_node.server.env.daily_fee

        self.conductor.spv_node.server.env.payment_address = payment_address
        self.conductor.spv_node.server.env.donation_address = donation_address
        self.conductor.spv_node.server.env.description = 'Fastest server in the west.'
        self.conductor.spv_node.server.env.daily_fee = '42'

        from lbry.wallet.server.session import LBRYElectrumX
        LBRYElectrumX.set_server_features(self.conductor.spv_node.server.env)

        # await self.ledger.network.on_connected.first
        self.assertDictEqual({
            'genesis_hash': self.conductor.spv_node.coin_class.GENESIS_HASH,
            'genesis_hash': LBCRegTest.GENESIS_HASH,
            'hash_function': 'sha256',
            'hosts': {},
            'protocol_max': '0.199.0',

@ -57,16 +65,23 @@ class NetworkTests(IntegrationTestCase):
            'payment_address': payment_address,
            'donation_address': donation_address,
            'daily_fee': '42',
            'server_version': lbry.__version__,
            'server_version': scribe.__version__,
            'trending_algorithm': 'fast_ar',
        }, await self.ledger.network.get_server_features())

        # cleanup the changes since the attributes are set on the class
        self.conductor.spv_node.server.env.payment_address = original_address
        self.conductor.spv_node.server.env.donation_address = original_donation_address
        self.conductor.spv_node.server.env.description = original_description
        self.conductor.spv_node.server.env.daily_fee = original_daily_fee
        LBRYElectrumX.set_server_features(self.conductor.spv_node.server.env)


class ReconnectTests(IntegrationTestCase):

    async def test_multiple_servers(self):
        # we have a secondary node that connects later, so
        node2 = SPVNode(self.conductor.spv_module, node_number=2)
        node2 = SPVNode(node_number=2)
        await node2.start(self.blockchain)

        self.ledger.network.config['explicit_servers'].append((node2.hostname, node2.port))

@ -86,7 +101,7 @@ class ReconnectTests(IntegrationTestCase):
        await self.ledger.stop()
        initial_height = self.ledger.local_height_including_downloaded_height
        await self.blockchain.generate(100)
        while self.conductor.spv_node.server.session_mgr.notified_height < initial_height + 99:  # off by 1
        while self.conductor.spv_node.server.session_manager.notified_height < initial_height + 99:  # off by 1
            await asyncio.sleep(0.1)
        self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height)
        await self.ledger.headers.open()

@ -101,12 +116,7 @@ class ReconnectTests(IntegrationTestCase):
        self.ledger.network.client.transport.close()
        self.assertFalse(self.ledger.network.is_connected)
        await self.ledger.resolve([], 'derp')
        sendtxid = await self.blockchain.send_to_address(address1, 1.1337)
        # await self.ledger.resolve([], 'derp')
        # self.assertTrue(self.ledger.network.is_connected)
        await asyncio.wait_for(self.on_transaction_id(sendtxid), 10.0)  # mempool
        await self.blockchain.generate(1)
        await self.on_transaction_id(sendtxid)  # confirmed
        sendtxid = await self.send_to_address_and_wait(address1, 1.1337, 1)
        self.assertLess(self.ledger.network.client.response_time, 1)  # response time properly set lower, we are fine

        await self.assertBalance(self.account, '1.1337')

@ -135,7 +145,7 @@ class ReconnectTests(IntegrationTestCase):
        await self.conductor.spv_node.stop()
        self.assertFalse(self.ledger.network.is_connected)
        await asyncio.sleep(0.2)  # let it retry and fail once
        await self.conductor.spv_node.start(self.conductor.blockchain_node)
        await self.conductor.spv_node.start(self.conductor.lbcwallet_node)
        await self.ledger.network.on_connected.first
        self.assertTrue(self.ledger.network.is_connected)

@ -161,15 +171,16 @@ class ReconnectTests(IntegrationTestCase):


class UDPServerFailDiscoveryTest(AsyncioTestCase):

    async def test_wallet_connects_despite_lack_of_udp(self):
        conductor = Conductor()
        conductor.spv_node.udp_port = '0'
        await conductor.start_blockchain()
        self.addCleanup(conductor.stop_blockchain)
        await conductor.start_lbcd()
        self.addCleanup(conductor.stop_lbcd)
        await conductor.start_lbcwallet()
        self.addCleanup(conductor.stop_lbcwallet)
        await conductor.start_spv()
        self.addCleanup(conductor.stop_spv)
        self.assertFalse(conductor.spv_node.server.bp.status_server.is_running)
        self.assertFalse(conductor.spv_node.server.status_server.is_running)
        await asyncio.wait_for(conductor.start_wallet(), timeout=5)
        self.addCleanup(conductor.stop_wallet)
        self.assertTrue(conductor.wallet_node.ledger.network.is_connected)
@ -103,7 +103,7 @@ class PurchaseCommandTests(CommandTestCase):

        # purchase non-existent claim fails
        with self.assertRaisesRegex(Exception, "Could not find claim with claim_id"):
            await self.daemon.jsonrpc_purchase_create('abc123')
            await self.daemon.jsonrpc_purchase_create('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')

        # purchase stream with no price fails
        no_price_stream = await self.priced_stream('no_price_stream', price=None)

@ -174,8 +174,7 @@ class PurchaseCommandTests(CommandTestCase):
        self.merchant_address = await self.account.receiving.get_or_create_usable_address()
        daemon2 = await self.add_daemon()
        address2 = await daemon2.wallet_manager.default_account.receiving.get_or_create_usable_address()
        sendtxid = await self.blockchain.send_to_address(address2, 2)
        await self.confirm_tx(sendtxid, daemon2.ledger)
        await self.send_to_address_and_wait(address2, 2, 1, ledger=daemon2.ledger)

        stream = await self.priced_stream('a', '1.0')
        await self.assertBalance(self.account, '9.987893')
@ -63,7 +63,7 @@ class SyncTests(IntegrationTestCase):
        await self.assertBalance(account1, '1.0')
        await self.assertBalance(account2, '1.0')

        await self.blockchain.generate(1)
        await self.generate(1)

        # pay 0.01 from main node to receiving node, would have increased change addresses
        address0 = (await account0.receiving.get_addresses())[0]

@ -79,7 +79,7 @@ class SyncTests(IntegrationTestCase):
            account1.ledger.wait(tx),
            account2.ledger.wait(tx),
        ])
        await self.blockchain.generate(1)
        await self.generate(1)
        await asyncio.wait([
            account0.ledger.wait(tx),
            account1.ledger.wait(tx),

@ -92,7 +92,7 @@ class SyncTests(IntegrationTestCase):
        await self.assertBalance(account1, '0.989876')
        await self.assertBalance(account2, '0.989876')

        await self.blockchain.generate(1)
        await self.generate(1)

        # create a new mirror node and see if it syncs to same balance from scratch
        node3 = await self.make_wallet_node(account1.seed)
@ -11,7 +11,7 @@ from lbry.wallet.dewies import dict_values_to_lbc
class WalletCommands(CommandTestCase):

    async def test_wallet_create_and_add_subscribe(self):
        session = next(iter(self.conductor.spv_node.server.session_mgr.sessions.values()))
        session = next(iter(self.conductor.spv_node.server.session_manager.sessions.values()))
        self.assertEqual(len(session.hashX_subs), 27)
        wallet = await self.daemon.jsonrpc_wallet_create('foo', create_account=True, single_key=True)
        self.assertEqual(len(session.hashX_subs), 28)

@ -23,7 +23,7 @@ class WalletCommands(CommandTestCase):
    async def test_wallet_syncing_status(self):
        address = await self.daemon.jsonrpc_address_unused()
        self.assertFalse(self.daemon.jsonrpc_wallet_status()['is_syncing'])
        await self.blockchain.send_to_address(address, 1)
        await self.send_to_address_and_wait(address, 1)
        await self.ledger._update_tasks.started.wait()
        self.assertTrue(self.daemon.jsonrpc_wallet_status()['is_syncing'])
        await self.ledger._update_tasks.done.wait()

@ -47,9 +47,9 @@ class WalletCommands(CommandTestCase):
        status = await self.daemon.jsonrpc_status()
        self.assertEqual(len(status['wallet']['servers']), 1)
        self.assertEqual(status['wallet']['servers'][0]['port'], 50002)
        await self.conductor.spv_node.stop(True)
        await self.conductor.spv_node.stop()
        self.conductor.spv_node.port = 54320
        await self.conductor.spv_node.start(self.conductor.blockchain_node)
        await self.conductor.spv_node.start(self.conductor.lbcwallet_node)
        status = await self.daemon.jsonrpc_status()
        self.assertEqual(len(status['wallet']['servers']), 0)
        self.daemon.jsonrpc_settings_set('lbryum_servers', ['localhost:54320'])

@ -59,23 +59,22 @@ class WalletCommands(CommandTestCase):
        self.assertEqual(status['wallet']['servers'][0]['port'], 54320)

    async def test_sending_to_scripthash_address(self):
        self.assertEqual(await self.blockchain.get_balance(), '95.99973580')
        bal = await self.blockchain.get_balance()
        await self.assertBalance(self.account, '10.0')
        p2sh_address1 = await self.blockchain.get_new_address(self.blockchain.P2SH_SEGWIT_ADDRESS)
        tx = await self.account_send('2.0', p2sh_address1)
        self.assertEqual(tx['outputs'][0]['address'], p2sh_address1)
        self.assertEqual(await self.blockchain.get_balance(), '98.99973580')  # +1 lbc for confirm block
        self.assertEqual(await self.blockchain.get_balance(), str(float(bal)+3))  # +1 lbc for confirm block
        await self.assertBalance(self.account, '7.999877')
        await self.wallet_send('3.0', p2sh_address1)
        self.assertEqual(await self.blockchain.get_balance(), '102.99973580')  # +1 lbc for confirm block
        self.assertEqual(await self.blockchain.get_balance(), str(float(bal)+7))  # +1 lbc for confirm block
        await self.assertBalance(self.account, '4.999754')

    async def test_balance_caching(self):
        account2 = await self.daemon.jsonrpc_account_create("Tip-er")
        address2 = await self.daemon.jsonrpc_address_unused(account2.id)
        sendtxid = await self.blockchain.send_to_address(address2, 10)
        await self.confirm_tx(sendtxid)
        await self.generate(1)
        await self.send_to_address_and_wait(address2, 10, 2)
        await self.ledger.tasks_are_done()  # don't mess with the query count while we need it

        wallet_balance = self.daemon.jsonrpc_wallet_balance
        ledger = self.ledger

@ -90,14 +89,16 @@ class WalletCommands(CommandTestCase):
        self.assertIsNone(ledger._balance_cache.get(self.account.id))

        query_count += 2
        self.assertEqual(await wallet_balance(), expected)
        balance = await wallet_balance()
        self.assertEqual(self.ledger.db.db.query_count, query_count)
        self.assertEqual(balance, expected)
        self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(self.account.id))['total'], '10.0')
        self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(account2.id))['total'], '10.0')

        # calling again uses cache
        self.assertEqual(await wallet_balance(), expected)
        balance = await wallet_balance()
        self.assertEqual(self.ledger.db.db.query_count, query_count)
        self.assertEqual(balance, expected)
        self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(self.account.id))['total'], '10.0')
        self.assertEqual(dict_values_to_lbc(ledger._balance_cache.get(account2.id))['total'], '10.0')

@ -123,8 +124,7 @@ class WalletCommands(CommandTestCase):
        wallet2 = await self.daemon.jsonrpc_wallet_create('foo', create_account=True)
        account3 = wallet2.default_account
        address3 = await self.daemon.jsonrpc_address_unused(account3.id, wallet2.id)
        await self.confirm_tx(await self.blockchain.send_to_address(address3, 1))
        await self.generate(1)
        await self.send_to_address_and_wait(address3, 1, 1)

        account_balance = self.daemon.jsonrpc_account_balance
        wallet_balance = self.daemon.jsonrpc_wallet_balance

@ -154,7 +154,7 @@ class WalletCommands(CommandTestCase):
        address2 = await self.daemon.jsonrpc_address_unused(account2.id)

        # send lbc to someone else
        tx = await self.daemon.jsonrpc_account_send('1.0', address2)
        tx = await self.daemon.jsonrpc_account_send('1.0', address2, blocking=True)
        await self.confirm_tx(tx.id)
        self.assertEqual(await account_balance(), {
            'total': '8.97741',

@ -187,7 +187,7 @@ class WalletCommands(CommandTestCase):
        })

        # tip claimed
        tx = await self.daemon.jsonrpc_support_abandon(txid=support1['txid'], nout=0)
        tx = await self.daemon.jsonrpc_support_abandon(txid=support1['txid'], nout=0, blocking=True)
        await self.confirm_tx(tx.id)
        self.assertEqual(await account_balance(), {
            'total': '9.277303',

@ -238,8 +238,7 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
            "carbon smart garage balance margin twelve"
        )
        address = (await self.daemon2.wallet_manager.default_account.receiving.get_addresses(limit=1, only_usable=True))[0]
        sendtxid = await self.blockchain.send_to_address(address, 1)
        await self.confirm_tx(sendtxid, self.daemon2.ledger)
        await self.send_to_address_and_wait(address, 1, 1, ledger=self.daemon2.ledger)

    def assertWalletEncrypted(self, wallet_path, encrypted):
        with open(wallet_path) as opened:

@ -294,7 +293,7 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
            '3056301006072a8648ce3d020106052b8104000a034200049ae7283f3f6723e0a1'
            '66b7e19e1d1167f6dc5f4af61b4a58066a0d2a8bed2b35c66bccb4ec3eba316b16'
            'a97a6d6a4a8effd29d748901bb9789352519cd00b13d'
        ), self.daemon2)
        ), self.daemon2, blocking=True)
        await self.confirm_tx(channel['txid'], self.daemon2.ledger)

        # both daemons will have the channel but only one has the cert so far
@ -1,12 +1,11 @@
import asyncio

import lbry
import lbry.wallet
import scribe
from scribe.hub.session import LBRYElectrumX

from lbry.error import ServerPaymentFeeAboveMaxAllowedError
from lbry.wallet.network import ClientSession
from lbry.wallet.rpc import RPCError
from lbry.wallet.server.db.elasticsearch.sync import make_es_index_and_run_sync
from lbry.wallet.server.session import LBRYElectrumX
from lbry.testcase import IntegrationTestCase, CommandTestCase
from lbry.wallet.orchstr8.node import SPVNode


@ -25,17 +24,17 @@ class TestSessions(IntegrationTestCase):
        )
        await session.create_connection()
        await session.send_request('server.banner', ())
        self.assertEqual(len(self.conductor.spv_node.server.session_mgr.sessions), 1)
        self.assertEqual(len(self.conductor.spv_node.server.session_manager.sessions), 1)
        self.assertFalse(session.is_closing())
        await asyncio.sleep(1.1)
        with self.assertRaises(asyncio.TimeoutError):
            await session.send_request('server.banner', ())
        self.assertTrue(session.is_closing())
        self.assertEqual(len(self.conductor.spv_node.server.session_mgr.sessions), 0)
        self.assertEqual(len(self.conductor.spv_node.server.session_manager.sessions), 0)

    async def test_proper_version(self):
        info = await self.ledger.network.get_server_features()
        self.assertEqual(lbry.__version__, info['server_version'])
        self.assertEqual(scribe.__version__, info['server_version'])

    async def test_client_errors(self):
        # Goal is ensuring those are raised and not trapped accidentally

@ -46,7 +45,7 @@ class TestSessions(IntegrationTestCase):


class TestUsagePayment(CommandTestCase):
    async def _test_single_server_payment(self):
    async def test_single_server_payment(self):
        wallet_pay_service = self.daemon.component_manager.get_component('wallet_server_payments')
        wallet_pay_service.payment_period = 1
        # only starts with a positive max key fee

@ -63,8 +62,8 @@ class TestUsagePayment(CommandTestCase):
        _, history = await self.ledger.get_local_status_and_history(address)
        self.assertEqual(history, [])

        node = SPVNode(self.conductor.spv_module, node_number=2)
        await node.start(self.blockchain, extraconf={"PAYMENT_ADDRESS": address, "DAILY_FEE": "1.1"})
        node = SPVNode(node_number=2)
        await node.start(self.blockchain, extraconf={"payment_address": address, "daily_fee": "1.1"})
        self.addCleanup(node.stop)
        self.daemon.jsonrpc_settings_set('lbryum_servers', [f"{node.hostname}:{node.port}"])
        await self.daemon.jsonrpc_wallet_reconnect()

@ -90,56 +89,78 @@ class TestUsagePayment(CommandTestCase):

class TestESSync(CommandTestCase):
    async def test_es_sync_utility(self):
        es_writer = self.conductor.spv_node.es_writer
        server_search_client = self.conductor.spv_node.server.session_manager.search_index

        for i in range(10):
            await self.stream_create(f"stream{i}", bid='0.001')
        await self.generate(1)
        self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
        db = self.conductor.spv_node.server.db
        env = self.conductor.spv_node.server.env

        await db.search_index.delete_index()
        db.search_index.clear_caches()
        self.assertEqual(0, len(await self.claim_search(order_by=['height'])))
        await db.search_index.stop()

        async def resync():
            await db.search_index.start()
            db.search_index.clear_caches()
            await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
            self.assertEqual(10, len(await self.claim_search(order_by=['height'])))

        # delete the index and verify nothing is returned by claim search
        await es_writer.delete_index()
        server_search_client.clear_caches()
        self.assertEqual(0, len(await self.claim_search(order_by=['height'])))

        await resync()

        # this time we will test a migration from unversioned to v1
        await db.search_index.sync_client.indices.delete_template(db.search_index.index)
        await db.search_index.stop()

        await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
        await db.search_index.start()

        await resync()
        # reindex, 10 claims should be returned
        await es_writer.reindex(force=True)
        self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
        server_search_client.clear_caches()
        self.assertEqual(10, len(await self.claim_search(order_by=['height'])))

        # reindex again, this should not appear to do anything but will delete and reinsert the same 10 claims
        await es_writer.reindex(force=True)
        self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
        server_search_client.clear_caches()
        self.assertEqual(10, len(await self.claim_search(order_by=['height'])))

        # delete the index again and stop the writer, upon starting it the writer should reindex automatically
        await es_writer.delete_index()
        await es_writer.stop()
        server_search_client.clear_caches()
        self.assertEqual(0, len(await self.claim_search(order_by=['height'])))

        await es_writer.start(reindex=True)
        self.assertEqual(10, len(await self.claim_search(order_by=['height'])))

        # stop the es writer and advance the chain by 1, adding a new claim. upon resuming the es writer, it should
        # add the new claim
        await es_writer.stop()
        await self.stream_create(f"stream11", bid='0.001', confirm=False)
        generate_block_task = asyncio.create_task(self.generate(1))
        await es_writer.start()
        await generate_block_task
        self.assertEqual(11, len(await self.claim_search(order_by=['height'])))


        # # this time we will test a migration from unversioned to v1
        # await db.search_index.sync_client.indices.delete_template(db.search_index.index)
        # await db.search_index.stop()
        #
        # await make_es_index_and_run_sync(env, db=db, index_name=db.search_index.index, force=True)
        # await db.search_index.start()
        #
        # await es_writer.reindex()
        # self.assertEqual(10, len(await self.claim_search(order_by=['height'])))


class TestHubDiscovery(CommandTestCase):

    async def test_hub_discovery(self):
        us_final_node = SPVNode(self.conductor.spv_module, node_number=2)
        await us_final_node.start(self.blockchain, extraconf={"COUNTRY": "US"})
        us_final_node = SPVNode(node_number=2)
        await us_final_node.start(self.blockchain, extraconf={"country": "US"})
        self.addCleanup(us_final_node.stop)
        final_node_host = f"{us_final_node.hostname}:{us_final_node.port}"

        kp_final_node = SPVNode(self.conductor.spv_module, node_number=3)
        await kp_final_node.start(self.blockchain, extraconf={"COUNTRY": "KP"})
        kp_final_node = SPVNode(node_number=3)
        await kp_final_node.start(self.blockchain, extraconf={"country": "KP"})
        self.addCleanup(kp_final_node.stop)
        kp_final_node_host = f"{kp_final_node.hostname}:{kp_final_node.port}"

        relay_node = SPVNode(self.conductor.spv_module, node_number=4)
        relay_node = SPVNode(node_number=4)
        await relay_node.start(self.blockchain, extraconf={
            "COUNTRY": "FR",
            "PEER_HUBS": ",".join([kp_final_node_host, final_node_host])
            "country": "FR",
            "peer_hubs": ",".join([kp_final_node_host, final_node_host])
        })
        relay_node_host = f"{relay_node.hostname}:{relay_node.port}"
        self.addCleanup(relay_node.stop)

@ -186,7 +207,7 @@ class TestHubDiscovery(CommandTestCase):
            self.daemon.ledger.network.client.server_address_and_port, ('127.0.0.1', kp_final_node.port)
        )

        kp_final_node.server.session_mgr._notify_peer('127.0.0.1:9988')
        kp_final_node.server.session_manager._notify_peer('127.0.0.1:9988')
        await self.daemon.ledger.network.on_hub.first
        await asyncio.sleep(0.5)  # wait for above event to be processed by other listeners
        self.assertEqual(
@@ -12,7 +12,6 @@ from lbry.error import InsufficientFundsError
 from lbry.extras.daemon.daemon import DEFAULT_PAGE_SIZE
 from lbry.testcase import CommandTestCase
 from lbry.wallet.orchstr8.node import SPVNode
-from lbry.wallet.server.db.common import STREAM_TYPES
 from lbry.wallet.transaction import Transaction, Output
 from lbry.wallet.util import satoshis_to_coins as lbc
 from lbry.crypto.hash import sha256

@@ -20,6 +19,16 @@ from lbry.crypto.hash import sha256
 log = logging.getLogger(__name__)


+STREAM_TYPES = {
+    'video': 1,
+    'audio': 2,
+    'image': 3,
+    'document': 4,
+    'binary': 5,
+    'model': 6,
+}
+
+
 def verify(channel, data, signature, channel_hash=None):
     pieces = [
         signature['signing_ts'].encode(),
@@ -125,18 +134,6 @@ class ClaimSearchCommand(ClaimTestCase):
         with self.assertRaises(ConnectionResetError):
             await self.claim_search(claim_ids=claim_ids)

-    async def test_claim_search_as_reader_server(self):
-        node2 = SPVNode(self.conductor.spv_module, node_number=2)
-        current_prefix = self.conductor.spv_node.server.bp.env.es_index_prefix
-        await node2.start(self.blockchain, extraconf={'ES_MODE': 'reader', 'ES_INDEX_PREFIX': current_prefix})
-        self.addCleanup(node2.stop)
-        self.ledger.network.config['default_servers'] = [(node2.hostname, node2.port)]
-        await self.ledger.stop()
-        await self.ledger.start()
-        channel2 = await self.channel_create('@abc', '0.1', allow_duplicate_name=True)
-        await asyncio.sleep(1)  # fixme: find a way to block on the writer
-        await self.assertFindsClaims([channel2], name='@abc')
-
     async def test_basic_claim_search(self):
         await self.create_channel()
         channel_txo = self.channel['outputs'][0]
@@ -405,6 +402,18 @@ class ClaimSearchCommand(ClaimTestCase):
                          not_channel_ids=[chan2_id], has_channel_signature=True, valid_channel_signature=True)
         await match([], not_channel_ids=[chan1_id, chan2_id], has_channel_signature=True, valid_channel_signature=True)

+    @skip
+    async def test_no_source_and_valid_channel_signature_and_media_type(self):
+        await self.channel_create('@spam2', '1.0')
+        await self.stream_create('barrrrrr', '1.0', channel_name='@spam2', file_path=self.video_file_name)
+        paradox_no_source_claims = await self.claim_search(has_no_source=True, valid_channel_signature=True,
+                                                           media_type="video/mp4")
+        mp4_claims = await self.claim_search(media_type="video/mp4")
+        no_source_claims = await self.claim_search(has_no_source=True, valid_channel_signature=True)
+        self.assertEqual(0, len(paradox_no_source_claims))
+        self.assertEqual(1, len(no_source_claims))
+        self.assertEqual(1, len(mp4_claims))
+
     async def test_limit_claims_per_channel(self):
         match = self.assertFindsClaims
         chan1_id = self.get_claim_id(await self.channel_create('@chan1'))
@@ -494,8 +503,7 @@ class ClaimSearchCommand(ClaimTestCase):
         tx = await Transaction.claim_create(
             'unknown', b'{"sources":{"lbry_sd_hash":""}}', 1, address, [self.account], self.account)
         await tx.sign([self.account])
-        await self.broadcast(tx)
-        await self.confirm_tx(tx.id)
+        await self.broadcast_and_confirm(tx)

         octet = await self.stream_create()
         video = await self.stream_create('chrome', file_path=self.video_file_name)
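Several hunks in these test files replace the old two-step broadcast-then-confirm pattern with a single broadcast_and_confirm call. The helper itself is not part of this diff; a minimal sketch of the composition the hunks imply (body assumed, not taken from the repo) would be:

    async def broadcast_and_confirm(self, tx):
        # assumed: submit the signed tx, then mine/wait until it confirms,
        # exactly what the replaced broadcast()/confirm_tx() pair did
        await self.broadcast(tx)
        await self.confirm_tx(tx.id)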
@@ -1226,7 +1234,7 @@ class ChannelCommands(CommandTestCase):
         data_to_sign = "CAFEBABE"
         # claim new name
         await self.channel_create('@someotherchan')
-        channel_tx = await self.daemon.jsonrpc_channel_create('@signer', '0.1')
+        channel_tx = await self.daemon.jsonrpc_channel_create('@signer', '0.1', blocking=True)
         await self.confirm_tx(channel_tx.id)
         channel = channel_tx.outputs[0]
         signature1 = await self.out(self.daemon.jsonrpc_channel_sign(channel_name='@signer', hexdata=data_to_sign))
@@ -1373,7 +1381,7 @@ class StreamCommands(ClaimTestCase):
         self.assertEqual('8.989893', (await self.daemon.jsonrpc_account_balance())['available'])

         result = await self.out(self.daemon.jsonrpc_account_send(
-            '5.0', await self.daemon.jsonrpc_address_unused(account2_id)
+            '5.0', await self.daemon.jsonrpc_address_unused(account2_id), blocking=True
         ))
         await self.confirm_tx(result['txid'])

@@ -1514,10 +1522,13 @@ class StreamCommands(ClaimTestCase):
             await self.channel_create('@filtering', '0.1')
         )
-        self.conductor.spv_node.server.db.filtering_channel_hashes.add(bytes.fromhex(filtering_channel_id))
-        self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_streams))
-        await self.stream_repost(bad_content_id, 'filter1', '0.1', channel_name='@filtering')
-        self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_streams))
+        self.conductor.spv_node.es_writer.db.filtering_channel_hashes.add(bytes.fromhex(filtering_channel_id))
+
+        self.assertEqual(0, len(self.conductor.spv_node.es_writer.db.filtered_streams))
+        await self.stream_repost(bad_content_id, 'filter1', '0.1', channel_name='@filtering')
+        self.assertEqual(1, len(self.conductor.spv_node.es_writer.db.filtered_streams))
+
         self.assertEqual('0.1', (await self.out(self.daemon.jsonrpc_resolve('bad_content')))['bad_content']['amount'])
         # search for filtered content directly
         result = await self.out(self.daemon.jsonrpc_claim_search(name='bad_content'))
         blocked = result['blocked']
@@ -1560,14 +1571,14 @@ class StreamCommands(ClaimTestCase):
         )
         # test setting from env vars and starting from scratch
         await self.conductor.spv_node.stop(False)
-        await self.conductor.spv_node.start(self.conductor.blockchain_node,
-                                            extraconf={'BLOCKING_CHANNEL_IDS': blocking_channel_id,
-                                                       'FILTERING_CHANNEL_IDS': filtering_channel_id})
+        await self.conductor.spv_node.start(self.conductor.lbcwallet_node,
+                                            extraconf={'blocking_channel_ids': [blocking_channel_id],
+                                                       'filtering_channel_ids': [filtering_channel_id]})
         await self.daemon.wallet_manager.reset()

-        self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_streams))
+        self.assertEqual(0, len(self.conductor.spv_node.es_writer.db.blocked_streams))
         await self.stream_repost(bad_content_id, 'block1', '0.1', channel_name='@blocking')
-        self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_streams))
+        self.assertEqual(1, len(self.conductor.spv_node.es_writer.db.blocked_streams))

         # blocked content is not resolveable
         error = (await self.resolve('lbry://@some_channel/bad_content'))['error']
@@ -1626,6 +1637,11 @@ class StreamCommands(ClaimTestCase):
         self.assertEqual((await self.resolve('lbry://worse_content'))['error']['name'], 'BLOCKED')
         self.assertEqual((await self.resolve('lbry://@bad_channel/worse_content'))['error']['name'], 'BLOCKED')

+        await self.stream_update(worse_content_id, channel_name='@bad_channel', tags=['bad-stuff'])
+        self.assertEqual((await self.resolve('lbry://@bad_channel'))['error']['name'], 'BLOCKED')
+        self.assertEqual((await self.resolve('lbry://worse_content'))['error']['name'], 'BLOCKED')
+        self.assertEqual((await self.resolve('lbry://@bad_channel/worse_content'))['error']['name'], 'BLOCKED')
+
     async def test_publish_updates_file_list(self):
         tx = await self.stream_create(title='created')
         txo = tx['outputs'][0]
@@ -1651,6 +1667,7 @@ class StreamCommands(ClaimTestCase):
         self.assertEqual(tx['txid'], files[0]['txid'])
         self.assertEqual(expected, files[0]['metadata'])

+    @skip
     async def test_setting_stream_fields(self):
         values = {
             'title': "Cool Content",
@@ -1791,22 +1808,35 @@ class StreamCommands(ClaimTestCase):
         self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=self.account.id), 3)
         self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=account2_id), 1)

-        self.assertEqual(3, len(await self.claim_search(release_time='>0', order_by=['release_time'])))
-        self.assertEqual(3, len(await self.claim_search(release_time='>=0', order_by=['release_time'])))
+        self.assertEqual(4, len(await self.claim_search(release_time='>0', order_by=['release_time'])))
+        self.assertEqual(3, len(await self.claim_search(release_time='>0', order_by=['release_time'], claim_type='stream')))
+
+        self.assertEqual(4, len(await self.claim_search(release_time='>=0', order_by=['release_time'])))
         self.assertEqual(4, len(await self.claim_search(order_by=['release_time'])))
         self.assertEqual(3, len(await self.claim_search(claim_type='stream', order_by=['release_time'])))
         self.assertEqual(1, len(await self.claim_search(claim_type='channel', order_by=['release_time'])))
-        self.assertEqual(1, len(await self.claim_search(release_time='>=123456', order_by=['release_time'])))
-        self.assertEqual(1, len(await self.claim_search(release_time='>123456', order_by=['release_time'])))
-        self.assertEqual(2, len(await self.claim_search(release_time='<123457', order_by=['release_time'])))
+        self.assertEqual(2, len(await self.claim_search(release_time='>=123456', order_by=['release_time'])))

-        self.assertEqual(2, len(await self.claim_search(release_time=['<123457'], order_by=['release_time'])))
-        self.assertEqual(2, len(await self.claim_search(release_time=['>0', '<123457'], order_by=['release_time'])))
+        self.assertEqual(1, len(await self.claim_search(release_time='>=123456', order_by=['release_time'], claim_type='stream')))
+
+        self.assertEqual(1, len(await self.claim_search(release_time='>123456', order_by=['release_time'], claim_type='stream')))
+        self.assertEqual(2, len(await self.claim_search(release_time='>123456', order_by=['release_time'])))
+
+        self.assertEqual(3, len(await self.claim_search(release_time='<123457', order_by=['release_time'])))
+        self.assertEqual(2, len(await self.claim_search(release_time='<123457', order_by=['release_time'], claim_type='stream')))
+
+        self.assertEqual(2, len(await self.claim_search(release_time=['<123457'], order_by=['release_time'], claim_type='stream')))
+        self.assertEqual(3, len(await self.claim_search(release_time=['<123457'], order_by=['release_time'])))
+        self.assertEqual(3, len(await self.claim_search(release_time=['>0', '<123457'], order_by=['release_time'])))
+        self.assertEqual(2, len(await self.claim_search(release_time=['>0', '<123457'], order_by=['release_time'], claim_type='stream')))
+        self.assertEqual(3, len(await self.claim_search(release_time=['<123457'], order_by=['release_time'], height=['>0'])))
+        self.assertEqual(4, len(await self.claim_search(order_by=['release_time'], height=['>0'])))
+        self.assertEqual(4, len(await self.claim_search(order_by=['release_time'], height=['>0'], claim_type=['stream', 'channel'])))
         self.assertEqual(
-            2, len(await self.claim_search(release_time=['>=123097', '<123457'], order_by=['release_time']))
+            3, len(await self.claim_search(release_time=['>=123097', '<123457'], order_by=['release_time']))
         )
         self.assertEqual(
-            2, len(await self.claim_search(release_time=['<123457', '>0'], order_by=['release_time']))
+            3, len(await self.claim_search(release_time=['<123457', '>0'], order_by=['release_time']))
         )

     async def test_setting_fee_fields(self):
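The release_time assertions above exercise claim_search's range syntax: a range field accepts either one "<op><value>" string or a list of them that must all hold. Purely as an illustration (this helper is not SDK code), the constraint lists behave like:

    import operator

    OPS = {'>=': operator.ge, '<=': operator.le, '>': operator.gt, '<': operator.lt}

    def matches(value: int, constraints) -> bool:
        # ['>0', '<123457'] reads as: value > 0 AND value < 123457
        if isinstance(constraints, str):
            constraints = [constraints]
        def split(c):
            op = c[:2] if c[:2] in OPS else c[0]
            return OPS[op], int(c[len(op):])
        return all(op(value, bound) for op, bound in map(split, constraints))

    assert matches(123456, ['>0', '<123457'])
    assert not matches(123456, '>123456')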
@@ -2177,7 +2207,7 @@ class SupportCommands(CommandTestCase):
         tip = await self.out(
             self.daemon.jsonrpc_support_create(
                 claim_id, '1.0', True, account_id=account2.id, wallet_id='wallet2',
-                funding_account_ids=[account2.id])
+                funding_account_ids=[account2.id], blocking=True)
         )
         await self.confirm_tx(tip['txid'])

@@ -2209,7 +2239,7 @@ class SupportCommands(CommandTestCase):
         support = await self.out(
             self.daemon.jsonrpc_support_create(
                 claim_id, '2.0', False, account_id=account2.id, wallet_id='wallet2',
-                funding_account_ids=[account2.id])
+                funding_account_ids=[account2.id], blocking=True)
         )
         await self.confirm_tx(support['txid'])

@@ -1,3 +1,4 @@
+import unittest
 from unittest import skipIf
 import asyncio
 import os

@@ -36,8 +37,7 @@ class FileCommands(CommandTestCase):
             tx_to_update.outputs[0], claim, 1, address, [self.account], self.account
         )
         await tx.sign([self.account])
-        await self.broadcast(tx)
-        await self.confirm_tx(tx.id)
+        await self.broadcast_and_confirm(tx)
         self.client_session = self.daemon.file_manager.source_managers['torrent'].torrent_session
         self.client_session._session.add_dht_node(('localhost', 4040))
         self.client_session.wait_start = False  # fixme: this is super slow on tests
@@ -216,6 +216,7 @@ class FileCommands(CommandTestCase):
         await self.wait_files_to_complete()
         self.assertNotEqual(first_path, second_path)

+    @unittest.SkipTest  # FIXME: claimname/updateclaim is gone. #3480 wip, unblock #3479"
     async def test_file_list_updated_metadata_on_resolve(self):
         await self.stream_create('foo', '0.01')
         txo = (await self.daemon.resolve(self.wallet.accounts, ['lbry://foo']))['lbry://foo']
@@ -504,8 +505,7 @@ class FileCommands(CommandTestCase):
         tx.outputs[0].claim.stream.fee.address_bytes = b''
         tx.outputs[0].script.generate()
         await tx.sign([self.account])
-        await self.broadcast(tx)
-        await self.confirm_tx(tx.id)
+        await self.broadcast_and_confirm(tx)

     async def __raw_value_update_no_fee_amount(self, tx, claim_address):
         tx = await self.daemon.jsonrpc_stream_update(

@@ -515,8 +515,7 @@ class FileCommands(CommandTestCase):
         tx.outputs[0].claim.stream.fee.message.ClearField('amount')
         tx.outputs[0].script.generate()
         await tx.sign([self.account])
-        await self.broadcast(tx)
-        await self.confirm_tx(tx.id)
+        await self.broadcast_and_confirm(tx)


 class DiskSpaceManagement(CommandTestCase):
@@ -80,7 +80,7 @@ class EpicAdventuresOfChris45(CommandTestCase):

         # After some soul searching Chris decides that his story needs more
         # heart and a better ending. He takes down the story and begins the rewrite.
-        abandon = await self.out(self.daemon.jsonrpc_stream_abandon(claim_id, blocking=False))
+        abandon = await self.out(self.daemon.jsonrpc_stream_abandon(claim_id, blocking=True))
         self.assertEqual(abandon['inputs'][0]['claim_id'], claim_id)
         await self.confirm_tx(abandon['txid'])

@@ -103,7 +103,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
         # 1 LBC to which Chris readily obliges
         ramsey_account_id = (await self.out(self.daemon.jsonrpc_account_create("Ramsey")))['id']
         ramsey_address = await self.daemon.jsonrpc_address_unused(ramsey_account_id)
-        result = await self.out(self.daemon.jsonrpc_account_send('1.0', ramsey_address))
+        result = await self.out(self.daemon.jsonrpc_account_send('1.0', ramsey_address, blocking=True))
         self.assertIn("txid", result)
         await self.confirm_tx(result['txid'])

@@ -133,7 +133,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
         # And voila, and bravo and encore! His Best Friend Ramsey read the story and immediately knew this was a hit
         # Now to keep this claim winning on the lbry blockchain he immediately supports the claim
         tx = await self.out(self.daemon.jsonrpc_support_create(
-            claim_id2, '0.2', account_id=ramsey_account_id
+            claim_id2, '0.2', account_id=ramsey_account_id, blocking=True
         ))
         await self.confirm_tx(tx['txid'])

@@ -147,7 +147,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
         # Now he also wanted to support the original creator of the Award Winning Novel
         # So he quickly decides to send a tip to him
         tx = await self.out(
-            self.daemon.jsonrpc_support_create(claim_id2, '0.3', tip=True, account_id=ramsey_account_id)
+            self.daemon.jsonrpc_support_create(claim_id2, '0.3', tip=True, account_id=ramsey_account_id, blocking=True)
         )
         await self.confirm_tx(tx['txid'])

@@ -158,7 +158,7 @@ class EpicAdventuresOfChris45(CommandTestCase):
         await self.generate(5)

         # Seeing the ravishing success of his novel Chris adds support to his claim too
-        tx = await self.out(self.daemon.jsonrpc_support_create(claim_id2, '0.4'))
+        tx = await self.out(self.daemon.jsonrpc_support_create(claim_id2, '0.4', blocking=True))
         await self.confirm_tx(tx['txid'])

         # And check if his support showed up

@@ -183,7 +183,7 @@ class EpicAdventuresOfChris45(CommandTestCase):

         # But sadly Ramsey wasn't so pleased. It was hard for him to tell Chris...
         # Chris, though a bit heartbroken, abandoned the claim for now, but instantly started working on new hit lyrics
-        abandon = await self.out(self.daemon.jsonrpc_stream_abandon(txid=tx['txid'], nout=0, blocking=False))
+        abandon = await self.out(self.daemon.jsonrpc_stream_abandon(txid=tx['txid'], nout=0, blocking=True))
         self.assertTrue(abandon['inputs'][0]['txid'], tx['txid'])
         await self.confirm_tx(abandon['txid'])

@@ -1,6 +1,7 @@
 import asyncio
 import json
 import hashlib
+import sys
 from bisect import bisect_right
 from binascii import hexlify, unhexlify
 from collections import defaultdict

@@ -23,7 +24,7 @@ class BaseResolveTestCase(CommandTestCase):
     def assertMatchESClaim(self, claim_from_es, claim_from_db):
         self.assertEqual(claim_from_es['claim_hash'][::-1].hex(), claim_from_db.claim_hash.hex())
         self.assertEqual(claim_from_es['claim_id'], claim_from_db.claim_hash.hex())
-        self.assertEqual(claim_from_es['activation_height'], claim_from_db.activation_height)
+        self.assertEqual(claim_from_es['activation_height'], claim_from_db.activation_height, f"es height: {claim_from_es['activation_height']}, rocksdb height: {claim_from_db.activation_height}")
         self.assertEqual(claim_from_es['last_take_over_height'], claim_from_db.last_takeover_height)
         self.assertEqual(claim_from_es['tx_id'], claim_from_db.tx_hash[::-1].hex())
         self.assertEqual(claim_from_es['tx_nout'], claim_from_db.position)
@@ -31,125 +32,151 @@ class BaseResolveTestCase(CommandTestCase):
         self.assertEqual(claim_from_es['effective_amount'], claim_from_db.effective_amount)

     def assertMatchDBClaim(self, expected, claim):
-        self.assertEqual(expected['claimId'], claim.claim_hash.hex())
-        self.assertEqual(expected['validAtHeight'], claim.activation_height)
-        self.assertEqual(expected['lastTakeoverHeight'], claim.last_takeover_height)
-        self.assertEqual(expected['txId'], claim.tx_hash[::-1].hex())
+        self.assertEqual(expected['claimid'], claim.claim_hash.hex())
+        self.assertEqual(expected['validatheight'], claim.activation_height)
+        self.assertEqual(expected['lasttakeoverheight'], claim.last_takeover_height)
+        self.assertEqual(expected['txid'], claim.tx_hash[::-1].hex())
         self.assertEqual(expected['n'], claim.position)
         self.assertEqual(expected['amount'], claim.amount)
-        self.assertEqual(expected['effectiveAmount'], claim.effective_amount)
+        self.assertEqual(expected['effectiveamount'], claim.effective_amount)

     async def assertResolvesToClaimId(self, name, claim_id):
         other = await self.resolve(name)
         if claim_id is None:
             self.assertIn('error', other)
             self.assertEqual(other['error']['name'], 'NOT_FOUND')
-            claims_from_es = (await self.conductor.spv_node.server.bp.db.search_index.search(name=name))[0]
+            claims_from_es = (await self.conductor.spv_node.server.session_manager.search_index.search(name=name))[0]
             claims_from_es = [c['claim_hash'][::-1].hex() for c in claims_from_es]
             self.assertNotIn(claim_id, claims_from_es)
         else:
-            claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(claim_id=claim_id)
+            claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id)
             self.assertEqual(claim_id, other['claim_id'])
             self.assertEqual(claim_id, claim_from_es[0][0]['claim_hash'][::-1].hex())

     async def assertNoClaimForName(self, name: str):
-        lbrycrd_winning = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
-        stream, channel, _, _ = await self.conductor.spv_node.server.bp.db.resolve(name)
-        self.assertNotIn('claimId', lbrycrd_winning)
+        lbrycrd_winning = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name))
+        stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name)
+        if 'claims' in lbrycrd_winning and lbrycrd_winning['claims'] is not None:
+            self.assertEqual(len(lbrycrd_winning['claims']), 0)
         if stream is not None:
             self.assertIsInstance(stream, LookupError)
         else:
             self.assertIsInstance(channel, LookupError)
-        claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(name=name)
+        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(name=name)
         self.assertListEqual([], claim_from_es[0])

-    async def assertNoClaim(self, claim_id: str):
-        self.assertDictEqual(
-            {}, json.loads(await self.blockchain._cli_cmnd('getclaimbyid', claim_id))
-        )
-        claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(claim_id=claim_id)
+    async def assertNoClaim(self, name: str, claim_id: str):
+        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebyid', name, '["' + claim_id + '"]'))
+        if 'claims' in expected and expected['claims'] is not None:
+            # ensure that if we do have the matching claim that it is not active
+            self.assertEqual(expected['claims'][0]['effectiveamount'], 0)
+
+        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(claim_id=claim_id)
         self.assertListEqual([], claim_from_es[0])
-        claim = await self.conductor.spv_node.server.bp.db.fs_getclaimbyid(claim_id)
+        claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id)
         self.assertIsNone(claim)

     async def assertMatchWinningClaim(self, name):
-        expected = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))
-        stream, channel, _, _ = await self.conductor.spv_node.server.bp.db.resolve(name)
+        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebybid', name, "[0]"))
+        stream, channel, _, _ = await self.conductor.spv_node.server.db.resolve(name)
         claim = stream if stream else channel
-        await self._assertMatchClaim(expected, claim)
+        expected['claims'][0]['lasttakeoverheight'] = expected['lasttakeoverheight']
+        await self._assertMatchClaim(expected['claims'][0], claim)
         return claim

     async def _assertMatchClaim(self, expected, claim):
         self.assertMatchDBClaim(expected, claim)
-        claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
+        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
             claim_id=claim.claim_hash.hex()
         )
         self.assertEqual(len(claim_from_es[0]), 1)
         self.assertMatchESClaim(claim_from_es[0][0], claim)
-        self._check_supports(claim.claim_hash.hex(), expected['supports'], claim_from_es[0][0]['support_amount'])
+        self._check_supports(claim.claim_hash.hex(), expected.get('supports', []),
+                             claim_from_es[0][0]['support_amount'])

-    async def assertMatchClaim(self, claim_id, is_active_in_lbrycrd=True):
-        expected = json.loads(await self.blockchain._cli_cmnd('getclaimbyid', claim_id))
-        claim = await self.conductor.spv_node.server.bp.db.fs_getclaimbyid(claim_id)
-        if is_active_in_lbrycrd:
-            if not expected:
-                self.assertIsNone(claim)
-                return
-            self.assertMatchDBClaim(expected, claim)
-        else:
-            self.assertDictEqual({}, expected)
-        claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
+    async def assertMatchClaim(self, name, claim_id, is_active_in_lbrycrd=True):
+        claim = await self.conductor.spv_node.server.db.fs_getclaimbyid(claim_id)
+        claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
             claim_id=claim.claim_hash.hex()
         )
         self.assertEqual(len(claim_from_es[0]), 1)
         self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), claim.claim_hash.hex())
         self.assertMatchESClaim(claim_from_es[0][0], claim)
-        self._check_supports(
-            claim.claim_hash.hex(), expected.get('supports', []), claim_from_es[0][0]['support_amount'],
-            is_active_in_lbrycrd
-        )

+        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsfornamebyid', name, '["' + claim_id + '"]'))
+        if is_active_in_lbrycrd:
+            if not expected:
+                self.assertIsNone(claim)
+                return
+            expected['claims'][0]['lasttakeoverheight'] = expected['lasttakeoverheight']
+            self.assertMatchDBClaim(expected['claims'][0], claim)
+            self._check_supports(claim.claim_hash.hex(), expected['claims'][0].get('supports', []),
+                                 claim_from_es[0][0]['support_amount'])
+        else:
+            if 'claims' in expected and expected['claims'] is not None:
+                # ensure that if we do have the matching claim that it is not active
+                self.assertEqual(expected['claims'][0]['effectiveamount'], 0)
         return claim

     async def assertMatchClaimIsWinning(self, name, claim_id):
         self.assertEqual(claim_id, (await self.assertMatchWinningClaim(name)).claim_hash.hex())
         await self.assertMatchClaimsForName(name)

-    def _check_supports(self, claim_id, lbrycrd_supports, es_support_amount, is_active_in_lbrycrd=True):
-        total_amount = 0
-        db = self.conductor.spv_node.server.bp.db
+    def _check_supports(self, claim_id, lbrycrd_supports, es_support_amount):
+        total_lbrycrd_amount = 0.0
+        total_es_amount = 0.0
+        active_es_amount = 0.0
+        db = self.conductor.spv_node.server.db
+        es_supports = db.get_supports(bytes.fromhex(claim_id))

-        for i, (tx_num, position, amount) in enumerate(db.get_supports(bytes.fromhex(claim_id))):
-            total_amount += amount
-            if is_active_in_lbrycrd:
-                support = lbrycrd_supports[i]
-                self.assertEqual(support['txId'], db.prefix_db.tx_hash.get(tx_num, deserialize_value=False)[::-1].hex())
-                self.assertEqual(support['n'], position)
-                self.assertEqual(support['height'], bisect_right(db.tx_counts, tx_num))
-                self.assertEqual(support['validAtHeight'], db.get_activation(tx_num, position, is_support=True))
-        self.assertEqual(total_amount, es_support_amount, f"lbrycrd support amount: {total_amount} vs es: {es_support_amount}")
+        # we're only concerned about active supports here, and they should match
+        self.assertTrue(len(es_supports) >= len(lbrycrd_supports))
+
+        for i, (tx_num, position, amount) in enumerate(es_supports):
+            total_es_amount += amount
+            valid_height = db.get_activation(tx_num, position, is_support=True)
+            if valid_height > db.db_height:
+                continue
+            active_es_amount += amount
+            txid = db.prefix_db.tx_hash.get(tx_num, deserialize_value=False)[::-1].hex()
+            support = next(filter(lambda s: s['txid'] == txid and s['n'] == position, lbrycrd_supports))
+            total_lbrycrd_amount += support['amount']
+            self.assertEqual(support['height'], bisect_right(db.tx_counts, tx_num))
+            self.assertEqual(support['validatheight'], valid_height)
+
+        self.assertEqual(total_es_amount, es_support_amount)
+        self.assertEqual(active_es_amount, total_lbrycrd_amount)

     async def assertMatchClaimsForName(self, name):
-        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name))
-
-        db = self.conductor.spv_node.server.bp.db
-        # self.assertEqual(len(expected['claims']), len(db_claims.claims))
-        # self.assertEqual(expected['lastTakeoverHeight'], db_claims.lastTakeoverHeight)
-        last_takeover = json.loads(await self.blockchain._cli_cmnd('getvalueforname', name))['lastTakeoverHeight']
+        expected = json.loads(await self.blockchain._cli_cmnd('getclaimsforname', name, "", "true"))
+        db = self.conductor.spv_node.server.db

         for c in expected['claims']:
-            c['lastTakeoverHeight'] = last_takeover
-            claim_id = c['claimId']
+            c['lasttakeoverheight'] = expected['lasttakeoverheight']
+            claim_id = c['claimid']
             claim_hash = bytes.fromhex(claim_id)
             claim = db._fs_get_claim_by_hash(claim_hash)
             self.assertMatchDBClaim(c, claim)

-            claim_from_es = await self.conductor.spv_node.server.bp.db.search_index.search(
-                claim_id=c['claimId']
+            claim_from_es = await self.conductor.spv_node.server.session_manager.search_index.search(
+                claim_id=claim_id
             )
             self.assertEqual(len(claim_from_es[0]), 1)
-            self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), c['claimId'])
+            self.assertEqual(claim_from_es[0][0]['claim_hash'][::-1].hex(), claim_id)
             self.assertMatchESClaim(claim_from_es[0][0], claim)
-            self._check_supports(c['claimId'], c['supports'], claim_from_es[0][0]['support_amount'])
+            self._check_supports(claim_id, c.get('supports', []),
+                                 claim_from_es[0][0]['support_amount'])

+    async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int,
+                              non_winning_claims: List[ClaimStateValue]):
+        self.assertEqual(height, self.conductor.spv_node.server.db.db_height)
+        await self.assertMatchClaimIsWinning(name, winning_claim_id)
+        for non_winning in non_winning_claims:
+            claim = await self.assertMatchClaim(
+                name, non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd
+            )
+            self.assertEqual(non_winning.activation_height, claim.activation_height)
+            self.assertEqual(last_takeover_height, claim.last_takeover_height)


 class ResolveCommand(BaseResolveTestCase):
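The rewritten _check_supports above enforces a two-sided invariant: the hub's DB indexes every support, while lbcd reports only the currently active ones. As a plain-Python restatement (illustrative only, not repo code):

    def support_totals(es_supports, db_height, activation_height_of):
        # es_supports: (tx_num, position, amount) rows from the hub's DB
        total = active = 0
        for tx_num, position, amount in es_supports:
            total += amount
            if activation_height_of(tx_num, position) <= db_height:
                active += amount  # only these rows should appear in lbcd's list
        return total, active

    # expectation: total == the ES-indexed support_amount,
    #              active == the amount lbcd reports for the claim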
@@ -261,19 +288,20 @@ class ResolveCommand(BaseResolveTestCase):
         tx_details = await self.blockchain.get_raw_transaction(claim['txid'])
         self.assertEqual(claim['confirmations'], json.loads(tx_details)['confirmations'])

+        # FIXME : claimname/updateclaim is gone. #3480 wip, unblock #3479"
         # resolve handles invalid data
-        await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1")
-        await self.generate(1)
-        response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish"))
-        self.assertSetEqual({'lbry://gibberish'}, set(response))
-        claim = response['lbry://gibberish']
-        self.assertEqual(claim['name'], 'gibberish')
-        self.assertNotIn('value', claim)
+        # await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1")
+        # await self.generate(1)
+        # response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish"))
+        # self.assertSetEqual({'lbry://gibberish'}, set(response))
+        # claim = response['lbry://gibberish']
+        # self.assertEqual(claim['name'], 'gibberish')
+        # self.assertNotIn('value', claim)

         # resolve retries
         await self.conductor.spv_node.stop()
         resolve_task = asyncio.create_task(self.resolve('foo'))
-        await self.conductor.spv_node.start(self.conductor.blockchain_node)
+        await self.conductor.spv_node.start(self.conductor.lbcwallet_node)
         self.assertIsNotNone((await resolve_task)['claim_id'])

     async def test_winning_by_effective_amount(self):
@@ -443,16 +471,16 @@ class ResolveCommand(BaseResolveTestCase):
         self.assertEqual(one, claim6['name'])

     async def test_resolve_old_claim(self):
-        channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0')
+        channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0', blocking=True)
         await self.confirm_tx(channel.id)
         address = channel.outputs[0].get_address(self.account.ledger)
         claim = generate_signed_legacy(address, channel.outputs[0])
         tx = await Transaction.claim_create('example', claim.SerializeToString(), 1, address, [self.account], self.account)
         await tx.sign([self.account])
-        await self.broadcast(tx)
-        await self.confirm_tx(tx.id)
+        await self.broadcast_and_confirm(tx)

         response = await self.resolve('@olds/example')
         self.assertTrue('is_channel_signature_valid' in response, str(response))
         self.assertTrue(response['is_channel_signature_valid'])

         claim.publisherSignature.signature = bytes(reversed(claim.publisherSignature.signature))

@@ -460,8 +488,7 @@ class ResolveCommand(BaseResolveTestCase):
             'bad_example', claim.SerializeToString(), 1, address, [self.account], self.account
         )
         await tx.sign([self.account])
-        await self.broadcast(tx)
-        await self.confirm_tx(tx.id)
+        await self.broadcast_and_confirm(tx)

         response = await self.resolve('bad_example')
         self.assertFalse(response['is_channel_signature_valid'])
@@ -606,6 +633,12 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         self.assertDictEqual(await self.resolve('@other/signed4'),
                              await self.resolve('signed4'))

+        self.assertEqual(2, len(await self.claim_search(channel_ids=[channel_id2])))
+
+        await self.channel_update(channel_id2)
+        await make_claim('third_signed', '0.01', channel_id=channel_id2)
+        self.assertEqual(3, len(await self.claim_search(channel_ids=[channel_id2])))
+
     async def _test_activation_delay(self):
         name = 'derp'
         # initially claim the name

@@ -643,10 +676,10 @@ class ResolveClaimTakeovers(BaseResolveTestCase):

     async def assertNameState(self, height: int, name: str, winning_claim_id: str, last_takeover_height: int,
                               non_winning_claims: List[ClaimStateValue]):
-        self.assertEqual(height, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(height, self.conductor.spv_node.server.db.db_height)
         await self.assertMatchClaimIsWinning(name, winning_claim_id)
         for non_winning in non_winning_claims:
-            claim = await self.assertMatchClaim(
+            claim = await self.assertMatchClaim(name,
                 non_winning.claim_id, is_active_in_lbrycrd=non_winning.active_in_lbrycrd
             )
             self.assertEqual(non_winning.activation_height, claim.activation_height)
@@ -961,7 +994,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         )
         greater_than_or_equal_to_zero = [
             claim['claim_id'] for claim in (
-                await self.conductor.spv_node.server.bp.db.search_index.search(
+                await self.conductor.spv_node.server.session_manager.search_index.search(
                     channel_id=channel_id, fee_amount=">=0"
                 ))[0]
         ]

@@ -969,7 +1002,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         self.assertSetEqual(set(greater_than_or_equal_to_zero), {stream_with_no_fee, stream_with_fee})
         greater_than_zero = [
             claim['claim_id'] for claim in (
-                await self.conductor.spv_node.server.bp.db.search_index.search(
+                await self.conductor.spv_node.server.session_manager.search_index.search(
                     channel_id=channel_id, fee_amount=">0"
                 ))[0]
         ]

@@ -977,7 +1010,7 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         self.assertSetEqual(set(greater_than_zero), {stream_with_fee})
         equal_to_zero = [
             claim['claim_id'] for claim in (
-                await self.conductor.spv_node.server.bp.db.search_index.search(
+                await self.conductor.spv_node.server.session_manager.search_index.search(
                     channel_id=channel_id, fee_amount="<=0"
                 ))[0]
         ]
@@ -992,10 +1025,10 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         name = 'test'
         await self.generate(494)
         address = (await self.account.receiving.get_addresses(True))[0]
-        await self.blockchain.send_to_address(address, 400.0)
+        await self.send_to_address_and_wait(address, 400.0)
         await self.account.ledger.on_address.first
         await self.generate(100)
-        self.assertEqual(800, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(800, self.conductor.spv_node.server.db.db_height)

         # Block 801: Claim A for 10 LBC is accepted.
         # It is the first claim, so it immediately becomes active and controlling.

@@ -1007,10 +1040,10 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         # Its activation height is 1121 + min(4032, floor((1121-801) / 32)) = 1121 + 10 = 1131.
         # State: A(10) is controlling, B(20) is accepted.
         await self.generate(32 * 10 - 1)
-        self.assertEqual(1120, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1120, self.conductor.spv_node.server.db.db_height)
         claim_id_B = (await self.stream_create(name, '20.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
-        claim_B, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_B}")
-        self.assertEqual(1121, self.conductor.spv_node.server.bp.db.db_height)
+        claim_B, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_B}")
+        self.assertEqual(1121, self.conductor.spv_node.server.db.db_height)
         self.assertEqual(1131, claim_B.activation_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

@@ -1018,33 +1051,33 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         # Since it is a support for the controlling claim, it activates immediately.
         # State: A(10+14) is controlling, B(20) is accepted.
         await self.support_create(claim_id_A, bid='14.0')
-        self.assertEqual(1122, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1122, self.conductor.spv_node.server.db.db_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

         # Block 1123: Claim C for 50 LBC is accepted.
         # The activation height is 1123 + min(4032, floor((1123-801) / 32)) = 1123 + 10 = 1133.
         # State: A(10+14) is controlling, B(20) is accepted, C(50) is accepted.
         claim_id_C = (await self.stream_create(name, '50.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
-        self.assertEqual(1123, self.conductor.spv_node.server.bp.db.db_height)
-        claim_C, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_C}")
+        self.assertEqual(1123, self.conductor.spv_node.server.db.db_height)
+        claim_C, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_C}")
         self.assertEqual(1133, claim_C.activation_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

         await self.generate(7)
-        self.assertEqual(1130, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1130, self.conductor.spv_node.server.db.db_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)
         await self.generate(1)

         # Block 1131: Claim B activates. It has 20 LBC, while claim A has 24 LBC (10 original + 14 from support X). There is no takeover, and claim A remains controlling.
         # State: A(10+14) is controlling, B(20) is active, C(50) is accepted.
-        self.assertEqual(1131, self.conductor.spv_node.server.bp.db.db_height)
+        self.assertEqual(1131, self.conductor.spv_node.server.db.db_height)
         await self.assertMatchClaimIsWinning(name, claim_id_A)

         # Block 1132: Claim D for 300 LBC is accepted. The activation height is 1132 + min(4032, floor((1132-801) / 32)) = 1132 + 10 = 1142.
         # State: A(10+14) is controlling, B(20) is active, C(50) is accepted, D(300) is accepted.
         claim_id_D = (await self.stream_create(name, '300.0', allow_duplicate_name=True))['outputs'][0]['claim_id']
-        self.assertEqual(1132, self.conductor.spv_node.server.bp.db.db_height)
-        claim_D, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_D}")
+        self.assertEqual(1132, self.conductor.spv_node.server.db.db_height)
+        claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}")
         self.assertEqual(False, claim_D.is_controlling)
         self.assertEqual(801, claim_D.last_takeover_height)
         self.assertEqual(1142, claim_D.activation_height)

@@ -1053,8 +1086,8 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         # Block 1133: Claim C activates. It has 50 LBC, while claim A has 24 LBC, so a takeover is initiated. The takeover height for this name is set to 1133, and therefore the activation delay for all the claims becomes min(4032, floor((1133-1133) / 32)) = 0. All the claims become active. The totals for each claim are recalculated, and claim D becomes controlling because it has the highest total.
         # State: A(10+14) is active, B(20) is active, C(50) is active, D(300) is controlling
         await self.generate(1)
-        self.assertEqual(1133, self.conductor.spv_node.server.bp.db.db_height)
-        claim_D, _, _, _ = await self.conductor.spv_node.server.bp.db.resolve(f"{name}:{claim_id_D}")
+        self.assertEqual(1133, self.conductor.spv_node.server.db.db_height)
+        claim_D, _, _, _ = await self.conductor.spv_node.server.db.resolve(f"{name}:{claim_id_D}")
         self.assertEqual(True, claim_D.is_controlling)
         self.assertEqual(1133, claim_D.last_takeover_height)
         self.assertEqual(1133, claim_D.activation_height)
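The block-height comments in this test all apply the same activation-delay rule. A small self-checking sketch of that formula (function name is illustrative; the rule itself is quoted from the comments above):

    def activation_height(accepted_height: int, last_takeover_height: int) -> int:
        delay = min(4032, (accepted_height - last_takeover_height) // 32)
        return accepted_height + delay

    assert activation_height(1121, 801) == 1131   # claim B
    assert activation_height(1123, 801) == 1133   # claim C
    assert activation_height(1132, 801) == 1142   # claim D
    assert activation_height(1133, 1133) == 1133  # delay 0 after the takeover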
@@ -1327,15 +1360,15 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         await self.generate(8)
         await self.assertMatchClaimIsWinning(name, first_claim_id)
         # abandon the support that causes the winning claim to have the highest staked
-        tx = await self.daemon.jsonrpc_txo_spend(type='support', txid=controlling_support_tx.id)
+        tx = await self.daemon.jsonrpc_txo_spend(type='support', txid=controlling_support_tx.id, blocking=True)
         await self.generate(1)
         await self.assertMatchClaimIsWinning(name, first_claim_id)
-        # await self.assertMatchClaim(second_claim_id)

+        await self.assertNameState(538, name, first_claim_id, last_takeover_height=207, non_winning_claims=[
+            ClaimStateValue(second_claim_id, activation_height=539, active_in_lbrycrd=False)
+        ])
         await self.generate(1)

-        await self.assertMatchClaim(first_claim_id)
-        await self.assertMatchClaimIsWinning(name, second_claim_id)
+        await self.assertNameState(539, name, second_claim_id, last_takeover_height=539, non_winning_claims=[
+            ClaimStateValue(first_claim_id, activation_height=207, active_in_lbrycrd=True)
+        ])

     async def test_remove_controlling_support(self):
         name = 'derp'

@@ -1405,14 +1438,14 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
         await self.generate(32)

         second_claim_id = (await self.stream_create(name, '0.01', allow_duplicate_name=True))['outputs'][0]['claim_id']
-        await self.assertNoClaim(second_claim_id)
+        await self.assertNoClaim(name, second_claim_id)
         self.assertEqual(
-            len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 1
+            len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 1
         )
         await self.generate(1)
-        await self.assertMatchClaim(second_claim_id)
+        await self.assertMatchClaim(name, second_claim_id)
         self.assertEqual(
-            len((await self.conductor.spv_node.server.bp.db.search_index.search(claim_name=name))[0]), 2
+            len((await self.conductor.spv_node.server.session_manager.search_index.search(claim_name=name))[0]), 2
         )

     async def test_abandon_controlling_same_block_as_new_claim(self):
@@ -1428,35 +1461,47 @@ class ResolveClaimTakeovers(BaseResolveTestCase):

     async def test_trending(self):
         async def get_trending_score(claim_id):
-            return (await self.conductor.spv_node.server.bp.db.search_index.search(
+            return (await self.conductor.spv_node.server.session_manager.search_index.search(
                 claim_id=claim_id
             ))[0][0]['trending_score']

         claim_id1 = (await self.stream_create('derp', '1.0'))['outputs'][0]['claim_id']
-        COIN = 1E8
+        COIN = int(1E8)

-        height = 99000
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
-            claim_id1, height, 0, 10 * COIN
+        self.assertEqual(self.conductor.spv_node.writer.height, 207)
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+            (208, bytes.fromhex(claim_id1)), (0, 10 * COIN)
         )
         await self.generate(1)
-        self.assertEqual(172.64252836433135, await get_trending_score(claim_id1))
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
-            claim_id1, height + 1, 10 * COIN, 100 * COIN
+        self.assertEqual(self.conductor.spv_node.writer.height, 208)
+
+        self.assertEqual(1.7090807854206793, await get_trending_score(claim_id1))
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+            (209, bytes.fromhex(claim_id1)), (10 * COIN, 100 * COIN)
         )
         await self.generate(1)
-        self.assertEqual(173.45931832928875, await get_trending_score(claim_id1))
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
-            claim_id1, height + 100, 100 * COIN, 1000000 * COIN
+        self.assertEqual(self.conductor.spv_node.writer.height, 209)
+        self.assertEqual(2.2437974397778886, await get_trending_score(claim_id1))
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+            (309, bytes.fromhex(claim_id1)), (100 * COIN, 1000000 * COIN)
         )
         await self.generate(1)
-        self.assertEqual(176.65517070393514, await get_trending_score(claim_id1))
-        self.conductor.spv_node.server.bp._add_claim_activation_change_notification(
-            claim_id1, height + 200, 1000000 * COIN, 1 * COIN
+        await self.generate(100)
+        self.assertEqual(self.conductor.spv_node.writer.height, 309)
+        self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))
+
+        self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+            (409, bytes.fromhex(claim_id1)), (1000000 * COIN, 1 * COIN)
         )

+        await self.generate(99)
+        self.assertEqual(self.conductor.spv_node.writer.height, 408)
+        self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))

         await self.generate(1)
-        self.assertEqual(-174.951347102643, await get_trending_score(claim_id1))
-        search_results = (await self.conductor.spv_node.server.bp.db.search_index.search(claim_name="derp"))[0]
+        self.assertEqual(self.conductor.spv_node.writer.height, 409)
+
+        self.assertEqual(-3.4256156592205627, await get_trending_score(claim_id1))
+        search_results = (await self.conductor.spv_node.server.session_manager.search_index.search(claim_name="derp"))[0]
         self.assertEqual(1, len(search_results))
         self.assertListEqual([claim_id1], [c['claim_id'] for c in search_results])
@@ -1465,22 +1510,31 @@ class ResolveAfterReorg(BaseResolveTestCase):
     async def reorg(self, start):
         blocks = self.ledger.headers.height - start
         self.blockchain.block_expected = start - 1

-        prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
+        self.conductor.spv_node.server.synchronized.clear()

         # go back to start
         await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode())
         # go to previous + 1
-        await self.generate(blocks + 2)
+        await self.blockchain.generate(blocks + 2)

-        await prepare  # no guarantee that it didn't happen already, so start waiting from before calling generate
+        await self.conductor.spv_node.server.synchronized.wait()
+        # await asyncio.wait_for(self.on_header(self.blockchain.block_expected), 30.0)

     async def assertBlockHash(self, height):
-        bp = self.conductor.spv_node.server.bp
+        reader_db = self.conductor.spv_node.server.db
         block_hash = await self.blockchain.get_block_hash(height)

         self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode())
-        self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex())
+        self.assertEqual(block_hash, (await reader_db.fs_block_hashes(height, 1))[0][::-1].hex())
         txids = [
-            tx_hash[::-1].hex() for tx_hash in bp.db.get_block_txs(height)
+            tx_hash[::-1].hex() for tx_hash in reader_db.get_block_txs(height)
         ]
-        txs = await bp.db.get_transactions_and_merkles(txids)
-        block_txs = (await bp.daemon.deserialised_block(block_hash))['tx']
+        txs = await reader_db.get_transactions_and_merkles(txids)
+        block_txs = (await self.conductor.spv_node.server.daemon.deserialised_block(block_hash))['tx']
         self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions')
         self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are of order')
@@ -1491,9 +1545,18 @@ class ResolveAfterReorg(BaseResolveTestCase):
         channel_id = self.get_claim_id(
             await self.channel_create(channel_name, '0.01')
         )
-        self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex())
+
+        await self.assertNameState(
+            height=207, name='@abc', winning_claim_id=channel_id, last_takeover_height=207,
+            non_winning_claims=[]
+        )
+
         await self.reorg(206)
-        self.assertEqual(channel_id, (await self.assertMatchWinningClaim(channel_name)).claim_hash.hex())
+
+        await self.assertNameState(
+            height=208, name='@abc', winning_claim_id=channel_id, last_takeover_height=207,
+            non_winning_claims=[]
+        )

         # await self.assertNoClaimForName(channel_name)
         # self.assertNotIn('error', await self.resolve(channel_name))

@@ -1502,16 +1565,29 @@ class ResolveAfterReorg(BaseResolveTestCase):
         stream_id = self.get_claim_id(
             await self.stream_create(stream_name, '0.01', channel_id=channel_id)
         )
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+
+        await self.assertNameState(
+            height=209, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )
         await self.reorg(206)
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=210, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )

         await self.support_create(stream_id, '0.01')
         self.assertNotIn('error', await self.resolve(stream_name))
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+
+        await self.assertNameState(
+            height=211, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )
         await self.reorg(206)
         # self.assertNotIn('error', await self.resolve(stream_name))
-        self.assertEqual(stream_id, (await self.assertMatchWinningClaim(stream_name)).claim_hash.hex())
+        await self.assertNameState(
+            height=212, name=stream_name, winning_claim_id=stream_id, last_takeover_height=209,
+            non_winning_claims=[]
+        )

         await self.stream_abandon(stream_id)
         self.assertNotIn('error', await self.resolve(channel_name))
@@ -1553,7 +1629,6 @@ class ResolveAfterReorg(BaseResolveTestCase):
         await self.ledger.wait(broadcast_tx)
         await self.support_create(still_valid.outputs[0].claim_id, '0.01')

-        # await self.generate(1)
         await self.ledger.wait(broadcast_tx, self.blockchain.block_expected)
         self.assertEqual(self.ledger.headers.height, 208)
         await self.assertBlockHash(208)

@@ -1570,7 +1645,7 @@ class ResolveAfterReorg(BaseResolveTestCase):

         # reorg the last block dropping our claim tx
         await self.blockchain.invalidate_block(invalidated_block_hash)
-        await self.blockchain.clear_mempool()
+        await self.conductor.clear_mempool()
         await self.blockchain.generate(2)

         # wait for the client to catch up and verify the reorg

@@ -1603,7 +1678,7 @@ class ResolveAfterReorg(BaseResolveTestCase):
         await self.blockchain.generate(1)

         # wait for the client to catch up
-        await asyncio.wait_for(self.on_header(210), 1.0)
+        await asyncio.wait_for(self.on_header(210), 3.0)

         # verify the claim is in the new block and that it is returned by claim_search
         republished = await self.resolve('hovercraft')

@@ -1649,11 +1724,11 @@ class ResolveAfterReorg(BaseResolveTestCase):

         # reorg the last block dropping our claim tx
         await self.blockchain.invalidate_block(invalidated_block_hash)
-        await self.blockchain.clear_mempool()
+        await self.conductor.clear_mempool()
         await self.blockchain.generate(2)

         # wait for the client to catch up and verify the reorg
-        await asyncio.wait_for(self.on_header(209), 3.0)
+        await asyncio.wait_for(self.on_header(209), 30.0)
         await self.assertBlockHash(207)
         await self.assertBlockHash(208)
         await self.assertBlockHash(209)
@@ -21,9 +21,8 @@ class BasicTransactionTest(IntegrationTestCase):
             [asyncio.ensure_future(self.on_address_update(address1)),
              asyncio.ensure_future(self.on_address_update(address2))]
         ))
-        sendtxid1 = await self.blockchain.send_to_address(address1, 5)
-        sendtxid2 = await self.blockchain.send_to_address(address2, 5)
-        await self.blockchain.generate(1)
+        await self.send_to_address_and_wait(address1, 5)
+        await self.send_to_address_and_wait(address2, 5, 1)
        await notifications

         self.assertEqual(d2l(await self.account.get_balance()), '10.0')
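As with broadcast_and_confirm, these hunks fold a send plus generate/wait sequence into one send_to_address_and_wait helper. Its body is outside this diff; a plausible sketch consistent with the call sites (the trailing argument looks like a number of blocks to mine, which is an assumption, not verified):

    async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0):
        txid = await self.blockchain.send_to_address(address, amount)
        if blocks_to_generate:
            await self.generate(blocks_to_generate)
        await self.on_transaction_id(txid)  # wait until the wallet sees the tx
        return txid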
@@ -57,7 +56,7 @@ class BasicTransactionTest(IntegrationTestCase):
         notifications = asyncio.create_task(asyncio.wait(
             [asyncio.ensure_future(self.ledger.wait(channel_tx)), asyncio.ensure_future(self.ledger.wait(stream_tx))]
         ))
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await notifications
         self.assertEqual(d2l(await self.account.get_balance()), '7.985786')
         self.assertEqual(d2l(await self.account.get_balance(include_claims=True)), '9.985786')

@@ -70,7 +69,7 @@ class BasicTransactionTest(IntegrationTestCase):
         await self.broadcast(abandon_tx)
         await notify
         notify = asyncio.create_task(self.ledger.wait(abandon_tx))
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await notify

         response = await self.ledger.resolve([], ['lbry://@bar/foo'])
@@ -1,3 +1,5 @@
+import unittest
+
 from lbry.testcase import CommandTestCase


@@ -17,7 +19,7 @@ class TransactionCommandsTestCase(CommandTestCase):
     async def test_transaction_show(self):
         # local tx
         result = await self.out(self.daemon.jsonrpc_account_send(
-            '5.0', await self.daemon.jsonrpc_address_unused(self.account.id)
+            '5.0', await self.daemon.jsonrpc_address_unused(self.account.id), blocking=True
         ))
         await self.confirm_tx(result['txid'])
         tx = await self.daemon.jsonrpc_transaction_show(result['txid'])

@@ -38,10 +40,9 @@ class TransactionCommandsTestCase(CommandTestCase):
         self.assertFalse(result['success'])

     async def test_utxo_release(self):
-        sendtxid = await self.blockchain.send_to_address(
-            await self.account.receiving.get_or_create_usable_address(), 1
+        await self.send_to_address_and_wait(
+            await self.account.receiving.get_or_create_usable_address(), 1, 1
         )
-        await self.confirm_tx(sendtxid)
         await self.assertBalance(self.account, '11.0')
         await self.ledger.reserve_outputs(await self.account.get_utxos())
         await self.assertBalance(self.account, '0.0')

@@ -51,6 +52,7 @@ class TransactionCommandsTestCase(CommandTestCase):

 class TestSegwit(CommandTestCase):

+    @unittest.SkipTest
     async def test_segwit(self):
         p2sh_address1 = await self.blockchain.get_new_address(self.blockchain.P2SH_SEGWIT_ADDRESS)
         p2sh_address2 = await self.blockchain.get_new_address(self.blockchain.P2SH_SEGWIT_ADDRESS)
@ -64,14 +66,13 @@ class TestSegwit(CommandTestCase):
|
|||
p2sh_txid2 = await self.blockchain.send_to_address(p2sh_address2, '1.0')
|
||||
bech32_txid1 = await self.blockchain.send_to_address(bech32_address1, '1.0')
|
||||
bech32_txid2 = await self.blockchain.send_to_address(bech32_address2, '1.0')
|
||||
|
||||
await self.generate(1)
|
||||
|
||||
# P2SH & BECH32 can pay to P2SH address
|
||||
tx = await self.blockchain.create_raw_transaction([
|
||||
{"txid": p2sh_txid1, "vout": 0},
|
||||
{"txid": bech32_txid1, "vout": 0},
|
||||
], [{p2sh_address3: '1.9'}]
|
||||
], {p2sh_address3: 1.9}
|
||||
)
|
||||
tx = await self.blockchain.sign_raw_transaction_with_wallet(tx)
|
||||
p2sh_txid3 = await self.blockchain.send_raw_transaction(tx)
|
||||
|
@ -82,7 +83,7 @@ class TestSegwit(CommandTestCase):
|
|||
tx = await self.blockchain.create_raw_transaction([
|
||||
{"txid": p2sh_txid2, "vout": 0},
|
||||
{"txid": bech32_txid2, "vout": 0},
|
||||
], [{bech32_address3: '1.9'}]
|
||||
], {bech32_address3: 1.9}
|
||||
)
|
||||
tx = await self.blockchain.sign_raw_transaction_with_wallet(tx)
|
||||
bech32_txid3 = await self.blockchain.send_raw_transaction(tx)
|
||||
|
@ -94,12 +95,9 @@ class TestSegwit(CommandTestCase):
|
|||
tx = await self.blockchain.create_raw_transaction([
|
||||
{"txid": p2sh_txid3, "vout": 0},
|
||||
{"txid": bech32_txid3, "vout": 0},
|
||||
], [{address: '3.5'}]
|
||||
], {address: 3.5}
|
||||
)
|
||||
tx = await self.blockchain.sign_raw_transaction_with_wallet(tx)
|
||||
txid = await self.blockchain.send_raw_transaction(tx)
|
||||
await self.on_transaction_id(txid)
|
||||
await self.generate(1)
|
||||
await self.on_transaction_id(txid)
|
||||
|
||||
await self.generate_and_wait(1, [txid])
|
||||
await self.assertBalance(self.account, '13.5')
|
||||
|
|
|
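All three raw-transaction hunks make the same edit: outputs move from a list of single-entry dicts with string amounts (`[{addr: '1.9'}]`) to a single address-to-amount mapping with numeric values (`{addr: 1.9}`). That appears to match the createrawtransaction calling convention of lbcd, which these tests now run against. The call as now written, reusing the names from the first hunk:

tx_hex = await self.blockchain.create_raw_transaction(
    [{"txid": p2sh_txid1, "vout": 0}, {"txid": bech32_txid1, "vout": 0}],  # inputs
    {p2sh_address3: 1.9},  # outputs: one address -> amount object
)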
@@ -1,7 +1,7 @@
 import asyncio
 import random
-from itertools import chain

+import lbry.wallet.rpc.jsonrpc
 from lbry.wallet.transaction import Transaction, Output, Input
 from lbry.testcase import IntegrationTestCase
 from lbry.wallet.util import satoshis_to_coins, coins_to_satoshis

@@ -9,9 +9,8 @@ from lbry.wallet.manager import WalletManager


 class BasicTransactionTests(IntegrationTestCase):

     async def test_variety_of_transactions_and_longish_history(self):
-        await self.blockchain.generate(300)
+        await self.generate(300)
         await self.assertBalance(self.account, '0.0')
         addresses = await self.account.receiving.get_addresses()
@@ -19,10 +18,10 @@ class BasicTransactionTests(IntegrationTestCase):
         # to the 10th receiving address for a total of 30 UTXOs on the entire account
         for i in range(10):
             notification = asyncio.ensure_future(self.on_address_update(addresses[i]))
-            txid = await self.blockchain.send_to_address(addresses[i], 10)
+            _ = await self.send_to_address_and_wait(addresses[i], 10)
             await notification
         notification = asyncio.ensure_future(self.on_address_update(addresses[9]))
-        txid = await self.blockchain.send_to_address(addresses[9], 10)
+        _ = await self.send_to_address_and_wait(addresses[9], 10)
         await notification

         # use batching to reduce issues with send_to_address on cli
@@ -57,7 +56,7 @@ class BasicTransactionTests(IntegrationTestCase):
             for tx in await self.ledger.db.get_transactions(txid__in=[tx.id for tx in txs])
         ]))

-        await self.blockchain.generate(1)
+        await self.generate(1)
         await asyncio.wait([self.ledger.wait(tx) for tx in txs])
         await self.assertBalance(self.account, '199.99876')

@@ -74,7 +73,7 @@ class BasicTransactionTests(IntegrationTestCase):
         )
         await self.broadcast(tx)
         await self.ledger.wait(tx)
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await self.ledger.wait(tx)

         self.assertEqual(2, await self.account.get_utxo_count())  # 199 + change

@@ -88,12 +87,10 @@ class BasicTransactionTests(IntegrationTestCase):
         await self.assertBalance(account2, '0.0')

         addresses = await account1.receiving.get_addresses()
-        txids = await asyncio.gather(*(
-            self.blockchain.send_to_address(address, 1.1) for address in addresses[:5]
-        ))
-        await asyncio.wait([self.on_transaction_id(txid) for txid in txids])  # mempool
-        await self.blockchain.generate(1)
-        await asyncio.wait([self.on_transaction_id(txid) for txid in txids])  # confirmed
+        txids = []
+        for address in addresses[:5]:
+            txids.append(await self.send_to_address_and_wait(address, 1.1))
+        await self.generate_and_wait(1, txids)
         await self.assertBalance(account1, '5.5')
         await self.assertBalance(account2, '0.0')

@@ -107,7 +104,7 @@ class BasicTransactionTests(IntegrationTestCase):
         )
         await self.broadcast(tx)
         await self.ledger.wait(tx)  # mempool
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await self.ledger.wait(tx)  # confirmed

         await self.assertBalance(account1, '3.499802')

@@ -121,7 +118,7 @@ class BasicTransactionTests(IntegrationTestCase):
         )
         await self.broadcast(tx)
         await self.ledger.wait(tx)  # mempool
-        await self.blockchain.generate(1)
+        await self.generate(1)
         await self.ledger.wait(tx)  # confirmed

         tx = (await account1.get_transactions(include_is_my_input=True, include_is_my_output=True))[1]
@@ -133,11 +130,11 @@ class BasicTransactionTests(IntegrationTestCase):
         self.assertTrue(tx.outputs[1].is_internal_transfer)

     async def test_history_edge_cases(self):
-        await self.blockchain.generate(300)
+        await self.generate(300)
         await self.assertBalance(self.account, '0.0')
         address = await self.account.receiving.get_or_create_usable_address()
         # evil trick: mempool is unsorted on real life, but same order between python instances. reproduce it
-        original_summary = self.conductor.spv_node.server.bp.mempool.transaction_summaries
+        original_summary = self.conductor.spv_node.server.mempool.transaction_summaries

         def random_summary(*args, **kwargs):
             summary = original_summary(*args, **kwargs)

@@ -146,13 +143,10 @@ class BasicTransactionTests(IntegrationTestCase):
             while summary == ordered:
                 random.shuffle(summary)
             return summary
-        self.conductor.spv_node.server.bp.mempool.transaction_summaries = random_summary
+        self.conductor.spv_node.server.mempool.transaction_summaries = random_summary
         # 10 unconfirmed txs, all from blockchain wallet
-        sends = [self.blockchain.send_to_address(address, 10) for _ in range(10)]
-        # use batching to reduce issues with send_to_address on cli
-        for batch in range(0, len(sends), 10):
-            txids = await asyncio.gather(*sends[batch:batch + 10])
-            await asyncio.wait([self.on_transaction_id(txid) for txid in txids])
+        for i in range(10):
+            await self.send_to_address_and_wait(address, 10)
         remote_status = await self.ledger.network.subscribe_address(address)
         self.assertTrue(await self.ledger.update_history(address, remote_status))
         # 20 unconfirmed txs, 10 from blockchain, 10 from local to local
@@ -170,8 +164,7 @@ class BasicTransactionTests(IntegrationTestCase):
         remote_status = await self.ledger.network.subscribe_address(address)
         self.assertTrue(await self.ledger.update_history(address, remote_status))
         # server history grows unordered
-        txid = await self.blockchain.send_to_address(address, 1)
-        await self.on_transaction_id(txid)
+        await self.send_to_address_and_wait(address, 1)
         self.assertTrue(await self.ledger.update_history(address, remote_status))
         self.assertEqual(21, len((await self.ledger.get_local_status_and_history(address))[1]))
         self.assertEqual(0, len(self.ledger._known_addresses_out_of_sync))
@@ -195,37 +188,37 @@ class BasicTransactionTests(IntegrationTestCase):
             self.ledger, 2000000000000, [self.account], set_reserved=False, return_insufficient_funds=True
         )
         got_amounts = [estimator.effective_amount for estimator in spendable]
-        self.assertListEqual(amounts, got_amounts)
+        self.assertListEqual(sorted(amounts), sorted(got_amounts))

     async def test_sqlite_coin_chooser(self):
         wallet_manager = WalletManager([self.wallet], {self.ledger.get_id(): self.ledger})
-        await self.blockchain.generate(300)
+        await self.generate(300)

         await self.assertBalance(self.account, '0.0')
         address = await self.account.receiving.get_or_create_usable_address()
         other_account = self.wallet.generate_account(self.ledger)
         other_address = await other_account.receiving.get_or_create_usable_address()
         self.ledger.coin_selection_strategy = 'sqlite'
         await self.ledger.subscribe_account(self.account)
         await self.ledger.subscribe_account(other_account)

         accepted = asyncio.ensure_future(self.on_address_update(address))
-        txid = await self.blockchain.send_to_address(address, 1.0)
+        _ = await self.send_to_address_and_wait(address, 1.0)
         await accepted

         accepted = asyncio.ensure_future(self.on_address_update(address))
-        txid = await self.blockchain.send_to_address(address, 1.0)
+        _ = await self.send_to_address_and_wait(address, 1.0)
         await accepted

         accepted = asyncio.ensure_future(self.on_address_update(address))
-        txid = await self.blockchain.send_to_address(address, 3.0)
+        _ = await self.send_to_address_and_wait(address, 3.0)
         await accepted

         accepted = asyncio.ensure_future(self.on_address_update(address))
-        txid = await self.blockchain.send_to_address(address, 5.0)
+        _ = await self.send_to_address_and_wait(address, 5.0)
         await accepted

         accepted = asyncio.ensure_future(self.on_address_update(address))
-        txid = await self.blockchain.send_to_address(address, 10.0)
+        _ = await self.send_to_address_and_wait(address, 10.0)
         await accepted

         await self.assertBalance(self.account, '20.0')
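The five funding blocks above differ only in the amount sent; a compact equivalent of the repeated pattern (illustrative shorthand, not part of the diff):

for amount in (1.0, 1.0, 3.0, 5.0, 10.0):
    accepted = asyncio.ensure_future(self.on_address_update(address))
    await self.send_to_address_and_wait(address, amount)
    await accepted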
@@ -266,6 +259,12 @@ class BasicTransactionTests(IntegrationTestCase):
         async def broadcast(tx):
             try:
                 return await real_broadcast(tx)
+            except lbry.wallet.rpc.jsonrpc.RPCError as err:
+                # this is expected in tests where we try to double spend.
+                if 'the transaction was rejected by network rules.' in str(err):
+                    pass
+                else:
+                    raise err
             finally:
                 e.set()
@@ -1,60 +0,0 @@
-import time
-import unittest
-from lbry.wallet.server.metrics import ServerLoadData, calculate_avg_percentiles
-
-
-class TestPercentileCalculation(unittest.TestCase):
-
-    def test_calculate_percentiles(self):
-        self.assertEqual(calculate_avg_percentiles([]), (0, 0, 0, 0, 0, 0, 0, 0))
-        self.assertEqual(calculate_avg_percentiles([1]), (1, 1, 1, 1, 1, 1, 1, 1))
-        self.assertEqual(calculate_avg_percentiles([1, 2]), (1, 1, 1, 1, 1, 2, 2, 2))
-        self.assertEqual(calculate_avg_percentiles([1, 2, 3]), (2, 1, 1, 1, 2, 3, 3, 3))
-        self.assertEqual(calculate_avg_percentiles([4, 1, 2, 3]), (2, 1, 1, 1, 2, 3, 4, 4))
-        self.assertEqual(calculate_avg_percentiles([1, 2, 3, 4, 5, 6]), (3, 1, 1, 2, 3, 5, 6, 6))
-        self.assertEqual(calculate_avg_percentiles(
-            list(range(1, 101))), (50, 1, 5, 25, 50, 75, 95, 100))
-
-
-class TestCollectingMetrics(unittest.TestCase):
-
-    def test_happy_path(self):
-        self.maxDiff = None
-        load = ServerLoadData()
-        search = load.for_api('search')
-        self.assertEqual(search.name, 'search')
-        search.start()
-        search.cache_response()
-        search.cache_response()
-        metrics = {
-            'search': [{'total': 40}],
-            'execute_query': [
-                {'total': 20},
-                {'total': 10}
-            ]
-        }
-        for x in range(5):
-            search.query_response(time.perf_counter() - 0.055 + 0.001*x, metrics)
-        metrics['execute_query'][0]['total'] = 10
-        metrics['execute_query'][0]['sql'] = "select lots, of, stuff FROM claim where something=1"
-        search.query_interrupt(time.perf_counter() - 0.050, metrics)
-        search.query_error(time.perf_counter() - 0.050, metrics)
-        search.query_error(time.perf_counter() - 0.052, {})
-        self.assertEqual(load.to_json_and_reset({}), {'status': {}, 'api': {'search': {
-            "receive_count": 1,
-            "cache_response_count": 2,
-            "query_response_count": 5,
-            "intrp_response_count": 1,
-            "error_response_count": 2,
-            "response": (53, 51, 51, 52, 53, 54, 55, 55),
-            "interrupt": (50, 50, 50, 50, 50, 50, 50, 50),
-            "error": (51, 50, 50, 50, 50, 52, 52, 52),
-            "python": (12, 10, 10, 10, 10, 20, 20, 20),
-            "wait": (12, 10, 10, 10, 12, 14, 15, 15),
-            "sql": (27, 20, 20, 20, 30, 30, 30, 30),
-            "individual_sql": (13, 10, 10, 10, 10, 20, 20, 20),
-            "individual_sql_count": 14,
-            "errored_queries": ['FROM claim where something=1'],
-            "interrupted_queries": ['FROM claim where something=1'],
-        }}})
-        self.assertEqual(load.to_json_and_reset({}), {'status': {}, 'api': {}})
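The deleted assertions pin down what `calculate_avg_percentiles` computed: (mean, min, 5th, 25th, 50th, 75th, 95th percentile, max) over a list of integer timings, using ceiling-style percentile indices. A reconstruction that satisfies every assertion above (inferred from the tests, not copied from scribe, where the function now lives):

def calculate_avg_percentiles(data):
    if not data:
        return 0, 0, 0, 0, 0, 0, 0, 0
    data.sort()
    size = len(data)

    def pct(p):
        # index of the p-th percentile: ceil(size * p / 100) - 1, clamped to 0
        return data[max(0, (size * p + 99) // 100 - 1)]

    return (
        int(sum(data) / size),  # truncated mean
        data[0],                # min
        pct(5), pct(25), pct(50), pct(75), pct(95),
        data[-1],               # max
    )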
@@ -1,153 +0,0 @@
-import unittest
-import tempfile
-import shutil
-from lbry.wallet.server.db.revertable import RevertableOpStack, RevertableDelete, RevertablePut, OpStackIntegrity
-from lbry.wallet.server.db.prefixes import ClaimToTXOPrefixRow, HubDB
-
-
-class TestRevertableOpStack(unittest.TestCase):
-    def setUp(self):
-        self.fake_db = {}
-        self.stack = RevertableOpStack(self.fake_db.get)
-
-    def tearDown(self) -> None:
-        self.stack.clear()
-        self.fake_db.clear()
-
-    def process_stack(self):
-        for op in self.stack:
-            if op.is_put:
-                self.fake_db[op.key] = op.value
-            else:
-                self.fake_db.pop(op.key)
-        self.stack.clear()
-
-    def update(self, key1: bytes, value1: bytes, key2: bytes, value2: bytes):
-        self.stack.append_op(RevertableDelete(key1, value1))
-        self.stack.append_op(RevertablePut(key2, value2))
-
-    def test_simplify(self):
-        key1 = ClaimToTXOPrefixRow.pack_key(b'\x01' * 20)
-        key2 = ClaimToTXOPrefixRow.pack_key(b'\x02' * 20)
-        key3 = ClaimToTXOPrefixRow.pack_key(b'\x03' * 20)
-        key4 = ClaimToTXOPrefixRow.pack_key(b'\x04' * 20)
-
-        val1 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'derp')
-        val2 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'oops')
-        val3 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'other')
-
-        # check that we can't delete a non existent value
-        with self.assertRaises(OpStackIntegrity):
-            self.stack.append_op(RevertableDelete(key1, val1))
-
-        self.stack.append_op(RevertablePut(key1, val1))
-        self.assertEqual(1, len(self.stack))
-        self.stack.append_op(RevertableDelete(key1, val1))
-        self.assertEqual(0, len(self.stack))
-
-        self.stack.append_op(RevertablePut(key1, val1))
-        self.assertEqual(1, len(self.stack))
-        # try to delete the wrong value
-        with self.assertRaises(OpStackIntegrity):
-            self.stack.append_op(RevertableDelete(key2, val2))
-
-        self.stack.append_op(RevertableDelete(key1, val1))
-        self.assertEqual(0, len(self.stack))
-        self.stack.append_op(RevertablePut(key2, val3))
-        self.assertEqual(1, len(self.stack))
-
-        self.process_stack()
-
-        self.assertDictEqual({key2: val3}, self.fake_db)
-
-        # check that we can't put on top of the existing stored value
-        with self.assertRaises(OpStackIntegrity):
-            self.stack.append_op(RevertablePut(key2, val1))
-
-        self.assertEqual(0, len(self.stack))
-        self.stack.append_op(RevertableDelete(key2, val3))
-        self.assertEqual(1, len(self.stack))
-        self.stack.append_op(RevertablePut(key2, val3))
-        self.assertEqual(0, len(self.stack))
-
-        self.update(key2, val3, key2, val1)
-        self.assertEqual(2, len(self.stack))
-
-        self.process_stack()
-        self.assertDictEqual({key2: val1}, self.fake_db)
-
-        self.update(key2, val1, key2, val2)
-        self.assertEqual(2, len(self.stack))
-        self.update(key2, val2, key2, val3)
-        self.update(key2, val3, key2, val2)
-        self.update(key2, val2, key2, val3)
-        self.update(key2, val3, key2, val2)
-        with self.assertRaises(OpStackIntegrity):
-            self.update(key2, val3, key2, val2)
-        self.update(key2, val2, key2, val3)
-        self.assertEqual(2, len(self.stack))
-        self.stack.append_op(RevertableDelete(key2, val3))
-        self.process_stack()
-        self.assertDictEqual({}, self.fake_db)
-
-        self.stack.append_op(RevertablePut(key2, val3))
-        self.process_stack()
-        with self.assertRaises(OpStackIntegrity):
-            self.update(key2, val2, key2, val2)
-        self.update(key2, val3, key2, val2)
-        self.assertDictEqual({key2: val3}, self.fake_db)
-        undo = self.stack.get_undo_ops()
-        self.process_stack()
-        self.assertDictEqual({key2: val2}, self.fake_db)
-        self.stack.apply_packed_undo_ops(undo)
-        self.process_stack()
-        self.assertDictEqual({key2: val3}, self.fake_db)
-
-
-class TestRevertablePrefixDB(unittest.TestCase):
-    def setUp(self):
-        self.tmp_dir = tempfile.mkdtemp()
-        self.db = HubDB(self.tmp_dir, cache_mb=1, max_open_files=32)
-
-    def tearDown(self) -> None:
-        self.db.close()
-        shutil.rmtree(self.tmp_dir)
-
-    def test_rollback(self):
-        name = 'derp'
-        claim_hash1 = 20 * b'\x00'
-        claim_hash2 = 20 * b'\x01'
-        claim_hash3 = 20 * b'\x02'
-
-        takeover_height = 10000000
-
-        self.assertIsNone(self.db.claim_takeover.get(name))
-        self.db.claim_takeover.stage_put((name,), (claim_hash1, takeover_height))
-        self.assertIsNone(self.db.claim_takeover.get(name))
-        self.assertEqual(10000000, self.db.claim_takeover.get_pending(name).height)
-
-        self.db.commit(10000000)
-        self.assertEqual(10000000, self.db.claim_takeover.get(name).height)
-
-        self.db.claim_takeover.stage_delete((name,), (claim_hash1, takeover_height))
-        self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 1))
-        self.db.claim_takeover.stage_delete((name,), (claim_hash2, takeover_height + 1))
-        self.db.commit(10000001)
-        self.assertIsNone(self.db.claim_takeover.get(name))
-        self.db.claim_takeover.stage_put((name,), (claim_hash3, takeover_height + 2))
-        self.db.commit(10000002)
-        self.assertEqual(10000002, self.db.claim_takeover.get(name).height)
-
-        self.db.claim_takeover.stage_delete((name,), (claim_hash3, takeover_height + 2))
-        self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 3))
-        self.db.commit(10000003)
-        self.assertEqual(10000003, self.db.claim_takeover.get(name).height)
-
-        self.db.rollback(10000003)
-        self.assertEqual(10000002, self.db.claim_takeover.get(name).height)
-        self.db.rollback(10000002)
-        self.assertIsNone(self.db.claim_takeover.get(name))
-        self.db.rollback(10000001)
-        self.assertEqual(10000000, self.db.claim_takeover.get(name).height)
-        self.db.rollback(10000000)
-        self.assertIsNone(self.db.claim_takeover.get(name))
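The deleted `RevertableOpStack` tests encode three invariants: a put followed by a delete of the same key/value (or vice versa) cancels out of the stack, ops that disagree with the backing store raise `OpStackIntegrity`, and the undo ops are the inverses applied in reverse. A toy model of just those invariants; the real class, now maintained in scribe, also handles packed encodings and batching:

class OpStackIntegrity(Exception):
    pass


class MiniOpStack:
    def __init__(self, get):
        self.get = get   # reads the backing store, e.g. dict.get
        self.ops = []    # pending ('put' | 'del', key, value) tuples

    def __len__(self):
        return len(self.ops)

    def put(self, key, value):
        if self.ops and self.ops[-1] == ('del', key, value):
            self.ops.pop()  # delete followed by an identical put cancels out
        elif self.get(key) is not None and ('del', key, self.get(key)) not in self.ops:
            raise OpStackIntegrity(f"put on top of existing value for {key!r}")
        else:
            self.ops.append(('put', key, value))

    def delete(self, key, value):
        if self.ops and self.ops[-1] == ('put', key, value):
            self.ops.pop()  # put followed by an identical delete cancels out
        elif self.get(key) != value and ('put', key, value) not in self.ops:
            raise OpStackIntegrity(f"delete of a value not present for {key!r}")
        else:
            self.ops.append(('del', key, value))

    def undo_ops(self):
        # inverses in reverse order restore the state before the batch
        return [('del' if kind == 'put' else 'put', key, value)
                for kind, key, value in reversed(self.ops)]

Mirroring the setUp above, `fake_db = {}` and `stack = MiniOpStack(fake_db.get)` reproduce the put/delete cancellation and the two integrity errors, though not the full update gauntlet.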
tox.ini

@@ -3,6 +3,7 @@ deps =
     coverage
 extras =
     test
+    scribe
     torrent
 changedir = {toxinidir}/tests
 setenv =