forked from LBRYCommunity/lbry-sdk
Merge pull request #3205 from lbryio/leveldb-resolve
drop sqlite in the hub and make resolve handle reorgs
This commit is contained in: commit bc6822e397
63 changed files with 7265 additions and 6290 deletions
.github/workflows/main.yml (6 changes)
@@ -78,7 +78,11 @@ jobs:
        test:
          - datanetwork
          - blockchain
          - blockchain_legacy_search
          - claims
          - takeovers
          - transactions
          - claims_legacy_search
          - takeovers_legacy_search
          - other
    steps:
      - name: Configure sysctl limits

.gitignore (2 changes)
@@ -13,7 +13,7 @@ __pycache__
_trial_temp/
trending*.log

/tests/integration/blockchain/files
/tests/integration/claims/files
/tests/.coverage.*

/lbry/wallet/bin

@@ -252,9 +252,10 @@ class ResolveTimeoutError(WalletError):


class ResolveCensoredError(WalletError):

    def __init__(self, url, censor_id):
    def __init__(self, url, censor_id, censor_row):
        self.url = url
        self.censor_id = censor_id
        self.censor_row = censor_row
        super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")

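The extra constructor argument means callers now pass the row that triggered the block along with the censoring channel id. A minimal hypothetical raise site (the variable names are stand-ins, not from this diff):

    # Hypothetical: `censoring_channel_id` and `blocking_row` stand in for
    # whatever the resolver has on hand when it detects a censored claim.
    raise ResolveCensoredError(url, censoring_channel_id, censor_row=blocking_row)
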
@@ -2282,7 +2282,7 @@ class Daemon(metaclass=JSONRPCServerType):
        accounts = wallet.get_accounts_or_all(funding_account_ids)
        txo = None
        if claim_id:
            txo = await self.ledger.get_claim_by_claim_id(accounts, claim_id, include_purchase_receipt=True)
            txo = await self.ledger.get_claim_by_claim_id(claim_id, accounts, include_purchase_receipt=True)
            if not isinstance(txo, Output) or not txo.is_claim:
                # TODO: use error from lbry.error
                raise Exception(f"Could not find claim with claim_id '{claim_id}'.")

@@ -3616,7 +3616,7 @@ class Daemon(metaclass=JSONRPCServerType):
        claim_address = old_txo.get_address(account.ledger)

        channel = None
        if channel_id or channel_name:
        if not clear_channel and (channel_id or channel_name):
            channel = await self.get_channel_or_error(
                wallet, channel_account_id, channel_id, channel_name, for_signing=True)
        elif old_txo.claim.is_signed and not clear_channel and not replace:

@@ -3646,11 +3646,13 @@ class Daemon(metaclass=JSONRPCServerType):
        else:
            claim = Claim.from_bytes(old_txo.claim.to_bytes())
            claim.stream.update(file_path=file_path, **kwargs)
        if clear_channel:
            claim.clear_signature()
        tx = await Transaction.claim_update(
            old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
            old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0],
            channel if not clear_channel else None
        )
        new_txo = tx.outputs[0]

        stream_hash = None
        if not preview:
            old_stream = self.file_manager.get_filtered(sd_hash=old_txo.claim.stream.source.sd_hash)

@@ -4148,7 +4150,7 @@ class Daemon(metaclass=JSONRPCServerType):
        wallet = self.wallet_manager.get_wallet_or_default(wallet_id)

        if claim_id:
            txo = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
            txo = await self.ledger.get_claim_by_claim_id(claim_id, wallet.accounts)
            if not isinstance(txo, Output) or not txo.is_claim:
                # TODO: use error from lbry.error
                raise Exception(f"Could not find collection with claim_id '{claim_id}'.")

@@ -4215,7 +4217,7 @@ class Daemon(metaclass=JSONRPCServerType):
        funding_accounts = wallet.get_accounts_or_all(funding_account_ids)
        channel = await self.get_channel_or_none(wallet, channel_account_id, channel_id, channel_name, for_signing=True)
        amount = self.get_dewies_or_error("amount", amount)
        claim = await self.ledger.get_claim_by_claim_id(wallet.accounts, claim_id)
        claim = await self.ledger.get_claim_by_claim_id(claim_id)
        claim_address = claim.get_address(self.ledger)
        if not tip:
            account = wallet.get_account_or_default(account_id)

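The paired old/new call sites in the daemon hunks above reflect a reordering of `get_claim_by_claim_id` so that the claim id comes first and the account list becomes optional. A minimal sketch of the assumed new call shape, taken only from the call sites changed in this diff (not verified against the full ledger code):

    # Assumed new ledger call shape, inferred from the changed call sites only.
    txo = await self.ledger.get_claim_by_claim_id(claim_id, wallet.accounts)
    claim = await self.ledger.get_claim_by_claim_id(claim_id)  # accounts now optional
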
@@ -1,23 +1,27 @@
import base64
import struct
from typing import List
from typing import List, TYPE_CHECKING, Union, Optional
from binascii import hexlify
from itertools import chain

from lbry.error import ResolveCensoredError
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage
if TYPE_CHECKING:
    from lbry.wallet.server.leveldb import ResolveResult

INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
BLOCKED = ErrorMessage.Code.Name(ErrorMessage.BLOCKED)


def set_reference(reference, txo_row):
    if txo_row:
        reference.tx_hash = txo_row['txo_hash'][:32]
        reference.nout = struct.unpack('<I', txo_row['txo_hash'][32:])[0]
        reference.height = txo_row['height']
def set_reference(reference, claim_hash, rows):
    if claim_hash:
        for txo in rows:
            if claim_hash == txo.claim_hash:
                reference.tx_hash = txo.tx_hash
                reference.nout = txo.position
                reference.height = txo.height
                return


class Censor:

@@ -38,19 +42,19 @@ class Censor:
    def apply(self, rows):
        return [row for row in rows if not self.censor(row)]

    def censor(self, row) -> bool:
    def censor(self, row) -> Optional[bytes]:
        if self.is_censored(row):
            censoring_channel_hash = bytes.fromhex(row['censoring_channel_id'])[::-1]
            self.censored.setdefault(censoring_channel_hash, set())
            self.censored[censoring_channel_hash].add(row['tx_hash'])
            return True
        return False
            return censoring_channel_hash
        return None

    def to_message(self, outputs: OutputsMessage, extra_txo_rows: dict):
        for censoring_channel_hash, count in self.censored.items():
            blocked = outputs.blocked.add()
            blocked.count = len(count)
            set_reference(blocked.channel, extra_txo_rows.get(censoring_channel_hash))
            set_reference(blocked.channel, censoring_channel_hash, extra_txo_rows)
            outputs.blocked_total += len(count)

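For reference, `censor` now returns the censoring channel hash instead of a bool, so callers can both filter rows and attribute each block. A minimal hypothetical caller (not part of this diff), assuming `censor` is a Censor instance and `rows` are resolve rows:

    # Hypothetical caller; only the return-value contract (channel hash bytes
    # or None) comes from this diff.
    blocked_by, visible = {}, []
    for row in rows:
        censoring_channel = censor.censor(row)  # bytes claim hash, or None if not censored
        if censoring_channel is None:
            visible.append(row)
        else:
            blocked_by[censoring_channel] = blocked_by.get(censoring_channel, 0) + 1
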
@@ -115,10 +119,10 @@ class Outputs:
                'expiration_height': claim.expiration_height,
                'effective_amount': claim.effective_amount,
                'support_amount': claim.support_amount,
                'trending_group': claim.trending_group,
                'trending_mixed': claim.trending_mixed,
                'trending_local': claim.trending_local,
                'trending_global': claim.trending_global,
                # 'trending_group': claim.trending_group,
                # 'trending_mixed': claim.trending_mixed,
                # 'trending_local': claim.trending_local,
                # 'trending_global': claim.trending_global,
            }
            if claim.HasField('channel'):
                txo.channel = tx_map[claim.channel.tx_hash].outputs[claim.channel.nout]

@@ -169,51 +173,54 @@ class Outputs:

    @classmethod
    def to_bytes(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked: Censor = None) -> bytes:
        extra_txo_rows = {row['claim_hash']: row for row in extra_txo_rows}
        page = OutputsMessage()
        page.offset = offset
        if total is not None:
            page.total = total
        if blocked is not None:
            blocked.to_message(page, extra_txo_rows)
        for row in extra_txo_rows:
            cls.encode_txo(page.extra_txos.add(), row)

        for row in txo_rows:
            cls.row_to_message(row, page.txos.add(), extra_txo_rows)
        for row in extra_txo_rows.values():
            cls.row_to_message(row, page.extra_txos.add(), extra_txo_rows)
            # cls.row_to_message(row, page.txos.add(), extra_txo_rows)
            txo_message: 'OutputsMessage' = page.txos.add()
            cls.encode_txo(txo_message, row)
            if not isinstance(row, Exception):
                if row.channel_hash:
                    set_reference(txo_message.claim.channel, row.channel_hash, extra_txo_rows)
                if row.reposted_claim_hash:
                    set_reference(txo_message.claim.repost, row.reposted_claim_hash, extra_txo_rows)
            elif isinstance(row, ResolveCensoredError):
                set_reference(txo_message.error.blocked.channel, row.censor_id, extra_txo_rows)
        return page.SerializeToString()

    @classmethod
    def row_to_message(cls, txo, txo_message, extra_row_dict: dict):
        if isinstance(txo, Exception):
            txo_message.error.text = txo.args[0]
            if isinstance(txo, ValueError):
    def encode_txo(cls, txo_message, resolve_result: Union['ResolveResult', Exception]):
        if isinstance(resolve_result, Exception):
            txo_message.error.text = resolve_result.args[0]
            if isinstance(resolve_result, ValueError):
                txo_message.error.code = ErrorMessage.INVALID
            elif isinstance(txo, LookupError):
            elif isinstance(resolve_result, LookupError):
                txo_message.error.code = ErrorMessage.NOT_FOUND
            elif isinstance(txo, ResolveCensoredError):
            elif isinstance(resolve_result, ResolveCensoredError):
                txo_message.error.code = ErrorMessage.BLOCKED
                set_reference(txo_message.error.blocked.channel, extra_row_dict.get(bytes.fromhex(txo.censor_id)[::-1]))
            return
        txo_message.tx_hash = txo['txo_hash'][:32]
        txo_message.nout, = struct.unpack('<I', txo['txo_hash'][32:])
        txo_message.height = txo['height']
        txo_message.claim.short_url = txo['short_url']
        txo_message.claim.reposted = txo['reposted']
        if txo['canonical_url'] is not None:
            txo_message.claim.canonical_url = txo['canonical_url']
        txo_message.claim.is_controlling = bool(txo['is_controlling'])
        if txo['last_take_over_height'] is not None:
            txo_message.claim.take_over_height = txo['last_take_over_height']
        txo_message.claim.creation_height = txo['creation_height']
        txo_message.claim.activation_height = txo['activation_height']
        txo_message.claim.expiration_height = txo['expiration_height']
        if txo['claims_in_channel'] is not None:
            txo_message.claim.claims_in_channel = txo['claims_in_channel']
        txo_message.claim.effective_amount = txo['effective_amount']
        txo_message.claim.support_amount = txo['support_amount']
        txo_message.claim.trending_group = txo['trending_group']
        txo_message.claim.trending_mixed = txo['trending_mixed']
        txo_message.claim.trending_local = txo['trending_local']
        txo_message.claim.trending_global = txo['trending_global']
        set_reference(txo_message.claim.channel, extra_row_dict.get(txo['channel_hash']))
        set_reference(txo_message.claim.repost, extra_row_dict.get(txo['reposted_claim_hash']))
        txo_message.tx_hash = resolve_result.tx_hash
        txo_message.nout = resolve_result.position
        txo_message.height = resolve_result.height
        txo_message.claim.short_url = resolve_result.short_url
        txo_message.claim.reposted = resolve_result.reposted
        txo_message.claim.is_controlling = resolve_result.is_controlling
        txo_message.claim.creation_height = resolve_result.creation_height
        txo_message.claim.activation_height = resolve_result.activation_height
        txo_message.claim.expiration_height = resolve_result.expiration_height
        txo_message.claim.effective_amount = resolve_result.effective_amount
        txo_message.claim.support_amount = resolve_result.support_amount

        if resolve_result.canonical_url is not None:
            txo_message.claim.canonical_url = resolve_result.canonical_url
        if resolve_result.last_takeover_height is not None:
            txo_message.claim.take_over_height = resolve_result.last_takeover_height
        if resolve_result.claims_in_channel is not None:
            txo_message.claim.claims_in_channel = resolve_result.claims_in_channel

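A rough sketch of how the reworked serializer is driven, assuming resolve now yields `ResolveResult` rows (or Exceptions such as ResolveCensoredError) instead of sqlite row dicts; `resolve_urls` is a hypothetical stand-in for the hub's resolve entry point, and only `Outputs.to_bytes` and the row attribute names come from the hunks above:

    # Hypothetical driver; only Outputs.to_bytes and the ResolveResult field
    # names (tx_hash, position, height, ...) are taken from this diff.
    txo_rows, extra_rows = await resolve_urls(urls)  # ResolveResult rows or Exceptions
    payload = Outputs.to_bytes(txo_rows, extra_rows, offset=0, total=len(txo_rows))
    # Censored URLs arrive as ResolveCensoredError instances in txo_rows;
    # encode_txo marks them Error.BLOCKED and set_reference points
    # error.blocked.channel at the censoring claim found in extra_rows.
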
@@ -11,7 +11,7 @@ from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()


import lbry.schema.types.v2.result_pb2 as result__pb2
from . import result_pb2 as result__pb2


DESCRIPTOR = _descriptor.FileDescriptor(

@@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor.FileDescriptor(
  syntax='proto3',
  serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
  create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\t\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x9c\r\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\r\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x01(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x01(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x01(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x01(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x01(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x01(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x01(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12$\n\x0c\x63hannel_join\x18# \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_group\x18\' \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_mixed\x18( \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_local\x18) \x01(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0ftrending_global\x18* \x01(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\r\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x32\x31\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\t\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\xa3\x0c\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\r\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x01(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x01(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x01(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x01(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x01(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x01(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x01(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x01(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x01(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12$\n\x0c\x63hannel_join\x18# \x01(\x0b\x32\x0e.pb.RangeField\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x01(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_score\x18\' \x01(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. \x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\r\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x32\x31\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
  ,
  dependencies=[result__pb2.DESCRIPTOR,])

@ -485,140 +485,119 @@ _SEARCHREQUEST = _descriptor.Descriptor(
|
|||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_group', full_name='pb.SearchRequest.trending_group', index=38,
|
||||
name='trending_score', full_name='pb.SearchRequest.trending_score', index=38,
|
||||
number=39, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_mixed', full_name='pb.SearchRequest.trending_mixed', index=39,
|
||||
number=40, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_local', full_name='pb.SearchRequest.trending_local', index=40,
|
||||
number=41, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_global', full_name='pb.SearchRequest.trending_global', index=41,
|
||||
number=42, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_id', full_name='pb.SearchRequest.tx_id', index=42,
|
||||
name='tx_id', full_name='pb.SearchRequest.tx_id', index=39,
|
||||
number=43, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=43,
|
||||
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=40,
|
||||
number=44, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='signature', full_name='pb.SearchRequest.signature', index=44,
|
||||
name='signature', full_name='pb.SearchRequest.signature', index=41,
|
||||
number=45, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=45,
|
||||
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=42,
|
||||
number=46, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=46,
|
||||
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=43,
|
||||
number=47, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=47,
|
||||
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=44,
|
||||
number=48, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='any_tags', full_name='pb.SearchRequest.any_tags', index=48,
|
||||
name='any_tags', full_name='pb.SearchRequest.any_tags', index=45,
|
||||
number=49, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='all_tags', full_name='pb.SearchRequest.all_tags', index=49,
|
||||
name='all_tags', full_name='pb.SearchRequest.all_tags', index=46,
|
||||
number=50, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='not_tags', full_name='pb.SearchRequest.not_tags', index=50,
|
||||
name='not_tags', full_name='pb.SearchRequest.not_tags', index=47,
|
||||
number=51, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=51,
|
||||
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=48,
|
||||
number=52, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='has_source', full_name='pb.SearchRequest.has_source', index=52,
|
||||
name='has_source', full_name='pb.SearchRequest.has_source', index=49,
|
||||
number=53, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=53,
|
||||
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=50,
|
||||
number=54, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='any_languages', full_name='pb.SearchRequest.any_languages', index=54,
|
||||
name='any_languages', full_name='pb.SearchRequest.any_languages', index=51,
|
||||
number=55, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='all_languages', full_name='pb.SearchRequest.all_languages', index=55,
|
||||
name='all_languages', full_name='pb.SearchRequest.all_languages', index=52,
|
||||
number=56, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=56,
|
||||
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=53,
|
||||
number=57, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='no_totals', full_name='pb.SearchRequest.no_totals', index=57,
|
||||
name='no_totals', full_name='pb.SearchRequest.no_totals', index=54,
|
||||
number=58, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
|
@@ -637,7 +616,7 @@ _SEARCHREQUEST = _descriptor.Descriptor(
  oneofs=[
  ],
  serialized_start=248,
  serialized_end=1940,
  serialized_end=1819,
)

_RANGEFIELD.fields_by_name['op'].enum_type = _RANGEFIELD_OP

@@ -661,10 +640,7 @@ _SEARCHREQUEST.fields_by_name['channel_join'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['is_signature_valid'].message_type = _BOOLVALUE
_SEARCHREQUEST.fields_by_name['effective_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['support_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_group'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_mixed'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_local'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_global'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_score'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['tx_nout'].message_type = _UINT32VALUE
_SEARCHREQUEST.fields_by_name['has_source'].message_type = _BOOLVALUE
DESCRIPTOR.message_types_by_name['InvertibleField'] = _INVERTIBLEFIELD

@@ -719,8 +695,8 @@ _HUB = _descriptor.ServiceDescriptor(
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=1942,
  serialized_end=1991,
  serialized_start=1821,
  serialized_end=1870,
  methods=[
  _descriptor.MethodDescriptor(
    name='Search',

@@ -2,8 +2,8 @@
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

import lbry.schema.types.v2.hub_pb2 as hub__pb2
import lbry.schema.types.v2.result_pb2 as result__pb2
from . import hub_pb2 as hub__pb2
from . import result_pb2 as result__pb2


class HubStub(object):

@@ -1,13 +1,11 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: result.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

@@ -19,9 +17,10 @@ DESCRIPTOR = _descriptor.FileDescriptor(
  name='result.proto',
  package='pb',
  syntax='proto3',
serialized_pb=_b('\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xaf\x03\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_group\x18\x16 \x01(\r\x12\x16\n\x0etrending_mixed\x18\x17 \x01(\x02\x12\x16\n\x0etrending_local\x18\x18 \x01(\x02\x12\x17\n\x0ftrending_global\x18\x19 \x01(\x02\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.Outputb\x06proto3')
  serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
  create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0cresult.proto\x12\x02pb\"\x97\x01\n\x07Outputs\x12\x18\n\x04txos\x18\x01 \x03(\x0b\x32\n.pb.Output\x12\x1e\n\nextra_txos\x18\x02 \x03(\x0b\x32\n.pb.Output\x12\r\n\x05total\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\r\x12\x1c\n\x07\x62locked\x18\x05 \x03(\x0b\x32\x0b.pb.Blocked\x12\x15\n\rblocked_total\x18\x06 \x01(\r\"{\n\x06Output\x12\x0f\n\x07tx_hash\x18\x01 \x01(\x0c\x12\x0c\n\x04nout\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\x1e\n\x05\x63laim\x18\x07 \x01(\x0b\x32\r.pb.ClaimMetaH\x00\x12\x1a\n\x05\x65rror\x18\x0f \x01(\x0b\x32\t.pb.ErrorH\x00\x42\x06\n\x04meta\"\xe6\x02\n\tClaimMeta\x12\x1b\n\x07\x63hannel\x18\x01 \x01(\x0b\x32\n.pb.Output\x12\x1a\n\x06repost\x18\x02 \x01(\x0b\x32\n.pb.Output\x12\x11\n\tshort_url\x18\x03 \x01(\t\x12\x15\n\rcanonical_url\x18\x04 \x01(\t\x12\x16\n\x0eis_controlling\x18\x05 \x01(\x08\x12\x18\n\x10take_over_height\x18\x06 \x01(\r\x12\x17\n\x0f\x63reation_height\x18\x07 \x01(\r\x12\x19\n\x11\x61\x63tivation_height\x18\x08 \x01(\r\x12\x19\n\x11\x65xpiration_height\x18\t \x01(\r\x12\x19\n\x11\x63laims_in_channel\x18\n \x01(\r\x12\x10\n\x08reposted\x18\x0b \x01(\r\x12\x18\n\x10\x65\x66\x66\x65\x63tive_amount\x18\x14 \x01(\x04\x12\x16\n\x0esupport_amount\x18\x15 \x01(\x04\x12\x16\n\x0etrending_score\x18\x16 \x01(\x01\"\x94\x01\n\x05\x45rror\x12\x1c\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x0e.pb.Error.Code\x12\x0c\n\x04text\x18\x02 \x01(\t\x12\x1c\n\x07\x62locked\x18\x03 \x01(\x0b\x32\x0b.pb.Blocked\"A\n\x04\x43ode\x12\x10\n\x0cUNKNOWN_CODE\x10\x00\x12\r\n\tNOT_FOUND\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x12\x0b\n\x07\x42LOCKED\x10\x03\"5\n\x07\x42locked\x12\r\n\x05\x63ount\x18\x01 \x01(\r\x12\x1b\n\x07\x63hannel\x18\x02 \x01(\x0b\x32\n.pb.OutputB&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


@ -30,28 +29,33 @@ _ERROR_CODE = _descriptor.EnumDescriptor(
|
|||
full_name='pb.Error.Code',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
values=[
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='UNKNOWN_CODE', index=0, number=0,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='NOT_FOUND', index=1, number=1,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='INVALID', index=2, number=2,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='BLOCKED', index=3, number=3,
|
||||
options=None,
|
||||
type=None),
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
containing_type=None,
|
||||
options=None,
|
||||
serialized_start=817,
|
||||
serialized_end=882,
|
||||
serialized_options=None,
|
||||
serialized_start=744,
|
||||
serialized_end=809,
|
||||
)
|
||||
_sym_db.RegisterEnumDescriptor(_ERROR_CODE)
|
||||
|
||||
|
@ -62,6 +66,7 @@ _OUTPUTS = _descriptor.Descriptor(
|
|||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='txos', full_name='pb.Outputs.txos', index=0,
|
||||
|
@ -69,49 +74,49 @@ _OUTPUTS = _descriptor.Descriptor(
|
|||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='extra_txos', full_name='pb.Outputs.extra_txos', index=1,
|
||||
number=2, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='total', full_name='pb.Outputs.total', index=2,
|
||||
number=3, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='offset', full_name='pb.Outputs.offset', index=3,
|
||||
number=4, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='blocked', full_name='pb.Outputs.blocked', index=4,
|
||||
number=5, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='blocked_total', full_name='pb.Outputs.blocked_total', index=5,
|
||||
number=6, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
|
@ -128,56 +133,59 @@ _OUTPUT = _descriptor.Descriptor(
|
|||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_hash', full_name='pb.Output.tx_hash', index=0,
|
||||
number=1, type=12, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b(""),
|
||||
has_default_value=False, default_value=b"",
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='nout', full_name='pb.Output.nout', index=1,
|
||||
number=2, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='height', full_name='pb.Output.height', index=2,
|
||||
number=3, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claim', full_name='pb.Output.claim', index=3,
|
||||
number=7, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='error', full_name='pb.Output.error', index=4,
|
||||
number=15, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
_descriptor.OneofDescriptor(
|
||||
name='meta', full_name='pb.Output.meta',
|
||||
index=0, containing_type=None, fields=[]),
|
||||
index=0, containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[]),
|
||||
],
|
||||
serialized_start=174,
|
||||
serialized_end=297,
|
||||
|
@ -190,6 +198,7 @@ _CLAIMMETA = _descriptor.Descriptor(
|
|||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='channel', full_name='pb.ClaimMeta.channel', index=0,
|
||||
|
@ -197,133 +206,112 @@ _CLAIMMETA = _descriptor.Descriptor(
|
|||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='repost', full_name='pb.ClaimMeta.repost', index=1,
|
||||
number=2, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='short_url', full_name='pb.ClaimMeta.short_url', index=2,
|
||||
number=3, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='canonical_url', full_name='pb.ClaimMeta.canonical_url', index=3,
|
||||
number=4, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='is_controlling', full_name='pb.ClaimMeta.is_controlling', index=4,
|
||||
number=5, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='take_over_height', full_name='pb.ClaimMeta.take_over_height', index=5,
|
||||
number=6, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='creation_height', full_name='pb.ClaimMeta.creation_height', index=6,
|
||||
number=7, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='activation_height', full_name='pb.ClaimMeta.activation_height', index=7,
|
||||
number=8, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='expiration_height', full_name='pb.ClaimMeta.expiration_height', index=8,
|
||||
number=9, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claims_in_channel', full_name='pb.ClaimMeta.claims_in_channel', index=9,
|
||||
number=10, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='reposted', full_name='pb.ClaimMeta.reposted', index=10,
|
||||
number=11, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='effective_amount', full_name='pb.ClaimMeta.effective_amount', index=11,
|
||||
number=20, type=4, cpp_type=4, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='support_amount', full_name='pb.ClaimMeta.support_amount', index=12,
|
||||
number=21, type=4, cpp_type=4, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_group', full_name='pb.ClaimMeta.trending_group', index=13,
|
||||
number=22, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_mixed', full_name='pb.ClaimMeta.trending_mixed', index=14,
|
||||
number=23, type=2, cpp_type=6, label=1,
|
||||
name='trending_score', full_name='pb.ClaimMeta.trending_score', index=13,
|
||||
number=22, type=1, cpp_type=5, label=1,
|
||||
has_default_value=False, default_value=float(0),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_local', full_name='pb.ClaimMeta.trending_local', index=15,
|
||||
number=24, type=2, cpp_type=6, label=1,
|
||||
has_default_value=False, default_value=float(0),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_global', full_name='pb.ClaimMeta.trending_global', index=16,
|
||||
number=25, type=2, cpp_type=6, label=1,
|
||||
has_default_value=False, default_value=float(0),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=300,
|
||||
serialized_end=731,
|
||||
serialized_end=658,
|
||||
)
|
||||
|
||||
|
||||
|
@ -333,6 +321,7 @@ _ERROR = _descriptor.Descriptor(
|
|||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='code', full_name='pb.Error.code', index=0,
|
||||
|
@ -340,21 +329,21 @@ _ERROR = _descriptor.Descriptor(
|
|||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='text', full_name='pb.Error.text', index=1,
|
||||
number=2, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=_b("").decode('utf-8'),
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='blocked', full_name='pb.Error.blocked', index=2,
|
||||
number=3, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
|
@ -362,14 +351,14 @@ _ERROR = _descriptor.Descriptor(
|
|||
enum_types=[
|
||||
_ERROR_CODE,
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=734,
|
||||
serialized_end=882,
|
||||
serialized_start=661,
|
||||
serialized_end=809,
|
||||
)
|
||||
|
||||
|
||||
|
@ -379,6 +368,7 @@ _BLOCKED = _descriptor.Descriptor(
|
|||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='count', full_name='pb.Blocked.count', index=0,
|
||||
|
@ -386,28 +376,28 @@ _BLOCKED = _descriptor.Descriptor(
|
|||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='channel', full_name='pb.Blocked.channel', index=1,
|
||||
number=2, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
options=None),
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
options=None,
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=884,
|
||||
serialized_end=937,
|
||||
serialized_start=811,
|
||||
serialized_end=864,
|
||||
)
|
||||
|
||||
_OUTPUTS.fields_by_name['txos'].message_type = _OUTPUT
|
||||
|
@ -432,41 +422,43 @@ DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
|
|||
DESCRIPTOR.message_types_by_name['ClaimMeta'] = _CLAIMMETA
|
||||
DESCRIPTOR.message_types_by_name['Error'] = _ERROR
|
||||
DESCRIPTOR.message_types_by_name['Blocked'] = _BLOCKED
|
||||
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
||||
|
||||
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), dict(
|
||||
DESCRIPTOR = _OUTPUTS,
|
||||
__module__ = 'result_pb2'
|
||||
Outputs = _reflection.GeneratedProtocolMessageType('Outputs', (_message.Message,), {
|
||||
'DESCRIPTOR' : _OUTPUTS,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Outputs)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Outputs)
|
||||
|
||||
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), dict(
|
||||
DESCRIPTOR = _OUTPUT,
|
||||
__module__ = 'result_pb2'
|
||||
Output = _reflection.GeneratedProtocolMessageType('Output', (_message.Message,), {
|
||||
'DESCRIPTOR' : _OUTPUT,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Output)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Output)
|
||||
|
||||
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), dict(
|
||||
DESCRIPTOR = _CLAIMMETA,
|
||||
__module__ = 'result_pb2'
|
||||
ClaimMeta = _reflection.GeneratedProtocolMessageType('ClaimMeta', (_message.Message,), {
|
||||
'DESCRIPTOR' : _CLAIMMETA,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.ClaimMeta)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(ClaimMeta)
|
||||
|
||||
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), dict(
|
||||
DESCRIPTOR = _ERROR,
|
||||
__module__ = 'result_pb2'
|
||||
Error = _reflection.GeneratedProtocolMessageType('Error', (_message.Message,), {
|
||||
'DESCRIPTOR' : _ERROR,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Error)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Error)
|
||||
|
||||
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), dict(
|
||||
DESCRIPTOR = _BLOCKED,
|
||||
__module__ = 'result_pb2'
|
||||
Blocked = _reflection.GeneratedProtocolMessageType('Blocked', (_message.Message,), {
|
||||
'DESCRIPTOR' : _BLOCKED,
|
||||
'__module__' : 'result_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.Blocked)
|
||||
))
|
||||
})
|
||||
_sym_db.RegisterMessage(Blocked)
|
||||
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
|
|
4
lbry/schema/types/v2/result_pb2_grpc.py
Normal file
|
@@ -0,0 +1,4 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
|
|
@ -490,13 +490,15 @@ class CommandTestCase(IntegrationTestCase):
|
|||
""" Synchronous version of `out` method. """
|
||||
return json.loads(jsonrpc_dumps_pretty(value, ledger=self.ledger))['result']
|
||||
|
||||
async def confirm_and_render(self, awaitable, confirm) -> Transaction:
|
||||
async def confirm_and_render(self, awaitable, confirm, return_tx=False) -> Transaction:
|
||||
tx = await awaitable
|
||||
if confirm:
|
||||
await self.ledger.wait(tx)
|
||||
await self.generate(1)
|
||||
await self.ledger.wait(tx, self.blockchain.block_expected)
|
||||
return self.sout(tx)
|
||||
if not return_tx:
|
||||
return self.sout(tx)
|
||||
return tx
|
||||
|
||||
def create_upload_file(self, data, prefix=None, suffix=None):
|
||||
file_path = tempfile.mktemp(prefix=prefix or "tmp", suffix=suffix or "", dir=self.daemon.conf.upload_dir)
|
||||
|
@ -507,19 +509,19 @@ class CommandTestCase(IntegrationTestCase):
|
|||
|
||||
async def stream_create(
|
||||
self, name='hovercraft', bid='1.0', file_path=None,
|
||||
data=b'hi!', confirm=True, prefix=None, suffix=None, **kwargs):
|
||||
data=b'hi!', confirm=True, prefix=None, suffix=None, return_tx=False, **kwargs):
|
||||
if file_path is None and data is not None:
|
||||
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
|
||||
return await self.confirm_and_render(
|
||||
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm
|
||||
self.daemon.jsonrpc_stream_create(name, bid, file_path=file_path, **kwargs), confirm, return_tx
|
||||
)
|
||||
|
||||
async def stream_update(
|
||||
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, **kwargs):
|
||||
self, claim_id, data=None, prefix=None, suffix=None, confirm=True, return_tx=False, **kwargs):
|
||||
if data is not None:
|
||||
file_path = self.create_upload_file(data=data, prefix=prefix, suffix=suffix)
|
||||
return await self.confirm_and_render(
|
||||
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm
|
||||
self.daemon.jsonrpc_stream_update(claim_id, file_path=file_path, **kwargs), confirm, return_tx
|
||||
)
|
||||
return await self.confirm_and_render(
|
||||
self.daemon.jsonrpc_stream_update(claim_id, **kwargs), confirm
|
||||
|
@ -625,6 +627,9 @@ class CommandTestCase(IntegrationTestCase):
|
|||
async def claim_search(self, **kwargs):
|
||||
return (await self.out(self.daemon.jsonrpc_claim_search(**kwargs)))['items']
|
||||
|
||||
async def get_claim_by_claim_id(self, claim_id):
|
||||
return await self.out(self.ledger.get_claim_by_claim_id(claim_id))
|
||||
|
||||
async def file_list(self, *args, **kwargs):
|
||||
return (await self.out(self.daemon.jsonrpc_file_list(*args, **kwargs)))['items']
|
||||
|
||||
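The new return_tx flag threads through confirm_and_render so a test can get the Transaction object back instead of the serialized JSON-RPC result. A minimal sketch of how a test might use it (the claim name, bid and data below are placeholders, not taken from this diff):

    from lbry.testcase import CommandTestCase

    class ExampleReturnTxTest(CommandTestCase):

        async def test_stream_update_returns_transaction(self):
            # default behaviour: the dict produced by self.sout(tx)
            claim = await self.stream_create('example-name', '0.5', data=b'hi')
            claim_id = claim['outputs'][0]['claim_id']

            # return_tx=True: the raw Transaction, useful for inspecting outputs directly
            tx = await self.stream_update(claim_id, data=b'updated', return_tx=True)
            self.assertEqual(claim_id, tx.outputs[0].claim_id)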
|
|
|
@ -556,7 +556,7 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))
|
||||
|
||||
assert len(pending_synced_history) == len(remote_history), \
|
||||
f"{len(pending_synced_history)} vs {len(remote_history)}"
|
||||
f"{len(pending_synced_history)} vs {len(remote_history)} for {address}"
|
||||
synced_history = ""
|
||||
for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
|
||||
assert i == remote_i, f"{i} vs {remote_i}"
|
||||
|
@ -894,9 +894,21 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
hub_server=new_sdk_server is not None
|
||||
)
|
||||
|
||||
async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
|
||||
for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]:
|
||||
return claim
|
||||
# async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
|
||||
# return await self.network.get_claim_by_id(claim_id)
|
||||
|
||||
async def get_claim_by_claim_id(self, claim_id, accounts=None, include_purchase_receipt=False,
|
||||
include_is_my_output=False):
|
||||
accounts = accounts or []
|
||||
# return await self.network.get_claim_by_id(claim_id)
|
||||
inflated = await self._inflate_outputs(
|
||||
self.network.get_claim_by_id(claim_id), accounts,
|
||||
include_purchase_receipt=include_purchase_receipt,
|
||||
include_is_my_output=include_is_my_output,
|
||||
)
|
||||
txos = inflated[0]
|
||||
if txos:
|
||||
return txos[0]
|
||||
|
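With the reordered signature the claim id is the first positional argument and the accounts list is optional; the claim is fetched from the hub via blockchain.claimtrie.getclaimbyid and inflated into an Output. An illustrative call, with a placeholder claim id (not from this diff):

    txo = await ledger.get_claim_by_claim_id(
        'da3fdcb1e9eba9d34222f0c1414aaa88a1e55e50',   # example claim id
        accounts=wallet.accounts,
        include_purchase_receipt=True,
    )
    if txo is not None and txo.is_claim:
        print(txo.claim_name, txo.claim_id)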
||||
async def _report_state(self):
|
||||
try:
|
||||
|
|
|
@ -238,7 +238,7 @@ class Network:
|
|||
log.exception("error looking up dns for spv server %s:%i", server, port)
|
||||
|
||||
# accumulate the dns results
|
||||
if self.config['explicit_servers']:
|
||||
if self.config.get('explicit_servers', []):
|
||||
hubs = self.config['explicit_servers']
|
||||
elif self.known_hubs:
|
||||
hubs = self.known_hubs
|
||||
|
@ -254,7 +254,7 @@ class Network:
|
|||
sent_ping_timestamps = {}
|
||||
_, ip_to_hostnames = await self.resolve_spv_dns()
|
||||
n = len(ip_to_hostnames)
|
||||
log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config['explicit_servers']))
|
||||
log.info("%i possible spv servers to try (%i urls in config)", n, len(self.config.get('explicit_servers', [])))
|
||||
pongs = {}
|
||||
known_hubs = self.known_hubs
|
||||
try:
|
||||
|
@ -299,8 +299,8 @@ class Network:
|
|||
if (pong is not None and self.jurisdiction is not None) and \
|
||||
(pong.country_name != self.jurisdiction):
|
||||
continue
|
||||
client = ClientSession(network=self, server=(host, port), timeout=self.config['hub_timeout'],
|
||||
concurrency=self.config['concurrent_hub_requests'])
|
||||
client = ClientSession(network=self, server=(host, port), timeout=self.config.get('hub_timeout', 30),
|
||||
concurrency=self.config.get('concurrent_hub_requests', 30))
|
||||
try:
|
||||
await client.create_connection()
|
||||
log.warning("Connected to spv server %s:%i", host, port)
|
||||
|
@ -465,6 +465,12 @@ class Network:
|
|||
def get_server_features(self):
|
||||
return self.rpc('server.features', (), restricted=True)
|
||||
|
||||
# def get_claims_by_ids(self, claim_ids):
|
||||
# return self.rpc('blockchain.claimtrie.getclaimsbyids', claim_ids)
|
||||
|
||||
def get_claim_by_id(self, claim_id):
|
||||
return self.rpc('blockchain.claimtrie.getclaimbyid', [claim_id])
|
||||
|
||||
def resolve(self, urls, session_override=None):
|
||||
return self.rpc('blockchain.claimtrie.resolve', urls, False, session_override)
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
__hub_url__ = (
|
||||
"https://github.com/lbryio/hub/releases/download/v0.2021.08.24-beta/hub"
|
||||
"https://github.com/lbryio/hub/releases/download/leveldb-hub/hub"
|
||||
)
|
||||
from .node import Conductor
|
||||
from .service import ConductorService
|
||||
|
|
|
@ -196,11 +196,10 @@ class SPVNode:
|
|||
self.session_timeout = 600
|
||||
self.rpc_port = '0' # disabled by default
|
||||
self.stopped = False
|
||||
self.index_name = None
|
||||
self.index_name = uuid4().hex
|
||||
|
||||
async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
self.index_name = uuid4().hex
|
||||
conf = {
|
||||
'DESCRIPTION': '',
|
||||
'PAYMENT_ADDRESS': '',
|
||||
|
@ -223,7 +222,7 @@ class SPVNode:
|
|||
# TODO: don't use os.environ
|
||||
os.environ.update(conf)
|
||||
self.server = Server(Env(self.coin_class))
|
||||
self.server.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
|
||||
self.server.bp.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
|
||||
await self.server.start()
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
|
|
|
@ -496,6 +496,17 @@ class RPCSession(SessionBase):
|
|||
self.abort()
|
||||
return False
|
||||
|
||||
async def send_notifications(self, notifications) -> bool:
|
||||
"""Send an RPC notification over the network."""
|
||||
message, _ = self.connection.send_batch(notifications)
|
||||
try:
|
||||
await self._send_message(message)
|
||||
return True
|
||||
except asyncio.TimeoutError:
|
||||
self.logger.info("timeout sending address notification to %s", self.peer_address_str(for_log=True))
|
||||
self.abort()
|
||||
return False
|
||||
|
||||
def send_batch(self, raise_errors=False):
|
||||
"""Return a BatchRequest. Intended to be used like so:
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -14,8 +14,7 @@ from lbry.wallet.server.daemon import Daemon, LBCDaemon
|
|||
from lbry.wallet.server.script import ScriptPubKey, OpCodes
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
|
||||
from lbry.wallet.server.db.writer import LBRYLevelDB
|
||||
from lbry.wallet.server.block_processor import LBRYBlockProcessor
|
||||
from lbry.wallet.server.block_processor import BlockProcessor
|
||||
|
||||
|
||||
Block = namedtuple("Block", "raw header transactions")
|
||||
|
@ -39,7 +38,7 @@ class Coin:
|
|||
SESSIONCLS = LBRYElectrumX
|
||||
DESERIALIZER = lib_tx.Deserializer
|
||||
DAEMON = Daemon
|
||||
BLOCK_PROCESSOR = LBRYBlockProcessor
|
||||
BLOCK_PROCESSOR = BlockProcessor
|
||||
SESSION_MANAGER = LBRYSessionManager
|
||||
DB = LevelDB
|
||||
HEADER_VALUES = [
|
||||
|
@ -214,6 +213,11 @@ class Coin:
|
|||
txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
|
||||
return Block(raw_block, header, txs)
|
||||
|
||||
@classmethod
|
||||
def transaction(cls, raw_tx: bytes):
|
||||
"""Return a Block namedtuple given a raw block and its height."""
|
||||
return cls.DESERIALIZER(raw_tx).read_tx()
|
||||
|
||||
@classmethod
|
||||
def decimal_value(cls, value):
|
||||
"""Return the number of standard coin units as a Decimal given a
|
||||
|
@ -237,10 +241,9 @@ class Coin:
|
|||
class LBC(Coin):
|
||||
DAEMON = LBCDaemon
|
||||
SESSIONCLS = LBRYElectrumX
|
||||
BLOCK_PROCESSOR = LBRYBlockProcessor
|
||||
SESSION_MANAGER = LBRYSessionManager
|
||||
DESERIALIZER = DeserializerSegWit
|
||||
DB = LBRYLevelDB
|
||||
DB = LevelDB
|
||||
NAME = "LBRY"
|
||||
SHORTNAME = "LBC"
|
||||
NET = "mainnet"
|
||||
|
@ -258,6 +261,18 @@ class LBC(Coin):
|
|||
TX_PER_BLOCK = 1
|
||||
RPC_PORT = 9245
|
||||
REORG_LIMIT = 200
|
||||
|
||||
nOriginalClaimExpirationTime = 262974
|
||||
nExtendedClaimExpirationTime = 2102400
|
||||
nExtendedClaimExpirationForkHeight = 400155
|
||||
nNormalizedNameForkHeight = 539940 # targeting 21 March 2019
|
||||
nMinTakeoverWorkaroundHeight = 496850
|
||||
nMaxTakeoverWorkaroundHeight = 658300 # targeting 30 Oct 2019
|
||||
nWitnessForkHeight = 680770 # targeting 11 Dec 2019
|
||||
nAllClaimsInMerkleForkHeight = 658310 # targeting 30 Oct 2019
|
||||
proportionalDelayFactor = 32
|
||||
maxTakeoverDelay = 4032
|
||||
|
||||
PEERS = [
|
||||
]
|
||||
|
||||
|
@ -335,6 +350,18 @@ class LBC(Coin):
|
|||
else:
|
||||
return sha256(script).digest()[:HASHX_LEN]
|
||||
|
||||
@classmethod
|
||||
def get_expiration_height(cls, last_updated_height: int, extended: bool = False) -> int:
|
||||
if extended:
|
||||
return last_updated_height + cls.nExtendedClaimExpirationTime
|
||||
if last_updated_height < cls.nExtendedClaimExpirationForkHeight:
|
||||
return last_updated_height + cls.nOriginalClaimExpirationTime
|
||||
return last_updated_height + cls.nExtendedClaimExpirationTime
|
||||
|
||||
@classmethod
|
||||
def get_delay_for_name(cls, blocks_of_continuous_ownership: int) -> int:
|
||||
return min(blocks_of_continuous_ownership // cls.proportionalDelayFactor, cls.maxTakeoverDelay)
|
||||
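The two classmethods above encode consensus rules that previously lived in SQL; a quick worked example using the mainnet constants defined in this class (the heights are arbitrary examples):

    from lbry.wallet.server.coin import LBC

    # updated before the expiration fork at height 400155: original 262974-block window
    assert LBC.get_expiration_height(300_000) == 562_974
    # updated after the fork (or force-extended): 2102400-block window
    assert LBC.get_expiration_height(500_000) == 2_602_400
    assert LBC.get_expiration_height(300_000, extended=True) == 2_402_400

    # takeover delay: one block per proportionalDelayFactor (32) blocks of
    # continuous ownership, capped at maxTakeoverDelay (4032)
    assert LBC.get_delay_for_name(64) == 2
    assert LBC.get_delay_for_name(1_000_000) == 4_032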
|
||||
|
||||
class LBCRegTest(LBC):
|
||||
NET = "regtest"
|
||||
|
@ -344,6 +371,15 @@ class LBCRegTest(LBC):
|
|||
P2PKH_VERBYTE = bytes.fromhex("6f")
|
||||
P2SH_VERBYTES = bytes.fromhex("c4")
|
||||
|
||||
nOriginalClaimExpirationTime = 500
|
||||
nExtendedClaimExpirationTime = 600
|
||||
nExtendedClaimExpirationForkHeight = 800
|
||||
nNormalizedNameForkHeight = 250
|
||||
nMinTakeoverWorkaroundHeight = -1
|
||||
nMaxTakeoverWorkaroundHeight = -1
|
||||
nWitnessForkHeight = 150
|
||||
nAllClaimsInMerkleForkHeight = 350
|
||||
|
||||
|
||||
class LBCTestNet(LBCRegTest):
|
||||
NET = "testnet"
|
||||
|
|
|
@ -364,6 +364,11 @@ class LBCDaemon(Daemon):
|
|||
'''Given a name, returns the winning claim value.'''
|
||||
return await self._send_single('getvalueforname', (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getnamesintrie(self):
|
||||
'''Given a name, returns the winning claim value.'''
|
||||
return await self._send_single('getnamesintrie')
|
||||
|
||||
@handles_errors
|
||||
async def claimname(self, name, hexvalue, amount):
|
||||
'''Claim a name, used for functional tests only.'''
|
||||
|
|
|
@@ -0,0 +1,41 @@
import enum


@enum.unique
class DB_PREFIXES(enum.Enum):
    claim_to_support = b'K'
    support_to_claim = b'L'

    claim_to_txo = b'E'
    txo_to_claim = b'G'

    claim_to_channel = b'I'
    channel_to_claim = b'J'

    claim_short_id_prefix = b'F'
    effective_amount = b'D'
    claim_expiration = b'O'

    claim_takeover = b'P'
    pending_activation = b'Q'
    activated_claim_and_support = b'R'
    active_amount = b'S'

    repost = b'V'
    reposted_claim = b'W'

    undo = b'M'
    claim_diff = b'Y'

    tx = b'B'
    block_hash = b'C'
    header = b'H'
    tx_num = b'N'
    tx_count = b'T'
    tx_hash = b'X'
    utxo = b'u'
    hashx_utxo = b'h'
    hashx_history = b'x'
    db_state = b's'
    channel_count = b'Z'
    support_amount = b'a'
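Each entry above is the single byte that namespaces one "table" inside the shared LevelDB key space. A tiny illustrative sketch of how such a key is assembled (the packed field bytes are made up; the real key layouts live in the suppressed prefixes.py):

    from lbry.wallet.server.db import DB_PREFIXES

    def make_key(prefix: DB_PREFIXES, packed_fields: bytes) -> bytes:
        # every key starts with its table's one-byte prefix
        return prefix.value + packed_fields

    key = make_key(DB_PREFIXES.claim_takeover, b'@examplename')
    assert key.startswith(b'P')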
@@ -1,22 +0,0 @@
class FindShortestID:
    __slots__ = 'short_id', 'new_id'

    def __init__(self):
        self.short_id = ''
        self.new_id = None

    def step(self, other_id, new_id):
        self.new_id = new_id
        for i in range(len(self.new_id)):
            if other_id[i] != self.new_id[i]:
                if i > len(self.short_id)-1:
                    self.short_id = self.new_id[:i+1]
                break

    def finalize(self):
        if self.short_id:
            return '#'+self.short_id


def register_canonical_functions(connection):
    connection.create_aggregate("shortest_id", 2, FindShortestID)
|
@@ -1,3 +1,5 @@
import typing

CLAIM_TYPES = {
    'stream': 1,
    'channel': 2,
@@ -418,3 +420,28 @@ INDEXED_LANGUAGES = [
    'zh',
    'zu'
]


class ResolveResult(typing.NamedTuple):
    name: str
    normalized_name: str
    claim_hash: bytes
    tx_num: int
    position: int
    tx_hash: bytes
    height: int
    amount: int
    short_url: str
    is_controlling: bool
    canonical_url: str
    creation_height: int
    activation_height: int
    expiration_height: int
    effective_amount: int
    support_amount: int
    reposted: int
    last_takeover_height: typing.Optional[int]
    claims_in_channel: typing.Optional[int]
    channel_hash: typing.Optional[bytes]
    reposted_claim_hash: typing.Optional[bytes]
    signature_valid: typing.Optional[bool]
|
|
119
lbry/wallet/server/db/db.py
Normal file
|
@ -0,0 +1,119 @@
|
|||
import struct
|
||||
from typing import Optional
|
||||
from lbry.wallet.server.db import DB_PREFIXES
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
|
||||
|
||||
|
||||
class KeyValueStorage:
|
||||
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
|
||||
raise NotImplemented()
|
||||
|
||||
def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
|
||||
include_key=True, include_value=True, fill_cache=True):
|
||||
raise NotImplemented()
|
||||
|
||||
def write_batch(self, transaction: bool = False):
|
||||
raise NotImplemented()
|
||||
|
||||
def close(self):
|
||||
raise NotImplemented()
|
||||
|
||||
@property
|
||||
def closed(self) -> bool:
|
||||
raise NotImplemented()
|
||||
|
||||
|
||||
class PrefixDB:
|
||||
UNDO_KEY_STRUCT = struct.Struct(b'>Q')
|
||||
|
||||
def __init__(self, db: KeyValueStorage, max_undo_depth: int = 200, unsafe_prefixes=None):
|
||||
self._db = db
|
||||
self._op_stack = RevertableOpStack(db.get, unsafe_prefixes=unsafe_prefixes)
|
||||
self._max_undo_depth = max_undo_depth
|
||||
|
||||
def unsafe_commit(self):
|
||||
"""
|
||||
Write staged changes to the database without keeping undo information
|
||||
Changes written cannot be undone
|
||||
"""
|
||||
try:
|
||||
with self._db.write_batch(transaction=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
for staged_change in self._op_stack:
|
||||
if staged_change.is_put:
|
||||
batch_put(staged_change.key, staged_change.value)
|
||||
else:
|
||||
batch_delete(staged_change.key)
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def commit(self, height: int):
|
||||
"""
|
||||
Write changes for a block height to the database and keep undo information so that the changes can be reverted
|
||||
"""
|
||||
undo_ops = self._op_stack.get_undo_ops()
|
||||
delete_undos = []
|
||||
if height > self._max_undo_depth:
|
||||
delete_undos.extend(self._db.iterator(
|
||||
start=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(0),
|
||||
stop=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height - self._max_undo_depth),
|
||||
include_value=False
|
||||
))
|
||||
try:
|
||||
with self._db.write_batch(transaction=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
for staged_change in self._op_stack:
|
||||
if staged_change.is_put:
|
||||
batch_put(staged_change.key, staged_change.value)
|
||||
else:
|
||||
batch_delete(staged_change.key)
|
||||
for undo_to_delete in delete_undos:
|
||||
batch_delete(undo_to_delete)
|
||||
batch_put(DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height), undo_ops)
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def rollback(self, height: int):
|
||||
"""
|
||||
Revert changes for a block height
|
||||
"""
|
||||
undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height)
|
||||
self._op_stack.apply_packed_undo_ops(self._db.get(undo_key))
|
||||
try:
|
||||
with self._db.write_batch(transaction=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
for staged_change in self._op_stack:
|
||||
if staged_change.is_put:
|
||||
batch_put(staged_change.key, staged_change.value)
|
||||
else:
|
||||
batch_delete(staged_change.key)
|
||||
batch_delete(undo_key)
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
|
||||
return self._db.get(key, fill_cache=fill_cache)
|
||||
|
||||
def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
|
||||
include_key=True, include_value=True, fill_cache=True):
|
||||
return self._db.iterator(
|
||||
reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop,
|
||||
prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache
|
||||
)
|
||||
|
||||
def close(self):
|
||||
if not self._db.closed:
|
||||
self._db.close()
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
return self._db.closed
|
||||
|
||||
def stage_raw_put(self, key: bytes, value: bytes):
|
||||
self._op_stack.append_op(RevertablePut(key, value))
|
||||
|
||||
def stage_raw_delete(self, key: bytes, value: bytes):
|
||||
self._op_stack.append_op(RevertableDelete(key, value))
|
|
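The three write paths above (unsafe_commit, commit, rollback) all drain the same RevertableOpStack; commit additionally stores the packed undo ops under the undo prefix keyed by height, and rollback replays them. A usage sketch under the assumption that prefix_db wraps a concrete KeyValueStorage implementation (the key and value bytes are placeholders):

    # stage a put, then persist it with undo information for block 1000
    prefix_db.stage_raw_put(b'Pexample-key', b'example-value')
    prefix_db.commit(height=1000)
    assert prefix_db.get(b'Pexample-key') == b'example-value'

    # revert block 1000: the stored undo ops delete the key again
    prefix_db.rollback(height=1000)
    assert prefix_db.get(b'Pexample-key') is None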
@ -8,7 +8,7 @@ INDEX_DEFAULT_SETTINGS = {
|
|||
"number_of_shards": 1,
|
||||
"number_of_replicas": 0,
|
||||
"sort": {
|
||||
"field": ["trending_mixed", "release_time"],
|
||||
"field": ["trending_score", "release_time"],
|
||||
"order": ["desc", "desc"]
|
||||
}}
|
||||
},
|
||||
|
@ -30,8 +30,8 @@ INDEX_DEFAULT_SETTINGS = {
|
|||
"height": {"type": "integer"},
|
||||
"claim_type": {"type": "byte"},
|
||||
"censor_type": {"type": "byte"},
|
||||
"trending_mixed": {"type": "float"},
|
||||
"release_time": {"type": "long"},
|
||||
"trending_score": {"type": "double"},
|
||||
"release_time": {"type": "long"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -53,30 +53,32 @@ FIELDS = {
|
|||
'duration', 'release_time',
|
||||
'tags', 'languages', 'has_source', 'reposted_claim_type',
|
||||
'reposted_claim_id', 'repost_count',
|
||||
'trending_group', 'trending_mixed', 'trending_local', 'trending_global',
|
||||
'trending_score', 'tx_num'
|
||||
}
|
||||
|
||||
TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'claim_name', 'description', 'claim_id', 'censoring_channel_id',
|
||||
TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'description', 'claim_id', 'censoring_channel_id',
|
||||
'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature',
|
||||
'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id', 'tags'}
|
||||
'claim_name', 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id',
|
||||
'tags'}
|
||||
|
||||
RANGE_FIELDS = {
|
||||
'height', 'creation_height', 'activation_height', 'expiration_height',
|
||||
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
|
||||
'tx_position', 'channel_join', 'repost_count', 'limit_claims_per_channel',
|
||||
'amount', 'effective_amount', 'support_amount',
|
||||
'trending_group', 'trending_mixed', 'censor_type',
|
||||
'trending_local', 'trending_global',
|
||||
'trending_score', 'censor_type', 'tx_num'
|
||||
}
|
||||
|
||||
ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS
|
||||
|
||||
REPLACEMENTS = {
|
||||
'claim_name': 'normalized_name',
|
||||
'name': 'normalized_name',
|
||||
'txid': 'tx_id',
|
||||
'nout': 'tx_nout',
|
||||
'valid_channel_signature': 'is_signature_valid',
|
||||
'trending_mixed': 'trending_score',
|
||||
'reposted': 'repost_count',
|
||||
'stream_types': 'stream_type',
|
||||
'media_types': 'media_type',
|
||||
'reposted': 'repost_count'
|
||||
'valid_channel_signature': 'is_signature_valid'
|
||||
}
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
import time
|
||||
import asyncio
|
||||
import struct
|
||||
from binascii import unhexlify
|
||||
|
@ -8,8 +9,6 @@ from typing import Optional, List, Iterable, Union
|
|||
|
||||
from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError
|
||||
from elasticsearch.helpers import async_streaming_bulk
|
||||
|
||||
from lbry.crypto.base58 import Base58
|
||||
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
|
||||
from lbry.schema.result import Outputs, Censor
|
||||
from lbry.schema.tags import clean_tags
|
||||
|
@ -19,6 +18,7 @@ from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
|
|||
from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \
|
||||
RANGE_FIELDS, ALL_FIELDS
|
||||
from lbry.wallet.server.util import class_logger
|
||||
from lbry.wallet.server.db.common import ResolveResult
|
||||
|
||||
|
||||
class ChannelResolution(str):
|
||||
|
@ -42,7 +42,8 @@ class IndexVersionMismatch(Exception):
|
|||
class SearchIndex:
|
||||
VERSION = 1
|
||||
|
||||
def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200):
|
||||
def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200,
|
||||
half_life=0.4, whale_threshold=10000, whale_half_life=0.99):
|
||||
self.search_timeout = search_timeout
|
||||
self.sync_timeout = 600 # wont hit that 99% of the time, but can hit on a fresh import
|
||||
self.search_client: Optional[AsyncElasticsearch] = None
|
||||
|
@ -55,6 +56,9 @@ class SearchIndex:
|
|||
self.resolution_cache = LRUCache(2 ** 17)
|
||||
self._elastic_host = elastic_host
|
||||
self._elastic_port = elastic_port
|
||||
self._trending_half_life = half_life
|
||||
self._trending_whale_threshold = whale_threshold
|
||||
self._trending_whale_half_life = whale_half_life
|
||||
|
||||
async def get_index_version(self) -> int:
|
||||
try:
|
||||
|
@ -91,6 +95,7 @@ class SearchIndex:
|
|||
if index_version != self.VERSION:
|
||||
self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
|
||||
raise IndexVersionMismatch(index_version, self.VERSION)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
return acked
|
||||
|
||||
def stop(self):
|
||||
|
@ -103,15 +108,28 @@ class SearchIndex:
|
|||
|
||||
async def _consume_claim_producer(self, claim_producer):
|
||||
count = 0
|
||||
for op, doc in claim_producer:
|
||||
async for op, doc in claim_producer:
|
||||
if op == 'delete':
|
||||
yield {'_index': self.index, '_op_type': 'delete', '_id': doc}
|
||||
yield {
|
||||
'_index': self.index,
|
||||
'_op_type': 'delete',
|
||||
'_id': doc
|
||||
}
|
||||
else:
|
||||
yield extract_doc(doc, self.index)
|
||||
yield {
|
||||
'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS},
|
||||
'_id': doc['claim_id'],
|
||||
'_index': self.index,
|
||||
'_op_type': 'update',
|
||||
'doc_as_upsert': True
|
||||
}
|
||||
count += 1
|
||||
if count % 100 == 0:
|
||||
self.logger.info("Indexing in progress, %d claims.", count)
|
||||
self.logger.info("Indexing done for %d claims.", count)
|
||||
if count:
|
||||
self.logger.info("Indexing done for %d claims.", count)
|
||||
else:
|
||||
self.logger.debug("Indexing done for %d claims.", count)
|
||||
|
||||
async def claim_consumer(self, claim_producer):
|
||||
touched = set()
|
||||
|
@ -123,22 +141,98 @@ class SearchIndex:
|
|||
item = item.popitem()[1]
|
||||
touched.add(item['_id'])
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.logger.info("Indexing done.")
|
||||
self.logger.debug("Indexing done.")
|
||||
|
||||
def update_filter_query(self, censor_type, blockdict, channels=False):
|
||||
blockdict = {key[::-1].hex(): value[::-1].hex() for key, value in blockdict.items()}
|
||||
blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
|
||||
if channels:
|
||||
update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
else:
|
||||
update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
key = 'channel_id' if channels else 'claim_id'
|
||||
update['script'] = {
|
||||
"source": f"ctx._source.censor_type={censor_type}; ctx._source.censoring_channel_id=params[ctx._source.{key}]",
|
||||
"source": f"ctx._source.censor_type={censor_type}; "
|
||||
f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
|
||||
"lang": "painless",
|
||||
"params": blockdict
|
||||
}
|
||||
return update
|
||||
|
||||
async def update_trending_score(self, params):
|
||||
update_trending_score_script = """
|
||||
double softenLBC(double lbc) { Math.pow(lbc, 1.0f / 3.0f) }
|
||||
double inflateUnits(int height) {
|
||||
int renormalizationPeriod = 100000;
|
||||
double doublingRate = 400.0f;
|
||||
Math.pow(2.0, (height % renormalizationPeriod) / doublingRate)
|
||||
}
|
||||
double spikePower(double newAmount) {
|
||||
if (newAmount < 50.0) {
|
||||
0.5
|
||||
} else if (newAmount < 85.0) {
|
||||
newAmount / 100.0
|
||||
} else {
|
||||
0.85
|
||||
}
|
||||
}
|
||||
double spikeMass(double oldAmount, double newAmount) {
|
||||
double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
|
||||
double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
|
||||
double power = spikePower(newAmount);
|
||||
if (oldAmount > newAmount) {
|
||||
-1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
|
||||
} else {
|
||||
Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
|
||||
}
|
||||
}
|
||||
for (i in params.src.changes) {
|
||||
double units = inflateUnits(i.height);
|
||||
if (i.added) {
|
||||
if (ctx._source.trending_score == null) {
|
||||
ctx._source.trending_score = (units * spikeMass(i.prev_amount, i.prev_amount + i.new_amount));
|
||||
} else {
|
||||
ctx._source.trending_score += (units * spikeMass(i.prev_amount, i.prev_amount + i.new_amount));
|
||||
}
|
||||
} else {
|
||||
if (ctx._source.trending_score == null) {
|
||||
ctx._source.trending_score = (units * spikeMass(i.prev_amount, i.prev_amount - i.new_amount));
|
||||
} else {
|
||||
ctx._source.trending_score += (units * spikeMass(i.prev_amount, i.prev_amount - i.new_amount));
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
start = time.perf_counter()
|
||||
|
||||
def producer():
|
||||
for claim_id, claim_updates in params.items():
|
||||
yield {
|
||||
'_id': claim_id,
|
||||
'_index': self.index,
|
||||
'_op_type': 'update',
|
||||
'script': {
|
||||
'lang': 'painless',
|
||||
'source': update_trending_score_script,
|
||||
'params': {'src': {
|
||||
'changes': [
|
||||
{
|
||||
'height': p.height,
|
||||
'added': p.added,
|
||||
'prev_amount': p.prev_amount * 1E-9,
|
||||
'new_amount': p.new_amount * 1E-9,
|
||||
} for p in claim_updates
|
||||
]
|
||||
}}
|
||||
},
|
||||
}
|
||||
if not params:
|
||||
return
|
||||
async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False):
|
||||
if not ok:
|
||||
self.logger.warning("updating trending failed for an item: %s", item)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000))
|
||||
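Because the painless script above is pure arithmetic, it can be read as ordinary Python. The transcription below is illustrative only (not code from this diff); amounts are in LBC, since the producer divides the dewey values by 1E9 before sending them:

    def soften_lbc(lbc: float) -> float:
        return lbc ** (1.0 / 3.0)

    def inflate_units(height: int) -> float:
        renormalization_period = 100_000
        doubling_rate = 400.0
        return 2.0 ** ((height % renormalization_period) / doubling_rate)

    def spike_power(new_amount: float) -> float:
        if new_amount < 50.0:
            return 0.5
        if new_amount < 85.0:
            return new_amount / 100.0
        return 0.85

    def spike_mass(old_amount: float, new_amount: float) -> float:
        softened_change = soften_lbc(abs(new_amount - old_amount))
        change_in_softened = abs(soften_lbc(new_amount) - soften_lbc(old_amount))
        power = spike_power(new_amount)
        sign = -1.0 if old_amount > new_amount else 1.0
        return sign * (change_in_softened ** power) * (softened_change ** (1.0 - power))

    # a 10 LBC support added at height 900_000 to a claim with 100 LBC already staked
    delta = inflate_units(900_000) * spike_mass(100.0, 110.0)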
|
||||
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
|
||||
if filtered_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
|
@ -170,52 +264,82 @@ class SearchIndex:
|
|||
self.claim_cache.clear()
|
||||
self.resolution_cache.clear()
|
||||
|
||||
async def session_query(self, query_name, kwargs):
|
||||
offset, total = kwargs.get('offset', 0) if isinstance(kwargs, dict) else 0, 0
|
||||
async def cached_search(self, kwargs):
|
||||
total_referenced = []
|
||||
if query_name == 'resolve':
|
||||
total_referenced, response, censor = await self.resolve(*kwargs)
|
||||
else:
|
||||
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
|
||||
if cache_item.result is not None:
|
||||
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
|
||||
if cache_item.result is not None:
|
||||
return cache_item.result
|
||||
async with cache_item.lock:
|
||||
if cache_item.result:
|
||||
return cache_item.result
|
||||
async with cache_item.lock:
|
||||
if cache_item.result:
|
||||
return cache_item.result
|
||||
censor = Censor(Censor.SEARCH)
|
||||
if kwargs.get('no_totals'):
|
||||
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
else:
|
||||
response, offset, total = await self.search(**kwargs)
|
||||
censor.apply(response)
|
||||
censor = Censor(Censor.SEARCH)
|
||||
if kwargs.get('no_totals'):
|
||||
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
else:
|
||||
response, offset, total = await self.search(**kwargs)
|
||||
censor.apply(response)
|
||||
total_referenced.extend(response)
|
||||
|
||||
if censor.censored:
|
||||
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
total_referenced.extend(response)
|
||||
if censor.censored:
|
||||
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
total_referenced.extend(response)
|
||||
result = Outputs.to_base64(
|
||||
response, await self._get_referenced_rows(total_referenced), offset, total, censor
|
||||
)
|
||||
cache_item.result = result
|
||||
return result
|
||||
return Outputs.to_base64(response, await self._get_referenced_rows(total_referenced), offset, total, censor)
|
||||
|
||||
async def resolve(self, *urls):
|
||||
censor = Censor(Censor.RESOLVE)
|
||||
results = [await self.resolve_url(url) for url in urls]
|
||||
# just heat the cache
|
||||
await self.populate_claim_cache(*filter(lambda x: isinstance(x, str), results))
|
||||
results = [self._get_from_cache_or_error(url, result) for url, result in zip(urls, results)]
|
||||
|
||||
censored = [
|
||||
result if not isinstance(result, dict) or not censor.censor(result)
|
||||
else ResolveCensoredError(url, result['censoring_channel_id'])
|
||||
for url, result in zip(urls, results)
|
||||
]
|
||||
return results, censored, censor
|
||||
|
||||
def _get_from_cache_or_error(self, url: str, resolution: Union[LookupError, StreamResolution, ChannelResolution]):
|
||||
cached = self.claim_cache.get(resolution)
|
||||
return cached or (resolution if isinstance(resolution, LookupError) else resolution.lookup_error(url))
|
||||
response = [
|
||||
ResolveResult(
|
||||
name=r['claim_name'],
|
||||
normalized_name=r['normalized_name'],
|
||||
claim_hash=r['claim_hash'],
|
||||
tx_num=r['tx_num'],
|
||||
position=r['tx_nout'],
|
||||
tx_hash=r['tx_hash'],
|
||||
height=r['height'],
|
||||
amount=r['amount'],
|
||||
short_url=r['short_url'],
|
||||
is_controlling=r['is_controlling'],
|
||||
canonical_url=r['canonical_url'],
|
||||
creation_height=r['creation_height'],
|
||||
activation_height=r['activation_height'],
|
||||
expiration_height=r['expiration_height'],
|
||||
effective_amount=r['effective_amount'],
|
||||
support_amount=r['support_amount'],
|
||||
last_takeover_height=r['last_take_over_height'],
|
||||
claims_in_channel=r['claims_in_channel'],
|
||||
channel_hash=r['channel_hash'],
|
||||
reposted_claim_hash=r['reposted_claim_hash'],
|
||||
reposted=r['reposted'],
|
||||
signature_valid=r['signature_valid']
|
||||
) for r in response
|
||||
]
|
||||
extra = [
|
||||
ResolveResult(
|
||||
name=r['claim_name'],
|
||||
normalized_name=r['normalized_name'],
|
||||
claim_hash=r['claim_hash'],
|
||||
tx_num=r['tx_num'],
|
||||
position=r['tx_nout'],
|
||||
tx_hash=r['tx_hash'],
|
||||
height=r['height'],
|
||||
amount=r['amount'],
|
||||
short_url=r['short_url'],
|
||||
is_controlling=r['is_controlling'],
|
||||
canonical_url=r['canonical_url'],
|
||||
creation_height=r['creation_height'],
|
||||
activation_height=r['activation_height'],
|
||||
expiration_height=r['expiration_height'],
|
||||
effective_amount=r['effective_amount'],
|
||||
support_amount=r['support_amount'],
|
||||
last_takeover_height=r['last_take_over_height'],
|
||||
claims_in_channel=r['claims_in_channel'],
|
||||
channel_hash=r['channel_hash'],
|
||||
reposted_claim_hash=r['reposted_claim_hash'],
|
||||
reposted=r['reposted'],
|
||||
signature_valid=r['signature_valid']
|
||||
) for r in await self._get_referenced_rows(total_referenced)
|
||||
]
|
||||
result = Outputs.to_base64(
|
||||
response, extra, offset, total, censor
|
||||
)
|
||||
cache_item.result = result
|
||||
return result
|
||||
|
||||
async def get_many(self, *claim_ids):
|
||||
await self.populate_claim_cache(*claim_ids)
|
||||
|
@ -247,15 +371,11 @@ class SearchIndex:
|
|||
return self.short_id_cache.get(key, None)
|
||||
|
||||
async def search(self, **kwargs):
|
||||
if 'channel' in kwargs:
|
||||
kwargs['channel_id'] = await self.resolve_url(kwargs.pop('channel'))
|
||||
if not kwargs['channel_id'] or not isinstance(kwargs['channel_id'], str):
|
||||
return [], 0, 0
|
||||
try:
|
||||
return await self.search_ahead(**kwargs)
|
||||
except NotFoundError:
|
||||
return [], 0, 0
|
||||
return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
|
||||
# return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
|
||||
|
||||
async def search_ahead(self, **kwargs):
|
||||
# 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return
|
||||
|
@ -335,78 +455,6 @@ class SearchIndex:
|
|||
next_page_hits_maybe_check_later.append((hit_id, hit_channel_id))
|
||||
return reordered_hits
|
||||
|
||||
async def resolve_url(self, raw_url):
|
||||
if raw_url not in self.resolution_cache:
|
||||
self.resolution_cache[raw_url] = await self._resolve_url(raw_url)
|
||||
return self.resolution_cache[raw_url]
|
||||
|
||||
async def _resolve_url(self, raw_url):
|
||||
try:
|
||||
url = URL.parse(raw_url)
|
||||
except ValueError as e:
|
||||
return e
|
||||
|
||||
stream = LookupError(f'Could not find claim at "{raw_url}".')
|
||||
|
||||
channel_id = await self.resolve_channel_id(url)
|
||||
if isinstance(channel_id, LookupError):
|
||||
return channel_id
|
||||
stream = (await self.resolve_stream(url, channel_id if isinstance(channel_id, str) else None)) or stream
|
||||
if url.has_stream:
|
||||
return StreamResolution(stream)
|
||||
else:
|
||||
return ChannelResolution(channel_id)
|
||||
|
||||
async def resolve_channel_id(self, url: URL):
|
||||
if not url.has_channel:
|
||||
return
|
||||
if url.channel.is_fullid:
|
||||
return url.channel.claim_id
|
||||
if url.channel.is_shortid:
|
||||
channel_id = await self.full_id_from_short_id(url.channel.name, url.channel.claim_id)
|
||||
if not channel_id:
|
||||
return LookupError(f'Could not find channel in "{url}".')
|
||||
return channel_id
|
||||
|
||||
query = url.channel.to_dict()
|
||||
if set(query) == {'name'}:
|
||||
query['is_controlling'] = True
|
||||
else:
|
||||
query['order_by'] = ['^creation_height']
|
||||
matches, _, _ = await self.search(**query, limit=1)
|
||||
if matches:
|
||||
channel_id = matches[0]['claim_id']
|
||||
else:
|
||||
return LookupError(f'Could not find channel in "{url}".')
|
||||
return channel_id
|
||||
|
||||
async def resolve_stream(self, url: URL, channel_id: str = None):
|
||||
if not url.has_stream:
|
||||
return None
|
||||
if url.has_channel and channel_id is None:
|
||||
return None
|
||||
query = url.stream.to_dict()
|
||||
if url.stream.claim_id is not None:
|
||||
if url.stream.is_fullid:
|
||||
claim_id = url.stream.claim_id
|
||||
else:
|
||||
claim_id = await self.full_id_from_short_id(query['name'], query['claim_id'], channel_id)
|
||||
return claim_id
|
||||
|
||||
if channel_id is not None:
|
||||
if set(query) == {'name'}:
|
||||
# temporarily emulate is_controlling for claims in channel
|
||||
query['order_by'] = ['effective_amount', '^height']
|
||||
else:
|
||||
query['order_by'] = ['^channel_join']
|
||||
query['channel_id'] = channel_id
|
||||
query['signature_valid'] = True
|
||||
elif set(query) == {'name'}:
|
||||
query['is_controlling'] = True
|
||||
matches, _, _ = await self.search(**query, limit=1)
|
||||
if matches:
|
||||
return matches[0]['claim_id']
|
||||
|
||||
async def _get_referenced_rows(self, txo_rows: List[dict]):
|
||||
txo_rows = [row for row in txo_rows if isinstance(row, dict)]
|
||||
referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows)))
|
||||
|
@ -424,33 +472,6 @@ class SearchIndex:
|
|||
return referenced_txos
|
||||
|
||||
|
||||
def extract_doc(doc, index):
|
||||
doc['claim_id'] = doc.pop('claim_hash')[::-1].hex()
|
||||
if doc['reposted_claim_hash'] is not None:
|
||||
doc['reposted_claim_id'] = doc.pop('reposted_claim_hash')[::-1].hex()
|
||||
else:
|
||||
doc['reposted_claim_id'] = None
|
||||
channel_hash = doc.pop('channel_hash')
|
||||
doc['channel_id'] = channel_hash[::-1].hex() if channel_hash else channel_hash
|
||||
doc['censoring_channel_id'] = doc.get('censoring_channel_id')
|
||||
txo_hash = doc.pop('txo_hash')
|
||||
doc['tx_id'] = txo_hash[:32][::-1].hex()
|
||||
doc['tx_nout'] = struct.unpack('<I', txo_hash[32:])[0]
|
||||
doc['repost_count'] = doc.pop('reposted')
|
||||
doc['is_controlling'] = bool(doc['is_controlling'])
|
||||
doc['signature'] = (doc.pop('signature') or b'').hex() or None
|
||||
doc['signature_digest'] = (doc.pop('signature_digest') or b'').hex() or None
|
||||
doc['public_key_bytes'] = (doc.pop('public_key_bytes') or b'').hex() or None
|
||||
doc['public_key_id'] = (doc.pop('public_key_hash') or b'').hex() or None
|
||||
doc['is_signature_valid'] = bool(doc['signature_valid'])
|
||||
doc['claim_type'] = doc.get('claim_type', 0) or 0
|
||||
doc['stream_type'] = int(doc.get('stream_type', 0) or 0)
|
||||
doc['has_source'] = bool(doc['has_source'])
|
||||
doc['normalized_name'] = doc.pop('normalized')
|
||||
doc = {key: value for key, value in doc.items() if key in ALL_FIELDS}
|
||||
return {'doc': doc, '_id': doc['claim_id'], '_index': index, '_op_type': 'update', 'doc_as_upsert': True}
|
||||
|
||||
|
||||
def expand_query(**kwargs):
|
||||
if "amount_order" in kwargs:
|
||||
kwargs["limit"] = 1
|
||||
|
@ -462,6 +483,8 @@ def expand_query(**kwargs):
|
|||
kwargs.pop('is_controlling')
|
||||
query = {'must': [], 'must_not': []}
|
||||
collapse = None
|
||||
if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
|
||||
kwargs['fee_currency'] = kwargs['fee_currency'].upper()
|
||||
for key, value in kwargs.items():
|
||||
key = key.replace('claim.', '')
|
||||
many = key.endswith('__in') or isinstance(value, list)
|
||||
|
@ -481,7 +504,7 @@ def expand_query(**kwargs):
|
|||
else:
|
||||
value = [CLAIM_TYPES[claim_type] for claim_type in value]
|
||||
elif key == 'stream_type':
|
||||
value = STREAM_TYPES[value] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
|
||||
value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
|
||||
if key == '_id':
|
||||
if isinstance(value, Iterable):
|
||||
value = [item[::-1].hex() for item in value]
|
||||
|
@ -489,8 +512,6 @@ def expand_query(**kwargs):
|
|||
value = value[::-1].hex()
|
||||
if not many and key in ('_id', 'claim_id') and len(value) < 20:
|
||||
partial_id = True
|
||||
if key == 'public_key_id':
|
||||
value = Base58.decode(value)[1:21].hex()
|
||||
if key in ('signature_valid', 'has_source'):
|
||||
continue # handled later
|
||||
if key in TEXT_FIELDS:
|
||||
|
@ -537,13 +558,13 @@ def expand_query(**kwargs):
|
|||
elif key == 'limit_claims_per_channel':
|
||||
collapse = ('channel_id.keyword', value)
|
||||
if kwargs.get('has_channel_signature'):
|
||||
query['must'].append({"exists": {"field": "signature_digest"}})
|
||||
query['must'].append({"exists": {"field": "signature"}})
|
||||
if 'signature_valid' in kwargs:
|
||||
query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
elif 'signature_valid' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature_digest"}}}})
|
||||
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
|
||||
query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
if 'has_source' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
|
@ -612,7 +633,9 @@ def expand_result(results):
|
|||
result['tx_hash'] = unhexlify(result['tx_id'])[::-1]
|
||||
result['reposted'] = result.pop('repost_count')
|
||||
result['signature_valid'] = result.pop('is_signature_valid')
|
||||
result['normalized'] = result.pop('normalized_name')
|
||||
# result['normalized'] = result.pop('normalized_name')
|
||||
# if result['censoring_channel_hash']:
|
||||
# result['censoring_channel_hash'] = unhexlify(result['censoring_channel_hash'])[::-1]
|
||||
expanded.append(result)
|
||||
if inner_hits:
|
||||
return expand_result(inner_hits)
|
||||
|
|
|
@ -1,70 +1,37 @@
|
|||
import argparse
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from collections import namedtuple
|
||||
from multiprocessing import Process
|
||||
|
||||
import sqlite3
|
||||
from elasticsearch import AsyncElasticsearch
|
||||
from elasticsearch.helpers import async_bulk
|
||||
from lbry.wallet.server.env import Env
|
||||
from lbry.wallet.server.coin import LBC
|
||||
from lbry.wallet.server.db.elasticsearch.search import extract_doc, SearchIndex, IndexVersionMismatch
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.db.elasticsearch.search import SearchIndex, IndexVersionMismatch
|
||||
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS
|
||||
|
||||
|
||||
async def get_all(db, shard_num, shards_total, limit=0, index_name='claims'):
|
||||
logging.info("shard %d starting", shard_num)
|
||||
|
||||
def namedtuple_factory(cursor, row):
|
||||
Row = namedtuple('Row', (d[0] for d in cursor.description))
|
||||
return Row(*row)
|
||||
db.row_factory = namedtuple_factory
|
||||
total = db.execute(f"select count(*) as total from claim where height % {shards_total} = {shard_num};").fetchone()[0]
|
||||
for num, claim in enumerate(db.execute(f"""
|
||||
SELECT claimtrie.claim_hash as is_controlling,
|
||||
claimtrie.last_take_over_height,
|
||||
(select group_concat(tag, ',,') from tag where tag.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as tags,
|
||||
(select group_concat(language, ' ') from language where language.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as languages,
|
||||
cr.has_source as reposted_has_source,
|
||||
cr.claim_type as reposted_claim_type,
|
||||
cr.stream_type as reposted_stream_type,
|
||||
cr.media_type as reposted_media_type,
|
||||
cr.duration as reposted_duration,
|
||||
cr.fee_amount as reposted_fee_amount,
|
||||
cr.fee_currency as reposted_fee_currency,
|
||||
claim.*
|
||||
FROM claim LEFT JOIN claimtrie USING (claim_hash) LEFT JOIN claim cr ON cr.claim_hash=claim.reposted_claim_hash
|
||||
WHERE claim.height % {shards_total} = {shard_num}
|
||||
ORDER BY claim.height desc
|
||||
""")):
|
||||
claim = dict(claim._asdict())
|
||||
claim['has_source'] = bool(claim.pop('reposted_has_source') or claim['has_source'])
|
||||
claim['stream_type'] = claim.pop('reposted_stream_type') or claim['stream_type']
|
||||
claim['media_type'] = claim.pop('reposted_media_type') or claim['media_type']
|
||||
claim['fee_amount'] = claim.pop('reposted_fee_amount') or claim['fee_amount']
|
||||
claim['fee_currency'] = claim.pop('reposted_fee_currency') or claim['fee_currency']
|
||||
claim['duration'] = claim.pop('reposted_duration') or claim['duration']
|
||||
claim['censor_type'] = 0
|
||||
claim['censoring_channel_id'] = None
|
||||
claim['tags'] = claim['tags'].split(',,') if claim['tags'] else []
|
||||
claim['languages'] = claim['languages'].split(' ') if claim['languages'] else []
|
||||
if num % 10_000 == 0:
|
||||
logging.info("%d/%d", num, total)
|
||||
yield extract_doc(claim, index_name)
|
||||
if 0 < limit <= num:
|
||||
break
|
||||
|
||||
|
||||
async def consume(producer, index_name):
|
||||
async def get_all_claims(index_name='claims', db=None):
|
||||
env = Env(LBC)
|
||||
logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
|
||||
es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
|
||||
need_open = db is None
|
||||
db = db or LevelDB(env)
|
||||
if need_open:
|
||||
await db.open_dbs()
|
||||
try:
|
||||
await async_bulk(es, producer, request_timeout=120)
|
||||
await es.indices.refresh(index=index_name)
|
||||
cnt = 0
|
||||
async for claim in db.all_claims_producer():
|
||||
yield {
|
||||
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
|
||||
'_id': claim['claim_id'],
|
||||
'_index': index_name,
|
||||
'_op_type': 'update',
|
||||
'doc_as_upsert': True
|
||||
}
|
||||
cnt += 1
|
||||
if cnt % 10000 == 0:
|
||||
print(f"{cnt} claims sent")
|
||||
finally:
|
||||
await es.close()
|
||||
if need_open:
|
||||
db.close()
|
||||
|
||||
|
||||
async def make_es_index(index=None):
|
||||
|
@ -85,16 +52,17 @@ async def make_es_index(index=None):
|
|||
index.stop()
|
||||
|
||||
|
||||
async def run(db_path, clients, blocks, shard, index_name='claims'):
|
||||
db = sqlite3.connect(db_path, isolation_level=None, check_same_thread=False, uri=True)
|
||||
db.execute('pragma journal_mode=wal;')
|
||||
db.execute('pragma temp_store=memory;')
|
||||
producer = get_all(db, shard, clients, limit=blocks, index_name=index_name)
|
||||
await asyncio.gather(*(consume(producer, index_name=index_name) for _ in range(min(8, clients))))
|
||||
async def run_sync(index_name='claims', db=None, clients=32):
|
||||
env = Env(LBC)
|
||||
logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
|
||||
es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
|
||||
claim_generator = get_all_claims(index_name=index_name, db=db)
|
||||
|
||||
|
||||
def __run(args, shard):
|
||||
asyncio.run(run(args.db_path, args.clients, args.blocks, shard))
|
||||
try:
|
||||
await async_bulk(es, claim_generator, request_timeout=600)
|
||||
await es.indices.refresh(index=index_name)
|
||||
finally:
|
||||
await es.close()
|
||||
|
||||
|
||||
def run_elastic_sync():
|
||||
|
@ -104,23 +72,17 @@ def run_elastic_sync():
|
|||
|
||||
logging.info('lbry.server starting')
|
||||
parser = argparse.ArgumentParser(prog="lbry-hub-elastic-sync")
|
||||
parser.add_argument("db_path", type=str)
|
||||
parser.add_argument("-c", "--clients", type=int, default=16)
|
||||
# parser.add_argument("db_path", type=str)
|
||||
parser.add_argument("-c", "--clients", type=int, default=32)
|
||||
parser.add_argument("-b", "--blocks", type=int, default=0)
|
||||
parser.add_argument("-f", "--force", default=False, action='store_true')
|
||||
args = parser.parse_args()
|
||||
processes = []
|
||||
|
||||
if not args.force and not os.path.exists(args.db_path):
|
||||
logging.info("DB path doesnt exist")
|
||||
return
|
||||
# if not args.force and not os.path.exists(args.db_path):
|
||||
# logging.info("DB path doesnt exist")
|
||||
# return
|
||||
|
||||
if not args.force and not asyncio.run(make_es_index()):
|
||||
logging.info("ES is already initialized")
|
||||
return
|
||||
for i in range(args.clients):
|
||||
processes.append(Process(target=__run, args=(args, i)))
|
||||
processes[-1].start()
|
||||
for process in processes:
|
||||
process.join()
|
||||
process.close()
|
||||
asyncio.run(run_sync(clients=args.clients))
|
||||
|
|
1613
lbry/wallet/server/db/prefixes.py
Normal file
File diff suppressed because it is too large
147
lbry/wallet/server/db/revertable.py
Normal file
|
@ -0,0 +1,147 @@
|
|||
import struct
|
||||
import logging
|
||||
from string import printable
|
||||
from collections import defaultdict
|
||||
from typing import Tuple, Iterable, Callable, Optional
|
||||
from lbry.wallet.server.db import DB_PREFIXES
|
||||
|
||||
_OP_STRUCT = struct.Struct('>BLL')
|
||||
log = logging.getLogger()
|
||||
|
||||
|
||||
class RevertableOp:
|
||||
__slots__ = [
|
||||
'key',
|
||||
'value',
|
||||
]
|
||||
is_put = 0
|
||||
|
||||
def __init__(self, key: bytes, value: bytes):
|
||||
self.key = key
|
||||
self.value = value
|
||||
|
||||
@property
|
||||
def is_delete(self) -> bool:
|
||||
return not self.is_put
|
||||
|
||||
def invert(self) -> 'RevertableOp':
|
||||
raise NotImplementedError()
|
||||
|
||||
def pack(self) -> bytes:
|
||||
"""
|
||||
Serialize to bytes
|
||||
"""
|
||||
return struct.pack(
|
||||
f'>BLL{len(self.key)}s{len(self.value)}s', int(self.is_put), len(self.key), len(self.value), self.key,
|
||||
self.value
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def unpack(cls, packed: bytes) -> Tuple['RevertableOp', bytes]:
|
||||
"""
|
||||
Deserialize from bytes
|
||||
|
||||
:param packed: bytes containing at least one packed revertable op
|
||||
:return: tuple of the deserialized op (a put or a delete) and the remaining serialized bytes
|
||||
"""
|
||||
is_put, key_len, val_len = _OP_STRUCT.unpack(packed[:9])
|
||||
key = packed[9:9 + key_len]
|
||||
value = packed[9 + key_len:9 + key_len + val_len]
|
||||
if is_put == 1:
|
||||
return RevertablePut(key, value), packed[9 + key_len + val_len:]
|
||||
return RevertableDelete(key, value), packed[9 + key_len + val_len:]
|
||||
|
||||
def __eq__(self, other: 'RevertableOp') -> bool:
|
||||
return (self.is_put, self.key, self.value) == (other.is_put, other.key, other.value)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return str(self)
|
||||
|
||||
def __str__(self) -> str:
|
||||
from lbry.wallet.server.db.prefixes import auto_decode_item
|
||||
k, v = auto_decode_item(self.key, self.value)
|
||||
key = ''.join(c if c in printable else '.' for c in str(k))
|
||||
val = ''.join(c if c in printable else '.' for c in str(v))
|
||||
return f"{'PUT' if self.is_put else 'DELETE'} {DB_PREFIXES(self.key[:1]).name}: {key} | {val}"
|
||||
|
||||
|
||||
class RevertableDelete(RevertableOp):
|
||||
def invert(self):
|
||||
return RevertablePut(self.key, self.value)
|
||||
|
||||
|
||||
class RevertablePut(RevertableOp):
|
||||
is_put = True
|
||||
|
||||
def invert(self):
|
||||
return RevertableDelete(self.key, self.value)
|
||||
|
||||
|
||||
class OpStackIntegrity(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class RevertableOpStack:
|
||||
def __init__(self, get_fn: Callable[[bytes], Optional[bytes]], unsafe_prefixes=None):
|
||||
self._get = get_fn
|
||||
self._items = defaultdict(list)
|
||||
self._unsafe_prefixes = unsafe_prefixes or set()
|
||||
|
||||
def append_op(self, op: RevertableOp):
|
||||
inverted = op.invert()
|
||||
if self._items[op.key] and inverted == self._items[op.key][-1]:
|
||||
self._items[op.key].pop() # if the new op is the inverse of the last op, we can safely null both
|
||||
return
|
||||
elif self._items[op.key] and self._items[op.key][-1] == op: # duplicate of last op
|
||||
return # raise an error?
|
||||
stored_val = self._get(op.key)
|
||||
has_stored_val = stored_val is not None
|
||||
delete_stored_op = None if not has_stored_val else RevertableDelete(op.key, stored_val)
|
||||
will_delete_existing_stored = False if delete_stored_op is None else (delete_stored_op in self._items[op.key])
|
||||
try:
|
||||
if op.is_put and has_stored_val and not will_delete_existing_stored:
|
||||
raise OpStackIntegrity(
|
||||
f"db op tries to add on top of existing key without deleting first: {op}"
|
||||
)
|
||||
elif op.is_delete and has_stored_val and stored_val != op.value and not will_delete_existing_stored:
|
||||
# there is a value and we're not deleting it in this op
|
||||
# check that a delete for the stored value is in the stack
|
||||
raise OpStackIntegrity(f"delete {op}")
|
||||
elif op.is_delete and not has_stored_val:
|
||||
raise OpStackIntegrity(f"db op tries to delete nonexistent key: {op}")
|
||||
elif op.is_delete and stored_val != op.value:
|
||||
raise OpStackIntegrity(f"db op tries to delete with incorrect value: {op}")
|
||||
except OpStackIntegrity as err:
|
||||
if op.key[:1] in self._unsafe_prefixes:
|
||||
log.error(f"skipping over integrity error: {err}")
|
||||
else:
|
||||
raise err
|
||||
self._items[op.key].append(op)
|
||||
|
||||
def extend_ops(self, ops: Iterable[RevertableOp]):
|
||||
for op in ops:
|
||||
self.append_op(op)
|
||||
|
||||
def clear(self):
|
||||
self._items.clear()
|
||||
|
||||
def __len__(self):
|
||||
return sum(map(len, self._items.values()))
|
||||
|
||||
def __iter__(self):
|
||||
for key, ops in self._items.items():
|
||||
for op in ops:
|
||||
yield op
|
||||
|
||||
def __reversed__(self):
|
||||
for key, ops in self._items.items():
|
||||
for op in reversed(ops):
|
||||
yield op
|
||||
|
||||
def get_undo_ops(self) -> bytes:
|
||||
return b''.join(op.invert().pack() for op in reversed(self))
|
||||
|
||||
def apply_packed_undo_ops(self, packed: bytes):
|
||||
while packed:
|
||||
op, packed = RevertableOp.unpack(packed)
|
||||
self.append_op(op)
|
|
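For orientation, a minimal usage sketch of the RevertableOpStack defined above. The dict used as a backing store and the key/value bytes are purely illustrative stand-ins for the real LevelDB-backed get function.

backing = {}
stack = RevertableOpStack(backing.get)

stack.append_op(RevertablePut(b'Ckey', b'value'))
undo = stack.get_undo_ops()          # packed inverse ops, in reverse order
for staged in stack:                 # flush the staged ops into the store
    if staged.is_put:
        backing[staged.key] = staged.value
    else:
        backing.pop(staged.key, None)
stack.clear()

# later, e.g. while handling a reorg, stage the rollback (and flush again to apply it):
stack.apply_packed_undo_ops(undo)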
@@ -1,9 +0,0 @@
|
|||
from . import zscore
|
||||
from . import ar
|
||||
from . import variable_decay
|
||||
|
||||
TRENDING_ALGORITHMS = {
|
||||
'zscore': zscore,
|
||||
'ar': ar,
|
||||
'variable_decay': variable_decay
|
||||
}
|
|
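A hedged sketch of how the name-to-module lookup above might be consumed; the comma-separated setting string and the helper name are assumptions for illustration, not part of this module.

def resolve_trending_algorithms(setting: str):
    """Map e.g. 'zscore,variable_decay' onto the modules registered above."""
    modules = []
    for name in filter(None, (part.strip() for part in setting.split(','))):
        if name not in TRENDING_ALGORITHMS:
            raise ValueError(f"unknown trending algorithm: {name}")
        modules.append(TRENDING_ALGORITHMS[name])
    return modules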
@@ -1,265 +0,0 @@
|
|||
import copy
|
||||
import math
|
||||
import time
|
||||
|
||||
# Half life in blocks
|
||||
HALF_LIFE = 134
|
||||
|
||||
# Decay coefficient per block
|
||||
DECAY = 0.5**(1.0/HALF_LIFE)
|
||||
|
||||
# How frequently to write trending values to the db
|
||||
SAVE_INTERVAL = 10
|
||||
|
||||
# Renormalisation interval
|
||||
RENORM_INTERVAL = 1000
|
||||
|
||||
# Assertion
|
||||
assert RENORM_INTERVAL % SAVE_INTERVAL == 0
|
||||
|
||||
# Decay coefficient per renormalisation interval
|
||||
DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL)
|
||||
|
||||
# Log trending calculations?
|
||||
TRENDING_LOG = True
|
||||
|
||||
|
||||
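A quick numeric sanity check of the constants above (illustrative arithmetic only, not part of the algorithm):

assert abs(DECAY ** HALF_LIFE - 0.5) < 1e-9      # half life holds by construction
assert 0.0056 < DECAY_PER_RENORM < 0.0057        # scores shrink ~177x per 1000-block renorm
# per block, a score keeps DECAY ≈ 0.99484 of its value, i.e. loses about 0.52%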
def install(connection):
|
||||
"""
|
||||
Install the AR trending algorithm.
|
||||
"""
|
||||
check_trending_values(connection)
|
||||
|
||||
if TRENDING_LOG:
|
||||
f = open("trending_ar.log", "a")
|
||||
f.close()
|
||||
|
||||
# Stub
|
||||
CREATE_TREND_TABLE = ""
|
||||
|
||||
|
||||
def check_trending_values(connection):
|
||||
"""
|
||||
If the trending values appear to be based on the zscore algorithm,
|
||||
reset them. This will allow resyncing from a standard snapshot.
|
||||
"""
|
||||
c = connection.cursor()
|
||||
needs_reset = False
|
||||
for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"):
|
||||
if row[0] != 0:
|
||||
needs_reset = True
|
||||
break
|
||||
|
||||
if needs_reset:
|
||||
print("Resetting some columns. This might take a while...", flush=True, end="")
|
||||
c.execute(""" BEGIN;
|
||||
UPDATE claim SET trending_group = 0;
|
||||
UPDATE claim SET trending_mixed = 0;
|
||||
UPDATE claim SET trending_global = 0;
|
||||
UPDATE claim SET trending_local = 0;
|
||||
COMMIT;""")
|
||||
print("done.")
|
||||
|
||||
|
||||
def spike_height(trending_score, x, x_old, time_boost=1.0):
|
||||
"""
|
||||
Compute the size of a trending spike.
|
||||
"""
|
||||
|
||||
# Change in softened amount
|
||||
change_in_softened_amount = x**0.25 - x_old**0.25
|
||||
|
||||
# Softened change in amount
|
||||
delta = x - x_old
|
||||
softened_change_in_amount = abs(delta)**0.25
|
||||
|
||||
# Softened change in amount counts more for minnows
|
||||
if delta > 0.0:
|
||||
if trending_score >= 0.0:
|
||||
multiplier = 0.1/((trending_score/time_boost + softened_change_in_amount) + 1.0)
|
||||
softened_change_in_amount *= multiplier
|
||||
else:
|
||||
softened_change_in_amount *= -1.0
|
||||
|
||||
return time_boost*(softened_change_in_amount + change_in_softened_amount)
|
||||
|
||||
|
||||
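An illustrative pair of calls (hypothetical numbers) showing the minnow boost in spike_height: the same 10 LBC bump produces a far larger spike on a fresh claim than on an already-large, already-trending one.

small = spike_height(trending_score=0.0, x=10.0, x_old=0.0)        # ≈ 1.84
large = spike_height(trending_score=50.0, x=1010.0, x_old=1000.0)  # ≈ 0.017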
def get_time_boost(height):
|
||||
"""
|
||||
Return the time boost at a given height.
|
||||
"""
|
||||
return 1.0/DECAY**(height % RENORM_INTERVAL)
|
||||
|
||||
|
||||
def trending_log(s):
|
||||
"""
|
||||
Log a string.
|
||||
"""
|
||||
if TRENDING_LOG:
|
||||
fout = open("trending_ar.log", "a")
|
||||
fout.write(s)
|
||||
fout.flush()
|
||||
fout.close()
|
||||
|
||||
class TrendingData:
|
||||
"""
|
||||
An object of this class holds trending data
|
||||
"""
|
||||
def __init__(self):
|
||||
self.claims = {}
|
||||
|
||||
# Have all claims been read from db yet?
|
||||
self.initialised = False
|
||||
|
||||
def insert_claim_from_load(self, claim_hash, trending_score, total_amount):
|
||||
assert not self.initialised
|
||||
self.claims[claim_hash] = {"trending_score": trending_score,
|
||||
"total_amount": total_amount,
|
||||
"changed": False}
|
||||
|
||||
|
||||
def update_claim(self, claim_hash, total_amount, time_boost=1.0):
|
||||
"""
|
||||
Update trending data for a claim, given its new total amount.
|
||||
"""
|
||||
assert self.initialised
|
||||
|
||||
# Extract existing total amount and trending score
|
||||
# or use starting values if the claim is new
|
||||
if claim_hash in self.claims:
|
||||
old_state = copy.deepcopy(self.claims[claim_hash])
|
||||
else:
|
||||
old_state = {"trending_score": 0.0,
|
||||
"total_amount": 0.0,
|
||||
"changed": False}
|
||||
|
||||
# Calculate LBC change
|
||||
change = total_amount - old_state["total_amount"]
|
||||
|
||||
# Modify data if there was an LBC change
|
||||
if change != 0.0:
|
||||
spike = spike_height(old_state["trending_score"],
|
||||
total_amount,
|
||||
old_state["total_amount"],
|
||||
time_boost)
|
||||
trending_score = old_state["trending_score"] + spike
|
||||
self.claims[claim_hash] = {"total_amount": total_amount,
|
||||
"trending_score": trending_score,
|
||||
"changed": True}
|
||||
|
||||
|
||||
|
||||
def test_trending():
|
||||
"""
|
||||
Quick trending test for something receiving 10 LBC per block
|
||||
"""
|
||||
data = TrendingData()
|
||||
data.insert_claim_from_load("abc", 10.0, 1.0)
|
||||
data.initialised = True
|
||||
|
||||
for height in range(1, 5000):
|
||||
|
||||
if height % RENORM_INTERVAL == 0:
|
||||
data.claims["abc"]["trending_score"] *= DECAY_PER_RENORM
|
||||
|
||||
time_boost = get_time_boost(height)
|
||||
data.update_claim("abc", data.claims["abc"]["total_amount"] + 10.0,
|
||||
time_boost=time_boost)
|
||||
|
||||
|
||||
print(str(height) + " " + str(time_boost) + " " \
|
||||
+ str(data.claims["abc"]["trending_score"]))
|
||||
|
||||
|
||||
|
||||
# One global instance
|
||||
# pylint: disable=C0103
|
||||
trending_data = TrendingData()
|
||||
|
||||
def run(db, height, final_height, recalculate_claim_hashes):
|
||||
|
||||
if height < final_height - 5*HALF_LIFE:
|
||||
trending_log("Skipping AR trending at block {h}.\n".format(h=height))
|
||||
return
|
||||
|
||||
start = time.time()
|
||||
|
||||
trending_log("Calculating AR trending at block {h}.\n".format(h=height))
|
||||
trending_log(" Length of trending data = {l}.\n"\
|
||||
.format(l=len(trending_data.claims)))
|
||||
|
||||
# Renormalise trending scores and mark all as having changed
|
||||
if height % RENORM_INTERVAL == 0:
|
||||
trending_log(" Renormalising trending scores...")
|
||||
|
||||
keys = trending_data.claims.keys()
|
||||
for key in keys:
|
||||
if trending_data.claims[key]["trending_score"] != 0.0:
|
||||
trending_data.claims[key]["trending_score"] *= DECAY_PER_RENORM
|
||||
trending_data.claims[key]["changed"] = True
|
||||
|
||||
# Tiny becomes zero
|
||||
if abs(trending_data.claims[key]["trending_score"]) < 1E-9:
|
||||
trending_data.claims[key]["trending_score"] = 0.0
|
||||
|
||||
trending_log("done.\n")
|
||||
|
||||
|
||||
# Regular message.
|
||||
trending_log(" Reading total_amounts from db and updating"\
|
||||
+ " trending scores in RAM...")
|
||||
|
||||
# Get the value of the time boost
|
||||
time_boost = get_time_boost(height)
|
||||
|
||||
# Update claims from db
|
||||
if not trending_data.initialised:
|
||||
# On fresh launch
|
||||
for row in db.execute("""
|
||||
SELECT claim_hash, trending_mixed,
|
||||
(amount + support_amount)
|
||||
AS total_amount
|
||||
FROM claim;
|
||||
"""):
|
||||
trending_data.insert_claim_from_load(row[0], row[1], 1E-8*row[2])
|
||||
trending_data.initialised = True
|
||||
else:
|
||||
for row in db.execute(f"""
|
||||
SELECT claim_hash,
|
||||
(amount + support_amount)
|
||||
AS total_amount
|
||||
FROM claim
|
||||
WHERE claim_hash IN
|
||||
({','.join('?' for _ in recalculate_claim_hashes)});
|
||||
""", list(recalculate_claim_hashes)):
|
||||
trending_data.update_claim(row[0], 1E-8*row[1], time_boost)
|
||||
|
||||
trending_log("done.\n")
|
||||
|
||||
|
||||
# Write trending scores to DB
|
||||
if height % SAVE_INTERVAL == 0:
|
||||
|
||||
trending_log(" Writing trending scores to db...")
|
||||
|
||||
the_list = []
|
||||
keys = trending_data.claims.keys()
|
||||
for key in keys:
|
||||
if trending_data.claims[key]["changed"]:
|
||||
the_list.append((trending_data.claims[key]["trending_score"],
|
||||
key))
|
||||
trending_data.claims[key]["changed"] = False
|
||||
|
||||
trending_log("{n} scores to write...".format(n=len(the_list)))
|
||||
|
||||
db.executemany("UPDATE claim SET trending_mixed=? WHERE claim_hash=?;",
|
||||
the_list)
|
||||
|
||||
trending_log("done.\n")
|
||||
|
||||
trending_log("Trending operations took {time} seconds.\n\n"\
|
||||
.format(time=time.time() - start))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_trending()
|
|
@@ -1,485 +0,0 @@
|
|||
"""
|
||||
AR-like trending with a delayed effect and a faster
|
||||
decay rate for high valued claims.
|
||||
"""
|
||||
|
||||
import math
|
||||
import time
|
||||
import sqlite3
|
||||
|
||||
# Half life in blocks *for lower LBC claims* (it's shorter for whale claims)
|
||||
HALF_LIFE = 200
|
||||
|
||||
# Whale threshold, in LBC (higher -> less DB writing)
|
||||
WHALE_THRESHOLD = 10000.0
|
||||
|
||||
# Decay coefficient per block
|
||||
DECAY = 0.5**(1.0/HALF_LIFE)
|
||||
|
||||
# How frequently to write trending values to the db
|
||||
SAVE_INTERVAL = 10
|
||||
|
||||
# Renormalisation interval
|
||||
RENORM_INTERVAL = 1000
|
||||
|
||||
# Assertion
|
||||
assert RENORM_INTERVAL % SAVE_INTERVAL == 0
|
||||
|
||||
# Decay coefficient per renormalisation interval
|
||||
DECAY_PER_RENORM = DECAY**(RENORM_INTERVAL)
|
||||
|
||||
# Log trending calculations?
|
||||
TRENDING_LOG = True
|
||||
|
||||
|
||||
def install(connection):
|
||||
"""
|
||||
Install the trending algorithm.
|
||||
"""
|
||||
check_trending_values(connection)
|
||||
trending_data.initialise(connection.cursor())
|
||||
|
||||
if TRENDING_LOG:
|
||||
f = open("trending_variable_decay.log", "a")
|
||||
f.close()
|
||||
|
||||
# Stub
|
||||
CREATE_TREND_TABLE = ""
|
||||
|
||||
def check_trending_values(connection):
|
||||
"""
|
||||
If the trending values appear to be based on the zscore algorithm,
|
||||
reset them. This will allow resyncing from a standard snapshot.
|
||||
"""
|
||||
c = connection.cursor()
|
||||
needs_reset = False
|
||||
for row in c.execute("SELECT COUNT(*) num FROM claim WHERE trending_global <> 0;"):
|
||||
if row[0] != 0:
|
||||
needs_reset = True
|
||||
break
|
||||
|
||||
if needs_reset:
|
||||
print("Resetting some columns. This might take a while...", flush=True,
|
||||
end="")
|
||||
c.execute(""" BEGIN;
|
||||
UPDATE claim SET trending_group = 0;
|
||||
UPDATE claim SET trending_mixed = 0;
|
||||
COMMIT;""")
|
||||
print("done.")
|
||||
|
||||
|
||||
|
||||
|
||||
def trending_log(s):
|
||||
"""
|
||||
Log a string to the log file
|
||||
"""
|
||||
if TRENDING_LOG:
|
||||
fout = open("trending_variable_decay.log", "a")
|
||||
fout.write(s)
|
||||
fout.flush()
|
||||
fout.close()
|
||||
|
||||
|
||||
def trending_unit(height):
|
||||
"""
|
||||
Return the trending score unit at a given height.
|
||||
"""
|
||||
# Round to the beginning of a SAVE_INTERVAL batch of blocks.
|
||||
_height = height - (height % SAVE_INTERVAL)
|
||||
return 1.0/DECAY**(height % RENORM_INTERVAL)
|
||||
|
||||
|
||||
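A brief worked example of the unit trick above (illustrative arithmetic): recording spikes in units of 1/DECAY**(height % RENORM_INTERVAL) means no per-block decay pass is needed; one renormalisation every RENORM_INTERVAL blocks catches everything up.

# at height 1500, height % RENORM_INTERVAL == 500, so unit = 1/DECAY**500 ≈ 5.66
# a spike of mass m stored now is worth m*5.66 raw units, which decays back to
# exactly m by the next renormalisation point 500 blocks later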
class TrendingDB:
|
||||
"""
|
||||
An in-memory database of trending scores
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.conn = sqlite3.connect(":memory:", check_same_thread=False)
|
||||
self.cursor = self.conn.cursor()
|
||||
self.initialised = False
|
||||
self.write_needed = set()
|
||||
|
||||
def execute(self, query, *args, **kwargs):
|
||||
return self.conn.execute(query, *args, **kwargs)
|
||||
|
||||
def executemany(self, query, *args, **kwargs):
|
||||
return self.conn.executemany(query, *args, **kwargs)
|
||||
|
||||
def begin(self):
|
||||
self.execute("BEGIN;")
|
||||
|
||||
def commit(self):
|
||||
self.execute("COMMIT;")
|
||||
|
||||
def initialise(self, db):
|
||||
"""
|
||||
Pass in claims.db
|
||||
"""
|
||||
if self.initialised:
|
||||
return
|
||||
|
||||
trending_log("Initialising trending database...")
|
||||
|
||||
# The need for speed
|
||||
self.execute("PRAGMA JOURNAL_MODE=OFF;")
|
||||
self.execute("PRAGMA SYNCHRONOUS=0;")
|
||||
|
||||
self.begin()
|
||||
|
||||
# Create the tables
|
||||
self.execute("""
|
||||
CREATE TABLE IF NOT EXISTS claims
|
||||
(claim_hash BYTES PRIMARY KEY,
|
||||
lbc REAL NOT NULL DEFAULT 0.0,
|
||||
trending_score REAL NOT NULL DEFAULT 0.0)
|
||||
WITHOUT ROWID;""")
|
||||
|
||||
self.execute("""
|
||||
CREATE TABLE IF NOT EXISTS spikes
|
||||
(id INTEGER PRIMARY KEY,
|
||||
claim_hash BYTES NOT NULL,
|
||||
height INTEGER NOT NULL,
|
||||
mass REAL NOT NULL,
|
||||
FOREIGN KEY (claim_hash)
|
||||
REFERENCES claims (claim_hash));""")
|
||||
|
||||
# Clear out any existing data
|
||||
self.execute("DELETE FROM claims;")
|
||||
self.execute("DELETE FROM spikes;")
|
||||
|
||||
# Create indexes
|
||||
self.execute("CREATE INDEX idx1 ON spikes (claim_hash, height, mass);")
|
||||
self.execute("CREATE INDEX idx2 ON spikes (claim_hash, height, mass DESC);")
|
||||
self.execute("CREATE INDEX idx3 on claims (lbc DESC, claim_hash, trending_score);")
|
||||
|
||||
# Import data from claims.db
|
||||
for row in db.execute("""
|
||||
SELECT claim_hash,
|
||||
1E-8*(amount + support_amount) AS lbc,
|
||||
trending_mixed
|
||||
FROM claim;
|
||||
"""):
|
||||
self.execute("INSERT INTO claims VALUES (?, ?, ?);", row)
|
||||
self.commit()
|
||||
|
||||
self.initialised = True
|
||||
trending_log("done.\n")
|
||||
|
||||
def apply_spikes(self, height):
|
||||
"""
|
||||
Apply spikes that are due. This occurs inside a transaction.
|
||||
"""
|
||||
|
||||
spikes = []
|
||||
unit = trending_unit(height)
|
||||
for row in self.execute("""
|
||||
SELECT SUM(mass), claim_hash FROM spikes
|
||||
WHERE height = ?
|
||||
GROUP BY claim_hash;
|
||||
""", (height, )):
|
||||
spikes.append((row[0]*unit, row[1]))
|
||||
self.write_needed.add(row[1])
|
||||
|
||||
self.executemany("""
|
||||
UPDATE claims
|
||||
SET trending_score = (trending_score + ?)
|
||||
WHERE claim_hash = ?;
|
||||
""", spikes)
|
||||
self.execute("DELETE FROM spikes WHERE height = ?;", (height, ))
|
||||
|
||||
|
||||
def decay_whales(self, height):
|
||||
"""
|
||||
Occurs inside transaction.
|
||||
"""
|
||||
if height % SAVE_INTERVAL != 0:
|
||||
return
|
||||
|
||||
whales = self.execute("""
|
||||
SELECT trending_score, lbc, claim_hash
|
||||
FROM claims
|
||||
WHERE lbc >= ?;
|
||||
""", (WHALE_THRESHOLD, )).fetchall()
|
||||
whales2 = []
|
||||
for whale in whales:
|
||||
trending, lbc, claim_hash = whale
|
||||
|
||||
# Overall multiplication factor for decay rate
|
||||
# At WHALE_THRESHOLD, this is 1
|
||||
# At 10*WHALE_THRESHOLD, it is 3
|
||||
decay_rate_factor = 1.0 + 2.0*math.log10(lbc/WHALE_THRESHOLD)
|
||||
|
||||
# The -1 is because this is just the *extra* part being applied
|
||||
factor = (DECAY**SAVE_INTERVAL)**(decay_rate_factor - 1.0)
|
||||
|
||||
# Decay
|
||||
trending *= factor
|
||||
whales2.append((trending, claim_hash))
|
||||
self.write_needed.add(claim_hash)
|
||||
|
||||
self.executemany("UPDATE claims SET trending_score=? WHERE claim_hash=?;",
|
||||
whales2)
|
||||
|
||||
|
||||
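Worked magnitudes for the whale decay above (illustrative, assuming the net effect of the extra factor applied every SAVE_INTERVAL blocks):

# decay_rate_factor: 1.0 at 10k LBC, 3.0 at 100k LBC, 5.0 at 1M LBC
# effective half life: ~200 blocks, ~67 blocks and ~40 blocks respectively,
# so large claims fall off the trending list markedly faster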
def renorm(self, height):
|
||||
"""
|
||||
Renormalise trending scores. Occurs inside a transaction.
|
||||
"""
|
||||
|
||||
if height % RENORM_INTERVAL == 0:
|
||||
threshold = 1.0E-3/DECAY_PER_RENORM
|
||||
for row in self.execute("""SELECT claim_hash FROM claims
|
||||
WHERE ABS(trending_score) >= ?;""",
|
||||
(threshold, )):
|
||||
self.write_needed.add(row[0])
|
||||
|
||||
self.execute("""UPDATE claims SET trending_score = ?*trending_score
|
||||
WHERE ABS(trending_score) >= ?;""",
|
||||
(DECAY_PER_RENORM, threshold))
|
||||
|
||||
def write_to_claims_db(self, db, height):
|
||||
"""
|
||||
Write changed trending scores to claims.db.
|
||||
"""
|
||||
if height % SAVE_INTERVAL != 0:
|
||||
return
|
||||
|
||||
rows = self.execute(f"""
|
||||
SELECT trending_score, claim_hash
|
||||
FROM claims
|
||||
WHERE claim_hash IN
|
||||
({','.join('?' for _ in self.write_needed)});
|
||||
""", list(self.write_needed)).fetchall()
|
||||
|
||||
db.executemany("""UPDATE claim SET trending_mixed = ?
|
||||
WHERE claim_hash = ?;""", rows)
|
||||
|
||||
# Clear list of claims needing to be written to claims.db
|
||||
self.write_needed = set()
|
||||
|
||||
|
||||
def update(self, db, height, recalculate_claim_hashes):
|
||||
"""
|
||||
Update trending scores.
|
||||
Input is a cursor to claims.db, the block height, and the list of
|
||||
claims that changed.
|
||||
"""
|
||||
assert self.initialised
|
||||
|
||||
self.begin()
|
||||
self.renorm(height)
|
||||
|
||||
# Fetch changed/new claims from claims.db
|
||||
for row in db.execute(f"""
|
||||
SELECT claim_hash,
|
||||
1E-8*(amount + support_amount) AS lbc
|
||||
FROM claim
|
||||
WHERE claim_hash IN
|
||||
({','.join('?' for _ in recalculate_claim_hashes)});
|
||||
""", list(recalculate_claim_hashes)):
|
||||
claim_hash, lbc = row
|
||||
|
||||
# Insert into trending db if it does not exist
|
||||
self.execute("""
|
||||
INSERT INTO claims (claim_hash)
|
||||
VALUES (?)
|
||||
ON CONFLICT (claim_hash) DO NOTHING;""",
|
||||
(claim_hash, ))
|
||||
|
||||
# See if it was an LBC change
|
||||
old = self.execute("SELECT * FROM claims WHERE claim_hash=?;",
|
||||
(claim_hash, )).fetchone()
|
||||
lbc_old = old[1]
|
||||
|
||||
# Save new LBC value into trending db
|
||||
self.execute("UPDATE claims SET lbc = ? WHERE claim_hash = ?;",
|
||||
(lbc, claim_hash))
|
||||
|
||||
if lbc > lbc_old:
|
||||
|
||||
# Schedule a future spike
|
||||
delay = min(int((lbc + 1E-8)**0.4), HALF_LIFE)
|
||||
spike = (claim_hash, height + delay, spike_mass(lbc, lbc_old))
|
||||
self.execute("""INSERT INTO spikes
|
||||
(claim_hash, height, mass)
|
||||
VALUES (?, ?, ?);""", spike)
|
||||
|
||||
elif lbc < lbc_old:
|
||||
|
||||
# Subtract from future spikes
|
||||
penalty = spike_mass(lbc_old, lbc)
|
||||
spikes = self.execute("""
|
||||
SELECT * FROM spikes
|
||||
WHERE claim_hash = ?
|
||||
ORDER BY height ASC, mass DESC;
|
||||
""", (claim_hash, )).fetchall()
|
||||
for spike in spikes:
|
||||
spike_id, mass = spike[0], spike[3]
|
||||
|
||||
if mass > penalty:
|
||||
# The entire penalty merely reduces this spike
|
||||
self.execute("UPDATE spikes SET mass=? WHERE id=?;",
|
||||
(mass - penalty, spike_id))
|
||||
penalty = 0.0
|
||||
else:
|
||||
# Removing this spike entirely accounts for some (or
|
||||
# all) of the penalty, then move on to other spikes
|
||||
self.execute("DELETE FROM spikes WHERE id=?;",
|
||||
(spike_id, ))
|
||||
penalty -= mass
|
||||
|
||||
# If penalty remains, that's a negative spike to be applied
|
||||
# immediately.
|
||||
if penalty > 0.0:
|
||||
self.execute("""
|
||||
INSERT INTO spikes (claim_hash, height, mass)
|
||||
VALUES (?, ?, ?);""",
|
||||
(claim_hash, height, -penalty))
|
||||
|
||||
self.apply_spikes(height)
|
||||
self.decay_whales(height)
|
||||
self.commit()
|
||||
|
||||
self.write_to_claims_db(db, height)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# The "global" instance to work with
|
||||
# pylint: disable=C0103
|
||||
trending_data = TrendingDB()
|
||||
|
||||
def spike_mass(x, x_old):
|
||||
"""
|
||||
Compute the mass of a trending spike (normed - constant units).
|
||||
x_old = old LBC value
|
||||
x = new LBC value
|
||||
"""
|
||||
|
||||
# Sign of trending spike
|
||||
sign = 1.0
|
||||
if x < x_old:
|
||||
sign = -1.0
|
||||
|
||||
# Magnitude
|
||||
mag = abs(x**0.25 - x_old**0.25)
|
||||
|
||||
# Minnow boost
|
||||
mag *= 1.0 + 2E4/(x + 100.0)**2
|
||||
|
||||
return sign*mag
|
||||
|
||||
|
||||
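Two illustrative evaluations of spike_mass (hypothetical LBC amounts), showing how the minnow boost favours small claims:

# spike_mass(100.0, 0.0)          ≈ 100**0.25 * (1 + 2e4/200**2)  ≈ 3.16 * 1.5 ≈ 4.7
# spike_mass(100100.0, 100000.0)  ≈ (100100**0.25 - 100000**0.25) ≈ 0.0044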
def run(db, height, final_height, recalculate_claim_hashes):
|
||||
if height < final_height - 5*HALF_LIFE:
|
||||
trending_log(f"Skipping trending calculations at block {height}.\n")
|
||||
return
|
||||
|
||||
start = time.time()
|
||||
trending_log(f"Calculating variable_decay trending at block {height}.\n")
|
||||
trending_data.update(db, height, recalculate_claim_hashes)
|
||||
end = time.time()
|
||||
trending_log(f"Trending operations took {end - start} seconds.\n\n")
|
||||
|
||||
def test_trending():
|
||||
"""
|
||||
Quick trending test for claims with different support patterns.
|
||||
Actually use the run() function.
|
||||
"""
|
||||
|
||||
# Create a fake "claims.db" for testing
|
||||
# pylint: disable=I1101
|
||||
import apsw  # pylint: disable=C0415
dbc = apsw.Connection(":memory:")
|
||||
db = dbc.cursor()
|
||||
|
||||
# Create table
|
||||
db.execute("""
|
||||
BEGIN;
|
||||
CREATE TABLE claim (claim_hash TEXT PRIMARY KEY,
|
||||
amount REAL NOT NULL DEFAULT 0.0,
|
||||
support_amount REAL NOT NULL DEFAULT 0.0,
|
||||
trending_mixed REAL NOT NULL DEFAULT 0.0);
|
||||
COMMIT;
|
||||
""")
|
||||
|
||||
# Initialise trending data before anything happens with the claims
|
||||
trending_data.initialise(db)
|
||||
|
||||
# Insert initial states of claims
|
||||
everything = {"huge_whale": 0.01, "medium_whale": 0.01, "small_whale": 0.01,
|
||||
"huge_whale_botted": 0.01, "minnow": 0.01}
|
||||
|
||||
def to_list_of_tuples(stuff):
|
||||
l = []
|
||||
for key in stuff:
|
||||
l.append((key, stuff[key]))
|
||||
return l
|
||||
|
||||
db.executemany("""
|
||||
INSERT INTO claim (claim_hash, amount) VALUES (?, 1E8*?);
|
||||
""", to_list_of_tuples(everything))
|
||||
|
||||
# Process block zero
|
||||
height = 0
|
||||
run(db, height, height, everything.keys())
|
||||
|
||||
# Save trajectories for plotting
|
||||
trajectories = {}
|
||||
for row in trending_data.execute("""
|
||||
SELECT claim_hash, trending_score
|
||||
FROM claims;
|
||||
"""):
|
||||
trajectories[row[0]] = [row[1]/trending_unit(height)]
|
||||
|
||||
# Main loop
|
||||
for height in range(1, 1000):
|
||||
|
||||
# One-off supports
|
||||
if height == 1:
|
||||
everything["huge_whale"] += 5E5
|
||||
everything["medium_whale"] += 5E4
|
||||
everything["small_whale"] += 5E3
|
||||
|
||||
# Every block
|
||||
if height < 500:
|
||||
everything["huge_whale_botted"] += 5E5/500
|
||||
everything["minnow"] += 1
|
||||
|
||||
# Remove supports
|
||||
if height == 500:
|
||||
for key in everything:
|
||||
everything[key] = 0.01
|
||||
|
||||
# Whack into the db
|
||||
db.executemany("""
|
||||
UPDATE claim SET amount = 1E8*? WHERE claim_hash = ?;
|
||||
""", [(y, x) for (x, y) in to_list_of_tuples(everything)])
|
||||
|
||||
# Call run()
|
||||
run(db, height, height, everything.keys())
|
||||
|
||||
# Append current trending scores to trajectories
|
||||
for row in db.execute("""
|
||||
SELECT claim_hash, trending_mixed
|
||||
FROM claim;
|
||||
"""):
|
||||
trajectories[row[0]].append(row[1]/trending_unit(height))
|
||||
|
||||
dbc.close()
|
||||
|
||||
# pylint: disable=C0415
|
||||
import matplotlib.pyplot as plt
|
||||
for key in trajectories:
|
||||
plt.plot(trajectories[key], label=key)
|
||||
plt.legend()
|
||||
plt.show()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_trending()
|
|
@@ -1,119 +0,0 @@
|
|||
from math import sqrt
|
||||
|
||||
# TRENDING_WINDOW is the number of blocks in ~6hr period (21600 seconds / 161 seconds per block)
|
||||
TRENDING_WINDOW = 134
|
||||
|
||||
# TRENDING_DATA_POINTS says how many samples to use for the trending algorithm
|
||||
# i.e. only consider claims from the most recent (TRENDING_WINDOW * TRENDING_DATA_POINTS) blocks
|
||||
TRENDING_DATA_POINTS = 28
|
||||
|
||||
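For scale, a small arithmetic note on the window constants (illustrative only):

# lookback implied by the constants above:
#   TRENDING_WINDOW * TRENDING_DATA_POINTS = 134 * 28 = 3752 blocks
#   3752 blocks * ~161 s/block ≈ 604,072 s ≈ 7 days of support history per z-score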
CREATE_TREND_TABLE = """
|
||||
create table if not exists trend (
|
||||
claim_hash bytes not null,
|
||||
height integer not null,
|
||||
amount integer not null,
|
||||
primary key (claim_hash, height)
|
||||
) without rowid;
|
||||
"""
|
||||
|
||||
|
||||
class ZScore:
|
||||
__slots__ = 'count', 'total', 'power', 'last'
|
||||
|
||||
def __init__(self):
|
||||
self.count = 0
|
||||
self.total = 0
|
||||
self.power = 0
|
||||
self.last = None
|
||||
|
||||
def step(self, value):
|
||||
if self.last is not None:
|
||||
self.count += 1
|
||||
self.total += self.last
|
||||
self.power += self.last ** 2
|
||||
self.last = value
|
||||
|
||||
@property
|
||||
def mean(self):
|
||||
return self.total / self.count
|
||||
|
||||
@property
|
||||
def standard_deviation(self):
|
||||
value = (self.power / self.count) - self.mean ** 2
|
||||
return sqrt(value) if value > 0 else 0
|
||||
|
||||
def finalize(self):
|
||||
if self.count == 0:
|
||||
return self.last
|
||||
return (self.last - self.mean) / (self.standard_deviation or 1)
|
||||
|
||||
|
||||
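A standalone illustration of the ZScore accumulator (made-up numbers): the aggregate scores the most recent sample against the history seen before it.

z = ZScore()
for amount in (10.0, 12.0, 11.0, 50.0):   # hypothetical per-window support totals
    z.step(amount)
score = z.finalize()                       # ≈ 47.8: the final 50.0 is a big outlier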
def install(connection):
|
||||
connection.create_aggregate("zscore", 1, ZScore)
|
||||
connection.executescript(CREATE_TREND_TABLE)
|
||||
|
||||
|
||||
def run(db, height, final_height, affected_claims):
|
||||
# don't start tracking until we're at the end of initial sync
|
||||
if height < (final_height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)):
|
||||
return
|
||||
|
||||
if height % TRENDING_WINDOW != 0:
|
||||
return
|
||||
|
||||
db.execute(f"""
|
||||
DELETE FROM trend WHERE height < {height - (TRENDING_WINDOW * TRENDING_DATA_POINTS)}
|
||||
""")
|
||||
|
||||
start = (height - TRENDING_WINDOW) + 1
|
||||
db.execute(f"""
|
||||
INSERT OR IGNORE INTO trend (claim_hash, height, amount)
|
||||
SELECT claim_hash, {start}, COALESCE(
|
||||
(SELECT SUM(amount) FROM support WHERE claim_hash=claim.claim_hash
|
||||
AND height >= {start}), 0
|
||||
) AS support_sum
|
||||
FROM claim WHERE support_sum > 0
|
||||
""")
|
||||
|
||||
zscore = ZScore()
|
||||
for global_sum in db.execute("SELECT AVG(amount) AS avg_amount FROM trend GROUP BY height"):
|
||||
zscore.step(global_sum.avg_amount)
|
||||
global_mean, global_deviation = 0, 1
|
||||
if zscore.count > 0:
|
||||
global_mean = zscore.mean
|
||||
global_deviation = zscore.standard_deviation
|
||||
|
||||
db.execute(f"""
|
||||
UPDATE claim SET
|
||||
trending_local = COALESCE((
|
||||
SELECT zscore(amount) FROM trend
|
||||
WHERE claim_hash=claim.claim_hash ORDER BY height DESC
|
||||
), 0),
|
||||
trending_global = COALESCE((
|
||||
SELECT (amount - {global_mean}) / {global_deviation} FROM trend
|
||||
WHERE claim_hash=claim.claim_hash AND height = {start}
|
||||
), 0),
|
||||
trending_group = 0,
|
||||
trending_mixed = 0
|
||||
""")
|
||||
|
||||
# trending_group and trending_mixed determine how trending will show in query results
|
||||
# normally the SQL will be: "ORDER BY trending_group, trending_mixed"
|
||||
# changing the trending_group will have significant impact on trending results
|
||||
# changing the value used for trending_mixed will only impact trending within a trending_group
|
||||
db.execute(f"""
|
||||
UPDATE claim SET
|
||||
trending_group = CASE
|
||||
WHEN trending_local > 0 AND trending_global > 0 THEN 4
|
||||
WHEN trending_local <= 0 AND trending_global > 0 THEN 3
|
||||
WHEN trending_local > 0 AND trending_global <= 0 THEN 2
|
||||
WHEN trending_local <= 0 AND trending_global <= 0 THEN 1
|
||||
END,
|
||||
trending_mixed = CASE
|
||||
WHEN trending_local > 0 AND trending_global > 0 THEN trending_global
|
||||
WHEN trending_local <= 0 AND trending_global > 0 THEN trending_local
|
||||
WHEN trending_local > 0 AND trending_global <= 0 THEN trending_local
|
||||
WHEN trending_local <= 0 AND trending_global <= 0 THEN trending_global
|
||||
END
|
||||
WHERE trending_local <> 0 OR trending_global <> 0
|
||||
""")
|
|
@@ -1,994 +0,0 @@
|
|||
import os
|
||||
|
||||
import sqlite3
|
||||
from typing import Union, Tuple, Set, List
|
||||
from itertools import chain
|
||||
from decimal import Decimal
|
||||
from collections import namedtuple
|
||||
from binascii import unhexlify, hexlify
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.util import class_logger
|
||||
from lbry.wallet.database import query, constraints_to_sql
|
||||
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.mime_types import guess_stream_type
|
||||
from lbry.wallet import Ledger, RegTestLedger
|
||||
from lbry.wallet.transaction import Transaction, Output
|
||||
from lbry.wallet.server.db.canonical import register_canonical_functions
|
||||
from lbry.wallet.server.db.trending import TRENDING_ALGORITHMS
|
||||
|
||||
from .common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES
|
||||
from lbry.wallet.server.db.elasticsearch import SearchIndex
|
||||
|
||||
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
|
||||
sqlite3.enable_callback_tracebacks(True)
|
||||
|
||||
|
||||
class SQLDB:
|
||||
|
||||
PRAGMAS = """
|
||||
pragma journal_mode=WAL;
|
||||
"""
|
||||
|
||||
CREATE_CLAIM_TABLE = """
|
||||
create table if not exists claim (
|
||||
claim_hash bytes primary key,
|
||||
claim_id text not null,
|
||||
claim_name text not null,
|
||||
normalized text not null,
|
||||
txo_hash bytes not null,
|
||||
tx_position integer not null,
|
||||
amount integer not null,
|
||||
timestamp integer not null, -- last updated timestamp
|
||||
creation_timestamp integer not null,
|
||||
height integer not null, -- last updated height
|
||||
creation_height integer not null,
|
||||
activation_height integer,
|
||||
expiration_height integer not null,
|
||||
release_time integer not null,
|
||||
|
||||
short_url text not null, -- normalized#shortest-unique-claim_id
|
||||
canonical_url text, -- channel's-short_url/normalized#shortest-unique-claim_id-within-channel
|
||||
|
||||
title text,
|
||||
author text,
|
||||
description text,
|
||||
|
||||
claim_type integer,
|
||||
has_source bool,
|
||||
reposted integer default 0,
|
||||
|
||||
-- streams
|
||||
stream_type text,
|
||||
media_type text,
|
||||
fee_amount integer default 0,
|
||||
fee_currency text,
|
||||
duration integer,
|
||||
|
||||
-- reposts
|
||||
reposted_claim_hash bytes,
|
||||
|
||||
-- claims which are channels
|
||||
public_key_bytes bytes,
|
||||
public_key_hash bytes,
|
||||
claims_in_channel integer,
|
||||
|
||||
-- claims which are inside channels
|
||||
channel_hash bytes,
|
||||
channel_join integer, -- height at which claim got valid signature / joined channel
|
||||
signature bytes,
|
||||
signature_digest bytes,
|
||||
signature_valid bool,
|
||||
|
||||
effective_amount integer not null default 0,
|
||||
support_amount integer not null default 0,
|
||||
trending_group integer not null default 0,
|
||||
trending_mixed integer not null default 0,
|
||||
trending_local integer not null default 0,
|
||||
trending_global integer not null default 0
|
||||
);
|
||||
|
||||
create index if not exists claim_normalized_idx on claim (normalized, activation_height);
|
||||
create index if not exists claim_channel_hash_idx on claim (channel_hash, signature, claim_hash);
|
||||
create index if not exists claim_claims_in_channel_idx on claim (signature_valid, channel_hash, normalized);
|
||||
create index if not exists claim_txo_hash_idx on claim (txo_hash);
|
||||
create index if not exists claim_activation_height_idx on claim (activation_height, claim_hash);
|
||||
create index if not exists claim_expiration_height_idx on claim (expiration_height);
|
||||
create index if not exists claim_reposted_claim_hash_idx on claim (reposted_claim_hash);
|
||||
"""
|
||||
|
||||
CREATE_SUPPORT_TABLE = """
|
||||
create table if not exists support (
|
||||
txo_hash bytes primary key,
|
||||
tx_position integer not null,
|
||||
height integer not null,
|
||||
claim_hash bytes not null,
|
||||
amount integer not null
|
||||
);
|
||||
create index if not exists support_claim_hash_idx on support (claim_hash, height);
|
||||
"""
|
||||
|
||||
CREATE_TAG_TABLE = """
|
||||
create table if not exists tag (
|
||||
tag text not null,
|
||||
claim_hash bytes not null,
|
||||
height integer not null
|
||||
);
|
||||
create unique index if not exists tag_claim_hash_tag_idx on tag (claim_hash, tag);
|
||||
"""
|
||||
|
||||
CREATE_LANGUAGE_TABLE = """
|
||||
create table if not exists language (
|
||||
language text not null,
|
||||
claim_hash bytes not null,
|
||||
height integer not null
|
||||
);
|
||||
create unique index if not exists language_claim_hash_language_idx on language (claim_hash, language);
|
||||
"""
|
||||
|
||||
CREATE_CLAIMTRIE_TABLE = """
|
||||
create table if not exists claimtrie (
|
||||
normalized text primary key,
|
||||
claim_hash bytes not null,
|
||||
last_take_over_height integer not null
|
||||
);
|
||||
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
|
||||
"""
|
||||
|
||||
CREATE_CHANGELOG_TRIGGER = """
|
||||
create table if not exists changelog (
|
||||
claim_hash bytes primary key
|
||||
);
|
||||
create index if not exists claimtrie_claim_hash_idx on claimtrie (claim_hash);
|
||||
create trigger if not exists claim_changelog after update on claim
|
||||
begin
|
||||
insert or ignore into changelog (claim_hash) values (new.claim_hash);
|
||||
end;
|
||||
create trigger if not exists claimtrie_changelog after update on claimtrie
|
||||
begin
|
||||
insert or ignore into changelog (claim_hash) values (new.claim_hash);
|
||||
insert or ignore into changelog (claim_hash) values (old.claim_hash);
|
||||
end;
|
||||
"""
|
||||
|
||||
SEARCH_INDEXES = """
|
||||
-- used by any tag clouds
|
||||
create index if not exists tag_tag_idx on tag (tag, claim_hash);
|
||||
|
||||
-- naked order bys (no filters)
|
||||
create unique index if not exists claim_release_idx on claim (release_time, claim_hash);
|
||||
create unique index if not exists claim_trending_idx on claim (trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists claim_effective_amount_idx on claim (effective_amount, claim_hash);
|
||||
|
||||
-- claim_type filter + order by
|
||||
create unique index if not exists claim_type_release_idx on claim (release_time, claim_type, claim_hash);
|
||||
create unique index if not exists claim_type_trending_idx on claim (trending_group, trending_mixed, claim_type, claim_hash);
|
||||
create unique index if not exists claim_type_effective_amount_idx on claim (effective_amount, claim_type, claim_hash);
|
||||
|
||||
-- stream_type filter + order by
|
||||
create unique index if not exists stream_type_release_idx on claim (stream_type, release_time, claim_hash);
|
||||
create unique index if not exists stream_type_trending_idx on claim (stream_type, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists stream_type_effective_amount_idx on claim (stream_type, effective_amount, claim_hash);
|
||||
|
||||
-- channel_hash filter + order by
|
||||
create unique index if not exists channel_hash_release_idx on claim (channel_hash, release_time, claim_hash);
|
||||
create unique index if not exists channel_hash_trending_idx on claim (channel_hash, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists channel_hash_effective_amount_idx on claim (channel_hash, effective_amount, claim_hash);
|
||||
|
||||
-- duration filter + order by
|
||||
create unique index if not exists duration_release_idx on claim (duration, release_time, claim_hash);
|
||||
create unique index if not exists duration_trending_idx on claim (duration, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists duration_effective_amount_idx on claim (duration, effective_amount, claim_hash);
|
||||
|
||||
-- fee_amount + order by
|
||||
create unique index if not exists fee_amount_release_idx on claim (fee_amount, release_time, claim_hash);
|
||||
create unique index if not exists fee_amount_trending_idx on claim (fee_amount, trending_group, trending_mixed, claim_hash);
|
||||
create unique index if not exists fee_amount_effective_amount_idx on claim (fee_amount, effective_amount, claim_hash);
|
||||
|
||||
-- TODO: verify that all indexes below are used
|
||||
create index if not exists claim_height_normalized_idx on claim (height, normalized asc);
|
||||
create index if not exists claim_resolve_idx on claim (normalized, claim_id);
|
||||
create index if not exists claim_id_idx on claim (claim_id, claim_hash);
|
||||
create index if not exists claim_timestamp_idx on claim (timestamp);
|
||||
create index if not exists claim_public_key_hash_idx on claim (public_key_hash);
|
||||
create index if not exists claim_signature_valid_idx on claim (signature_valid);
|
||||
"""
|
||||
|
||||
TAG_INDEXES = '\n'.join(
|
||||
f"create unique index if not exists tag_{tag_key}_idx on tag (tag, claim_hash) WHERE tag='{tag_value}';"
|
||||
for tag_value, tag_key in COMMON_TAGS.items()
|
||||
)
|
||||
|
||||
LANGUAGE_INDEXES = '\n'.join(
|
||||
f"create unique index if not exists language_{language}_idx on language (language, claim_hash) WHERE language='{language}';"
|
||||
for language in INDEXED_LANGUAGES
|
||||
)
|
||||
|
||||
CREATE_TABLES_QUERY = (
|
||||
CREATE_CLAIM_TABLE +
|
||||
CREATE_SUPPORT_TABLE +
|
||||
CREATE_CLAIMTRIE_TABLE +
|
||||
CREATE_TAG_TABLE +
|
||||
CREATE_CHANGELOG_TRIGGER +
|
||||
CREATE_LANGUAGE_TABLE
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self, main, path: str, blocking_channels: list, filtering_channels: list, trending: list):
|
||||
self.main = main
|
||||
self._db_path = path
|
||||
self.db = None
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger
|
||||
self.blocked_streams = None
|
||||
self.blocked_channels = None
|
||||
self.blocking_channel_hashes = {
|
||||
unhexlify(channel_id)[::-1] for channel_id in blocking_channels if channel_id
|
||||
}
|
||||
self.filtered_streams = None
|
||||
self.filtered_channels = None
|
||||
self.filtering_channel_hashes = {
|
||||
unhexlify(channel_id)[::-1] for channel_id in filtering_channels if channel_id
|
||||
}
|
||||
self.trending = trending
|
||||
self.pending_deletes = set()
|
||||
|
||||
def open(self):
|
||||
self.db = sqlite3.connect(self._db_path, isolation_level=None, check_same_thread=False, uri=True)
|
||||
|
||||
def namedtuple_factory(cursor, row):
|
||||
Row = namedtuple('Row', (d[0] for d in cursor.description))
|
||||
return Row(*row)
|
||||
self.db.row_factory = namedtuple_factory
|
||||
self.db.executescript(self.PRAGMAS)
|
||||
self.db.executescript(self.CREATE_TABLES_QUERY)
|
||||
register_canonical_functions(self.db)
|
||||
self.blocked_streams = {}
|
||||
self.blocked_channels = {}
|
||||
self.filtered_streams = {}
|
||||
self.filtered_channels = {}
|
||||
self.update_blocked_and_filtered_claims()
|
||||
for algorithm in self.trending:
|
||||
algorithm.install(self.db)
|
||||
|
||||
def close(self):
|
||||
if self.db is not None:
|
||||
self.db.close()
|
||||
|
||||
def update_blocked_and_filtered_claims(self):
|
||||
self.update_claims_from_channel_hashes(
|
||||
self.blocked_streams, self.blocked_channels, self.blocking_channel_hashes
|
||||
)
|
||||
self.update_claims_from_channel_hashes(
|
||||
self.filtered_streams, self.filtered_channels, self.filtering_channel_hashes
|
||||
)
|
||||
self.filtered_streams.update(self.blocked_streams)
|
||||
self.filtered_channels.update(self.blocked_channels)
|
||||
|
||||
def update_claims_from_channel_hashes(self, shared_streams, shared_channels, channel_hashes):
|
||||
streams, channels = {}, {}
|
||||
if channel_hashes:
|
||||
sql = query(
|
||||
"SELECT repost.channel_hash, repost.reposted_claim_hash, target.claim_type "
|
||||
"FROM claim as repost JOIN claim AS target ON (target.claim_hash=repost.reposted_claim_hash)", **{
|
||||
'repost.reposted_claim_hash__is_not_null': 1,
|
||||
'repost.channel_hash__in': channel_hashes
|
||||
}
|
||||
)
|
||||
for blocked_claim in self.execute(*sql):
|
||||
if blocked_claim.claim_type == CLAIM_TYPES['stream']:
|
||||
streams[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
|
||||
elif blocked_claim.claim_type == CLAIM_TYPES['channel']:
|
||||
channels[blocked_claim.reposted_claim_hash] = blocked_claim.channel_hash
|
||||
shared_streams.clear()
|
||||
shared_streams.update(streams)
|
||||
shared_channels.clear()
|
||||
shared_channels.update(channels)
|
||||
|
||||
@staticmethod
|
||||
def _insert_sql(table: str, data: dict) -> Tuple[str, list]:
|
||||
columns, values = [], []
|
||||
for column, value in data.items():
|
||||
columns.append(column)
|
||||
values.append(value)
|
||||
sql = (
|
||||
f"INSERT INTO {table} ({', '.join(columns)}) "
|
||||
f"VALUES ({', '.join(['?'] * len(values))})"
|
||||
)
|
||||
return sql, values
|
||||
|
||||
@staticmethod
|
||||
def _update_sql(table: str, data: dict, where: str,
|
||||
constraints: Union[list, tuple]) -> Tuple[str, list]:
|
||||
columns, values = [], []
|
||||
for column, value in data.items():
|
||||
columns.append(f"{column} = ?")
|
||||
values.append(value)
|
||||
values.extend(constraints)
|
||||
return f"UPDATE {table} SET {', '.join(columns)} WHERE {where}", values
|
||||
|
||||
@staticmethod
|
||||
def _delete_sql(table: str, constraints: dict) -> Tuple[str, dict]:
|
||||
where, values = constraints_to_sql(constraints)
|
||||
return f"DELETE FROM {table} WHERE {where}", values
|
||||
|
||||
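Illustrative outputs of the SQL helpers above (hypothetical values, shown only to make the generated shapes concrete):

# SQLDB._insert_sql('support', {'txo_hash': b'...', 'amount': 5})
#   -> ("INSERT INTO support (txo_hash, amount) VALUES (?, ?)", [b'...', 5])
# SQLDB._update_sql('claim', {'amount': 5}, 'claim_hash = ?', [b'...'])
#   -> ("UPDATE claim SET amount = ? WHERE claim_hash = ?", [5, b'...'])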
def execute(self, *args):
|
||||
return self.db.execute(*args)
|
||||
|
||||
def executemany(self, *args):
|
||||
return self.db.executemany(*args)
|
||||
|
||||
def begin(self):
|
||||
self.execute('begin;')
|
||||
|
||||
def commit(self):
|
||||
self.execute('commit;')
|
||||
|
||||
def _upsertable_claims(self, txos: List[Output], header, clear_first=False):
|
||||
claim_hashes, claims, tags, languages = set(), [], {}, {}
|
||||
for txo in txos:
|
||||
tx = txo.tx_ref.tx
|
||||
|
||||
try:
|
||||
assert txo.claim_name
|
||||
assert txo.normalized_name
|
||||
except:
|
||||
#self.logger.exception(f"Could not decode claim name for {tx.id}:{txo.position}.")
|
||||
continue
|
||||
|
||||
language = 'none'
|
||||
try:
|
||||
if txo.claim.is_stream and txo.claim.stream.languages:
|
||||
language = txo.claim.stream.languages[0].language
|
||||
except:
|
||||
pass
|
||||
|
||||
claim_hash = txo.claim_hash
|
||||
claim_hashes.add(claim_hash)
|
||||
claim_record = {
|
||||
'claim_hash': claim_hash,
|
||||
'claim_id': txo.claim_id,
|
||||
'claim_name': txo.claim_name,
|
||||
'normalized': txo.normalized_name,
|
||||
'txo_hash': txo.ref.hash,
|
||||
'tx_position': tx.position,
|
||||
'amount': txo.amount,
|
||||
'timestamp': header['timestamp'],
|
||||
'height': tx.height,
|
||||
'title': None,
|
||||
'description': None,
|
||||
'author': None,
|
||||
'duration': None,
|
||||
'claim_type': None,
|
||||
'has_source': False,
|
||||
'stream_type': None,
|
||||
'media_type': None,
|
||||
'release_time': None,
|
||||
'fee_currency': None,
|
||||
'fee_amount': 0,
|
||||
'reposted_claim_hash': None
|
||||
}
|
||||
claims.append(claim_record)
|
||||
|
||||
try:
|
||||
claim = txo.claim
|
||||
except:
|
||||
#self.logger.exception(f"Could not parse claim protobuf for {tx.id}:{txo.position}.")
|
||||
continue
|
||||
|
||||
if claim.is_stream:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['stream']
|
||||
claim_record['has_source'] = claim.stream.has_source
|
||||
claim_record['media_type'] = claim.stream.source.media_type
|
||||
claim_record['stream_type'] = STREAM_TYPES[guess_stream_type(claim_record['media_type'])]
|
||||
claim_record['title'] = claim.stream.title
|
||||
claim_record['description'] = claim.stream.description
|
||||
claim_record['author'] = claim.stream.author
|
||||
if claim.stream.video and claim.stream.video.duration:
|
||||
claim_record['duration'] = claim.stream.video.duration
|
||||
if claim.stream.audio and claim.stream.audio.duration:
|
||||
claim_record['duration'] = claim.stream.audio.duration
|
||||
if claim.stream.release_time:
|
||||
claim_record['release_time'] = claim.stream.release_time
|
||||
if claim.stream.has_fee:
|
||||
fee = claim.stream.fee
|
||||
if isinstance(fee.currency, str):
|
||||
claim_record['fee_currency'] = fee.currency.lower()
|
||||
if isinstance(fee.amount, Decimal):
|
||||
if fee.amount >= 0 and int(fee.amount*1000) < 9223372036854775807:
|
||||
claim_record['fee_amount'] = int(fee.amount*1000)
|
||||
elif claim.is_repost:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['repost']
|
||||
claim_record['reposted_claim_hash'] = claim.repost.reference.claim_hash
|
||||
elif claim.is_channel:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['channel']
|
||||
elif claim.is_collection:
|
||||
claim_record['claim_type'] = CLAIM_TYPES['collection']
|
||||
|
||||
languages[(language, claim_hash)] = (language, claim_hash, tx.height)
|
||||
|
||||
for tag in clean_tags(claim.message.tags):
|
||||
tags[(tag, claim_hash)] = (tag, claim_hash, tx.height)
|
||||
|
||||
if clear_first:
|
||||
self._clear_claim_metadata(claim_hashes)
|
||||
|
||||
if tags:
|
||||
self.executemany(
|
||||
"INSERT OR IGNORE INTO tag (tag, claim_hash, height) VALUES (?, ?, ?)", tags.values()
|
||||
)
|
||||
if languages:
|
||||
self.executemany(
|
||||
"INSERT OR IGNORE INTO language (language, claim_hash, height) VALUES (?, ?, ?)", languages.values()
|
||||
)
|
||||
|
||||
return claims
|
||||
|
||||
def insert_claims(self, txos: List[Output], header):
|
||||
claims = self._upsertable_claims(txos, header)
|
||||
if claims:
|
||||
self.executemany("""
|
||||
INSERT OR REPLACE INTO claim (
|
||||
claim_hash, claim_id, claim_name, normalized, txo_hash, tx_position, amount,
|
||||
claim_type, media_type, stream_type, timestamp, creation_timestamp, has_source,
|
||||
fee_currency, fee_amount, title, description, author, duration, height, reposted_claim_hash,
|
||||
creation_height, release_time, activation_height, expiration_height, short_url)
|
||||
VALUES (
|
||||
:claim_hash, :claim_id, :claim_name, :normalized, :txo_hash, :tx_position, :amount,
|
||||
:claim_type, :media_type, :stream_type, :timestamp, :timestamp, :has_source,
|
||||
:fee_currency, :fee_amount, :title, :description, :author, :duration, :height, :reposted_claim_hash, :height,
|
||||
CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE :timestamp END,
|
||||
CASE WHEN :normalized NOT IN (SELECT normalized FROM claimtrie) THEN :height END,
|
||||
CASE WHEN :height >= 137181 THEN :height+2102400 ELSE :height+262974 END,
|
||||
:claim_name||COALESCE(
|
||||
(SELECT shortest_id(claim_id, :claim_id) FROM claim WHERE normalized = :normalized),
|
||||
'#'||substr(:claim_id, 1, 1)
|
||||
)
|
||||
)""", claims)
|
||||
|
||||
def update_claims(self, txos: List[Output], header):
|
||||
claims = self._upsertable_claims(txos, header, clear_first=True)
|
||||
if claims:
|
||||
self.executemany("""
|
||||
UPDATE claim SET
|
||||
txo_hash=:txo_hash, tx_position=:tx_position, amount=:amount, height=:height,
|
||||
claim_type=:claim_type, media_type=:media_type, stream_type=:stream_type,
|
||||
timestamp=:timestamp, fee_amount=:fee_amount, fee_currency=:fee_currency, has_source=:has_source,
|
||||
title=:title, duration=:duration, description=:description, author=:author, reposted_claim_hash=:reposted_claim_hash,
|
||||
release_time=CASE WHEN :release_time IS NOT NULL THEN :release_time ELSE release_time END
|
||||
WHERE claim_hash=:claim_hash;
|
||||
""", claims)
|
||||
|
||||
def delete_claims(self, claim_hashes: Set[bytes]):
|
||||
""" Deletes claim supports and from claimtrie in case of an abandon. """
|
||||
if claim_hashes:
|
||||
affected_channels = self.execute(*query(
|
||||
"SELECT channel_hash FROM claim", channel_hash__is_not_null=1, claim_hash__in=claim_hashes
|
||||
)).fetchall()
|
||||
for table in ('claim', 'support', 'claimtrie'):
|
||||
self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
|
||||
self._clear_claim_metadata(claim_hashes)
|
||||
return {r.channel_hash for r in affected_channels}
|
||||
return set()
|
||||
|
||||
def delete_claims_above_height(self, height: int):
|
||||
claim_hashes = [x[0] for x in self.execute(
|
||||
"SELECT claim_hash FROM claim WHERE height>?", (height, )
|
||||
).fetchall()]
|
||||
while claim_hashes:
|
||||
batch = set(claim_hashes[:500])
|
||||
claim_hashes = claim_hashes[500:]
|
||||
self.delete_claims(batch)
|
||||
|
||||
def _clear_claim_metadata(self, claim_hashes: Set[bytes]):
|
||||
if claim_hashes:
|
||||
for table in ('tag',): # 'language', 'location', etc
|
||||
self.execute(*self._delete_sql(table, {'claim_hash__in': claim_hashes}))
|
||||
|
||||
def split_inputs_into_claims_supports_and_other(self, txis):
|
||||
txo_hashes = {txi.txo_ref.hash for txi in txis}
|
||||
claims = self.execute(*query(
|
||||
"SELECT txo_hash, claim_hash, normalized FROM claim", txo_hash__in=txo_hashes
|
||||
)).fetchall()
|
||||
txo_hashes -= {r.txo_hash for r in claims}
|
||||
supports = {}
|
||||
if txo_hashes:
|
||||
supports = self.execute(*query(
|
||||
"SELECT txo_hash, claim_hash FROM support", txo_hash__in=txo_hashes
|
||||
)).fetchall()
|
||||
txo_hashes -= {r.txo_hash for r in supports}
|
||||
return claims, supports, txo_hashes
|
||||
|
||||
def insert_supports(self, txos: List[Output]):
|
||||
supports = []
|
||||
for txo in txos:
|
||||
tx = txo.tx_ref.tx
|
||||
supports.append((
|
||||
txo.ref.hash, tx.position, tx.height,
|
||||
txo.claim_hash, txo.amount
|
||||
))
|
||||
if supports:
|
||||
self.executemany(
|
||||
"INSERT OR IGNORE INTO support ("
|
||||
" txo_hash, tx_position, height, claim_hash, amount"
|
||||
") "
|
||||
"VALUES (?, ?, ?, ?, ?)", supports
|
||||
)
|
||||
|
||||
def delete_supports(self, txo_hashes: Set[bytes]):
|
||||
if txo_hashes:
|
||||
self.execute(*self._delete_sql('support', {'txo_hash__in': txo_hashes}))
|
||||
|
||||
def calculate_reposts(self, txos: List[Output]):
|
||||
targets = set()
|
||||
for txo in txos:
|
||||
try:
|
||||
claim = txo.claim
|
||||
except:
|
||||
continue
|
||||
if claim.is_repost:
|
||||
targets.add((claim.repost.reference.claim_hash,))
|
||||
if targets:
|
||||
self.executemany(
|
||||
"""
|
||||
UPDATE claim SET reposted = (
|
||||
SELECT count(*) FROM claim AS repost WHERE repost.reposted_claim_hash = claim.claim_hash
|
||||
)
|
||||
WHERE claim_hash = ?
|
||||
""", targets
|
||||
)
|
||||
return {target[0] for target in targets}
|
||||
|
||||
def validate_channel_signatures(self, height, new_claims, updated_claims, spent_claims, affected_channels, timer):
|
||||
if not new_claims and not updated_claims and not spent_claims:
|
||||
return
|
||||
|
||||
sub_timer = timer.add_timer('segregate channels and signables')
|
||||
sub_timer.start()
|
||||
channels, new_channel_keys, signables = {}, {}, {}
|
||||
for txo in chain(new_claims, updated_claims):
|
||||
try:
|
||||
claim = txo.claim
|
||||
except:
|
||||
continue
|
||||
if claim.is_channel:
|
||||
channels[txo.claim_hash] = txo
|
||||
new_channel_keys[txo.claim_hash] = claim.channel.public_key_bytes
|
||||
else:
|
||||
signables[txo.claim_hash] = txo
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('make list of channels we need to lookup')
|
||||
sub_timer.start()
|
||||
missing_channel_keys = set()
|
||||
for txo in signables.values():
|
||||
claim = txo.claim
|
||||
if claim.is_signed and claim.signing_channel_hash not in new_channel_keys:
|
||||
missing_channel_keys.add(claim.signing_channel_hash)
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('lookup missing channels')
|
||||
sub_timer.start()
|
||||
all_channel_keys = {}
|
||||
if new_channel_keys or missing_channel_keys or affected_channels:
|
||||
all_channel_keys = dict(self.execute(*query(
|
||||
"SELECT claim_hash, public_key_bytes FROM claim",
|
||||
claim_hash__in=set(new_channel_keys) | missing_channel_keys | affected_channels
|
||||
)))
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('prepare for updating claims')
|
||||
sub_timer.start()
|
||||
changed_channel_keys = {}
|
||||
for claim_hash, new_key in new_channel_keys.items():
|
||||
if claim_hash not in all_channel_keys or all_channel_keys[claim_hash] != new_key:
|
||||
all_channel_keys[claim_hash] = new_key
|
||||
changed_channel_keys[claim_hash] = new_key
|
||||
|
||||
claim_updates = []
|
||||
|
||||
for claim_hash, txo in signables.items():
|
||||
claim = txo.claim
|
||||
update = {
|
||||
'claim_hash': claim_hash,
|
||||
'channel_hash': None,
|
||||
'signature': None,
|
||||
'signature_digest': None,
|
||||
'signature_valid': None
|
||||
}
|
||||
if claim.is_signed:
|
||||
update.update({
|
||||
'channel_hash': claim.signing_channel_hash,
|
||||
'signature': txo.get_encoded_signature(),
|
||||
'signature_digest': txo.get_signature_digest(self.ledger),
|
||||
'signature_valid': 0
|
||||
})
|
||||
claim_updates.append(update)
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('find claims affected by a change in channel key')
|
||||
sub_timer.start()
|
||||
if changed_channel_keys:
|
||||
sql = f"""
|
||||
SELECT * FROM claim WHERE
|
||||
channel_hash IN ({','.join('?' for _ in changed_channel_keys)}) AND
|
||||
signature IS NOT NULL
|
||||
"""
|
||||
for affected_claim in self.execute(sql, list(changed_channel_keys.keys())):
|
||||
if affected_claim.claim_hash not in signables:
|
||||
claim_updates.append({
|
||||
'claim_hash': affected_claim.claim_hash,
|
||||
'channel_hash': affected_claim.channel_hash,
|
||||
'signature': affected_claim.signature,
|
||||
'signature_digest': affected_claim.signature_digest,
|
||||
'signature_valid': 0
|
||||
})
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('verify signatures')
|
||||
sub_timer.start()
|
||||
for update in claim_updates:
|
||||
channel_pub_key = all_channel_keys.get(update['channel_hash'])
|
||||
if channel_pub_key and update['signature']:
|
||||
update['signature_valid'] = Output.is_signature_valid(
|
||||
bytes(update['signature']), bytes(update['signature_digest']), channel_pub_key
|
||||
)
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('update claims')
|
||||
sub_timer.start()
|
||||
if claim_updates:
|
||||
self.executemany(f"""
|
||||
UPDATE claim SET
|
||||
channel_hash=:channel_hash, signature=:signature, signature_digest=:signature_digest,
|
||||
signature_valid=:signature_valid,
|
||||
channel_join=CASE
|
||||
WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN channel_join
|
||||
WHEN :signature_valid=1 THEN {height}
|
||||
END,
|
||||
canonical_url=CASE
|
||||
WHEN signature_valid=1 AND :signature_valid=1 AND channel_hash=:channel_hash THEN canonical_url
|
||||
WHEN :signature_valid=1 THEN
|
||||
(SELECT short_url FROM claim WHERE claim_hash=:channel_hash)||'/'||
|
||||
claim_name||COALESCE(
|
||||
(SELECT shortest_id(other_claim.claim_id, claim.claim_id) FROM claim AS other_claim
|
||||
WHERE other_claim.signature_valid = 1 AND
|
||||
other_claim.channel_hash = :channel_hash AND
|
||||
other_claim.normalized = claim.normalized),
|
||||
'#'||substr(claim_id, 1, 1)
|
||||
)
|
||||
END
|
||||
WHERE claim_hash=:claim_hash;
|
||||
""", claim_updates)
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('update claims affected by spent channels')
|
||||
sub_timer.start()
|
||||
if spent_claims:
|
||||
self.execute(
|
||||
f"""
|
||||
UPDATE claim SET
|
||||
signature_valid=CASE WHEN signature IS NOT NULL THEN 0 END,
|
||||
channel_join=NULL, canonical_url=NULL
|
||||
WHERE channel_hash IN ({','.join('?' for _ in spent_claims)})
|
||||
""", list(spent_claims)
|
||||
)
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('update channels')
|
||||
sub_timer.start()
|
||||
if channels:
|
||||
self.executemany(
|
||||
"""
|
||||
UPDATE claim SET
|
||||
public_key_bytes=:public_key_bytes,
|
||||
public_key_hash=:public_key_hash
|
||||
WHERE claim_hash=:claim_hash""", [{
|
||||
'claim_hash': claim_hash,
|
||||
'public_key_bytes': txo.claim.channel.public_key_bytes,
|
||||
'public_key_hash': self.ledger.address_to_hash160(
|
||||
self.ledger.public_key_to_address(txo.claim.channel.public_key_bytes)
|
||||
)
|
||||
} for claim_hash, txo in channels.items()]
|
||||
)
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('update claims_in_channel counts')
|
||||
sub_timer.start()
|
||||
if all_channel_keys:
|
||||
self.executemany(f"""
|
||||
UPDATE claim SET
|
||||
claims_in_channel=(
|
||||
SELECT COUNT(*) FROM claim AS claim_in_channel
|
||||
WHERE claim_in_channel.signature_valid=1 AND
|
||||
claim_in_channel.channel_hash=claim.claim_hash
|
||||
)
|
||||
WHERE claim_hash = ?
|
||||
""", [(channel_hash,) for channel_hash in all_channel_keys])
|
||||
sub_timer.stop()
|
||||
|
||||
sub_timer = timer.add_timer('update blocked claims list')
|
||||
sub_timer.start()
|
||||
if (self.blocking_channel_hashes.intersection(all_channel_keys) or
|
||||
self.filtering_channel_hashes.intersection(all_channel_keys)):
|
||||
self.update_blocked_and_filtered_claims()
|
||||
sub_timer.stop()
|
||||
|
||||
def _update_support_amount(self, claim_hashes):
|
||||
if claim_hashes:
|
||||
self.execute(f"""
|
||||
UPDATE claim SET
|
||||
support_amount = COALESCE(
|
||||
(SELECT SUM(amount) FROM support WHERE support.claim_hash=claim.claim_hash), 0
|
||||
)
|
||||
WHERE claim_hash IN ({','.join('?' for _ in claim_hashes)})
|
||||
""", claim_hashes)
|
||||
|
||||
def _update_effective_amount(self, height, claim_hashes=None):
|
||||
self.execute(
|
||||
f"UPDATE claim SET effective_amount = amount + support_amount "
|
||||
f"WHERE activation_height = {height}"
|
||||
)
|
||||
if claim_hashes:
|
||||
self.execute(
|
||||
f"UPDATE claim SET effective_amount = amount + support_amount "
|
||||
f"WHERE activation_height < {height} "
|
||||
f" AND claim_hash IN ({','.join('?' for _ in claim_hashes)})",
|
||||
claim_hashes
|
||||
)
|
||||
|
||||
def _calculate_activation_height(self, height):
|
||||
last_take_over_height = f"""COALESCE(
|
||||
(SELECT last_take_over_height FROM claimtrie
|
||||
WHERE claimtrie.normalized=claim.normalized),
|
||||
{height}
|
||||
)
|
||||
"""
|
||||
self.execute(f"""
|
||||
UPDATE claim SET activation_height =
|
||||
{height} + min(4032, cast(({height} - {last_take_over_height}) / 32 AS INT))
|
||||
WHERE activation_height IS NULL
|
||||
""")
|
||||
|
||||
def _perform_overtake(self, height, changed_claim_hashes, deleted_names):
|
||||
deleted_names_sql = claim_hashes_sql = ""
|
||||
if changed_claim_hashes:
|
||||
claim_hashes_sql = f"OR claim_hash IN ({','.join('?' for _ in changed_claim_hashes)})"
|
||||
if deleted_names:
|
||||
deleted_names_sql = f"OR normalized IN ({','.join('?' for _ in deleted_names)})"
|
||||
overtakes = self.execute(f"""
|
||||
SELECT winner.normalized, winner.claim_hash,
|
||||
claimtrie.claim_hash AS current_winner,
|
||||
MAX(winner.effective_amount) AS max_winner_effective_amount
|
||||
FROM (
|
||||
SELECT normalized, claim_hash, effective_amount FROM claim
|
||||
WHERE normalized IN (
|
||||
SELECT normalized FROM claim WHERE activation_height={height} {claim_hashes_sql}
|
||||
) {deleted_names_sql}
|
||||
ORDER BY effective_amount DESC, height ASC, tx_position ASC
|
||||
) AS winner LEFT JOIN claimtrie USING (normalized)
|
||||
GROUP BY winner.normalized
|
||||
HAVING current_winner IS NULL OR current_winner <> winner.claim_hash
|
||||
""", list(changed_claim_hashes)+deleted_names)
|
||||
for overtake in overtakes:
|
||||
if overtake.current_winner:
|
||||
self.execute(
|
||||
f"UPDATE claimtrie SET claim_hash = ?, last_take_over_height = {height} "
|
||||
f"WHERE normalized = ?",
|
||||
(overtake.claim_hash, overtake.normalized)
|
||||
)
|
||||
else:
|
||||
self.execute(
|
||||
f"INSERT INTO claimtrie (claim_hash, normalized, last_take_over_height) "
|
||||
f"VALUES (?, ?, {height})",
|
||||
(overtake.claim_hash, overtake.normalized)
|
||||
)
|
||||
self.execute(
|
||||
f"UPDATE claim SET activation_height = {height} WHERE normalized = ? "
|
||||
f"AND (activation_height IS NULL OR activation_height > {height})",
|
||||
(overtake.normalized,)
|
||||
)
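The query above can be read as a per-name winner election: the claim with the highest effective amount wins, ties going to the older claim (lower height, then lower tx position), and the claimtrie row is only rewritten when that winner differs from the current one. A rough standalone restatement under those assumptions (hypothetical names, not the actual SQL):

def pick_winner(candidates):
    # candidates: (claim_hash, effective_amount, height, tx_position) tuples for one normalized name
    best = max(candidates, key=lambda c: (c[1], -c[2], -c[3]))
    return best[0]

current_winner = b'old'
winner = pick_winner([(b'old', 5, 100, 0), (b'challenger', 9, 120, 3)])
takeover = winner != current_winner   # True: claimtrie gets the new hash and last_take_over_height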
|
||||
|
||||
def _copy(self, height):
|
||||
if height > 50:
|
||||
self.execute(f"DROP TABLE claimtrie{height-50}")
|
||||
self.execute(f"CREATE TABLE claimtrie{height} AS SELECT * FROM claimtrie")
|
||||
|
||||
def update_claimtrie(self, height, changed_claim_hashes, deleted_names, timer):
|
||||
r = timer.run
|
||||
binary_claim_hashes = list(changed_claim_hashes)
|
||||
|
||||
r(self._calculate_activation_height, height)
|
||||
r(self._update_support_amount, binary_claim_hashes)
|
||||
|
||||
r(self._update_effective_amount, height, binary_claim_hashes)
|
||||
r(self._perform_overtake, height, binary_claim_hashes, list(deleted_names))
|
||||
|
||||
r(self._update_effective_amount, height)
|
||||
r(self._perform_overtake, height, [], [])
|
||||
|
||||
def get_expiring(self, height):
|
||||
return self.execute(
|
||||
f"SELECT claim_hash, normalized FROM claim WHERE expiration_height = {height}"
|
||||
)
|
||||
|
||||
def enqueue_changes(self):
|
||||
query = """
|
||||
SELECT claimtrie.claim_hash as is_controlling,
|
||||
claimtrie.last_take_over_height,
|
||||
(select group_concat(tag, ',,') from tag where tag.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as tags,
|
||||
(select group_concat(language, ' ') from language where language.claim_hash in (claim.claim_hash, claim.reposted_claim_hash)) as languages,
|
||||
cr.has_source as reposted_has_source,
|
||||
cr.claim_type as reposted_claim_type,
|
||||
cr.stream_type as reposted_stream_type,
|
||||
cr.media_type as reposted_media_type,
|
||||
cr.duration as reposted_duration,
|
||||
cr.fee_amount as reposted_fee_amount,
|
||||
cr.fee_currency as reposted_fee_currency,
|
||||
claim.*
|
||||
FROM claim LEFT JOIN claimtrie USING (claim_hash) LEFT JOIN claim cr ON cr.claim_hash=claim.reposted_claim_hash
|
||||
WHERE claim.claim_hash in (SELECT claim_hash FROM changelog)
|
||||
"""
|
||||
for claim in self.execute(query):
|
||||
claim = claim._asdict()
|
||||
id_set = set(filter(None, (claim['claim_hash'], claim['channel_hash'], claim['reposted_claim_hash'])))
|
||||
claim['censor_type'] = 0
|
||||
censoring_channel_hash = None
|
||||
claim['has_source'] = bool(claim.pop('reposted_has_source') or claim['has_source'])
|
||||
claim['stream_type'] = claim.pop('reposted_stream_type') or claim['stream_type']
|
||||
claim['media_type'] = claim.pop('reposted_media_type') or claim['media_type']
|
||||
claim['fee_amount'] = claim.pop('reposted_fee_amount') or claim['fee_amount']
|
||||
claim['fee_currency'] = claim.pop('reposted_fee_currency') or claim['fee_currency']
|
||||
claim['duration'] = claim.pop('reposted_duration') or claim['duration']
|
||||
for reason_id in id_set:
|
||||
if reason_id in self.blocked_streams:
|
||||
claim['censor_type'] = 2
|
||||
censoring_channel_hash = self.blocked_streams.get(reason_id)
|
||||
elif reason_id in self.blocked_channels:
|
||||
claim['censor_type'] = 2
|
||||
censoring_channel_hash = self.blocked_channels.get(reason_id)
|
||||
elif reason_id in self.filtered_streams:
|
||||
claim['censor_type'] = 1
|
||||
censoring_channel_hash = self.filtered_streams.get(reason_id)
|
||||
elif reason_id in self.filtered_channels:
|
||||
claim['censor_type'] = 1
|
||||
censoring_channel_hash = self.filtered_channels.get(reason_id)
|
||||
claim['censoring_channel_id'] = censoring_channel_hash[::-1].hex() if censoring_channel_hash else None
|
||||
|
||||
claim['tags'] = claim['tags'].split(',,') if claim['tags'] else []
|
||||
claim['languages'] = claim['languages'].split(' ') if claim['languages'] else []
|
||||
yield 'update', claim
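For reference, the marking loop above assigns censor_type 2 when the claim, its channel, or the reposted claim appears in a blocking list and censor_type 1 when it only appears in a filtering list, with 0 meaning uncensored; each dict maps a blocked or filtered hash to the hash of the channel doing the censoring. A condensed, standalone restatement mirroring that loop (hypothetical helper, not part of the codebase):

def censor_for(id_set, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
    censor_type, censoring_channel = 0, None
    for reason_id in id_set:
        if reason_id in blocked_streams:
            censor_type, censoring_channel = 2, blocked_streams[reason_id]
        elif reason_id in blocked_channels:
            censor_type, censoring_channel = 2, blocked_channels[reason_id]
        elif reason_id in filtered_streams:
            censor_type, censoring_channel = 1, filtered_streams[reason_id]
        elif reason_id in filtered_channels:
            censor_type, censoring_channel = 1, filtered_channels[reason_id]
    return censor_type, censoring_channel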
|
||||
|
||||
def clear_changelog(self):
|
||||
self.execute("delete from changelog;")
|
||||
|
||||
def claim_producer(self):
|
||||
while self.pending_deletes:
|
||||
claim_hash = self.pending_deletes.pop()
|
||||
yield 'delete', hexlify(claim_hash[::-1]).decode()
|
||||
for claim in self.enqueue_changes():
|
||||
yield claim
|
||||
self.clear_changelog()
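claim_producer() is the feed the Elasticsearch sync consumes: first ('delete', claim_id_hex) for pending deletions, then ('update', claim_dict) for every row recorded in the changelog, after which the changelog is cleared. A sketch of a consumer, assuming a hypothetical indexer object with delete/upsert methods:

def sync_one_batch(sql_db, indexer):
    # drain one batch of changes from the SQLite writer into the search index
    for operation, value in sql_db.claim_producer():
        if operation == 'delete':
            indexer.delete(claim_id=value)    # value is the claim id as hex
        else:
            indexer.upsert(document=value)    # value is the claim row as a dict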
|
||||
|
||||
def advance_txs(self, height, all_txs, header, daemon_height, timer):
|
||||
insert_claims = []
|
||||
update_claims = []
|
||||
update_claim_hashes = set()
|
||||
delete_claim_hashes = self.pending_deletes
|
||||
insert_supports = []
|
||||
delete_support_txo_hashes = set()
|
||||
recalculate_claim_hashes = set() # added/deleted supports, added/updated claim
|
||||
deleted_claim_names = set()
|
||||
delete_others = set()
|
||||
body_timer = timer.add_timer('body')
|
||||
for position, (etx, txid) in enumerate(all_txs):
|
||||
tx = timer.run(
|
||||
Transaction, etx.raw, height=height, position=position
|
||||
)
|
||||
# Inputs
|
||||
spent_claims, spent_supports, spent_others = timer.run(
|
||||
self.split_inputs_into_claims_supports_and_other, tx.inputs
|
||||
)
|
||||
body_timer.start()
|
||||
delete_claim_hashes.update({r.claim_hash for r in spent_claims})
|
||||
deleted_claim_names.update({r.normalized for r in spent_claims})
|
||||
delete_support_txo_hashes.update({r.txo_hash for r in spent_supports})
|
||||
recalculate_claim_hashes.update({r.claim_hash for r in spent_supports})
|
||||
delete_others.update(spent_others)
|
||||
# Outputs
|
||||
for output in tx.outputs:
|
||||
if output.is_support:
|
||||
insert_supports.append(output)
|
||||
recalculate_claim_hashes.add(output.claim_hash)
|
||||
elif output.script.is_claim_name:
|
||||
insert_claims.append(output)
|
||||
recalculate_claim_hashes.add(output.claim_hash)
|
||||
elif output.script.is_update_claim:
|
||||
claim_hash = output.claim_hash
|
||||
update_claims.append(output)
|
||||
recalculate_claim_hashes.add(claim_hash)
|
||||
body_timer.stop()
|
||||
|
||||
skip_update_claim_timer = timer.add_timer('skip update of abandoned claims')
|
||||
skip_update_claim_timer.start()
|
||||
for updated_claim in list(update_claims):
|
||||
if updated_claim.ref.hash in delete_others:
|
||||
update_claims.remove(updated_claim)
|
||||
for updated_claim in update_claims:
|
||||
claim_hash = updated_claim.claim_hash
|
||||
delete_claim_hashes.discard(claim_hash)
|
||||
update_claim_hashes.add(claim_hash)
|
||||
skip_update_claim_timer.stop()
|
||||
|
||||
skip_insert_claim_timer = timer.add_timer('skip insertion of abandoned claims')
|
||||
skip_insert_claim_timer.start()
|
||||
for new_claim in list(insert_claims):
|
||||
if new_claim.ref.hash in delete_others:
|
||||
if new_claim.claim_hash not in update_claim_hashes:
|
||||
insert_claims.remove(new_claim)
|
||||
skip_insert_claim_timer.stop()
|
||||
|
||||
skip_insert_support_timer = timer.add_timer('skip insertion of abandoned supports')
|
||||
skip_insert_support_timer.start()
|
||||
for new_support in list(insert_supports):
|
||||
if new_support.ref.hash in delete_others:
|
||||
insert_supports.remove(new_support)
|
||||
skip_insert_support_timer.stop()
|
||||
|
||||
expire_timer = timer.add_timer('recording expired claims')
|
||||
expire_timer.start()
|
||||
for expired in self.get_expiring(height):
|
||||
delete_claim_hashes.add(expired.claim_hash)
|
||||
deleted_claim_names.add(expired.normalized)
|
||||
expire_timer.stop()
|
||||
|
||||
r = timer.run
|
||||
affected_channels = r(self.delete_claims, delete_claim_hashes)
|
||||
r(self.delete_supports, delete_support_txo_hashes)
|
||||
r(self.insert_claims, insert_claims, header)
|
||||
r(self.calculate_reposts, insert_claims)
|
||||
r(self.update_claims, update_claims, header)
|
||||
r(self.validate_channel_signatures, height, insert_claims,
|
||||
update_claims, delete_claim_hashes, affected_channels, forward_timer=True)
|
||||
r(self.insert_supports, insert_supports)
|
||||
r(self.update_claimtrie, height, recalculate_claim_hashes, deleted_claim_names, forward_timer=True)
|
||||
for algorithm in self.trending:
|
||||
r(algorithm.run, self.db.cursor(), height, daemon_height, recalculate_claim_hashes)
|
||||
|
||||
|
||||
class LBRYLevelDB(LevelDB):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
path = os.path.join(self.env.db_dir, 'claims.db')
|
||||
trending = []
|
||||
for algorithm_name in self.env.trending_algorithms:
|
||||
if algorithm_name in TRENDING_ALGORITHMS:
|
||||
trending.append(TRENDING_ALGORITHMS[algorithm_name])
|
||||
if self.env.es_mode == 'reader':
|
||||
self.logger.info('Index mode: reader')
|
||||
self.sql = None
|
||||
else:
|
||||
self.logger.info('Index mode: writer. Using SQLite db to sync ES')
|
||||
self.sql = SQLDB(
|
||||
self, path,
|
||||
self.env.default('BLOCKING_CHANNEL_IDS', '').split(' '),
|
||||
self.env.default('FILTERING_CHANNEL_IDS', '').split(' '),
|
||||
trending
|
||||
)
|
||||
|
||||
# Search index
|
||||
self.search_index = SearchIndex(
|
||||
self.env.es_index_prefix, self.env.database_query_timeout, self.env.elastic_host, self.env.elastic_port
|
||||
)
|
||||
|
||||
def close(self):
|
||||
super().close()
|
||||
if self.sql:
|
||||
self.sql.close()
|
||||
|
||||
async def _open_dbs(self, *args, **kwargs):
|
||||
await self.search_index.start()
|
||||
await super()._open_dbs(*args, **kwargs)
|
||||
if self.sql:
|
||||
self.sql.open()
|
|
@@ -5,7 +5,7 @@
|
|||
# See the file "LICENCE" for information about the copyright
|
||||
# and warranty status of this software.
|
||||
|
||||
|
||||
import math
|
||||
import re
|
||||
import resource
|
||||
from os import environ
|
||||
|
@@ -39,10 +39,14 @@ class Env:
|
|||
self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
|
||||
self.db_dir = self.required('DB_DIRECTORY')
|
||||
self.db_engine = self.default('DB_ENGINE', 'leveldb')
|
||||
self.trending_algorithms = [
|
||||
trending for trending in set(self.default('TRENDING_ALGORITHMS', 'zscore').split(' ')) if trending
|
||||
]
|
||||
self.max_query_workers = self.integer('MAX_QUERY_WORKERS', None)
|
||||
# self.trending_algorithms = [
|
||||
# trending for trending in set(self.default('TRENDING_ALGORITHMS', 'zscore').split(' ')) if trending
|
||||
# ]
|
||||
self.trending_half_life = math.log2(0.1 ** (1 / (3 + self.integer('TRENDING_DECAY_RATE', 48)))) + 1
|
||||
self.trending_whale_half_life = math.log2(0.1 ** (1 / (3 + self.integer('TRENDING_WHALE_DECAY_RATE', 24)))) + 1
|
||||
self.trending_whale_threshold = float(self.integer('TRENDING_WHALE_THRESHOLD', 10000)) * 1E8
|
||||
|
||||
self.max_query_workers = self.integer('MAX_QUERY_WORKERS', 4)
|
||||
self.individual_tag_indexes = self.boolean('INDIVIDUAL_TAG_INDEXES', True)
|
||||
self.track_metrics = self.boolean('TRACK_METRICS', False)
|
||||
self.websocket_host = self.default('WEBSOCKET_HOST', self.host)
|
||||
|
@@ -57,7 +61,7 @@ class Env:
|
|||
self.coin = Coin.lookup_coin_class(coin_name, network)
|
||||
self.es_index_prefix = self.default('ES_INDEX_PREFIX', '')
|
||||
self.es_mode = self.default('ES_MODE', 'writer')
|
||||
self.cache_MB = self.integer('CACHE_MB', 1200)
|
||||
self.cache_MB = self.integer('CACHE_MB', 4096)
|
||||
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
|
||||
# Server stuff
|
||||
self.tcp_port = self.integer('TCP_PORT', None)
|
||||
|
|
|
@@ -36,6 +36,7 @@ _sha512 = hashlib.sha512
|
|||
_new_hash = hashlib.new
|
||||
_new_hmac = hmac.new
|
||||
HASHX_LEN = 11
|
||||
CLAIM_HASH_LEN = 20
|
||||
|
||||
|
||||
def sha256(x):
|
||||
|
|
|
@@ -1,349 +0,0 @@
|
|||
# Copyright (c) 2016-2018, Neil Booth
|
||||
# Copyright (c) 2017, the ElectrumX authors
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# See the file "LICENCE" for information about the copyright
|
||||
# and warranty status of this software.
|
||||
|
||||
"""History by script hash (address)."""
|
||||
|
||||
import array
|
||||
import ast
|
||||
import bisect
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from functools import partial
|
||||
|
||||
from lbry.wallet.server import util
|
||||
from lbry.wallet.server.util import pack_be_uint32, unpack_be_uint32_from, unpack_be_uint16_from
|
||||
from lbry.wallet.server.hash import hash_to_hex_str, HASHX_LEN
|
||||
|
||||
|
||||
class History:
|
||||
|
||||
DB_VERSIONS = [0, 1]
|
||||
|
||||
def __init__(self):
|
||||
self.logger = util.class_logger(__name__, self.__class__.__name__)
|
||||
# For history compaction
|
||||
self.max_hist_row_entries = 12500
|
||||
self.unflushed = defaultdict(partial(array.array, 'I'))
|
||||
self.unflushed_count = 0
|
||||
self.db = None
|
||||
|
||||
@property
|
||||
def needs_migration(self):
|
||||
return self.db_version != max(self.DB_VERSIONS)
|
||||
|
||||
def migrate(self):
|
||||
# 0 -> 1: flush_count from 16 to 32 bits
|
||||
self.logger.warning("HISTORY MIGRATION IN PROGRESS. Please avoid shutting down before it finishes.")
|
||||
with self.db.write_batch() as batch:
|
||||
for key, value in self.db.iterator(prefix=b''):
|
||||
if len(key) != 13:
|
||||
continue
|
||||
flush_id, = unpack_be_uint16_from(key[-2:])
|
||||
new_key = key[:-2] + pack_be_uint32(flush_id)
|
||||
batch.put(new_key, value)
|
||||
self.logger.warning("history migration: new keys added, removing old ones.")
|
||||
for key, value in self.db.iterator(prefix=b''):
|
||||
if len(key) == 13:
|
||||
batch.delete(key)
|
||||
self.logger.warning("history migration: writing new state.")
|
||||
self.db_version = 1
|
||||
self.write_state(batch)
|
||||
self.logger.warning("history migration: done.")
|
||||
|
||||
def open_db(self, db_class, for_sync, utxo_flush_count, compacting):
|
||||
self.db = db_class('hist', for_sync)
|
||||
self.read_state()
|
||||
if self.needs_migration:
|
||||
self.migrate()
|
||||
self.clear_excess(utxo_flush_count)
|
||||
# An incomplete compaction needs to be cancelled otherwise
|
||||
# restarting it will corrupt the history
|
||||
if not compacting:
|
||||
self._cancel_compaction()
|
||||
return self.flush_count
|
||||
|
||||
def close_db(self):
|
||||
if self.db:
|
||||
self.db.close()
|
||||
self.db = None
|
||||
|
||||
def read_state(self):
|
||||
state = self.db.get(b'state\0\0')
|
||||
if state:
|
||||
state = ast.literal_eval(state.decode())
|
||||
if not isinstance(state, dict):
|
||||
raise RuntimeError('failed reading state from history DB')
|
||||
self.flush_count = state['flush_count']
|
||||
self.comp_flush_count = state.get('comp_flush_count', -1)
|
||||
self.comp_cursor = state.get('comp_cursor', -1)
|
||||
self.db_version = state.get('db_version', 0)
|
||||
else:
|
||||
self.flush_count = 0
|
||||
self.comp_flush_count = -1
|
||||
self.comp_cursor = -1
|
||||
self.db_version = max(self.DB_VERSIONS)
|
||||
|
||||
self.logger.info(f'history DB version: {self.db_version}')
|
||||
if self.db_version not in self.DB_VERSIONS:
|
||||
msg = f'this software only handles DB versions {self.DB_VERSIONS}'
|
||||
self.logger.error(msg)
|
||||
raise RuntimeError(msg)
|
||||
self.logger.info(f'flush count: {self.flush_count:,d}')
|
||||
|
||||
def clear_excess(self, utxo_flush_count):
|
||||
# < might happen at end of compaction as both DBs cannot be
|
||||
# updated atomically
|
||||
if self.flush_count <= utxo_flush_count:
|
||||
return
|
||||
|
||||
self.logger.info('DB shut down uncleanly. Scanning for '
|
||||
'excess history flushes...')
|
||||
|
||||
keys = []
|
||||
for key, hist in self.db.iterator(prefix=b''):
|
||||
flush_id, = unpack_be_uint32_from(key[-4:])
|
||||
if flush_id > utxo_flush_count:
|
||||
keys.append(key)
|
||||
|
||||
self.logger.info(f'deleting {len(keys):,d} history entries')
|
||||
|
||||
self.flush_count = utxo_flush_count
|
||||
with self.db.write_batch() as batch:
|
||||
for key in keys:
|
||||
batch.delete(key)
|
||||
self.write_state(batch)
|
||||
|
||||
self.logger.info('deleted excess history entries')
|
||||
|
||||
def write_state(self, batch):
|
||||
"""Write state to the history DB."""
|
||||
state = {
|
||||
'flush_count': self.flush_count,
|
||||
'comp_flush_count': self.comp_flush_count,
|
||||
'comp_cursor': self.comp_cursor,
|
||||
'db_version': self.db_version,
|
||||
}
|
||||
# History entries are not prefixed; the suffix \0\0 ensures we
|
||||
# look similar to other entries and aren't interfered with
|
||||
batch.put(b'state\0\0', repr(state).encode())
|
||||
|
||||
def add_unflushed(self, hashXs_by_tx, first_tx_num):
|
||||
unflushed = self.unflushed
|
||||
count = 0
|
||||
for tx_num, hashXs in enumerate(hashXs_by_tx, start=first_tx_num):
|
||||
hashXs = set(hashXs)
|
||||
for hashX in hashXs:
|
||||
unflushed[hashX].append(tx_num)
|
||||
count += len(hashXs)
|
||||
self.unflushed_count += count
|
||||
|
||||
def unflushed_memsize(self):
|
||||
return len(self.unflushed) * 180 + self.unflushed_count * 4
|
||||
|
||||
def assert_flushed(self):
|
||||
assert not self.unflushed
|
||||
|
||||
def flush(self):
|
||||
start_time = time.time()
|
||||
self.flush_count += 1
|
||||
flush_id = pack_be_uint32(self.flush_count)
|
||||
unflushed = self.unflushed
|
||||
|
||||
with self.db.write_batch() as batch:
|
||||
for hashX in sorted(unflushed):
|
||||
key = hashX + flush_id
|
||||
batch.put(key, unflushed[hashX].tobytes())
|
||||
self.write_state(batch)
|
||||
|
||||
count = len(unflushed)
|
||||
unflushed.clear()
|
||||
self.unflushed_count = 0
|
||||
|
||||
if self.db.for_sync:
|
||||
elapsed = time.time() - start_time
|
||||
self.logger.info(f'flushed history in {elapsed:.1f}s '
|
||||
f'for {count:,d} addrs')
|
||||
|
||||
def backup(self, hashXs, tx_count):
|
||||
# Not certain this is needed, but it doesn't hurt
|
||||
self.flush_count += 1
|
||||
nremoves = 0
|
||||
bisect_left = bisect.bisect_left
|
||||
|
||||
with self.db.write_batch() as batch:
|
||||
for hashX in sorted(hashXs):
|
||||
deletes = []
|
||||
puts = {}
|
||||
for key, hist in self.db.iterator(prefix=hashX, reverse=True):
|
||||
a = array.array('I')
|
||||
a.frombytes(hist)
|
||||
# Remove all history entries >= tx_count
|
||||
idx = bisect_left(a, tx_count)
|
||||
nremoves += len(a) - idx
|
||||
if idx > 0:
|
||||
puts[key] = a[:idx].tobytes()
|
||||
break
|
||||
deletes.append(key)
|
||||
|
||||
for key in deletes:
|
||||
batch.delete(key)
|
||||
for key, value in puts.items():
|
||||
batch.put(key, value)
|
||||
self.write_state(batch)
|
||||
|
||||
self.logger.info(f'backing up removed {nremoves:,d} history entries')
|
||||
|
||||
# def get_txnums(self, hashX, limit=1000):
|
||||
# """Generator that returns an unpruned, sorted list of tx_nums in the
|
||||
# history of a hashX. Includes both spending and receiving
|
||||
# transactions. By default yields at most 1000 entries. Set
|
||||
# limit to None to get them all. """
|
||||
# limit = util.resolve_limit(limit)
|
||||
# for key, hist in self.db.iterator(prefix=hashX):
|
||||
# a = array.array('I')
|
||||
# a.frombytes(hist)
|
||||
# for tx_num in a:
|
||||
# if limit == 0:
|
||||
# return
|
||||
# yield tx_num
|
||||
# limit -= 1
|
||||
|
||||
#
|
||||
# History compaction
|
||||
#
|
||||
|
||||
# comp_cursor is a cursor into compaction progress.
|
||||
# -1: no compaction in progress
|
||||
# 0-65535: Compaction in progress; all prefixes < comp_cursor have
|
||||
# been compacted, and later ones have not.
|
||||
# 65536: compaction complete in-memory but not flushed
|
||||
#
|
||||
# comp_flush_count applies during compaction, and is a flush count
|
||||
# for history with prefix < comp_cursor. flush_count applies
|
||||
# to still uncompacted history. It is -1 when no compaction is
|
||||
# taking place. Key suffixes up to and including comp_flush_count
|
||||
# are used, so a parallel history flush must first increment this
|
||||
#
|
||||
# When compaction is complete and the final flush takes place,
|
||||
# flush_count is reset to comp_flush_count, and comp_flush_count to -1
|
||||
|
||||
def _flush_compaction(self, cursor, write_items, keys_to_delete):
|
||||
"""Flush a single compaction pass as a batch."""
|
||||
# Update compaction state
|
||||
if cursor == 65536:
|
||||
self.flush_count = self.comp_flush_count
|
||||
self.comp_cursor = -1
|
||||
self.comp_flush_count = -1
|
||||
else:
|
||||
self.comp_cursor = cursor
|
||||
|
||||
# History DB. Flush compacted history and updated state
|
||||
with self.db.write_batch() as batch:
|
||||
# Important: delete first! The keyspace may overlap.
|
||||
for key in keys_to_delete:
|
||||
batch.delete(key)
|
||||
for key, value in write_items:
|
||||
batch.put(key, value)
|
||||
self.write_state(batch)
|
||||
|
||||
def _compact_hashX(self, hashX, hist_map, hist_list,
|
||||
write_items, keys_to_delete):
|
||||
"""Compress history for a hashX. hist_list is an ordered list of
|
||||
the histories to be compressed."""
|
||||
# History entries (tx numbers) are 4 bytes each. Distribute
|
||||
# over rows of up to 50KB in size. A fixed row size means
|
||||
# future compactions will not need to update the first N - 1
|
||||
# rows.
|
||||
max_row_size = self.max_hist_row_entries * 4
|
||||
full_hist = b''.join(hist_list)
|
||||
nrows = (len(full_hist) + max_row_size - 1) // max_row_size
|
||||
if nrows > 4:
|
||||
self.logger.info('hashX {} is large: {:,d} entries across '
|
||||
'{:,d} rows'
|
||||
.format(hash_to_hex_str(hashX),
|
||||
len(full_hist) // 4, nrows))
|
||||
|
||||
# Find what history needs to be written, and what keys need to
|
||||
# be deleted. Start by assuming all keys are to be deleted,
|
||||
# and then remove those that are the same on-disk as when
|
||||
# compacted.
|
||||
write_size = 0
|
||||
keys_to_delete.update(hist_map)
|
||||
for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
|
||||
key = hashX + pack_be_uint32(n)
|
||||
if hist_map.get(key) == chunk:
|
||||
keys_to_delete.remove(key)
|
||||
else:
|
||||
write_items.append((key, chunk))
|
||||
write_size += len(chunk)
|
||||
|
||||
assert n + 1 == nrows
|
||||
self.comp_flush_count = max(self.comp_flush_count, n)
|
||||
|
||||
return write_size
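Concretely, with 12,500 four-byte tx numbers per row each full row is 50,000 bytes, so a hashX with 30,000 history entries compacts into two full rows plus one partial row, and only that last row can change in a later compaction. A quick check of the arithmetic:

max_hist_row_entries = 12_500
max_row_size = max_hist_row_entries * 4        # 50,000 bytes per full row

entries = 30_000
full_hist_len = entries * 4                    # 120,000 bytes of packed tx numbers
nrows = (full_hist_len + max_row_size - 1) // max_row_size
assert nrows == 3                              # two full rows and one partial row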
|
||||
|
||||
def _compact_prefix(self, prefix, write_items, keys_to_delete):
|
||||
"""Compact all history entries for hashXs beginning with the
|
||||
given prefix. Update keys_to_delete and write."""
|
||||
prior_hashX = None
|
||||
hist_map = {}
|
||||
hist_list = []
|
||||
|
||||
key_len = HASHX_LEN + 2
|
||||
write_size = 0
|
||||
for key, hist in self.db.iterator(prefix=prefix):
|
||||
# Ignore non-history entries
|
||||
if len(key) != key_len:
|
||||
continue
|
||||
hashX = key[:-2]
|
||||
if hashX != prior_hashX and prior_hashX:
|
||||
write_size += self._compact_hashX(prior_hashX, hist_map,
|
||||
hist_list, write_items,
|
||||
keys_to_delete)
|
||||
hist_map.clear()
|
||||
hist_list.clear()
|
||||
prior_hashX = hashX
|
||||
hist_map[key] = hist
|
||||
hist_list.append(hist)
|
||||
|
||||
if prior_hashX:
|
||||
write_size += self._compact_hashX(prior_hashX, hist_map, hist_list,
|
||||
write_items, keys_to_delete)
|
||||
return write_size
|
||||
|
||||
def _compact_history(self, limit):
|
||||
"""Inner loop of history compaction. Loops until limit bytes have
|
||||
been processed.
|
||||
"""
|
||||
keys_to_delete = set()
|
||||
write_items = [] # A list of (key, value) pairs
|
||||
write_size = 0
|
||||
|
||||
# Loop over 2-byte prefixes
|
||||
cursor = self.comp_cursor
|
||||
while write_size < limit and cursor < (1 << 32):
|
||||
prefix = pack_be_uint32(cursor)
|
||||
write_size += self._compact_prefix(prefix, write_items,
|
||||
keys_to_delete)
|
||||
cursor += 1
|
||||
|
||||
max_rows = self.comp_flush_count + 1
|
||||
self._flush_compaction(cursor, write_items, keys_to_delete)
|
||||
|
||||
self.logger.info('history compaction: wrote {:,d} rows ({:.1f} MB), '
|
||||
'removed {:,d} rows, largest: {:,d}, {:.1f}% complete'
|
||||
.format(len(write_items), write_size / 1000000,
|
||||
len(keys_to_delete), max_rows,
|
||||
100 * cursor / 65536))
|
||||
return write_size
|
||||
|
||||
def _cancel_compaction(self):
|
||||
if self.comp_cursor != -1:
|
||||
self.logger.warning('cancelling in-progress history compaction')
|
||||
self.comp_flush_count = -1
|
||||
self.comp_cursor = -1
|
File diff suppressed because it is too large
|
@@ -9,15 +9,16 @@
|
|||
import asyncio
|
||||
import itertools
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
import attr
|
||||
import typing
|
||||
from typing import Set, Optional, Callable, Awaitable
|
||||
from collections import defaultdict
|
||||
from prometheus_client import Histogram
|
||||
|
||||
import attr
|
||||
|
||||
from lbry.wallet.server.hash import hash_to_hex_str, hex_str_to_hash
|
||||
from lbry.wallet.server.util import class_logger, chunks
|
||||
from lbry.wallet.server.leveldb import UTXO
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.server.session import LBRYSessionManager
|
||||
|
||||
|
||||
@attr.s(slots=True)
|
||||
|
@@ -37,47 +38,6 @@ class MemPoolTxSummary:
|
|||
has_unconfirmed_inputs = attr.ib()
|
||||
|
||||
|
||||
class MemPoolAPI(ABC):
|
||||
"""A concrete instance of this class is passed to the MemPool object
|
||||
and used by it to query DB and blockchain state."""
|
||||
|
||||
@abstractmethod
|
||||
async def height(self):
|
||||
"""Query bitcoind for its height."""
|
||||
|
||||
@abstractmethod
|
||||
def cached_height(self):
|
||||
"""Return the height of bitcoind the last time it was queried,
|
||||
for any reason, without actually querying it.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
async def mempool_hashes(self):
|
||||
"""Query bitcoind for the hashes of all transactions in its
|
||||
mempool, returned as a list."""
|
||||
|
||||
@abstractmethod
|
||||
async def raw_transactions(self, hex_hashes):
|
||||
"""Query bitcoind for the serialized raw transactions with the given
|
||||
hashes. Missing transactions are returned as None.
|
||||
|
||||
hex_hashes is an iterable of hexadecimal hash strings."""
|
||||
|
||||
@abstractmethod
|
||||
async def lookup_utxos(self, prevouts):
|
||||
"""Return a list of (hashX, value) pairs each prevout if unspent,
|
||||
otherwise return None if spent or not found.
|
||||
|
||||
prevouts - an iterable of (hash, index) pairs
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
async def on_mempool(self, touched, new_touched, height):
|
||||
"""Called each time the mempool is synchronized. touched is a set of
|
||||
hashXs touched since the previous call. height is the
|
||||
daemon's height at the time the mempool was obtained."""
|
||||
|
||||
|
||||
NAMESPACE = "wallet_server"
|
||||
HISTOGRAM_BUCKETS = (
|
||||
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
|
||||
|
@@ -89,23 +49,14 @@ mempool_process_time_metric = Histogram(
|
|||
|
||||
|
||||
class MemPool:
|
||||
"""Representation of the daemon's mempool.
|
||||
|
||||
coin - a coin class from coins.py
|
||||
api - an object implementing MemPoolAPI
|
||||
|
||||
Updated regularly in caught-up state. Goal is to enable efficient
|
||||
response to the calls in the external interface. To that end we
|
||||
maintain the following maps:
|
||||
|
||||
tx: tx_hash -> MemPoolTx
|
||||
hashXs: hashX -> set of all hashes of txs touching the hashX
|
||||
"""
|
||||
|
||||
def __init__(self, coin, api, refresh_secs=1.0, log_status_secs=120.0):
|
||||
assert isinstance(api, MemPoolAPI)
|
||||
def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0):
|
||||
self.coin = coin
|
||||
self.api = api
|
||||
self._daemon = daemon
|
||||
self._db = db
|
||||
self._touched_mp = {}
|
||||
self._touched_bp = {}
|
||||
self._highest_block = -1
|
||||
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.txs = {}
|
||||
self.hashXs = defaultdict(set) # None can be a key
|
||||
|
@@ -113,10 +64,11 @@ class MemPool:
|
|||
self.refresh_secs = refresh_secs
|
||||
self.log_status_secs = log_status_secs
|
||||
# Prevents mempool refreshes during fee histogram calculation
|
||||
self.lock = asyncio.Lock()
|
||||
self.lock = state_lock
|
||||
self.wakeup = asyncio.Event()
|
||||
self.mempool_process_time_metric = mempool_process_time_metric
|
||||
self.notified_mempool_txs = set()
|
||||
self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None
|
||||
|
||||
async def _logging(self, synchronized_event):
|
||||
"""Print regular logs of mempool stats."""
|
||||
|
@@ -132,40 +84,6 @@ class MemPool:
|
|||
await asyncio.sleep(self.log_status_secs)
|
||||
await synchronized_event.wait()
|
||||
|
||||
async def _refresh_histogram(self, synchronized_event):
|
||||
while True:
|
||||
await synchronized_event.wait()
|
||||
async with self.lock:
|
||||
self._update_histogram(100_000)
|
||||
await asyncio.sleep(self.coin.MEMPOOL_HISTOGRAM_REFRESH_SECS)
|
||||
|
||||
def _update_histogram(self, bin_size):
|
||||
# Build a histogram by fee rate
|
||||
histogram = defaultdict(int)
|
||||
for tx in self.txs.values():
|
||||
histogram[tx.fee // tx.size] += tx.size
|
||||
|
||||
# Now compact it. For efficiency, get_fees returns a
|
||||
# compact histogram with variable bin size. The compact
|
||||
# histogram is an array of (fee_rate, vsize) values.
|
||||
# vsize_n is the cumulative virtual size of mempool
|
||||
# transactions with a fee rate in the interval
|
||||
# [rate_(n-1), rate_n)], and rate_(n-1) > rate_n.
|
||||
# Intervals are chosen to create tranches containing at
|
||||
# least 100kb of transactions
|
||||
compact = []
|
||||
cum_size = 0
|
||||
r = 0  # remainder carried over from tranches that overshot the bin size
|
||||
for fee_rate, size in sorted(histogram.items(), reverse=True):
|
||||
cum_size += size
|
||||
if cum_size + r > bin_size:
|
||||
compact.append((fee_rate, cum_size))
|
||||
r += cum_size - bin_size
|
||||
cum_size = 0
|
||||
bin_size *= 1.1
|
||||
self.logger.info(f'compact fee histogram: {compact}')
|
||||
self.cached_compact_histogram = compact
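A self-contained copy of the folding step with a toy histogram may make the tranche boundaries easier to follow; the fee rates and sizes below are illustrative only.

def compact_fee_histogram(histogram, bin_size=100_000):
    # walk fee rates from highest to lowest, closing a tranche once the cumulative
    # size (plus the remainder carried from earlier tranches) exceeds bin_size
    compact, cum_size, remainder = [], 0, 0
    for fee_rate, size in sorted(histogram.items(), reverse=True):
        cum_size += size
        if cum_size + remainder > bin_size:
            compact.append((fee_rate, cum_size))
            remainder += cum_size - bin_size
            cum_size = 0
            bin_size *= 1.1
    return compact

# vsize of mempool transactions grouped by integer fee rate
assert compact_fee_histogram({5: 80_000, 3: 40_000, 1: 200_000}) == [(3, 120_000), (1, 200_000)]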
|
||||
|
||||
def _accept_transactions(self, tx_map, utxo_map, touched):
|
||||
"""Accept transactions in tx_map to the mempool if all their inputs
|
||||
can be found in the existing mempool or a utxo_map from the
|
||||
|
@@ -223,9 +141,9 @@ class MemPool:
|
|||
"""Refresh our view of the daemon's mempool."""
|
||||
while True:
|
||||
start = time.perf_counter()
|
||||
height = self.api.cached_height()
|
||||
hex_hashes = await self.api.mempool_hashes()
|
||||
if height != await self.api.height():
|
||||
height = self._daemon.cached_height()
|
||||
hex_hashes = await self._daemon.mempool_hashes()
|
||||
if height != await self._daemon.height():
|
||||
continue
|
||||
hashes = {hex_str_to_hash(hh) for hh in hex_hashes}
|
||||
async with self.lock:
|
||||
|
@@ -237,7 +155,7 @@ class MemPool:
|
|||
}
|
||||
synchronized_event.set()
|
||||
synchronized_event.clear()
|
||||
await self.api.on_mempool(touched, new_touched, height)
|
||||
await self.on_mempool(touched, new_touched, height)
|
||||
duration = time.perf_counter() - start
|
||||
self.mempool_process_time_metric.observe(duration)
|
||||
try:
|
||||
|
@@ -292,8 +210,7 @@ class MemPool:
|
|||
|
||||
async def _fetch_and_accept(self, hashes, all_hashes, touched):
|
||||
"""Fetch a list of mempool transactions."""
|
||||
hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes)
|
||||
raw_txs = await self.api.raw_transactions(hex_hashes_iter)
|
||||
raw_txs = await self._daemon.getrawtransactions((hash_to_hex_str(hash) for hash in hashes))
|
||||
|
||||
to_hashX = self.coin.hashX_from_script
|
||||
deserializer = self.coin.DESERIALIZER
|
||||
|
@@ -323,7 +240,7 @@ class MemPool:
|
|||
prevouts = tuple(prevout for tx in tx_map.values()
|
||||
for prevout in tx.prevouts
|
||||
if prevout[0] not in all_hashes)
|
||||
utxos = await self.api.lookup_utxos(prevouts)
|
||||
utxos = await self._db.lookup_utxos(prevouts)
|
||||
utxo_map = dict(zip(prevouts, utxos))
|
||||
|
||||
return self._accept_transactions(tx_map, utxo_map, touched)
|
||||
|
@@ -407,3 +324,37 @@ class MemPool:
|
|||
if unspent_inputs:
|
||||
return -1
|
||||
return 0
|
||||
|
||||
async def _maybe_notify(self, new_touched):
|
||||
tmp, tbp = self._touched_mp, self._touched_bp
|
||||
common = set(tmp).intersection(tbp)
|
||||
if common:
|
||||
height = max(common)
|
||||
elif tmp and max(tmp) == self._highest_block:
|
||||
height = self._highest_block
|
||||
else:
|
||||
# Either we are processing a block and waiting for it to
|
||||
# come in, or we have not yet had a mempool update for the
|
||||
# new block height
|
||||
return
|
||||
touched = tmp.pop(height)
|
||||
for old in [h for h in tmp if h <= height]:
|
||||
del tmp[old]
|
||||
for old in [h for h in tbp if h <= height]:
|
||||
touched.update(tbp.pop(old))
|
||||
# print("notify", height, len(touched), len(new_touched))
|
||||
await self.notify_sessions(height, touched, new_touched)
|
||||
|
||||
async def start(self, height, session_manager: 'LBRYSessionManager'):
|
||||
self._highest_block = height
|
||||
self.notify_sessions = session_manager._notify_sessions
|
||||
await self.notify_sessions(height, set(), set())
|
||||
|
||||
async def on_mempool(self, touched, new_touched, height):
|
||||
self._touched_mp[height] = touched
|
||||
await self._maybe_notify(new_touched)
|
||||
|
||||
async def on_block(self, touched, height):
|
||||
self._touched_bp[height] = touched
|
||||
self._highest_block = height
|
||||
await self._maybe_notify(set())
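The deferral above keeps per-height touched sets from both sources and only notifies once the mempool and the block processor agree on a height, or the mempool alone has refreshed at the highest processed block, so clients hear about a transaction once instead of once per source. A compressed restatement of that decision (hypothetical standalone function):

def height_to_notify(touched_by_mempool, touched_by_blocks, highest_block):
    # both arguments map height -> set of touched hashXs; None means keep waiting
    common = set(touched_by_mempool) & set(touched_by_blocks)
    if common:
        return max(common)
    if touched_by_mempool and max(touched_by_mempool) == highest_block:
        return highest_block
    return None   # block still being processed, or no mempool refresh for it yet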
|
||||
|
|
|
@@ -43,10 +43,12 @@ class Merkle:
|
|||
def __init__(self, hash_func=double_sha256):
|
||||
self.hash_func = hash_func
|
||||
|
||||
def tree_depth(self, hash_count):
|
||||
return self.branch_length(hash_count) + 1
|
||||
@staticmethod
|
||||
def tree_depth(hash_count):
|
||||
return Merkle.branch_length(hash_count) + 1
|
||||
|
||||
def branch_length(self, hash_count):
|
||||
@staticmethod
|
||||
def branch_length(hash_count):
|
||||
"""Return the length of a merkle branch given the number of hashes."""
|
||||
if not isinstance(hash_count, int):
|
||||
raise TypeError('hash_count must be an integer')
|
||||
|
@@ -54,7 +56,8 @@ class Merkle:
|
|||
raise ValueError('hash_count must be at least 1')
|
||||
return ceil(log(hash_count, 2))
|
||||
|
||||
def branch_and_root(self, hashes, index, length=None):
|
||||
@staticmethod
|
||||
def branch_and_root(hashes, index, length=None, hash_func=double_sha256):
|
||||
"""Return a (merkle branch, merkle_root) pair given hashes, and the
|
||||
index of one of those hashes.
|
||||
"""
|
||||
|
@@ -64,7 +67,7 @@ class Merkle:
|
|||
# This also asserts hashes is not empty
|
||||
if not 0 <= index < len(hashes):
|
||||
raise ValueError(f"index '{index}/{len(hashes)}' out of range")
|
||||
natural_length = self.branch_length(len(hashes))
|
||||
natural_length = Merkle.branch_length(len(hashes))
|
||||
if length is None:
|
||||
length = natural_length
|
||||
else:
|
||||
|
@@ -73,7 +76,6 @@ class Merkle:
|
|||
if length < natural_length:
|
||||
raise ValueError('length out of range')
|
||||
|
||||
hash_func = self.hash_func
|
||||
branch = []
|
||||
for _ in range(length):
|
||||
if len(hashes) & 1:
|
||||
|
@@ -85,44 +87,47 @@ class Merkle:
|
|||
|
||||
return branch, hashes[0]
|
||||
|
||||
def root(self, hashes, length=None):
|
||||
@staticmethod
|
||||
def root(hashes, length=None):
|
||||
"""Return the merkle root of a non-empty iterable of binary hashes."""
|
||||
branch, root = self.branch_and_root(hashes, 0, length)
|
||||
branch, root = Merkle.branch_and_root(hashes, 0, length)
|
||||
return root
|
||||
|
||||
def root_from_proof(self, hash, branch, index):
|
||||
"""Return the merkle root given a hash, a merkle branch to it, and
|
||||
its index in the hashes array.
|
||||
# @staticmethod
|
||||
# def root_from_proof(hash, branch, index, hash_func=double_sha256):
|
||||
# """Return the merkle root given a hash, a merkle branch to it, and
|
||||
# its index in the hashes array.
|
||||
#
|
||||
# branch is an iterable sorted deepest to shallowest. If the
|
||||
# returned root is the expected value then the merkle proof is
|
||||
# verified.
|
||||
#
|
||||
# The caller should have confirmed the length of the branch with
|
||||
# branch_length(). Unfortunately this is not easily done for
|
||||
# bitcoin transactions as the number of transactions in a block
|
||||
# is unknown to an SPV client.
|
||||
# """
|
||||
# for elt in branch:
|
||||
# if index & 1:
|
||||
# hash = hash_func(elt + hash)
|
||||
# else:
|
||||
# hash = hash_func(hash + elt)
|
||||
# index >>= 1
|
||||
# if index:
|
||||
# raise ValueError('index out of range for branch')
|
||||
# return hash
|
||||
|
||||
branch is an iterable sorted deepest to shallowest. If the
|
||||
returned root is the expected value then the merkle proof is
|
||||
verified.
|
||||
|
||||
The caller should have confirmed the length of the branch with
|
||||
branch_length(). Unfortunately this is not easily done for
|
||||
bitcoin transactions as the number of transactions in a block
|
||||
is unknown to an SPV client.
|
||||
"""
|
||||
hash_func = self.hash_func
|
||||
for elt in branch:
|
||||
if index & 1:
|
||||
hash = hash_func(elt + hash)
|
||||
else:
|
||||
hash = hash_func(hash + elt)
|
||||
index >>= 1
|
||||
if index:
|
||||
raise ValueError('index out of range for branch')
|
||||
return hash
|
||||
|
||||
def level(self, hashes, depth_higher):
|
||||
@staticmethod
|
||||
def level(hashes, depth_higher):
|
||||
"""Return a level of the merkle tree of hashes the given depth
|
||||
higher than the bottom row of the original tree."""
|
||||
size = 1 << depth_higher
|
||||
root = self.root
|
||||
root = Merkle.root
|
||||
return [root(hashes[n: n + size], depth_higher)
|
||||
for n in range(0, len(hashes), size)]
|
||||
|
||||
def branch_and_root_from_level(self, level, leaf_hashes, index,
|
||||
@staticmethod
|
||||
def branch_and_root_from_level(level, leaf_hashes, index,
|
||||
depth_higher):
|
||||
"""Return a (merkle branch, merkle_root) pair when a merkle-tree has a
|
||||
level cached.
|
||||
|
@@ -146,10 +151,10 @@ class Merkle:
|
|||
if not isinstance(leaf_hashes, list):
|
||||
raise TypeError("leaf_hashes must be a list")
|
||||
leaf_index = (index >> depth_higher) << depth_higher
|
||||
leaf_branch, leaf_root = self.branch_and_root(
|
||||
leaf_branch, leaf_root = Merkle.branch_and_root(
|
||||
leaf_hashes, index - leaf_index, depth_higher)
|
||||
index >>= depth_higher
|
||||
level_branch, root = self.branch_and_root(level, index)
|
||||
level_branch, root = Merkle.branch_and_root(level, index)
|
||||
# Check last so that we know index is in-range
|
||||
if leaf_root != level[index]:
|
||||
raise ValueError('leaf hashes inconsistent with level')
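With this refactor the Merkle helpers are staticmethods taking an explicit hash function, so no instance is needed. A small usage sketch, verifying the returned branch by folding it back to the root the way the commented-out root_from_proof did; the import paths are assumed from this repository's layout.

from lbry.wallet.server.hash import double_sha256
from lbry.wallet.server.merkle import Merkle

hashes = [double_sha256(bytes([i])) for i in range(5)]
branch, root = Merkle.branch_and_root(hashes, 3)

# fold the branch back up to the root, as an SPV client would
h, index = hashes[3], 3
for sibling in branch:
    h = double_sha256(sibling + h) if index & 1 else double_sha256(h + sibling)
    index >>= 1
assert h == root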
|
||||
|
|
|
@@ -5,66 +5,13 @@ from concurrent.futures.thread import ThreadPoolExecutor
|
|||
import typing
|
||||
|
||||
import lbry
|
||||
from lbry.wallet.server.mempool import MemPool, MemPoolAPI
|
||||
from lbry.wallet.server.mempool import MemPool
|
||||
from lbry.wallet.server.block_processor import BlockProcessor
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.session import LBRYSessionManager
|
||||
from lbry.prometheus import PrometheusServer
|
||||
|
||||
|
||||
class Notifications:
|
||||
# hashX notifications come from two sources: new blocks and
|
||||
# mempool refreshes.
|
||||
#
|
||||
# A user with a pending transaction is notified after the block it
|
||||
# gets in is processed. Block processing can take an extended
|
||||
# time, and the prefetcher might poll the daemon after the mempool
|
||||
# code in any case. In such cases the transaction will not be in
|
||||
# the mempool after the mempool refresh. We want to avoid
|
||||
# notifying clients twice - for the mempool refresh and when the
|
||||
# block is done. This object handles that logic by deferring
|
||||
# notifications appropriately.
|
||||
|
||||
def __init__(self):
|
||||
self._touched_mp = {}
|
||||
self._touched_bp = {}
|
||||
self.notified_mempool_txs = set()
|
||||
self._highest_block = -1
|
||||
|
||||
async def _maybe_notify(self, new_touched):
|
||||
tmp, tbp = self._touched_mp, self._touched_bp
|
||||
common = set(tmp).intersection(tbp)
|
||||
if common:
|
||||
height = max(common)
|
||||
elif tmp and max(tmp) == self._highest_block:
|
||||
height = self._highest_block
|
||||
else:
|
||||
# Either we are processing a block and waiting for it to
|
||||
# come in, or we have not yet had a mempool update for the
|
||||
# new block height
|
||||
return
|
||||
touched = tmp.pop(height)
|
||||
for old in [h for h in tmp if h <= height]:
|
||||
del tmp[old]
|
||||
for old in [h for h in tbp if h <= height]:
|
||||
touched.update(tbp.pop(old))
|
||||
await self.notify(height, touched, new_touched)
|
||||
|
||||
async def notify(self, height, touched, new_touched):
|
||||
pass
|
||||
|
||||
async def start(self, height, notify_func):
|
||||
self._highest_block = height
|
||||
self.notify = notify_func
|
||||
await self.notify(height, set(), set())
|
||||
|
||||
async def on_mempool(self, touched, new_touched, height):
|
||||
self._touched_mp[height] = touched
|
||||
await self._maybe_notify(new_touched)
|
||||
|
||||
async def on_block(self, touched, height):
|
||||
self._touched_bp[height] = touched
|
||||
self._highest_block = height
|
||||
await self._maybe_notify(set())
|
||||
|
||||
|
||||
class Server:
|
||||
|
||||
def __init__(self, env):
|
||||
|
@@ -73,26 +20,13 @@ class Server:
|
|||
self.shutdown_event = asyncio.Event()
|
||||
self.cancellable_tasks = []
|
||||
|
||||
self.notifications = notifications = Notifications()
|
||||
self.daemon = daemon = env.coin.DAEMON(env.coin, env.daemon_url)
|
||||
self.db = db = env.coin.DB(env)
|
||||
self.bp = bp = env.coin.BLOCK_PROCESSOR(env, db, daemon, notifications)
|
||||
self.db = db = LevelDB(env)
|
||||
self.bp = bp = BlockProcessor(env, db, daemon, self.shutdown_event)
|
||||
self.prometheus_server: typing.Optional[PrometheusServer] = None
|
||||
|
||||
# Set notifications up to implement the MemPoolAPI
|
||||
notifications.height = daemon.height
|
||||
notifications.cached_height = daemon.cached_height
|
||||
notifications.mempool_hashes = daemon.mempool_hashes
|
||||
notifications.raw_transactions = daemon.getrawtransactions
|
||||
notifications.lookup_utxos = db.lookup_utxos
|
||||
|
||||
MemPoolAPI.register(Notifications)
|
||||
self.mempool = mempool = MemPool(env.coin, notifications)
|
||||
|
||||
notifications.notified_mempool_txs = self.mempool.notified_mempool_txs
|
||||
|
||||
self.session_mgr = env.coin.SESSION_MANAGER(
|
||||
env, db, bp, daemon, mempool, self.shutdown_event
|
||||
self.session_mgr = LBRYSessionManager(
|
||||
env, db, bp, daemon, self.shutdown_event
|
||||
)
|
||||
self._indexer_task = None
|
||||
|
||||
|
@@ -120,8 +54,8 @@ class Server:
|
|||
await _start_cancellable(self.bp.fetch_and_process_blocks)
|
||||
|
||||
await self.db.populate_header_merkle_cache()
|
||||
await _start_cancellable(self.mempool.keep_synchronized)
|
||||
await _start_cancellable(self.session_mgr.serve, self.notifications)
|
||||
await _start_cancellable(self.bp.mempool.keep_synchronized)
|
||||
await _start_cancellable(self.session_mgr.serve, self.bp.mempool)
|
||||
|
||||
async def stop(self):
|
||||
for task in reversed(self.cancellable_tasks):
|
||||
|
@@ -135,7 +69,7 @@ class Server:
|
|||
|
||||
def run(self):
|
||||
loop = asyncio.get_event_loop()
|
||||
executor = ThreadPoolExecutor(1)
|
||||
executor = ThreadPoolExecutor(self.env.max_query_workers)
|
||||
loop.set_default_executor(executor)
|
||||
|
||||
def __exit():
|
||||
|
|
|
@@ -21,18 +21,20 @@ from elasticsearch import ConnectionTimeout
|
|||
from prometheus_client import Counter, Info, Histogram, Gauge
|
||||
|
||||
import lbry
|
||||
from lbry.error import TooManyClaimSearchParametersError
|
||||
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
|
||||
from lbry.build_info import BUILD, COMMIT_HASH, DOCKER_TAG
|
||||
from lbry.wallet.server.block_processor import LBRYBlockProcessor
|
||||
from lbry.wallet.server.db.writer import LBRYLevelDB
|
||||
from lbry.schema.result import Outputs
|
||||
from lbry.wallet.server.block_processor import BlockProcessor
|
||||
from lbry.wallet.server.leveldb import LevelDB
|
||||
from lbry.wallet.server.websocket import AdminWebSocket
|
||||
from lbry.wallet.server.metrics import ServerLoadData, APICallMetrics
|
||||
from lbry.wallet.rpc.framing import NewlineFramer
|
||||
|
||||
import lbry.wallet.server.version as VERSION
|
||||
|
||||
from lbry.wallet.rpc import (
|
||||
RPCSession, JSONRPCAutoDetect, JSONRPCConnection,
|
||||
handler_invocation, RPCError, Request, JSONRPC
|
||||
handler_invocation, RPCError, Request, JSONRPC, Notification, Batch
|
||||
)
|
||||
from lbry.wallet.server import text
|
||||
from lbry.wallet.server import util
|
||||
|
@@ -175,14 +177,13 @@ class SessionManager:
|
|||
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
|
||||
def __init__(self, env: 'Env', db: LBRYLevelDB, bp: LBRYBlockProcessor, daemon: 'Daemon', mempool: 'MemPool',
|
||||
shutdown_event: asyncio.Event):
|
||||
def __init__(self, env: 'Env', db: LevelDB, bp: BlockProcessor, daemon: 'Daemon', shutdown_event: asyncio.Event):
|
||||
env.max_send = max(350000, env.max_send)
|
||||
self.env = env
|
||||
self.db = db
|
||||
self.bp = bp
|
||||
self.daemon = daemon
|
||||
self.mempool = mempool
|
||||
self.mempool = bp.mempool
|
||||
self.shutdown_event = shutdown_event
|
||||
self.logger = util.class_logger(__name__, self.__class__.__name__)
|
||||
self.servers: typing.Dict[str, asyncio.AbstractServer] = {}
|
||||
|
@@ -263,16 +264,6 @@ class SessionManager:
|
|||
await self._start_external_servers()
|
||||
paused = False
|
||||
|
||||
async def _log_sessions(self):
|
||||
"""Periodically log sessions."""
|
||||
log_interval = self.env.log_sessions
|
||||
if log_interval:
|
||||
while True:
|
||||
await sleep(log_interval)
|
||||
data = self._session_data(for_log=True)
|
||||
for line in text.sessions_lines(data):
|
||||
self.logger.info(line)
|
||||
self.logger.info(json.dumps(self._get_info()))
|
||||
|
||||
def _group_map(self):
|
||||
group_map = defaultdict(list)
|
||||
|
@@ -376,23 +367,6 @@ class SessionManager:
|
|||
'version': lbry.__version__,
|
||||
}
|
||||
|
||||
def _session_data(self, for_log):
|
||||
"""Returned to the RPC 'sessions' call."""
|
||||
now = time.time()
|
||||
sessions = sorted(self.sessions.values(), key=lambda s: s.start_time)
|
||||
return [(session.session_id,
|
||||
session.flags(),
|
||||
session.peer_address_str(for_log=for_log),
|
||||
session.client_version,
|
||||
session.protocol_version_string(),
|
||||
session.count_pending_items(),
|
||||
session.txs_sent,
|
||||
session.sub_count(),
|
||||
session.recv_count, session.recv_size,
|
||||
session.send_count, session.send_size,
|
||||
now - session.start_time)
|
||||
for session in sessions]
|
||||
|
||||
def _group_data(self):
|
||||
"""Returned to the RPC 'groups' call."""
|
||||
result = []
|
||||
|
@@ -537,23 +511,19 @@ class SessionManager:
|
|||
|
||||
return lines
|
||||
|
||||
async def rpc_sessions(self):
|
||||
"""Return statistics about connected sessions."""
|
||||
return self._session_data(for_log=False)
|
||||
|
||||
async def rpc_reorg(self, count):
|
||||
"""Force a reorg of the given number of blocks.
|
||||
|
||||
count: number of blocks to reorg
|
||||
"""
|
||||
count = non_negative_integer(count)
|
||||
if not self.bp.force_chain_reorg(count):
|
||||
raise RPCError(BAD_REQUEST, 'still catching up with daemon')
|
||||
return f'scheduled a reorg of {count:,d} blocks'
|
||||
# async def rpc_reorg(self, count):
|
||||
# """Force a reorg of the given number of blocks.
|
||||
#
|
||||
# count: number of blocks to reorg
|
||||
# """
|
||||
# count = non_negative_integer(count)
|
||||
# if not self.bp.force_chain_reorg(count):
|
||||
# raise RPCError(BAD_REQUEST, 'still catching up with daemon')
|
||||
# return f'scheduled a reorg of {count:,d} blocks'
|
||||
|
||||
# --- External Interface
|
||||
|
||||
async def serve(self, notifications, server_listening_event):
|
||||
async def serve(self, mempool, server_listening_event):
|
||||
"""Start the RPC server if enabled. When the event is triggered,
|
||||
start TCP and SSL servers."""
|
||||
try:
|
||||
|
@@ -567,7 +537,7 @@ class SessionManager:
|
|||
if self.env.drop_client is not None:
|
||||
self.logger.info(f'drop clients matching: {self.env.drop_client.pattern}')
|
||||
# Start notifications; initialize hsub_results
|
||||
await notifications.start(self.db.db_height, self._notify_sessions)
|
||||
await mempool.start(self.db.db_height, self)
|
||||
await self.start_other()
|
||||
await self._start_external_servers()
|
||||
server_listening_event.set()
|
||||
|
@@ -576,7 +546,6 @@ class SessionManager:
|
|||
# because we connect to ourself
|
||||
await asyncio.wait([
|
||||
self._clear_stale_sessions(),
|
||||
self._log_sessions(),
|
||||
self._manage_servers()
|
||||
])
|
||||
finally:
|
||||
|
@@ -663,19 +632,25 @@ class SessionManager:
|
|||
for hashX in touched.intersection(self.mempool_statuses.keys()):
|
||||
self.mempool_statuses.pop(hashX, None)
|
||||
|
||||
touched.intersection_update(self.hashx_subscriptions_by_session.keys())
|
||||
await asyncio.get_event_loop().run_in_executor(
|
||||
None, touched.intersection_update, self.hashx_subscriptions_by_session.keys()
|
||||
)
|
||||
|
||||
if touched or (height_changed and self.mempool_statuses):
|
||||
if touched or new_touched or (height_changed and self.mempool_statuses):
|
||||
notified_hashxs = 0
|
||||
notified_sessions = 0
|
||||
session_hashxes_to_notify = defaultdict(list)
|
||||
to_notify = touched if height_changed else new_touched
|
||||
|
||||
for hashX in to_notify:
|
||||
if hashX not in self.hashx_subscriptions_by_session:
|
||||
continue
|
||||
for session_id in self.hashx_subscriptions_by_session[hashX]:
|
||||
asyncio.create_task(self.sessions[session_id].send_history_notification(hashX))
|
||||
notified_sessions += 1
|
||||
notified_hashxs += 1
|
||||
if notified_sessions:
|
||||
self.logger.info(f'notified {notified_sessions} sessions/{notified_hashxs:,d} touched addresses')
|
||||
session_hashxes_to_notify[session_id].append(hashX)
|
||||
notified_hashxs += 1
|
||||
for session_id, hashXes in session_hashxes_to_notify.items():
|
||||
asyncio.create_task(self.sessions[session_id].send_history_notifications(*hashXes))
|
||||
if session_hashxes_to_notify:
|
||||
self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses')
|
||||
|
||||
def add_session(self, session):
|
||||
self.sessions[id(session)] = session
|
||||
|
@@ -746,16 +721,6 @@ class SessionBase(RPCSession):
|
|||
def toggle_logging(self):
|
||||
self.log_me = not self.log_me
|
||||
|
||||
def flags(self):
|
||||
"""Status flags."""
|
||||
status = self.kind[0]
|
||||
if self.is_closing():
|
||||
status += 'C'
|
||||
if self.log_me:
|
||||
status += 'L'
|
||||
status += str(self._concurrency.max_concurrent)
|
||||
return status
|
||||
|
||||
def connection_made(self, transport):
|
||||
"""Handle an incoming client connection."""
|
||||
super().connection_made(transport)
|
||||
|
@@ -812,21 +777,21 @@ class LBRYSessionManager(SessionManager):
|
|||
super().__init__(*args, **kwargs)
|
||||
self.query_executor = None
|
||||
self.websocket = None
|
||||
self.metrics = ServerLoadData()
|
||||
# self.metrics = ServerLoadData()
|
||||
self.metrics_loop = None
|
||||
self.running = False
|
||||
if self.env.websocket_host is not None and self.env.websocket_port is not None:
|
||||
self.websocket = AdminWebSocket(self)
|
||||
|
||||
async def process_metrics(self):
|
||||
while self.running:
|
||||
data = self.metrics.to_json_and_reset({
|
||||
'sessions': self.session_count(),
|
||||
'height': self.db.db_height,
|
||||
})
|
||||
if self.websocket is not None:
|
||||
self.websocket.send_message(data)
|
||||
await asyncio.sleep(1)
|
||||
# async def process_metrics(self):
|
||||
# while self.running:
|
||||
# data = self.metrics.to_json_and_reset({
|
||||
# 'sessions': self.session_count(),
|
||||
# 'height': self.db.db_height,
|
||||
# })
|
||||
# if self.websocket is not None:
|
||||
# self.websocket.send_message(data)
|
||||
# await asyncio.sleep(1)
|
||||
|
||||
async def start_other(self):
|
||||
self.running = True
|
||||
|
@@ -838,13 +803,9 @@ class LBRYSessionManager(SessionManager):
|
|||
)
|
||||
if self.websocket is not None:
|
||||
await self.websocket.start()
|
||||
if self.env.track_metrics:
|
||||
self.metrics_loop = asyncio.create_task(self.process_metrics())
|
||||
|
||||
async def stop_other(self):
|
||||
self.running = False
|
||||
if self.env.track_metrics:
|
||||
self.metrics_loop.cancel()
|
||||
if self.websocket is not None:
|
||||
await self.websocket.stop()
|
||||
self.query_executor.shutdown()
|
||||
|
@@ -887,6 +848,8 @@ class LBRYElectrumX(SessionBase):
|
|||
'blockchain.transaction.get_height': cls.transaction_get_height,
|
||||
'blockchain.claimtrie.search': cls.claimtrie_search,
|
||||
'blockchain.claimtrie.resolve': cls.claimtrie_resolve,
|
||||
'blockchain.claimtrie.getclaimbyid': cls.claimtrie_getclaimbyid,
|
||||
# 'blockchain.claimtrie.getclaimsbyids': cls.claimtrie_getclaimsbyids,
|
||||
'blockchain.block.get_server_height': cls.get_server_height,
|
||||
'mempool.get_fee_histogram': cls.mempool_compact_histogram,
|
||||
'blockchain.block.headers': cls.block_headers,
|
||||
|
@@ -915,8 +878,8 @@ class LBRYElectrumX(SessionBase):
|
|||
self.protocol_tuple = self.PROTOCOL_MIN
|
||||
self.protocol_string = None
|
||||
self.daemon = self.session_mgr.daemon
|
||||
self.bp: LBRYBlockProcessor = self.session_mgr.bp
|
||||
self.db: LBRYLevelDB = self.bp.db
|
||||
self.bp: BlockProcessor = self.session_mgr.bp
|
||||
self.db: LevelDB = self.bp.db
|
||||
|
||||
@classmethod
|
||||
def protocol_min_max_strings(cls):
|
||||
|
@@ -939,7 +902,7 @@ class LBRYElectrumX(SessionBase):
|
|||
'donation_address': env.donation_address,
|
||||
'daily_fee': env.daily_fee,
|
||||
'hash_function': 'sha256',
|
||||
'trending_algorithm': env.trending_algorithms[0]
|
||||
'trending_algorithm': 'variable_decay'
|
||||
})
|
||||
|
||||
async def server_features_async(self):
|
||||
|
@@ -956,32 +919,57 @@ class LBRYElectrumX(SessionBase):
|
|||
def sub_count(self):
|
||||
return len(self.hashX_subs)
|
||||
|
||||
async def send_history_notification(self, hashX):
|
||||
start = time.perf_counter()
|
||||
alias = self.hashX_subs[hashX]
|
||||
if len(alias) == 64:
|
||||
method = 'blockchain.scripthash.subscribe'
|
||||
else:
|
||||
method = 'blockchain.address.subscribe'
|
||||
try:
|
||||
self.session_mgr.notifications_in_flight_metric.inc()
|
||||
status = await self.address_status(hashX)
|
||||
self.session_mgr.address_history_metric.observe(time.perf_counter() - start)
|
||||
async def send_history_notifications(self, *hashXes: typing.Iterable[bytes]):
|
||||
notifications = []
|
||||
for hashX in hashXes:
|
||||
alias = self.hashX_subs[hashX]
|
||||
if len(alias) == 64:
|
||||
method = 'blockchain.scripthash.subscribe'
|
||||
else:
|
||||
method = 'blockchain.address.subscribe'
|
||||
start = time.perf_counter()
|
||||
await self.send_notification(method, (alias, status))
|
||||
db_history = await self.session_mgr.limited_history(hashX)
|
||||
mempool = self.mempool.transaction_summaries(hashX)
|
||||
|
||||
status = ''.join(f'{hash_to_hex_str(tx_hash)}:'
|
||||
f'{height:d}:'
|
||||
for tx_hash, height in db_history)
|
||||
status += ''.join(f'{hash_to_hex_str(tx.hash)}:'
|
||||
f'{-tx.has_unconfirmed_inputs:d}:'
|
||||
for tx in mempool)
|
||||
if status:
|
||||
status = sha256(status.encode()).hex()
|
||||
else:
|
||||
status = None
|
||||
if mempool:
|
||||
self.session_mgr.mempool_statuses[hashX] = status
|
||||
else:
|
||||
self.session_mgr.mempool_statuses.pop(hashX, None)
|
||||
|
||||
self.session_mgr.address_history_metric.observe(time.perf_counter() - start)
|
||||
notifications.append((method, (alias, status)))
|
||||
|
||||
start = time.perf_counter()
|
||||
self.session_mgr.notifications_in_flight_metric.inc()
|
||||
for method, args in notifications:
|
||||
self.NOTIFICATION_COUNT.labels(method=method, version=self.client_version).inc()
|
||||
try:
|
||||
await self.send_notifications(
|
||||
Batch([Notification(method, (alias, status)) for (method, (alias, status)) in notifications])
|
||||
)
|
||||
self.session_mgr.notifications_sent_metric.observe(time.perf_counter() - start)
|
||||
finally:
|
||||
self.session_mgr.notifications_in_flight_metric.dec()
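The batched path above computes one status per subscribed hashX from its confirmed history plus mempool entries, then ships every (method, (alias, status)) pair in a single JSON-RPC Batch. A minimal sketch of the status computation, with db_history and mempool standing in for the values returned by session_mgr.limited_history and mempool.transaction_summaries:

from hashlib import sha256

def address_status(db_history, mempool):
    # db_history: [(tx_hash: bytes, height: int), ...] confirmed history
    # mempool: entries exposing .hash and .has_unconfirmed_inputs
    status = ''.join(f'{tx_hash[::-1].hex()}:{height:d}:' for tx_hash, height in db_history)
    # mempool txs are encoded with height 0, or -1 when they spend unconfirmed inputs
    status += ''.join(f'{tx.hash[::-1].hex()}:{-tx.has_unconfirmed_inputs:d}:' for tx in mempool)
    # subscribers receive a short digest, or None when there is no history at all
    return sha256(status.encode()).hexdigest() if status else None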
|
||||
|
||||
def get_metrics_or_placeholder_for_api(self, query_name):
|
||||
""" Do not hold on to a reference to the metrics
|
||||
returned by this method past an `await` or
|
||||
you may be working with a stale metrics object.
|
||||
"""
|
||||
if self.env.track_metrics:
|
||||
return self.session_mgr.metrics.for_api(query_name)
|
||||
else:
|
||||
return APICallMetrics(query_name)
|
||||
# def get_metrics_or_placeholder_for_api(self, query_name):
|
||||
# """ Do not hold on to a reference to the metrics
|
||||
# returned by this method past an `await` or
|
||||
# you may be working with a stale metrics object.
|
||||
# """
|
||||
# if self.env.track_metrics:
|
||||
# # return self.session_mgr.metrics.for_api(query_name)
|
||||
# else:
|
||||
# return APICallMetrics(query_name)
|
||||
|
||||
async def run_in_executor(self, query_name, func, kwargs):
|
||||
start = time.perf_counter()
|
||||
|
@ -994,55 +982,87 @@ class LBRYElectrumX(SessionBase):
|
|||
raise
|
||||
except Exception:
|
||||
log.exception("dear devs, please handle this exception better")
|
||||
metrics = self.get_metrics_or_placeholder_for_api(query_name)
|
||||
metrics.query_error(start, {})
|
||||
self.session_mgr.db_error_metric.inc()
|
||||
raise RPCError(JSONRPC.INTERNAL_ERROR, 'unknown server error')
|
||||
else:
|
||||
if self.env.track_metrics:
|
||||
metrics = self.get_metrics_or_placeholder_for_api(query_name)
|
||||
(result, metrics_data) = result
|
||||
metrics.query_response(start, metrics_data)
|
||||
return base64.b64encode(result).decode()
|
||||
finally:
|
||||
self.session_mgr.pending_query_metric.dec()
|
||||
self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
|
||||
|
||||
async def run_and_cache_query(self, query_name, kwargs):
|
||||
start = time.perf_counter()
|
||||
if isinstance(kwargs, dict):
|
||||
kwargs['release_time'] = format_release_time(kwargs.get('release_time'))
|
||||
try:
|
||||
self.session_mgr.pending_query_metric.inc()
|
||||
return await self.db.search_index.session_query(query_name, kwargs)
|
||||
except ConnectionTimeout:
|
||||
self.session_mgr.interrupt_count_metric.inc()
|
||||
raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
|
||||
finally:
|
||||
self.session_mgr.pending_query_metric.dec()
|
||||
self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
|
||||
# async def run_and_cache_query(self, query_name, kwargs):
|
||||
# start = time.perf_counter()
|
||||
# if isinstance(kwargs, dict):
|
||||
# kwargs['release_time'] = format_release_time(kwargs.get('release_time'))
|
||||
# try:
|
||||
# self.session_mgr.pending_query_metric.inc()
|
||||
# return await self.db.search_index.session_query(query_name, kwargs)
|
||||
# except ConnectionTimeout:
|
||||
# self.session_mgr.interrupt_count_metric.inc()
|
||||
# raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
|
||||
# finally:
|
||||
# self.session_mgr.pending_query_metric.dec()
|
||||
# self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
|
||||
|
||||
async def mempool_compact_histogram(self):
|
||||
return self.mempool.compact_fee_histogram()
|
||||
|
||||
async def claimtrie_search(self, **kwargs):
|
||||
if kwargs:
|
||||
start = time.perf_counter()
|
||||
if 'release_time' in kwargs:
|
||||
release_time = kwargs.pop('release_time')
|
||||
try:
|
||||
return await self.run_and_cache_query('search', kwargs)
|
||||
except TooManyClaimSearchParametersError as err:
|
||||
await asyncio.sleep(2)
|
||||
self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.",
|
||||
self.peer_address()[0], err.key, err.limit)
|
||||
return RPCError(1, str(err))
|
||||
kwargs['release_time'] = format_release_time(release_time)
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
self.session_mgr.pending_query_metric.inc()
|
||||
if 'channel' in kwargs:
|
||||
channel_url = kwargs.pop('channel')
|
||||
_, channel_claim, _ = await self.db.fs_resolve(channel_url)
|
||||
if not channel_claim or isinstance(channel_claim, (ResolveCensoredError, LookupError, ValueError)):
|
||||
return Outputs.to_base64([], [], 0, None, None)
|
||||
kwargs['channel_id'] = channel_claim.claim_hash.hex()
|
||||
return await self.db.search_index.cached_search(kwargs)
|
||||
except ConnectionTimeout:
|
||||
self.session_mgr.interrupt_count_metric.inc()
|
||||
raise RPCError(JSONRPC.QUERY_TIMEOUT, 'query timed out')
|
||||
except TooManyClaimSearchParametersError as err:
|
||||
await asyncio.sleep(2)
|
||||
self.logger.warning("Got an invalid query from %s, for %s with more than %d elements.",
|
||||
self.peer_address()[0], err.key, err.limit)
|
||||
return RPCError(1, str(err))
|
||||
finally:
|
||||
self.session_mgr.pending_query_metric.dec()
|
||||
self.session_mgr.executor_time_metric.observe(time.perf_counter() - start)
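Before the query reaches the ES-backed cached_search, the handler above normalizes two incoming constraints: a release_time comparison string and a channel URL, which is resolved to a channel_id claim hash. A sketch of that preprocessing, with resolve_url and normalize_release_time standing in for self.db.fs_resolve and format_release_time from this module; in the real handler the failed-resolution check is against ResolveCensoredError, LookupError and ValueError specifically:

async def prepare_search_kwargs(kwargs, resolve_url, normalize_release_time):
    if 'release_time' in kwargs:
        try:
            kwargs['release_time'] = normalize_release_time(kwargs.pop('release_time'))
        except ValueError:
            pass  # unparseable constraints are simply dropped
    if 'channel' in kwargs:
        # a channel URL constraint is resolved to a claim hash up front
        _, channel_claim, _ = await resolve_url(kwargs.pop('channel'))
        if not channel_claim or isinstance(channel_claim, Exception):
            return None  # caller answers with an empty Outputs payload
        kwargs['channel_id'] = channel_claim.claim_hash.hex()
    return kwargs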
|
||||
|
||||
async def claimtrie_resolve(self, *urls):
|
||||
if urls:
|
||||
count = len(urls)
|
||||
try:
|
||||
self.session_mgr.urls_to_resolve_count_metric.inc(count)
|
||||
return await self.run_and_cache_query('resolve', urls)
|
||||
finally:
|
||||
self.session_mgr.resolved_url_count_metric.inc(count)
|
||||
rows, extra = [], []
|
||||
for url in urls:
|
||||
self.session_mgr.urls_to_resolve_count_metric.inc()
|
||||
stream, channel, repost = await self.db.fs_resolve(url)
|
||||
self.session_mgr.resolved_url_count_metric.inc()
|
||||
if isinstance(channel, ResolveCensoredError):
|
||||
rows.append(channel)
|
||||
extra.append(channel.censor_row)
|
||||
elif isinstance(stream, ResolveCensoredError):
|
||||
rows.append(stream)
|
||||
extra.append(stream.censor_row)
|
||||
elif channel and not stream:
|
||||
rows.append(channel)
|
||||
# print("resolved channel", channel.name.decode())
|
||||
if repost:
|
||||
extra.append(repost)
|
||||
elif stream:
|
||||
# print("resolved stream", stream.name.decode())
|
||||
rows.append(stream)
|
||||
if channel:
|
||||
# print("and channel", channel.name.decode())
|
||||
extra.append(channel)
|
||||
if repost:
|
||||
extra.append(repost)
|
||||
# print("claimtrie resolve %i rows %i extrat" % (len(rows), len(extra)))
|
||||
return Outputs.to_base64(rows, extra, 0, None, None)
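The rewritten resolve no longer proxies to the search index: each URL goes through db.fs_resolve and the (stream, channel, repost) triple is folded into primary rows plus referenced extras, with censored results carrying their censoring claim. A sketch of that folding step (import path as used elsewhere in the tree):

from lbry.error import ResolveCensoredError

def fold_resolve_result(stream, channel, repost, rows, extras):
    if isinstance(channel, ResolveCensoredError):
        rows.append(channel)
        extras.append(channel.censor_row)   # the claim doing the censoring
    elif isinstance(stream, ResolveCensoredError):
        rows.append(stream)
        extras.append(stream.censor_row)
    elif channel and not stream:
        rows.append(channel)                # bare channel URL
        if repost:
            extras.append(repost)
    elif stream:
        rows.append(stream)                 # stream, plus its channel and repost as extras
        if channel:
            extras.append(channel)
        if repost:
            extras.append(repost)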
|
||||
|
||||
async def get_server_height(self):
|
||||
return self.bp.height
|
||||
|
@ -1057,6 +1077,15 @@ class LBRYElectrumX(SessionBase):
|
|||
return -1
|
||||
return None
|
||||
|
||||
async def claimtrie_getclaimbyid(self, claim_id):
|
||||
rows = []
|
||||
extra = []
|
||||
stream = await self.db.fs_getclaimbyid(claim_id)
|
||||
if not stream:
|
||||
stream = LookupError(f"Could not find claim at {claim_id}")
|
||||
rows.append(stream)
|
||||
return Outputs.to_base64(rows, extra, 0, None, None)
|
||||
|
||||
def assert_tx_hash(self, value):
|
||||
'''Raise an RPCError if the value is not a valid transaction
|
||||
hash.'''
|
||||
|
|
|
@ -1,167 +0,0 @@
|
|||
# Copyright (c) 2016-2017, the ElectrumX authors
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# See the file "LICENCE" for information about the copyright
|
||||
# and warranty status of this software.
|
||||
|
||||
"""Backend database abstraction."""
|
||||
|
||||
import os
|
||||
from functools import partial
|
||||
|
||||
from lbry.wallet.server import util
|
||||
|
||||
|
||||
def db_class(db_dir, name):
|
||||
"""Returns a DB engine class."""
|
||||
for db_class in util.subclasses(Storage):
|
||||
if db_class.__name__.lower() == name.lower():
|
||||
db_class.import_module()
|
||||
return partial(db_class, db_dir)
|
||||
raise RuntimeError(f'unrecognised DB engine "{name}"')
|
||||
|
||||
|
||||
class Storage:
|
||||
"""Abstract base class of the DB backend abstraction."""
|
||||
|
||||
def __init__(self, db_dir, name, for_sync):
|
||||
self.db_dir = db_dir
|
||||
self.is_new = not os.path.exists(os.path.join(db_dir, name))
|
||||
self.for_sync = for_sync or self.is_new
|
||||
self.open(name, create=self.is_new)
|
||||
|
||||
@classmethod
|
||||
def import_module(cls):
|
||||
"""Import the DB engine module."""
|
||||
raise NotImplementedError
|
||||
|
||||
def open(self, name, create):
|
||||
"""Open an existing database or create a new one."""
|
||||
raise NotImplementedError
|
||||
|
||||
def close(self):
|
||||
"""Close an existing database."""
|
||||
raise NotImplementedError
|
||||
|
||||
def get(self, key):
|
||||
raise NotImplementedError
|
||||
|
||||
def put(self, key, value):
|
||||
raise NotImplementedError
|
||||
|
||||
def write_batch(self):
|
||||
"""Return a context manager that provides `put` and `delete`.
|
||||
|
||||
Changes should only be committed when the context manager
|
||||
closes without an exception.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def iterator(self, prefix=b'', reverse=False):
|
||||
"""Return an iterator that yields (key, value) pairs from the
|
||||
database sorted by key.
|
||||
|
||||
If `prefix` is set, only keys starting with `prefix` will be
|
||||
included. If `reverse` is True the items are returned in
|
||||
reverse order.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class LevelDB(Storage):
|
||||
"""LevelDB database engine."""
|
||||
|
||||
@classmethod
|
||||
def import_module(cls):
|
||||
import plyvel
|
||||
cls.module = plyvel
|
||||
|
||||
def open(self, name, create, lru_cache_size=None):
|
||||
mof = 10000
|
||||
path = os.path.join(self.db_dir, name)
|
||||
# Use snappy compression (the default)
|
||||
self.db = self.module.DB(path, create_if_missing=create, max_open_files=mof)
|
||||
self.close = self.db.close
|
||||
self.get = self.db.get
|
||||
self.put = self.db.put
|
||||
self.iterator = self.db.iterator
|
||||
self.write_batch = partial(self.db.write_batch, transaction=True, sync=True)
|
||||
|
||||
|
||||
class RocksDB(Storage):
|
||||
"""RocksDB database engine."""
|
||||
|
||||
@classmethod
|
||||
def import_module(cls):
|
||||
import rocksdb
|
||||
cls.module = rocksdb
|
||||
|
||||
def open(self, name, create):
|
||||
mof = 512 if self.for_sync else 128
|
||||
path = os.path.join(self.db_dir, name)
|
||||
# Use snappy compression (the default)
|
||||
options = self.module.Options(create_if_missing=create,
|
||||
use_fsync=True,
|
||||
target_file_size_base=33554432,
|
||||
max_open_files=mof)
|
||||
self.db = self.module.DB(path, options)
|
||||
self.get = self.db.get
|
||||
self.put = self.db.put
|
||||
|
||||
def close(self):
|
||||
# PyRocksDB doesn't provide a close method; hopefully this is enough
|
||||
self.db = self.get = self.put = None
|
||||
import gc
|
||||
gc.collect()
|
||||
|
||||
def write_batch(self):
|
||||
return RocksDBWriteBatch(self.db)
|
||||
|
||||
def iterator(self, prefix=b'', reverse=False):
|
||||
return RocksDBIterator(self.db, prefix, reverse)
|
||||
|
||||
|
||||
class RocksDBWriteBatch:
|
||||
"""A write batch for RocksDB."""
|
||||
|
||||
def __init__(self, db):
|
||||
self.batch = RocksDB.module.WriteBatch()
|
||||
self.db = db
|
||||
|
||||
def __enter__(self):
|
||||
return self.batch
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if not exc_val:
|
||||
self.db.write(self.batch)
|
||||
|
||||
|
||||
class RocksDBIterator:
|
||||
"""An iterator for RocksDB."""
|
||||
|
||||
def __init__(self, db, prefix, reverse):
|
||||
self.prefix = prefix
|
||||
if reverse:
|
||||
self.iterator = reversed(db.iteritems())
|
||||
nxt_prefix = util.increment_byte_string(prefix)
|
||||
if nxt_prefix:
|
||||
self.iterator.seek(nxt_prefix)
|
||||
try:
|
||||
next(self.iterator)
|
||||
except StopIteration:
|
||||
self.iterator.seek(nxt_prefix)
|
||||
else:
|
||||
self.iterator.seek_to_last()
|
||||
else:
|
||||
self.iterator = db.iteritems()
|
||||
self.iterator.seek(prefix)
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
k, v = next(self.iterator)
|
||||
if not k.startswith(self.prefix):
|
||||
raise StopIteration
|
||||
return k, v
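This whole backend abstraction (Storage, LevelDB, RocksDB) is deleted: the hub now talks to plyvel directly (setup.py below bumps plyvel to 1.3.0). For reference, the atomic-write pattern the removed write_batch wrapper provided looks roughly like this against the raw library; the path and keys are illustrative:

import plyvel

db = plyvel.DB('/tmp/example-leveldb', create_if_missing=True)
with db.write_batch(transaction=True, sync=True) as batch:
    batch.put(b'k1', b'v1')
    batch.put(b'k2', b'v2')   # both keys land atomically when the block exits cleanly
assert db.get(b'k1') == b'v1'
db.close()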
|
|
@ -26,7 +26,7 @@
|
|||
# and warranty status of this software.
|
||||
|
||||
"""Transaction-related classes and functions."""
|
||||
|
||||
import typing
|
||||
from collections import namedtuple
|
||||
|
||||
from lbry.wallet.server.hash import sha256, double_sha256, hash_to_hex_str
|
||||
|
@ -41,11 +41,20 @@ ZERO = bytes(32)
|
|||
MINUS_1 = 4294967295
|
||||
|
||||
|
||||
class Tx(namedtuple("Tx", "version inputs outputs locktime raw")):
|
||||
"""Class representing a transaction."""
|
||||
class Tx(typing.NamedTuple):
|
||||
version: int
|
||||
inputs: typing.List['TxInput']
|
||||
outputs: typing.List['TxOutput']
|
||||
locktime: int
|
||||
raw: bytes
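Tx, TxInput and TxOutput move from collections.namedtuple to typing.NamedTuple, which keeps the tuple behaviour while adding field annotations. A small equivalence check:

import typing
from collections import namedtuple

TxOutputOld = namedtuple("TxOutput", "value pk_script")

class TxOutputNew(typing.NamedTuple):
    value: int
    pk_script: bytes

old, new = TxOutputOld(1000, b'\x00'), TxOutputNew(1000, b'\x00')
assert tuple(old) == tuple(new)      # same field order, same tuple behaviour
assert new._fields == old._fields    # ('value', 'pk_script')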
|
||||
|
||||
|
||||
class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")):
|
||||
class TxInput(typing.NamedTuple):
|
||||
prev_hash: bytes
|
||||
prev_idx: int
|
||||
script: bytes
|
||||
sequence: int
|
||||
|
||||
"""Class representing a transaction input."""
|
||||
def __str__(self):
|
||||
script = self.script.hex()
|
||||
|
@ -65,7 +74,9 @@ class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")):
|
|||
))
|
||||
|
||||
|
||||
class TxOutput(namedtuple("TxOutput", "value pk_script")):
|
||||
class TxOutput(typing.NamedTuple):
|
||||
value: int
|
||||
pk_script: bytes
|
||||
|
||||
def serialize(self):
|
||||
return b''.join((
|
||||
|
|
|
@ -340,7 +340,7 @@ pack_le_int64 = struct_le_q.pack
|
|||
pack_le_uint16 = struct_le_H.pack
|
||||
pack_le_uint32 = struct_le_I.pack
|
||||
pack_be_uint64 = lambda x: x.to_bytes(8, byteorder='big')
|
||||
pack_be_uint16 = struct_be_H.pack
|
||||
pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big')
|
||||
pack_be_uint32 = struct_be_I.pack
|
||||
pack_byte = structB.pack
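pack_be_uint64 and pack_be_uint16 switch from precompiled struct packers to int.to_bytes lambdas; for in-range values the two encodings are byte-for-byte identical, only the out-of-range exception type differs:

import struct

struct_be_H = struct.Struct('>H')
pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big')

assert struct_be_H.pack(0xBEEF) == pack_be_uint16(0xBEEF) == b'\xbe\xef'
# out of range: struct.Struct raises struct.error, int.to_bytes raises OverflowError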
|
||||
|
||||
|
|
2
setup.py
|
@ -9,7 +9,7 @@ with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh:
|
|||
|
||||
PLYVEL = []
|
||||
if sys.platform.startswith('linux'):
|
||||
PLYVEL.append('plyvel==1.0.5')
|
||||
PLYVEL.append('plyvel==1.3.0')
|
||||
|
||||
setup(
|
||||
name=__name__,
|
||||
|
|
|
@ -22,7 +22,7 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode())
|
||||
self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex())
|
||||
|
||||
txids = await asyncio.get_event_loop().run_in_executor(bp.db.executor, get_txids)
|
||||
txids = await asyncio.get_event_loop().run_in_executor(None, get_txids)
|
||||
txs = await bp.db.fs_transactions(txids)
|
||||
block_txs = (await bp.daemon.deserialised_block(block_hash))['tx']
|
||||
self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions')
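The test now passes None to run_in_executor, so get_txids runs on the event loop's default thread pool instead of the db's dedicated executor. The general pattern:

import asyncio

def blocking_work():
    return sum(range(1_000_000))   # stands in for get_txids / leveldb reads

async def main():
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, blocking_work)   # None selects the default ThreadPoolExecutor
    print(result)

asyncio.run(main())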
|
||||
|
@ -57,11 +57,29 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
await self.assertBlockHash(209)
|
||||
await self.assertBlockHash(210)
|
||||
await self.assertBlockHash(211)
|
||||
still_valid = await self.daemon.jsonrpc_stream_create(
|
||||
'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!')
|
||||
)
|
||||
await self.ledger.wait(still_valid)
|
||||
await self.blockchain.generate(1)
|
||||
await self.ledger.on_header.where(lambda e: e.height == 212)
|
||||
claim_id = still_valid.outputs[0].claim_id
|
||||
c1 = (await self.resolve(f'still-valid#{claim_id}'))['claim_id']
|
||||
c2 = (await self.resolve(f'still-valid#{claim_id[:2]}'))['claim_id']
|
||||
c3 = (await self.resolve(f'still-valid'))['claim_id']
|
||||
self.assertTrue(c1 == c2 == c3)
|
||||
|
||||
abandon_tx = await self.daemon.jsonrpc_stream_abandon(claim_id=claim_id)
|
||||
await self.blockchain.generate(1)
|
||||
await self.ledger.on_header.where(lambda e: e.height == 213)
|
||||
c1 = await self.resolve(f'still-valid#{still_valid.outputs[0].claim_id}')
|
||||
c2 = await self.daemon.jsonrpc_resolve([f'still-valid#{claim_id[:2]}'])
|
||||
c3 = await self.daemon.jsonrpc_resolve([f'still-valid'])
|
||||
|
||||
async def test_reorg_change_claim_height(self):
|
||||
# sanity check
|
||||
txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft')
|
||||
self.assertListEqual(txos, [])
|
||||
result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both
|
||||
self.assertIn('error', result)
|
||||
|
||||
still_valid = await self.daemon.jsonrpc_stream_create(
|
||||
'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!')
|
||||
|
@ -82,17 +100,15 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
self.assertEqual(self.ledger.headers.height, 208)
|
||||
await self.assertBlockHash(208)
|
||||
|
||||
txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft')
|
||||
self.assertEqual(1, len(txos))
|
||||
txo = txos[0]
|
||||
self.assertEqual(txo.tx_ref.id, broadcast_tx.id)
|
||||
self.assertEqual(txo.tx_ref.height, 208)
|
||||
claim = await self.resolve('hovercraft')
|
||||
self.assertEqual(claim['txid'], broadcast_tx.id)
|
||||
self.assertEqual(claim['height'], 208)
|
||||
|
||||
# check that our tx is in block 208 as returned by lbrycrdd
|
||||
invalidated_block_hash = (await self.ledger.headers.hash(208)).decode()
|
||||
block_207 = await self.blockchain.get_block(invalidated_block_hash)
|
||||
self.assertIn(txo.tx_ref.id, block_207['tx'])
|
||||
self.assertEqual(208, txos[0].tx_ref.height)
|
||||
self.assertIn(claim['txid'], block_207['tx'])
|
||||
self.assertEqual(208, claim['height'])
|
||||
|
||||
# reorg the last block dropping our claim tx
|
||||
await self.blockchain.invalidate_block(invalidated_block_hash)
|
||||
|
@ -109,11 +125,20 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
reorg_block_hash = await self.blockchain.get_block_hash(208)
|
||||
self.assertNotEqual(invalidated_block_hash, reorg_block_hash)
|
||||
block_207 = await self.blockchain.get_block(reorg_block_hash)
|
||||
self.assertNotIn(txo.tx_ref.id, block_207['tx'])
|
||||
self.assertNotIn(claim['txid'], block_207['tx'])
|
||||
|
||||
client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode()
|
||||
self.assertEqual(client_reorg_block_hash, reorg_block_hash)
|
||||
|
||||
# verify the dropped claim is no longer returned by claim search
|
||||
self.assertDictEqual(
|
||||
{'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}},
|
||||
await self.resolve('hovercraft')
|
||||
)
|
||||
|
||||
# verify the claim published a block earlier wasn't also reverted
|
||||
self.assertEqual(207, (await self.resolve('still-valid'))['height'])
|
||||
|
||||
# broadcast the claim in a different block
|
||||
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
||||
self.assertEqual(broadcast_tx.id, new_txid)
|
||||
|
@ -123,14 +148,88 @@ class BlockchainReorganizationTests(CommandTestCase):
|
|||
await asyncio.wait_for(self.on_header(210), 1.0)
|
||||
|
||||
# verify the claim is in the new block and that it is returned by claim_search
|
||||
block_210 = await self.blockchain.get_block((await self.ledger.headers.hash(210)).decode())
|
||||
self.assertIn(txo.tx_ref.id, block_210['tx'])
|
||||
txos, _, _, _ = await self.ledger.claim_search([], name='hovercraft')
|
||||
self.assertEqual(1, len(txos))
|
||||
self.assertEqual(txos[0].tx_ref.id, new_txid)
|
||||
self.assertEqual(210, txos[0].tx_ref.height)
|
||||
republished = await self.resolve('hovercraft')
|
||||
self.assertEqual(210, republished['height'])
|
||||
self.assertEqual(claim['claim_id'], republished['claim_id'])
|
||||
|
||||
# this should still be unchanged
|
||||
txos, _, _, _ = await self.ledger.claim_search([], name='still-valid')
|
||||
self.assertEqual(1, len(txos))
|
||||
self.assertEqual(207, txos[0].tx_ref.height)
|
||||
self.assertEqual(207, (await self.resolve('still-valid'))['height'])
|
||||
|
||||
async def test_reorg_drop_claim(self):
|
||||
# sanity check
|
||||
result = await self.resolve('hovercraft') # TODO: do these for claim_search and resolve both
|
||||
self.assertIn('error', result)
|
||||
|
||||
still_valid = await self.daemon.jsonrpc_stream_create(
|
||||
'still-valid', '1.0', file_path=self.create_upload_file(data=b'hi!')
|
||||
)
|
||||
await self.ledger.wait(still_valid)
|
||||
await self.generate(1)
|
||||
|
||||
# create a claim and verify it's returned by claim_search
|
||||
self.assertEqual(self.ledger.headers.height, 207)
|
||||
await self.assertBlockHash(207)
|
||||
|
||||
broadcast_tx = await self.daemon.jsonrpc_stream_create(
|
||||
'hovercraft', '1.0', file_path=self.create_upload_file(data=b'hi!')
|
||||
)
|
||||
await self.ledger.wait(broadcast_tx)
|
||||
await self.generate(1)
|
||||
await self.ledger.wait(broadcast_tx, self.blockchain.block_expected)
|
||||
self.assertEqual(self.ledger.headers.height, 208)
|
||||
await self.assertBlockHash(208)
|
||||
|
||||
claim = await self.resolve('hovercraft')
|
||||
self.assertEqual(claim['txid'], broadcast_tx.id)
|
||||
self.assertEqual(claim['height'], 208)
|
||||
|
||||
# check that our tx is in block 208 as returned by lbrycrdd
|
||||
invalidated_block_hash = (await self.ledger.headers.hash(208)).decode()
|
||||
block_207 = await self.blockchain.get_block(invalidated_block_hash)
|
||||
self.assertIn(claim['txid'], block_207['tx'])
|
||||
self.assertEqual(208, claim['height'])
|
||||
|
||||
# reorg the last block dropping our claim tx
|
||||
await self.blockchain.invalidate_block(invalidated_block_hash)
|
||||
await self.blockchain.clear_mempool()
|
||||
await self.blockchain.generate(2)
|
||||
|
||||
# wait for the client to catch up and verify the reorg
|
||||
await asyncio.wait_for(self.on_header(209), 3.0)
|
||||
await self.assertBlockHash(207)
|
||||
await self.assertBlockHash(208)
|
||||
await self.assertBlockHash(209)
|
||||
|
||||
# verify the claim was dropped from block 208 as returned by lbrycrdd
|
||||
reorg_block_hash = await self.blockchain.get_block_hash(208)
|
||||
self.assertNotEqual(invalidated_block_hash, reorg_block_hash)
|
||||
block_207 = await self.blockchain.get_block(reorg_block_hash)
|
||||
self.assertNotIn(claim['txid'], block_207['tx'])
|
||||
|
||||
client_reorg_block_hash = (await self.ledger.headers.hash(208)).decode()
|
||||
self.assertEqual(client_reorg_block_hash, reorg_block_hash)
|
||||
|
||||
# verify the dropped claim is no longer returned by claim search
|
||||
self.assertDictEqual(
|
||||
{'error': {'name': 'NOT_FOUND', 'text': 'Could not find claim at "hovercraft".'}},
|
||||
await self.resolve('hovercraft')
|
||||
)
|
||||
|
||||
# verify the claim published a block earlier wasn't also reverted
|
||||
self.assertEqual(207, (await self.resolve('still-valid'))['height'])
|
||||
|
||||
# broadcast the claim in a different block
|
||||
new_txid = await self.blockchain.sendrawtransaction(hexlify(broadcast_tx.raw).decode())
|
||||
self.assertEqual(broadcast_tx.id, new_txid)
|
||||
await self.blockchain.generate(1)
|
||||
|
||||
# wait for the client to catch up
|
||||
await asyncio.wait_for(self.on_header(210), 1.0)
|
||||
|
||||
# verify the claim is in the new block and that it is returned by claim_search
|
||||
republished = await self.resolve('hovercraft')
|
||||
self.assertEqual(210, republished['height'])
|
||||
self.assertEqual(claim['claim_id'], republished['claim_id'])
|
||||
|
||||
# this should still be unchanged
|
||||
self.assertEqual(207, (await self.resolve('still-valid'))['height'])
|
||||
|
|
|
@ -33,7 +33,7 @@ class NetworkTests(IntegrationTestCase):
|
|||
'donation_address': '',
|
||||
'daily_fee': '0',
|
||||
'server_version': lbry.__version__,
|
||||
'trending_algorithm': 'zscore',
|
||||
'trending_algorithm': 'variable_decay',
|
||||
}, await self.ledger.network.get_server_features())
|
||||
# await self.conductor.spv_node.stop()
|
||||
payment_address, donation_address = await self.account.get_addresses(limit=2)
|
||||
|
@ -58,7 +58,7 @@ class NetworkTests(IntegrationTestCase):
|
|||
'donation_address': donation_address,
|
||||
'daily_fee': '42',
|
||||
'server_version': lbry.__version__,
|
||||
'trending_algorithm': 'zscore',
|
||||
'trending_algorithm': 'variable_decay',
|
||||
}, await self.ledger.network.get_server_features())
|
||||
|
||||
|
||||
|
@ -176,10 +176,19 @@ class UDPServerFailDiscoveryTest(AsyncioTestCase):
|
|||
|
||||
|
||||
class ServerPickingTestCase(AsyncioTestCase):
|
||||
async def _make_udp_server(self, port):
|
||||
async def _make_udp_server(self, port, latency) -> StatusServer:
|
||||
s = StatusServer()
|
||||
await s.start(0, b'\x00' * 32, '127.0.0.1', port)
|
||||
await s.start(0, b'\x00' * 32, 'US', '127.0.0.1', port, True)
|
||||
s.set_available()
|
||||
sendto = s._protocol.transport.sendto
|
||||
|
||||
def mock_sendto(data, addr):
|
||||
self.loop.call_later(latency, sendto, data, addr)
|
||||
|
||||
s._protocol.transport.sendto = mock_sendto
|
||||
|
||||
self.addCleanup(s.stop)
|
||||
return s
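The fake status servers now take a latency argument and delay every outgoing datagram by wrapping transport.sendto with loop.call_later. The shim in generic form:

import asyncio

def add_latency(transport, latency: float, loop=None):
    loop = loop or asyncio.get_event_loop()
    real_sendto = transport.sendto

    def delayed_sendto(data, addr=None):
        # defer the real send so replies arrive `latency` seconds later
        loop.call_later(latency, real_sendto, data, addr)

    transport.sendto = delayed_sendto
    return transport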
|
||||
|
||||
async def _make_fake_server(self, latency=1.0, port=1):
|
||||
# local fake server with artificial latency
|
||||
|
@ -191,23 +200,24 @@ class ServerPickingTestCase(AsyncioTestCase):
|
|||
return {'height': 1}
|
||||
server = await self.loop.create_server(lambda: FakeSession(), host='127.0.0.1', port=port)
|
||||
self.addCleanup(server.close)
|
||||
await self._make_udp_server(port)
|
||||
await self._make_udp_server(port, latency)
|
||||
return '127.0.0.1', port
|
||||
|
||||
async def _make_bad_server(self, port=42420):
|
||||
async def echo(reader, writer):
|
||||
while True:
|
||||
writer.write(await reader.read())
|
||||
|
||||
server = await asyncio.start_server(echo, host='127.0.0.1', port=port)
|
||||
self.addCleanup(server.close)
|
||||
await self._make_udp_server(port)
|
||||
await self._make_udp_server(port, 0)
|
||||
return '127.0.0.1', port
|
||||
|
||||
async def _test_pick_fastest(self):
|
||||
async def test_pick_fastest(self):
|
||||
ledger = Mock(config={
|
||||
'default_servers': [
|
||||
# fast but unhealthy, should be discarded
|
||||
await self._make_bad_server(),
|
||||
# await self._make_bad_server(),
|
||||
('localhost', 1),
|
||||
('example.that.doesnt.resolve', 9000),
|
||||
await self._make_fake_server(latency=1.0, port=1340),
|
||||
|
@ -223,7 +233,7 @@ class ServerPickingTestCase(AsyncioTestCase):
|
|||
await asyncio.wait_for(network.on_connected.first, timeout=10)
|
||||
self.assertTrue(network.is_connected)
|
||||
self.assertTupleEqual(network.client.server, ('127.0.0.1', 1337))
|
||||
self.assertTrue(all([not session.is_closing() for session in network.session_pool.available_sessions]))
|
||||
# self.assertTrue(all([not session.is_closing() for session in network.session_pool.available_sessions]))
|
||||
# ensure we are connected to all of them after a while
|
||||
await asyncio.sleep(1)
|
||||
self.assertEqual(len(list(network.session_pool.available_sessions)), 3)
|
||||
# await asyncio.sleep(1)
|
||||
# self.assertEqual(len(list(network.session_pool.available_sessions)), 3)
|
||||
|
|
|
@ -1,410 +0,0 @@
|
|||
import asyncio
|
||||
import json
|
||||
import hashlib
|
||||
from binascii import hexlify, unhexlify
|
||||
from lbry.testcase import CommandTestCase
|
||||
from lbry.wallet.transaction import Transaction, Output
|
||||
from lbry.schema.compat import OldClaimMessage
|
||||
from lbry.crypto.hash import sha256
|
||||
from lbry.crypto.base58 import Base58
|
||||
|
||||
|
||||
class BaseResolveTestCase(CommandTestCase):
|
||||
|
||||
async def assertResolvesToClaimId(self, name, claim_id):
|
||||
other = await self.resolve(name)
|
||||
if claim_id is None:
|
||||
self.assertIn('error', other)
|
||||
self.assertEqual(other['error']['name'], 'NOT_FOUND')
|
||||
else:
|
||||
self.assertEqual(claim_id, other['claim_id'])
|
||||
|
||||
|
||||
class ResolveCommand(BaseResolveTestCase):
|
||||
|
||||
async def test_resolve_response(self):
|
||||
channel_id = self.get_claim_id(
|
||||
await self.channel_create('@abc', '0.01')
|
||||
)
|
||||
|
||||
# resolving a channel @abc
|
||||
response = await self.resolve('lbry://@abc')
|
||||
self.assertEqual(response['name'], '@abc')
|
||||
self.assertEqual(response['value_type'], 'channel')
|
||||
self.assertEqual(response['meta']['claims_in_channel'], 0)
|
||||
|
||||
await self.stream_create('foo', '0.01', channel_id=channel_id)
|
||||
await self.stream_create('foo2', '0.01', channel_id=channel_id)
|
||||
|
||||
# resolving a channel @abc with some claims in it
|
||||
response['confirmations'] += 2
|
||||
response['meta']['claims_in_channel'] = 2
|
||||
self.assertEqual(response, await self.resolve('lbry://@abc'))
|
||||
|
||||
# resolving claim foo within channel @abc
|
||||
claim = await self.resolve('lbry://@abc/foo')
|
||||
self.assertEqual(claim['name'], 'foo')
|
||||
self.assertEqual(claim['value_type'], 'stream')
|
||||
self.assertEqual(claim['signing_channel']['name'], '@abc')
|
||||
self.assertTrue(claim['is_channel_signature_valid'])
|
||||
self.assertEqual(
|
||||
claim['timestamp'],
|
||||
self.ledger.headers.estimated_timestamp(claim['height'])
|
||||
)
|
||||
self.assertEqual(
|
||||
claim['signing_channel']['timestamp'],
|
||||
self.ledger.headers.estimated_timestamp(claim['signing_channel']['height'])
|
||||
)
|
||||
|
||||
# resolving claim foo by itself
|
||||
self.assertEqual(claim, await self.resolve('lbry://foo'))
|
||||
# resolving from the given permanent url
|
||||
self.assertEqual(claim, await self.resolve(claim['permanent_url']))
|
||||
|
||||
# resolving multiple at once
|
||||
response = await self.out(self.daemon.jsonrpc_resolve(['lbry://foo', 'lbry://foo2']))
|
||||
self.assertSetEqual({'lbry://foo', 'lbry://foo2'}, set(response))
|
||||
claim = response['lbry://foo2']
|
||||
self.assertEqual(claim['name'], 'foo2')
|
||||
self.assertEqual(claim['value_type'], 'stream')
|
||||
self.assertEqual(claim['signing_channel']['name'], '@abc')
|
||||
self.assertTrue(claim['is_channel_signature_valid'])
|
||||
|
||||
# resolve has correct confirmations
|
||||
tx_details = await self.blockchain.get_raw_transaction(claim['txid'])
|
||||
self.assertEqual(claim['confirmations'], json.loads(tx_details)['confirmations'])
|
||||
|
||||
# resolve handles invalid data
|
||||
await self.blockchain_claim_name("gibberish", hexlify(b"{'invalid':'json'}").decode(), "0.1")
|
||||
await self.generate(1)
|
||||
response = await self.out(self.daemon.jsonrpc_resolve("lbry://gibberish"))
|
||||
self.assertSetEqual({'lbry://gibberish'}, set(response))
|
||||
claim = response['lbry://gibberish']
|
||||
self.assertEqual(claim['name'], 'gibberish')
|
||||
self.assertNotIn('value', claim)
|
||||
|
||||
# resolve retries
|
||||
await self.conductor.spv_node.stop()
|
||||
resolve_task = asyncio.create_task(self.resolve('foo'))
|
||||
await self.conductor.spv_node.start(self.conductor.blockchain_node)
|
||||
self.assertIsNotNone((await resolve_task)['claim_id'])
|
||||
|
||||
async def test_winning_by_effective_amount(self):
|
||||
# first one remains winner unless something else changes
|
||||
claim_id1 = self.get_claim_id(
|
||||
await self.channel_create('@foo', allow_duplicate_name=True))
|
||||
await self.assertResolvesToClaimId('@foo', claim_id1)
|
||||
claim_id2 = self.get_claim_id(
|
||||
await self.channel_create('@foo', allow_duplicate_name=True))
|
||||
await self.assertResolvesToClaimId('@foo', claim_id1)
|
||||
claim_id3 = self.get_claim_id(
|
||||
await self.channel_create('@foo', allow_duplicate_name=True))
|
||||
await self.assertResolvesToClaimId('@foo', claim_id1)
|
||||
# supports change the winner
|
||||
await self.support_create(claim_id3, '0.09')
|
||||
await self.assertResolvesToClaimId('@foo', claim_id3)
|
||||
await self.support_create(claim_id2, '0.19')
|
||||
await self.assertResolvesToClaimId('@foo', claim_id2)
|
||||
await self.support_create(claim_id1, '0.29')
|
||||
await self.assertResolvesToClaimId('@foo', claim_id1)
|
||||
|
||||
async def test_advanced_resolve(self):
|
||||
claim_id1 = self.get_claim_id(
|
||||
await self.stream_create('foo', '0.7', allow_duplicate_name=True))
|
||||
claim_id2 = self.get_claim_id(
|
||||
await self.stream_create('foo', '0.8', allow_duplicate_name=True))
|
||||
claim_id3 = self.get_claim_id(
|
||||
await self.stream_create('foo', '0.9', allow_duplicate_name=True))
|
||||
# plain winning claim
|
||||
await self.assertResolvesToClaimId('foo', claim_id3)
|
||||
# amount order resolution
|
||||
await self.assertResolvesToClaimId('foo$1', claim_id3)
|
||||
await self.assertResolvesToClaimId('foo$2', claim_id2)
|
||||
await self.assertResolvesToClaimId('foo$3', claim_id1)
|
||||
await self.assertResolvesToClaimId('foo$4', None)
|
||||
|
||||
async def test_partial_claim_id_resolve(self):
|
||||
# add some noise
|
||||
await self.channel_create('@abc', '0.1', allow_duplicate_name=True)
|
||||
await self.channel_create('@abc', '0.2', allow_duplicate_name=True)
|
||||
await self.channel_create('@abc', '1.0', allow_duplicate_name=True)
|
||||
|
||||
channel_id = self.get_claim_id(
|
||||
await self.channel_create('@abc', '1.1', allow_duplicate_name=True))
|
||||
await self.assertResolvesToClaimId(f'@abc', channel_id)
|
||||
await self.assertResolvesToClaimId(f'@abc#{channel_id[:10]}', channel_id)
|
||||
await self.assertResolvesToClaimId(f'@abc#{channel_id}', channel_id)
|
||||
channel = (await self.claim_search(claim_id=channel_id))[0]
|
||||
await self.assertResolvesToClaimId(channel['short_url'], channel_id)
|
||||
await self.assertResolvesToClaimId(channel['canonical_url'], channel_id)
|
||||
await self.assertResolvesToClaimId(channel['permanent_url'], channel_id)
|
||||
|
||||
# add some noise
|
||||
await self.stream_create('foo', '0.1', allow_duplicate_name=True, channel_id=channel['claim_id'])
|
||||
await self.stream_create('foo', '0.2', allow_duplicate_name=True, channel_id=channel['claim_id'])
|
||||
await self.stream_create('foo', '0.3', allow_duplicate_name=True, channel_id=channel['claim_id'])
|
||||
|
||||
claim_id1 = self.get_claim_id(
|
||||
await self.stream_create('foo', '0.7', allow_duplicate_name=True, channel_id=channel['claim_id']))
|
||||
claim1 = (await self.claim_search(claim_id=claim_id1))[0]
|
||||
await self.assertResolvesToClaimId('foo', claim_id1)
|
||||
await self.assertResolvesToClaimId('@abc/foo', claim_id1)
|
||||
await self.assertResolvesToClaimId(claim1['short_url'], claim_id1)
|
||||
await self.assertResolvesToClaimId(claim1['canonical_url'], claim_id1)
|
||||
await self.assertResolvesToClaimId(claim1['permanent_url'], claim_id1)
|
||||
|
||||
claim_id2 = self.get_claim_id(
|
||||
await self.stream_create('foo', '0.8', allow_duplicate_name=True, channel_id=channel['claim_id']))
|
||||
claim2 = (await self.claim_search(claim_id=claim_id2))[0]
|
||||
await self.assertResolvesToClaimId('foo', claim_id2)
|
||||
await self.assertResolvesToClaimId('@abc/foo', claim_id2)
|
||||
await self.assertResolvesToClaimId(claim2['short_url'], claim_id2)
|
||||
await self.assertResolvesToClaimId(claim2['canonical_url'], claim_id2)
|
||||
await self.assertResolvesToClaimId(claim2['permanent_url'], claim_id2)
|
||||
|
||||
async def test_abandoned_channel_with_signed_claims(self):
|
||||
channel = (await self.channel_create('@abc', '1.0'))['outputs'][0]
|
||||
orphan_claim = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel['claim_id'])
|
||||
abandoned_channel_id = channel['claim_id']
|
||||
await self.channel_abandon(txid=channel['txid'], nout=0)
|
||||
channel = (await self.channel_create('@abc', '1.0'))['outputs'][0]
|
||||
orphan_claim_id = self.get_claim_id(orphan_claim)
|
||||
|
||||
# Original channel doesn't exist anymore, so the signature is invalid. For invalid signatures, resolution is
|
||||
# only possible outside a channel
|
||||
self.assertEqual(
|
||||
{'error': {
|
||||
'name': 'NOT_FOUND',
|
||||
'text': 'Could not find claim at "lbry://@abc/on-channel-claim".',
|
||||
}},
|
||||
await self.resolve('lbry://@abc/on-channel-claim')
|
||||
)
|
||||
response = await self.resolve('lbry://on-channel-claim')
|
||||
self.assertFalse(response['is_channel_signature_valid'])
|
||||
self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel'])
|
||||
direct_uri = 'lbry://on-channel-claim#' + orphan_claim_id
|
||||
response = await self.resolve(direct_uri)
|
||||
self.assertFalse(response['is_channel_signature_valid'])
|
||||
self.assertEqual({'channel_id': abandoned_channel_id}, response['signing_channel'])
|
||||
await self.stream_abandon(claim_id=orphan_claim_id)
|
||||
|
||||
uri = 'lbry://@abc/on-channel-claim'
|
||||
# now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore)
|
||||
valid_claim = await self.stream_create('on-channel-claim', '0.00000001', channel_id=channel['claim_id'])
|
||||
# resolves normally
|
||||
response = await self.resolve(uri)
|
||||
self.assertTrue(response['is_channel_signature_valid'])
|
||||
|
||||
# oops! claimed a valid conflict! (this happens in the wild, mostly by accident or race condition)
|
||||
await self.stream_create(
|
||||
'on-channel-claim', '0.00000001', channel_id=channel['claim_id'], allow_duplicate_name=True
|
||||
)
|
||||
|
||||
# it still resolves! but to the older claim
|
||||
response = await self.resolve(uri)
|
||||
self.assertTrue(response['is_channel_signature_valid'])
|
||||
self.assertEqual(response['txid'], valid_claim['txid'])
|
||||
claims = await self.claim_search(name='on-channel-claim')
|
||||
self.assertEqual(2, len(claims))
|
||||
self.assertEqual(
|
||||
{channel['claim_id']}, {claim['signing_channel']['claim_id'] for claim in claims}
|
||||
)
|
||||
|
||||
async def test_normalization_resolution(self):
|
||||
|
||||
one = 'ΣίσυφοςfiÆ'
|
||||
two = 'ΣΊΣΥΦΟσFIæ'
|
||||
|
||||
_ = await self.stream_create(one, '0.1')
|
||||
c = await self.stream_create(two, '0.2')
|
||||
|
||||
winner_id = self.get_claim_id(c)
|
||||
|
||||
r1 = await self.resolve(f'lbry://{one}')
|
||||
r2 = await self.resolve(f'lbry://{two}')
|
||||
|
||||
self.assertEqual(winner_id, r1['claim_id'])
|
||||
self.assertEqual(winner_id, r2['claim_id'])
|
||||
|
||||
async def test_resolve_old_claim(self):
|
||||
channel = await self.daemon.jsonrpc_channel_create('@olds', '1.0')
|
||||
await self.confirm_tx(channel.id)
|
||||
address = channel.outputs[0].get_address(self.account.ledger)
|
||||
claim = generate_signed_legacy(address, channel.outputs[0])
|
||||
tx = await Transaction.claim_create('example', claim.SerializeToString(), 1, address, [self.account], self.account)
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
|
||||
response = await self.resolve('@olds/example')
|
||||
self.assertTrue(response['is_channel_signature_valid'])
|
||||
|
||||
claim.publisherSignature.signature = bytes(reversed(claim.publisherSignature.signature))
|
||||
tx = await Transaction.claim_create(
|
||||
'bad_example', claim.SerializeToString(), 1, address, [self.account], self.account
|
||||
)
|
||||
await tx.sign([self.account])
|
||||
await self.broadcast(tx)
|
||||
await self.confirm_tx(tx.id)
|
||||
|
||||
response = await self.resolve('bad_example')
|
||||
self.assertFalse(response['is_channel_signature_valid'])
|
||||
self.assertEqual(
|
||||
{'error': {
|
||||
'name': 'NOT_FOUND',
|
||||
'text': 'Could not find claim at "@olds/bad_example".',
|
||||
}},
|
||||
await self.resolve('@olds/bad_example')
|
||||
)
|
||||
|
||||
async def test_resolve_with_includes(self):
|
||||
wallet2 = await self.daemon.jsonrpc_wallet_create('wallet2', create_account=True)
|
||||
address2 = await self.daemon.jsonrpc_address_unused(wallet_id=wallet2.id)
|
||||
|
||||
await self.wallet_send('1.0', address2)
|
||||
|
||||
stream = await self.stream_create(
|
||||
'priced', '0.1', wallet_id=wallet2.id,
|
||||
fee_amount='0.5', fee_currency='LBC', fee_address=address2
|
||||
)
|
||||
stream_id = self.get_claim_id(stream)
|
||||
|
||||
resolve = await self.resolve('priced')
|
||||
self.assertNotIn('is_my_output', resolve)
|
||||
self.assertNotIn('purchase_receipt', resolve)
|
||||
self.assertNotIn('sent_supports', resolve)
|
||||
self.assertNotIn('sent_tips', resolve)
|
||||
self.assertNotIn('received_tips', resolve)
|
||||
|
||||
# is_my_output
|
||||
resolve = await self.resolve('priced', include_is_my_output=True)
|
||||
self.assertFalse(resolve['is_my_output'])
|
||||
resolve = await self.resolve('priced', wallet_id=wallet2.id, include_is_my_output=True)
|
||||
self.assertTrue(resolve['is_my_output'])
|
||||
|
||||
# purchase receipt
|
||||
resolve = await self.resolve('priced', include_purchase_receipt=True)
|
||||
self.assertNotIn('purchase_receipt', resolve)
|
||||
await self.purchase_create(stream_id)
|
||||
resolve = await self.resolve('priced', include_purchase_receipt=True)
|
||||
self.assertEqual('0.5', resolve['purchase_receipt']['amount'])
|
||||
|
||||
# my supports and my tips
|
||||
resolve = await self.resolve(
|
||||
'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True
|
||||
)
|
||||
self.assertEqual('0.0', resolve['sent_supports'])
|
||||
self.assertEqual('0.0', resolve['sent_tips'])
|
||||
self.assertEqual('0.0', resolve['received_tips'])
|
||||
await self.support_create(stream_id, '0.3')
|
||||
await self.support_create(stream_id, '0.2')
|
||||
await self.support_create(stream_id, '0.4', tip=True)
|
||||
await self.support_create(stream_id, '0.5', tip=True)
|
||||
resolve = await self.resolve(
|
||||
'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True
|
||||
)
|
||||
self.assertEqual('0.5', resolve['sent_supports'])
|
||||
self.assertEqual('0.9', resolve['sent_tips'])
|
||||
self.assertEqual('0.0', resolve['received_tips'])
|
||||
|
||||
resolve = await self.resolve(
|
||||
'priced', include_sent_supports=True, include_sent_tips=True, include_received_tips=True,
|
||||
wallet_id=wallet2.id
|
||||
)
|
||||
self.assertEqual('0.0', resolve['sent_supports'])
|
||||
self.assertEqual('0.0', resolve['sent_tips'])
|
||||
self.assertEqual('0.9', resolve['received_tips'])
|
||||
self.assertEqual('1.4', resolve['meta']['support_amount'])
|
||||
|
||||
# make sure nothing is leaked between wallets through cached tx/txos
|
||||
resolve = await self.resolve('priced')
|
||||
self.assertNotIn('is_my_output', resolve)
|
||||
self.assertNotIn('purchase_receipt', resolve)
|
||||
self.assertNotIn('sent_supports', resolve)
|
||||
self.assertNotIn('sent_tips', resolve)
|
||||
self.assertNotIn('received_tips', resolve)
|
||||
|
||||
|
||||
class ResolveAfterReorg(BaseResolveTestCase):
|
||||
|
||||
async def reorg(self, start):
|
||||
blocks = self.ledger.headers.height - start
|
||||
self.blockchain.block_expected = start - 1
|
||||
# go back to start
|
||||
await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode())
|
||||
# go to previous + 1
|
||||
await self.generate(blocks + 2)
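Worked example of the helper above: with the ledger at height 208 and start=206, blocks is 2; invalidating block 206 drops the tip to 205, and mining blocks + 2 = 4 puts the replacement branch at 209, one past the original tip, which forces the client onto it.

start, old_tip = 206, 208
blocks = old_tip - start
new_tip = (start - 1) + (blocks + 2)
assert new_tip == old_tip + 1   # 209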
|
||||
|
||||
async def test_reorg(self):
|
||||
self.assertEqual(self.ledger.headers.height, 206)
|
||||
|
||||
channel_name = '@abc'
|
||||
channel_id = self.get_claim_id(
|
||||
await self.channel_create(channel_name, '0.01')
|
||||
)
|
||||
self.assertNotIn('error', await self.resolve(channel_name))
|
||||
await self.reorg(206)
|
||||
self.assertNotIn('error', await self.resolve(channel_name))
|
||||
|
||||
stream_name = 'foo'
|
||||
stream_id = self.get_claim_id(
|
||||
await self.stream_create(stream_name, '0.01', channel_id=channel_id)
|
||||
)
|
||||
self.assertNotIn('error', await self.resolve(stream_name))
|
||||
await self.reorg(206)
|
||||
self.assertNotIn('error', await self.resolve(stream_name))
|
||||
|
||||
await self.support_create(stream_id, '0.01')
|
||||
self.assertNotIn('error', await self.resolve(stream_name))
|
||||
await self.reorg(206)
|
||||
self.assertNotIn('error', await self.resolve(stream_name))
|
||||
|
||||
await self.stream_abandon(stream_id)
|
||||
self.assertNotIn('error', await self.resolve(channel_name))
|
||||
self.assertIn('error', await self.resolve(stream_name))
|
||||
await self.reorg(206)
|
||||
self.assertNotIn('error', await self.resolve(channel_name))
|
||||
self.assertIn('error', await self.resolve(stream_name))
|
||||
|
||||
await self.channel_abandon(channel_id)
|
||||
self.assertIn('error', await self.resolve(channel_name))
|
||||
self.assertIn('error', await self.resolve(stream_name))
|
||||
await self.reorg(206)
|
||||
self.assertIn('error', await self.resolve(channel_name))
|
||||
self.assertIn('error', await self.resolve(stream_name))
|
||||
|
||||
|
||||
def generate_signed_legacy(address: bytes, output: Output):
|
||||
decoded_address = Base58.decode(address)
|
||||
claim = OldClaimMessage()
|
||||
claim.ParseFromString(unhexlify(
|
||||
'080110011aee04080112a604080410011a2b4865726520617265203520526561736f6e73204920e29da4e'
|
||||
'fb88f204e657874636c6f7564207c20544c4722920346696e64206f7574206d6f72652061626f7574204e'
|
||||
'657874636c6f75643a2068747470733a2f2f6e657874636c6f75642e636f6d2f0a0a596f752063616e206'
|
||||
'6696e64206d65206f6e20746865736520736f6369616c733a0a202a20466f72756d733a2068747470733a'
|
||||
'2f2f666f72756d2e6865617679656c656d656e742e696f2f0a202a20506f64636173743a2068747470733'
|
||||
'a2f2f6f6666746f706963616c2e6e65740a202a2050617472656f6e3a2068747470733a2f2f7061747265'
|
||||
'6f6e2e636f6d2f7468656c696e757867616d65720a202a204d657263683a2068747470733a2f2f7465657'
|
||||
'37072696e672e636f6d2f73746f7265732f6f6666696369616c2d6c696e75782d67616d65720a202a2054'
|
||||
'77697463683a2068747470733a2f2f7477697463682e74762f786f6e64616b0a202a20547769747465723'
|
||||
'a2068747470733a2f2f747769747465722e636f6d2f7468656c696e757867616d65720a0a2e2e2e0a6874'
|
||||
'7470733a2f2f7777772e796f75747562652e636f6d2f77617463683f763d4672546442434f535f66632a0'
|
||||
'f546865204c696e75782047616d6572321c436f7079726967687465642028636f6e746163742061757468'
|
||||
'6f722938004a2968747470733a2f2f6265726b2e6e696e6a612f7468756d626e61696c732f46725464424'
|
||||
'34f535f666352005a001a41080110011a30040e8ac6e89c061f982528c23ad33829fd7146435bf7a4cc22'
|
||||
'f0bff70c4fe0b91fd36da9a375e3e1c171db825bf5d1f32209766964656f2f6d70342a5c080110031a406'
|
||||
'2b2dd4c45e364030fbfad1a6fefff695ebf20ea33a5381b947753e2a0ca359989a5cc7d15e5392a0d354c'
|
||||
'0b68498382b2701b22c03beb8dcb91089031b871e72214feb61536c007cdf4faeeaab4876cb397feaf6b51'
|
||||
))
|
||||
claim.ClearField("publisherSignature")
|
||||
digest = sha256(b''.join([
|
||||
decoded_address,
|
||||
claim.SerializeToString(),
|
||||
output.claim_hash[::-1]
|
||||
]))
|
||||
signature = output.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
|
||||
claim.publisherSignature.version = 1
|
||||
claim.publisherSignature.signatureType = 1
|
||||
claim.publisherSignature.signature = signature
|
||||
claim.publisherSignature.certificateId = output.claim_hash[::-1]
|
||||
return claim
|
|
@ -5,7 +5,7 @@ import lbry.wallet
|
|||
from lbry.error import ServerPaymentFeeAboveMaxAllowedError
|
||||
from lbry.wallet.network import ClientSession
|
||||
from lbry.wallet.rpc import RPCError
|
||||
from lbry.wallet.server.db.elasticsearch.sync import run as run_sync, make_es_index
|
||||
from lbry.wallet.server.db.elasticsearch.sync import run_sync, make_es_index
|
||||
from lbry.wallet.server.session import LBRYElectrumX
|
||||
from lbry.testcase import IntegrationTestCase, CommandTestCase
|
||||
from lbry.wallet.orchstr8.node import SPVNode
|
||||
|
@ -104,8 +104,11 @@ class TestESSync(CommandTestCase):
|
|||
async def resync():
|
||||
await db.search_index.start()
|
||||
db.search_index.clear_caches()
|
||||
await run_sync(db.sql._db_path, 1, 0, 0, index_name=db.search_index.index)
|
||||
await run_sync(index_name=db.search_index.index, db=db)
|
||||
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
self.assertEqual(0, len(await self.claim_search(order_by=['height'])))
|
||||
|
||||
await resync()
|
||||
|
||||
# this time we will test a migration from unversioned to v1
|
||||
|
@ -192,17 +195,18 @@ class TestHubDiscovery(CommandTestCase):
|
|||
)
|
||||
|
||||
|
||||
class TestStress(CommandTestCase):
|
||||
async def test_flush_over_66_thousand(self):
|
||||
history = self.conductor.spv_node.server.db.history
|
||||
history.flush_count = 66_000
|
||||
history.flush()
|
||||
self.assertEqual(history.flush_count, 66_001)
|
||||
await self.generate(1)
|
||||
self.assertEqual(history.flush_count, 66_002)
|
||||
class TestStressFlush(CommandTestCase):
|
||||
# async def test_flush_over_66_thousand(self):
|
||||
# history = self.conductor.spv_node.server.db.history
|
||||
# history.flush_count = 66_000
|
||||
# history.flush()
|
||||
# self.assertEqual(history.flush_count, 66_001)
|
||||
# await self.generate(1)
|
||||
# self.assertEqual(history.flush_count, 66_002)
|
||||
|
||||
async def test_thousands_claim_ids_on_search(self):
|
||||
await self.stream_create()
|
||||
with self.assertRaises(RPCError) as err:
|
||||
await self.claim_search(not_channel_ids=[("%040x" % i) for i in range(8196)])
|
||||
self.assertEqual(err.exception.message, 'not_channel_ids cant have more than 2048 items.')
|
||||
# in the go hub this doesn't have a `.` at the end, in python it does
|
||||
self.assertTrue(err.exception.message.startswith('not_channel_ids cant have more than 2048 items'))
|
||||
|
|
0
tests/integration/claims/__init__.py
Normal file
|
@ -182,6 +182,9 @@ class ClaimSearchCommand(ClaimTestCase):
|
|||
claims = [three, two, signed]
|
||||
await self.assertFindsClaims(claims, channel_ids=[self.channel_id])
|
||||
await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}")
|
||||
await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}", valid_channel_signature=True)
|
||||
await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}", has_channel_signature=True, valid_channel_signature=True)
|
||||
await self.assertFindsClaims([], channel=f"@abc#{self.channel_id}", has_channel_signature=True, invalid_channel_signature=True) # fixme
|
||||
await self.assertFindsClaims([], channel=f"@inexistent")
|
||||
await self.assertFindsClaims([three, two, signed2, signed], channel_ids=[channel_id2, self.channel_id])
|
||||
await self.channel_abandon(claim_id=self.channel_id)
|
||||
|
@ -810,10 +813,15 @@ class TransactionOutputCommands(ClaimTestCase):
|
|||
stream_id = self.get_claim_id(await self.stream_create())
|
||||
await self.support_create(stream_id, '0.3')
|
||||
await self.support_create(stream_id, '0.2')
|
||||
await self.generate(day_blocks)
|
||||
await self.generate(day_blocks // 2)
|
||||
await self.stream_update(stream_id)
|
||||
await self.generate(day_blocks // 2)
|
||||
await self.support_create(stream_id, '0.4')
|
||||
await self.support_create(stream_id, '0.5')
|
||||
await self.generate(day_blocks)
|
||||
await self.stream_update(stream_id)
|
||||
await self.generate(day_blocks // 2)
|
||||
await self.stream_update(stream_id)
|
||||
await self.generate(day_blocks // 2)
|
||||
await self.support_create(stream_id, '0.6')
|
||||
|
||||
plot = await self.txo_plot(type='support')
|
||||
|
@ -1484,12 +1492,10 @@ class StreamCommands(ClaimTestCase):
|
|||
filtering_channel_id = self.get_claim_id(
|
||||
await self.channel_create('@filtering', '0.1')
|
||||
)
|
||||
self.conductor.spv_node.server.db.sql.filtering_channel_hashes.add(
|
||||
unhexlify(filtering_channel_id)[::-1]
|
||||
)
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.filtered_streams))
|
||||
self.conductor.spv_node.server.db.filtering_channel_hashes.add(bytes.fromhex(filtering_channel_id))
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_streams))
|
||||
await self.stream_repost(bad_content_id, 'filter1', '0.1', channel_name='@filtering')
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.filtered_streams))
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_streams))
|
||||
|
||||
# search for filtered content directly
|
||||
result = await self.out(self.daemon.jsonrpc_claim_search(name='bad_content'))
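The filtering (and, below, blocking) channel sets move off the sql layer onto the leveldb-backed db, and the stored hash changes from the byte-reversed unhexlify(claim_id)[::-1] to bytes.fromhex(claim_id). The only difference between the two conversions is byte order; the claim_id value below is illustrative:

from binascii import unhexlify

claim_id = 'deadbeef' * 5                              # 40 hex chars = 20-byte claim id
assert bytes.fromhex(claim_id) == unhexlify(claim_id)  # same bytes, display (big-endian) order
assert bytes.fromhex(claim_id)[::-1] == unhexlify(claim_id)[::-1]  # the old sets stored this reversed form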
|
||||
|
@ -1531,12 +1537,16 @@ class StreamCommands(ClaimTestCase):
|
|||
blocking_channel_id = self.get_claim_id(
|
||||
await self.channel_create('@blocking', '0.1')
|
||||
)
|
||||
self.conductor.spv_node.server.db.sql.blocking_channel_hashes.add(
|
||||
unhexlify(blocking_channel_id)[::-1]
|
||||
)
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.blocked_streams))
|
||||
# test setting from env vars and starting from scratch
|
||||
await self.conductor.spv_node.stop(False)
|
||||
await self.conductor.spv_node.start(self.conductor.blockchain_node,
|
||||
extraconf={'BLOCKING_CHANNEL_IDS': blocking_channel_id,
|
||||
'FILTERING_CHANNEL_IDS': filtering_channel_id})
|
||||
await self.daemon.wallet_manager.reset()
|
||||
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_streams))
|
||||
await self.stream_repost(bad_content_id, 'block1', '0.1', channel_name='@blocking')
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.blocked_streams))
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_streams))
|
||||
|
||||
# blocked content is not resolveable
|
||||
error = (await self.resolve('lbry://@some_channel/bad_content'))['error']
|
||||
|
@ -1559,9 +1569,9 @@ class StreamCommands(ClaimTestCase):
|
|||
self.assertEqual('@bad_channel', result['items'][1]['name'])
|
||||
|
||||
# filter channel out
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.filtered_channels))
|
||||
self.assertEqual(0, len(self.conductor.spv_node.server.db.filtered_channels))
|
||||
await self.stream_repost(bad_channel_id, 'filter2', '0.1', channel_name='@filtering')
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.filtered_channels))
|
||||
self.assertEqual(1, len(self.conductor.spv_node.server.db.filtered_channels))
|
||||
|
||||
# same claim search as previous now returns 0 results
|
||||
result = await self.out(self.daemon.jsonrpc_claim_search(any_tags=['bad-stuff'], order_by=['height']))
|
||||
|
@@ -1586,9 +1596,9 @@ class StreamCommands(ClaimTestCase):
self.assertEqual(worse_content_id, result['claim_id'])

# block channel
self.assertEqual(0, len(self.conductor.spv_node.server.db.sql.blocked_channels))
self.assertEqual(0, len(self.conductor.spv_node.server.db.blocked_channels))
await self.stream_repost(bad_channel_id, 'block2', '0.1', channel_name='@blocking')
self.assertEqual(1, len(self.conductor.spv_node.server.db.sql.blocked_channels))
self.assertEqual(1, len(self.conductor.spv_node.server.db.blocked_channels))

# channel, claim in channel or claim individually no longer resolve
self.assertEqual((await self.resolve('lbry://@bad_channel'))['error']['name'], 'BLOCKED')
@@ -1760,6 +1770,16 @@ class StreamCommands(ClaimTestCase):
self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=self.account.id), 3)
self.assertItemCount(await self.daemon.jsonrpc_claim_list(account_id=account2_id), 1)

self.assertEqual(3, len(await self.claim_search(release_time='>0', order_by=['release_time'])))
self.assertEqual(3, len(await self.claim_search(release_time='>=0', order_by=['release_time'])))
self.assertEqual(4, len(await self.claim_search(order_by=['release_time'])))
self.assertEqual(4, len(await self.claim_search(release_time='<derp', order_by=['release_time'])))
self.assertEqual(3, len(await self.claim_search(claim_type='stream', order_by=['release_time'])))
self.assertEqual(1, len(await self.claim_search(claim_type='channel', order_by=['release_time'])))
self.assertEqual(1, len(await self.claim_search(release_time='>=123456', order_by=['release_time'])))
self.assertEqual(1, len(await self.claim_search(release_time='>123456', order_by=['release_time'])))
self.assertEqual(2, len(await self.claim_search(release_time='<123457', order_by=['release_time'])))

async def test_setting_fee_fields(self):
    tx = await self.out(self.stream_create('paid-stream'))
    txo = tx['outputs'][0]
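The release_time assertions above exercise the comparison prefixes that the integer search constraints accept. Here is a small sketch of that parsing, modelled on the ops table in the old SQLite reader further down in this diff; treating an unparseable value as "no constraint" is inferred from the '<derp' assertion above, not taken from the hub code.

# Comparison operators accepted by release_time (and other integer constraints).
OPS = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}

def parse_int_constraint(value):
    # Split a constraint like '>123456' into ('gt', 123456); a bare number
    # means equality; an unparseable value returns None so it can be ignored.
    op = 'eq'
    if isinstance(value, str):
        if value[:2] in OPS:
            op, value = OPS[value[:2]], value[2:]
        elif value[:1] in OPS:
            op, value = OPS[value[:1]], value[1:]
    try:
        return op, int(value)
    except (TypeError, ValueError):
        return None

assert parse_int_constraint('>=123456') == ('gte', 123456)
assert parse_int_constraint('<derp') is None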
@@ -199,5 +199,5 @@ class EpicAdventuresOfChris45(CommandTestCase):
# He closes and opens the wallet server databases to see how horribly they break
db = self.conductor.spv_node.server.db
db.close()
await db.open_for_serving()
await db.open_dbs()
# They didn't! (error would be AssertionError: 276 vs 266 (264 counts) on startup)
@@ -2,7 +2,7 @@ import logging
import pathlib
import time

from ..blockchain.test_claim_commands import ClaimTestCase
from ..claims.test_claim_commands import ClaimTestCase
from lbry.conf import TranscodeConfig
from lbry.file_analysis import VideoFileAnalyzer
0     tests/integration/takeovers/__init__.py                 Normal file
1708  tests/integration/takeovers/test_resolve_command.py     Normal file (diff suppressed because it is too large)
0     tests/integration/transactions/__init__.py              Normal file
@@ -17,13 +17,14 @@ class BasicTransactionTest(IntegrationTestCase):
await self.account.ensure_address_gap()

address1, address2 = await self.account.receiving.get_addresses(limit=2, only_usable=True)
notifications = asyncio.create_task(asyncio.wait(
    [asyncio.ensure_future(self.on_address_update(address1)),
     asyncio.ensure_future(self.on_address_update(address2))]
))
sendtxid1 = await self.blockchain.send_to_address(address1, 5)
sendtxid2 = await self.blockchain.send_to_address(address2, 5)
await self.blockchain.generate(1)
await asyncio.wait([
    self.on_transaction_id(sendtxid1),
    self.on_transaction_id(sendtxid2)
])
await notifications

self.assertEqual(d2l(await self.account.get_balance()), '10.0')
@@ -44,18 +45,18 @@ class BasicTransactionTest(IntegrationTestCase):
stream_txo.sign(channel_txo)
await stream_tx.sign([self.account])

notifications = asyncio.create_task(asyncio.wait(
    [asyncio.ensure_future(self.ledger.wait(channel_tx)), asyncio.ensure_future(self.ledger.wait(stream_tx))]
))

await self.broadcast(channel_tx)
await self.broadcast(stream_tx)
await asyncio.wait([  # mempool
    self.ledger.wait(channel_tx),
    self.ledger.wait(stream_tx)
])
await notifications
notifications = asyncio.create_task(asyncio.wait(
    [asyncio.ensure_future(self.ledger.wait(channel_tx)), asyncio.ensure_future(self.ledger.wait(stream_tx))]
))
await self.blockchain.generate(1)
await asyncio.wait([  # confirmed
    self.ledger.wait(channel_tx),
    self.ledger.wait(stream_tx)
])

await notifications
self.assertEqual(d2l(await self.account.get_balance()), '7.985786')
self.assertEqual(d2l(await self.account.get_balance(include_claims=True)), '9.985786')
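Both hunks above create the notification-wait task before broadcasting or mining, then await it afterwards. A self-contained toy showing why that ordering matters; Notifier is a stand-in for the ledger's event stream, not the real API.

import asyncio

class Notifier:
    # Tiny stand-in for the ledger's event stream: a waiter only sees events
    # emitted after it subscribed, so a late subscriber misses them.
    def __init__(self):
        self._waiters = []

    def wait(self) -> asyncio.Future:
        fut = asyncio.get_running_loop().create_future()
        self._waiters.append(fut)
        return fut

    def emit(self, value):
        waiters, self._waiters = self._waiters, []
        for fut in waiters:
            fut.set_result(value)

async def main():
    notifier = Notifier()
    # Subscribe first, then trigger: the ordering the tests enforce by creating
    # notifications = asyncio.create_task(asyncio.wait([...])) before broadcast().
    waiting = asyncio.create_task(asyncio.wait([notifier.wait()]))
    notifier.emit('tx in mempool')   # stand-in for broadcast() / generate(1)
    await waiting                    # completes; subscribing afterwards would wait forever

asyncio.run(main())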
@@ -63,10 +64,12 @@ class BasicTransactionTest(IntegrationTestCase):
self.assertEqual(response['lbry://@bar/foo'].claim.claim_type, 'stream')

abandon_tx = await Transaction.create([Input.spend(stream_tx.outputs[0])], [], [self.account], self.account)
notify = asyncio.create_task(self.ledger.wait(abandon_tx))
await self.broadcast(abandon_tx)
await self.ledger.wait(abandon_tx)
await notify
notify = asyncio.create_task(self.ledger.wait(abandon_tx))
await self.blockchain.generate(1)
await self.ledger.wait(abandon_tx)
await notify

response = await self.ledger.resolve([], ['lbry://@bar/foo'])
self.assertIn('error', response['lbry://@bar/foo'])
@@ -17,14 +17,15 @@ class BasicTransactionTests(IntegrationTestCase):

# send 10 coins to first 10 receiving addresses and then 10 transactions worth 10 coins each
# to the 10th receiving address for a total of 30 UTXOs on the entire account
sends = list(chain(
    (self.blockchain.send_to_address(address, 10) for address in addresses[:10]),
    (self.blockchain.send_to_address(addresses[9], 10) for _ in range(10))
))
for i in range(10):
    notification = asyncio.ensure_future(self.on_address_update(addresses[i]))
    txid = await self.blockchain.send_to_address(addresses[i], 10)
    await notification
notification = asyncio.ensure_future(self.on_address_update(addresses[9]))
txid = await self.blockchain.send_to_address(addresses[9], 10)
await notification

# use batching to reduce issues with send_to_address on cli
for batch in range(0, len(sends), 10):
    txids = await asyncio.gather(*sends[batch:batch+10])
    await asyncio.wait([self.on_transaction_id(txid) for txid in txids])
await self.assertBalance(self.account, '200.0')
self.assertEqual(20, await self.account.get_utxo_count())
@@ -136,7 +137,7 @@ class BasicTransactionTests(IntegrationTestCase):
await self.assertBalance(self.account, '0.0')
address = await self.account.receiving.get_or_create_usable_address()
# evil trick: the mempool is unsorted in real life, but has the same order between python instances; reproduce that
original_summary = self.conductor.spv_node.server.mempool.transaction_summaries
original_summary = self.conductor.spv_node.server.bp.mempool.transaction_summaries

def random_summary(*args, **kwargs):
    summary = original_summary(*args, **kwargs)

@@ -145,7 +146,7 @@ class BasicTransactionTests(IntegrationTestCase):
    while summary == ordered:
        random.shuffle(summary)
    return summary
self.conductor.spv_node.server.mempool.transaction_summaries = random_summary
self.conductor.spv_node.server.bp.mempool.transaction_summaries = random_summary
# 10 unconfirmed txs, all from blockchain wallet
sends = [self.blockchain.send_to_address(address, 10) for _ in range(10)]
# use batching to reduce issues with send_to_address on cli
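random_summary wraps the original transaction_summaries so the mempool comes back in a different order on every call; the ordered variable it compares against is assigned just outside the lines shown. A self-contained version of the same wrapper idea, with hypothetical names.

import random

def shuffled_wrapper(original):
    # Wrap a function that returns a list so callers see the same items in a
    # different, non-original order: the trick the test plays on
    # transaction_summaries to simulate an unsorted mempool.
    def wrapper(*args, **kwargs):
        ordered = original(*args, **kwargs)
        if len(ordered) < 2:
            return ordered            # nothing to shuffle
        summary = list(ordered)
        while summary == ordered:
            random.shuffle(summary)
        return summary
    return wrapper

# hypothetical usage, mirroring the monkeypatch above:
# server.bp.mempool.transaction_summaries = shuffled_wrapper(server.bp.mempool.transaction_summaries)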
@@ -175,11 +176,6 @@ class BasicTransactionTests(IntegrationTestCase):
self.assertEqual(21, len((await self.ledger.get_local_status_and_history(address))[1]))
self.assertEqual(0, len(self.ledger._known_addresses_out_of_sync))

def wait_for_txid(self, txid, address):
    return self.ledger.on_transaction.where(
        lambda e: e.tx.id == txid and e.address == address
    )

async def _test_transaction(self, send_amount, address, inputs, change):
    tx = await Transaction.create(
        [], [Output.pay_pubkey_hash(send_amount, self.ledger.address_to_hash160(address))], [self.account],
@@ -204,6 +200,7 @@ class BasicTransactionTests(IntegrationTestCase):
async def test_sqlite_coin_chooser(self):
    wallet_manager = WalletManager([self.wallet], {self.ledger.get_id(): self.ledger})
    await self.blockchain.generate(300)

    await self.assertBalance(self.account, '0.0')
    address = await self.account.receiving.get_or_create_usable_address()
    other_account = self.wallet.generate_account(self.ledger)
@@ -211,14 +208,26 @@ class BasicTransactionTests(IntegrationTestCase):
self.ledger.coin_selection_strategy = 'sqlite'
await self.ledger.subscribe_account(self.account)

txids = []
txids.append(await self.blockchain.send_to_address(address, 1.0))
txids.append(await self.blockchain.send_to_address(address, 1.0))
txids.append(await self.blockchain.send_to_address(address, 3.0))
txids.append(await self.blockchain.send_to_address(address, 5.0))
txids.append(await self.blockchain.send_to_address(address, 10.0))
accepted = asyncio.ensure_future(self.on_address_update(address))
txid = await self.blockchain.send_to_address(address, 1.0)
await accepted

accepted = asyncio.ensure_future(self.on_address_update(address))
txid = await self.blockchain.send_to_address(address, 1.0)
await accepted

accepted = asyncio.ensure_future(self.on_address_update(address))
txid = await self.blockchain.send_to_address(address, 3.0)
await accepted

accepted = asyncio.ensure_future(self.on_address_update(address))
txid = await self.blockchain.send_to_address(address, 5.0)
await accepted

accepted = asyncio.ensure_future(self.on_address_update(address))
txid = await self.blockchain.send_to_address(address, 10.0)
await accepted

await asyncio.wait([self.wait_for_txid(txid, address) for txid in txids], timeout=1)
await self.assertBalance(self.account, '20.0')
await self.assertSpendable([99992600, 99992600, 299992600, 499992600, 999992600])
@@ -1,616 +0,0 @@
|
|||
import time
|
||||
import struct
|
||||
import sqlite3
|
||||
import logging
|
||||
from operator import itemgetter
|
||||
from typing import Tuple, List, Dict, Union, Type, Optional
|
||||
from binascii import unhexlify
|
||||
from decimal import Decimal
|
||||
from contextvars import ContextVar
|
||||
from functools import wraps
|
||||
from itertools import chain
|
||||
from dataclasses import dataclass
|
||||
|
||||
from lbry.wallet.database import query, interpolate
|
||||
from lbry.error import ResolveCensoredError
|
||||
from lbry.schema.url import URL, normalize_name
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.result import Outputs, Censor
|
||||
from lbry.wallet import Ledger, RegTestLedger
|
||||
|
||||
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES, COMMON_TAGS, INDEXED_LANGUAGES
|
||||
|
||||
|
||||
class SQLiteOperationalError(sqlite3.OperationalError):
|
||||
def __init__(self, metrics):
|
||||
super().__init__('sqlite query errored')
|
||||
self.metrics = metrics
|
||||
|
||||
|
||||
class SQLiteInterruptedError(sqlite3.OperationalError):
|
||||
def __init__(self, metrics):
|
||||
super().__init__('sqlite query interrupted')
|
||||
self.metrics = metrics
|
||||
|
||||
|
||||
ATTRIBUTE_ARRAY_MAX_LENGTH = 100
|
||||
sqlite3.enable_callback_tracebacks(True)
|
||||
|
||||
INTEGER_PARAMS = {
|
||||
'height', 'creation_height', 'activation_height', 'expiration_height',
|
||||
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
|
||||
'tx_position', 'channel_join', 'reposted', 'limit_claims_per_channel',
|
||||
'amount', 'effective_amount', 'support_amount',
|
||||
'trending_group', 'trending_mixed',
|
||||
'trending_local', 'trending_global',
|
||||
}
|
||||
|
||||
SEARCH_PARAMS = {
|
||||
'name', 'text', 'claim_id', 'claim_ids', 'txid', 'nout', 'channel', 'channel_ids', 'not_channel_ids',
|
||||
'public_key_id', 'claim_type', 'stream_types', 'media_types', 'fee_currency',
|
||||
'has_channel_signature', 'signature_valid',
|
||||
'any_tags', 'all_tags', 'not_tags', 'reposted_claim_id',
|
||||
'any_locations', 'all_locations', 'not_locations',
|
||||
'any_languages', 'all_languages', 'not_languages',
|
||||
'is_controlling', 'limit', 'offset', 'order_by',
|
||||
'no_totals', 'has_source'
|
||||
} | INTEGER_PARAMS
|
||||
|
||||
|
||||
ORDER_FIELDS = {
|
||||
'name', 'claim_hash'
|
||||
} | INTEGER_PARAMS
|
||||
|
||||
|
||||
@dataclass
|
||||
class ReaderState:
|
||||
db: sqlite3.Connection
|
||||
stack: List[List]
|
||||
metrics: Dict
|
||||
is_tracking_metrics: bool
|
||||
ledger: Type[Ledger]
|
||||
query_timeout: float
|
||||
log: logging.Logger
|
||||
blocked_streams: Dict
|
||||
blocked_channels: Dict
|
||||
filtered_streams: Dict
|
||||
filtered_channels: Dict
|
||||
|
||||
def close(self):
|
||||
self.db.close()
|
||||
|
||||
def reset_metrics(self):
|
||||
self.stack = []
|
||||
self.metrics = {}
|
||||
|
||||
def set_query_timeout(self):
|
||||
stop_at = time.perf_counter() + self.query_timeout
|
||||
|
||||
def interruptor():
|
||||
if time.perf_counter() >= stop_at:
|
||||
self.db.interrupt()
|
||||
return
|
||||
|
||||
self.db.set_progress_handler(interruptor, 100)
|
||||
|
||||
def get_resolve_censor(self) -> Censor:
|
||||
return Censor(Censor.RESOLVE)
|
||||
|
||||
def get_search_censor(self, limit_claims_per_channel: int) -> Censor:
|
||||
return Censor(Censor.SEARCH)
|
||||
|
||||
|
||||
ctx: ContextVar[Optional[ReaderState]] = ContextVar('ctx')
|
||||
|
||||
|
||||
def row_factory(cursor, row):
|
||||
return {
|
||||
k[0]: (set(row[i].split(',')) if k[0] == 'tags' else row[i])
|
||||
for i, k in enumerate(cursor.description)
|
||||
}
|
||||
|
||||
|
||||
def initializer(log, _path, _ledger_name, query_timeout, _measure=False, block_and_filter=None):
|
||||
db = sqlite3.connect(_path, isolation_level=None, uri=True)
|
||||
db.row_factory = row_factory
|
||||
if block_and_filter:
|
||||
blocked_streams, blocked_channels, filtered_streams, filtered_channels = block_and_filter
|
||||
else:
|
||||
blocked_streams = blocked_channels = filtered_streams = filtered_channels = {}
|
||||
ctx.set(
|
||||
ReaderState(
|
||||
db=db, stack=[], metrics={}, is_tracking_metrics=_measure,
|
||||
ledger=Ledger if _ledger_name == 'mainnet' else RegTestLedger,
|
||||
query_timeout=query_timeout, log=log,
|
||||
blocked_streams=blocked_streams, blocked_channels=blocked_channels,
|
||||
filtered_streams=filtered_streams, filtered_channels=filtered_channels,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def cleanup():
|
||||
ctx.get().close()
|
||||
ctx.set(None)
|
||||
|
||||
|
||||
def measure(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
state = ctx.get()
|
||||
if not state.is_tracking_metrics:
|
||||
return func(*args, **kwargs)
|
||||
metric = {}
|
||||
state.metrics.setdefault(func.__name__, []).append(metric)
|
||||
state.stack.append([])
|
||||
start = time.perf_counter()
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
finally:
|
||||
elapsed = int((time.perf_counter()-start)*1000)
|
||||
metric['total'] = elapsed
|
||||
metric['isolated'] = (elapsed-sum(state.stack.pop()))
|
||||
if state.stack:
|
||||
state.stack[-1].append(elapsed)
|
||||
return wrapper
|
||||
|
||||
|
||||
def reports_metrics(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
state = ctx.get()
|
||||
if not state.is_tracking_metrics:
|
||||
return func(*args, **kwargs)
|
||||
state.reset_metrics()
|
||||
r = func(*args, **kwargs)
|
||||
return r, state.metrics
|
||||
return wrapper
|
||||
|
||||
|
||||
@reports_metrics
|
||||
def search_to_bytes(constraints) -> Union[bytes, Tuple[bytes, Dict]]:
|
||||
return encode_result(search(constraints))
|
||||
|
||||
|
||||
@reports_metrics
|
||||
def resolve_to_bytes(urls) -> Union[bytes, Tuple[bytes, Dict]]:
|
||||
return encode_result(resolve(urls))
|
||||
|
||||
|
||||
def encode_result(result):
|
||||
return Outputs.to_bytes(*result)
|
||||
|
||||
|
||||
@measure
|
||||
def execute_query(sql, values, row_offset: int, row_limit: int, censor: Censor) -> List:
|
||||
context = ctx.get()
|
||||
context.set_query_timeout()
|
||||
try:
|
||||
rows = context.db.execute(sql, values).fetchall()
|
||||
return rows[row_offset:row_limit]
|
||||
except sqlite3.OperationalError as err:
|
||||
plain_sql = interpolate(sql, values)
|
||||
if context.is_tracking_metrics:
|
||||
context.metrics['execute_query'][-1]['sql'] = plain_sql
|
||||
context.log.exception('failed running query', exc_info=err)
|
||||
raise SQLiteOperationalError(context.metrics)
|
||||
|
||||
|
||||
def claims_query(cols, for_count=False, **constraints) -> Tuple[str, Dict]:
|
||||
if 'order_by' in constraints:
|
||||
order_by_parts = constraints['order_by']
|
||||
if isinstance(order_by_parts, str):
|
||||
order_by_parts = [order_by_parts]
|
||||
sql_order_by = []
|
||||
for order_by in order_by_parts:
|
||||
is_asc = order_by.startswith('^')
|
||||
column = order_by[1:] if is_asc else order_by
|
||||
if column not in ORDER_FIELDS:
|
||||
raise NameError(f'{column} is not a valid order_by field')
|
||||
if column == 'name':
|
||||
column = 'normalized'
|
||||
sql_order_by.append(
|
||||
f"claim.{column} ASC" if is_asc else f"claim.{column} DESC"
|
||||
)
|
||||
constraints['order_by'] = sql_order_by
|
||||
|
||||
ops = {'<=': '__lte', '>=': '__gte', '<': '__lt', '>': '__gt'}
|
||||
for constraint in INTEGER_PARAMS:
|
||||
if constraint in constraints:
|
||||
value = constraints.pop(constraint)
|
||||
postfix = ''
|
||||
if isinstance(value, str):
|
||||
if len(value) >= 2 and value[:2] in ops:
|
||||
postfix, value = ops[value[:2]], value[2:]
|
||||
elif len(value) >= 1 and value[0] in ops:
|
||||
postfix, value = ops[value[0]], value[1:]
|
||||
if constraint == 'fee_amount':
|
||||
value = Decimal(value)*1000
|
||||
constraints[f'claim.{constraint}{postfix}'] = int(value)
|
||||
|
||||
if constraints.pop('is_controlling', False):
|
||||
if {'sequence', 'amount_order'}.isdisjoint(constraints):
|
||||
for_count = False
|
||||
constraints['claimtrie.claim_hash__is_not_null'] = ''
|
||||
if 'sequence' in constraints:
|
||||
constraints['order_by'] = 'claim.activation_height ASC'
|
||||
constraints['offset'] = int(constraints.pop('sequence')) - 1
|
||||
constraints['limit'] = 1
|
||||
if 'amount_order' in constraints:
|
||||
constraints['order_by'] = 'claim.effective_amount DESC'
|
||||
constraints['offset'] = int(constraints.pop('amount_order')) - 1
|
||||
constraints['limit'] = 1
|
||||
|
||||
if 'claim_id' in constraints:
|
||||
claim_id = constraints.pop('claim_id')
|
||||
if len(claim_id) == 40:
|
||||
constraints['claim.claim_id'] = claim_id
|
||||
else:
|
||||
constraints['claim.claim_id__like'] = f'{claim_id[:40]}%'
|
||||
elif 'claim_ids' in constraints:
|
||||
constraints['claim.claim_id__in'] = set(constraints.pop('claim_ids'))
|
||||
|
||||
if 'reposted_claim_id' in constraints:
|
||||
constraints['claim.reposted_claim_hash'] = unhexlify(constraints.pop('reposted_claim_id'))[::-1]
|
||||
|
||||
if 'name' in constraints:
|
||||
constraints['claim.normalized'] = normalize_name(constraints.pop('name'))
|
||||
|
||||
if 'public_key_id' in constraints:
|
||||
constraints['claim.public_key_hash'] = (
|
||||
ctx.get().ledger.address_to_hash160(constraints.pop('public_key_id')))
|
||||
if 'channel_hash' in constraints:
|
||||
constraints['claim.channel_hash'] = constraints.pop('channel_hash')
|
||||
if 'channel_ids' in constraints:
|
||||
channel_ids = constraints.pop('channel_ids')
|
||||
if channel_ids:
|
||||
constraints['claim.channel_hash__in'] = {
|
||||
unhexlify(cid)[::-1] for cid in channel_ids if cid
|
||||
}
|
||||
if 'not_channel_ids' in constraints:
|
||||
not_channel_ids = constraints.pop('not_channel_ids')
|
||||
if not_channel_ids:
|
||||
not_channel_ids_binary = {
|
||||
unhexlify(ncid)[::-1] for ncid in not_channel_ids
|
||||
}
|
||||
constraints['claim.claim_hash__not_in#not_channel_ids'] = not_channel_ids_binary
|
||||
if constraints.get('has_channel_signature', False):
|
||||
constraints['claim.channel_hash__not_in'] = not_channel_ids_binary
|
||||
else:
|
||||
constraints['null_or_not_channel__or'] = {
|
||||
'claim.signature_valid__is_null': True,
|
||||
'claim.channel_hash__not_in': not_channel_ids_binary
|
||||
}
|
||||
if 'signature_valid' in constraints:
|
||||
has_channel_signature = constraints.pop('has_channel_signature', False)
|
||||
if has_channel_signature:
|
||||
constraints['claim.signature_valid'] = constraints.pop('signature_valid')
|
||||
else:
|
||||
constraints['null_or_signature__or'] = {
|
||||
'claim.signature_valid__is_null': True,
|
||||
'claim.signature_valid': constraints.pop('signature_valid')
|
||||
}
|
||||
elif constraints.pop('has_channel_signature', False):
|
||||
constraints['claim.signature_valid__is_not_null'] = True
|
||||
|
||||
if 'txid' in constraints:
|
||||
tx_hash = unhexlify(constraints.pop('txid'))[::-1]
|
||||
nout = constraints.pop('nout', 0)
|
||||
constraints['claim.txo_hash'] = tx_hash + struct.pack('<I', nout)
|
||||
|
||||
if 'claim_type' in constraints:
|
||||
claim_types = constraints.pop('claim_type')
|
||||
if isinstance(claim_types, str):
|
||||
claim_types = [claim_types]
|
||||
if claim_types:
|
||||
constraints['claim.claim_type__in'] = {
|
||||
CLAIM_TYPES[claim_type] for claim_type in claim_types
|
||||
}
|
||||
if 'stream_types' in constraints:
|
||||
stream_types = constraints.pop('stream_types')
|
||||
if stream_types:
|
||||
constraints['claim.stream_type__in'] = {
|
||||
STREAM_TYPES[stream_type] for stream_type in stream_types
|
||||
}
|
||||
if 'media_types' in constraints:
|
||||
media_types = constraints.pop('media_types')
|
||||
if media_types:
|
||||
constraints['claim.media_type__in'] = set(media_types)
|
||||
|
||||
if 'fee_currency' in constraints:
|
||||
constraints['claim.fee_currency'] = constraints.pop('fee_currency').lower()
|
||||
|
||||
_apply_constraints_for_array_attributes(constraints, 'tag', clean_tags, for_count)
|
||||
_apply_constraints_for_array_attributes(constraints, 'language', lambda _: _, for_count)
|
||||
_apply_constraints_for_array_attributes(constraints, 'location', lambda _: _, for_count)
|
||||
|
||||
select = f"SELECT {cols} FROM claim"
|
||||
if not for_count:
|
||||
select += " LEFT JOIN claimtrie USING (claim_hash)"
|
||||
return query(select, **constraints)
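The order_by handling at the top of claims_query() is easy to miss among the other constraints, so here is the same '^'-prefix convention extracted into a standalone sketch (the field set is abridged for the example).

ORDER_FIELDS_ABRIDGED = {'name', 'claim_hash', 'height', 'release_time', 'effective_amount'}

def normalize_order_by(order_by):
    # Turn ['^height', 'name'] into ['claim.height ASC', 'claim.normalized DESC'],
    # the same convention claims_query() applies above.
    if isinstance(order_by, str):
        order_by = [order_by]
    sql_order_by = []
    for item in order_by:
        is_asc = item.startswith('^')
        column = item[1:] if is_asc else item
        if column not in ORDER_FIELDS_ABRIDGED:
            raise NameError(f'{column} is not a valid order_by field')
        if column == 'name':
            column = 'normalized'
        sql_order_by.append(f"claim.{column} {'ASC' if is_asc else 'DESC'}")
    return sql_order_by

assert normalize_order_by(['^height', 'name']) == ['claim.height ASC', 'claim.normalized DESC']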
|
||||
|
||||
|
||||
def select_claims(censor: Censor, cols: str, for_count=False, **constraints) -> List:
|
||||
if 'channel' in constraints:
|
||||
channel_url = constraints.pop('channel')
|
||||
match = resolve_url(channel_url)
|
||||
if isinstance(match, dict):
|
||||
constraints['channel_hash'] = match['claim_hash']
|
||||
else:
|
||||
return [{'row_count': 0}] if cols == 'count(*) as row_count' else []
|
||||
row_offset = constraints.pop('offset', 0)
|
||||
row_limit = constraints.pop('limit', 20)
|
||||
sql, values = claims_query(cols, for_count, **constraints)
|
||||
return execute_query(sql, values, row_offset, row_limit, censor)
|
||||
|
||||
|
||||
@measure
|
||||
def count_claims(**constraints) -> int:
|
||||
constraints.pop('offset', None)
|
||||
constraints.pop('limit', None)
|
||||
constraints.pop('order_by', None)
|
||||
count = select_claims(Censor(Censor.SEARCH), 'count(*) as row_count', for_count=True, **constraints)
|
||||
return count[0]['row_count']
|
||||
|
||||
|
||||
def search_claims(censor: Censor, **constraints) -> List:
|
||||
return select_claims(
|
||||
censor,
|
||||
"""
|
||||
claimtrie.claim_hash as is_controlling,
|
||||
claimtrie.last_take_over_height,
|
||||
claim.claim_hash, claim.txo_hash,
|
||||
claim.claims_in_channel, claim.reposted,
|
||||
claim.height, claim.creation_height,
|
||||
claim.activation_height, claim.expiration_height,
|
||||
claim.effective_amount, claim.support_amount,
|
||||
claim.trending_group, claim.trending_mixed,
|
||||
claim.trending_local, claim.trending_global,
|
||||
claim.short_url, claim.canonical_url,
|
||||
claim.channel_hash, claim.reposted_claim_hash,
|
||||
claim.signature_valid
|
||||
""", **constraints
|
||||
)
|
||||
|
||||
|
||||
def _get_referenced_rows(txo_rows: List[dict], censor_channels: List[bytes]):
|
||||
censor = ctx.get().get_resolve_censor()
|
||||
repost_hashes = set(filter(None, map(itemgetter('reposted_claim_hash'), txo_rows)))
|
||||
channel_hashes = set(chain(
|
||||
filter(None, map(itemgetter('channel_hash'), txo_rows)),
|
||||
censor_channels
|
||||
))
|
||||
|
||||
reposted_txos = []
|
||||
if repost_hashes:
|
||||
reposted_txos = search_claims(censor, **{'claim.claim_hash__in': repost_hashes})
|
||||
channel_hashes |= set(filter(None, map(itemgetter('channel_hash'), reposted_txos)))
|
||||
|
||||
channel_txos = []
|
||||
if channel_hashes:
|
||||
channel_txos = search_claims(censor, **{'claim.claim_hash__in': channel_hashes})
|
||||
|
||||
# channels must come first for client side inflation to work properly
|
||||
return channel_txos + reposted_txos
|
||||
|
||||
@measure
|
||||
def search(constraints) -> Tuple[List, List, int, int, Censor]:
|
||||
assert set(constraints).issubset(SEARCH_PARAMS), \
|
||||
f"Search query contains invalid arguments: {set(constraints).difference(SEARCH_PARAMS)}"
|
||||
total = None
|
||||
limit_claims_per_channel = constraints.pop('limit_claims_per_channel', None)
|
||||
if not constraints.pop('no_totals', False):
|
||||
total = count_claims(**constraints)
|
||||
constraints['offset'] = abs(constraints.get('offset', 0))
|
||||
constraints['limit'] = min(abs(constraints.get('limit', 10)), 50)
|
||||
context = ctx.get()
|
||||
search_censor = context.get_search_censor(limit_claims_per_channel)
|
||||
txo_rows = search_claims(search_censor, **constraints)
|
||||
extra_txo_rows = _get_referenced_rows(txo_rows, search_censor.censored.keys())
|
||||
return txo_rows, extra_txo_rows, constraints['offset'], total, search_censor
|
||||
|
||||
|
||||
@measure
|
||||
def resolve(urls) -> Tuple[List, List]:
|
||||
txo_rows = [resolve_url(raw_url) for raw_url in urls]
|
||||
extra_txo_rows = _get_referenced_rows(
|
||||
[txo for txo in txo_rows if isinstance(txo, dict)],
|
||||
[txo.censor_id for txo in txo_rows if isinstance(txo, ResolveCensoredError)]
|
||||
)
|
||||
return txo_rows, extra_txo_rows
|
||||
|
||||
|
||||
@measure
|
||||
def resolve_url(raw_url):
|
||||
censor = ctx.get().get_resolve_censor()
|
||||
|
||||
try:
|
||||
url = URL.parse(raw_url)
|
||||
except ValueError as e:
|
||||
return e
|
||||
|
||||
channel = None
|
||||
|
||||
if url.has_channel:
|
||||
query = url.channel.to_dict()
|
||||
if set(query) == {'name'}:
|
||||
query['is_controlling'] = True
|
||||
else:
|
||||
query['order_by'] = ['^creation_height']
|
||||
matches = search_claims(censor, **query, limit=1)
|
||||
if matches:
|
||||
channel = matches[0]
|
||||
elif censor.censored:
|
||||
return ResolveCensoredError(raw_url, next(iter(censor.censored)))
|
||||
else:
|
||||
return LookupError(f'Could not find channel in "{raw_url}".')
|
||||
|
||||
if url.has_stream:
|
||||
query = url.stream.to_dict()
|
||||
if channel is not None:
|
||||
if set(query) == {'name'}:
|
||||
# temporarily emulate is_controlling for claims in channel
|
||||
query['order_by'] = ['effective_amount', '^height']
|
||||
else:
|
||||
query['order_by'] = ['^channel_join']
|
||||
query['channel_hash'] = channel['claim_hash']
|
||||
query['signature_valid'] = 1
|
||||
elif set(query) == {'name'}:
|
||||
query['is_controlling'] = 1
|
||||
matches = search_claims(censor, **query, limit=1)
|
||||
if matches:
|
||||
return matches[0]
|
||||
elif censor.censored:
|
||||
return ResolveCensoredError(raw_url, next(iter(censor.censored)))
|
||||
else:
|
||||
return LookupError(f'Could not find claim at "{raw_url}".')
|
||||
|
||||
return channel
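resolve_url() deliberately returns three different kinds of values, and resolve() above depends on that contract when it collects censored channel ids. A short sketch of the partitioning, using only names that appear in this file:

from lbry.error import ResolveCensoredError

def partition_resolved(txo_rows):
    # Sketch of the contract resolve() relies on:
    #   dict                   -> the winning claim row
    #   ResolveCensoredError   -> the URL matched only censored claims
    #   LookupError/ValueError -> nothing matched, or the URL failed to parse
    found = [row for row in txo_rows if isinstance(row, dict)]
    censored = [err.censor_id for err in txo_rows if isinstance(err, ResolveCensoredError)]
    errors = [err for err in txo_rows
              if isinstance(err, Exception) and not isinstance(err, ResolveCensoredError)]
    return found, censored, errors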
|
||||
|
||||
|
||||
CLAIM_HASH_OR_REPOST_HASH_SQL = f"""
|
||||
CASE WHEN claim.claim_type = {CLAIM_TYPES['repost']}
|
||||
THEN claim.reposted_claim_hash
|
||||
ELSE claim.claim_hash
|
||||
END
|
||||
"""
|
||||
|
||||
|
||||
def _apply_constraints_for_array_attributes(constraints, attr, cleaner, for_count=False):
|
||||
any_items = set(cleaner(constraints.pop(f'any_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||
all_items = set(cleaner(constraints.pop(f'all_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||
not_items = set(cleaner(constraints.pop(f'not_{attr}s', []))[:ATTRIBUTE_ARRAY_MAX_LENGTH])
|
||||
|
||||
all_items = {item for item in all_items if item not in not_items}
|
||||
any_items = {item for item in any_items if item not in not_items}
|
||||
|
||||
any_queries = {}
|
||||
|
||||
if attr == 'tag':
|
||||
common_tags = any_items & COMMON_TAGS.keys()
|
||||
if common_tags:
|
||||
any_items -= common_tags
|
||||
if len(common_tags) < 5:
|
||||
for item in common_tags:
|
||||
index_name = COMMON_TAGS[item]
|
||||
any_queries[f'#_common_tag_{index_name}'] = f"""
|
||||
EXISTS(
|
||||
SELECT 1 FROM tag INDEXED BY tag_{index_name}_idx
|
||||
WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
|
||||
AND tag = '{item}'
|
||||
)
|
||||
"""
|
||||
elif len(common_tags) >= 5:
|
||||
constraints.update({
|
||||
f'$any_common_tag{i}': item for i, item in enumerate(common_tags)
|
||||
})
|
||||
values = ', '.join(
|
||||
f':$any_common_tag{i}' for i in range(len(common_tags))
|
||||
)
|
||||
any_queries[f'#_any_common_tags'] = f"""
|
||||
EXISTS(
|
||||
SELECT 1 FROM tag WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=tag.claim_hash
|
||||
AND tag IN ({values})
|
||||
)
|
||||
"""
|
||||
elif attr == 'language':
|
||||
indexed_languages = any_items & set(INDEXED_LANGUAGES)
|
||||
if indexed_languages:
|
||||
any_items -= indexed_languages
|
||||
for language in indexed_languages:
|
||||
any_queries[f'#_any_common_languages_{language}'] = f"""
|
||||
EXISTS(
|
||||
SELECT 1 FROM language INDEXED BY language_{language}_idx
|
||||
WHERE {CLAIM_HASH_OR_REPOST_HASH_SQL}=language.claim_hash
|
||||
AND language = '{language}'
|
||||
)
|
||||
"""
|
||||
|
||||
if any_items:
|
||||
|
||||
constraints.update({
|
||||
f'$any_{attr}{i}': item for i, item in enumerate(any_items)
|
||||
})
|
||||
values = ', '.join(
|
||||
f':$any_{attr}{i}' for i in range(len(any_items))
|
||||
)
|
||||
if for_count or attr == 'tag':
|
||||
if attr == 'tag':
|
||||
any_queries[f'#_any_{attr}'] = f"""
|
||||
((claim.claim_type != {CLAIM_TYPES['repost']}
|
||||
AND claim.claim_hash IN (SELECT claim_hash FROM tag WHERE tag IN ({values}))) OR
|
||||
(claim.claim_type == {CLAIM_TYPES['repost']} AND
|
||||
claim.reposted_claim_hash IN (SELECT claim_hash FROM tag WHERE tag IN ({values}))))
|
||||
"""
|
||||
else:
|
||||
any_queries[f'#_any_{attr}'] = f"""
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
|
||||
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
else:
|
||||
any_queries[f'#_any_{attr}'] = f"""
|
||||
EXISTS(
|
||||
SELECT 1 FROM {attr} WHERE
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||
AND {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
|
||||
if len(any_queries) == 1:
|
||||
constraints.update(any_queries)
|
||||
elif len(any_queries) > 1:
|
||||
constraints[f'ORed_{attr}_queries__any'] = any_queries
|
||||
|
||||
if all_items:
|
||||
constraints[f'$all_{attr}_count'] = len(all_items)
|
||||
constraints.update({
|
||||
f'$all_{attr}{i}': item for i, item in enumerate(all_items)
|
||||
})
|
||||
values = ', '.join(
|
||||
f':$all_{attr}{i}' for i in range(len(all_items))
|
||||
)
|
||||
if for_count:
|
||||
constraints[f'#_all_{attr}'] = f"""
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL} IN (
|
||||
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||
GROUP BY claim_hash HAVING COUNT({attr}) = :$all_{attr}_count
|
||||
)
|
||||
"""
|
||||
else:
|
||||
constraints[f'#_all_{attr}'] = f"""
|
||||
{len(all_items)}=(
|
||||
SELECT count(*) FROM {attr} WHERE
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||
AND {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
|
||||
if not_items:
|
||||
constraints.update({
|
||||
f'$not_{attr}{i}': item for i, item in enumerate(not_items)
|
||||
})
|
||||
values = ', '.join(
|
||||
f':$not_{attr}{i}' for i in range(len(not_items))
|
||||
)
|
||||
if for_count:
|
||||
if attr == 'tag':
|
||||
constraints[f'#_not_{attr}'] = f"""
|
||||
((claim.claim_type != {CLAIM_TYPES['repost']}
|
||||
AND claim.claim_hash NOT IN (SELECT claim_hash FROM tag WHERE tag IN ({values}))) OR
|
||||
(claim.claim_type == {CLAIM_TYPES['repost']} AND
|
||||
claim.reposted_claim_hash NOT IN (SELECT claim_hash FROM tag WHERE tag IN ({values}))))
|
||||
"""
|
||||
else:
|
||||
constraints[f'#_not_{attr}'] = f"""
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL} NOT IN (
|
||||
SELECT claim_hash FROM {attr} WHERE {attr} IN ({values})
|
||||
)
|
||||
"""
|
||||
else:
|
||||
constraints[f'#_not_{attr}'] = f"""
|
||||
NOT EXISTS(
|
||||
SELECT 1 FROM {attr} WHERE
|
||||
{CLAIM_HASH_OR_REPOST_HASH_SQL}={attr}.claim_hash
|
||||
AND {attr} IN ({values})
|
||||
)
|
||||
"""
|
|
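_apply_constraints_for_array_attributes() encodes the tag/language/location semantics entirely in SQL. The same semantics stated in plain Python, which may be easier to check the generated queries against:

def matches_tag_constraints(claim_tags, any_tags=(), all_tags=(), not_tags=()):
    # At least one of any_tags, every one of all_tags, and none of not_tags.
    tags = set(claim_tags)
    if not_tags and tags & set(not_tags):
        return False
    if all_tags and not set(all_tags) <= tags:
        return False
    if any_tags and not tags & set(any_tags):
        return False
    return True

assert matches_tag_constraints({'art', 'music'}, any_tags={'music'}, not_tags={'nsfw'})
assert not matches_tag_constraints({'art'}, all_tags={'art', 'music'})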
@@ -1,57 +1,57 @@
|
|||
import unittest
|
||||
from shutil import rmtree
|
||||
from tempfile import mkdtemp
|
||||
|
||||
from lbry.wallet.server.history import History
|
||||
from lbry.wallet.server.storage import LevelDB
|
||||
|
||||
|
||||
# dumped from a real history database. Aside from the state, all records are <hashX><flush_count>: <value>
|
||||
STATE_RECORD = (b'state\x00\x00', b"{'flush_count': 21497, 'comp_flush_count': -1, 'comp_cursor': -1, 'db_version': 0}")
|
||||
UNMIGRATED_RECORDS = {
|
||||
'00538b2cbe4a5f1be2dc320241': 'f5ed500142ee5001',
|
||||
'00538b48def1904014880501f2': 'b9a52a01baa52a01',
|
||||
'00538cdcf989b74de32c5100ca': 'c973870078748700',
|
||||
'00538d42d5df44603474284ae1': 'f5d9d802',
|
||||
'00538d42d5df44603474284ae2': '75dad802',
|
||||
'00538ebc879dac6ddbee9e0029': '3ca42f0042a42f00',
|
||||
'00538ed1d391327208748200bc': '804e7d00af4e7d00',
|
||||
'00538f3de41d9e33affa0300c2': '7de8810086e88100',
|
||||
'00539007f87792d98422c505a5': '8c5a7202445b7202',
|
||||
'0053902cf52ee9682d633b0575': 'eb0f64026c106402',
|
||||
'005390e05674571551632205a2': 'a13d7102e13d7102',
|
||||
'0053914ef25a9ceed927330584': '78096902960b6902',
|
||||
'005391768113f69548f37a01b1': 'a5b90b0114ba0b01',
|
||||
'005391a289812669e5b44c02c2': '33da8a016cdc8a01',
|
||||
}
|
||||
|
||||
|
||||
class TestHistoryDBMigration(unittest.TestCase):
|
||||
def test_migrate_flush_count_from_16_to_32_bits(self):
|
||||
self.history = History()
|
||||
tmpdir = mkdtemp()
|
||||
self.addCleanup(lambda: rmtree(tmpdir))
|
||||
LevelDB.import_module()
|
||||
db = LevelDB(tmpdir, 'hist', True)
|
||||
with db.write_batch() as batch:
|
||||
for key, value in UNMIGRATED_RECORDS.items():
|
||||
batch.put(bytes.fromhex(key), bytes.fromhex(value))
|
||||
batch.put(*STATE_RECORD)
|
||||
self.history.db = db
|
||||
self.history.read_state()
|
||||
self.assertEqual(21497, self.history.flush_count)
|
||||
self.assertEqual(0, self.history.db_version)
|
||||
self.assertTrue(self.history.needs_migration)
|
||||
self.history.migrate()
|
||||
self.assertFalse(self.history.needs_migration)
|
||||
self.assertEqual(1, self.history.db_version)
|
||||
for idx, (key, value) in enumerate(sorted(db.iterator())):
|
||||
if key == b'state\x00\x00':
|
||||
continue
|
||||
key, counter = key[:-4], key[-4:]
|
||||
expected_value = UNMIGRATED_RECORDS[key.hex() + counter.hex()[-4:]]
|
||||
self.assertEqual(value.hex(), expected_value)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
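The migration test checks that each history key grows from a 2-byte to a 4-byte flush counter and that the old counter survives as the low bytes of the new one. Below is a sketch of that key rewrite, inferred from the assertions only; whether the real History.migrate() pads exactly like this is an assumption.

def widen_flush_count(key: bytes) -> bytes:
    # <hashX><2-byte flush_count>  ->  <hashX><4-byte flush_count>
    # (zero-padded so the old counter ends up in the low two bytes,
    # which is what the assertions above verify)
    hash_x, counter = key[:-2], key[-2:]
    return hash_x + b'\x00\x00' + counter

old_key = bytes.fromhex('00538b2cbe4a5f1be2dc320241')
assert widen_flush_count(old_key).hex() == '00538b2cbe4a5f1be2dc3200000241'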
|
||||
# import unittest
|
||||
# from shutil import rmtree
|
||||
# from tempfile import mkdtemp
|
||||
#
|
||||
# from lbry.wallet.server.history import History
|
||||
# from lbry.wallet.server.storage import LevelDB
|
||||
#
|
||||
#
|
||||
# # dumped from a real history database. Aside from the state, all records are <hashX><flush_count>: <value>
|
||||
# STATE_RECORD = (b'state\x00\x00', b"{'flush_count': 21497, 'comp_flush_count': -1, 'comp_cursor': -1, 'db_version': 0}")
|
||||
# UNMIGRATED_RECORDS = {
|
||||
# '00538b2cbe4a5f1be2dc320241': 'f5ed500142ee5001',
|
||||
# '00538b48def1904014880501f2': 'b9a52a01baa52a01',
|
||||
# '00538cdcf989b74de32c5100ca': 'c973870078748700',
|
||||
# '00538d42d5df44603474284ae1': 'f5d9d802',
|
||||
# '00538d42d5df44603474284ae2': '75dad802',
|
||||
# '00538ebc879dac6ddbee9e0029': '3ca42f0042a42f00',
|
||||
# '00538ed1d391327208748200bc': '804e7d00af4e7d00',
|
||||
# '00538f3de41d9e33affa0300c2': '7de8810086e88100',
|
||||
# '00539007f87792d98422c505a5': '8c5a7202445b7202',
|
||||
# '0053902cf52ee9682d633b0575': 'eb0f64026c106402',
|
||||
# '005390e05674571551632205a2': 'a13d7102e13d7102',
|
||||
# '0053914ef25a9ceed927330584': '78096902960b6902',
|
||||
# '005391768113f69548f37a01b1': 'a5b90b0114ba0b01',
|
||||
# '005391a289812669e5b44c02c2': '33da8a016cdc8a01',
|
||||
# }
|
||||
#
|
||||
#
|
||||
# class TestHistoryDBMigration(unittest.TestCase):
|
||||
# def test_migrate_flush_count_from_16_to_32_bits(self):
|
||||
# self.history = History()
|
||||
# tmpdir = mkdtemp()
|
||||
# self.addCleanup(lambda: rmtree(tmpdir))
|
||||
# LevelDB.import_module()
|
||||
# db = LevelDB(tmpdir, 'hist', True)
|
||||
# with db.write_batch() as batch:
|
||||
# for key, value in UNMIGRATED_RECORDS.items():
|
||||
# batch.put(bytes.fromhex(key), bytes.fromhex(value))
|
||||
# batch.put(*STATE_RECORD)
|
||||
# self.history.db = db
|
||||
# self.history.read_state()
|
||||
# self.assertEqual(21497, self.history.flush_count)
|
||||
# self.assertEqual(0, self.history.db_version)
|
||||
# self.assertTrue(self.history.needs_migration)
|
||||
# self.history.migrate()
|
||||
# self.assertFalse(self.history.needs_migration)
|
||||
# self.assertEqual(1, self.history.db_version)
|
||||
# for idx, (key, value) in enumerate(sorted(db.iterator())):
|
||||
# if key == b'state\x00\x00':
|
||||
# continue
|
||||
# key, counter = key[:-4], key[-4:]
|
||||
# expected_value = UNMIGRATED_RECORDS[key.hex() + counter.hex()[-4:]]
|
||||
# self.assertEqual(value.hex(), expected_value)
|
||||
#
|
||||
#
|
||||
# if __name__ == '__main__':
|
||||
# unittest.main()
|
||||
|
|
150   tests/unit/wallet/server/test_revertable.py   Normal file
@@ -0,0 +1,150 @@
|
|||
import unittest
|
||||
import tempfile
|
||||
import shutil
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertableDelete, RevertablePut, OpStackIntegrity
|
||||
from lbry.wallet.server.db.prefixes import ClaimToTXOPrefixRow, HubDB
|
||||
|
||||
|
||||
class TestRevertableOpStack(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.fake_db = {}
|
||||
self.stack = RevertableOpStack(self.fake_db.get)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
self.stack.clear()
|
||||
self.fake_db.clear()
|
||||
|
||||
def process_stack(self):
|
||||
for op in self.stack:
|
||||
if op.is_put:
|
||||
self.fake_db[op.key] = op.value
|
||||
else:
|
||||
self.fake_db.pop(op.key)
|
||||
self.stack.clear()
|
||||
|
||||
def update(self, key1: bytes, value1: bytes, key2: bytes, value2: bytes):
|
||||
self.stack.append_op(RevertableDelete(key1, value1))
|
||||
self.stack.append_op(RevertablePut(key2, value2))
|
||||
|
||||
def test_simplify(self):
|
||||
key1 = ClaimToTXOPrefixRow.pack_key(b'\x01' * 20)
|
||||
key2 = ClaimToTXOPrefixRow.pack_key(b'\x02' * 20)
|
||||
key3 = ClaimToTXOPrefixRow.pack_key(b'\x03' * 20)
|
||||
key4 = ClaimToTXOPrefixRow.pack_key(b'\x04' * 20)
|
||||
|
||||
val1 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'derp')
|
||||
val2 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'oops')
|
||||
val3 = ClaimToTXOPrefixRow.pack_value(1, 0, 1, 0, 1, False, 'other')
|
||||
|
||||
# check that we can't delete a non existent value
|
||||
with self.assertRaises(OpStackIntegrity):
|
||||
self.stack.append_op(RevertableDelete(key1, val1))
|
||||
|
||||
self.stack.append_op(RevertablePut(key1, val1))
|
||||
self.assertEqual(1, len(self.stack))
|
||||
self.stack.append_op(RevertableDelete(key1, val1))
|
||||
self.assertEqual(0, len(self.stack))
|
||||
|
||||
self.stack.append_op(RevertablePut(key1, val1))
|
||||
self.assertEqual(1, len(self.stack))
|
||||
# try to delete the wrong value
|
||||
with self.assertRaises(OpStackIntegrity):
|
||||
self.stack.append_op(RevertableDelete(key2, val2))
|
||||
|
||||
self.stack.append_op(RevertableDelete(key1, val1))
|
||||
self.assertEqual(0, len(self.stack))
|
||||
self.stack.append_op(RevertablePut(key2, val3))
|
||||
self.assertEqual(1, len(self.stack))
|
||||
|
||||
self.process_stack()
|
||||
|
||||
self.assertDictEqual({key2: val3}, self.fake_db)
|
||||
|
||||
# check that we can't put on top of the existing stored value
|
||||
with self.assertRaises(OpStackIntegrity):
|
||||
self.stack.append_op(RevertablePut(key2, val1))
|
||||
|
||||
self.assertEqual(0, len(self.stack))
|
||||
self.stack.append_op(RevertableDelete(key2, val3))
|
||||
self.assertEqual(1, len(self.stack))
|
||||
self.stack.append_op(RevertablePut(key2, val3))
|
||||
self.assertEqual(0, len(self.stack))
|
||||
|
||||
self.update(key2, val3, key2, val1)
|
||||
self.assertEqual(2, len(self.stack))
|
||||
|
||||
self.process_stack()
|
||||
self.assertDictEqual({key2: val1}, self.fake_db)
|
||||
|
||||
self.update(key2, val1, key2, val2)
|
||||
self.assertEqual(2, len(self.stack))
|
||||
self.update(key2, val2, key2, val3)
|
||||
self.update(key2, val3, key2, val2)
|
||||
self.update(key2, val2, key2, val3)
|
||||
self.update(key2, val3, key2, val2)
|
||||
with self.assertRaises(OpStackIntegrity):
|
||||
self.update(key2, val3, key2, val2)
|
||||
self.update(key2, val2, key2, val3)
|
||||
self.assertEqual(2, len(self.stack))
|
||||
self.stack.append_op(RevertableDelete(key2, val3))
|
||||
self.process_stack()
|
||||
self.assertDictEqual({}, self.fake_db)
|
||||
|
||||
self.stack.append_op(RevertablePut(key2, val3))
|
||||
self.process_stack()
|
||||
with self.assertRaises(OpStackIntegrity):
|
||||
self.update(key2, val2, key2, val2)
|
||||
self.update(key2, val3, key2, val2)
|
||||
self.assertDictEqual({key2: val3}, self.fake_db)
|
||||
undo = self.stack.get_undo_ops()
|
||||
self.process_stack()
|
||||
self.assertDictEqual({key2: val2}, self.fake_db)
|
||||
self.stack.apply_packed_undo_ops(undo)
|
||||
self.process_stack()
|
||||
self.assertDictEqual({key2: val3}, self.fake_db)
|
||||
|
||||
|
||||
class TestRevertablePrefixDB(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.tmp_dir = tempfile.mkdtemp()
|
||||
self.db = HubDB(self.tmp_dir, cache_mb=1, max_open_files=32)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
self.db.close()
|
||||
shutil.rmtree(self.tmp_dir)
|
||||
|
||||
def test_rollback(self):
|
||||
name = 'derp'
|
||||
claim_hash1 = 20 * b'\x00'
|
||||
claim_hash2 = 20 * b'\x01'
|
||||
claim_hash3 = 20 * b'\x02'
|
||||
|
||||
takeover_height = 10000000
|
||||
|
||||
self.assertIsNone(self.db.claim_takeover.get(name))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash1, takeover_height))
|
||||
self.db.commit(10000000)
|
||||
self.assertEqual(10000000, self.db.claim_takeover.get(name).height)
|
||||
|
||||
self.db.claim_takeover.stage_delete((name,), (claim_hash1, takeover_height))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 1))
|
||||
self.db.claim_takeover.stage_delete((name,), (claim_hash2, takeover_height + 1))
|
||||
self.db.commit(10000001)
|
||||
self.assertIsNone(self.db.claim_takeover.get(name))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash3, takeover_height + 2))
|
||||
self.db.commit(10000002)
|
||||
self.assertEqual(10000002, self.db.claim_takeover.get(name).height)
|
||||
|
||||
self.db.claim_takeover.stage_delete((name,), (claim_hash3, takeover_height + 2))
|
||||
self.db.claim_takeover.stage_put((name,), (claim_hash2, takeover_height + 3))
|
||||
self.db.commit(10000003)
|
||||
self.assertEqual(10000003, self.db.claim_takeover.get(name).height)
|
||||
|
||||
self.db.rollback(10000003)
|
||||
self.assertEqual(10000002, self.db.claim_takeover.get(name).height)
|
||||
self.db.rollback(10000002)
|
||||
self.assertIsNone(self.db.claim_takeover.get(name))
|
||||
self.db.rollback(10000001)
|
||||
self.assertEqual(10000000, self.db.claim_takeover.get(name).height)
|
||||
self.db.rollback(10000000)
|
||||
self.assertIsNone(self.db.claim_takeover.get(name))
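The rollback assertions above hinge on every commit(height) recording the inverse of the staged operations so a later rollback can replay them. A toy model of that undo-op mechanism follows; it is not the RevertableOpStack/HubDB implementation, only the invariant the test exercises.

class ToyUndoDB:
    def __init__(self):
        self.data = {}
        self.undo = {}          # height -> list of (op, key, value)

    def commit(self, height, puts=(), deletes=()):
        ops = []
        for key, value in deletes:
            assert self.data.get(key) == value, 'cannot delete what is not there'
            del self.data[key]
            ops.append(('put', key, value))      # inverse of a delete
        for key, value in puts:
            assert key not in self.data, 'cannot overwrite without deleting first'
            self.data[key] = value
            ops.append(('delete', key, value))   # inverse of a put
        self.undo[height] = ops

    def rollback(self, height):
        for op, key, value in reversed(self.undo.pop(height)):
            if op == 'put':
                self.data[key] = value
            else:
                del self.data[key]

db = ToyUndoDB()
db.commit(1, puts=[('name', 'claim1')])
db.commit(2, deletes=[('name', 'claim1')], puts=[('name', 'claim2')])
db.rollback(2)
assert db.data == {'name': 'claim1'}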
|
|
@@ -1,765 +0,0 @@
|
|||
import unittest
|
||||
import ecdsa
|
||||
import hashlib
|
||||
import logging
|
||||
from binascii import hexlify
|
||||
from typing import List, Tuple
|
||||
|
||||
from lbry.wallet.constants import COIN, NULL_HASH32
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.schema.result import Censor
|
||||
from lbry.wallet.server.db import writer
|
||||
from lbry.wallet.server.coin import LBCRegTest
|
||||
from lbry.wallet.server.db.trending import zscore
|
||||
from lbry.wallet.server.db.canonical import FindShortestID
|
||||
from lbry.wallet.server.block_processor import Timer
|
||||
from lbry.wallet.transaction import Transaction, Input, Output
|
||||
try:
|
||||
import reader
|
||||
except:
|
||||
from . import reader
|
||||
|
||||
|
||||
def get_output(amount=COIN, pubkey_hash=NULL_HASH32):
|
||||
return Transaction() \
|
||||
.add_outputs([Output.pay_pubkey_hash(amount, pubkey_hash)]) \
|
||||
.outputs[0]
|
||||
|
||||
|
||||
def get_input():
|
||||
return Input.spend(get_output())
|
||||
|
||||
|
||||
def get_tx():
|
||||
return Transaction().add_inputs([get_input()])
|
||||
|
||||
|
||||
def search(**constraints) -> List:
|
||||
return reader.search_claims(Censor(Censor.SEARCH), **constraints)
|
||||
|
||||
|
||||
def censored_search(**constraints) -> Tuple[List, Censor]:
|
||||
rows, _, _, _, censor = reader.search(constraints)
|
||||
return rows, censor
|
||||
|
||||
|
||||
class TestSQLDB(unittest.TestCase):
|
||||
query_timeout = 0.25
|
||||
|
||||
def setUp(self):
|
||||
self.first_sync = False
|
||||
self.daemon_height = 1
|
||||
self.coin = LBCRegTest()
|
||||
db_url = 'file:test_sqldb?mode=memory&cache=shared'
|
||||
self.sql = writer.SQLDB(self, db_url, [], [], [zscore])
|
||||
self.addCleanup(self.sql.close)
|
||||
self.sql.open()
|
||||
reader.initializer(
|
||||
logging.getLogger(__name__), db_url, 'regtest',
|
||||
self.query_timeout, block_and_filter=(
|
||||
self.sql.blocked_streams, self.sql.blocked_channels,
|
||||
self.sql.filtered_streams, self.sql.filtered_channels
|
||||
)
|
||||
)
|
||||
self.addCleanup(reader.cleanup)
|
||||
self.timer = Timer('BlockProcessor')
|
||||
self._current_height = 0
|
||||
self._txos = {}
|
||||
|
||||
def _make_tx(self, output, txi=None):
|
||||
tx = get_tx().add_outputs([output])
|
||||
if txi is not None:
|
||||
tx.add_inputs([txi])
|
||||
self._txos[output.ref.hash] = output
|
||||
return tx, tx.hash
|
||||
|
||||
def _set_channel_key(self, channel, key):
|
||||
private_key = ecdsa.SigningKey.from_string(key*32, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
|
||||
channel.private_key = private_key
|
||||
channel.claim.channel.public_key_bytes = private_key.get_verifying_key().to_der()
|
||||
channel.script.generate()
|
||||
|
||||
def get_channel(self, title, amount, name='@foo', key=b'a'):
|
||||
claim = Claim()
|
||||
claim.channel.title = title
|
||||
channel = Output.pay_claim_name_pubkey_hash(amount, name, claim, b'abc')
|
||||
self._set_channel_key(channel, key)
|
||||
return self._make_tx(channel)
|
||||
|
||||
def get_channel_update(self, channel, amount, key=b'a'):
|
||||
self._set_channel_key(channel, key)
|
||||
return self._make_tx(
|
||||
Output.pay_update_claim_pubkey_hash(
|
||||
amount, channel.claim_name, channel.claim_id, channel.claim, b'abc'
|
||||
),
|
||||
Input.spend(channel)
|
||||
)
|
||||
|
||||
def get_stream(self, title, amount, name='foo', channel=None, **kwargs):
|
||||
claim = Claim()
|
||||
claim.stream.update(title=title, **kwargs)
|
||||
result = self._make_tx(Output.pay_claim_name_pubkey_hash(amount, name, claim, b'abc'))
|
||||
if channel:
|
||||
result[0].outputs[0].sign(channel)
|
||||
result[0]._reset()
|
||||
return result
|
||||
|
||||
def get_stream_update(self, tx, amount, channel=None):
|
||||
stream = Transaction(tx[0].raw).outputs[0]
|
||||
result = self._make_tx(
|
||||
Output.pay_update_claim_pubkey_hash(
|
||||
amount, stream.claim_name, stream.claim_id, stream.claim, b'abc'
|
||||
),
|
||||
Input.spend(stream)
|
||||
)
|
||||
if channel:
|
||||
result[0].outputs[0].sign(channel)
|
||||
result[0]._reset()
|
||||
return result
|
||||
|
||||
def get_repost(self, claim_id, amount, channel):
|
||||
claim = Claim()
|
||||
claim.repost.reference.claim_id = claim_id
|
||||
result = self._make_tx(Output.pay_claim_name_pubkey_hash(amount, 'repost', claim, b'abc'))
|
||||
result[0].outputs[0].sign(channel)
|
||||
result[0]._reset()
|
||||
return result
|
||||
|
||||
def get_abandon(self, tx):
|
||||
claim = Transaction(tx[0].raw).outputs[0]
|
||||
return self._make_tx(
|
||||
Output.pay_pubkey_hash(claim.amount, b'abc'),
|
||||
Input.spend(claim)
|
||||
)
|
||||
|
||||
def get_support(self, tx, amount):
|
||||
claim = Transaction(tx[0].raw).outputs[0]
|
||||
return self._make_tx(
|
||||
Output.pay_support_pubkey_hash(
|
||||
amount, claim.claim_name, claim.claim_id, b'abc'
|
||||
)
|
||||
)
|
||||
|
||||
def get_controlling(self):
|
||||
for claim in self.sql.execute("select claim.* from claimtrie natural join claim"):
|
||||
txo = self._txos[claim.txo_hash]
|
||||
controlling = txo.claim.stream.title, claim.amount, claim.effective_amount, claim.activation_height
|
||||
return controlling
|
||||
|
||||
def get_active(self):
|
||||
controlling = self.get_controlling()
|
||||
active = []
|
||||
for claim in self.sql.execute(
|
||||
f"select * from claim where activation_height <= {self._current_height}"):
|
||||
txo = self._txos[claim.txo_hash]
|
||||
if controlling and controlling[0] == txo.claim.stream.title:
|
||||
continue
|
||||
active.append((txo.claim.stream.title, claim.amount, claim.effective_amount, claim.activation_height))
|
||||
return active
|
||||
|
||||
def get_accepted(self):
|
||||
accepted = []
|
||||
for claim in self.sql.execute(
|
||||
f"select * from claim where activation_height > {self._current_height}"):
|
||||
txo = self._txos[claim.txo_hash]
|
||||
accepted.append((txo.claim.stream.title, claim.amount, claim.effective_amount, claim.activation_height))
|
||||
return accepted
|
||||
|
||||
def advance(self, height, txs):
|
||||
self._current_height = height
|
||||
self.sql.advance_txs(height, txs, {'timestamp': 1}, self.daemon_height, self.timer)
|
||||
return [otx[0].outputs[0] for otx in txs]
|
||||
|
||||
def state(self, controlling=None, active=None, accepted=None):
|
||||
self.assertEqual(controlling, self.get_controlling())
|
||||
self.assertEqual(active or [], self.get_active())
|
||||
self.assertEqual(accepted or [], self.get_accepted())
|
||||
|
||||
|
||||
class TestClaimtrie(TestSQLDB):
|
||||
|
||||
def test_example_from_spec(self):
|
||||
# https://spec.lbry.com/#claim-activation-example
|
||||
advance, state = self.advance, self.state
|
||||
stream = self.get_stream('Claim A', 10*COIN)
|
||||
advance(13, [stream])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 10*COIN, 13),
|
||||
active=[],
|
||||
accepted=[]
|
||||
)
|
||||
advance(1001, [self.get_stream('Claim B', 20*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 10*COIN, 13),
|
||||
active=[],
|
||||
accepted=[('Claim B', 20*COIN, 0, 1031)]
|
||||
)
|
||||
advance(1010, [self.get_support(stream, 14*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 24*COIN, 13),
|
||||
active=[],
|
||||
accepted=[('Claim B', 20*COIN, 0, 1031)]
|
||||
)
|
||||
advance(1020, [self.get_stream('Claim C', 50*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 24*COIN, 13),
|
||||
active=[],
|
||||
accepted=[
|
||||
('Claim B', 20*COIN, 0, 1031),
|
||||
('Claim C', 50*COIN, 0, 1051)]
|
||||
)
|
||||
advance(1031, [])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 24*COIN, 13),
|
||||
active=[('Claim B', 20*COIN, 20*COIN, 1031)],
|
||||
accepted=[('Claim C', 50*COIN, 0, 1051)]
|
||||
)
|
||||
advance(1040, [self.get_stream('Claim D', 300*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 24*COIN, 13),
|
||||
active=[('Claim B', 20*COIN, 20*COIN, 1031)],
|
||||
accepted=[
|
||||
('Claim C', 50*COIN, 0, 1051),
|
||||
('Claim D', 300*COIN, 0, 1072)]
|
||||
)
|
||||
advance(1051, [])
|
||||
state(
|
||||
controlling=('Claim D', 300*COIN, 300*COIN, 1051),
|
||||
active=[
|
||||
('Claim A', 10*COIN, 24*COIN, 13),
|
||||
('Claim B', 20*COIN, 20*COIN, 1031),
|
||||
('Claim C', 50*COIN, 50*COIN, 1051)],
|
||||
accepted=[]
|
||||
)
|
||||
# beyond example
|
||||
advance(1052, [self.get_stream_update(stream, 290*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 290*COIN, 304*COIN, 13),
|
||||
active=[
|
||||
('Claim B', 20*COIN, 20*COIN, 1031),
|
||||
('Claim C', 50*COIN, 50*COIN, 1051),
|
||||
('Claim D', 300*COIN, 300*COIN, 1051),
|
||||
],
|
||||
accepted=[]
|
||||
)
|
||||
|
||||
def test_competing_claims_subsequent_blocks_height_wins(self):
|
||||
advance, state = self.advance, self.state
|
||||
advance(13, [self.get_stream('Claim A', 10*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 10*COIN, 13),
|
||||
active=[],
|
||||
accepted=[]
|
||||
)
|
||||
advance(14, [self.get_stream('Claim B', 10*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 10*COIN, 13),
|
||||
active=[('Claim B', 10*COIN, 10*COIN, 14)],
|
||||
accepted=[]
|
||||
)
|
||||
advance(15, [self.get_stream('Claim C', 10*COIN)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 10*COIN, 13),
|
||||
active=[
|
||||
('Claim B', 10*COIN, 10*COIN, 14),
|
||||
('Claim C', 10*COIN, 10*COIN, 15)],
|
||||
accepted=[]
|
||||
)
|
||||
|
||||
def test_competing_claims_in_single_block_position_wins(self):
|
||||
advance, state = self.advance, self.state
|
||||
stream = self.get_stream('Claim A', 10*COIN)
|
||||
stream2 = self.get_stream('Claim B', 10*COIN)
|
||||
advance(13, [stream, stream2])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 10*COIN, 13),
|
||||
active=[('Claim B', 10*COIN, 10*COIN, 13)],
|
||||
accepted=[]
|
||||
)
|
||||
|
||||
def test_competing_claims_in_single_block_effective_amount_wins(self):
|
||||
advance, state = self.advance, self.state
|
||||
stream = self.get_stream('Claim A', 10*COIN)
|
||||
stream2 = self.get_stream('Claim B', 11*COIN)
|
||||
advance(13, [stream, stream2])
|
||||
state(
|
||||
controlling=('Claim B', 11*COIN, 11*COIN, 13),
|
||||
active=[('Claim A', 10*COIN, 10*COIN, 13)],
|
||||
accepted=[]
|
||||
)
|
||||
|
||||
def test_winning_claim_deleted(self):
|
||||
advance, state = self.advance, self.state
|
||||
stream = self.get_stream('Claim A', 10*COIN)
|
||||
stream2 = self.get_stream('Claim B', 11*COIN)
|
||||
advance(13, [stream, stream2])
|
||||
state(
|
||||
controlling=('Claim B', 11*COIN, 11*COIN, 13),
|
||||
active=[('Claim A', 10*COIN, 10*COIN, 13)],
|
||||
accepted=[]
|
||||
)
|
||||
advance(14, [self.get_abandon(stream2)])
|
||||
state(
|
||||
controlling=('Claim A', 10*COIN, 10*COIN, 13),
|
||||
active=[],
|
||||
accepted=[]
|
||||
)
|
||||
|
||||
def test_winning_claim_deleted_and_new_claim_becomes_winner(self):
|
||||
advance, state = self.advance, self.state
|
||||
stream = self.get_stream('Claim A', 10*COIN)
|
||||
stream2 = self.get_stream('Claim B', 11*COIN)
|
||||
advance(13, [stream, stream2])
|
||||
state(
|
||||
controlling=('Claim B', 11*COIN, 11*COIN, 13),
|
||||
active=[('Claim A', 10*COIN, 10*COIN, 13)],
|
||||
accepted=[]
|
||||
)
|
||||
advance(15, [self.get_abandon(stream2), self.get_stream('Claim C', 12*COIN)])
|
||||
state(
|
||||
controlling=('Claim C', 12*COIN, 12*COIN, 15),
|
||||
active=[('Claim A', 10*COIN, 10*COIN, 13)],
|
||||
accepted=[]
|
||||
)
|
||||
|
||||
    def test_winning_claim_expires_and_another_takes_over(self):
        advance, state = self.advance, self.state
        advance(10, [self.get_stream('Claim A', 11*COIN)])
        advance(20, [self.get_stream('Claim B', 10*COIN)])
        state(
            controlling=('Claim A', 11*COIN, 11*COIN, 10),
            active=[('Claim B', 10*COIN, 10*COIN, 20)],
            accepted=[]
        )
        advance(262984, [])
        state(
            controlling=('Claim B', 10*COIN, 10*COIN, 20),
            active=[],
            accepted=[]
        )
        advance(262994, [])
        state(
            controlling=None,
            active=[],
            accepted=[]
        )

    def test_create_and_update_in_same_block(self):
        advance, state = self.advance, self.state
        stream = self.get_stream('Claim A', 10*COIN)
        advance(10, [stream, self.get_stream_update(stream, 11*COIN)])
        self.assertTrue(search()[0])

    def test_double_updates_in_same_block(self):
        advance, state = self.advance, self.state
        stream = self.get_stream('Claim A', 10*COIN)
        advance(10, [stream])
        update = self.get_stream_update(stream, 11*COIN)
        advance(20, [update, self.get_stream_update(update, 9*COIN)])
        self.assertTrue(search()[0])

    def test_create_and_abandon_in_same_block(self):
        advance, state = self.advance, self.state
        stream = self.get_stream('Claim A', 10*COIN)
        advance(10, [stream, self.get_abandon(stream)])
        self.assertFalse(search())

    def test_update_and_abandon_in_same_block(self):
        advance, state = self.advance, self.state
        stream = self.get_stream('Claim A', 10*COIN)
        advance(10, [stream])
        update = self.get_stream_update(stream, 11*COIN)
        advance(20, [update, self.get_abandon(update)])
        self.assertFalse(search())

    def test_create_update_and_delete_in_same_block(self):
        advance, state = self.advance, self.state
        stream = self.get_stream('Claim A', 10*COIN)
        update = self.get_stream_update(stream, 11*COIN)
        advance(10, [stream, update, self.get_abandon(update)])
        self.assertFalse(search())

    def test_support_added_and_removed_in_same_block(self):
        advance, state = self.advance, self.state
        stream = self.get_stream('Claim A', 10*COIN)
        advance(10, [stream])
        support = self.get_support(stream, COIN)
        advance(20, [support, self.get_abandon(support)])
        self.assertEqual(search()[0]['support_amount'], 0)

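    # The helper below brute-forces claim creation until the generated
    # claim_id starts with the requested hex prefix; `cached_iteration` is the
    # previously discovered iteration count, so reruns can jump straight to it
    # instead of searching again.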
    @staticmethod
    def _get_x_with_claim_id_prefix(getter, prefix, cached_iteration=None, **kwargs):
        iterations = cached_iteration+1 if cached_iteration else 100
        for i in range(cached_iteration or 1, iterations):
            stream = getter(f'claim #{i}', COIN, **kwargs)
            if stream[0].outputs[0].claim_id.startswith(prefix):
                if cached_iteration is None:
                    print(f'Found "{prefix}" in {i} iterations.')
                return stream
        if cached_iteration:
            raise ValueError(f'Failed to find "{prefix}" at cached iteration, run with None to find iteration.')
        raise ValueError(f'Failed to find "{prefix}" in {iterations} iterations, try different values.')

    def get_channel_with_claim_id_prefix(self, prefix, cached_iteration=None, **kwargs):
        return self._get_x_with_claim_id_prefix(self.get_channel, prefix, cached_iteration, **kwargs)

    def get_stream_with_claim_id_prefix(self, prefix, cached_iteration=None, **kwargs):
        return self._get_x_with_claim_id_prefix(self.get_stream, prefix, cached_iteration, **kwargs)

    def test_canonical_url_and_channel_validation(self):
        advance = self.advance

        tx_chan_a = self.get_channel_with_claim_id_prefix('a', 1, key=b'c')
        tx_chan_ab = self.get_channel_with_claim_id_prefix('ab', 72, key=b'c')
        txo_chan_a = tx_chan_a[0].outputs[0]
        txo_chan_ab = tx_chan_ab[0].outputs[0]
        advance(1, [tx_chan_a])
        advance(2, [tx_chan_ab])
        (r_ab, r_a) = search(order_by=['creation_height'], limit=2)
        self.assertEqual("@foo#a", r_a['short_url'])
        self.assertEqual("@foo#ab", r_ab['short_url'])
        self.assertIsNone(r_a['canonical_url'])
        self.assertIsNone(r_ab['canonical_url'])
        self.assertEqual(0, r_a['claims_in_channel'])
        self.assertEqual(0, r_ab['claims_in_channel'])

        tx_a = self.get_stream_with_claim_id_prefix('a', 2)
        tx_ab = self.get_stream_with_claim_id_prefix('ab', 42)
        tx_abc = self.get_stream_with_claim_id_prefix('abc', 65)
        advance(3, [tx_a])
        advance(4, [tx_ab, tx_abc])
        (r_abc, r_ab, r_a) = search(order_by=['creation_height', 'tx_position'], limit=3)
        self.assertEqual("foo#a", r_a['short_url'])
        self.assertEqual("foo#ab", r_ab['short_url'])
        self.assertEqual("foo#abc", r_abc['short_url'])
        self.assertIsNone(r_a['canonical_url'])
        self.assertIsNone(r_ab['canonical_url'])
        self.assertIsNone(r_abc['canonical_url'])

        tx_a2 = self.get_stream_with_claim_id_prefix('a', 7, channel=txo_chan_a)
        tx_ab2 = self.get_stream_with_claim_id_prefix('ab', 23, channel=txo_chan_a)
        a2_claim = tx_a2[0].outputs[0]
        ab2_claim = tx_ab2[0].outputs[0]
        advance(6, [tx_a2])
        advance(7, [tx_ab2])
        (r_ab2, r_a2) = search(order_by=['creation_height'], limit=2)
        self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
        self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url'])
        self.assertEqual("@foo#a/foo#a", r_a2['canonical_url'])
        self.assertEqual("@foo#a/foo#ab", r_ab2['canonical_url'])
        self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])

        # change channel public key, invalidating stream claim signatures
        advance(8, [self.get_channel_update(txo_chan_a, COIN, key=b'a')])
        (r_ab2, r_a2) = search(order_by=['creation_height'], limit=2)
        self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
        self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url'])
        self.assertIsNone(r_a2['canonical_url'])
        self.assertIsNone(r_ab2['canonical_url'])
        self.assertEqual(0, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])

        # reinstate previous channel public key (previous stream claim signatures become valid again)
        channel_update = self.get_channel_update(txo_chan_a, COIN, key=b'c')
        advance(9, [channel_update])
        (r_ab2, r_a2) = search(order_by=['creation_height'], limit=2)
        self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
        self.assertEqual(f"foo#{ab2_claim.claim_id[:4]}", r_ab2['short_url'])
        self.assertEqual("@foo#a/foo#a", r_a2['canonical_url'])
        self.assertEqual("@foo#a/foo#ab", r_ab2['canonical_url'])
        self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
        self.assertEqual(0, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel'])

        # change the channel of a stream
        self.assertEqual("@foo#a/foo#ab", search(claim_id=ab2_claim.claim_id, limit=1)[0]['canonical_url'])
        tx_ab2 = self.get_stream_update(tx_ab2, COIN, txo_chan_ab)
        advance(10, [tx_ab2])
        self.assertEqual("@foo#ab/foo#a", search(claim_id=ab2_claim.claim_id, limit=1)[0]['canonical_url'])
        # TODO: currently there is a bug where a stream leaving a channel does not update that channel's claims count
        self.assertEqual(2, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
        # TODO: after the bug is fixed, remove the test above and add the test below
        #self.assertEqual(1, search(claim_id=txo_chan_a.claim_id, limit=1)[0]['claims_in_channel'])
        self.assertEqual(1, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel'])

        # claim abandon updates claims_in_channel
        advance(11, [self.get_abandon(tx_ab2)])
        self.assertEqual(0, search(claim_id=txo_chan_ab.claim_id, limit=1)[0]['claims_in_channel'])

        # delete channel, invalidating stream claim signatures
        advance(12, [self.get_abandon(channel_update)])
        (r_a2,) = search(order_by=['creation_height'], limit=1)
        self.assertEqual(f"foo#{a2_claim.claim_id[:2]}", r_a2['short_url'])
        self.assertIsNone(r_a2['canonical_url'])

    def test_resolve_issue_2448(self):
        advance = self.advance

        tx_chan_a = self.get_channel_with_claim_id_prefix('a', 1, key=b'c')
        tx_chan_ab = self.get_channel_with_claim_id_prefix('ab', 72, key=b'c')
        txo_chan_a = tx_chan_a[0].outputs[0]
        txo_chan_ab = tx_chan_ab[0].outputs[0]
        advance(1, [tx_chan_a])
        advance(2, [tx_chan_ab])

        self.assertEqual(reader.resolve_url("@foo#a")['claim_hash'], txo_chan_a.claim_hash)
        self.assertEqual(reader.resolve_url("@foo#ab")['claim_hash'], txo_chan_ab.claim_hash)

        # an update increases the last height at which the channel changed
        advance(9, [self.get_channel_update(txo_chan_a, COIN, key=b'c')])

        # make sure that activation_height is used instead of height (issue #2448)
        self.assertEqual(reader.resolve_url("@foo#a")['claim_hash'], txo_chan_a.claim_hash)
        self.assertEqual(reader.resolve_url("@foo#ab")['claim_hash'], txo_chan_ab.claim_hash)

    def test_canonical_find_shortest_id(self):
        new_hash = 'abcdef0123456789beef'
        other0 = '1bcdef0123456789beef'
        other1 = 'ab1def0123456789beef'
        other2 = 'abc1ef0123456789beef'
        other3 = 'abcdef0123456789bee1'
        f = FindShortestID()
        f.step(other0, new_hash)
        self.assertEqual('#a', f.finalize())
        f.step(other1, new_hash)
        self.assertEqual('#abc', f.finalize())
        f.step(other2, new_hash)
        self.assertEqual('#abcd', f.finalize())
        f.step(other3, new_hash)
        self.assertEqual('#abcdef0123456789beef', f.finalize())
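
    # A minimal sketch (an assumption, not the real implementation) of the
    # step/finalize contract the assertions above rely on:
    #
    #     class FindShortestID:
    #         def __init__(self):
    #             self.short_id = ''
    #         def step(self, other_id, new_id):
    #             for i, (o, n) in enumerate(zip(other_id, new_id)):
    #                 if o != n:
    #                     if i + 1 > len(self.short_id):
    #                         self.short_id = new_id[:i + 1]
    #                     break
    #         def finalize(self):
    #             return '#' + self.short_id
    #
    # i.e. it tracks the shortest prefix of the new claim id that still
    # distinguishes it from every other id seen so far, returned with a
    # leading '#'.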


class TestTrending(TestSQLDB):

    def test_trending(self):
        advance, state = self.advance, self.state
        no_trend = self.get_stream('Claim A', COIN)
        downwards = self.get_stream('Claim B', COIN)
        up_small = self.get_stream('Claim C', COIN)
        up_medium = self.get_stream('Claim D', COIN)
        up_biggly = self.get_stream('Claim E', COIN)
        claims = advance(1, [up_biggly, up_medium, up_small, no_trend, downwards])
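        # simulated support schedule over seven trending windows: Claim B's
        # support shrinks every window, Claim C grows slightly, and Claims D
        # and E get an extra boost only in the final window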
        for window in range(1, 8):
            advance(zscore.TRENDING_WINDOW * window, [
                self.get_support(downwards, (20-window)*COIN),
                self.get_support(up_small, int(20+(window/10)*COIN)),
                self.get_support(up_medium, (20+(window*(2 if window == 7 else 1)))*COIN),
                self.get_support(up_biggly, (20+(window*(3 if window == 7 else 1)))*COIN),
            ])
        results = search(order_by=['trending_local'])
        self.assertEqual([c.claim_id for c in claims], [hexlify(c['claim_hash'][::-1]).decode() for c in results])
        self.assertEqual([10, 6, 2, 0, -2], [int(c['trending_local']) for c in results])
        self.assertEqual([53, 38, -32, 0, -6], [int(c['trending_global']) for c in results])
        self.assertEqual([4, 4, 2, 0, 1], [int(c['trending_group']) for c in results])
        self.assertEqual([53, 38, 2, 0, -6], [int(c['trending_mixed']) for c in results])

    def test_edge(self):
        problematic = self.get_stream('Problem', COIN)
        self.advance(1, [problematic])
        self.advance(zscore.TRENDING_WINDOW, [self.get_support(problematic, 53000000000)])
        self.advance(zscore.TRENDING_WINDOW * 2, [self.get_support(problematic, 500000000)])


@unittest.skip("filtering/blocking is applied during ES sync, this needs to be ported to an integration test")
class TestContentBlocking(TestSQLDB):

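    # The distinction exercised below: a claim reposted by a blocking channel
    # is both blocked (won't resolve) and filtered (hidden from search), while
    # a claim reposted by a filtering channel is only hidden from search and
    # can still be resolved.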
    def test_blocking_and_filtering(self):
        # content claims and channels
        tx0 = self.get_channel('A Channel', COIN, '@channel1')
        regular_channel = tx0[0].outputs[0]
        tx1 = self.get_stream('Claim One', COIN, 'claim1')
        tx2 = self.get_stream('Claim Two', COIN, 'claim2', regular_channel)
        tx3 = self.get_stream('Claim Three', COIN, 'claim3')
        self.advance(1, [tx0, tx1, tx2, tx3])
        claim1, claim2, claim3 = tx1[0].outputs[0], tx2[0].outputs[0], tx3[0].outputs[0]

        # block and filter channels
        tx0 = self.get_channel('Blocking Channel', COIN, '@block')
        tx1 = self.get_channel('Filtering Channel', COIN, '@filter')
        blocking_channel = tx0[0].outputs[0]
        filtering_channel = tx1[0].outputs[0]
        self.sql.blocking_channel_hashes.add(blocking_channel.claim_hash)
        self.sql.filtering_channel_hashes.add(filtering_channel.claim_hash)
        self.advance(2, [tx0, tx1])
        self.assertEqual({}, dict(self.sql.blocked_streams))
        self.assertEqual({}, dict(self.sql.blocked_channels))
        self.assertEqual({}, dict(self.sql.filtered_streams))
        self.assertEqual({}, dict(self.sql.filtered_channels))

        # nothing blocked
        results, _ = reader.resolve([
            claim1.claim_name, claim2.claim_name,
            claim3.claim_name, regular_channel.claim_name
        ])
        self.assertEqual(claim1.claim_hash, results[0]['claim_hash'])
        self.assertEqual(claim2.claim_hash, results[1]['claim_hash'])
        self.assertEqual(claim3.claim_hash, results[2]['claim_hash'])
        self.assertEqual(regular_channel.claim_hash, results[3]['claim_hash'])

        # nothing filtered
        results, censor = censored_search()
        self.assertEqual(6, len(results))
        self.assertEqual(0, censor.total)
        self.assertEqual({}, censor.censored)

        # claim reposted by the blocking channel gets blocked and also filtered
        repost_tx1 = self.get_repost(claim1.claim_id, COIN, blocking_channel)
        repost1 = repost_tx1[0].outputs[0]
        self.advance(3, [repost_tx1])
        self.assertEqual(
            {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.blocked_streams)
        )
        self.assertEqual({}, dict(self.sql.blocked_channels))
        self.assertEqual(
            {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.filtered_streams)
        )
        self.assertEqual({}, dict(self.sql.filtered_channels))

        # claim is blocked from results by direct repost
        results, censor = censored_search(text='Claim')
        self.assertEqual(2, len(results))
        self.assertEqual(claim2.claim_hash, results[0]['claim_hash'])
        self.assertEqual(claim3.claim_hash, results[1]['claim_hash'])
        self.assertEqual(1, censor.total)
        self.assertEqual({blocking_channel.claim_hash: 1}, censor.censored)
        results, _ = reader.resolve([claim1.claim_name])
        self.assertEqual(
            f"Resolve of 'claim1' was censored by channel with claim id '{blocking_channel.claim_id}'.",
            results[0].args[0]
        )
        results, _ = reader.resolve([
            claim2.claim_name, regular_channel.claim_name  # claim2 and channel still resolve
        ])
        self.assertEqual(claim2.claim_hash, results[0]['claim_hash'])
        self.assertEqual(regular_channel.claim_hash, results[1]['claim_hash'])

        # block claim indirectly by blocking its parent channel
        repost_tx2 = self.get_repost(regular_channel.claim_id, COIN, blocking_channel)
        repost2 = repost_tx2[0].outputs[0]
        self.advance(4, [repost_tx2])
        self.assertEqual(
            {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.blocked_streams)
        )
        self.assertEqual(
            {repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.blocked_channels)
        )
        self.assertEqual(
            {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.filtered_streams)
        )
        self.assertEqual(
            {repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.filtered_channels)
        )

        # claim in blocked channel is filtered from search and can't resolve
        results, censor = censored_search(text='Claim')
        self.assertEqual(1, len(results))
        self.assertEqual(claim3.claim_hash, results[0]['claim_hash'])
        self.assertEqual(2, censor.total)
        self.assertEqual({blocking_channel.claim_hash: 2}, censor.censored)
        results, _ = reader.resolve([
            claim2.claim_name, regular_channel.claim_name  # claim2 and channel no longer resolve
        ])
        self.assertEqual(
            f"Resolve of 'claim2' was censored by channel with claim id '{blocking_channel.claim_id}'.",
            results[0].args[0]
        )
        self.assertEqual(
            f"Resolve of '@channel1' was censored by channel with claim id '{blocking_channel.claim_id}'.",
            results[1].args[0]
        )
        results, _ = reader.resolve([claim3.claim_name])  # claim3 still resolves
        self.assertEqual(claim3.claim_hash, results[0]['claim_hash'])

        # filtered claim is only filtered and not blocked
        repost_tx3 = self.get_repost(claim3.claim_id, COIN, filtering_channel)
        repost3 = repost_tx3[0].outputs[0]
        self.advance(5, [repost_tx3])
        self.assertEqual(
            {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.blocked_streams)
        )
        self.assertEqual(
            {repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.blocked_channels)
        )
        self.assertEqual(
            {repost1.claim.repost.reference.claim_hash: blocking_channel.claim_hash,
             repost3.claim.repost.reference.claim_hash: filtering_channel.claim_hash},
            dict(self.sql.filtered_streams)
        )
        self.assertEqual(
            {repost2.claim.repost.reference.claim_hash: blocking_channel.claim_hash},
            dict(self.sql.filtered_channels)
        )

        # filtered claim doesn't show up in search but is still resolvable
        results, censor = censored_search(text='Claim')
        self.assertEqual(0, len(results))
        self.assertEqual(3, censor.total)
        self.assertEqual({blocking_channel.claim_hash: 2, filtering_channel.claim_hash: 1}, censor.censored)
        results, _ = reader.resolve([claim3.claim_name])  # claim3 still resolves
        self.assertEqual(claim3.claim_hash, results[0]['claim_hash'])

        # abandoning the reposts unblocks the content
        self.advance(6, [
            self.get_abandon(repost_tx1),
            self.get_abandon(repost_tx2),
            self.get_abandon(repost_tx3)
        ])
        self.assertEqual({}, dict(self.sql.blocked_streams))
        self.assertEqual({}, dict(self.sql.blocked_channels))
        self.assertEqual({}, dict(self.sql.filtered_streams))
        self.assertEqual({}, dict(self.sql.filtered_channels))
        results, censor = censored_search(text='Claim')
        self.assertEqual(3, len(results))
        self.assertEqual(0, censor.total)
        results, censor = censored_search()
        self.assertEqual(6, len(results))
        self.assertEqual(0, censor.total)
        results, _ = reader.resolve([
            claim1.claim_name, claim2.claim_name,
            claim3.claim_name, regular_channel.claim_name
        ])
        self.assertEqual(claim1.claim_hash, results[0]['claim_hash'])
        self.assertEqual(claim2.claim_hash, results[1]['claim_hash'])
        self.assertEqual(claim3.claim_hash, results[2]['claim_hash'])
        self.assertEqual(regular_channel.claim_hash, results[3]['claim_hash'])

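    # Filtering is applied before pagination: offset/limit operate on the
    # remaining rows, while censor.total still counts every filtered match.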
    def test_pagination(self):
        one, two, three, four, five, six, seven, filter_channel = self.advance(1, [
            self.get_stream('One', COIN),
            self.get_stream('Two', COIN),
            self.get_stream('Three', COIN),
            self.get_stream('Four', COIN),
            self.get_stream('Five', COIN),
            self.get_stream('Six', COIN),
            self.get_stream('Seven', COIN),
            self.get_channel('Filtering Channel', COIN, '@filter'),
        ])
        self.sql.filtering_channel_hashes.add(filter_channel.claim_hash)

        # nothing filtered
        results, censor = censored_search(order_by='^height', offset=1, limit=3)
        self.assertEqual(3, len(results))
        self.assertEqual(
            [two.claim_hash, three.claim_hash, four.claim_hash],
            [r['claim_hash'] for r in results]
        )
        self.assertEqual(0, censor.total)

        # content filtered
        repost1, repost2 = self.advance(2, [
            self.get_repost(one.claim_id, COIN, filter_channel),
            self.get_repost(two.claim_id, COIN, filter_channel),
        ])
        results, censor = censored_search(order_by='^height', offset=1, limit=3)
        self.assertEqual(3, len(results))
        self.assertEqual(
            [four.claim_hash, five.claim_hash, six.claim_hash],
            [r['claim_hash'] for r in results]
        )
        self.assertEqual(2, censor.total)
        self.assertEqual({filter_channel.claim_hash: 2}, censor.censored)
13
tox.ini
@ -12,11 +12,18 @@ setenv =
commands =
    orchstr8 download
    blockchain: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.blockchain {posargs}
    claims: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.claims {posargs}
    takeovers: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.takeovers {posargs}
    transactions: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.transactions {posargs}
    datanetwork: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.datanetwork {posargs}
    other: coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.other {posargs}

[testenv:blockchain_legacy_search]
[testenv:claims_legacy_search]
setenv =
    ENABLE_LEGACY_SEARCH=1
commands =
    coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.blockchain {posargs}
    coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.claims {posargs}
[testenv:takeovers_legacy_search]
setenv =
    ENABLE_LEGACY_SEARCH=1
commands =
    coverage run -p --source={envsitepackagesdir}/lbry -m unittest discover -vv integration.takeovers {posargs}
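# the *_legacy_search environments rerun the claims and takeovers suites with
# legacy search enabled (ENABLE_LEGACY_SEARCH=1); invoke one with, e.g.,
# `tox -e claims_legacy_search`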