Compare commits

..

2 commits

Author         SHA1        Message                      Date
Victor Shyba   070e35b9fc  skip public_key_id checks    2022-03-28 17:29:29 -03:00
Victor Shyba   dd7ad16bf7  revert deterministic keys    2022-03-28 16:23:24 -03:00
107 changed files with 2722 additions and 3558 deletions

View file

@@ -1,24 +1,24 @@
 name: ci
-on: ["push", "pull_request", "workflow_dispatch"]
+on: ["push", "pull_request"]
 jobs:
   lint:
     name: lint
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
         with:
-          python-version: '3.9'
+          python-version: '3.7'
       - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
           restore-keys: ${{ runner.os }}-pip-
       - run: pip install --user --upgrade pip wheel
-      - run: pip install -e .[lint]
+      - run: pip install -e .[torrent,lint]
       - run: make lint
   tests-unit:
@@ -26,31 +26,31 @@ jobs:
     strategy:
       matrix:
         os:
-          - ubuntu-20.04
+          - ubuntu-latest
           - macos-latest
           - windows-latest
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
         with:
-          python-version: '3.9'
+          python-version: '3.7'
       - name: set pip cache dir
-        shell: bash
-        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
+        id: pip-cache
+        run: echo "::set-output name=dir::$(pip cache dir)"
       - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
-          path: ${{ env.PIP_CACHE_DIR }}
+          path: ${{ steps.pip-cache.outputs.dir }}
           key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
           restore-keys: ${{ runner.os }}-pip-
       - id: os-name
-        uses: ASzc/change-string-case-action@v5
+        uses: ASzc/change-string-case-action@v1
         with:
           string: ${{ runner.os }}
-      - run: python -m pip install --user --upgrade pip wheel
+      - run: pip install --user --upgrade pip wheel
       - if: startsWith(runner.os, 'linux')
-        run: pip install -e .[test]
+        run: pip install -e .[torrent,test]
       - if: startsWith(runner.os, 'linux')
         env:
           HOME: /tmp
@@ -72,7 +72,7 @@ jobs:
   tests-integration:
     name: "tests / integration"
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     strategy:
       matrix:
         test:
@@ -81,6 +81,8 @@ jobs:
           - claims
           - takeovers
           - transactions
+          - claims_legacy_search
+          - takeovers_legacy_search
           - other
     steps:
       - name: Configure sysctl limits
@@ -93,16 +95,16 @@ jobs:
         uses: elastic/elastic-github-actions/elasticsearch@master
         with:
           stack-version: 7.12.1
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
         with:
-          python-version: '3.9'
+          python-version: '3.7'
       - if: matrix.test == 'other'
        run: |
           sudo apt-get update
           sudo apt-get install -y --no-install-recommends ffmpeg
       - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ./.tox
           key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
@@ -123,7 +125,7 @@ jobs:
   coverage:
     needs: ["tests-unit", "tests-integration"]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - name: finalize coverage report submission
         env:
@@ -138,29 +140,29 @@ jobs:
     strategy:
       matrix:
         os:
-          - ubuntu-20.04
+          - ubuntu-18.04
           - macos-latest
           - windows-latest
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
         with:
-          python-version: '3.9'
+          python-version: '3.7'
       - id: os-name
-        uses: ASzc/change-string-case-action@v5
+        uses: ASzc/change-string-case-action@v1
         with:
           string: ${{ runner.os }}
       - name: set pip cache dir
-        shell: bash
-        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
+        id: pip-cache
+        run: echo "::set-output name=dir::$(pip cache dir)"
       - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
-          path: ${{ env.PIP_CACHE_DIR }}
+          path: ${{ steps.pip-cache.outputs.dir }}
           key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
           restore-keys: ${{ runner.os }}-pip-
-      - run: pip install pyinstaller==4.6
+      - run: pip install pyinstaller==4.4
       - run: pip install -e .
       - if: startsWith(github.ref, 'refs/tags/v')
         run: python docker/set_build.py
@@ -175,7 +177,7 @@ jobs:
           pip install pywin32==301
           pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
           dist/lbrynet.exe --version
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v2
        with:
           name: lbrynet-${{ steps.os-name.outputs.lowercase }}
           path: dist/
@@ -184,7 +186,7 @@ jobs:
     name: "release"
     if: startsWith(github.ref, 'refs/tags/v')
     needs: ["build"]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v1
       - uses: actions/download-artifact@v2

View file

@@ -7,7 +7,7 @@ on:
 jobs:
   release:
     name: "slack notification"
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: LoveToKnow/slackify-markdown-action@v1.0.0
         id: markdown

View file

@@ -1,6 +1,6 @@
 The MIT License (MIT)
-Copyright (c) 2015-2022 LBRY Inc
+Copyright (c) 2015-2020 LBRY Inc
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,

View file

@@ -2,7 +2,6 @@ FROM debian:10-slim
 ARG user=lbry
 ARG projects_dir=/home/$user
-ARG db_dir=/database
 ARG DOCKER_TAG
 ARG DOCKER_COMMIT=docker
@@ -28,16 +27,12 @@ RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
 COPY . $projects_dir
 RUN chown -R $user:$user $projects_dir
-RUN mkdir -p $db_dir
-RUN chown -R $user:$user $db_dir
 USER $user
 WORKDIR $projects_dir
-RUN python3 -m pip install -U setuptools pip
 RUN make install
 RUN python3 docker/set_build.py
 RUN rm ~/.cache -rf
-VOLUME $db_dir
 ENTRYPOINT ["python3", "scripts/dht_node.py"]

File diff suppressed because one or more lines are too long

View file

@@ -1,2 +1,2 @@
-__version__ = "0.113.0"
+__version__ = "0.107.0"
 version = tuple(map(int, __version__.split('.')))  # pylint: disable=invalid-name

View file

@@ -87,8 +87,8 @@ class AbstractBlob:
         self.blob_completed_callback = blob_completed_callback
         self.blob_directory = blob_directory
         self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
-        self.verified: asyncio.Event = asyncio.Event()
-        self.writing: asyncio.Event = asyncio.Event()
+        self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
+        self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
         self.readers: typing.List[typing.BinaryIO] = []
         self.added_on = added_on or time.time()
         self.is_mine = is_mine
@@ -222,7 +222,7 @@ class AbstractBlob:
                         peer_port: typing.Optional[int] = None) -> HashBlobWriter:
         if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
             raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
-        fut = asyncio.Future()
+        fut = asyncio.Future(loop=self.loop)
         writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
         self.writers[(peer_address, peer_port)] = writer
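Note on this hunk and the similar ones below: the `loop=` keyword being re-added on the right-hand (older) side was deprecated for asyncio primitives in Python 3.8 and removed in 3.10, which is why the left-hand code dropped it. A minimal, self-contained sketch of the modern form (names here are illustrative, not taken from this diff):

    import asyncio

    async def main() -> None:
        # Python 3.10+: Event and Future bind to the running loop automatically;
        # passing loop=... as in the older code raises TypeError.
        verified = asyncio.Event()
        fut: asyncio.Future = asyncio.Future()

        verified.set()
        await verified.wait()   # returns immediately once set
        fut.set_result(None)
        await fut

    asyncio.run(main())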

View file

@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.buf = b''
         # this is here to handle the race when the downloader is closed right as response_fut gets a result
-        self.closed = asyncio.Event()
+        self.closed = asyncio.Event(loop=self.loop)
     def data_received(self, data: bytes):
         if self.connection_manager:
@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
             self.transport.write(msg)
             if self.connection_manager:
                 self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
-            response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
+            response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
             availability_response = response.get_availability_response()
             price_response = response.get_price_response()
             blob_response = response.get_blob_response()
@@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
                       f" timeout in {self.peer_timeout}"
                 log.debug(msg)
                 msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
-                await asyncio.wait_for(self.writer.finished, self.peer_timeout)
+                await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
                 # wait for the io to finish
                 await self.blob.verified.wait()
                 log.info("%s at %fMB/s", msg,
@@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         try:
             self._blob_bytes_received = 0
             self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
-            self._response_fut = asyncio.Future()
+            self._response_fut = asyncio.Future(loop=self.loop)
             return await self._download_blob()
         except OSError:
             # i'm not sure how to fix this race condition - jack
@@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
     try:
         if not connected_protocol:
             await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
-                                   peer_connect_timeout)
+                                   peer_connect_timeout, loop=loop)
             connected_protocol = protocol
         if blob is None or blob.get_is_verified() or not blob.is_writeable():
             # blob is None happens when we are just opening a connection

View file

@@ -30,7 +30,7 @@ class BlobDownloader:
         self.failures: typing.Dict['KademliaPeer', int] = {}
         self.connection_failures: typing.Set['KademliaPeer'] = set()
         self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
-        self.is_running = asyncio.Event()
+        self.is_running = asyncio.Event(loop=self.loop)
     def should_race_continue(self, blob: 'AbstractBlob'):
         max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@@ -64,8 +64,8 @@ class BlobDownloader:
         self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
     async def new_peer_or_finished(self):
-        active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
-        await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
+        active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
+        await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
     def cleanup_active(self):
         if not self.active_connections and not self.connections:
@@ -126,7 +126,7 @@ class BlobDownloader:
 async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
                         blob_hash: str) -> 'AbstractBlob':
-    search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
+    search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
     search_queue.put_nowait(blob_hash)
     peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
     fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
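The left-hand change in new_peer_or_finished wraps the sleep in asyncio.create_task(...) because asyncio.wait() stopped accepting bare coroutines (deprecated since Python 3.8, an error from 3.11) and also lost its loop= parameter. A small generic sketch of that pattern, with illustrative names rather than the downloader's own:

    import asyncio

    async def worker(delay: float) -> str:
        await asyncio.sleep(delay)
        return f"done after {delay}s"

    async def main() -> None:
        # asyncio.wait() requires Task/Future objects, not raw coroutines,
        # so wrap each coroutine with create_task() first.
        tasks = [asyncio.create_task(worker(d)) for d in (0.1, 0.5)]
        done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        print(len(done), "finished,", len(pending), "still running")
        for task in pending:
            task.cancel()

    asyncio.run(main())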

View file

@@ -1,7 +1,6 @@
 import asyncio
 import binascii
 import logging
-import socket
 import typing
 from json.decoder import JSONDecodeError
 from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
@@ -25,19 +24,19 @@ class BlobServerProtocol(asyncio.Protocol):
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event()
+        self.started_listening = asyncio.Event(loop=self.loop)
         self.buf = b''
         self.transport: typing.Optional[asyncio.Transport] = None
         self.lbrycrd_address = lbrycrd_address
         self.peer_address_and_port: typing.Optional[str] = None
-        self.started_transfer = asyncio.Event()
-        self.transfer_finished = asyncio.Event()
+        self.started_transfer = asyncio.Event(loop=self.loop)
+        self.transfer_finished = asyncio.Event(loop=self.loop)
         self.close_on_idle_task: typing.Optional[asyncio.Task] = None
     async def close_on_idle(self):
         while self.transport:
             try:
-                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
+                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
             except asyncio.TimeoutError:
                 log.debug("closing idle connection from %s", self.peer_address_and_port)
                 return self.close()
@@ -101,7 +100,7 @@ class BlobServerProtocol(asyncio.Protocol):
             log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
             self.started_transfer.set()
             try:
-                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
+                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
                 if sent and sent > 0:
                     self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
                     log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
@@ -138,7 +137,7 @@ class BlobServerProtocol(asyncio.Protocol):
             try:
                 request = BlobRequest.deserialize(self.buf + data)
                 self.buf = remainder
-            except (UnicodeDecodeError, JSONDecodeError):
+            except JSONDecodeError:
                 log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
                           len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
                 self.close()
@@ -157,7 +156,7 @@ class BlobServer:
         self.loop = loop
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event()
+        self.started_listening = asyncio.Event(loop=self.loop)
         self.lbrycrd_address = lbrycrd_address
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
@@ -168,13 +167,6 @@ class BlobServer:
             raise Exception("already running")
         async def _start_server():
-            # checking if the port is in use
-            # thx https://stackoverflow.com/a/52872579
-            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-                if s.connect_ex(('localhost', port)) == 0:
-                    # the port is already in use!
-                    log.error("Failed to bind TCP %s:%d", interface, port)
             server = await self.loop.create_server(
                 lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
                                                    self.idle_timeout, self.transfer_timeout),

View file

@@ -624,10 +624,6 @@ class Config(CLIConfig):
         "will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
         "use.", 2
     )
-    is_bootstrap_node = Toggle(
-        "When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
-        "add as many peers as possible and better help first-runs.", False
-    )
     # protocol timeouts
     download_timeout = Float("Cumulative timeout for a stream to begin downloading before giving up", 30.0)
@@ -685,14 +681,6 @@ class Config(CLIConfig):
         ('cdn.reflector.lbry.com', 5567)
     ])
-    tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
-        ('tracker.lbry.com', 9252),
-        ('tracker.lbry.grin.io', 9252),
-        ('tracker.lbry.pigg.es', 9252),
-        ('tracker.lizard.technology', 9252),
-        ('s1.lbry.network', 9252),
-    ])
     lbryum_servers = Servers("SPV wallet servers", [
         ('spv11.lbry.com', 50001),
         ('spv12.lbry.com', 50001),
@@ -703,20 +691,14 @@ class Config(CLIConfig):
         ('spv17.lbry.com', 50001),
         ('spv18.lbry.com', 50001),
         ('spv19.lbry.com', 50001),
-        ('hub.lbry.grin.io', 50001),
-        ('hub.lizard.technology', 50001),
-        ('s1.lbry.network', 50001),
     ])
     known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
         ('dht.lbry.grin.io', 4444),  # Grin
         ('dht.lbry.madiator.com', 4444),  # Madiator
-        ('dht.lbry.pigg.es', 4444),  # Pigges
         ('lbrynet1.lbry.com', 4444),  # US EAST
         ('lbrynet2.lbry.com', 4444),  # US WEST
         ('lbrynet3.lbry.com', 4444),  # EU
-        ('lbrynet4.lbry.com', 4444),  # ASIA
-        ('dht.lizard.technology', 4444),  # Jack
-        ('s2.lbry.network', 4444),
+        ('lbrynet4.lbry.com', 4444)  # ASIA
     ])
     # blockchain

View file

@@ -67,7 +67,7 @@ class ConnectionManager:
         while True:
             last = time.perf_counter()
-            await asyncio.sleep(0.1)
+            await asyncio.sleep(0.1, loop=self.loop)
             self._status['incoming_bps'].clear()
             self._status['outgoing_bps'].clear()
             now = time.perf_counter()

View file

@@ -42,13 +42,15 @@ class BlobAnnouncer:
                 log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
             except Exception as err:
                 self.announcements_sent_metric.labels(peers=0, error=True).inc()
+                if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
+                    raise err
                 log.warning("error announcing %s: %s", blob_hash[:8], str(err))
     async def _announce(self, batch_size: typing.Optional[int] = 10):
         while batch_size:
             if not self.node.joined.is_set():
                 await self.node.joined.wait()
-            await asyncio.sleep(60)
+            await asyncio.sleep(60, loop=self.loop)
             if not self.node.protocol.routing_table.get_peers():
                 log.warning("No peers in DHT, announce round skipped")
                 continue
@@ -57,7 +59,7 @@ class BlobAnnouncer:
             log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
             while len(self.announce_queue) > 0:
                 log.info("%i blobs to announce", len(self.announce_queue))
-                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
+                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)], loop=self.loop)
                 announced = list(filter(None, self.announced))
                 if announced:
                     await self.storage.update_last_announced_blobs(announced)
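The two added lines re-raise asyncio.CancelledError out of the broad `except Exception` handler: on Python 3.7 CancelledError still inherits from Exception, so without the check a cancelled announce task would be swallowed and logged as an ordinary error (from 3.8 it derives from BaseException and the check becomes redundant, hence the TODO). A minimal illustration of the pattern, not the announcer's real logic:

    import asyncio

    async def announce_forever() -> None:
        while True:
            try:
                await asyncio.sleep(3600)  # stand-in for the real announce work
            except Exception as err:
                # On Python < 3.8 CancelledError is an Exception subclass, so it
                # would land here; re-raise it so cancellation still propagates.
                if isinstance(err, asyncio.CancelledError):
                    raise
                print("announce failed:", err)

    async def main() -> None:
        task = asyncio.create_task(announce_forever())
        await asyncio.sleep(0.1)
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            print("task cancelled cleanly")

    asyncio.run(main())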

View file

@@ -5,7 +5,7 @@ import socket
 from prometheus_client import Gauge
-from lbry.utils import aclosing, resolve_host
+from lbry.utils import resolve_host
 from lbry.dht import constants
 from lbry.dht.peer import make_kademlia_peer
 from lbry.dht.protocol.distance import Distance
@@ -30,14 +30,14 @@ class Node:
     )
     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
                  internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
                  storage: typing.Optional['SQLiteStorage'] = None):
         self.loop = loop
         self.internal_udp_port = internal_udp_port
         self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
-                                         split_buckets_under_index, is_bootstrap_node)
+                                         split_buckets_under_index)
         self.listening_port: asyncio.DatagramTransport = None
-        self.joined = asyncio.Event()
+        self.joined = asyncio.Event(loop=self.loop)
         self._join_task: asyncio.Task = None
         self._refresh_task: asyncio.Task = None
         self._storage = storage
@@ -70,6 +70,13 @@ class Node:
             # get ids falling in the midpoint of each bucket that hasn't been recently updated
             node_ids = self.protocol.routing_table.get_refresh_list(0, True)
+            # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
+            # populate/split the buckets further
+            buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
+            if buckets_with_contacts <= 3:
+                for i in range(buckets_with_contacts):
+                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
+                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
             if self.protocol.routing_table.get_peers():
                 # if we have node ids to look up, perform the iterative search until we have k results
@@ -79,7 +86,7 @@ class Node:
             else:
                 if force_once:
                     break
-                fut = asyncio.Future()
+                fut = asyncio.Future(loop=self.loop)
                 self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
                 await fut
                 continue
@@ -93,7 +100,7 @@ class Node:
             if force_once:
                 break
-            fut = asyncio.Future()
+            fut = asyncio.Future(loop=self.loop)
             self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
             await fut
@@ -108,7 +115,7 @@ class Node:
         for peer in peers:
             log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
         stored_to_tup = await asyncio.gather(
-            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
+            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
         )
         stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
         if stored_to:
@@ -182,35 +189,36 @@ class Node:
                     for address, udp_port in known_node_urls or []
                 ]))
             except socket.gaierror:
-                await asyncio.sleep(30)
+                await asyncio.sleep(30, loop=self.loop)
                 continue
             self.protocol.peer_manager.reset()
             self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
             await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)
-            await asyncio.sleep(1)
+            await asyncio.sleep(1, loop=self.loop)
     def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
         self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
     def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
                                   max_results: int = constants.K) -> IterativeNodeFinder:
-        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
-        return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)
+        return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
+                                   key, max_results, None, shortlist)
     def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
                                    max_results: int = -1) -> IterativeValueFinder:
-        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
-        return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)
+        return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
+                                    key, max_results, None, shortlist)
     async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
                           shortlist: typing.Optional[typing.List['KademliaPeer']] = None
                           ) -> typing.List['KademliaPeer']:
         peers = []
-        async with aclosing(self.get_iterative_node_finder(
-                node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
-            async for iteration_peers in node_finder:
+        async for iteration_peers in self.get_iterative_node_finder(
+                node_id, shortlist=shortlist, max_results=max_results):
             peers.extend(iteration_peers)
         distance = Distance(node_id)
         peers.sort(key=lambda peer: distance(peer.node_id))
@@ -237,8 +245,8 @@ class Node:
         # prioritize peers who reply to a dht ping first
         # this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
-        async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
-            async for results in value_finder:
+        async for results in self.get_iterative_value_finder(bytes.fromhex(blob_hash)):
             to_put = []
             for peer in results:
                 if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
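The aclosing(...) wrapper removed on the right-hand side (imported from lbry.utils, equivalent to contextlib.aclosing in Python 3.10+) guarantees that the async generator's aclose() runs even when the consumer leaves the loop early; the older code iterates the finder directly. A small generic sketch of the pattern, not lbry's actual finder API:

    import asyncio
    from contextlib import aclosing  # Python 3.10+; lbry.utils ships an equivalent helper

    async def find_peers():
        try:
            for i in range(100):
                await asyncio.sleep(0)
                yield [f"peer-{i}"]       # illustrative payload
        finally:
            print("generator cleanup ran")  # e.g. cancel outstanding probes

    async def main() -> None:
        async with aclosing(find_peers()) as finder:
            async for peers in finder:
                if peers == ["peer-2"]:
                    break  # early exit still runs the finally block via aclose()

    asyncio.run(main())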

View file

@@ -100,9 +100,6 @@ class PeerManager:
         self._node_id_reverse_mapping[node_id] = (address, udp_port)
         self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
-    def get_node_id_for_endpoint(self, address, port):
-        return self._node_id_mapping.get((address, port))
     def prune(self):  # TODO: periodically call this
         now = self._loop.time()
         to_pop = []
@@ -153,8 +150,7 @@ class PeerManager:
     def peer_is_good(self, peer: 'KademliaPeer'):
         return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)
-def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
+    def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
         node_id, address, tcp_port = decode_compact_address(compact_address)
         return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)

View file

@@ -1,17 +1,17 @@
 import asyncio
 from itertools import chain
 from collections import defaultdict, OrderedDict
-from collections.abc import AsyncIterator
 import typing
 import logging
 from typing import TYPE_CHECKING
 from lbry.dht import constants
 from lbry.dht.error import RemoteException, TransportNotConnected
 from lbry.dht.protocol.distance import Distance
-from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
+from lbry.dht.peer import make_kademlia_peer
 from lbry.dht.serialization.datagram import PAGE_KEY
 if TYPE_CHECKING:
+    from lbry.dht.protocol.routing_table import TreeRoutingTable
     from lbry.dht.protocol.protocol import KademliaProtocol
     from lbry.dht.peer import PeerManager, KademliaPeer
@@ -26,15 +26,6 @@ class FindResponse:
     def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
         raise NotImplementedError()
-    def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
-        for contact_triple in self.get_close_triples():
-            node_id, address, udp_port = contact_triple
-            try:
-                yield make_kademlia_peer(node_id, address, udp_port)
-            except ValueError:
-                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
-                            peer_info.udp_port, address, udp_port)
 class FindNodeResponse(FindResponse):
     def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
@@ -65,31 +56,49 @@
         return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
-class IterativeFinder(AsyncIterator):
-    def __init__(self, loop: asyncio.AbstractEventLoop,
-                 protocol: 'KademliaProtocol', key: bytes,
+def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
+                  shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
+    """
+    If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
+    :param routing_table: a TreeRoutingTable
+    :param key: a 48 byte hash
+    :param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
+                      peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
+    """
+    if len(key) != constants.HASH_LENGTH:
+        raise ValueError("invalid key length: %i" % len(key))
+    return shortlist or routing_table.find_close_peers(key)
+class IterativeFinder:
+    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
+                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
                  max_results: typing.Optional[int] = constants.K,
+                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
         if len(key) != constants.HASH_LENGTH:
             raise ValueError("invalid key length: %i" % len(key))
         self.loop = loop
-        self.peer_manager = protocol.peer_manager
+        self.peer_manager = peer_manager
+        self.routing_table = routing_table
         self.protocol = protocol
         self.key = key
         self.max_results = max(constants.K, max_results)
+        self.exclude = exclude or []
         self.active: typing.Dict['KademliaPeer', int] = OrderedDict()  # peer: distance, sorted
         self.contacted: typing.Set['KademliaPeer'] = set()
         self.distance = Distance(key)
-        self.iteration_queue = asyncio.Queue()
+        self.iteration_queue = asyncio.Queue(loop=self.loop)
         self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
         self.iteration_count = 0
         self.running = False
         self.tasks: typing.List[asyncio.Task] = []
-        for peer in shortlist:
+        for peer in get_shortlist(routing_table, key, shortlist):
             if peer.node_id:
                 self._add_active(peer, force=True)
             else:
@@ -134,10 +143,15 @@
     async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
         self._add_active(peer)
-        for new_peer in response.get_close_kademlia_peers(peer):
-            self._add_active(new_peer)
+        for contact_triple in response.get_close_triples():
+            node_id, address, udp_port = contact_triple
+            try:
+                self._add_active(make_kademlia_peer(node_id, address, udp_port))
+            except ValueError:
+                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
+                            peer.udp_port, address, udp_port)
         self.check_result_ready(response)
-        self._log_state(reason="check result")
+        self._log_state()
     def _reset_closest(self, peer):
         if peer in self.active:
@@ -149,17 +163,12 @@
         except asyncio.TimeoutError:
             self._reset_closest(peer)
             return
-        except asyncio.CancelledError:
-            log.debug("%s[%x] cancelled probe",
-                      type(self).__name__, id(self))
-            raise
         except ValueError as err:
             log.warning(str(err))
             self._reset_closest(peer)
             return
         except TransportNotConnected:
-            await self._aclose(reason="not connected")
-            return
+            return self.aclose()
         except RemoteException:
             self._reset_closest(peer)
             return
@@ -173,9 +182,7 @@
         added = 0
         for index, peer in enumerate(self.active.keys()):
             if index == 0:
-                log.debug("%s[%x] closest to probe: %s",
-                          type(self).__name__, id(self),
-                          peer.node_id.hex()[:8])
+                log.debug("closest to probe: %s", peer.node_id.hex()[:8])
             if peer in self.contacted:
                 continue
             if len(self.running_probes) >= constants.ALPHA:
@@ -183,19 +190,17 @@
             if index > (constants.K + len(self.running_probes)):
                 break
             origin_address = (peer.address, peer.udp_port)
+            if origin_address in self.exclude:
+                continue
             if peer.node_id == self.protocol.node_id:
                 continue
             if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
                 continue
             self._schedule_probe(peer)
             added += 1
-            log.debug("%s[%x] running %d probes for key %s",
-                      type(self).__name__, id(self),
-                      len(self.running_probes), self.key.hex()[:8])
+            log.debug("running %d probes for key %s", len(self.running_probes), self.key.hex()[:8])
         if not added and not self.running_probes:
-            log.debug("%s[%x] search for %s exhausted",
-                      type(self).__name__, id(self),
-                      self.key.hex()[:8])
+            log.debug("search for %s exhausted", self.key.hex()[:8])
             self.search_exhausted()
     def _schedule_probe(self, peer: 'KademliaPeer'):
@@ -211,11 +216,9 @@
         t.add_done_callback(callback)
         self.running_probes[peer] = t
-    def _log_state(self, reason="?"):
-        log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
-                  type(self).__name__, id(self), self.key.hex()[:8],
-                  reason, len(self.active), len(self.contacted),
-                  self.iteration_count, self.iteration_queue.qsize())
+    def _log_state(self):
+        log.debug("[%s] check result: %i active nodes %i contacted",
+                  self.key.hex()[:8], len(self.active), len(self.contacted))
     def __aiter__(self):
         if self.running:
@@ -234,18 +237,11 @@
                 raise StopAsyncIteration
             self.iteration_count += 1
             return result
-        except asyncio.CancelledError:
-            await self._aclose(reason="cancelled")
-            raise
-        except StopAsyncIteration:
-            await self._aclose(reason="no more results")
+        except (asyncio.CancelledError, StopAsyncIteration):
+            self.loop.call_soon(self.aclose)
             raise
-    async def _aclose(self, reason="?"):
-        log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
-                  type(self).__name__, id(self), self.key.hex()[:8],
-                  reason, len(self.active), len(self.contacted),
-                  self.iteration_count, self.iteration_queue.qsize())
+    def aclose(self):
         self.running = False
         self.iteration_queue.put_nowait(None)
         for task in chain(self.tasks, self.running_probes.values()):
@@ -253,18 +249,15 @@
         self.tasks.clear()
         self.running_probes.clear()
-    async def aclose(self):
-        if self.running:
-            await self._aclose(reason="aclose")
-        log.debug("%s[%x] [%s] async close completed",
-                  type(self).__name__, id(self), self.key.hex()[:8])
 class IterativeNodeFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop,
-                 protocol: 'KademliaProtocol', key: bytes,
+    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
+                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
                  max_results: typing.Optional[int] = constants.K,
+                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, protocol, key, max_results, shortlist)
+        super().__init__(loop, peer_manager, routing_table, protocol, key, max_results, exclude,
+                         shortlist)
         self.yielded_peers: typing.Set['KademliaPeer'] = set()
     async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
@@ -300,11 +293,13 @@ class IterativeNodeFinder(IterativeFinder):
 class IterativeValueFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop,
-                 protocol: 'KademliaProtocol', key: bytes,
+    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
+                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
                  max_results: typing.Optional[int] = constants.K,
+                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, protocol, key, max_results, shortlist)
+        super().__init__(loop, peer_manager, routing_table, protocol, key, max_results, exclude,
+                         shortlist)
         self.blob_peers: typing.Set['KademliaPeer'] = set()
         # this tracks the index of the most recent page we requested from each peer
         self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
@@ -323,7 +318,7 @@ class IterativeValueFinder(IterativeFinder):
                 decoded_peers = set()
                 for compact_addr in parsed.found_compact_addresses:
                     try:
-                        decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
+                        decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
                     except ValueError:
                         log.warning("misbehaving peer %s:%i returned invalid peer for blob",
                                     peer.address, peer.udp_port)
@@ -345,7 +340,7 @@ class IterativeValueFinder(IterativeFinder):
     def check_result_ready(self, response: FindValueResponse):
         if response.found:
-            blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
+            blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
                           for compact_addr in response.found_compact_addresses]
             to_yield = []
             for blob_peer in blob_peers:

View file

@ -218,10 +218,6 @@ class PingQueue:
def running(self): def running(self):
return self._running return self._running
@property
def busy(self):
return self._running and (any(self._running_pings) or any(self._pending_contacts))
def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None): def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
delay = delay if delay is not None else self._default_delay delay = delay if delay is not None else self._default_delay
now = self._loop.time() now = self._loop.time()
@ -233,7 +229,7 @@ class PingQueue:
async def ping_task(): async def ping_task():
try: try:
if self._protocol.peer_manager.peer_is_good(peer): if self._protocol.peer_manager.peer_is_good(peer):
if not self._protocol.routing_table.get_peer(peer.node_id): if peer not in self._protocol.routing_table.get_peers():
self._protocol.add_peer(peer) self._protocol.add_peer(peer)
return return
await self._protocol.get_rpc_peer(peer).ping() await self._protocol.get_rpc_peer(peer).ping()
@ -253,7 +249,7 @@ class PingQueue:
del self._pending_contacts[peer] del self._pending_contacts[peer]
self.maybe_ping(peer) self.maybe_ping(peer)
break break
await asyncio.sleep(1) await asyncio.sleep(1, loop=self._loop)
def start(self): def start(self):
assert not self._running assert not self._running
@ -298,7 +294,7 @@ class KademliaProtocol(DatagramProtocol):
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str, def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT, udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False): split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
self.peer_manager = peer_manager self.peer_manager = peer_manager
self.loop = loop self.loop = loop
self.node_id = node_id self.node_id = node_id
@ -313,16 +309,15 @@ class KademliaProtocol(DatagramProtocol):
self.transport: DatagramTransport = None self.transport: DatagramTransport = None
self.old_token_secret = constants.generate_id() self.old_token_secret = constants.generate_id()
self.token_secret = constants.generate_id() self.token_secret = constants.generate_id()
self.routing_table = TreeRoutingTable( self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
self.data_store = DictDataStore(self.loop, self.peer_manager) self.data_store = DictDataStore(self.loop, self.peer_manager)
self.ping_queue = PingQueue(self.loop, self) self.ping_queue = PingQueue(self.loop, self)
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port) self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
self.rpc_timeout = rpc_timeout self.rpc_timeout = rpc_timeout
self._split_lock = asyncio.Lock() self._split_lock = asyncio.Lock(loop=self.loop)
self._to_remove: typing.Set['KademliaPeer'] = set() self._to_remove: typing.Set['KademliaPeer'] = set()
self._to_add: typing.Set['KademliaPeer'] = set() self._to_add: typing.Set['KademliaPeer'] = set()
self._wakeup_routing_task = asyncio.Event() self._wakeup_routing_task = asyncio.Event(loop=self.loop)
self.maintaing_routing_task: typing.Optional[asyncio.Task] = None self.maintaing_routing_task: typing.Optional[asyncio.Task] = None
@functools.lru_cache(128) @functools.lru_cache(128)
@ -361,10 +356,72 @@ class KademliaProtocol(DatagramProtocol):
return args, {} return args, {}
async def _add_peer(self, peer: 'KademliaPeer'): async def _add_peer(self, peer: 'KademliaPeer'):
async def probe(some_peer: 'KademliaPeer'): if not peer.node_id:
rpc_peer = self.get_rpc_peer(some_peer) log.warning("Tried adding a peer with no node id!")
await rpc_peer.ping() return False
return await self.routing_table.add_peer(peer, probe) for my_peer in self.routing_table.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.routing_table.remove_peer(my_peer)
self.routing_table.join_buckets()
bucket_index = self.routing_table.kbucket_index(peer.node_id)
if self.routing_table.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self.routing_table.should_split(bucket_index, peer.node_id):
self.routing_table.split_bucket(bucket_index)
# Retry the insertion attempt
result = await self._add_peer(peer)
self.routing_table.join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self.peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self.loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.routing_table.buckets[bucket_index].peers[0]
last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self.loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
to_replace_rpc = self.get_rpc_peer(to_replace)
await to_replace_rpc.ping()
return False
except asyncio.TimeoutError:
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.routing_table.buckets[bucket_index]:
self.routing_table.buckets[bucket_index].remove_peer(to_replace)
return await self._add_peer(peer)
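The comment block above states the eviction policy in prose (Kademlia sections 2.2 and 2.4 plus the BEP 0005 notion of good, questionable and bad nodes). As a reading aid, here is a small self-contained sketch of that classification; the 15-minute window comes from the text above, while the function name, arguments and the two-strike failure rule are illustrative assumptions rather than code from this branch.

import time

GOOD_WINDOW = 15 * 60  # "within the last 15 minutes", per the comment above

def classify_peer(last_replied=None, last_queried_us=None, failures_in_a_row=0, now=None):
    """Rough good/questionable/bad buckets as described above; illustrative only."""
    now = time.time() if now is None else now
    if failures_in_a_row >= 2:                      # failed multiple queries in a row
        return "bad"
    replied_recently = last_replied is not None and now - last_replied < GOOD_WINDOW
    queried_recently = last_queried_us is not None and now - last_queried_us < GOOD_WINDOW
    if replied_recently or (last_replied is not None and queried_recently):
        return "good"
    return "questionable" if last_replied is not None else "unknown"

# When a bucket is full, the "bad"/"unknown" peer that replied least recently is pinged
# first; if every peer is fresh, the head (least recently seen) contact is pinged instead,
# and the new contact is dropped whenever the pinged peer answers.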
     def add_peer(self, peer: 'KademliaPeer'):
         if peer.node_id == self.node_id:
@@ -382,10 +439,11 @@ class KademliaProtocol(DatagramProtocol):
                 async with self._split_lock:
                     peer = self._to_remove.pop()
                     self.routing_table.remove_peer(peer)
+                    self.routing_table.join_buckets()
             while self._to_add:
                 async with self._split_lock:
                     await self._add_peer(self._to_add.pop())
-            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
+            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
             self._wakeup_routing_task.clear()

     def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
@@ -424,8 +482,9 @@ class KademliaProtocol(DatagramProtocol):
         # This is an RPC method request
         self.received_request_metric.labels(method=request_datagram.method).inc()
         self.peer_manager.report_last_requested(address[0], address[1])
+        try:
             peer = self.routing_table.get_peer(request_datagram.node_id)
-        if not peer:
+        except IndexError:
             try:
                 peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
             except ValueError as err:


@@ -8,7 +8,6 @@ from prometheus_client import Gauge

 from lbry import utils
 from lbry.dht import constants
-from lbry.dht.error import RemoteException
 from lbry.dht.protocol.distance import Distance

 if typing.TYPE_CHECKING:
     from lbry.dht.peer import KademliaPeer, PeerManager
@@ -29,8 +28,7 @@ class KBucket:
         namespace="dht_node", labelnames=("amount",)
     )

-    def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
-                 node_id: bytes, capacity: int = constants.K):
+    def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
         """
         @param range_min: The lower boundary for the range in the n-bit ID
         space covered by this k-bucket
@@ -38,12 +36,12 @@ class KBucket:
         covered by this k-bucket
         """
         self._peer_manager = peer_manager
+        self.last_accessed = 0
         self.range_min = range_min
         self.range_max = range_max
         self.peers: typing.List['KademliaPeer'] = []
         self._node_id = node_id
         self._distance_to_self = Distance(node_id)
-        self.capacity = capacity
     def add_peer(self, peer: 'KademliaPeer') -> bool:
         """ Add contact to _contact list in the right order. This will move the
@@ -70,7 +68,7 @@ class KBucket:
                 self.peers.remove(local_peer)
                 self.peers.append(peer)
                 return True
-        if len(self.peers) < self.capacity:
+        if len(self.peers) < constants.K:
             self.peers.append(peer)
             self.peer_in_routing_table_metric.labels("global").inc()
             bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
@@ -78,11 +76,13 @@ class KBucket:
             return True
         else:
             return False
+            # raise BucketFull("No space in bucket to insert contact")

     def get_peer(self, node_id: bytes) -> 'KademliaPeer':
         for peer in self.peers:
             if peer.node_id == node_id:
                 return peer
+        raise IndexError(node_id)
     def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
         """ Returns a list containing up to the first count number of contacts
@@ -179,13 +179,6 @@ class TreeRoutingTable:
     version of the Kademlia paper, in section 2.4. It does, however, use the
     ping RPC-based k-bucket eviction algorithm described in section 2.2 of
     that paper.
-
-    BOOTSTRAP MODE: if set to True, we always add all peers. This is so a
-    bootstrap node does not get a bias towards its own node id and replies are
-    the best it can provide (joining peer knows its neighbors immediately).
-    Over time, this will need to be optimized so we use the disk as holding
-    everything in memory won't be feasible anymore.
-    See: https://github.com/bittorrent/bootstrap-dht
     """

     bucket_in_routing_table_metric = Gauge(
         "buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
@@ -193,22 +186,21 @@ class TreeRoutingTable:
     )

     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
         self._loop = loop
         self._peer_manager = peer_manager
         self._parent_node_id = parent_node_id
         self._split_buckets_under_index = split_buckets_under_index
         self.buckets: typing.List[KBucket] = [
             KBucket(
-                self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
-                capacity=1 << 32 if is_bootstrap_node else constants.K
+                self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id
             )
         ]

     def get_peers(self) -> typing.List['KademliaPeer']:
         return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))
-    def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
+    def should_split(self, bucket_index: int, to_add: bytes) -> bool:
         # https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
         if bucket_index < self._split_buckets_under_index:
             return True
@@ -233,32 +225,39 @@ class TreeRoutingTable:
         return []

     def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
-        return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
+        """
+        @raise IndexError: No contact with the specified contact ID is known
+        by this node
+        """
+        return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
     def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
+        bucket_index = start_index
         refresh_ids = []
-        for offset, _ in enumerate(self.buckets[start_index:]):
-            refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
-        # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
-        # populate/split the buckets further
-        buckets_with_contacts = self.buckets_with_contacts()
-        if buckets_with_contacts <= 3:
-            for i in range(buckets_with_contacts):
-                refresh_ids.append(self._random_id_in_bucket_range(i))
-                refresh_ids.append(self._random_id_in_bucket_range(i))
+        now = int(self._loop.time())
+        for bucket in self.buckets[start_index:]:
+            if force or now - bucket.last_accessed >= constants.REFRESH_INTERVAL:
+                to_search = self.midpoint_id_in_bucket_range(bucket_index)
+                refresh_ids.append(to_search)
+            bucket_index += 1
         return refresh_ids
     def remove_peer(self, peer: 'KademliaPeer') -> None:
         if not peer.node_id:
             return
-        bucket_index = self._kbucket_index(peer.node_id)
+        bucket_index = self.kbucket_index(peer.node_id)
         try:
             self.buckets[bucket_index].remove_peer(peer)
-            self._join_buckets()
         except ValueError:
             return

-    def _kbucket_index(self, key: bytes) -> int:
+    def touch_kbucket(self, key: bytes) -> None:
+        self.touch_kbucket_by_index(self.kbucket_index(key))
+
+    def touch_kbucket_by_index(self, bucket_index: int):
+        self.buckets[bucket_index].last_accessed = int(self._loop.time())
+
+    def kbucket_index(self, key: bytes) -> int:
         i = 0
         for bucket in self.buckets:
             if bucket.key_in_range(key):
@@ -267,19 +266,19 @@ class TreeRoutingTable:
             i += 1
         return i

-    def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
+    def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
         random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
         return Distance(
             self._parent_node_id
         )(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')

-    def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
+    def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
         half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
         return Distance(self._parent_node_id)(
             int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
         ).to_bytes(constants.HASH_LENGTH, 'big')
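Both helpers above pick an integer inside a bucket's range and map it through Distance, which is the XOR metric, so the result lands in the right region of the ID space relative to the parent node id. A tiny round-trip illustration (the 48-byte HASH_LENGTH and the all-zero parent id are assumptions for the sketch, not values taken from this diff):

from lbry.dht.protocol.distance import Distance

HASH_LENGTH = 48  # assumed to match constants.HASH_LENGTH (384-bit node ids)
parent_id = bytes(HASH_LENGTH)                      # made-up node id for the example
in_range = (12345).to_bytes(HASH_LENGTH, 'big')     # some id inside a bucket's range
mapped = Distance(parent_id)(in_range).to_bytes(HASH_LENGTH, 'big')
# XOR is its own inverse, so mapping the result again recovers the original id.
assert Distance(parent_id)(mapped).to_bytes(HASH_LENGTH, 'big') == in_range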
-    def _split_bucket(self, old_bucket_index: int) -> None:
+    def split_bucket(self, old_bucket_index: int) -> None:
         """ Splits the specified k-bucket into two new buckets which together
         cover the same range in the key/ID space
@@ -304,7 +303,7 @@ class TreeRoutingTable:
             old_bucket.remove_peer(contact)
         self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))

-    def _join_buckets(self):
+    def join_buckets(self):
         if len(self.buckets) == 1:
             return
         to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
@@ -327,7 +326,14 @@ class TreeRoutingTable:
             self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
         self.buckets.remove(bucket)
         self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
-        return self._join_buckets()
+        return self.join_buckets()
def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
for bucket in self.buckets:
for contact in bucket.get_peers(sort_distance_to=False):
if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
return True
return False
def buckets_with_contacts(self) -> int: def buckets_with_contacts(self) -> int:
count = 0 count = 0
@ -335,70 +341,3 @@ class TreeRoutingTable:
if len(bucket) > 0: if len(bucket) > 0:
count += 1 count += 1
return count return count
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.remove_peer(my_peer)
self._join_buckets()
bucket_index = self._kbucket_index(peer.node_id)
if self.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self._should_split(bucket_index, peer.node_id):
self._split_bucket(bucket_index)
# Retry the insertion attempt
result = await self.add_peer(peer, probe)
self._join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self._loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.buckets[bucket_index].peers[0]
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self._loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
await probe(to_replace)
return False
except (asyncio.TimeoutError, RemoteException):
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.buckets[bucket_index]:
self.buckets[bucket_index].remove_peer(to_replace)
return await self.add_peer(peer, probe)


@@ -1,5 +1,5 @@
-from lbry.extras.cli import execute_command
 from lbry.conf import Config
+from lbry.extras.cli import execute_command


 def daemon_rpc(conf: Config, method: str, **kwargs):


@@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
     def running(self):
         return self._running

-    async def get_status(self):  # pylint: disable=no-self-use
+    async def get_status(self):
         return

     async def start(self):


@@ -42,7 +42,7 @@ class ComponentManager:
         self.analytics_manager = analytics_manager
         self.component_classes = {}
         self.components = set()
-        self.started = asyncio.Event()
+        self.started = asyncio.Event(loop=self.loop)
         self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())

         for component_name, component_class in self.default_component_classes.items():
@@ -118,7 +118,7 @@ class ComponentManager:
             component._setup() for component in stage if not component.running
         ]
         if needing_start:
-            await asyncio.wait(map(asyncio.create_task, needing_start))
+            await asyncio.wait(needing_start)
         self.started.set()

     async def stop(self):
@@ -131,7 +131,7 @@ class ComponentManager:
             component._stop() for component in stage if component.running
         ]
         if needing_stop:
-            await asyncio.wait(map(asyncio.create_task, needing_stop))
+            await asyncio.wait(needing_stop)
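The two asyncio.wait() hunks above differ only in whether the coroutines are wrapped first: passing bare coroutines to asyncio.wait() has been deprecated since Python 3.8 and was later removed, which is presumably why the '-' side wraps them with asyncio.create_task(). A minimal stand-alone version of the pattern (names are illustrative):

import asyncio

async def setup_component(n: int) -> int:
    await asyncio.sleep(0)      # stand-in for a component's _setup()
    return n

async def start_all():
    coros = [setup_component(i) for i in range(3)]        # bare coroutines
    tasks = [asyncio.create_task(c) for c in coros]       # wrap before waiting
    done, _ = await asyncio.wait(tasks)
    return sorted(t.result() for t in done)

print(asyncio.run(start_all()))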
     def all_components_running(self, *component_names):
         """


@@ -27,8 +27,10 @@ from lbry.extras.daemon.storage import SQLiteStorage
 from lbry.torrent.torrent_manager import TorrentManager
 from lbry.wallet import WalletManager
 from lbry.wallet.usage_payment import WalletServerPayer
-from lbry.torrent.tracker import TrackerClient
-from lbry.torrent.session import TorrentSession
+try:
+    from lbry.torrent.session import TorrentSession
+except ImportError:
+    TorrentSession = None

 log = logging.getLogger(__name__)
@ -46,7 +48,6 @@ BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server" PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
UPNP_COMPONENT = "upnp" UPNP_COMPONENT = "upnp"
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager" EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
LIBTORRENT_COMPONENT = "libtorrent_component" LIBTORRENT_COMPONENT = "libtorrent_component"
@ -293,7 +294,6 @@ class DHTComponent(Component):
peer_port=self.external_peer_port, peer_port=self.external_peer_port,
rpc_timeout=self.conf.node_rpc_timeout, rpc_timeout=self.conf.node_rpc_timeout,
split_buckets_under_index=self.conf.split_buckets_under_index, split_buckets_under_index=self.conf.split_buckets_under_index,
is_bootstrap_node=self.conf.is_bootstrap_node,
storage=storage storage=storage
) )
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes) self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
@ -357,6 +357,10 @@ class FileManagerComponent(Component):
wallet = self.component_manager.get_component(WALLET_COMPONENT) wallet = self.component_manager.get_component(WALLET_COMPONENT)
node = self.component_manager.get_component(DHT_COMPONENT) \ node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None if self.component_manager.has_component(DHT_COMPONENT) else None
try:
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT) if TorrentSession else None
except NameError:
torrent = None
log.info('Starting the file manager') log.info('Starting the file manager')
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
self.file_manager = FileManager( self.file_manager = FileManager(
@ -365,8 +369,7 @@ class FileManagerComponent(Component):
self.file_manager.source_managers['stream'] = StreamManager( self.file_manager.source_managers['stream'] = StreamManager(
loop, self.conf, blob_manager, wallet, storage, node, loop, self.conf, blob_manager, wallet, storage, node,
) )
if self.component_manager.has_component(LIBTORRENT_COMPONENT): if TorrentSession and LIBTORRENT_COMPONENT not in self.conf.components_to_skip:
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
self.file_manager.source_managers['torrent'] = TorrentManager( self.file_manager.source_managers['torrent'] = TorrentManager(
loop, self.conf, torrent, storage, self.component_manager.analytics_manager loop, self.conf, torrent, storage, self.component_manager.analytics_manager
) )
@ -374,7 +377,7 @@ class FileManagerComponent(Component):
log.info('Done setting up file manager') log.info('Done setting up file manager')
async def stop(self): async def stop(self):
await self.file_manager.stop() self.file_manager.stop()
class BackgroundDownloaderComponent(Component): class BackgroundDownloaderComponent(Component):
@ -495,6 +498,7 @@ class TorrentComponent(Component):
} }
async def start(self): async def start(self):
if TorrentSession:
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None) self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
await self.torrent_session.bind() # TODO: specify host/port await self.torrent_session.bind() # TODO: specify host/port
@@ -551,7 +555,7 @@ class UPnPComponent:
         while True:
             if now:
                 await self._maintain_redirects()
-            await asyncio.sleep(360)
+            await asyncio.sleep(360, loop=self.component_manager.loop)
async def _maintain_redirects(self): async def _maintain_redirects(self):
# setup the gateway if necessary # setup the gateway if necessary
@@ -560,6 +564,8 @@ class UPnPComponent:
             self.upnp = await UPnP.discover(loop=self.component_manager.loop)
             log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
         except Exception as err:
+            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
+                raise
             log.warning("upnp discovery failed: %s", err)
             self.upnp = None
@ -671,7 +677,7 @@ class UPnPComponent(Component):
log.info("Removing upnp redirects: %s", self.upnp_redirects) log.info("Removing upnp redirects: %s", self.upnp_redirects)
await asyncio.wait([ await asyncio.wait([
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items() self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
]) ], loop=self.component_manager.loop)
if self._maintain_redirects_task and not self._maintain_redirects_task.done(): if self._maintain_redirects_task and not self._maintain_redirects_task.done():
self._maintain_redirects_task.cancel() self._maintain_redirects_task.cancel()
@ -702,49 +708,3 @@ class ExchangeRateManagerComponent(Component):
async def stop(self): async def stop(self):
self.exchange_rate_manager.stop() self.exchange_rate_manager.stop()
class TrackerAnnouncerComponent(Component):
component_name = TRACKER_ANNOUNCER_COMPONENT
depends_on = [FILE_MANAGER_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.file_manager = None
self.announce_task = None
self.tracker_client: typing.Optional[TrackerClient] = None
@property
def component(self):
return self.tracker_client
@property
def running(self):
return self._running and self.announce_task and not self.announce_task.done()
async def announce_forever(self):
while True:
sleep_seconds = 60.0
announce_sd_hashes = []
for file in self.file_manager.get_filtered():
if not file.downloader:
continue
announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
await self.tracker_client.announce_many(*announce_sd_hashes)
await asyncio.sleep(sleep_seconds)
async def start(self):
node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None
node_id = node.protocol.node_id if node else None
self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
await self.tracker_client.start()
self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
self.announce_task = asyncio.create_task(self.announce_forever())
async def stop(self):
self.file_manager = None
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()
self.announce_task = None
self.tracker_client.stop()


@@ -8,14 +8,16 @@ import time
 import inspect
 import typing
 import random
+import hashlib
 import tracemalloc
-import itertools
+from decimal import Decimal
 from urllib.parse import urlencode, quote
 from typing import Callable, Optional, List
 from binascii import hexlify, unhexlify
 from traceback import format_exc
 from functools import wraps, partial

+import ecdsa
 import base58
 from aiohttp import web
 from prometheus_client import generate_latest as prom_generate_latest, Gauge, Histogram, Counter
@@ -27,8 +29,6 @@ from lbry.wallet import (
 )
 from lbry.wallet.dewies import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
 from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPE_NAMES
-from lbry.wallet.bip32 import PrivateKey
-from lbry.crypto.base58 import Base58

 from lbry import utils
 from lbry.conf import Config, Setting, NOT_SET
@@ -44,7 +44,7 @@ from lbry.error import (
 from lbry.extras import system_info
 from lbry.extras.daemon import analytics
 from lbry.extras.daemon.components import WALLET_COMPONENT, DATABASE_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT
-from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT, TRACKER_ANNOUNCER_COMPONENT
+from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT
 from lbry.extras.daemon.components import EXCHANGE_RATE_MANAGER_COMPONENT, UPNP_COMPONENT
 from lbry.extras.daemon.componentmanager import RequiredCondition
 from lbry.extras.daemon.componentmanager import ComponentManager
@@ -53,7 +53,7 @@ from lbry.extras.daemon.undecorated import undecorated
 from lbry.extras.daemon.security import ensure_request_allowed
 from lbry.file_analysis import VideoFileAnalyzer
 from lbry.schema.claim import Claim
-from lbry.schema.url import URL
+from lbry.schema.url import URL, normalize_name

 if typing.TYPE_CHECKING:
@@ -195,6 +195,61 @@ def paginate_list(items: List, page: Optional[int], page_size: Optional[int]):
     }
def fix_kwargs_for_hub(**kwargs):
repeated_fields = {"media_type", "stream_type", "claim_type"}
value_fields = {"tx_nout", "has_source", "is_signature_valid"}
opcodes = {'=': 0, '<=': 1, '>=': 2, '<': 3, '>': 4}
for key, value in list(kwargs.items()):
if value in (None, [], False):
kwargs.pop(key)
continue
if key in REPLACEMENTS:
kwargs[REPLACEMENTS[key]] = kwargs.pop(key)
key = REPLACEMENTS[key]
if key == "normalized_name":
kwargs[key] = normalize_name(value)
if key == "limit_claims_per_channel":
value = kwargs.pop("limit_claims_per_channel") or 0
if value > 0:
kwargs["limit_claims_per_channel"] = value
elif key == "invalid_channel_signature":
kwargs["is_signature_valid"] = {"value": not kwargs.pop("invalid_channel_signature")}
elif key == "has_no_source":
kwargs["has_source"] = {"value": not kwargs.pop("has_no_source")}
elif key in value_fields:
kwargs[key] = {"value": value} if not isinstance(value, dict) else value
elif key in repeated_fields and isinstance(value, str):
kwargs[key] = [value]
elif key in ("claim_id", "channel_id"):
kwargs[key] = {"invert": False, "value": [kwargs[key]]}
elif key in ("claim_ids", "channel_ids"):
kwargs[key[:-1]] = {"invert": False, "value": kwargs.pop(key)}
elif key == "not_channel_ids":
kwargs["channel_id"] = {"invert": True, "value": kwargs.pop("not_channel_ids")}
elif key in MY_RANGE_FIELDS:
constraints = []
for val in value if isinstance(value, list) else [value]:
operator = '='
if isinstance(val, str) and val[0] in opcodes:
operator_length = 2 if val[:2] in opcodes else 1
operator, val = val[:operator_length], val[operator_length:]
val = [int(val if key != 'fee_amount' else Decimal(val)*1000)]
constraints.append({"op": opcodes[operator], "value": val})
kwargs[key] = constraints
elif key == 'order_by': # TODO: remove this after removing support for old trending args from the api
value = value if isinstance(value, list) else [value]
new_value = []
for new_v in value:
migrated = new_v if new_v not in (
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
) else 'trending_score'
if migrated not in new_value:
new_value.append(migrated)
kwargs[key] = new_value
return kwargs
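To make the translation above concrete, here is a hedged before/after for a typical claim_search call. Only keys whose branches are visible in this hunk are used; whether "fee_amount" is in MY_RANGE_FIELDS and what REPLACEMENTS contains are not shown here, so treat the expected output as an illustration of the visible branches rather than a guaranteed result.

# Illustrative input for fix_kwargs_for_hub(); assumes fee_amount is a range field
# (the branch above special-cases it) and that none of these keys hit REPLACEMENTS.
query = {
    "claim_type": "stream",          # repeated field supplied as a single string
    "channel_ids": ["abc123"],       # plural form collapses into channel_id
    "has_no_source": True,           # flips into has_source
    "fee_amount": ">=0.01",          # LBC amount, scaled by 1000; '>=' maps to opcode 2
    "order_by": "trending_mixed",    # legacy trending name migrated to trending_score
}

# Roughly what the branches shown above would produce:
expected = {
    "claim_type": ["stream"],
    "channel_id": {"invert": False, "value": ["abc123"]},
    "has_source": {"value": False},
    "fee_amount": [{"op": 2, "value": [10]}],
    "order_by": ["trending_score"],
}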
DHT_HAS_CONTACTS = "dht_has_contacts" DHT_HAS_CONTACTS = "dht_has_contacts"
@ -614,8 +669,7 @@ class Daemon(metaclass=JSONRPCServerType):
content_type='application/json' content_type='application/json'
) )
@staticmethod async def handle_metrics_get_request(self, request: web.Request):
async def handle_metrics_get_request(request: web.Request):
try: try:
return web.Response( return web.Response(
text=prom_generate_latest().decode(), text=prom_generate_latest().decode(),
@ -1328,65 +1382,6 @@ class Daemon(metaclass=JSONRPCServerType):
c.wallets += [wallet_id] c.wallets += [wallet_id]
return wallet return wallet
@requires("wallet")
async def jsonrpc_wallet_export(self, password=None, wallet_id=None):
"""
Exports encrypted wallet data if password is supplied; otherwise plain JSON.
Wallet must be unlocked to perform this operation.
Usage:
wallet_export [--password=<password>] [--wallet_id=<wallet_id>]
Options:
--password=<password> : (str) password to encrypt outgoing data
--wallet_id=<wallet_id> : (str) wallet being exported
Returns:
(str) data: base64-encoded encrypted wallet, or cleartext JSON
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if password is None:
return wallet.to_json()
return wallet.pack(password).decode()
@requires("wallet")
async def jsonrpc_wallet_import(self, data, password=None, wallet_id=None, blocking=False):
"""
Import wallet data and merge accounts and preferences. Data is expected to be JSON if
password is not supplied.
Wallet must be unlocked to perform this operation.
Usage:
wallet_import (<data> | --data=<data>) [<password> | --password=<password>]
[--wallet_id=<wallet_id>] [--blocking]
Options:
--data=<data> : (str) incoming wallet data
--password=<password> : (str) password to decrypt incoming data
--wallet_id=<wallet_id> : (str) wallet being merged into
--blocking : (bool) wait until any new accounts have merged
Returns:
(str) base64-encoded encrypted wallet, or cleartext JSON
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
for new_account in itertools.chain(added_accounts, merged_accounts):
await new_account.maybe_migrate_certificates()
if added_accounts and self.ledger.network.is_connected:
if blocking:
await asyncio.wait([
a.ledger.subscribe_account(a) for a in added_accounts
])
else:
for new_account in added_accounts:
asyncio.create_task(self.ledger.subscribe_account(new_account))
wallet.save()
return await self.jsonrpc_wallet_export(password=password, wallet_id=wallet_id)
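For orientation, a sketch of exercising the two RPCs documented above from Python over the daemon's JSON-RPC endpoint; the localhost URL and port are the usual lbrynet defaults, assumed here rather than stated in this diff.

import asyncio
import aiohttp

API_URL = "http://localhost:5279/"  # assumed default lbrynet API address

async def rpc(method: str, **params):
    async with aiohttp.ClientSession() as session:
        async with session.post(API_URL, json={"method": method, "params": params}) as resp:
            return (await resp.json()).get("result")

async def wallet_roundtrip(password: str):
    # With a password the export is an encrypted, base64-encoded blob; without one
    # it is cleartext JSON (see the docstrings above).
    data = await rpc("wallet_export", password=password)
    # Importing the same blob merges its accounts and preferences back in.
    return await rpc("wallet_import", data=data, password=password, blocking=True)

# asyncio.run(wallet_roundtrip("hunter2"))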
@requires("wallet") @requires("wallet")
async def jsonrpc_wallet_add(self, wallet_id): async def jsonrpc_wallet_add(self, wallet_id):
""" """
@ -1855,7 +1850,7 @@ class Daemon(metaclass=JSONRPCServerType):
Options: Options:
--to_account=<to_account> : (str) send to this account --to_account=<to_account> : (str) send to this account
--from_account=<from_account> : (str) spend from this account --from_account=<from_account> : (str) spend from this account
--amount=<amount> : (decimal) the amount to transfer lbc --amount=<amount> : (str) the amount to transfer lbc
--everything : (bool) transfer everything (excluding claims), default: false. --everything : (bool) transfer everything (excluding claims), default: false.
--outputs=<outputs> : (int) split payment across many outputs, default: 1. --outputs=<outputs> : (int) split payment across many outputs, default: 1.
--wallet_id=<wallet_id> : (str) limit operation to specific wallet. --wallet_id=<wallet_id> : (str) limit operation to specific wallet.
@ -1878,48 +1873,6 @@ class Daemon(metaclass=JSONRPCServerType):
outputs=outputs, broadcast=broadcast outputs=outputs, broadcast=broadcast
) )
@requires("wallet")
async def jsonrpc_account_deposit(
self, txid, nout, redeem_script, private_key,
to_account=None, wallet_id=None, preview=False, blocking=False
):
"""
Spend a time locked transaction into your account.
Usage:
account_deposit <txid> <nout> <redeem_script> <private_key>
[<to_account> | --to_account=<to_account>]
[--wallet_id=<wallet_id>] [--preview] [--blocking]
Options:
--txid=<txid> : (str) id of the transaction
--nout=<nout> : (int) output number in the transaction
--redeem_script=<redeem_script> : (str) redeem script for output
--private_key=<private_key> : (str) private key to sign transaction
--to_account=<to_account> : (str) deposit to this account
--wallet_id=<wallet_id> : (str) limit operation to specific wallet.
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until tx has synced
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(to_account)
other_tx = await self.wallet_manager.get_transaction(txid)
tx = await Transaction.spend_time_lock(
other_tx.outputs[nout], unhexlify(redeem_script), account
)
pk = PrivateKey.from_bytes(
account.ledger, Base58.decode_check(private_key)[1:-1]
)
await tx.sign([account], {pk.address: pk})
if not preview:
await self.broadcast_or_release(tx, blocking)
self.component_manager.loop.create_task(self.analytics_manager.send_credits_sent())
else:
await self.ledger.release_tx(tx)
return tx
@requires(WALLET_COMPONENT) @requires(WALLET_COMPONENT)
def jsonrpc_account_send(self, amount, addresses, account_id=None, wallet_id=None, preview=False, blocking=False): def jsonrpc_account_send(self, amount, addresses, account_id=None, wallet_id=None, preview=False, blocking=False):
""" """
@ -1991,9 +1944,7 @@ class Daemon(metaclass=JSONRPCServerType):
wallet = self.wallet_manager.get_wallet_or_default(wallet_id) wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
wallet_changed = False wallet_changed = False
if data is not None: if data is not None:
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data) added_accounts = wallet.merge(self.wallet_manager, password, data)
for new_account in itertools.chain(added_accounts, merged_accounts):
await new_account.maybe_migrate_certificates()
if added_accounts and self.ledger.network.is_connected: if added_accounts and self.ledger.network.is_connected:
if blocking: if blocking:
await asyncio.wait([ await asyncio.wait([
@ -2410,7 +2361,6 @@ class Daemon(metaclass=JSONRPCServerType):
Usage: Usage:
claim_list [--claim_type=<claim_type>...] [--claim_id=<claim_id>...] [--name=<name>...] [--is_spent] claim_list [--claim_type=<claim_type>...] [--claim_id=<claim_id>...] [--name=<name>...] [--is_spent]
[--reposted_claim_id=<reposted_claim_id>...]
[--channel_id=<channel_id>...] [--account_id=<account_id>] [--wallet_id=<wallet_id>] [--channel_id=<channel_id>...] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--has_source | --has_no_source] [--page=<page>] [--page_size=<page_size>] [--has_source | --has_no_source] [--page=<page>] [--page_size=<page_size>]
[--resolve] [--order_by=<order_by>] [--no_totals] [--include_received_tips] [--resolve] [--order_by=<order_by>] [--no_totals] [--include_received_tips]
@ -2421,7 +2371,6 @@ class Daemon(metaclass=JSONRPCServerType):
--channel_id=<channel_id> : (str or list) streams in this channel --channel_id=<channel_id> : (str or list) streams in this channel
--name=<name> : (str or list) claim name --name=<name> : (str or list) claim name
--is_spent : (bool) shows previous claim updates and abandons --is_spent : (bool) shows previous claim updates and abandons
--reposted_claim_id=<reposted_claim_id> : (str or list) reposted claim id
--account_id=<account_id> : (str) id of the account to query --account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet --wallet_id=<wallet_id> : (str) restrict results to specific wallet
--has_source : (bool) list claims containing a source field --has_source : (bool) list claims containing a source field
@ -2619,6 +2568,21 @@ class Daemon(metaclass=JSONRPCServerType):
Returns: {Paginated[Output]} Returns: {Paginated[Output]}
""" """
if self.ledger.config.get('use_go_hub'):
host = self.ledger.network.client.server[0]
port = "50051"
kwargs['new_sdk_server'] = f"{host}:{port}"
if kwargs.get("channel"):
channel = kwargs.pop("channel")
channel_obj = (await self.jsonrpc_resolve(channel))[channel]
if isinstance(channel_obj, dict):
# This happens when the channel doesn't exist
kwargs["channel_id"] = ""
else:
kwargs["channel_id"] = channel_obj.claim_id
kwargs = fix_kwargs_for_hub(**kwargs)
else:
# Don't do this if using the hub server, it screws everything up
if "claim_ids" in kwargs and not kwargs["claim_ids"]: if "claim_ids" in kwargs and not kwargs["claim_ids"]:
kwargs.pop("claim_ids") kwargs.pop("claim_ids")
if {'claim_id', 'claim_ids'}.issubset(kwargs): if {'claim_id', 'claim_ids'}.issubset(kwargs):
@@ -2765,17 +2729,16 @@ class Daemon(metaclass=JSONRPCServerType):
             name, claim, amount, claim_address, funding_accounts, funding_accounts[0]
         )
         txo = tx.outputs[0]
-        txo.set_channel_private_key(
-            await funding_accounts[0].generate_channel_private_key()
-        )
+        await txo.generate_channel_private_key()

         await tx.sign(funding_accounts)

         if not preview:
+            account.add_channel_private_key(txo.private_key)
             wallet.save()
             await self.broadcast_or_release(tx, blocking)
             self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
-                tx, txo, claim_address, claim, name
+                tx, txo, claim_address, claim, name, dewies_to_lbc(amount)
             )]))
             self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
         else:
@@ -2920,9 +2883,7 @@ class Daemon(metaclass=JSONRPCServerType):
         new_txo = tx.outputs[0]

         if new_signing_key:
-            new_txo.set_channel_private_key(
-                await funding_accounts[0].generate_channel_private_key()
-            )
+            await new_txo.generate_channel_private_key()
         else:
             new_txo.private_key = old_txo.private_key
@@ -2931,10 +2892,11 @@ class Daemon(metaclass=JSONRPCServerType):
         await tx.sign(funding_accounts)

         if not preview:
+            account.add_channel_private_key(new_txo.private_key)
             wallet.save()
             await self.broadcast_or_release(tx, blocking)
             self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
-                tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name
+                tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
             )]))
             self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
         else:
@ -2944,21 +2906,19 @@ class Daemon(metaclass=JSONRPCServerType):
     @requires(WALLET_COMPONENT)
     async def jsonrpc_channel_sign(
-            self, channel_name=None, channel_id=None, hexdata=None, salt=None,
-            channel_account_id=None, wallet_id=None):
+            self, channel_name=None, channel_id=None, hexdata=None, channel_account_id=None, wallet_id=None):
         """
         Signs data using the specified channel signing key.

         Usage:
-            channel_sign [<channel_name> | --channel_name=<channel_name>] [<channel_id> | --channel_id=<channel_id>]
-                         [<hexdata> | --hexdata=<hexdata>] [<salt> | --salt=<salt>]
+            channel_sign [<channel_name> | --channel_name=<channel_name>]
+                         [<channel_id> | --channel_id=<channel_id>] [<hexdata> | --hexdata=<hexdata>]
                          [--channel_account_id=<channel_account_id>...] [--wallet_id=<wallet_id>]

         Options:
             --channel_name=<channel_name>            : (str) name of channel used to sign (or use channel id)
             --channel_id=<channel_id>                : (str) claim id of channel used to sign (or use channel name)
             --hexdata=<hexdata>                      : (str) data to sign, encoded as hexadecimal
-            --salt=<salt>                            : (str) salt to use for signing, default is to use timestamp
             --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
                                                        for channel certificates, defaults to all accounts.
             --wallet_id=<wallet_id>                  : (str) restrict operation to specific wallet
@@ -2975,13 +2935,11 @@ class Daemon(metaclass=JSONRPCServerType):
         signing_channel = await self.get_channel_or_error(
             wallet, channel_account_id, channel_id, channel_name, for_signing=True
         )
-        if salt is None:
-            salt = str(int(time.time()))
-        signature = signing_channel.sign_data(unhexlify(str(hexdata)), salt)
+        timestamp = str(int(time.time()))
+        signature = signing_channel.sign_data(unhexlify(str(hexdata)), timestamp)
         return {
             'signature': signature,
-            'signing_ts': salt,  # DEPRECATED
-            'salt': salt,
+            'signing_ts': timestamp
         }
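To show what callers see from the two variants above, a hedged request/response sketch; the channel name, payload and returned values are placeholders, not output from a real daemon.

from binascii import hexlify

# Request body for the daemon's JSON-RPC endpoint (either variant).
request = {
    "method": "channel_sign",
    "params": {
        "channel_name": "@example",
        "hexdata": hexlify(b"hello world").decode(),
        # "salt": "1648500000",  # only the '-' (salt-based) variant above accepts this
    },
}

# The '-' variant replies with something shaped like:
#   {"signature": "<hex>", "signing_ts": "<salt>", "salt": "<salt>"}
# The '+' variant omits "salt" and always uses the current timestamp as signing_ts.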
     @requires(WALLET_COMPONENT)
@@ -3106,7 +3064,7 @@ class Daemon(metaclass=JSONRPCServerType):
             'channel_id': channel.claim_id,
             'holding_address': address,
             'holding_public_key': public_key.extended_key_string(),
-            'signing_private_key': channel.private_key.signing_key.to_pem().decode()
+            'signing_private_key': channel.private_key.to_pem().decode()
         }
         return base58.b58encode(json.dumps(export, separators=(',', ':')))

@@ -3129,14 +3087,15 @@ class Daemon(metaclass=JSONRPCServerType):
         decoded = base58.b58decode(channel_data)
         data = json.loads(decoded)
-        channel_private_key = PrivateKey.from_pem(
-            self.ledger, data['signing_private_key']
-        )
+        channel_private_key = ecdsa.SigningKey.from_pem(
+            data['signing_private_key'], hashfunc=hashlib.sha256
+        )
+        public_key_der = channel_private_key.get_verifying_key().to_der()

         # check that the holding_address hasn't changed since the export was made
         holding_address = data['holding_address']
         channels, _, _, _ = await self.ledger.claim_search(
-            wallet.accounts, public_key_id=channel_private_key.address
+            wallet.accounts, public_key_id=self.ledger.public_key_to_address(public_key_der)
         )
         if channels and channels[0].get_address(self.ledger) != holding_address:
             holding_address = channels[0].get_address(self.ledger)
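A minimal sketch of the key handling on the '+' side of the import hunk above, assuming the same base58-encoded JSON blob produced by jsonrpc_channel_export and a ledger object exposing the public_key_to_address() helper used there; the function name and return shape are illustrative.

import json
import hashlib
import base58
import ecdsa

def holding_identity_from_export(channel_data: str, ledger):
    """Decode an exported channel blob and recover the identifiers used above."""
    data = json.loads(base58.b58decode(channel_data))
    signing_key = ecdsa.SigningKey.from_pem(data['signing_private_key'], hashfunc=hashlib.sha256)
    public_key_der = signing_key.get_verifying_key().to_der()
    return {
        'holding_address': data['holding_address'],
        'public_key_id': ledger.public_key_to_address(public_key_der),
    }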
@ -3299,17 +3258,15 @@ class Daemon(metaclass=JSONRPCServerType):
) )
@requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT) @requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT)
async def jsonrpc_stream_repost( async def jsonrpc_stream_repost(self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
channel_name=None, channel_account_id=None, account_id=None, wallet_id=None, channel_name=None, channel_account_id=None, account_id=None, wallet_id=None,
claim_address=None, funding_account_ids=None, preview=False, blocking=False, **kwargs): claim_address=None, funding_account_ids=None, preview=False, blocking=False):
""" """
Creates a claim that references an existing stream by its claim id. Creates a claim that references an existing stream by its claim id.
Usage: Usage:
stream_repost (<name> | --name=<name>) (<bid> | --bid=<bid>) (<claim_id> | --claim_id=<claim_id>) stream_repost (<name> | --name=<name>) (<bid> | --bid=<bid>) (<claim_id> | --claim_id=<claim_id>)
[--allow_duplicate_name=<allow_duplicate_name>] [--allow_duplicate_name=<allow_duplicate_name>]
[--title=<title>] [--description=<description>] [--tags=<tags>...]
[--channel_id=<channel_id> | --channel_name=<channel_name>] [--channel_id=<channel_id> | --channel_name=<channel_name>]
[--channel_account_id=<channel_account_id>...] [--channel_account_id=<channel_account_id>...]
[--account_id=<account_id>] [--wallet_id=<wallet_id>] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
@ -3322,9 +3279,6 @@ class Daemon(metaclass=JSONRPCServerType):
--claim_id=<claim_id> : (str) id of the claim being reposted --claim_id=<claim_id> : (str) id of the claim being reposted
--allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with --allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
given name. default: false. given name. default: false.
--title=<title> : (str) title of the repost
--description=<description> : (str) description of the repost
--tags=<tags> : (list) add repost tags
--channel_id=<channel_id> : (str) claim id of the publisher channel --channel_id=<channel_id> : (str) claim id of the publisher channel
--channel_name=<channel_name> : (str) name of the publisher channel --channel_name=<channel_name> : (str) name of the publisher channel
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
@ -3359,7 +3313,6 @@ class Daemon(metaclass=JSONRPCServerType):
raise Exception('Invalid claim id. It is expected to be a 40 characters long hexadecimal string.') raise Exception('Invalid claim id. It is expected to be a 40 characters long hexadecimal string.')
claim = Claim() claim = Claim()
claim.repost.update(**kwargs)
claim.repost.reference.claim_id = claim_id claim.repost.reference.claim_id = claim_id
tx = await Transaction.claim_create( tx = await Transaction.claim_create(
name, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel name, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
@ -3537,7 +3490,7 @@ class Daemon(metaclass=JSONRPCServerType):
async def save_claims(): async def save_claims():
await self.storage.save_claims([self._old_get_temp_claim_info( await self.storage.save_claims([self._old_get_temp_claim_info(
tx, new_txo, claim_address, claim, name tx, new_txo, claim_address, claim, name, dewies_to_lbc(amount)
)]) )])
if file_path is not None: if file_path is not None:
await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id) await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id)
@ -3740,8 +3693,6 @@ class Daemon(metaclass=JSONRPCServerType):
if old_txo.claim.is_stream: if old_txo.claim.is_stream:
claim.stream.update(file_path=file_path, **kwargs) claim.stream.update(file_path=file_path, **kwargs)
elif old_txo.claim.is_repost:
claim.repost.update(**kwargs)
if clear_channel: if clear_channel:
claim.clear_signature() claim.clear_signature()
@ -3774,7 +3725,7 @@ class Daemon(metaclass=JSONRPCServerType):
async def save_claims(): async def save_claims():
await self.storage.save_claims([self._old_get_temp_claim_info( await self.storage.save_claims([self._old_get_temp_claim_info(
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
)]) )])
if stream_hash: if stream_hash:
await self.storage.save_content_claim(stream_hash, new_txo.id) await self.storage.save_content_claim(stream_hash, new_txo.id)
@ -4036,8 +3987,6 @@ class Daemon(metaclass=JSONRPCServerType):
[--languages=<languages>...] [--clear_languages] [--languages=<languages>...] [--clear_languages]
[--locations=<locations>...] [--clear_locations] [--locations=<locations>...] [--clear_locations]
[--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>] [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
[--channel_id=<channel_id> | --channel_name=<channel_name>]
[--channel_account_id=<channel_account_id>...]
[--account_id=<account_id>] [--wallet_id=<wallet_id>] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>] [--claim_address=<claim_address>]
[--funding_account_ids=<funding_account_ids>...] [--funding_account_ids=<funding_account_ids>...]
@ -4093,10 +4042,6 @@ class Daemon(metaclass=JSONRPCServerType):
--clear_locations : (bool) clear existing locations (prior to adding new ones) --clear_locations : (bool) clear existing locations (prior to adding new ones)
--thumbnail_url=<thumbnail_url>: (str) thumbnail url --thumbnail_url=<thumbnail_url>: (str) thumbnail url
--channel_id=<channel_id> : (str) claim id of the publisher channel
--channel_name=<channel_name> : (str) name of the publisher channel
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--account_id=<account_id> : (str) account in which to look for collection (default: all) --account_id=<account_id> : (str) account in which to look for collection (default: all)
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
@ -4344,7 +4289,7 @@ class Daemon(metaclass=JSONRPCServerType):
'nout': tx.position, 'nout': tx.position,
'address': claim_address, 'address': claim_address,
'claim_id': claim_id, 'claim_id': claim_id,
'amount': dewies_to_lbc(new_txo.amount) 'amount': dewies_to_lbc(amount)
}]}) }]})
self.component_manager.loop.create_task(self.analytics_manager.send_claim_action('new_support')) self.component_manager.loop.create_task(self.analytics_manager.send_claim_action('new_support'))
else: else:
@ -4961,6 +4906,7 @@ class Daemon(metaclass=JSONRPCServerType):
DHT / Blob Exchange peer commands. DHT / Blob Exchange peer commands.
""" """
@requires(DHT_COMPONENT)
async def jsonrpc_peer_list(self, blob_hash, page=None, page_size=None): async def jsonrpc_peer_list(self, blob_hash, page=None, page_size=None):
""" """
Get peers for blob hash Get peers for blob hash
@ -4982,29 +4928,21 @@ class Daemon(metaclass=JSONRPCServerType):
if not is_valid_blobhash(blob_hash): if not is_valid_blobhash(blob_hash):
# TODO: use error from lbry.error # TODO: use error from lbry.error
raise Exception("invalid blob hash") raise Exception("invalid blob hash")
peer_q = asyncio.Queue(loop=self.component_manager.loop)
if self.component_manager.has_component(TRACKER_ANNOUNCER_COMPONENT):
tracker = self.component_manager.get_component(TRACKER_ANNOUNCER_COMPONENT)
tracker_peers = await tracker.get_kademlia_peer_list(bytes.fromhex(blob_hash))
log.info("Found %d peers for %s from trackers.", len(tracker_peers), blob_hash[:8])
peer_q.put_nowait(tracker_peers)
elif not self.component_manager.has_component(DHT_COMPONENT):
raise Exception("Peer list needs, at least, either a DHT component or a Tracker component for discovery.")
peers = [] peers = []
if self.component_manager.has_component(DHT_COMPONENT): peer_q = asyncio.Queue(loop=self.component_manager.loop)
await self.dht_node._peers_for_value_producer(blob_hash, peer_q) await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
while not peer_q.empty(): while not peer_q.empty():
peers.extend(peer_q.get_nowait()) peers.extend(peer_q.get_nowait())
results = { results = [
(peer.address, peer.tcp_port): { {
"node_id": hexlify(peer.node_id).decode() if peer.node_id else None, "node_id": hexlify(peer.node_id).decode(),
"address": peer.address, "address": peer.address,
"udp_port": peer.udp_port, "udp_port": peer.udp_port,
"tcp_port": peer.tcp_port, "tcp_port": peer.tcp_port,
} }
for peer in peers for peer in peers
} ]
return paginate_list(list(results.values()), page, page_size) return paginate_list(results, page, page_size)
@requires(DATABASE_COMPONENT) @requires(DATABASE_COMPONENT)
async def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None): async def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
@ -5474,11 +5412,11 @@ class Daemon(metaclass=JSONRPCServerType):
return results return results
@staticmethod @staticmethod
def _old_get_temp_claim_info(tx, txo, address, claim_dict, name): def _old_get_temp_claim_info(tx, txo, address, claim_dict, name, bid):
return { return {
"claim_id": txo.claim_id, "claim_id": txo.claim_id,
"name": name, "name": name,
"amount": dewies_to_lbc(txo.amount), "amount": bid,
"address": address, "address": address,
"txid": tx.id, "txid": tx.id,
"nout": txo.position, "nout": txo.position,
View file
@ -80,6 +80,8 @@ class MarketFeed:
self.rate = ExchangeRate(self.market, rate, int(time.time())) self.rate = ExchangeRate(self.market, rate, int(time.time()))
self.last_check = time.time() self.last_check = time.time()
return self.rate return self.rate
except asyncio.CancelledError:
raise
except asyncio.TimeoutError: except asyncio.TimeoutError:
log.warning("Timed out fetching exchange rate from %s.", self.name) log.warning("Timed out fetching exchange rate from %s.", self.name)
except json.JSONDecodeError as e: except json.JSONDecodeError as e:
@ -194,9 +196,9 @@ FEEDS: Iterable[Type[MarketFeed]] = (
BittrexUSDFeed, BittrexUSDFeed,
CoinExBTCFeed, CoinExBTCFeed,
CoinExUSDFeed, CoinExUSDFeed,
# HotbitBTCFeed, HotbitBTCFeed,
# HotbitUSDFeed, HotbitUSDFeed,
# UPbitBTCFeed, UPbitBTCFeed,
) )
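One side of the MarketFeed hunk above re-raises asyncio.CancelledError ahead of the broader handlers so that tearing a feed task down is not logged as a fetch failure. A self-contained sketch of that ordering, with fetch_rate standing in for the real HTTP request:

import asyncio
import json
import logging

log = logging.getLogger("rates")

async def poll_rate(fetch_rate, interval=300):
    # keep polling; tolerate fetch problems, but let cancellation propagate
    while True:
        try:
            rate = await asyncio.wait_for(fetch_rate(), timeout=30)
            log.info("got rate %s", rate)
        except asyncio.CancelledError:
            raise  # the task is being shut down, do not swallow it
        except asyncio.TimeoutError:
            log.warning("timed out fetching exchange rate")
        except json.JSONDecodeError as e:
            log.warning("invalid rate response: %s", e)
        await asyncio.sleep(interval)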
View file
@ -10,7 +10,7 @@ from lbry.schema.claim import Claim
from lbry.schema.support import Support from lbry.schema.support import Support
from lbry.torrent.torrent_manager import TorrentSource from lbry.torrent.torrent_manager import TorrentSource
from lbry.wallet import Wallet, Ledger, Account, Transaction, Output from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
from lbry.wallet.bip32 import PublicKey from lbry.wallet.bip32 import PubKey
from lbry.wallet.dewies import dewies_to_lbc from lbry.wallet.dewies import dewies_to_lbc
from lbry.stream.managed_stream import ManagedStream from lbry.stream.managed_stream import ManagedStream
@ -138,7 +138,7 @@ class JSONResponseEncoder(JSONEncoder):
return self.encode_claim(obj) return self.encode_claim(obj)
if isinstance(obj, Support): if isinstance(obj, Support):
return obj.to_dict() return obj.to_dict()
if isinstance(obj, PublicKey): if isinstance(obj, PubKey):
return obj.extended_key_string() return obj.extended_key_string()
if isinstance(obj, datetime): if isinstance(obj, datetime):
return obj.strftime("%Y%m%dT%H:%M:%S") return obj.strftime("%Y%m%dT%H:%M:%S")
@ -328,8 +328,8 @@ class JSONResponseEncoder(JSONEncoder):
result.update({ result.update({
'streaming_url': managed_stream.stream_url, 'streaming_url': managed_stream.stream_url,
'stream_hash': managed_stream.stream_hash, 'stream_hash': managed_stream.stream_hash,
'stream_name': managed_stream.stream_name, 'stream_name': managed_stream.descriptor.stream_name,
'suggested_file_name': managed_stream.suggested_file_name, 'suggested_file_name': managed_stream.descriptor.suggested_file_name,
'sd_hash': managed_stream.descriptor.sd_hash, 'sd_hash': managed_stream.descriptor.sd_hash,
'mime_type': managed_stream.mime_type, 'mime_type': managed_stream.mime_type,
'key': managed_stream.descriptor.key, 'key': managed_stream.descriptor.key,
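For context on the encoder hunk, json.JSONEncoder.default() is the hook being extended: the standard library calls it for any object it cannot serialize natively, and it must return something serializable. A small sketch of the same dispatch style with a hypothetical key type rather than the wallet's real classes:

import json
from datetime import datetime

class ExampleKey:
    # hypothetical stand-in for the wallet's public key object
    def extended_key_string(self) -> str:
        return "xpub-example"

class ExampleEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, ExampleKey):
            return obj.extended_key_string()
        if isinstance(obj, datetime):
            return obj.strftime("%Y%m%dT%H:%M:%S")
        return super().default(obj)

print(json.dumps({"key": ExampleKey(), "when": datetime(2022, 3, 28)}, cls=ExampleEncoder))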
View file
@ -449,7 +449,7 @@ class SQLiteStorage(SQLiteMixin):
return await self.db.execute_fetchall( return await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on " "select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob left join stream_blob using (blob_hash) " "from blob left join stream_blob using (blob_hash) "
"where stream_blob.stream_hash is null and blob.is_mine=? and blob.status='finished'" "where stream_blob.stream_hash is null and blob.is_mine=? "
"order by blob.blob_length desc, blob.added_on asc", "order by blob.blob_length desc, blob.added_on asc",
(is_mine,) (is_mine,)
) )
@ -463,8 +463,7 @@ class SQLiteStorage(SQLiteMixin):
content_blobs = await self.db.execute_fetchall( content_blobs = await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on " "select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)" "from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)"
"cross join file using (stream_hash)" "cross join file using (stream_hash) where blob.is_mine=? order by blob.added_on asc, blob.blob_length asc",
"where blob.is_mine=? and blob.status='finished' order by blob.added_on asc, blob.blob_length asc",
(is_mine,) (is_mine,)
) )
return content_blobs + sd_blobs return content_blobs + sd_blobs
@ -481,8 +480,7 @@ class SQLiteStorage(SQLiteMixin):
coalesce(sum(case when coalesce(sum(case when
is_mine=1 is_mine=1
then blob_length else 0 end), 0) as private_storage then blob_length else 0 end), 0) as private_storage
from blob left join stream_blob using (blob_hash) from blob left join stream_blob using (blob_hash) where blob_hash not in (select sd_hash from stream)
where blob_hash not in (select sd_hash from stream) and blob.status="finished"
""") """)
return { return {
'network_storage': network_size, 'network_storage': network_size,
@ -793,7 +791,7 @@ class SQLiteStorage(SQLiteMixin):
await self.db.run(_save_claims) await self.db.run(_save_claims)
if update_file_callbacks: if update_file_callbacks:
await asyncio.wait(map(asyncio.create_task, update_file_callbacks)) await asyncio.wait(update_file_callbacks)
if claim_id_to_supports: if claim_id_to_supports:
await self.save_supports(claim_id_to_supports) await self.save_supports(claim_id_to_supports)
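The storage hunks above differ mainly in whether blobs must have status='finished' to be counted toward disk usage. A standalone sketch of the aggregate query shape against a throwaway in-memory schema (the table and column names mirror the diff, but the data here is synthetic):

import sqlite3

con = sqlite3.connect(":memory:")
con.executescript("""
    create table blob (blob_hash text, blob_length integer, is_mine integer, status text);
    insert into blob values ('a', 10, 1, 'finished'), ('b', 20, 0, 'finished'), ('c', 30, 1, 'pending');
""")
row = con.execute("""
    select
        coalesce(sum(blob_length), 0) as total_storage,
        coalesce(sum(case when is_mine = 1 then blob_length else 0 end), 0) as private_storage
    from blob
    where status = 'finished'
""").fetchone()
print(row)  # (30, 10): only 'finished' blobs count, 10 bytes of which are ours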
View file
@ -13,12 +13,11 @@ from lbry.schema.url import URL
from lbry.wallet.dewies import dewies_to_lbc from lbry.wallet.dewies import dewies_to_lbc
from lbry.file.source_manager import SourceManager from lbry.file.source_manager import SourceManager
from lbry.file.source import ManagedDownloadSource from lbry.file.source import ManagedDownloadSource
from lbry.extras.daemon.storage import StoredContentClaim
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.conf import Config from lbry.conf import Config
from lbry.extras.daemon.analytics import AnalyticsManager from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.extras.daemon.storage import SQLiteStorage from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet import WalletManager from lbry.wallet import WalletManager, Output
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -50,10 +49,10 @@ class FileManager:
await manager.started.wait() await manager.started.wait()
self.started.set() self.started.set()
async def stop(self): def stop(self):
for manager in self.source_managers.values(): for manager in self.source_managers.values():
# fixme: pop or not? # fixme: pop or not?
await manager.stop() manager.stop()
self.started.clear() self.started.clear()
@cache_concurrent @cache_concurrent
@ -99,6 +98,8 @@ class FileManager:
except asyncio.TimeoutError: except asyncio.TimeoutError:
raise ResolveTimeoutError(uri) raise ResolveTimeoutError(uri)
except Exception as err: except Exception as err:
if isinstance(err, asyncio.CancelledError):
raise
log.exception("Unexpected error resolving stream:") log.exception("Unexpected error resolving stream:")
raise ResolveError(f"Unexpected error resolving stream: {str(err)}") raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
if 'error' in resolved_result: if 'error' in resolved_result:
@ -193,24 +194,21 @@ class FileManager:
#################### ####################
# make downloader and wait for start # make downloader and wait for start
#################### ####################
# temporary with fields we know so downloader can start. Missing fields are populated later.
stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
amount=txo.amount, height=txo.tx_ref.height,
serialized=claim.to_bytes().hex())
if not claim.stream.source.bt_infohash: if not claim.stream.source.bt_infohash:
# fixme: this shouldnt be here # fixme: this shouldnt be here
stream = ManagedStream( stream = ManagedStream(
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash, self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment, download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
analytics_manager=self.analytics_manager, claim=stored_claim analytics_manager=self.analytics_manager
) )
stream.downloader.node = source_manager.node stream.downloader.node = source_manager.node
else: else:
stream = TorrentSource( stream = TorrentSource(
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash, self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
file_name=file_name, download_directory=download_directory or self.config.download_dir, file_name=file_name, download_directory=download_directory or self.config.download_dir,
status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager, status=ManagedStream.STATUS_RUNNING,
analytics_manager=self.analytics_manager,
torrent_session=source_manager.torrent_session torrent_session=source_manager.torrent_session
) )
log.info("starting download for %s", uri) log.info("starting download for %s", uri)
@ -242,12 +240,13 @@ class FileManager:
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier) claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
stream.set_claim(claim_info, claim) stream.set_claim(claim_info, claim)
if save_file: if save_file:
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download)) await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download),
loop=self.loop)
return stream return stream
except asyncio.TimeoutError: except asyncio.TimeoutError:
error = DownloadDataTimeoutError(stream.sd_hash) error = DownloadDataTimeoutError(stream.sd_hash)
raise error raise error
except (Exception, asyncio.CancelledError) as err: # forgive data timeout, don't delete stream except Exception as err: # forgive data timeout, don't delete stream
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError, expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError) KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
if isinstance(err, expected): if isinstance(err, expected):
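Both versions of the save_file call above draw on one overall download timeout rather than starting a fresh one, so the time already spent resolving and setting up is subtracted first; the loop= keyword that one side still passes to wait_for was deprecated in Python 3.8 and removed in 3.10. A minimal sketch of the remaining-budget pattern with hypothetical resolve/save_file coroutines:

import asyncio

async def download_with_budget(resolve, save_file, timeout=60.0):
    loop = asyncio.get_running_loop()
    started = loop.time()
    await asyncio.wait_for(resolve(), timeout)
    # whatever time resolution used comes out of the same overall budget
    remaining = timeout - (loop.time() - started)
    await asyncio.wait_for(save_file(), remaining)

asyncio.run(download_with_budget(lambda: asyncio.sleep(0.1), lambda: asyncio.sleep(0.1), timeout=5))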
View file
@ -45,12 +45,11 @@ class ManagedDownloadSource:
self.purchase_receipt = None self.purchase_receipt = None
self._added_on = added_on self._added_on = added_on
self.analytics_manager = analytics_manager self.analytics_manager = analytics_manager
self.downloader = None
self.saving = asyncio.Event() self.saving = asyncio.Event(loop=self.loop)
self.finished_writing = asyncio.Event() self.finished_writing = asyncio.Event(loop=self.loop)
self.started_writing = asyncio.Event() self.started_writing = asyncio.Event(loop=self.loop)
self.finished_write_attempt = asyncio.Event() self.finished_write_attempt = asyncio.Event(loop=self.loop)
# @classmethod # @classmethod
# async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str, # async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
@ -67,7 +66,7 @@ class ManagedDownloadSource:
async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None): async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
raise NotImplementedError() raise NotImplementedError()
async def stop_tasks(self): def stop_tasks(self):
raise NotImplementedError() raise NotImplementedError()
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'): def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
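The Event construction difference above is the same Python-version issue: the loop= argument to asyncio primitives was deprecated in 3.8 and removed in 3.10, and the loop-free form effectively binds to the running loop the first time it is awaited. A tiny runnable illustration:

import asyncio

class Source:
    def __init__(self):
        # no loop= argument: the event attaches to whichever loop awaits it,
        # which is the only form accepted on Python 3.10+
        self.finished_writing = asyncio.Event()

async def main():
    source = Source()
    asyncio.get_running_loop().call_later(0.01, source.finished_writing.set)
    await source.finished_writing.wait()
    print("finished")

asyncio.run(main())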
View file
@ -54,16 +54,16 @@ class SourceManager:
self.storage = storage self.storage = storage
self.analytics_manager = analytics_manager self.analytics_manager = analytics_manager
self._sources: typing.Dict[str, ManagedDownloadSource] = {} self._sources: typing.Dict[str, ManagedDownloadSource] = {}
self.started = asyncio.Event() self.started = asyncio.Event(loop=self.loop)
def add(self, source: ManagedDownloadSource): def add(self, source: ManagedDownloadSource):
self._sources[source.identifier] = source self._sources[source.identifier] = source
async def remove(self, source: ManagedDownloadSource): def remove(self, source: ManagedDownloadSource):
if source.identifier not in self._sources: if source.identifier not in self._sources:
return return
self._sources.pop(source.identifier) self._sources.pop(source.identifier)
await source.stop_tasks() source.stop_tasks()
async def initialize_from_database(self): async def initialize_from_database(self):
raise NotImplementedError() raise NotImplementedError()
@ -72,10 +72,10 @@ class SourceManager:
await self.initialize_from_database() await self.initialize_from_database()
self.started.set() self.started.set()
async def stop(self): def stop(self):
while self._sources: while self._sources:
_, source = self._sources.popitem() _, source = self._sources.popitem()
await source.stop_tasks() source.stop_tasks()
self.started.clear() self.started.clear()
async def create(self, file_path: str, key: Optional[bytes] = None, async def create(self, file_path: str, key: Optional[bytes] = None,
@ -83,7 +83,7 @@ class SourceManager:
raise NotImplementedError() raise NotImplementedError()
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False): async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
await self.remove(source) self.remove(source)
if delete_file and source.output_file_exists: if delete_file and source.output_file_exists:
os.remove(source.full_path) os.remove(source.full_path)
View file
@ -2,5 +2,4 @@ build:
rm types/v2/* -rf rm types/v2/* -rf
touch types/v2/__init__.py touch types/v2/__init__.py
cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto
cd types/v2/ && cp ../../../../../types/jsonschema/* ./
sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py
View file
@ -2,9 +2,6 @@ import logging
from typing import List from typing import List
from binascii import hexlify, unhexlify from binascii import hexlify, unhexlify
from asn1crypto.keys import PublicKeyInfo
from coincurve import PublicKey as cPublicKey
from google.protobuf.json_format import MessageToDict from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError from google.protobuf.message import DecodeError
from hachoir.core.log import log as hachoir_log from hachoir.core.log import log as hachoir_log
@ -349,7 +346,7 @@ class Channel(BaseClaim):
@property @property
def public_key(self) -> str: def public_key(self) -> str:
return hexlify(self.public_key_bytes).decode() return hexlify(self.message.public_key).decode()
@public_key.setter @public_key.setter
def public_key(self, sd_public_key: str): def public_key(self, sd_public_key: str):
@ -357,11 +354,7 @@ class Channel(BaseClaim):
@property @property
def public_key_bytes(self) -> bytes: def public_key_bytes(self) -> bytes:
if len(self.message.public_key) == 33:
return self.message.public_key return self.message.public_key
public_key_info = PublicKeyInfo.load(self.message.public_key)
public_key = cPublicKey(public_key_info.native['public_key'])
return public_key.format(compressed=True)
@public_key_bytes.setter @public_key_bytes.setter
def public_key_bytes(self, public_key: bytes): def public_key_bytes(self, public_key: bytes):
@ -398,12 +391,6 @@ class Repost(BaseClaim):
claim_type = Claim.REPOST claim_type = Claim.REPOST
def to_dict(self):
claim = super().to_dict()
if claim.pop('claim_hash', None):
claim['claim_id'] = self.reference.claim_id
return claim
@property @property
def reference(self) -> ClaimReference: def reference(self) -> ClaimReference:
return ClaimReference(self.message) return ClaimReference(self.message)
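The public_key_bytes hunk above is about normalizing channel keys to the 33-byte compressed SEC1 form; the longer variant also accepts DER-encoded keys, parsing them with asn1crypto before compressing with coincurve. A rough sketch of just the compression step, generating a throwaway key with coincurve (a dependency of that variant) instead of reading one from a claim:

from coincurve import PrivateKey, PublicKey

# throwaway key; real code pulls the bytes out of the channel protobuf
uncompressed = PrivateKey().public_key.format(compressed=False)  # 65 bytes, 0x04 prefix
compressed = PublicKey(uncompressed).format(compressed=True)     # 33 bytes, 0x02/0x03 prefix
print(len(uncompressed), len(compressed))  # 65 33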
View file
@ -177,6 +177,19 @@ class Outputs:
outputs.blocked, outputs.blocked_total outputs.blocked, outputs.blocked_total
) )
@classmethod
def from_grpc(cls, outputs: OutputsMessage) -> 'Outputs':
txs = set()
for txo_message in chain(outputs.txos, outputs.extra_txos):
if txo_message.WhichOneof('meta') == 'error':
continue
txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
return cls(
outputs.txos, outputs.extra_txos, txs,
outputs.offset, outputs.total,
outputs.blocked, outputs.blocked_total
)
@classmethod @classmethod
def to_base64(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked=None) -> str: def to_base64(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked=None) -> str:
return base64.b64encode(cls.to_bytes(txo_rows, extra_txo_rows, offset, total, blocked)).decode() return base64.b64encode(cls.to_bytes(txo_rows, extra_txo_rows, offset, total, blocked)).decode()
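The from_grpc helper above gathers (txid, height) pairs from the txo messages, skipping entries whose meta is an error and reversing the tx_hash byte order to get the txid in its conventionally displayed form. The reversal step on its own, with a made-up hash:

from binascii import hexlify, unhexlify

tx_hash = unhexlify("aabbccdd")          # stand-in for txo_message.tx_hash, reversed relative to the displayed txid
txid = hexlify(tx_hash[::-1]).decode()   # hex string as block explorers show it
print(txid)                              # ddccbbaa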
View file
@ -0,0 +1,960 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hub.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import result_pb2 as result__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hub.proto',
package='pb',
syntax='proto3',
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"\x0e\n\x0c\x45mptyMessage\".\n\rServerMessage\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\"N\n\x0cHelloMessage\x12\x0c\n\x04port\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\"\n\x07servers\x18\x03 \x03(\x0b\x32\x11.pb.ServerMessage\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\x05\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x8e\x0c\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\x05\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x03(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x03(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x03(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x03(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x03(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x03(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x03(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_score\x18\' \x03(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\x05\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x12\x0f\n\x07sd_hash\x18; \x01(\t2\x88\x03\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x12+\n\x04Ping\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12-\n\x05Hello\x12\x10.pb.HelloMessage\x1a\x10.pb.HelloMessage\"\x00\x12/\n\x07\x41\x64\x64Peer\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12\x35\n\rPeerSubscribe\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12.\n\x07Version\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12/\n\x08\x46\x65\x61tures\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12\x30\n\tBroadcast\x12\x10.pb.EmptyMessage\x1a\x0f.pb.UInt32Value\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
,
dependencies=[result__pb2.DESCRIPTOR,])
_RANGEFIELD_OP = _descriptor.EnumDescriptor(
name='Op',
full_name='pb.RangeField.Op',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='EQ', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LTE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GTE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LT', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GT', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=373,
serialized_end=419,
)
_sym_db.RegisterEnumDescriptor(_RANGEFIELD_OP)
_EMPTYMESSAGE = _descriptor.Descriptor(
name='EmptyMessage',
full_name='pb.EmptyMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=45,
)
_SERVERMESSAGE = _descriptor.Descriptor(
name='ServerMessage',
full_name='pb.ServerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='pb.ServerMessage.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='port', full_name='pb.ServerMessage.port', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=47,
serialized_end=93,
)
_HELLOMESSAGE = _descriptor.Descriptor(
name='HelloMessage',
full_name='pb.HelloMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='port', full_name='pb.HelloMessage.port', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host', full_name='pb.HelloMessage.host', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='servers', full_name='pb.HelloMessage.servers', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=173,
)
_INVERTIBLEFIELD = _descriptor.Descriptor(
name='InvertibleField',
full_name='pb.InvertibleField',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='invert', full_name='pb.InvertibleField.invert', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='pb.InvertibleField.value', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=175,
serialized_end=223,
)
_STRINGVALUE = _descriptor.Descriptor(
name='StringValue',
full_name='pb.StringValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='pb.StringValue.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=225,
serialized_end=253,
)
_BOOLVALUE = _descriptor.Descriptor(
name='BoolValue',
full_name='pb.BoolValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='pb.BoolValue.value', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=255,
serialized_end=281,
)
_UINT32VALUE = _descriptor.Descriptor(
name='UInt32Value',
full_name='pb.UInt32Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='pb.UInt32Value.value', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=283,
serialized_end=311,
)
_RANGEFIELD = _descriptor.Descriptor(
name='RangeField',
full_name='pb.RangeField',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='op', full_name='pb.RangeField.op', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='pb.RangeField.value', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_RANGEFIELD_OP,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=313,
serialized_end=419,
)
_SEARCHREQUEST = _descriptor.Descriptor(
name='SearchRequest',
full_name='pb.SearchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='claim_id', full_name='pb.SearchRequest.claim_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='channel_id', full_name='pb.SearchRequest.channel_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='text', full_name='pb.SearchRequest.text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='pb.SearchRequest.limit', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='order_by', full_name='pb.SearchRequest.order_by', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='pb.SearchRequest.offset', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_controlling', full_name='pb.SearchRequest.is_controlling', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_take_over_height', full_name='pb.SearchRequest.last_take_over_height', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claim_name', full_name='pb.SearchRequest.claim_name', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='normalized_name', full_name='pb.SearchRequest.normalized_name', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_position', full_name='pb.SearchRequest.tx_position', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount', full_name='pb.SearchRequest.amount', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='pb.SearchRequest.timestamp', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_timestamp', full_name='pb.SearchRequest.creation_timestamp', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='height', full_name='pb.SearchRequest.height', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_height', full_name='pb.SearchRequest.creation_height', index=15,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='activation_height', full_name='pb.SearchRequest.activation_height', index=16,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expiration_height', full_name='pb.SearchRequest.expiration_height', index=17,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='release_time', full_name='pb.SearchRequest.release_time', index=18,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='short_url', full_name='pb.SearchRequest.short_url', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='canonical_url', full_name='pb.SearchRequest.canonical_url', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='title', full_name='pb.SearchRequest.title', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='author', full_name='pb.SearchRequest.author', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='pb.SearchRequest.description', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claim_type', full_name='pb.SearchRequest.claim_type', index=24,
number=25, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repost_count', full_name='pb.SearchRequest.repost_count', index=25,
number=26, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stream_type', full_name='pb.SearchRequest.stream_type', index=26,
number=27, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='media_type', full_name='pb.SearchRequest.media_type', index=27,
number=28, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fee_amount', full_name='pb.SearchRequest.fee_amount', index=28,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fee_currency', full_name='pb.SearchRequest.fee_currency', index=29,
number=30, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='pb.SearchRequest.duration', index=30,
number=31, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reposted_claim_id', full_name='pb.SearchRequest.reposted_claim_id', index=31,
number=32, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='censor_type', full_name='pb.SearchRequest.censor_type', index=32,
number=33, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claims_in_channel', full_name='pb.SearchRequest.claims_in_channel', index=33,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_signature_valid', full_name='pb.SearchRequest.is_signature_valid', index=34,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_amount', full_name='pb.SearchRequest.effective_amount', index=35,
number=37, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='support_amount', full_name='pb.SearchRequest.support_amount', index=36,
number=38, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trending_score', full_name='pb.SearchRequest.trending_score', index=37,
number=39, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_id', full_name='pb.SearchRequest.tx_id', index=38,
number=43, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=39,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='pb.SearchRequest.signature', index=40,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=41,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=42,
number=47, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=43,
number=48, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='any_tags', full_name='pb.SearchRequest.any_tags', index=44,
number=49, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_tags', full_name='pb.SearchRequest.all_tags', index=45,
number=50, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='not_tags', full_name='pb.SearchRequest.not_tags', index=46,
number=51, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=47,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_source', full_name='pb.SearchRequest.has_source', index=48,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=49,
number=54, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='any_languages', full_name='pb.SearchRequest.any_languages', index=50,
number=55, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_languages', full_name='pb.SearchRequest.all_languages', index=51,
number=56, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=52,
number=57, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='no_totals', full_name='pb.SearchRequest.no_totals', index=53,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sd_hash', full_name='pb.SearchRequest.sd_hash', index=54,
number=59, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=422,
serialized_end=1972,
)
_HELLOMESSAGE.fields_by_name['servers'].message_type = _SERVERMESSAGE
_RANGEFIELD.fields_by_name['op'].enum_type = _RANGEFIELD_OP
_RANGEFIELD_OP.containing_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['claim_id'].message_type = _INVERTIBLEFIELD
_SEARCHREQUEST.fields_by_name['channel_id'].message_type = _INVERTIBLEFIELD
_SEARCHREQUEST.fields_by_name['tx_position'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['timestamp'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['creation_timestamp'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['creation_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['activation_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['expiration_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['release_time'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['repost_count'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['fee_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['duration'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['censor_type'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['is_signature_valid'].message_type = _BOOLVALUE
_SEARCHREQUEST.fields_by_name['effective_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['support_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_score'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['tx_nout'].message_type = _UINT32VALUE
_SEARCHREQUEST.fields_by_name['has_source'].message_type = _BOOLVALUE
DESCRIPTOR.message_types_by_name['EmptyMessage'] = _EMPTYMESSAGE
DESCRIPTOR.message_types_by_name['ServerMessage'] = _SERVERMESSAGE
DESCRIPTOR.message_types_by_name['HelloMessage'] = _HELLOMESSAGE
DESCRIPTOR.message_types_by_name['InvertibleField'] = _INVERTIBLEFIELD
DESCRIPTOR.message_types_by_name['StringValue'] = _STRINGVALUE
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE
DESCRIPTOR.message_types_by_name['RangeField'] = _RANGEFIELD
DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EmptyMessage = _reflection.GeneratedProtocolMessageType('EmptyMessage', (_message.Message,), {
'DESCRIPTOR' : _EMPTYMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.EmptyMessage)
})
_sym_db.RegisterMessage(EmptyMessage)
ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.ServerMessage)
})
_sym_db.RegisterMessage(ServerMessage)
HelloMessage = _reflection.GeneratedProtocolMessageType('HelloMessage', (_message.Message,), {
'DESCRIPTOR' : _HELLOMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.HelloMessage)
})
_sym_db.RegisterMessage(HelloMessage)
InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_message.Message,), {
'DESCRIPTOR' : _INVERTIBLEFIELD,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.InvertibleField)
})
_sym_db.RegisterMessage(InvertibleField)
StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), {
'DESCRIPTOR' : _STRINGVALUE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.StringValue)
})
_sym_db.RegisterMessage(StringValue)
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), {
'DESCRIPTOR' : _BOOLVALUE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.BoolValue)
})
_sym_db.RegisterMessage(BoolValue)
UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), {
'DESCRIPTOR' : _UINT32VALUE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.UInt32Value)
})
_sym_db.RegisterMessage(UInt32Value)
RangeField = _reflection.GeneratedProtocolMessageType('RangeField', (_message.Message,), {
'DESCRIPTOR' : _RANGEFIELD,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.RangeField)
})
_sym_db.RegisterMessage(RangeField)
SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), {
'DESCRIPTOR' : _SEARCHREQUEST,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.SearchRequest)
})
_sym_db.RegisterMessage(SearchRequest)
DESCRIPTOR._options = None
_HUB = _descriptor.ServiceDescriptor(
name='Hub',
full_name='pb.Hub',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1975,
serialized_end=2367,
methods=[
_descriptor.MethodDescriptor(
name='Search',
full_name='pb.Hub.Search',
index=0,
containing_service=None,
input_type=_SEARCHREQUEST,
output_type=result__pb2._OUTPUTS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Ping',
full_name='pb.Hub.Ping',
index=1,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Hello',
full_name='pb.Hub.Hello',
index=2,
containing_service=None,
input_type=_HELLOMESSAGE,
output_type=_HELLOMESSAGE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddPeer',
full_name='pb.Hub.AddPeer',
index=3,
containing_service=None,
input_type=_SERVERMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='PeerSubscribe',
full_name='pb.Hub.PeerSubscribe',
index=4,
containing_service=None,
input_type=_SERVERMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Version',
full_name='pb.Hub.Version',
index=5,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Features',
full_name='pb.Hub.Features',
index=6,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Broadcast',
full_name='pb.Hub.Broadcast',
index=7,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_UINT32VALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_HUB)
DESCRIPTOR.services_by_name['Hub'] = _HUB
# @@protoc_insertion_point(module_scope)
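A minimal sketch (not part of the diff) of poking at the generated hub_pb2 module: the Hub service registered above is reachable through the file descriptor, and SearchRequest instances round-trip through the standard protobuf serialization API. The filter fields wired above (effective_amount, has_source, and so on) take RangeField/BoolValue wrappers whose inner field names are not shown here, so they are left out.

# illustrative only; assumes the module above is importable as hub_pb2
import hub_pb2

hub_service = hub_pb2.DESCRIPTOR.services_by_name['Hub']
print([method.name for method in hub_service.methods])
# ['Search', 'Ping', 'Hello', 'AddPeer', 'PeerSubscribe', 'Version', 'Features', 'Broadcast']

request = hub_pb2.SearchRequest()
data = request.SerializeToString()      # protobuf wire format (empty message here)
parsed = hub_pb2.SearchRequest()
parsed.ParseFromString(data)            # lossless round trip
assert parsed == request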

View file

@ -0,0 +1,298 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import hub_pb2 as hub__pb2
from . import result_pb2 as result__pb2
class HubStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Search = channel.unary_unary(
'/pb.Hub/Search',
request_serializer=hub__pb2.SearchRequest.SerializeToString,
response_deserializer=result__pb2.Outputs.FromString,
)
self.Ping = channel.unary_unary(
'/pb.Hub/Ping',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Hello = channel.unary_unary(
'/pb.Hub/Hello',
request_serializer=hub__pb2.HelloMessage.SerializeToString,
response_deserializer=hub__pb2.HelloMessage.FromString,
)
self.AddPeer = channel.unary_unary(
'/pb.Hub/AddPeer',
request_serializer=hub__pb2.ServerMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.PeerSubscribe = channel.unary_unary(
'/pb.Hub/PeerSubscribe',
request_serializer=hub__pb2.ServerMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Version = channel.unary_unary(
'/pb.Hub/Version',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Features = channel.unary_unary(
'/pb.Hub/Features',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Broadcast = channel.unary_unary(
'/pb.Hub/Broadcast',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.UInt32Value.FromString,
)
class HubServicer(object):
"""Missing associated documentation comment in .proto file."""
def Search(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Ping(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Hello(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddPeer(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PeerSubscribe(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Version(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Features(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Broadcast(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_HubServicer_to_server(servicer, server):
rpc_method_handlers = {
'Search': grpc.unary_unary_rpc_method_handler(
servicer.Search,
request_deserializer=hub__pb2.SearchRequest.FromString,
response_serializer=result__pb2.Outputs.SerializeToString,
),
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Hello': grpc.unary_unary_rpc_method_handler(
servicer.Hello,
request_deserializer=hub__pb2.HelloMessage.FromString,
response_serializer=hub__pb2.HelloMessage.SerializeToString,
),
'AddPeer': grpc.unary_unary_rpc_method_handler(
servicer.AddPeer,
request_deserializer=hub__pb2.ServerMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'PeerSubscribe': grpc.unary_unary_rpc_method_handler(
servicer.PeerSubscribe,
request_deserializer=hub__pb2.ServerMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Version': grpc.unary_unary_rpc_method_handler(
servicer.Version,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Features': grpc.unary_unary_rpc_method_handler(
servicer.Features,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Broadcast': grpc.unary_unary_rpc_method_handler(
servicer.Broadcast,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.UInt32Value.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'pb.Hub', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
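A minimal sketch (not part of the diff) of wiring a servicer subclass into a gRPC server with the helper above; the listen address and the StringValue field name ("value") are assumptions, not taken from this file.

# illustrative only; uses the hub__pb2 alias from this module
from concurrent import futures
import grpc

class PingOnlyHub(HubServicer):
    def Ping(self, request, context):
        return hub__pb2.StringValue(value="pong")   # assumes StringValue has a `value` field

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
add_HubServicer_to_server(PingOnlyHub(), server)
server.add_insecure_port("localhost:50051")          # placeholder address
server.start()
server.wait_for_termination()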
# This class is part of an EXPERIMENTAL API.
class Hub(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Search(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Search',
hub__pb2.SearchRequest.SerializeToString,
result__pb2.Outputs.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Ping',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Hello(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Hello',
hub__pb2.HelloMessage.SerializeToString,
hub__pb2.HelloMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddPeer(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/AddPeer',
hub__pb2.ServerMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PeerSubscribe(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/PeerSubscribe',
hub__pb2.ServerMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Version(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Version',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Features(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Features',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Broadcast(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Broadcast',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.UInt32Value.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
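And the client side, as a minimal sketch (not part of the diff); the address is a placeholder.

# illustrative only; uses the hub__pb2 alias from this module
import grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = HubStub(channel)
    pong = stub.Ping(hub__pb2.EmptyMessage())          # pb.StringValue reply
    outputs = stub.Search(hub__pb2.SearchRequest())    # result_pb2.Outputs reply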

View file

@ -0,0 +1,4 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

View file

@ -1,139 +0,0 @@
{
"title": "Wallet",
"description": "An LBC wallet",
"type": "object",
"required": ["name", "version", "accounts", "preferences"],
"additionalProperties": false,
"properties": {
"name": {
"description": "Human readable name for this wallet",
"type": "string"
},
"version": {
"description": "Wallet spec version",
"type": "integer",
"$comment": "Should this be a string? We may need some sort of decimal type if we want exact decimal versions."
},
"accounts": {
"description": "Accounts associated with this wallet",
"type": "array",
"items": {
"type": "object",
"required": ["address_generator", "certificates", "encrypted", "ledger", "modified_on", "name", "private_key", "public_key", "seed"],
"additionalProperties": false,
"properties": {
"address_generator": {
"description": "Higher level manager of either singular or deterministically generated addresses",
"type": "object",
"oneOf": [
{
"required": ["name", "change", "receiving"],
"additionalProperties": false,
"properties": {
"name": {
"description": "type of address generator: a deterministic chain of addresses",
"enum": ["deterministic-chain"],
"type": "string"
},
"change": {
"$ref": "#/$defs/address_manager",
"description": "Manager for deterministically generated change address (not used for single address)"
},
"receiving": {
"$ref": "#/$defs/address_manager",
"description": "Manager for deterministically generated receiving address (not used for single address)"
}
}
}, {
"required": ["name"],
"additionalProperties": false,
"properties": {
"name": {
"description": "type of address generator: a single address",
"enum": ["single-address"],
"type": "string"
}
}
}
]
},
"certificates": {
"type": "object",
"description": "Channel keys. Mapping from public key address to pem-formatted private key.",
"additionalProperties": {"type": "string"}
},
"encrypted": {
"type": "boolean",
"description": "Whether private key and seed are encrypted with a password"
},
"ledger": {
"description": "Which network to use",
"type": "string",
"examples": [
"lbc_mainnet",
"lbc_testnet"
]
},
"modified_on": {
"description": "last modified time in Unix Time",
"type": "integer"
},
"name": {
"description": "Name for account, possibly human readable",
"type": "string"
},
"private_key": {
"description": "Private key for address if `address_generator` is a single address. Root of chain of private keys for addresses if `address_generator` is a deterministic chain of addresses. Encrypted if `encrypted` is true.",
"type": "string"
},
"public_key": {
"description": "Public key for address if `address_generator` is a single address. Root of chain of public keys for addresses if `address_generator` is a deterministic chain of addresses.",
"type": "string"
},
"seed": {
"description": "Human readable representation of `private_key`. encrypted if `encrypted` is set to `true`",
"type": "string"
}
}
}
},
"preferences": {
"description": "Timestamped application-level preferences. Values can be objects or of a primitive type.",
"$comment": "enable-sync is seen in example wallet. encrypt-on-disk is seen in example wallet. they both have a boolean `value` field. Do we want them explicitly defined here? local and shared seem to have at least a similar structure (type, value [yes, again], version), value being the free-form part. Should we define those here? Or can there be any key under preferences, and `value` be literally be anything in any form?",
"type": "object",
"additionalProperties": {
"type": "object",
"required": ["ts", "value"],
"additionalProperties": false,
"properties": {
"ts": {
"type": "number",
"description": "When the item was set, in Unix time format.",
"$comment": "Do we want a string (decimal)?"
},
"value": {
"$comment": "Sometimes this has been an object, sometimes just a boolean. I don't want to prescribe anything."
}
}
}
}
},
"$defs": {
"address_manager": {
"description": "Manager for deterministically generated addresses",
"type": "object",
"required": ["gap", "maximum_uses_per_address"],
"additionalProperties": false,
"properties": {
"gap": {
"description": "Maximum allowed consecutive generated addresses with no transactions",
"type": "integer"
},
"maximum_uses_per_address": {
"description": "Maximum number of uses for each generated address",
"type": "integer"
}
}
}
}
}
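A minimal sketch (not part of the diff) of a single-address wallet document that satisfies the schema above, checked with the third-party jsonschema package; the schema file path and all key-material strings are placeholders.

# illustrative only
import json
import jsonschema

with open("wallet.schema.json") as fh:       # assumed location of the schema shown above
    schema = json.load(fh)

wallet = {
    "name": "Wallet",
    "version": 1,
    "preferences": {},
    "accounts": [{
        "address_generator": {"name": "single-address"},
        "certificates": {},
        "encrypted": False,
        "ledger": "lbc_mainnet",
        "modified_on": 1650000000,
        "name": "Account #1",
        "private_key": "-----BEGIN PRIVATE KEY----- (placeholder)",
        "public_key": "xpub-placeholder",
        "seed": "placeholder seed words",
    }],
}

jsonschema.validate(wallet, schema)          # raises ValidationError on a bad shape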

View file

@ -23,7 +23,6 @@ class BackgroundDownloader:
except ValueError: except ValueError:
return return
except asyncio.CancelledError: except asyncio.CancelledError:
log.debug("Cancelled background downloader")
raise raise
except Exception: except Exception:
log.error("Unexpected download error on background downloader") log.error("Unexpected download error on background downloader")

View file

@ -8,8 +8,6 @@ from lbry.error import DownloadSDTimeoutError
from lbry.utils import lru_cache_concurrent from lbry.utils import lru_cache_concurrent
from lbry.stream.descriptor import StreamDescriptor from lbry.stream.descriptor import StreamDescriptor
from lbry.blob_exchange.downloader import BlobDownloader from lbry.blob_exchange.downloader import BlobDownloader
from lbry.torrent.tracker import enqueue_tracker_search
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.conf import Config from lbry.conf import Config
from lbry.dht.node import Node from lbry.dht.node import Node
@ -27,8 +25,8 @@ class StreamDownloader:
self.config = config self.config = config
self.blob_manager = blob_manager self.blob_manager = blob_manager
self.sd_hash = sd_hash self.sd_hash = sd_hash
self.search_queue = asyncio.Queue() # blob hashes to feed into the iterative finder self.search_queue = asyncio.Queue(loop=loop) # blob hashes to feed into the iterative finder
self.peer_queue = asyncio.Queue() # new peers to try self.peer_queue = asyncio.Queue(loop=loop) # new peers to try
self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue) self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
self.descriptor: typing.Optional[StreamDescriptor] = descriptor self.descriptor: typing.Optional[StreamDescriptor] = descriptor
self.node: typing.Optional['Node'] = None self.node: typing.Optional['Node'] = None
@ -72,7 +70,7 @@ class StreamDownloader:
now = self.loop.time() now = self.loop.time()
sd_blob = await asyncio.wait_for( sd_blob = await asyncio.wait_for(
self.blob_downloader.download_blob(self.sd_hash, connection_id), self.blob_downloader.download_blob(self.sd_hash, connection_id),
self.config.blob_download_timeout self.config.blob_download_timeout, loop=self.loop
) )
log.info("downloaded sd blob %s", self.sd_hash) log.info("downloaded sd blob %s", self.sd_hash)
self.time_to_descriptor = self.loop.time() - now self.time_to_descriptor = self.loop.time() - now
@ -93,7 +91,6 @@ class StreamDownloader:
self.accumulate_task.cancel() self.accumulate_task.cancel()
_, self.accumulate_task = self.node.accumulate_peers(self.search_queue, self.peer_queue) _, self.accumulate_task = self.node.accumulate_peers(self.search_queue, self.peer_queue)
await self.add_fixed_peers() await self.add_fixed_peers()
enqueue_tracker_search(bytes.fromhex(self.sd_hash), self.peer_queue)
# start searching for peers for the sd hash # start searching for peers for the sd hash
self.search_queue.put_nowait(self.sd_hash) self.search_queue.put_nowait(self.sd_hash)
log.info("searching for peers for stream %s", self.sd_hash) log.info("searching for peers for stream %s", self.sd_hash)
@ -111,7 +108,7 @@ class StreamDownloader:
raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}") raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
blob = await asyncio.wait_for( blob = await asyncio.wait_for(
self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id), self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
self.config.blob_download_timeout * 10 self.config.blob_download_timeout * 10, loop=self.loop
) )
return blob return blob
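A recurring difference throughout these stream and torrent files is whether asyncio primitives receive an explicit loop= argument. Passing loop= to asyncio.Queue, asyncio.Event, asyncio.wait_for, asyncio.gather and asyncio.sleep was deprecated in Python 3.8 and removed in 3.10, so only the loop-free spelling runs on current interpreters; a minimal sketch (not part of the diff):

import asyncio

async def main():
    queue = asyncio.Queue()      # implicitly bound to the running loop
    event = asyncio.Event()
    await asyncio.wait_for(asyncio.sleep(0), timeout=1)

asyncio.run(main())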

View file

@ -16,8 +16,10 @@ from lbry.file.source import ManagedDownloadSource
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.conf import Config from lbry.conf import Config
from lbry.schema.claim import Claim
from lbry.blob.blob_manager import BlobManager from lbry.blob.blob_manager import BlobManager
from lbry.blob.blob_info import BlobInfo from lbry.blob.blob_info import BlobInfo
from lbry.dht.node import Node
from lbry.extras.daemon.analytics import AnalyticsManager from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.wallet.transaction import Transaction from lbry.wallet.transaction import Transaction
@ -60,9 +62,9 @@ class ManagedStream(ManagedDownloadSource):
self.file_output_task: typing.Optional[asyncio.Task] = None self.file_output_task: typing.Optional[asyncio.Task] = None
self.delayed_stop_task: typing.Optional[asyncio.Task] = None self.delayed_stop_task: typing.Optional[asyncio.Task] = None
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = [] self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
self.fully_reflected = asyncio.Event() self.fully_reflected = asyncio.Event(loop=self.loop)
self.streaming = asyncio.Event() self.streaming = asyncio.Event(loop=self.loop)
self._running = asyncio.Event() self._running = asyncio.Event(loop=self.loop)
@property @property
def sd_hash(self) -> str: def sd_hash(self) -> str:
@ -82,19 +84,7 @@ class ManagedStream(ManagedDownloadSource):
@property @property
def file_name(self) -> Optional[str]: def file_name(self) -> Optional[str]:
return self._file_name or self.suggested_file_name return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None)
@property
def suggested_file_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.suggested_file_name) or '').strip()
return sanitize_file_name(first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name))
@property
def stream_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.stream_name) or '').strip()
return first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name)
@property @property
def written_bytes(self) -> int: def written_bytes(self) -> int:
@ -128,7 +118,7 @@ class ManagedStream(ManagedDownloadSource):
@property @property
def mime_type(self): def mime_type(self):
return guess_media_type(os.path.basename(self.suggested_file_name))[0] return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0]
@property @property
def download_path(self): def download_path(self):
@ -161,7 +151,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("start downloader for stream (sd hash: %s)", self.sd_hash) log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
self._running.set() self._running.set()
try: try:
await asyncio.wait_for(self.downloader.start(), timeout) await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop)
except asyncio.TimeoutError: except asyncio.TimeoutError:
self._running.clear() self._running.clear()
raise DownloadSDTimeoutError(self.sd_hash) raise DownloadSDTimeoutError(self.sd_hash)
@ -174,7 +164,7 @@ class ManagedStream(ManagedDownloadSource):
if not self._file_name: if not self._file_name:
self._file_name = await get_next_available_file_name( self._file_name = await get_next_available_file_name(
self.loop, self.download_directory, self.loop, self.download_directory,
self._file_name or sanitize_file_name(self.suggested_file_name) self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
) )
file_name, download_dir = self._file_name, self.download_directory file_name, download_dir = self._file_name, self.download_directory
else: else:
@ -191,7 +181,7 @@ class ManagedStream(ManagedDownloadSource):
Stop any running save/stream tasks as well as the downloader and update the status in the database Stop any running save/stream tasks as well as the downloader and update the status in the database
""" """
await self.stop_tasks() self.stop_tasks()
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING: if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED) await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
@ -279,7 +269,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id, log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
self.sd_hash[:6], self.full_path) self.sd_hash[:6], self.full_path)
await self.blob_manager.storage.set_saved_file(self.stream_hash) await self.blob_manager.storage.set_saved_file(self.stream_hash)
except (Exception, asyncio.CancelledError) as err: except Exception as err:
if os.path.isfile(output_path): if os.path.isfile(output_path):
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash) log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
os.remove(output_path) os.remove(output_path)
@ -306,14 +296,14 @@ class ManagedStream(ManagedDownloadSource):
self.download_directory = download_directory or self.download_directory or self.config.download_dir self.download_directory = download_directory or self.download_directory or self.config.download_dir
if not self.download_directory: if not self.download_directory:
raise ValueError("no directory to download to") raise ValueError("no directory to download to")
if not (file_name or self._file_name or self.suggested_file_name): if not (file_name or self._file_name or self.descriptor.suggested_file_name):
raise ValueError("no file name to download to") raise ValueError("no file name to download to")
if not os.path.isdir(self.download_directory): if not os.path.isdir(self.download_directory):
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory) log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
os.mkdir(self.download_directory) os.mkdir(self.download_directory)
self._file_name = await get_next_available_file_name( self._file_name = await get_next_available_file_name(
self.loop, self.download_directory, self.loop, self.download_directory,
file_name or self._file_name or sanitize_file_name(self.suggested_file_name) file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
) )
await self.blob_manager.storage.change_file_download_dir_and_file_name( await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, self.download_directory, self.file_name self.stream_hash, self.download_directory, self.file_name
@ -321,16 +311,15 @@ class ManagedStream(ManagedDownloadSource):
await self.update_status(ManagedStream.STATUS_RUNNING) await self.update_status(ManagedStream.STATUS_RUNNING)
self.file_output_task = self.loop.create_task(self._save_file(self.full_path)) self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
try: try:
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout) await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
except asyncio.TimeoutError: except asyncio.TimeoutError:
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id) log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
await self.stop_tasks() self.stop_tasks()
await self.update_status(ManagedStream.STATUS_STOPPED) await self.update_status(ManagedStream.STATUS_STOPPED)
async def stop_tasks(self): def stop_tasks(self):
if self.file_output_task and not self.file_output_task.done(): if self.file_output_task and not self.file_output_task.done():
self.file_output_task.cancel() self.file_output_task.cancel()
await asyncio.gather(self.file_output_task, return_exceptions=True)
self.file_output_task = None self.file_output_task = None
while self.streaming_responses: while self.streaming_responses:
req, response = self.streaming_responses.pop() req, response = self.streaming_responses.pop()
@ -367,7 +356,7 @@ class ManagedStream(ManagedDownloadSource):
return sent return sent
except ConnectionError: except ConnectionError:
return sent return sent
except (OSError, Exception, asyncio.CancelledError) as err: except (OSError, Exception) as err:
if isinstance(err, asyncio.CancelledError): if isinstance(err, asyncio.CancelledError):
log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id) log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id)
elif isinstance(err, OSError): elif isinstance(err, OSError):
@ -402,7 +391,7 @@ class ManagedStream(ManagedDownloadSource):
self.sd_hash[:6]) self.sd_hash[:6])
await self.stop() await self.stop()
return return
await asyncio.sleep(1) await asyncio.sleep(1, loop=self.loop)
def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]: def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
if '=' in get_range: if '=' in get_range:

View file

@ -21,7 +21,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.loop = asyncio.get_event_loop() self.loop = asyncio.get_event_loop()
self.blob_manager = blob_manager self.blob_manager = blob_manager
self.server_task: asyncio.Task = None self.server_task: asyncio.Task = None
self.started_listening = asyncio.Event() self.started_listening = asyncio.Event(loop=self.loop)
self.buf = b'' self.buf = b''
self.transport: asyncio.StreamWriter = None self.transport: asyncio.StreamWriter = None
self.writer: typing.Optional['HashBlobWriter'] = None self.writer: typing.Optional['HashBlobWriter'] = None
@ -29,9 +29,9 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.descriptor: typing.Optional['StreamDescriptor'] = None self.descriptor: typing.Optional['StreamDescriptor'] = None
self.sd_blob: typing.Optional['BlobFile'] = None self.sd_blob: typing.Optional['BlobFile'] = None
self.received = [] self.received = []
self.incoming = incoming_event or asyncio.Event() self.incoming = incoming_event or asyncio.Event(loop=self.loop)
self.not_incoming = not_incoming_event or asyncio.Event() self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop)
self.stop_event = stop_event or asyncio.Event() self.stop_event = stop_event or asyncio.Event(loop=self.loop)
self.chunk_size = response_chunk_size self.chunk_size = response_chunk_size
self.wait_for_stop_task: typing.Optional[asyncio.Task] = None self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
self.partial_event = partial_event self.partial_event = partial_event
@ -94,7 +94,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.incoming.set() self.incoming.set()
self.send_response({"send_sd_blob": True}) self.send_response({"send_sd_blob": True})
try: try:
await asyncio.wait_for(self.sd_blob.verified.wait(), 30) await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop)
self.descriptor = await StreamDescriptor.from_stream_descriptor_blob( self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
self.loop, self.blob_manager.blob_dir, self.sd_blob self.loop, self.blob_manager.blob_dir, self.sd_blob
) )
@ -140,7 +140,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.incoming.set() self.incoming.set()
self.send_response({"send_blob": True}) self.send_response({"send_blob": True})
try: try:
await asyncio.wait_for(blob.verified.wait(), 30) await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop)
self.send_response({"received_blob": True}) self.send_response({"received_blob": True})
except asyncio.TimeoutError: except asyncio.TimeoutError:
self.send_response({"received_blob": False}) self.send_response({"received_blob": False})
@ -162,10 +162,10 @@ class ReflectorServer:
self.loop = asyncio.get_event_loop() self.loop = asyncio.get_event_loop()
self.blob_manager = blob_manager self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event() self.started_listening = asyncio.Event(loop=self.loop)
self.stopped_listening = asyncio.Event() self.stopped_listening = asyncio.Event(loop=self.loop)
self.incoming_event = incoming_event or asyncio.Event() self.incoming_event = incoming_event or asyncio.Event(loop=self.loop)
self.not_incoming_event = not_incoming_event or asyncio.Event() self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop)
self.response_chunk_size = response_chunk_size self.response_chunk_size = response_chunk_size
self.stop_event = stop_event self.stop_event = stop_event
self.partial_needs = partial_needs # for testing cases where it doesn't know what it wants self.partial_needs = partial_needs # for testing cases where it doesn't know what it wants

View file

@ -54,7 +54,7 @@ class StreamManager(SourceManager):
self.re_reflect_task: Optional[asyncio.Task] = None self.re_reflect_task: Optional[asyncio.Task] = None
self.update_stream_finished_futs: typing.List[asyncio.Future] = [] self.update_stream_finished_futs: typing.List[asyncio.Future] = []
self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {} self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
self.started = asyncio.Event() self.started = asyncio.Event(loop=self.loop)
@property @property
def streams(self): def streams(self):
@ -150,7 +150,7 @@ class StreamManager(SourceManager):
file_info['added_on'], file_info['fully_reflected'] file_info['added_on'], file_info['fully_reflected']
))) )))
if add_stream_tasks: if add_stream_tasks:
await asyncio.gather(*add_stream_tasks) await asyncio.gather(*add_stream_tasks, loop=self.loop)
log.info("Started stream manager with %i files", len(self._sources)) log.info("Started stream manager with %i files", len(self._sources))
if not self.node: if not self.node:
log.info("no DHT node given, resuming downloads trusting that we can contact reflector") log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
@ -159,11 +159,14 @@ class StreamManager(SourceManager):
self.resume_saving_task = asyncio.ensure_future(asyncio.gather( self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
*(self._sources[sd_hash].save_file(file_name, download_directory) *(self._sources[sd_hash].save_file(file_name, download_directory)
for (file_name, download_directory, sd_hash) in to_resume_saving), for (file_name, download_directory, sd_hash) in to_resume_saving),
loop=self.loop
)) ))
async def reflect_streams(self): async def reflect_streams(self):
try: try:
return await self._reflect_streams() return await self._reflect_streams()
except asyncio.CancelledError:
raise
except Exception: except Exception:
log.exception("reflector task encountered an unexpected error!") log.exception("reflector task encountered an unexpected error!")
@ -183,21 +186,21 @@ class StreamManager(SourceManager):
batch.append(self.reflect_stream(stream)) batch.append(self.reflect_stream(stream))
if len(batch) >= self.config.concurrent_reflector_uploads: if len(batch) >= self.config.concurrent_reflector_uploads:
log.debug("waiting for batch of %s reflecting streams", len(batch)) log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch) await asyncio.gather(*batch, loop=self.loop)
log.debug("done processing %s streams", len(batch)) log.debug("done processing %s streams", len(batch))
batch = [] batch = []
if batch: if batch:
log.debug("waiting for batch of %s reflecting streams", len(batch)) log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch) await asyncio.gather(*batch, loop=self.loop)
log.debug("done processing %s streams", len(batch)) log.debug("done processing %s streams", len(batch))
await asyncio.sleep(300) await asyncio.sleep(300, loop=self.loop)
async def start(self): async def start(self):
await super().start() await super().start()
self.re_reflect_task = self.loop.create_task(self.reflect_streams()) self.re_reflect_task = self.loop.create_task(self.reflect_streams())
async def stop(self): def stop(self):
await super().stop() super().stop()
if self.resume_saving_task and not self.resume_saving_task.done(): if self.resume_saving_task and not self.resume_saving_task.done():
self.resume_saving_task.cancel() self.resume_saving_task.cancel()
if self.re_reflect_task and not self.re_reflect_task.done(): if self.re_reflect_task and not self.re_reflect_task.done():
@ -224,8 +227,7 @@ class StreamManager(SourceManager):
) )
return task return task
@staticmethod async def _retriable_reflect_stream(self, stream, host, port):
async def _retriable_reflect_stream(stream, host, port):
sent = await stream.upload_to_reflector(host, port) sent = await stream.upload_to_reflector(host, port)
while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0: while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
stream.reflector_progress = 0 stream.reflector_progress = 0
@ -260,7 +262,7 @@ class StreamManager(SourceManager):
return return
if source.identifier in self.running_reflector_uploads: if source.identifier in self.running_reflector_uploads:
self.running_reflector_uploads[source.identifier].cancel() self.running_reflector_uploads[source.identifier].cancel()
await source.stop_tasks() source.stop_tasks()
if source.identifier in self.streams: if source.identifier in self.streams:
del self.streams[source.identifier] del self.streams[source.identifier]
blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]] blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]]

View file

@ -17,10 +17,8 @@ from functools import partial
from lbry.wallet import WalletManager, Wallet, Ledger, Account, Transaction from lbry.wallet import WalletManager, Wallet, Ledger, Account, Transaction
from lbry.conf import Config from lbry.conf import Config
from lbry.wallet.util import satoshis_to_coins from lbry.wallet.util import satoshis_to_coins
from lbry.wallet.dewies import lbc_to_dewies
from lbry.wallet.orchstr8 import Conductor from lbry.wallet.orchstr8 import Conductor
from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode, HubNode
from lbry.schema.claim import Claim
from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty
from lbry.extras.daemon.components import Component, WalletComponent from lbry.extras.daemon.components import Component, WalletComponent
@ -237,6 +235,7 @@ class IntegrationTestCase(AsyncioTestCase):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.conductor: Optional[Conductor] = None self.conductor: Optional[Conductor] = None
self.blockchain: Optional[LBCWalletNode] = None self.blockchain: Optional[LBCWalletNode] = None
self.hub: Optional[HubNode] = None
self.wallet_node: Optional[WalletNode] = None self.wallet_node: Optional[WalletNode] = None
self.manager: Optional[WalletManager] = None self.manager: Optional[WalletManager] = None
self.ledger: Optional[Ledger] = None self.ledger: Optional[Ledger] = None
@ -253,7 +252,10 @@ class IntegrationTestCase(AsyncioTestCase):
self.addCleanup(self.conductor.stop_spv) self.addCleanup(self.conductor.stop_spv)
await self.conductor.start_wallet() await self.conductor.start_wallet()
self.addCleanup(self.conductor.stop_wallet) self.addCleanup(self.conductor.stop_wallet)
await self.conductor.start_hub()
self.addCleanup(self.conductor.stop_hub)
self.blockchain = self.conductor.lbcwallet_node self.blockchain = self.conductor.lbcwallet_node
self.hub = self.conductor.hub_node
self.wallet_node = self.conductor.wallet_node self.wallet_node = self.conductor.wallet_node
self.manager = self.wallet_node.manager self.manager = self.wallet_node.manager
self.ledger = self.wallet_node.ledger self.ledger = self.wallet_node.ledger
@ -301,8 +303,15 @@ class IntegrationTestCase(AsyncioTestCase):
watcher = (ledger or self.ledger).on_transaction.where( watcher = (ledger or self.ledger).on_transaction.where(
lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda
) )
await self.generate(blocks_to_generate) self.conductor.spv_node.server.synchronized.clear()
await self.blockchain.generate(blocks_to_generate)
height = self.blockchain.block_expected
await watcher await watcher
while True:
await self.conductor.spv_node.server.synchronized.wait()
self.conductor.spv_node.server.synchronized.clear()
if self.conductor.spv_node.server.db.db_height >= height:
break
def on_address_update(self, address): def on_address_update(self, address):
return self.ledger.on_transaction.where( return self.ledger.on_transaction.where(
@ -317,17 +326,14 @@ class IntegrationTestCase(AsyncioTestCase):
async def generate(self, blocks): async def generate(self, blocks):
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """ """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block) prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
height = self.blockchain.block_expected
self.conductor.spv_node.server.synchronized.clear() self.conductor.spv_node.server.synchronized.clear()
await self.blockchain.generate(blocks) await self.blockchain.generate(blocks)
height = self.blockchain.block_expected
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
while True: while True:
await self.conductor.spv_node.server.synchronized.wait() await self.conductor.spv_node.server.synchronized.wait()
self.conductor.spv_node.server.synchronized.clear() self.conductor.spv_node.server.synchronized.clear()
if self.conductor.spv_node.server.db.db_height < height: if self.conductor.spv_node.server.db.db_height >= height:
continue
if self.conductor.spv_node.server._es_height < height:
continue
break break
@ -551,19 +557,6 @@ class CommandTestCase(IntegrationTestCase):
return self.sout(tx) return self.sout(tx)
return tx return tx
async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
account = (daemon or self.daemon).wallet_manager.default_account
claim_address = await account.receiving.get_or_create_usable_address()
claim = Claim()
claim.channel.public_key_bytes = pubkey_bytes
tx = await Transaction.claim_create(
name, claim, lbc_to_dewies(price),
claim_address, [self.account], self.account
)
await tx.sign([self.account])
await (daemon or self.daemon).broadcast_or_release(tx, blocking)
return self.sout(tx)
def create_upload_file(self, data, prefix=None, suffix=None): def create_upload_file(self, data, prefix=None, suffix=None):
file_path = tempfile.mktemp(prefix=prefix or "tmp", suffix=suffix or "", dir=self.daemon.conf.upload_dir) file_path = tempfile.mktemp(prefix=prefix or "tmp", suffix=suffix or "", dir=self.daemon.conf.upload_dir)
with open(file_path, 'w+b') as file: with open(file_path, 'w+b') as file:

View file

@ -10,13 +10,47 @@ from typing import Optional
import libtorrent import libtorrent
NOTIFICATION_MASKS = [
"error",
"peer",
"port_mapping",
"storage",
"tracker",
"debug",
"status",
"progress",
"ip_block",
"dht",
"stats",
"session_log",
"torrent_log",
"peer_log",
"incoming_request",
"dht_log",
"dht_operation",
"port_mapping_log",
"picker_log",
"file_progress",
"piece_progress",
"upload",
"block_progress"
]
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted? DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted?
libtorrent.add_torrent_params_flags_t.flag_auto_managed libtorrent.add_torrent_params_flags_t.flag_auto_managed
| libtorrent.add_torrent_params_flags_t.flag_update_subscribe | libtorrent.add_torrent_params_flags_t.flag_update_subscribe
) )
def get_notification_type(notification) -> str:
for i, notification_type in enumerate(NOTIFICATION_MASKS):
if (1 << i) & notification:
return notification_type
raise ValueError("unrecognized notification type")
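A quick illustration (not part of the diff) of the bitmask decoding above: each libtorrent alert-category bit maps back to its name by position in NOTIFICATION_MASKS, and a combined mask resolves to the lowest set bit.

assert get_notification_type(1 << 0) == "error"
assert get_notification_type(1 << 5) == "debug"
assert get_notification_type((1 << 9) | (1 << 10)) == "dht"   # lowest set bit wins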
class TorrentHandle: class TorrentHandle:
def __init__(self, loop, executor, handle): def __init__(self, loop, executor, handle):
self._loop = loop self._loop = loop
@ -87,7 +121,7 @@ class TorrentHandle:
self._show_status() self._show_status()
if self.finished.is_set(): if self.finished.is_set():
break break
await asyncio.sleep(0.1) await asyncio.sleep(0.1, loop=self._loop)
async def pause(self): async def pause(self):
await self._loop.run_in_executor( await self._loop.run_in_executor(
@ -122,8 +156,10 @@ class TorrentSession:
async def bind(self, interface: str = '0.0.0.0', port: int = 10889): async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
settings = { settings = {
'listen_interfaces': f"{interface}:{port}", 'listen_interfaces': f"{interface}:{port}",
'enable_natpmp': False, 'enable_outgoing_utp': True,
'enable_upnp': False 'enable_incoming_utp': True,
'enable_outgoing_tcp': False,
'enable_incoming_tcp': False
} }
self._session = await self._loop.run_in_executor( self._session = await self._loop.run_in_executor(
self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member
@ -150,7 +186,7 @@ class TorrentSession:
await self._loop.run_in_executor( await self._loop.run_in_executor(
self._executor, self._pop_alerts self._executor, self._pop_alerts
) )
await asyncio.sleep(1) await asyncio.sleep(1, loop=self._loop)
async def pause(self): async def pause(self):
await self._loop.run_in_executor( await self._loop.run_in_executor(

View file

@ -36,7 +36,7 @@ class Torrent:
def __init__(self, loop, handle): def __init__(self, loop, handle):
self._loop = loop self._loop = loop
self._handle = handle self._handle = handle
self.finished = asyncio.Event() self.finished = asyncio.Event(loop=loop)
def _threaded_update_status(self): def _threaded_update_status(self):
status = self._handle.status() status = self._handle.status()
@ -58,7 +58,7 @@ class Torrent:
log.info("finished downloading torrent!") log.info("finished downloading torrent!")
await self.pause() await self.pause()
break break
await asyncio.sleep(1) await asyncio.sleep(1, loop=self._loop)
async def pause(self): async def pause(self):
log.info("pause torrent") log.info("pause torrent")

View file

@ -74,7 +74,7 @@ class TorrentSource(ManagedDownloadSource):
def bt_infohash(self): def bt_infohash(self):
return self.identifier return self.identifier
async def stop_tasks(self): def stop_tasks(self):
pass pass
@property @property
@ -118,8 +118,8 @@ class TorrentManager(SourceManager):
async def start(self): async def start(self):
await super().start() await super().start()
async def stop(self): def stop(self):
await super().stop() super().stop()
log.info("finished stopping the torrent manager") log.info("finished stopping the torrent manager")
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False): async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):

View file

@ -1,285 +0,0 @@
import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version
log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
ConnectRequest: struct.Struct(">QII"),
ConnectResponse: struct.Struct(">IIQ"),
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
AnnounceResponse: struct.Struct(">IIIII"),
CompactIPv4Peer: struct.Struct(">IH"),
ScrapeRequest: struct.Struct(">QII"),
ScrapeResponse: struct.Struct(">II"),
ScrapeResponseItem: struct.Struct(">III"),
ErrorResponse: struct.Struct(">II")
}
def decode(cls, data, offset=0):
decoder = structs[cls]
if cls is AnnounceResponse:
return AnnounceResponse(*decoder.unpack_from(data, offset),
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
elif cls is ScrapeResponse:
return ScrapeResponse(*decoder.unpack_from(data, offset),
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
elif cls is ErrorResponse:
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
return cls(*decoder.unpack_from(data, offset))
def encode(obj):
if isinstance(obj, ScrapeRequest):
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
elif isinstance(obj, ErrorResponse):
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
elif isinstance(obj, AnnounceResponse):
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
return structs[type(obj)].pack(*obj)
def make_peer_id(random_part: Optional[str] = None) -> bytes:
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
# not to confuse with node id; peer id identifies uniquely the software, version and instance
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
self.transport = None
self.data_queue = {}
self.timeout = timeout
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
async def request(self, obj, tracker_ip, tracker_port):
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
try:
async with self.semaphore:
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
finally:
self.data_queue.pop(obj.transaction_id, None)
async def connect(self, tracker_ip, tracker_port):
transaction_id = random.getrandbits(32)
return decode(ConnectResponse,
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
@cache_concurrent
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
# peer_id is just to ensure cache coherency
return (await self.connect(tracker_ip, tracker_port)).connection_id
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
# this should make the key deterministic but unique per info hash + peer id
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
transaction_id = random.getrandbits(32)
req = AnnounceRequest(
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
transaction_id = random.getrandbits(32)
reply = await self.request(
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
return decode(ScrapeResponse, reply), connection_id
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 8:
return
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
if transaction_id in self.data_queue:
if not self.data_queue[transaction_id].done():
if data[3] == 3:
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
return self.data_queue[transaction_id].set_result(data)
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
def connection_lost(self, exc: Exception = None) -> None:
self.transport = None
class TrackerClient:
event_controller = StreamController()
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
self.client = UDPTrackerClientProtocol(timeout=timeout)
self.transport = None
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
self.announce_port = announce_port
self._get_servers = get_servers
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
lambda: self.client, local_addr=("0.0.0.0", 0))
self.event_controller.stream.listen(
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
def stop(self):
while self.tasks:
self.tasks.popitem()[1].cancel()
if self.transport is not None:
self.transport.close()
self.client = None
self.transport = None
self.event_controller.close()
def on_hash(self, info_hash, on_announcement=None):
if info_hash not in self.tasks:
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
self.tasks[info_hash] = task
async def announce_many(self, *info_hashes, stopped=False):
await asyncio.gather(
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
return_exceptions=True)
async def _announce_many(self, server, info_hashes, stopped=False):
tracker_ip = await resolve_host(*server, 'udp')
still_good_info_hashes = {
info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
if time.time() < next_announcement
}
results = await asyncio.gather(
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
return_exceptions=True)
if results:
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
found = []
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
for done in asyncio.as_completed(probes):
result = await done
if result is not None:
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
found.append(result)
return found
async def get_kademlia_peer_list(self, info_hash):
responses = await self.get_peer_list(info_hash, no_port=True)
return await announcement_to_kademlia_peers(*responses)
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
result = None
try:
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
except socket.error:
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
return
self.results.setdefault(tracker_host, {})
if info_hash in self.results[tracker_host]:
next_announcement, result = self.results[tracker_host][info_hash]
if time.time() < next_announcement:
return result
try:
result = await self.client.announce(
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
return None
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
return result
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
async def on_announcement(announcement: AnnounceResponse):
peers = await announcement_to_kademlia_peers(announcement)
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
peer_q.put_nowait(peers)
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
peers = [
(str(ipaddress.ip_address(peer.address)), peer.port)
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
]
return get_kademlia_peers_from_hosts(peers)
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
def __init__(self):
self.transport = None
self.known_conns = set()
self.peers = {}
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
def add_peer(self, info_hash, ip_address: str, port: int):
self.peers.setdefault(info_hash, [])
self.peers[info_hash].append(encode_peer(ip_address, port))
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 16:
return
action = int.from_bytes(data[8:12], "big", signed=False)
if action == 0:
req = decode(ConnectRequest, data)
connection_id = random.getrandbits(32)
self.known_conns.add(connection_id)
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
elif action == 1:
req = decode(AnnounceRequest, data)
if req.connection_id not in self.known_conns:
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID mismatch.\x00'))
else:
compact_address = encode_peer(addr[0], req.port)
if req.event != 3:
self.add_peer(req.info_hash, addr[0], req.port)
elif compact_address in self.peers.get(req.info_hash, []):
self.peers[req.info_hash].remove(compact_address)
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
return self.transport.sendto(resp, addr)
def encode_peer(ip_address: str, port: int):
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
return compact_ip + port.to_bytes(2, "big", signed=False)
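The tracker client above is easiest to follow through a small usage sketch. The snippet below is illustrative only: the tracker hostname, info hash, and node id are placeholders, and in the SDK these calls are driven by the daemon rather than a standalone script.
import asyncio
import os
async def announce_once():
    info_hash = os.urandom(20)  # placeholder 20-byte info hash
    client = TrackerClient(
        node_id=None,  # a peer id is generated when no DHT node id is supplied
        announce_port=6881,
        get_servers=lambda: [("tracker.example.org", 6969)],  # assumed UDP tracker
        timeout=10.0,
    )
    await client.start()
    try:
        # probe every configured tracker and collect AnnounceResponse objects
        responses = await client.get_peer_list(info_hash)
        for response in responses:
            print(len(response.peers), "peers, next announce in", response.interval, "seconds")
    finally:
        client.stop()
asyncio.run(announce_once())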

View file

@ -130,16 +130,21 @@ def get_sd_hash(stream_info):
def json_dumps_pretty(obj, **kwargs): def json_dumps_pretty(obj, **kwargs):
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '), **kwargs) return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '), **kwargs)
try:
# the standard contextlib.aclosing() is available in 3.10+ def cancel_task(task: typing.Optional[asyncio.Task]):
from contextlib import aclosing # pylint: disable=unused-import if task and not task.done():
except ImportError: task.cancel()
@contextlib.asynccontextmanager
async def aclosing(thing):
try: def cancel_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
yield thing for task in tasks:
finally: cancel_task(task)
await thing.aclose()
def drain_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
while tasks:
cancel_task(tasks.pop())
def async_timed_cache(duration: int): def async_timed_cache(duration: int):
def wrapper(func): def wrapper(func):
@ -450,8 +455,8 @@ def is_running_from_bundle():
class LockWithMetrics(asyncio.Lock): class LockWithMetrics(asyncio.Lock):
def __init__(self, acquire_metric, held_time_metric): def __init__(self, acquire_metric, held_time_metric, loop=None):
super().__init__() super().__init__(loop=loop)
self._acquire_metric = acquire_metric self._acquire_metric = acquire_metric
self._lock_held_time_metric = held_time_metric self._lock_held_time_metric = held_time_metric
self._lock_acquired_time = None self._lock_acquired_time = None
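The right-hand column above restores the small task helpers (cancel_task, cancel_tasks, drain_tasks) in place of the aclosing shim. A short, self-contained illustration of how they behave, assuming they are imported from the utils module shown above; the asyncio wiring is only for demonstration.
import asyncio
async def demo():
    tasks = [asyncio.create_task(asyncio.sleep(60)) for _ in range(3)]
    drain_tasks(tasks)   # pops and cancels every task, leaving the list empty
    assert tasks == []
    cancel_task(None)    # None entries are tolerated and simply ignored
    cancel_tasks([None, asyncio.create_task(asyncio.sleep(60))])
asyncio.run(demo())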

View file

@ -3,11 +3,11 @@ __lbcctl__ = 'lbcctl'
__lbcwallet__ = 'lbcwallet' __lbcwallet__ = 'lbcwallet'
__lbcd_url__ = ( __lbcd_url__ = (
'https://github.com/lbryio/lbcd/releases/download/' + 'https://github.com/lbryio/lbcd/releases/download/' +
'v0.22.100-rc.0/lbcd_0.22.100-rc.0_TARGET_PLATFORM.tar.gz' 'v0.22.200-beta/lbcd_0.22.200-beta_TARGET_PLATFORM.tar.gz'
) )
__lbcwallet_url__ = ( __lbcwallet_url__ = (
'https://github.com/lbryio/lbcwallet/releases/download/' + 'https://github.com/lbryio/lbcwallet/releases/download/' +
'v0.13.100-alpha.0/lbcwallet_0.13.100-alpha.0_TARGET_PLATFORM.tar.gz' 'v0.13.100-alpha-rc2/lbcwallet_0.13.100-alpha-rc2_TARGET_PLATFORM.tar.gz'
) )
__spvserver__ = 'lbry.wallet.server.coin.LBCRegTest' __spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'
@ -15,8 +15,7 @@ from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, EN
from lbry.wallet.manager import WalletManager from lbry.wallet.manager import WalletManager
from lbry.wallet.network import Network from lbry.wallet.network import Network
from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \ from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic
DeterministicChannelKeyManager
from lbry.wallet.transaction import Transaction, Output, Input from lbry.wallet.transaction import Transaction, Output, Input
from lbry.wallet.script import OutputScript, InputScript from lbry.wallet.script import OutputScript, InputScript
from lbry.wallet.database import SQLiteMixin, Database from lbry.wallet.database import SQLiteMixin, Database

View file

@ -9,10 +9,11 @@ from hashlib import sha256
from string import hexdigits from string import hexdigits
from typing import Type, Dict, Tuple, Optional, Any, List from typing import Type, Dict, Tuple, Optional, Any, List
import ecdsa
from lbry.error import InvalidPasswordError from lbry.error import InvalidPasswordError
from lbry.crypto.crypt import aes_encrypt, aes_decrypt from lbry.crypto.crypt import aes_encrypt, aes_decrypt
from .bip32 import PrivateKey, PublicKey, KeyPath, from_extended_key_string from .bip32 import PrivateKey, PubKey, from_extended_key_string
from .mnemonic import Mnemonic from .mnemonic import Mnemonic
from .constants import COIN, TXO_TYPES from .constants import COIN, TXO_TYPES
from .transaction import Transaction, Input, Output from .transaction import Transaction, Input, Output
@ -33,49 +34,6 @@ def validate_claim_id(claim_id):
raise Exception("Claim id is not hex encoded") raise Exception("Claim id is not hex encoded")
class DeterministicChannelKeyManager:
def __init__(self, account: 'Account'):
self.account = account
self.last_known = 0
self.cache = {}
self._private_key: Optional[PrivateKey] = None
@property
def private_key(self):
if self._private_key is None:
if self.account.private_key is not None:
self._private_key = self.account.private_key.child(KeyPath.CHANNEL)
return self._private_key
def maybe_generate_deterministic_key_for_channel(self, txo):
if self.private_key is None:
return
next_private_key = self.private_key.child(self.last_known)
public_key = next_private_key.public_key
public_key_bytes = public_key.pubkey_bytes
if txo.claim.channel.public_key_bytes == public_key_bytes:
self.cache[public_key.address] = next_private_key
self.last_known += 1
async def ensure_cache_primed(self):
if self.private_key is not None:
await self.generate_next_key()
async def generate_next_key(self) -> PrivateKey:
db = self.account.ledger.db
while True:
next_private_key = self.private_key.child(self.last_known)
public_key = next_private_key.public_key
self.cache[public_key.address] = next_private_key
if not await db.is_channel_key_used(self.account, public_key):
return next_private_key
self.last_known += 1
def get_private_key_from_pubkey_hash(self, pubkey_hash) -> PrivateKey:
return self.cache.get(pubkey_hash)
class AddressManager: class AddressManager:
name: str name: str
@ -121,7 +79,7 @@ class AddressManager:
def get_private_key(self, index: int) -> PrivateKey: def get_private_key(self, index: int) -> PrivateKey:
raise NotImplementedError raise NotImplementedError
def get_public_key(self, index: int) -> PublicKey: def get_public_key(self, index: int) -> PubKey:
raise NotImplementedError raise NotImplementedError
async def get_max_gap(self): async def get_max_gap(self):
@ -161,8 +119,8 @@ class HierarchicalDeterministic(AddressManager):
@classmethod @classmethod
def from_dict(cls, account: 'Account', d: dict) -> Tuple[AddressManager, AddressManager]: def from_dict(cls, account: 'Account', d: dict) -> Tuple[AddressManager, AddressManager]:
return ( return (
cls(account, KeyPath.RECEIVE, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})), cls(account, 0, **d.get('receiving', {'gap': 20, 'maximum_uses_per_address': 1})),
cls(account, KeyPath.CHANGE, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1})) cls(account, 1, **d.get('change', {'gap': 6, 'maximum_uses_per_address': 1}))
) )
def merge(self, d: dict): def merge(self, d: dict):
@ -175,7 +133,7 @@ class HierarchicalDeterministic(AddressManager):
def get_private_key(self, index: int) -> PrivateKey: def get_private_key(self, index: int) -> PrivateKey:
return self.account.private_key.child(self.chain_number).child(index) return self.account.private_key.child(self.chain_number).child(index)
def get_public_key(self, index: int) -> PublicKey: def get_public_key(self, index: int) -> PubKey:
return self.account.public_key.child(self.chain_number).child(index) return self.account.public_key.child(self.chain_number).child(index)
async def get_max_gap(self) -> int: async def get_max_gap(self) -> int:
@ -235,7 +193,7 @@ class SingleKey(AddressManager):
@classmethod @classmethod
def from_dict(cls, account: 'Account', d: dict) \ def from_dict(cls, account: 'Account', d: dict) \
-> Tuple[AddressManager, AddressManager]: -> Tuple[AddressManager, AddressManager]:
same_address_manager = cls(account, account.public_key, KeyPath.RECEIVE) same_address_manager = cls(account, account.public_key, 0)
return same_address_manager, same_address_manager return same_address_manager, same_address_manager
def to_dict_instance(self): def to_dict_instance(self):
@ -244,7 +202,7 @@ class SingleKey(AddressManager):
def get_private_key(self, index: int) -> PrivateKey: def get_private_key(self, index: int) -> PrivateKey:
return self.account.private_key return self.account.private_key
def get_public_key(self, index: int) -> PublicKey: def get_public_key(self, index: int) -> PubKey:
return self.account.public_key return self.account.public_key
async def get_max_gap(self) -> int: async def get_max_gap(self) -> int:
@ -266,6 +224,9 @@ class SingleKey(AddressManager):
class Account: class Account:
mnemonic_class = Mnemonic
private_key_class = PrivateKey
public_key_class = PubKey
address_generators: Dict[str, Type[AddressManager]] = { address_generators: Dict[str, Type[AddressManager]] = {
SingleKey.name: SingleKey, SingleKey.name: SingleKey,
HierarchicalDeterministic.name: HierarchicalDeterministic, HierarchicalDeterministic.name: HierarchicalDeterministic,
@ -273,7 +234,7 @@ class Account:
def __init__(self, ledger: 'Ledger', wallet: 'Wallet', name: str, def __init__(self, ledger: 'Ledger', wallet: 'Wallet', name: str,
seed: str, private_key_string: str, encrypted: bool, seed: str, private_key_string: str, encrypted: bool,
private_key: Optional[PrivateKey], public_key: PublicKey, private_key: Optional[PrivateKey], public_key: PubKey,
address_generator: dict, modified_on: float, channel_keys: dict) -> None: address_generator: dict, modified_on: float, channel_keys: dict) -> None:
self.ledger = ledger self.ledger = ledger
self.wallet = wallet self.wallet = wallet
@ -284,14 +245,13 @@ class Account:
self.private_key_string = private_key_string self.private_key_string = private_key_string
self.init_vectors: Dict[str, bytes] = {} self.init_vectors: Dict[str, bytes] = {}
self.encrypted = encrypted self.encrypted = encrypted
self.private_key: Optional[PrivateKey] = private_key self.private_key = private_key
self.public_key: PublicKey = public_key self.public_key = public_key
generator_name = address_generator.get('name', HierarchicalDeterministic.name) generator_name = address_generator.get('name', HierarchicalDeterministic.name)
self.address_generator = self.address_generators[generator_name] self.address_generator = self.address_generators[generator_name]
self.receiving, self.change = self.address_generator.from_dict(self, address_generator) self.receiving, self.change = self.address_generator.from_dict(self, address_generator)
self.address_managers = {am.chain_number: am for am in (self.receiving, self.change)} self.address_managers = {am.chain_number: am for am in (self.receiving, self.change)}
self.channel_keys = channel_keys self.channel_keys = channel_keys
self.deterministic_channel_keys = DeterministicChannelKeyManager(self)
ledger.add_account(self) ledger.add_account(self)
wallet.add_account(self) wallet.add_account(self)
@ -306,19 +266,19 @@ class Account:
name: str = None, address_generator: dict = None): name: str = None, address_generator: dict = None):
return cls.from_dict(ledger, wallet, { return cls.from_dict(ledger, wallet, {
'name': name, 'name': name,
'seed': Mnemonic().make_seed(), 'seed': cls.mnemonic_class().make_seed(),
'address_generator': address_generator or {} 'address_generator': address_generator or {}
}) })
@classmethod @classmethod
def get_private_key_from_seed(cls, ledger: 'Ledger', seed: str, password: str): def get_private_key_from_seed(cls, ledger: 'Ledger', seed: str, password: str):
return PrivateKey.from_seed( return cls.private_key_class.from_seed(
ledger, Mnemonic.mnemonic_to_seed(seed, password or 'lbryum') ledger, cls.mnemonic_class.mnemonic_to_seed(seed, password or 'lbryum')
) )
@classmethod @classmethod
def keys_from_dict(cls, ledger: 'Ledger', d: dict) \ def keys_from_dict(cls, ledger: 'Ledger', d: dict) \
-> Tuple[str, Optional[PrivateKey], PublicKey]: -> Tuple[str, Optional[PrivateKey], PubKey]:
seed = d.get('seed', '') seed = d.get('seed', '')
private_key_string = d.get('private_key', '') private_key_string = d.get('private_key', '')
private_key = None private_key = None
@ -489,7 +449,7 @@ class Account:
assert not self.encrypted, "Cannot get private key on encrypted wallet account." assert not self.encrypted, "Cannot get private key on encrypted wallet account."
return self.address_managers[chain].get_private_key(index) return self.address_managers[chain].get_private_key(index)
def get_public_key(self, chain: int, index: int) -> PublicKey: def get_public_key(self, chain: int, index: int) -> PubKey:
return self.address_managers[chain].get_public_key(index) return self.address_managers[chain].get_public_key(index)
def get_balance(self, confirmations=0, include_claims=False, read_only=False, **constraints): def get_balance(self, confirmations=0, include_claims=False, read_only=False, **constraints):
@ -560,30 +520,33 @@ class Account:
return tx return tx
async def generate_channel_private_key(self): def add_channel_private_key(self, private_key):
return await self.deterministic_channel_keys.generate_next_key() public_key_bytes = private_key.get_verifying_key().to_der()
channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
self.channel_keys[channel_pubkey_hash] = private_key.to_pem().decode()
def add_channel_private_key(self, private_key: PrivateKey): async def get_channel_private_key(self, public_key_bytes):
self.channel_keys[private_key.address] = private_key.to_pem().decode()
async def get_channel_private_key(self, public_key_bytes) -> PrivateKey:
channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes) channel_pubkey_hash = self.ledger.public_key_to_address(public_key_bytes)
private_key_pem = self.channel_keys.get(channel_pubkey_hash) private_key_pem = self.channel_keys.get(channel_pubkey_hash)
if private_key_pem: if private_key_pem:
return PrivateKey.from_pem(self.ledger, private_key_pem) return await asyncio.get_event_loop().run_in_executor(
return self.deterministic_channel_keys.get_private_key_from_pubkey_hash(channel_pubkey_hash) None, ecdsa.SigningKey.from_pem, private_key_pem, sha256
)
async def maybe_migrate_certificates(self): async def maybe_migrate_certificates(self):
def to_der(private_key_pem):
return ecdsa.SigningKey.from_pem(private_key_pem, hashfunc=sha256).get_verifying_key().to_der()
if not self.channel_keys: if not self.channel_keys:
return return
channel_keys = {} channel_keys = {}
for private_key_pem in self.channel_keys.values(): for private_key_pem in self.channel_keys.values():
if not isinstance(private_key_pem, str): if not isinstance(private_key_pem, str):
continue continue
if not private_key_pem.startswith("-----BEGIN"): if "-----BEGIN EC PRIVATE KEY-----" not in private_key_pem:
continue continue
private_key = PrivateKey.from_pem(self.ledger, private_key_pem) public_key_der = await asyncio.get_event_loop().run_in_executor(None, to_der, private_key_pem)
channel_keys[private_key.address] = private_key_pem channel_keys[self.ledger.public_key_to_address(public_key_der)] = private_key_pem
if self.channel_keys != channel_keys: if self.channel_keys != channel_keys:
self.channel_keys = channel_keys self.channel_keys = channel_keys
self.wallet.save() self.wallet.save()
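With the deterministic channel key manager removed, channel keys go back to being plain ecdsa signing keys stored as PEM and keyed by the hash of the DER-encoded public key. A hedged round-trip sketch, assuming an Account instance is available; the key generation is shown inline only for illustration, since the SDK normally creates channel keys during channel creation.
from hashlib import sha256
import ecdsa
async def channel_key_roundtrip(account):
    private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=sha256)
    account.add_channel_private_key(private_key)   # stores the PEM under the pubkey-hash address
    public_key_bytes = private_key.get_verifying_key().to_der()
    recovered = await account.get_channel_private_key(public_key_bytes)
    return recovered  # an ecdsa.SigningKey loaded back from the stored PEM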

View file

@ -1,21 +1,10 @@
from asn1crypto.keys import PrivateKeyInfo, ECPrivateKey from coincurve import PublicKey, PrivateKey as _PrivateKey
from coincurve import PublicKey as cPublicKey, PrivateKey as cPrivateKey
from coincurve.utils import (
pem_to_der, lib as libsecp256k1, ffi as libsecp256k1_ffi
)
from coincurve.ecdsa import CDATA_SIG_LENGTH
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256 from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
from lbry.crypto.base58 import Base58 from lbry.crypto.base58 import Base58
from .util import cachedproperty from .util import cachedproperty
class KeyPath:
RECEIVE = 0
CHANGE = 1
CHANNEL = 2
class DerivationError(Exception): class DerivationError(Exception):
""" Raised when an invalid derivation occurs. """ """ Raised when an invalid derivation occurs. """
@ -82,30 +71,26 @@ class _KeyBase:
return Base58.encode_check(self.extended_key()) return Base58.encode_check(self.extended_key())
class PublicKey(_KeyBase): class PubKey(_KeyBase):
""" A BIP32 public key. """ """ A BIP32 public key. """
def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None): def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent) super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(pubkey, cPublicKey): if isinstance(pubkey, PublicKey):
self.verifying_key = pubkey self.verifying_key = pubkey
else: else:
self.verifying_key = self._verifying_key_from_pubkey(pubkey) self.verifying_key = self._verifying_key_from_pubkey(pubkey)
@classmethod
def from_compressed(cls, public_key_bytes, ledger=None) -> 'PublicKey':
return cls(ledger, public_key_bytes, bytes((0,)*32), 0, 0)
@classmethod @classmethod
def _verifying_key_from_pubkey(cls, pubkey): def _verifying_key_from_pubkey(cls, pubkey):
""" Converts a 33-byte compressed pubkey into an coincurve.PublicKey object. """ """ Converts a 33-byte compressed pubkey into an PublicKey object. """
if not isinstance(pubkey, (bytes, bytearray)): if not isinstance(pubkey, (bytes, bytearray)):
raise TypeError('pubkey must be raw bytes') raise TypeError('pubkey must be raw bytes')
if len(pubkey) != 33: if len(pubkey) != 33:
raise ValueError('pubkey must be 33 bytes') raise ValueError('pubkey must be 33 bytes')
if pubkey[0] not in (2, 3): if pubkey[0] not in (2, 3):
raise ValueError('invalid pubkey prefix byte') raise ValueError('invalid pubkey prefix byte')
return cPublicKey(pubkey) return PublicKey(pubkey)
@cachedproperty @cachedproperty
def pubkey_bytes(self): def pubkey_bytes(self):
@ -120,7 +105,7 @@ class PublicKey(_KeyBase):
def ec_point(self): def ec_point(self):
return self.verifying_key.point() return self.verifying_key.point()
def child(self, n: int) -> 'PublicKey': def child(self, n: int):
""" Return the derived child extended pubkey at index N. """ """ Return the derived child extended pubkey at index N. """
if not 0 <= n < (1 << 31): if not 0 <= n < (1 << 31):
raise ValueError('invalid BIP32 public key child number') raise ValueError('invalid BIP32 public key child number')
@ -128,7 +113,7 @@ class PublicKey(_KeyBase):
msg = self.pubkey_bytes + n.to_bytes(4, 'big') msg = self.pubkey_bytes + n.to_bytes(4, 'big')
L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name L_b, R_b = self._hmac_sha512(msg) # pylint: disable=invalid-name
derived_key = self.verifying_key.add(L_b) derived_key = self.verifying_key.add(L_b)
return PublicKey(self.ledger, derived_key, R_b, n, self.depth + 1, self) return PubKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)
def identifier(self): def identifier(self):
""" Return the key's identifier as 20 bytes. """ """ Return the key's identifier as 20 bytes. """
@ -141,36 +126,6 @@ class PublicKey(_KeyBase):
self.pubkey_bytes self.pubkey_bytes
) )
def verify(self, signature, digest) -> bool:
""" Verify that a signature is valid for a 32 byte digest. """
if len(signature) != 64:
raise ValueError('Signature must be 64 bytes long.')
if len(digest) != 32:
raise ValueError('Digest must be 32 bytes long.')
key = self.verifying_key
raw_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
parsed = libsecp256k1.secp256k1_ecdsa_signature_parse_compact(
key.context.ctx, raw_signature, signature
)
assert parsed == 1
normalized_signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
libsecp256k1.secp256k1_ecdsa_signature_normalize(
key.context.ctx, normalized_signature, raw_signature
)
verified = libsecp256k1.secp256k1_ecdsa_verify(
key.context.ctx, normalized_signature, digest, key.public_key
)
return bool(verified)
class PrivateKey(_KeyBase): class PrivateKey(_KeyBase):
"""A BIP32 private key.""" """A BIP32 private key."""
@ -179,7 +134,7 @@ class PrivateKey(_KeyBase):
def __init__(self, ledger, privkey, chain_code, n, depth, parent=None): def __init__(self, ledger, privkey, chain_code, n, depth, parent=None):
super().__init__(ledger, chain_code, n, depth, parent) super().__init__(ledger, chain_code, n, depth, parent)
if isinstance(privkey, cPrivateKey): if isinstance(privkey, _PrivateKey):
self.signing_key = privkey self.signing_key = privkey
else: else:
self.signing_key = self._signing_key_from_privkey(privkey) self.signing_key = self._signing_key_from_privkey(privkey)
@ -187,7 +142,7 @@ class PrivateKey(_KeyBase):
@classmethod @classmethod
def _signing_key_from_privkey(cls, private_key): def _signing_key_from_privkey(cls, private_key):
""" Converts a 32-byte private key into an coincurve.PrivateKey object. """ """ Converts a 32-byte private key into an coincurve.PrivateKey object. """
return cPrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key)) return _PrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))
@classmethod @classmethod
def _private_key_secret_exponent(cls, private_key): def _private_key_secret_exponent(cls, private_key):
@ -199,40 +154,24 @@ class PrivateKey(_KeyBase):
return int.from_bytes(private_key, 'big') return int.from_bytes(private_key, 'big')
@classmethod @classmethod
def from_seed(cls, ledger, seed) -> 'PrivateKey': def from_seed(cls, ledger, seed):
# This hard-coded message string seems to be coin-independent... # This hard-coded message string seems to be coin-independent...
hmac = hmac_sha512(b'Bitcoin seed', seed) hmac = hmac_sha512(b'Bitcoin seed', seed)
privkey, chain_code = hmac[:32], hmac[32:] privkey, chain_code = hmac[:32], hmac[32:]
return cls(ledger, privkey, chain_code, 0, 0) return cls(ledger, privkey, chain_code, 0, 0)
@classmethod
def from_pem(cls, ledger, pem) -> 'PrivateKey':
der = pem_to_der(pem.encode())
try:
key_int = ECPrivateKey.load(der).native['private_key']
except ValueError:
key_int = PrivateKeyInfo.load(der).native['private_key']['private_key']
private_key = cPrivateKey.from_int(key_int)
return cls(ledger, private_key, bytes((0,)*32), 0, 0)
@classmethod
def from_bytes(cls, ledger, key_bytes) -> 'PrivateKey':
return cls(ledger, cPrivateKey(key_bytes), bytes((0,)*32), 0, 0)
@cachedproperty @cachedproperty
def private_key_bytes(self): def private_key_bytes(self):
""" Return the serialized private key (no leading zero byte). """ """ Return the serialized private key (no leading zero byte). """
return self.signing_key.secret return self.signing_key.secret
@cachedproperty @cachedproperty
def public_key(self) -> PublicKey: def public_key(self):
""" Return the corresponding extended public key. """ """ Return the corresponding extended public key. """
verifying_key = self.signing_key.public_key verifying_key = self.signing_key.public_key
parent_pubkey = self.parent.public_key if self.parent else None parent_pubkey = self.parent.public_key if self.parent else None
return PublicKey( return PubKey(self.ledger, verifying_key, self.chain_code, self.n, self.depth,
self.ledger, verifying_key, self.chain_code, parent_pubkey)
self.n, self.depth, parent_pubkey
)
def ec_point(self): def ec_point(self):
return self.public_key.ec_point() return self.public_key.ec_point()
@ -245,12 +184,11 @@ class PrivateKey(_KeyBase):
""" Return the private key encoded in Wallet Import Format. """ """ Return the private key encoded in Wallet Import Format. """
return self.ledger.private_key_to_wif(self.private_key_bytes) return self.ledger.private_key_to_wif(self.private_key_bytes)
@property
def address(self): def address(self):
""" The public key as a P2PKH address. """ """ The public key as a P2PKH address. """
return self.public_key.address return self.public_key.address
def child(self, n) -> 'PrivateKey': def child(self, n):
""" Return the derived child extended private key at index N.""" """ Return the derived child extended private key at index N."""
if not 0 <= n < (1 << 32): if not 0 <= n < (1 << 32):
raise ValueError('invalid BIP32 private key child number') raise ValueError('invalid BIP32 private key child number')
@ -269,28 +207,6 @@ class PrivateKey(_KeyBase):
""" Produce a signature for piece of data by double hashing it and signing the hash. """ """ Produce a signature for piece of data by double hashing it and signing the hash. """
return self.signing_key.sign(data, hasher=double_sha256) return self.signing_key.sign(data, hasher=double_sha256)
def sign_compact(self, digest):
""" Produce a compact signature. """
key = self.signing_key
signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
signed = libsecp256k1.secp256k1_ecdsa_sign(
key.context.ctx, signature, digest, key.secret,
libsecp256k1_ffi.NULL, libsecp256k1_ffi.NULL
)
if not signed:
raise ValueError('The private key was invalid.')
serialized = libsecp256k1_ffi.new('unsigned char[%d]' % CDATA_SIG_LENGTH)
compacted = libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(
key.context.ctx, serialized, signature
)
if compacted != 1:
raise ValueError('The signature could not be compacted.')
return bytes(libsecp256k1_ffi.buffer(serialized, CDATA_SIG_LENGTH))
def identifier(self): def identifier(self):
"""Return the key's identifier as 20 bytes.""" """Return the key's identifier as 20 bytes."""
return self.public_key.identifier() return self.public_key.identifier()
@ -302,12 +218,9 @@ class PrivateKey(_KeyBase):
b'\0' + self.private_key_bytes b'\0' + self.private_key_bytes
) )
def to_pem(self):
return self.signing_key.to_pem()
def _from_extended_key(ledger, ekey): def _from_extended_key(ledger, ekey):
"""Return a PublicKey or PrivateKey from an extended key raw bytes.""" """Return a PubKey or PrivateKey from an extended key raw bytes."""
if not isinstance(ekey, (bytes, bytearray)): if not isinstance(ekey, (bytes, bytearray)):
raise TypeError('extended key must be raw bytes') raise TypeError('extended key must be raw bytes')
if len(ekey) != 78: if len(ekey) != 78:
@ -319,7 +232,7 @@ def _from_extended_key(ledger, ekey):
if ekey[:4] == ledger.extended_public_key_prefix: if ekey[:4] == ledger.extended_public_key_prefix:
pubkey = ekey[45:] pubkey = ekey[45:]
key = PublicKey(ledger, pubkey, chain_code, n, depth) key = PubKey(ledger, pubkey, chain_code, n, depth)
elif ekey[:4] == ledger.extended_private_key_prefix: elif ekey[:4] == ledger.extended_private_key_prefix:
if ekey[45] != 0: if ekey[45] != 0:
raise ValueError('invalid extended private key prefix byte') raise ValueError('invalid extended private key prefix byte')
@ -337,6 +250,6 @@ def from_extended_key_string(ledger, ekey_str):
xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL 3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL
return a PublicKey or PrivateKey. return a PubKey or PrivateKey.
""" """
return _from_extended_key(ledger, Base58.decode_check(ekey_str)) return _from_extended_key(ledger, Base58.decode_check(ekey_str))
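As orientation for the reverted bip32 classes, a minimal derivation sketch; the ledger object and seed bytes are placeholders, and real seeds come from the account mnemonic.
from lbry.wallet.bip32 import PrivateKey
root = PrivateKey.from_seed(ledger, b'\x01' * 64)  # hypothetical 64-byte seed, ledger assumed in scope
receiving_key = root.child(0).child(5)             # chain 0 (receiving addresses), index 5
pubkey = receiving_key.public_key                  # corresponding extended PubKey
print(receiving_key.address())                     # address() is a plain method again after this revert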

View file

@ -1064,182 +1064,4 @@ HASHES = {
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80', 1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465', 1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6', 1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
} }

View file

@ -141,7 +141,7 @@ class CoinSelector:
_) -> List[OutputEffectiveAmountEstimator]: _) -> List[OutputEffectiveAmountEstimator]:
""" Accumulate UTXOs at random until there is enough to cover the target. """ """ Accumulate UTXOs at random until there is enough to cover the target. """
target = self.target + self.cost_of_change target = self.target + self.cost_of_change
self.random.shuffle(txos, random=self.random.random) # pylint: disable=deprecated-argument self.random.shuffle(txos, self.random.random)
selection = [] selection = []
amount = 0 amount = 0
for coin in txos: for coin in txos:
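Background on the shuffle call above (an aside, not part of the diff): the optional random argument of random.shuffle was deprecated in Python 3.9 and removed in 3.11. The left-hand column keeps it by keyword behind a pylint disable for the deprecation warning, while this revert goes back to passing it positionally, which only works on older interpreters.
import random
items = list(range(10))
random.shuffle(items)                      # the only form supported on Python 3.11+
# random.shuffle(items, random.random)     # deprecated in 3.9, raises TypeError on 3.11+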

View file

@ -9,11 +9,10 @@ from dataclasses import dataclass
from contextvars import ContextVar from contextvars import ContextVar
from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional from typing import Tuple, List, Union, Callable, Any, Awaitable, Iterable, Dict, Optional
from datetime import date from datetime import date
from prometheus_client import Gauge, Counter, Histogram from prometheus_client import Gauge, Counter, Histogram
from lbry.utils import LockWithMetrics from lbry.utils import LockWithMetrics
from .bip32 import PublicKey from .bip32 import PubKey
from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input from .transaction import Transaction, Output, OutputScript, TXRefImmutable, Input
from .constants import TXO_TYPES, CLAIM_TYPES from .constants import TXO_TYPES, CLAIM_TYPES
from .util import date_to_julian_day from .util import date_to_julian_day
@ -976,9 +975,7 @@ class Database(SQLiteMixin):
sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)") sql.append("LEFT JOIN txi ON (txi.position=0 AND txi.txid=txo.txid)")
return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only) return await self.db.execute_fetchall(*query(' '.join(sql), **constraints), read_only=read_only)
async def get_txos( async def get_txos(self, wallet=None, no_tx=False, no_channel_info=False, read_only=False, **constraints):
self, wallet=None, no_tx=False, no_channel_info=False, read_only=False, **constraints
) -> List[Output]:
include_is_spent = constraints.get('include_is_spent', False) include_is_spent = constraints.get('include_is_spent', False)
include_is_my_input = constraints.get('include_is_my_input', False) include_is_my_input = constraints.get('include_is_my_input', False)
include_is_my_output = constraints.pop('include_is_my_output', False) include_is_my_output = constraints.pop('include_is_my_output', False)
@ -1204,14 +1201,13 @@ class Database(SQLiteMixin):
addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints) addresses = await self.select_addresses(', '.join(cols), read_only=read_only, **constraints)
if 'pubkey' in cols: if 'pubkey' in cols:
for address in addresses: for address in addresses:
address['pubkey'] = PublicKey( address['pubkey'] = PubKey(
self.ledger, address.pop('pubkey'), address.pop('chain_code'), self.ledger, address.pop('pubkey'), address.pop('chain_code'),
address.pop('n'), address.pop('depth') address.pop('n'), address.pop('depth')
) )
return addresses return addresses
async def get_address_count(self, cols=None, read_only=False, **constraints): async def get_address_count(self, cols=None, read_only=False, **constraints):
self._clean_txo_constraints_for_aggregation(constraints)
count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints) count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
return count[0]['total'] or 0 return count[0]['total'] or 0
@ -1245,18 +1241,6 @@ class Database(SQLiteMixin):
async def set_address_history(self, address, history): async def set_address_history(self, address, history):
await self._set_address_history(address, history) await self._set_address_history(address, history)
async def is_channel_key_used(self, account, key: PublicKey):
channels = await self.get_txos(
accounts=[account], txo_type=TXO_TYPES['channel'],
no_tx=True, no_channel_info=True
)
other_key_bytes = key.pubkey_bytes
for channel in channels:
claim = channel.can_decode_claim
if claim and claim.channel.public_key_bytes == other_key_bytes:
return True
return False
@staticmethod @staticmethod
def constrain_purchases(constraints): def constrain_purchases(constraints):
accounts = constraints.pop('accounts', None) accounts = constraints.pop('accounts', None)

View file

@ -26,7 +26,7 @@ from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.header import Headers, UnvalidatedHeaders from lbry.wallet.header import Headers, UnvalidatedHeaders
from lbry.wallet.checkpoints import HASHES from lbry.wallet.checkpoints import HASHES
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32 from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from lbry.wallet.bip32 import PublicKey, PrivateKey from lbry.wallet.bip32 import PubKey, PrivateKey
from lbry.wallet.coinselection import CoinSelector from lbry.wallet.coinselection import CoinSelector
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -106,7 +106,7 @@ class Ledger(metaclass=LedgerRegistry):
target_timespan = 150 target_timespan = 150
default_fee_per_byte = 50 default_fee_per_byte = 50
default_fee_per_name_char = 0 default_fee_per_name_char = 200000
checkpoints = HASHES checkpoints = HASHES
@ -226,7 +226,7 @@ class Ledger(metaclass=LedgerRegistry):
return account.get_private_key(address_info['chain'], address_info['pubkey'].n) return account.get_private_key(address_info['chain'], address_info['pubkey'].n)
return None return None
async def get_public_key_for_address(self, wallet, address) -> Optional[PublicKey]: async def get_public_key_for_address(self, wallet, address) -> Optional[PubKey]:
match = await self._get_account_and_address_info_for_address(wallet, address) match = await self._get_account_and_address_info_for_address(wallet, address)
if match: if match:
_, address_info = match _, address_info = match
@ -329,10 +329,10 @@ class Ledger(metaclass=LedgerRegistry):
async def start(self): async def start(self):
if not os.path.exists(self.path): if not os.path.exists(self.path):
os.mkdir(self.path) os.mkdir(self.path)
await asyncio.wait(map(asyncio.create_task, [ await asyncio.wait([
self.db.open(), self.db.open(),
self.headers.open() self.headers.open()
])) ])
fully_synced = self.on_ready.first fully_synced = self.on_ready.first
asyncio.create_task(self.network.start()) asyncio.create_task(self.network.start())
await self.network.on_connected.first await self.network.on_connected.first
@ -466,15 +466,14 @@ class Ledger(metaclass=LedgerRegistry):
async def subscribe_accounts(self): async def subscribe_accounts(self):
if self.network.is_connected and self.accounts: if self.network.is_connected and self.accounts:
log.info("Subscribe to %i accounts", len(self.accounts)) log.info("Subscribe to %i accounts", len(self.accounts))
await asyncio.wait(map(asyncio.create_task, [ await asyncio.wait([
self.subscribe_account(a) for a in self.accounts self.subscribe_account(a) for a in self.accounts
])) ])
async def subscribe_account(self, account: Account): async def subscribe_account(self, account: Account):
for address_manager in account.address_managers.values(): for address_manager in account.address_managers.values():
await self.subscribe_addresses(address_manager, await address_manager.get_addresses()) await self.subscribe_addresses(address_manager, await address_manager.get_addresses())
await account.ensure_address_gap() await account.ensure_address_gap()
await account.deterministic_channel_keys.ensure_cache_primed()
async def unsubscribe_account(self, account: Account): async def unsubscribe_account(self, account: Account):
for address in await account.get_addresses(): for address in await account.get_addresses():
@ -555,7 +554,6 @@ class Ledger(metaclass=LedgerRegistry):
) )
remote_history_txids = {txid for txid, _ in remote_history} remote_history_txids = {txid for txid, _ in remote_history}
async for tx in self.request_synced_transactions(to_request, remote_history_txids, address): async for tx in self.request_synced_transactions(to_request, remote_history_txids, address):
self.maybe_has_channel_key(tx)
pending_synced_history[tx_indexes[tx.id]] = f"{tx.id}:{tx.height}:" pending_synced_history[tx_indexes[tx.id]] = f"{tx.id}:{tx.height}:"
if len(pending_synced_history) % 100 == 0: if len(pending_synced_history) % 100 == 0:
log.info("Syncing address %s: %d/%d", address, len(pending_synced_history), len(to_request)) log.info("Syncing address %s: %d/%d", address, len(pending_synced_history), len(to_request))
@ -623,12 +621,6 @@ class Ledger(metaclass=LedgerRegistry):
tx.is_verified = merkle_root == header['merkle_root'] tx.is_verified = merkle_root == header['merkle_root']
return tx return tx
def maybe_has_channel_key(self, tx):
for txo in tx._outputs:
if txo.can_decode_claim and txo.claim.is_channel:
for account in self.accounts:
account.deterministic_channel_keys.maybe_generate_deterministic_key_for_channel(txo)
async def request_transactions(self, to_request: Tuple[Tuple[str, int], ...], cached=False): async def request_transactions(self, to_request: Tuple[Tuple[str, int], ...], cached=False):
batches = [[]] batches = [[]]
remote_heights = {} remote_heights = {}
@ -722,15 +714,6 @@ class Ledger(metaclass=LedgerRegistry):
return account.address_managers[details['chain']] return account.address_managers[details['chain']]
return None return None
async def broadcast_or_release(self, tx, blocking=False):
try:
await self.broadcast(tx)
except:
await self.release_tx(tx)
raise
if blocking:
await self.wait(tx, timeout=None)
def broadcast(self, tx): def broadcast(self, tx):
# broadcast can't be a retriable call yet # broadcast can't be a retriable call yet
return self.network.broadcast(hexlify(tx.raw).decode()) return self.network.broadcast(hexlify(tx.raw).decode())
@ -789,8 +772,12 @@ class Ledger(metaclass=LedgerRegistry):
include_is_my_output=False, include_is_my_output=False,
include_sent_supports=False, include_sent_supports=False,
include_sent_tips=False, include_sent_tips=False,
include_received_tips=False) -> Tuple[List[Output], dict, int, int]: include_received_tips=False,
hub_server=False) -> Tuple[List[Output], dict, int, int]:
encoded_outputs = await query encoded_outputs = await query
if hub_server:
outputs = Outputs.from_grpc(encoded_outputs)
else:
outputs = Outputs.from_base64(encoded_outputs or '') # TODO: why is the server returning None? outputs = Outputs.from_base64(encoded_outputs or '') # TODO: why is the server returning None?
txs: List[Transaction] = [] txs: List[Transaction] = []
if len(outputs.txs) > 0: if len(outputs.txs) > 0:
@ -867,9 +854,12 @@ class Ledger(metaclass=LedgerRegistry):
txo.received_tips = tips txo.received_tips = tips
return txos, blocked, outputs.offset, outputs.total return txos, blocked, outputs.offset, outputs.total
async def resolve(self, accounts, urls, **kwargs): async def resolve(self, accounts, urls, new_sdk_server=None, **kwargs):
txos = [] txos = []
urls_copy = list(urls) urls_copy = list(urls)
if new_sdk_server:
resolve = partial(self.network.new_resolve, new_sdk_server)
else:
resolve = partial(self.network.retriable_call, self.network.resolve) resolve = partial(self.network.retriable_call, self.network.resolve)
while urls_copy: while urls_copy:
batch, urls_copy = urls_copy[:100], urls_copy[100:] batch, urls_copy = urls_copy[:100], urls_copy[100:]
@ -895,14 +885,17 @@ class Ledger(metaclass=LedgerRegistry):
return await self.network.sum_supports(new_sdk_server, **kwargs) return await self.network.sum_supports(new_sdk_server, **kwargs)
async def claim_search( async def claim_search(
self, accounts, self, accounts, include_purchase_receipt=False, include_is_my_output=False,
include_purchase_receipt=False, new_sdk_server=None, **kwargs) -> Tuple[List[Output], dict, int, int]:
include_is_my_output=False, if new_sdk_server:
**kwargs) -> Tuple[List[Output], dict, int, int]: claim_search = partial(self.network.new_claim_search, new_sdk_server)
else:
claim_search = self.network.claim_search
return await self._inflate_outputs( return await self._inflate_outputs(
self.network.claim_search(**kwargs), accounts, claim_search(**kwargs), accounts,
include_purchase_receipt=include_purchase_receipt, include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output include_is_my_output=include_is_my_output,
hub_server=new_sdk_server is not None
) )
# async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output: # async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
@ -938,7 +931,9 @@ class Ledger(metaclass=LedgerRegistry):
"%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ", "%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
account.id, balance, total_receiving, account.receiving.gap, total_change, account.id, balance, total_receiving, account.receiving.gap, total_change,
account.change.gap, channel_count, len(account.channel_keys), claim_count) account.change.gap, channel_count, len(account.channel_keys), claim_count)
except Exception: except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception( log.exception(
'Failed to display wallet state, please file issue ' 'Failed to display wallet state, please file issue '
'for this bug along with the traceback you see below:') 'for this bug along with the traceback you see below:')
@ -961,7 +956,9 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = [p.purchased_claim_id for p in purchases] claim_ids = [p.purchased_claim_id for p in purchases]
try: try:
resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids) resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception: except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up purchased claim ids:") log.exception("Resolve failed while looking up purchased claim ids:")
resolved = [] resolved = []
lookup = {claim.claim_id: claim for claim in resolved} lookup = {claim.claim_id: claim for claim in resolved}
@ -1041,7 +1038,9 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset] claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
try: try:
resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids) resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception: except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up collection claim ids:") log.exception("Resolve failed while looking up collection claim ids:")
return [] return []
claims = [] claims = []
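For readers skimming this file: the resolve/claim_search paths above pick a resolver with functools.partial (either the regular hub RPC or, when new_sdk_server is passed, the newer server) and walk the URL list in batches of 100. A minimal sketch of that dispatch shape follows; both resolver callables are stand-ins, not the real Network methods.

    import asyncio
    from functools import partial

    async def fake_new_resolve(server, urls):      # stand-in for network.new_resolve
        return {url: {"server": server} for url in urls}

    async def fake_legacy_resolve(urls):           # stand-in for network.resolve
        return {url: {"server": "legacy"} for url in urls}

    async def resolve(urls, new_sdk_server=None):
        if new_sdk_server:
            resolver = partial(fake_new_resolve, new_sdk_server)
        else:
            resolver = fake_legacy_resolve
        results, pending = {}, list(urls)
        while pending:
            batch, pending = pending[:100], pending[100:]   # 100 urls per request, as above
            results.update(await resolver(batch))
        return results

    print(asyncio.run(resolve(["lbry://one", "lbry://two"], new_sdk_server="http://hub:50001/rpc")))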

View file

@ -3,6 +3,7 @@ import json
import typing import typing
import logging import logging
import asyncio import asyncio
from distutils.util import strtobool
from binascii import unhexlify from binascii import unhexlify
from decimal import Decimal from decimal import Decimal
@ -182,6 +183,7 @@ class WalletManager:
}[config.blockchain_name] }[config.blockchain_name]
ledger_config = { ledger_config = {
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
'auto_connect': True, 'auto_connect': True,
'explicit_servers': [], 'explicit_servers': [],
'hub_timeout': config.hub_timeout, 'hub_timeout': config.hub_timeout,
@ -236,6 +238,7 @@ class WalletManager:
async def reset(self): async def reset(self):
self.ledger.config = { self.ledger.config = {
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
'auto_connect': True, 'auto_connect': True,
'explicit_servers': [], 'explicit_servers': [],
'default_servers': Config.lbryum_servers.default, 'default_servers': Config.lbryum_servers.default,
@ -317,4 +320,10 @@ class WalletManager:
) )
async def broadcast_or_release(self, tx, blocking=False): async def broadcast_or_release(self, tx, blocking=False):
await self.ledger.broadcast_or_release(tx, blocking=blocking) try:
await self.ledger.broadcast(tx)
except:
await self.ledger.release_tx(tx)
raise
if blocking:
await self.ledger.wait(tx, timeout=None)
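The broadcast_or_release flow moved here is small but easy to misread in diff form: broadcast, release the reserved outputs and re-raise on any failure, then optionally block until the transaction is seen. A self-contained sketch with a stub ledger, assuming nothing beyond what the hunk shows:

    import asyncio

    class StubLedger:
        async def broadcast(self, tx):
            print(f"broadcasting {tx}")
        async def release_tx(self, tx):
            print(f"releasing reserved outputs for {tx}")
        async def wait(self, tx, timeout=None):
            print(f"waiting for {tx} to be accepted")

    async def broadcast_or_release(ledger, tx, blocking=False):
        try:
            await ledger.broadcast(tx)
        except Exception:
            await ledger.release_tx(tx)   # free the reserved UTXOs before propagating
            raise
        if blocking:
            await ledger.wait(tx, timeout=None)

    asyncio.run(broadcast_or_release(StubLedger(), "tx1", blocking=True))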

View file

@ -7,6 +7,9 @@ from time import perf_counter
from collections import defaultdict from collections import defaultdict
from typing import Dict, Optional, Tuple from typing import Dict, Optional, Tuple
import aiohttp import aiohttp
import grpc
from lbry.schema.types.v2 import hub_pb2_grpc
from lbry.schema.types.v2.hub_pb2 import SearchRequest
from lbry import __version__ from lbry import __version__
from lbry.utils import resolve_host from lbry.utils import resolve_host
@ -117,7 +120,7 @@ class ClientSession(BaseClientSession):
) )
else: else:
await asyncio.sleep(max(0, max_idle - (now - self.last_send))) await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
except (Exception, asyncio.CancelledError) as err: except Exception as err:
if isinstance(err, asyncio.CancelledError): if isinstance(err, asyncio.CancelledError):
log.info("closing connection to %s:%i", *self.server) log.info("closing connection to %s:%i", *self.server)
else: else:
@ -214,7 +217,7 @@ class Network:
def loop_task_done_callback(f): def loop_task_done_callback(f):
try: try:
f.result() f.result()
except (Exception, asyncio.CancelledError): except Exception:
if self.running: if self.running:
log.exception("wallet server connection loop crashed") log.exception("wallet server connection loop crashed")
@ -312,8 +315,7 @@ class Network:
sleep_delay = 30 sleep_delay = 30
while self.running: while self.running:
await asyncio.wait( await asyncio.wait(
map(asyncio.create_task, [asyncio.sleep(30), self._urgent_need_reconnect.wait()]), [asyncio.sleep(30), self._urgent_need_reconnect.wait()], return_when=asyncio.FIRST_COMPLETED
return_when=asyncio.FIRST_COMPLETED
) )
if self._urgent_need_reconnect.is_set(): if self._urgent_need_reconnect.is_set():
sleep_delay = 30 sleep_delay = 30
@ -339,13 +341,14 @@ class Network:
try: try:
if not self._urgent_need_reconnect.is_set(): if not self._urgent_need_reconnect.is_set():
await asyncio.wait( await asyncio.wait(
[self._keepalive_task, asyncio.create_task(self._urgent_need_reconnect.wait())], [self._keepalive_task, self._urgent_need_reconnect.wait()],
return_when=asyncio.FIRST_COMPLETED return_when=asyncio.FIRST_COMPLETED
) )
else: else:
await self._keepalive_task await self._keepalive_task
if self._urgent_need_reconnect.is_set(): if self._urgent_need_reconnect.is_set():
log.warning("urgent reconnect needed") log.warning("urgent reconnect needed")
self._urgent_need_reconnect.clear()
if self._keepalive_task and not self._keepalive_task.done(): if self._keepalive_task and not self._keepalive_task.done():
self._keepalive_task.cancel() self._keepalive_task.cancel()
except asyncio.CancelledError: except asyncio.CancelledError:
@ -391,6 +394,7 @@ class Network:
log.warning("Wallet server call timed out, retrying.") log.warning("Wallet server call timed out, retrying.")
except ConnectionError: except ConnectionError:
log.warning("connection error") log.warning("connection error")
raise asyncio.CancelledError() # if we got here, we are shutting down raise asyncio.CancelledError() # if we got here, we are shutting down
def _update_remote_height(self, header_args): def _update_remote_height(self, header_args):
@ -473,6 +477,21 @@ class Network:
def claim_search(self, session_override=None, **kwargs): def claim_search(self, session_override=None, **kwargs):
return self.rpc('blockchain.claimtrie.search', kwargs, False, session_override) return self.rpc('blockchain.claimtrie.search', kwargs, False, session_override)
async def new_resolve(self, server, urls):
message = {"method": "resolve", "params": {"urls": urls, "protobuf": True}}
async with self.aiohttp_session.post(server, json=message) as r:
result = await r.json()
return result['result']
async def new_claim_search(self, server, **kwargs):
async with grpc.aio.insecure_channel(server) as channel:
stub = hub_pb2_grpc.HubStub(channel)
try:
response = await stub.Search(SearchRequest(**kwargs))
except grpc.aio.AioRpcError as error:
raise RPCError(error.code(), error.details())
return response
async def sum_supports(self, server, **kwargs): async def sum_supports(self, server, **kwargs):
message = {"method": "support_sum", "params": kwargs} message = {"method": "support_sum", "params": kwargs}
async with self.aiohttp_session.post(server, json=message) as r: async with self.aiohttp_session.post(server, json=message) as r:
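new_resolve and sum_supports above share the same call shape: POST a {"method", "params"} JSON body to the hub's HTTP endpoint and read the "result" field. A rough sketch of that shape with a plain aiohttp session; the URL is a placeholder and the call is left commented out because nothing is listening there by default.

    import asyncio
    import aiohttp

    async def hub_call(server, method, **params):
        message = {"method": method, "params": params}
        async with aiohttp.ClientSession() as session:
            async with session.post(server, json=message) as response:
                payload = await response.json()
                return payload["result"]

    async def main():
        result = await hub_call("http://localhost:50002/rpc", "resolve",
                                urls=["lbry://example"], protobuf=True)
        print(result)

    # asyncio.run(main())  # uncomment with a reachable hub URL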

View file

@ -1,2 +1,5 @@
__hub_url__ = (
"https://github.com/lbryio/hub/releases/download/v0.2022.01.21.1/hub"
)
from lbry.wallet.orchstr8.node import Conductor from lbry.wallet.orchstr8.node import Conductor
from lbry.wallet.orchstr8.service import ConductorService from lbry.wallet.orchstr8.service import ConductorService

View file

@ -1,5 +1,6 @@
# pylint: disable=import-error # pylint: disable=import-error
import os import os
import signal
import json import json
import shutil import shutil
import asyncio import asyncio
@ -9,29 +10,28 @@ import logging
import tempfile import tempfile
import subprocess import subprocess
import platform import platform
from distutils.util import strtobool
from binascii import hexlify from binascii import hexlify
from typing import Type, Optional from typing import Type, Optional
import urllib.request import urllib.request
from uuid import uuid4 from uuid import uuid4
try:
from scribe.env import Env
from scribe.hub.service import HubServerService
from scribe.elasticsearch.service import ElasticSyncService
from scribe.blockchain.service import BlockchainProcessorService
except ImportError:
pass
import lbry import lbry
from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
from lbry.conf import KnownHubsList, Config from lbry.conf import KnownHubsList, Config
from lbry.wallet.orchstr8 import __hub_url__
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
try:
from hub.herald.env import ServerEnv
from hub.scribe.env import BlockchainEnv
from hub.elastic_sync.env import ElasticEnv
from hub.herald.service import HubServerService
from hub.elastic_sync.service import ElasticSyncService
from hub.scribe.service import BlockchainProcessorService
except ImportError:
pass
def get_lbcd_node_from_ledger(ledger_module): def get_lbcd_node_from_ledger(ledger_module):
return LBCDNode( return LBCDNode(
@ -59,10 +59,13 @@ class Conductor:
self.wallet_node = WalletNode( self.wallet_node = WalletNode(
self.manager_module, RegTestLedger, default_seed=seed self.manager_module, RegTestLedger, default_seed=seed
) )
self.hub_node = HubNode(__hub_url__, "hub", self.spv_node)
self.lbcd_started = False self.lbcd_started = False
self.lbcwallet_started = False self.lbcwallet_started = False
self.spv_started = False self.spv_started = False
self.wallet_started = False self.wallet_started = False
self.hub_started = False
self.log = log.getChild('conductor') self.log = log.getChild('conductor')
@ -76,6 +79,17 @@ class Conductor:
await self.lbcd_node.stop(cleanup) await self.lbcd_node.stop(cleanup)
self.lbcd_started = False self.lbcd_started = False
async def start_hub(self):
if not self.hub_started:
await self.hub_node.start()
await self.lbcwallet_node.running.wait()
self.hub_started = True
async def stop_hub(self, cleanup=True):
if self.hub_started:
await self.hub_node.stop(cleanup)
self.hub_started = False
async def start_spv(self): async def start_spv(self):
if not self.spv_started: if not self.spv_started:
await self.spv_node.start(self.lbcwallet_node) await self.spv_node.start(self.lbcwallet_node)
@ -116,11 +130,13 @@ class Conductor:
await self.start_lbcd() await self.start_lbcd()
await self.start_lbcwallet() await self.start_lbcwallet()
await self.start_spv() await self.start_spv()
await self.start_hub()
await self.start_wallet() await self.start_wallet()
async def stop(self): async def stop(self):
all_the_stops = [ all_the_stops = [
self.stop_wallet, self.stop_wallet,
self.stop_hub,
self.stop_spv, self.stop_spv,
self.stop_lbcwallet, self.stop_lbcwallet,
self.stop_lbcd self.stop_lbcd
@ -165,6 +181,7 @@ class WalletNode:
self.manager = self.manager_class.from_config({ self.manager = self.manager_class.from_config({
'ledgers': { 'ledgers': {
self.ledger_class.get_id(): { self.ledger_class.get_id(): {
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
'api_port': self.port, 'api_port': self.port,
'explicit_servers': [(spv_node.hostname, spv_node.port)], 'explicit_servers': [(spv_node.hostname, spv_node.port)],
'default_servers': Config.lbryum_servers.default, 'default_servers': Config.lbryum_servers.default,
@ -172,7 +189,6 @@ class WalletNode:
'known_hubs': config.known_hubs if config else KnownHubsList(), 'known_hubs': config.known_hubs if config else KnownHubsList(),
'hub_timeout': 30, 'hub_timeout': 30,
'concurrent_hub_requests': 32, 'concurrent_hub_requests': 32,
'fee_per_name_char': 200000
} }
}, },
'wallets': [wallet_file_name] 'wallets': [wallet_file_name]
@ -214,7 +230,6 @@ class SPVNode:
self.port = 50001 + node_number # avoid conflict with default daemon self.port = 50001 + node_number # avoid conflict with default daemon
self.udp_port = self.port self.udp_port = self.port
self.elastic_notifier_port = 19080 + node_number self.elastic_notifier_port = 19080 + node_number
self.elastic_services = f'localhost:9200/localhost:{self.elastic_notifier_port}'
self.session_timeout = 600 self.session_timeout = 600
self.stopped = True self.stopped = True
self.index_name = uuid4().hex self.index_name = uuid4().hex
@ -236,34 +251,24 @@ class SPVNode:
'host': self.hostname, 'host': self.hostname,
'tcp_port': self.port, 'tcp_port': self.port,
'udp_port': self.udp_port, 'udp_port': self.udp_port,
'elastic_services': self.elastic_services, 'elastic_notifier_port': self.elastic_notifier_port,
'session_timeout': self.session_timeout, 'session_timeout': self.session_timeout,
'max_query_workers': 0, 'max_query_workers': 0,
'es_index_prefix': self.index_name, 'es_index_prefix': self.index_name,
'chain': 'regtest', 'chain': 'regtest'
'index_address_status': False
} }
if extraconf: if extraconf:
conf.update(extraconf) conf.update(extraconf)
self.writer = BlockchainProcessorService( env = Env(**conf)
BlockchainEnv(db_dir=self.data_path, daemon_url=lbcwallet_node.rpc_url, self.writer = BlockchainProcessorService(env)
reorg_limit=100, max_query_workers=0, chain='regtest', index_address_status=False) self.server = HubServerService(env)
) self.es_writer = ElasticSyncService(env)
self.server = HubServerService(ServerEnv(**conf))
self.es_writer = ElasticSyncService(
ElasticEnv(
db_dir=self.data_path, reorg_limit=100, max_query_workers=0, chain='regtest',
elastic_notifier_port=self.elastic_notifier_port,
es_index_prefix=self.index_name,
filtering_channel_ids=(extraconf or {}).get('filtering_channel_ids'),
blocking_channel_ids=(extraconf or {}).get('blocking_channel_ids')
)
)
await self.writer.start() await self.writer.start()
await self.es_writer.start() await self.es_writer.start()
await self.server.start() await self.server.start()
except Exception as e: except Exception as e:
self.stopped = True self.stopped = True
if not isinstance(e, asyncio.CancelledError):
log.exception("failed to start spv node") log.exception("failed to start spv node")
raise e raise e
@ -673,3 +678,139 @@ class LBCWalletNode:
def get_raw_transaction(self, txid): def get_raw_transaction(self, txid):
return self._cli_cmnd('getrawtransaction', txid, '1') return self._cli_cmnd('getrawtransaction', txid, '1')
class HubProcess(asyncio.SubprocessProtocol):
def __init__(self, ready, stopped):
self.ready = ready
self.stopped = stopped
self.log = log.getChild('hub')
self.transport = None
def pipe_data_received(self, fd, data):
self.stopped.clear()
self.ready.set()
if self.log:
self.log.info(data.decode())
if b'error' in data.lower():
self.ready.set()
raise SystemError(data.decode())
if b'listening on' in data:
self.ready.set()
str_lines = str(data.decode()).split("\n")
for line in str_lines:
if 'releaseTime' in line:
print(line)
def process_exited(self):
self.ready.clear()
self.stopped.set()
async def stop(self):
t = asyncio.create_task(self.stopped.wait())
try:
self.transport.send_signal(signal.SIGINT)
await asyncio.wait_for(t, 3)
# log.warning("stopped go hub")
except asyncio.TimeoutError:
if not t.done():
t.cancel()
self.transport.terminate()
await self.stopped.wait()
log.warning("terminated go hub")
class HubNode:
def __init__(self, url, daemon, spv_node):
self.spv_node = spv_node
self.latest_release_url = url
self.project_dir = os.path.dirname(os.path.dirname(__file__))
self.bin_dir = os.path.join(self.project_dir, 'bin')
self.daemon_bin = os.path.join(self.bin_dir, daemon)
self.cli_bin = os.path.join(self.bin_dir, daemon)
self.log = log.getChild('hub')
self.transport = None
self.protocol = None
self.hostname = 'localhost'
self.rpcport = 50051 # avoid conflict with default rpc port
self._stopped = asyncio.Event()
self.running = asyncio.Event()
@property
def stopped(self):
return not self.running.is_set()
@property
def exists(self):
return (
os.path.exists(self.cli_bin) and
os.path.exists(self.daemon_bin)
)
def download(self):
downloaded_file = os.path.join(
self.bin_dir,
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
)
if not os.path.exists(self.bin_dir):
os.mkdir(self.bin_dir)
if not os.path.exists(downloaded_file):
self.log.info('Downloading: %s', self.latest_release_url)
with urllib.request.urlopen(self.latest_release_url) as response:
with open(downloaded_file, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
self.log.info('Extracting: %s', downloaded_file)
if downloaded_file.endswith('.zip'):
with zipfile.ZipFile(downloaded_file) as dotzip:
dotzip.extractall(self.bin_dir)
# zipfile bug https://bugs.python.org/issue15795
os.chmod(self.cli_bin, 0o755)
os.chmod(self.daemon_bin, 0o755)
elif downloaded_file.endswith('.tar.gz'):
with tarfile.open(downloaded_file) as tar:
tar.extractall(self.bin_dir)
os.chmod(self.daemon_bin, 0o755)
return self.exists
def ensure(self):
return self.exists or self.download()
async def start(self):
assert self.ensure()
loop = asyncio.get_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
command = [
self.daemon_bin, 'serve', '--esindex', self.spv_node.index_name + 'claims', '--debug'
]
self.log.info(' '.join(command))
self.protocol = HubProcess(self.running, self._stopped)
try:
self.transport, _ = await loop.subprocess_exec(
lambda: self.protocol, *command
)
self.protocol.transport = self.transport
except Exception as e:
log.exception('failed to start go hub', exc_info=e)
raise e
await self.protocol.ready.wait()
async def stop(self, cleanup=True):
try:
if self.protocol:
await self.protocol.stop()
except Exception as e:
log.exception('failed to stop go hub', exc_info=e)
raise e
finally:
if cleanup:
self.cleanup()
def cleanup(self):
pass
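The HubProcess/HubNode pair added here follows a common asyncio SubprocessProtocol pattern: one Event is set on first output ("ready"), another on exit ("stopped"), and stop() sends SIGINT before falling back to terminate(). Below is a self-contained sketch of that pattern using a throwaway python child instead of the hub binary (POSIX only; send_signal with SIGINT behaves differently on Windows).

    import asyncio
    import signal
    import sys

    class ChildProtocol(asyncio.SubprocessProtocol):
        def __init__(self, ready, stopped):
            self.ready, self.stopped = ready, stopped
            self.transport = None

        def pipe_data_received(self, fd, data):
            self.ready.set()            # first output: treat the child as "listening"

        def process_exited(self):
            self.stopped.set()

    async def main():
        loop = asyncio.get_running_loop()
        ready, stopped = asyncio.Event(), asyncio.Event()
        transport, protocol = await loop.subprocess_exec(
            lambda: ChildProtocol(ready, stopped),
            sys.executable, "-u", "-c", "print('listening on 0'); import time; time.sleep(30)"
        )
        protocol.transport = transport
        await ready.wait()
        transport.send_signal(signal.SIGINT)            # polite shutdown, as HubProcess.stop() does
        try:
            await asyncio.wait_for(stopped.wait(), 3)
        except asyncio.TimeoutError:
            transport.terminate()                       # hard stop if SIGINT was ignored
            await stopped.wait()

    asyncio.run(main())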

View file

@ -395,8 +395,8 @@ class RPCSession(SessionBase):
namespace=NAMESPACE, labelnames=("version",) namespace=NAMESPACE, labelnames=("version",)
) )
def __init__(self, *, framer=None, connection=None): def __init__(self, *, framer=None, loop=None, connection=None):
super().__init__(framer=framer) super().__init__(framer=framer, loop=loop)
self.connection = connection or self.default_connection() self.connection = connection or self.default_connection()
self.client_version = 'unknown' self.client_version = 'unknown'

View file

@ -17,7 +17,6 @@ OP_HASH160 = 0xa9
OP_EQUALVERIFY = 0x88 OP_EQUALVERIFY = 0x88
OP_CHECKSIG = 0xac OP_CHECKSIG = 0xac
OP_CHECKMULTISIG = 0xae OP_CHECKMULTISIG = 0xae
OP_CHECKLOCKTIMEVERIFY = 0xb1
OP_EQUAL = 0x87 OP_EQUAL = 0x87
OP_PUSHDATA1 = 0x4c OP_PUSHDATA1 = 0x4c
OP_PUSHDATA2 = 0x4d OP_PUSHDATA2 = 0x4d
@ -277,7 +276,7 @@ class Template:
elif isinstance(opcode, PUSH_INTEGER): elif isinstance(opcode, PUSH_INTEGER):
data = values[opcode.name] data = values[opcode.name]
source.write_many(push_data( source.write_many(push_data(
data.to_bytes((data.bit_length() + 8) // 8, byteorder='little', signed=True) data.to_bytes((data.bit_length() + 7) // 8, byteorder='little')
)) ))
elif isinstance(opcode, PUSH_SUBSCRIPT): elif isinstance(opcode, PUSH_SUBSCRIPT):
data = values[opcode.name] data = values[opcode.name]
@ -358,27 +357,19 @@ class InputScript(Script):
REDEEM_PUBKEY_HASH = Template('pubkey_hash', ( REDEEM_PUBKEY_HASH = Template('pubkey_hash', (
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey') PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey')
)) ))
MULTI_SIG_SCRIPT = Template('multi_sig', ( REDEEM_SCRIPT = Template('script', (
SMALL_INTEGER('signatures_count'), PUSH_MANY('pubkeys'), SMALL_INTEGER('pubkeys_count'), SMALL_INTEGER('signatures_count'), PUSH_MANY('pubkeys'), SMALL_INTEGER('pubkeys_count'),
OP_CHECKMULTISIG OP_CHECKMULTISIG
)) ))
REDEEM_SCRIPT_HASH_MULTI_SIG = Template('script_hash+multi_sig', ( REDEEM_SCRIPT_HASH = Template('script_hash', (
OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', MULTI_SIG_SCRIPT) OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', REDEEM_SCRIPT)
))
TIME_LOCK_SCRIPT = Template('timelock', (
PUSH_INTEGER('height'), OP_CHECKLOCKTIMEVERIFY, OP_DROP,
# rest is identical to OutputScript.PAY_PUBKEY_HASH:
OP_DUP, OP_HASH160, PUSH_SINGLE('pubkey_hash'), OP_EQUALVERIFY, OP_CHECKSIG
))
REDEEM_SCRIPT_HASH_TIME_LOCK = Template('script_hash+timelock', (
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey'), PUSH_SUBSCRIPT('script', TIME_LOCK_SCRIPT)
)) ))
templates = [ templates = [
REDEEM_PUBKEY, REDEEM_PUBKEY,
REDEEM_PUBKEY_HASH, REDEEM_PUBKEY_HASH,
REDEEM_SCRIPT_HASH_TIME_LOCK, REDEEM_SCRIPT_HASH,
REDEEM_SCRIPT_HASH_MULTI_SIG, REDEEM_SCRIPT
] ]
@classmethod @classmethod
@ -389,37 +380,19 @@ class InputScript(Script):
}) })
@classmethod @classmethod
def redeem_multi_sig_script_hash(cls, signatures, pubkeys): def redeem_script_hash(cls, signatures, pubkeys):
return cls(template=cls.REDEEM_SCRIPT_HASH_MULTI_SIG, values={ return cls(template=cls.REDEEM_SCRIPT_HASH, values={
'signatures': signatures, 'signatures': signatures,
'script': cls(template=cls.MULTI_SIG_SCRIPT, values={ 'script': cls.redeem_script(signatures, pubkeys)
})
@classmethod
def redeem_script(cls, signatures, pubkeys):
return cls(template=cls.REDEEM_SCRIPT, values={
'signatures_count': len(signatures), 'signatures_count': len(signatures),
'pubkeys': pubkeys, 'pubkeys': pubkeys,
'pubkeys_count': len(pubkeys) 'pubkeys_count': len(pubkeys)
}) })
})
@classmethod
def redeem_time_lock_script_hash(cls, signature, pubkey, height=None, pubkey_hash=None, script_source=None):
if height and pubkey_hash:
script = cls(template=cls.TIME_LOCK_SCRIPT, values={
'height': height,
'pubkey_hash': pubkey_hash
})
elif script_source:
script = cls(source=script_source, template=cls.TIME_LOCK_SCRIPT)
script.parse(script.template)
else:
raise ValueError("script_source or both height and pubkey_hash are required.")
return cls(template=cls.REDEEM_SCRIPT_HASH_TIME_LOCK, values={
'signature': signature,
'pubkey': pubkey,
'script': script
})
@property
def is_script_hash(self):
return self.template.name.startswith('script_hash+')
class OutputScript(Script): class OutputScript(Script):
@ -487,6 +460,21 @@ class OutputScript(Script):
UPDATE_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes UPDATE_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
)) ))
SELL_SCRIPT = Template('sell_script', (
OP_VERIFY, OP_DROP, OP_DROP, OP_DROP, PUSH_INTEGER('price'), OP_PRICECHECK
))
SELL_CLAIM = Template('sell_claim+pay_script_hash', (
OP_SELL_CLAIM, PUSH_SINGLE('claim_id'), PUSH_SUBSCRIPT('sell_script', SELL_SCRIPT),
PUSH_SUBSCRIPT('receive_script', InputScript.REDEEM_SCRIPT), OP_2DROP, OP_2DROP
) + PAY_SCRIPT_HASH.opcodes)
BUY_CLAIM = Template('buy_claim+pay_script_hash', (
OP_BUY_CLAIM, PUSH_SINGLE('sell_id'),
PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim_version'),
PUSH_SINGLE('owner_pubkey_hash'), PUSH_SINGLE('negotiation_signature'),
OP_2DROP, OP_2DROP, OP_2DROP,
) + PAY_SCRIPT_HASH.opcodes)
templates = [ templates = [
PAY_PUBKEY_FULL, PAY_PUBKEY_FULL,
PAY_PUBKEY_HASH, PAY_PUBKEY_HASH,
@ -501,6 +489,8 @@ class OutputScript(Script):
SUPPORT_CLAIM_DATA_SCRIPT, SUPPORT_CLAIM_DATA_SCRIPT,
UPDATE_CLAIM_PUBKEY, UPDATE_CLAIM_PUBKEY,
UPDATE_CLAIM_SCRIPT, UPDATE_CLAIM_SCRIPT,
SELL_CLAIM, SELL_SCRIPT,
BUY_CLAIM,
] ]
@classmethod @classmethod
@ -560,6 +550,30 @@ class OutputScript(Script):
'pubkey_hash': pubkey_hash 'pubkey_hash': pubkey_hash
}) })
@classmethod
def sell_script(cls, price):
return cls(template=cls.SELL_SCRIPT, values={
'price': price,
})
@classmethod
def sell_claim(cls, claim_id, price, signatures, pubkeys):
return cls(template=cls.SELL_CLAIM, values={
'claim_id': claim_id,
'sell_script': OutputScript.sell_script(price),
'receive_script': InputScript.redeem_script(signatures, pubkeys)
})
@classmethod
def buy_claim(cls, sell_id, claim_id, claim_version, owner_pubkey_hash, negotiation_signature):
return cls(template=cls.BUY_CLAIM, values={
'sell_id': sell_id,
'claim_id': claim_id,
'claim_version': claim_version,
'owner_pubkey_hash': owner_pubkey_hash,
'negotiation_signature': negotiation_signature,
})
@property @property
def is_pay_pubkey_hash(self): def is_pay_pubkey_hash(self):
return self.template.name.endswith('pay_pubkey_hash') return self.template.name.endswith('pay_pubkey_hash')
@ -588,6 +602,17 @@ class OutputScript(Script):
def is_support_claim_data(self): def is_support_claim_data(self):
return self.template.name.startswith('support_claim+data+') return self.template.name.startswith('support_claim+data+')
@property
def is_sell_claim(self):
return self.template.name.startswith('sell_claim+')
@property
def is_buy_claim(self):
return self.template.name.startswith('buy_claim+')
@property @property
def is_claim_involved(self): def is_claim_involved(self):
return any((self.is_claim_name, self.is_support_claim, self.is_update_claim)) return any((
self.is_claim_name, self.is_support_claim, self.is_update_claim,
self.is_sell_claim, self.is_buy_claim
))
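The REDEEM_SCRIPT template restored earlier in this file describes the standard multisig layout: OP_m, the public keys, OP_n, OP_CHECKMULTISIG. A toy sketch of how those bytes are assembled, with fake 33-byte keys standing in for real compressed pubkeys:

    OP_CHECKMULTISIG = 0xae

    def small_int(n):                 # OP_1..OP_16 encoding
        assert 1 <= n <= 16
        return bytes([0x50 + n])

    def push(data):                   # direct push opcode for data shorter than 76 bytes
        assert len(data) < 76
        return bytes([len(data)]) + data

    def multi_sig_script(pubkeys, signatures_required):
        script = small_int(signatures_required)
        for pubkey in pubkeys:
            script += push(pubkey)
        return script + small_int(len(pubkeys)) + bytes([OP_CHECKMULTISIG])

    fake_keys = [bytes([i]) * 33 for i in (1, 2, 3)]
    print(multi_sig_script(fake_keys, 2).hex())       # 2-of-3 redeem script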

View file

@ -1,9 +1,19 @@
import struct import struct
import hashlib
import logging import logging
import typing import typing
import asyncio
from binascii import hexlify, unhexlify from binascii import hexlify, unhexlify
from typing import List, Iterable, Optional, Tuple from typing import List, Iterable, Optional, Tuple
import ecdsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
from cryptography.exceptions import InvalidSignature
from lbry.error import InsufficientFundsError from lbry.error import InsufficientFundsError
from lbry.crypto.hash import hash160, sha256 from lbry.crypto.hash import hash160, sha256
from lbry.crypto.base58 import Base58 from lbry.crypto.base58 import Base58
@ -18,7 +28,6 @@ from .constants import COIN, DUST, NULL_HASH32
from .bcd_data_stream import BCDataStream from .bcd_data_stream import BCDataStream
from .hash import TXRef, TXRefImmutable from .hash import TXRef, TXRefImmutable
from .util import ReadOnlyList from .util import ReadOnlyList
from .bip32 import PrivateKey, PublicKey
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.wallet.account import Account from lbry.wallet.account import Account
@ -145,14 +154,6 @@ class Input(InputOutput):
script = InputScript.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY) script = InputScript.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
return cls(txo.ref, script) return cls(txo.ref, script)
@classmethod
def spend_time_lock(cls, txo: 'Output', script_source: bytes) -> 'Input':
""" Create an input to spend time lock script."""
script = InputScript.redeem_time_lock_script_hash(
cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY, script_source=script_source
)
return cls(txo.ref, script)
@property @property
def amount(self) -> int: def amount(self) -> int:
""" Amount this input adds to the transaction. """ """ Amount this input adds to the transaction. """
@ -221,8 +222,7 @@ class Output(InputOutput):
is_my_output: Optional[bool] = None, is_my_input: Optional[bool] = None, is_my_output: Optional[bool] = None, is_my_input: Optional[bool] = None,
sent_supports: Optional[int] = None, sent_tips: Optional[int] = None, sent_supports: Optional[int] = None, sent_tips: Optional[int] = None,
received_tips: Optional[int] = None, received_tips: Optional[int] = None,
channel: Optional['Output'] = None, channel: Optional['Output'] = None, private_key: Optional[str] = None
private_key: Optional[PrivateKey] = None
) -> None: ) -> None:
super().__init__(tx_ref, position) super().__init__(tx_ref, position)
self.amount = amount self.amount = amount
@ -235,7 +235,7 @@ class Output(InputOutput):
self.sent_tips = sent_tips self.sent_tips = sent_tips
self.received_tips = received_tips self.received_tips = received_tips
self.channel = channel self.channel = channel
self.private_key: PrivateKey = private_key self.private_key = private_key
self.purchase: 'Output' = None # txo containing purchase metadata self.purchase: 'Output' = None # txo containing purchase metadata
self.purchased_claim: 'Output' = None # resolved claim pointed to by purchase self.purchased_claim: 'Output' = None # resolved claim pointed to by purchase
self.purchase_receipt: 'Output' = None # txo representing purchase receipt for this claim self.purchase_receipt: 'Output' = None # txo representing purchase receipt for this claim
@ -425,15 +425,25 @@ class Output(InputOutput):
] ]
return sha256(b''.join(pieces)) return sha256(b''.join(pieces))
def get_encoded_signature(self):
signature = hexlify(self.signable.signature)
r = int(signature[:int(len(signature)/2)], 16)
s = int(signature[int(len(signature)/2):], 16)
return ecdsa.util.sigencode_der(r, s, len(signature)*4)
@staticmethod @staticmethod
def is_signature_valid(signature, digest, public_key_bytes): def is_signature_valid(encoded_signature, signature_digest, public_key_bytes):
return PublicKey\ try:
.from_compressed(public_key_bytes)\ public_key = load_der_public_key(public_key_bytes, default_backend())
.verify(signature, digest) public_key.verify(encoded_signature, signature_digest, ec.ECDSA(Prehashed(hashes.SHA256())))
return True
except (ValueError, InvalidSignature):
pass
return False
def is_signed_by(self, channel: 'Output', ledger=None): def is_signed_by(self, channel: 'Output', ledger=None):
return self.is_signature_valid( return self.is_signature_valid(
self.signable.signature, self.get_encoded_signature(),
self.get_signature_digest(ledger), self.get_signature_digest(ledger),
channel.claim.channel.public_key_bytes channel.claim.channel.public_key_bytes
) )
@ -446,27 +456,29 @@ class Output(InputOutput):
self.signable.signing_channel_hash, self.signable.signing_channel_hash,
self.signable.to_message_bytes() self.signable.to_message_bytes()
])) ]))
self.signable.signature = channel.private_key.sign_compact(digest) self.signable.signature = channel.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
self.script.generate() self.script.generate()
def sign_data(self, data: bytes, timestamp: str) -> str: def sign_data(self, data:bytes, timestamp:str) -> str:
pieces = [timestamp.encode(), self.claim_hash, data] pieces = [timestamp.encode(), self.claim_hash, data]
digest = sha256(b''.join(pieces)) digest = sha256(b''.join(pieces))
signature = self.private_key.sign_compact(digest) signature = self.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
return hexlify(signature).decode() return hexlify(signature).decode()
def clear_signature(self): def clear_signature(self):
self.channel = None self.channel = None
self.signable.clear_signature() self.signable.clear_signature()
def set_channel_private_key(self, private_key: PrivateKey): async def generate_channel_private_key(self):
self.private_key = private_key self.private_key = await asyncio.get_event_loop().run_in_executor(
self.claim.channel.public_key_bytes = private_key.public_key.pubkey_bytes None, ecdsa.SigningKey.generate, ecdsa.SECP256k1, None, hashlib.sha256
)
self.claim.channel.public_key_bytes = self.private_key.get_verifying_key().to_der()
self.script.generate() self.script.generate()
return self.private_key return self.private_key
def is_channel_private_key(self, private_key: PrivateKey): def is_channel_private_key(self, private_key):
return self.claim.channel.public_key_bytes == private_key.public_key.pubkey_bytes return self.claim.channel.public_key_bytes == private_key.get_verifying_key().to_der()
@classmethod @classmethod
def pay_claim_name_pubkey_hash( def pay_claim_name_pubkey_hash(
@ -718,9 +730,6 @@ class Transaction:
stream.write_compact_size(len(self._inputs)) stream.write_compact_size(len(self._inputs))
for i, txin in enumerate(self._inputs): for i, txin in enumerate(self._inputs):
if signing_input == i: if signing_input == i:
if txin.script.is_script_hash:
txin.serialize_to(stream, txin.script.values['script'].source)
else:
assert txin.txo_ref.txo is not None assert txin.txo_ref.txo is not None
txin.serialize_to(stream, txin.txo_ref.txo.script.source) txin.serialize_to(stream, txin.txo_ref.txo.script.source)
else: else:
@ -865,19 +874,16 @@ class Transaction:
def signature_hash_type(hash_type): def signature_hash_type(hash_type):
return hash_type return hash_type
async def sign(self, funding_accounts: Iterable['Account'], extra_keys: dict = None): async def sign(self, funding_accounts: Iterable['Account']):
self._reset() self._reset()
ledger, wallet = self.ensure_all_have_same_ledger_and_wallet(funding_accounts) ledger, wallet = self.ensure_all_have_same_ledger_and_wallet(funding_accounts)
for i, txi in enumerate(self._inputs): for i, txi in enumerate(self._inputs):
assert txi.script is not None assert txi.script is not None
assert txi.txo_ref.txo is not None assert txi.txo_ref.txo is not None
txo_script = txi.txo_ref.txo.script txo_script = txi.txo_ref.txo.script
if txo_script.is_pay_pubkey_hash or txo_script.is_pay_script_hash: if txo_script.is_pay_pubkey_hash:
if 'pubkey_hash' in txo_script.values: address = ledger.hash160_to_address(txo_script.values['pubkey_hash'])
address = ledger.hash160_to_address(txo_script.values.get('pubkey_hash', ''))
private_key = await ledger.get_private_key_for_address(wallet, address) private_key = await ledger.get_private_key_for_address(wallet, address)
else:
private_key = next(iter(extra_keys.values()))
assert private_key is not None, 'Cannot find private key for signing output.' assert private_key is not None, 'Cannot find private key for signing output.'
tx = self._serialize_for_signature(i) tx = self._serialize_for_signature(i)
txi.script.values['signature'] = \ txi.script.values['signature'] = \
@ -951,15 +957,6 @@ class Transaction:
data = Output.add_purchase_data(Purchase(claim_id)) data = Output.add_purchase_data(Purchase(claim_id))
return cls.create([], [payment, data], funding_accounts, change_account) return cls.create([], [payment, data], funding_accounts, change_account)
@classmethod
async def spend_time_lock(cls, time_locked_txo: Output, script: bytes, account: 'Account'):
txi = Input.spend_time_lock(time_locked_txo, script)
txi.sequence = 0xFFFFFFFE
tx = await cls.create([txi], [], [account], account, sign=False)
tx.locktime = txi.script.values['script'].values['height']
tx._reset()
return tx
@property @property
def my_inputs(self): def my_inputs(self):
for txi in self.inputs: for txi in self.inputs:
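The is_signature_valid revert above goes back to verifying a DER-encoded ECDSA signature over a prehashed SHA-256 digest with the cryptography package. A self-contained sketch of that verification path, using a throwaway key in place of a channel key:

    import hashlib
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import ec
    from cryptography.hazmat.primitives.asymmetric.utils import Prehashed
    from cryptography.hazmat.primitives.serialization import (
        Encoding, PublicFormat, load_der_public_key
    )
    from cryptography.exceptions import InvalidSignature

    private_key = ec.generate_private_key(ec.SECP256K1(), default_backend())
    digest = hashlib.sha256(b"signed claim payload").digest()
    signature = private_key.sign(digest, ec.ECDSA(Prehashed(hashes.SHA256())))
    public_key_bytes = private_key.public_key().public_bytes(
        Encoding.DER, PublicFormat.SubjectPublicKeyInfo
    )

    def is_signature_valid(encoded_signature, signature_digest, public_key_bytes):
        try:
            public_key = load_der_public_key(public_key_bytes, default_backend())
            public_key.verify(encoded_signature, signature_digest,
                              ec.ECDSA(Prehashed(hashes.SHA256())))
            return True
        except (ValueError, InvalidSignature):
            return False

    print(is_signature_valid(signature, digest, public_key_bytes))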

View file

@ -2,7 +2,6 @@ import asyncio
import logging import logging
from lbry.error import ( from lbry.error import (
InsufficientFundsError,
ServerPaymentFeeAboveMaxAllowedError, ServerPaymentFeeAboveMaxAllowedError,
ServerPaymentInvalidAddressError, ServerPaymentInvalidAddressError,
ServerPaymentWalletLockedError ServerPaymentWalletLockedError
@ -25,66 +24,41 @@ class WalletServerPayer:
self.max_fee = max_fee self.max_fee = max_fee
self._on_payment_controller = StreamController() self._on_payment_controller = StreamController()
self.on_payment = self._on_payment_controller.stream self.on_payment = self._on_payment_controller.stream
self.on_payment.listen(None, on_error=lambda e: log.warning(e.args[0])) self.on_payment.listen(None, on_error=lambda e: logging.warning(e.args[0]))
async def pay(self): async def pay(self):
while self.running:
try:
await self._pay()
except (asyncio.TimeoutError, ConnectionError):
if not self.running:
break
delay = max(self.payment_period / 24, 10)
log.warning("Payement failed. Will retry after %g seconds.", delay)
asyncio.sleep(delay)
except BaseException as e:
if not isinstance(e, asyncio.CancelledError):
log.exception("Unexpected exception. Payment task exiting early.")
self.running = False
raise
async def _pay(self):
while self.running: while self.running:
await asyncio.sleep(self.payment_period) await asyncio.sleep(self.payment_period)
features = await self.ledger.network.get_server_features() features = await self.ledger.network.retriable_call(self.ledger.network.get_server_features)
log.debug("pay loop: received server features: %s", str(features))
address = features['payment_address'] address = features['payment_address']
amount = str(features['daily_fee']) amount = str(features['daily_fee'])
if not address or not amount: if not address or not amount:
log.debug("pay loop: no address or no amount")
continue continue
if not self.ledger.is_pubkey_address(address): if not self.ledger.is_pubkey_address(address):
log.info("pay loop: address not pubkey")
self._on_payment_controller.add_error(ServerPaymentInvalidAddressError(address)) self._on_payment_controller.add_error(ServerPaymentInvalidAddressError(address))
continue continue
if self.wallet.is_locked: if self.wallet.is_locked:
log.info("pay loop: wallet is locked")
self._on_payment_controller.add_error(ServerPaymentWalletLockedError()) self._on_payment_controller.add_error(ServerPaymentWalletLockedError())
continue continue
amount = lbc_to_dewies(features['daily_fee']) # check that this is in lbc and not dewies amount = lbc_to_dewies(features['daily_fee']) # check that this is in lbc and not dewies
limit = lbc_to_dewies(self.max_fee) limit = lbc_to_dewies(self.max_fee)
if amount > limit: if amount > limit:
log.info("pay loop: amount (%d) > limit (%d)", amount, limit)
self._on_payment_controller.add_error( self._on_payment_controller.add_error(
ServerPaymentFeeAboveMaxAllowedError(features['daily_fee'], self.max_fee) ServerPaymentFeeAboveMaxAllowedError(features['daily_fee'], self.max_fee)
) )
continue continue
try:
tx = await Transaction.create( tx = await Transaction.create(
[], [],
[Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))], [Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))],
self.wallet.get_accounts_or_all(None), self.wallet.get_accounts_or_all(None),
self.wallet.get_account_or_default(None) self.wallet.get_account_or_default(None)
) )
except InsufficientFundsError:
self._on_payment_controller.add_error(InsufficientFundsError())
continue
await self.ledger.broadcast_or_release(tx, blocking=True) await self.ledger.broadcast(tx)
if self.analytics_manager: if self.analytics_manager:
await self.analytics_manager.send_credits_sent() await self.analytics_manager.send_credits_sent()
self._on_payment_controller.add(tx) self._on_payment_controller.add(tx)
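The fee gate in the loop above compares the advertised daily fee against the configured maximum after converting both to dewies. A toy sketch of that check; lbc_to_dewies here is a simplified stand-in (1 LBC = 1e8 dewies) for the real helper in lbry.wallet.dewies.

    from decimal import Decimal

    def lbc_to_dewies(lbc: str) -> int:            # simplified stand-in
        return int(Decimal(lbc) * 10**8)

    def should_pay(daily_fee: str, max_fee: str) -> bool:
        return lbc_to_dewies(daily_fee) <= lbc_to_dewies(max_fee)

    print(should_pay("1.0", "4.0"))    # True: within the configured limit
    print(should_pay("5.0", "4.0"))    # False: refuse to pay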
@ -96,18 +70,7 @@ class WalletServerPayer:
self.wallet = wallet self.wallet = wallet
self.running = True self.running = True
self.task = asyncio.ensure_future(self.pay()) self.task = asyncio.ensure_future(self.pay())
self.task.add_done_callback(self._done_callback) self.task.add_done_callback(lambda _: log.info("Stopping wallet server payments."))
def _done_callback(self, f):
if f.cancelled():
reason = "Cancelled"
elif f.exception():
reason = f'Exception: {f.exception()}'
elif not self.running:
reason = "Stopped"
else:
reason = ""
log.info("Stopping wallet server payments. %s", reason)
async def stop(self): async def stop(self):
if self.running: if self.running:

View file

@ -10,7 +10,6 @@ from collections import UserDict
from hashlib import sha256 from hashlib import sha256
from operator import attrgetter from operator import attrgetter
from lbry.crypto.crypt import better_aes_encrypt, better_aes_decrypt from lbry.crypto.crypt import better_aes_encrypt, better_aes_decrypt
from lbry.error import InvalidPasswordError
from .account import Account from .account import Account
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
@ -139,10 +138,6 @@ class Wallet:
'accounts': [a.to_dict(encrypt_password) for a in self.accounts] 'accounts': [a.to_dict(encrypt_password) for a in self.accounts]
} }
def to_json(self):
assert not self.is_locked, "Cannot serialize a wallet with locked/encrypted accounts."
return json.dumps(self.to_dict())
def save(self): def save(self):
if self.preferences.get(ENCRYPT_ON_DISK, False): if self.preferences.get(ENCRYPT_ON_DISK, False):
if self.encryption_password is not None: if self.encryption_password is not None:
@ -169,31 +164,20 @@ class Wallet:
def pack(self, password): def pack(self, password):
assert not self.is_locked, "Cannot pack a wallet with locked/encrypted accounts." assert not self.is_locked, "Cannot pack a wallet with locked/encrypted accounts."
new_data_compressed = zlib.compress(self.to_json().encode()) new_data = json.dumps(self.to_dict())
new_data_compressed = zlib.compress(new_data.encode())
return better_aes_encrypt(password, new_data_compressed) return better_aes_encrypt(password, new_data_compressed)
@classmethod @classmethod
def unpack(cls, password, encrypted): def unpack(cls, password, encrypted):
decrypted = better_aes_decrypt(password, encrypted) decrypted = better_aes_decrypt(password, encrypted)
try:
decompressed = zlib.decompress(decrypted) decompressed = zlib.decompress(decrypted)
except zlib.error as e:
if "incorrect header check" in e.args[0].lower():
raise InvalidPasswordError()
if "unknown compression method" in e.args[0].lower():
raise InvalidPasswordError()
if "invalid window size" in e.args[0].lower():
raise InvalidPasswordError()
raise
return json.loads(decompressed) return json.loads(decompressed)
def merge(self, manager: 'WalletManager', def merge(self, manager: 'WalletManager',
password: str, data: str) -> (List['Account'], List['Account']): password: str, data: str) -> List['Account']:
assert not self.is_locked, "Cannot sync apply on a locked wallet." assert not self.is_locked, "Cannot sync apply on a locked wallet."
added_accounts, merged_accounts = [], [] added_accounts = []
if password is None:
decrypted_data = json.loads(data)
else:
decrypted_data = self.unpack(password, data) decrypted_data = self.unpack(password, data)
self.preferences.merge(decrypted_data.get('preferences', {})) self.preferences.merge(decrypted_data.get('preferences', {}))
for account_dict in decrypted_data['accounts']: for account_dict in decrypted_data['accounts']:
@ -207,11 +191,10 @@ class Wallet:
break break
if local_match is not None: if local_match is not None:
local_match.merge(account_dict) local_match.merge(account_dict)
merged_accounts.append(local_match)
else: else:
new_account = Account.from_dict(ledger, self, account_dict) new_account = Account.from_dict(ledger, self, account_dict)
added_accounts.append(new_account) added_accounts.append(new_account)
return added_accounts, merged_accounts return added_accounts
@property @property
def is_locked(self) -> bool: def is_locked(self) -> bool:
@ -220,12 +203,11 @@ class Wallet:
return True return True
return False return False
async def unlock(self, password): def unlock(self, password):
for account in self.accounts: for account in self.accounts:
if account.encrypted: if account.encrypted:
if not account.decrypt(password): if not account.decrypt(password):
return False return False
await account.deterministic_channel_keys.ensure_cache_primed()
self.encryption_password = password self.encryption_password = password
return True return True
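The pack/unpack changes above keep the same round trip: the wallet dict is serialized to JSON and zlib-compressed before encryption, and a wrong password typically surfaces as a zlib error when decompressing garbage, which the removed branch translated into InvalidPasswordError. A sketch of that round trip with the encryption step left out and a generic error in place of the lbry-specific one:

    import json
    import zlib

    def pack(wallet_dict: dict) -> bytes:
        return zlib.compress(json.dumps(wallet_dict).encode())

    def unpack(blob: bytes) -> dict:
        try:
            return json.loads(zlib.decompress(blob))
        except zlib.error as e:
            # the real code raises InvalidPasswordError for the common zlib messages
            raise ValueError("bad payload, possibly a wrong password") from e

    packed = pack({"version": 1, "accounts": []})
    print(unpack(packed))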

View file

@ -1,520 +0,0 @@
import sys
import datetime
import logging
import asyncio
import os.path
import random
import time
import typing
from dataclasses import dataclass, astuple, replace
from aiohttp import web
from prometheus_client import Gauge, generate_latest as prom_generate_latest, Counter, Histogram
import lbry.dht.error
from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import make_kademlia_peer, PeerManager, decode_tcp_peer_from_compact_address
from lbry.dht.protocol.distance import Distance
from lbry.dht.protocol.iterative_find import FindValueResponse, FindNodeResponse, FindResponse
from lbry.extras.daemon.storage import SQLiteMixin
from lbry.conf import Config
from lbry.utils import resolve_host
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)
class SDHashSamples:
def __init__(self, samples_file_path):
with open(samples_file_path, "rb") as sample_file:
self._samples = sample_file.read()
assert len(self._samples) % 48 == 0
self.size = len(self._samples) // 48
def read_samples(self, count=1):
for _ in range(count):
offset = 48 * random.randrange(0, self.size)
yield self._samples[offset:offset + 48]
class PeerStorage(SQLiteMixin):
CREATE_TABLES_QUERY = """
PRAGMA JOURNAL_MODE=WAL;
CREATE TABLE IF NOT EXISTS peer (
peer_id INTEGER NOT NULL,
node_id VARCHAR(96),
address VARCHAR,
udp_port INTEGER,
tcp_port INTEGER,
first_online DATETIME,
errors INTEGER,
last_churn INTEGER,
added_on DATETIME NOT NULL,
last_check DATETIME,
last_seen DATETIME,
latency INTEGER,
PRIMARY KEY (peer_id)
);
CREATE TABLE IF NOT EXISTS connection (
from_peer_id INTEGER NOT NULL,
to_peer_id INTEGER NOT NULL,
PRIMARY KEY (from_peer_id, to_peer_id),
FOREIGN KEY(from_peer_id) REFERENCES peer (peer_id),
FOREIGN KEY(to_peer_id) REFERENCES peer (peer_id)
);
"""
async def open(self):
await super().open()
self.db.writer_connection.row_factory = dict_row_factory
async def all_peers(self):
return [
DHTPeer(**peer) for peer in await self.db.execute_fetchall(
"select * from peer where latency > 0 or last_seen > datetime('now', '-1 hour')")
]
async def save_peers(self, *peers):
log.info("Saving graph nodes (peers) to DB")
await self.db.executemany(
"INSERT OR REPLACE INTO peer("
"node_id, address, udp_port, tcp_port, first_online, errors, last_churn,"
"added_on, last_check, last_seen, latency, peer_id) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",
[astuple(peer) for peer in peers]
)
log.info("Finished saving graph nodes (peers) to DB")
async def save_connections(self, connections_map):
log.info("Saving graph edges (connections) to DB")
await self.db.executemany(
"DELETE FROM connection WHERE from_peer_id = ?", [(key,) for key in connections_map])
for from_peer_id in connections_map:
await self.db.executemany(
"INSERT INTO connection(from_peer_id, to_peer_id) VALUES(?,?)",
[(from_peer_id, to_peer_id) for to_peer_id in connections_map[from_peer_id]])
log.info("Finished saving graph edges (connections) to DB")
@dataclass(frozen=True)
class DHTPeer:
node_id: str
address: str
udp_port: int
tcp_port: int = None
first_online: datetime.datetime = None
errors: int = None
last_churn: int = None
added_on: datetime.datetime = None
last_check: datetime.datetime = None
last_seen: datetime.datetime = None
latency: int = None
peer_id: int = None
@classmethod
def from_kad_peer(cls, peer, peer_id):
node_id = peer.node_id.hex() if peer.node_id else None
return DHTPeer(
node_id=node_id, address=peer.address, udp_port=peer.udp_port, tcp_port=peer.tcp_port,
peer_id=peer_id, added_on=datetime.datetime.utcnow())
def to_kad_peer(self):
node_id = bytes.fromhex(self.node_id) if self.node_id else None
return make_kademlia_peer(node_id, self.address, self.udp_port, self.tcp_port)
def new_node(address="0.0.0.0", udp_port=0, node_id=None):
node_id = node_id or generate_id()
loop = asyncio.get_event_loop()
return Node(loop, PeerManager(loop), node_id, udp_port, udp_port, 3333, address)
class Crawler:
unique_total_hosts_metric = Gauge(
"unique_total_hosts", "Number of unique hosts seen in the last interval", namespace="dht_crawler_node",
)
reachable_hosts_metric = Gauge(
"reachable_hosts", "Number of hosts that replied in the last interval", namespace="dht_crawler_node",
)
total_historic_hosts_metric = Gauge(
"history_total_hosts", "Number of hosts seen since first run.", namespace="dht_crawler_node",
)
pending_check_hosts_metric = Gauge(
"pending_hosts", "Number of hosts on queue to be checked.", namespace="dht_crawler_node",
)
hosts_with_errors_metric = Gauge(
"error_hosts", "Number of hosts that raised errors during contact.", namespace="dht_crawler_node",
)
ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS = tuple(map(float, range(100))) + (
500., 1000., 2000., float('inf')
)
connections_found_metric = Histogram(
"connections_found", "Number of hosts returned by the last successful contact.", namespace="dht_crawler_node",
buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
known_connections_found_metric = Histogram(
"known_connections_found", "Number of already known hosts returned by last contact.",
namespace="dht_crawler_node", buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
reachable_connections_found_metric = Histogram(
"reachable_connections_found", "Number of reachable known hosts returned by last contact.",
namespace="dht_crawler_node", buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
LATENCY_HISTOGRAM_BUCKETS = (
0., 5., 10., 15., 30., 60., 120., 180., 240., 300., 600., 1200., 1800., 4000., 6000., float('inf')
)
host_latency_metric = Histogram(
"host_latency", "Time spent on the last request, in milliseconds.", namespace="dht_crawler_node",
buckets=LATENCY_HISTOGRAM_BUCKETS
)
probed_streams_metric = Counter(
"probed_streams", "Amount of streams probed.", namespace="dht_crawler_node",
)
announced_streams_metric = Counter(
"announced_streams", "Amount of streams where announcements were found.", namespace="dht_crawler_node",
)
working_streams_metric = Counter(
"working_streams", "Amount of streams with reachable hosts.", namespace="dht_crawler_node",
)
def __init__(self, db_path: str, sd_hash_samples: SDHashSamples):
self.node = new_node()
self.db = PeerStorage(db_path)
self.sd_hashes = sd_hash_samples
self._memory_peers = {}
self._reachable_by_node_id = {}
self._connections = {}
async def open(self):
await self.db.open()
self._memory_peers = {
(peer.address, peer.udp_port): peer for peer in await self.db.all_peers()
}
self.refresh_reachable_set()
def refresh_reachable_set(self):
self._reachable_by_node_id = {
bytes.fromhex(peer.node_id): peer for peer in self._memory_peers.values() if (peer.latency or 0) > 0
}
async def probe_files(self):
if not self.sd_hashes:
return
while True:
for sd_hash in self.sd_hashes.read_samples(10_000):
self.refresh_reachable_set()
distance = Distance(sd_hash)
node_ids = list(self._reachable_by_node_id.keys())
node_ids.sort(key=lambda node_id: distance(node_id))
k_closest = [self._reachable_by_node_id[node_id] for node_id in node_ids[:8]]
found = False
working = False
for response in asyncio.as_completed(
[self.request_peers(peer.address, peer.udp_port, peer.node_id, sd_hash) for peer in k_closest]):
response = await response
if response and response.found:
found = True
blob_peers = []
for compact_addr in response.found_compact_addresses:
try:
blob_peers.append(decode_tcp_peer_from_compact_address(compact_addr))
except ValueError as e:
log.error("Error decoding compact peers: %s", e)
for blob_peer in blob_peers:
response = await self.request_peers(blob_peer.address, blob_peer.tcp_port, blob_peer.node_id, sd_hash)
if response:
working = True
log.info("Found responsive peer for %s: %s:%d(%d)",
sd_hash.hex()[:8], blob_peer.address,
blob_peer.udp_port or -1, blob_peer.tcp_port or -1)
else:
log.info("Found dead peer for %s: %s:%d(%d)",
sd_hash.hex()[:8], blob_peer.address,
blob_peer.udp_port or -1, blob_peer.tcp_port or -1)
self.probed_streams_metric.inc()
if found:
self.announced_streams_metric.inc()
if working:
self.working_streams_metric.inc()
log.info("Done querying stream %s for peers. Found: %s, working: %s", sd_hash.hex()[:8], found, working)
await asyncio.sleep(.5)
@property
def refresh_limit(self):
return datetime.datetime.utcnow() - datetime.timedelta(hours=1)
@property
def all_peers(self):
return [
peer for peer in self._memory_peers.values()
if (peer.last_seen and peer.last_seen > self.refresh_limit) or (peer.latency or 0) > 0
]
@property
def active_peers_count(self):
return len(self.all_peers)
@property
def checked_peers_count(self):
return len([peer for peer in self.all_peers if peer.last_check and peer.last_check > self.refresh_limit])
@property
def unreachable_peers_count(self):
return len([peer for peer in self.all_peers
if peer.last_check and peer.last_check > self.refresh_limit and not peer.latency])
@property
def peers_with_errors_count(self):
return len([peer for peer in self.all_peers if (peer.errors or 0) > 0])
def get_peers_needing_check(self):
to_check = [peer for peer in self.all_peers if peer.last_check is None or peer.last_check < self.refresh_limit]
return to_check
def remove_expired_peers(self):
for key, peer in list(self._memory_peers.items()):
if (peer.latency or 0) < 1 and peer.last_seen < self.refresh_limit:
del self._memory_peers[key]
def add_peers(self, *peers):
for peer in peers:
db_peer = self.get_from_peer(peer)
if db_peer and db_peer.node_id is None and peer.node_id is not None:
db_peer = replace(db_peer, node_id=peer.node_id.hex())
elif not db_peer:
db_peer = DHTPeer.from_kad_peer(peer, len(self._memory_peers) + 1)
db_peer = replace(db_peer, last_seen=datetime.datetime.utcnow())
self._memory_peers[(peer.address, peer.udp_port)] = db_peer
async def flush_to_db(self):
await self.db.save_peers(*self._memory_peers.values())
connections_to_save = self._connections
self._connections = {}
# await self.db.save_connections(connections_to_save) heavy call
self.remove_expired_peers()
def get_from_peer(self, peer):
return self._memory_peers.get((peer.address, peer.udp_port), None)
def set_latency(self, peer, latency=None):
if latency:
self.host_latency_metric.observe(latency / 1_000_000.0)
db_peer = self.get_from_peer(peer)
if not db_peer:
return
db_peer = replace(db_peer, latency=latency)
if not db_peer.node_id and peer.node_id:
db_peer = replace(db_peer, node_id=peer.node_id.hex())
if db_peer.first_online and latency is None:
db_peer = replace(db_peer, last_churn=(datetime.datetime.utcnow() - db_peer.first_online).seconds)
elif latency is not None and db_peer.first_online is None:
db_peer = replace(db_peer, first_online=datetime.datetime.utcnow())
db_peer = replace(db_peer, last_check=datetime.datetime.utcnow())
self._memory_peers[(db_peer.address, db_peer.udp_port)] = db_peer
def inc_errors(self, peer):
db_peer = self.get_from_peer(peer)
self._memory_peers[(peer.address, peer.node_id)] = replace(db_peer, errors=(db_peer.errors or 0) + 1)

    def associate_peers(self, peer, other_peers):
        self._connections[self.get_from_peer(peer).peer_id] = [
            self.get_from_peer(other_peer).peer_id for other_peer in other_peers]

    async def request_peers(self, host, port, node_id, key=None) -> typing.Optional[FindResponse]:
        key = key or node_id
        peer = make_kademlia_peer(key, await resolve_host(host, port, 'udp'), port)
        for attempt in range(3):
            try:
                req_start = time.perf_counter_ns()
                if key == node_id:
                    response = await self.node.protocol.get_rpc_peer(peer).find_node(key)
                    response = FindNodeResponse(key, response)
                    latency = time.perf_counter_ns() - req_start
                    self.set_latency(peer, latency)
                else:
                    response = await self.node.protocol.get_rpc_peer(peer).find_value(key)
                    response = FindValueResponse(key, response)
                await asyncio.sleep(0.05)
                return response
            except asyncio.TimeoutError:
                if key == node_id:
                    self.set_latency(peer, None)
                continue
            except lbry.dht.error.TransportNotConnected:
                log.info("Transport unavailable, waiting 1s to retry")
                await asyncio.sleep(1)
            except lbry.dht.error.RemoteException as e:
                log.info('Peer errored: %s:%d attempt #%d - %s',
                         host, port, (attempt + 1), str(e))
                if key == node_id:
                    self.inc_errors(peer)
                    self.set_latency(peer, None)
                continue

    async def crawl_routing_table(self, host, port, node_id=None):
        start = time.time()
        log.debug("querying %s:%d", host, port)
        address = await resolve_host(host, port, 'udp')
        key = node_id or self.node.protocol.peer_manager.get_node_id_for_endpoint(address, port)
        peer = make_kademlia_peer(key, address, port)
        self.add_peers(peer)
        if not key:
            latency = None
            for _ in range(3):
                try:
                    ping_start = time.perf_counter_ns()
                    await self.node.protocol.get_rpc_peer(peer).ping()
                    await asyncio.sleep(0.05)
                    key = key or self.node.protocol.peer_manager.get_node_id_for_endpoint(address, port)
                    peer = make_kademlia_peer(key, address, port)
                    latency = time.perf_counter_ns() - ping_start
                    break
                except asyncio.TimeoutError:
                    pass
                except lbry.dht.error.RemoteException:
                    self.inc_errors(peer)
            self.set_latency(peer, latency if peer.node_id else None)
            if not latency or not peer.node_id:
                if latency and not peer.node_id:
                    log.warning("No node id from %s:%d", host, port)
                return set()
        distance = Distance(key)
        max_distance = int.from_bytes(bytes([0xff] * 48), 'big')
        peers = set()
        factor = 2048
        for i in range(1000):
            response = await self.request_peers(address, port, key)
            new_peers = list(response.get_close_kademlia_peers(peer)) if response else None
            if not new_peers:
                break
            new_peers.sort(key=lambda peer: distance(peer.node_id))
            peers.update(new_peers)
            far_key = new_peers[-1].node_id
            if distance(far_key) <= distance(key):
                current_distance = distance(key)
                next_jump = current_distance + int(max_distance // factor)  # jump closer
                factor /= 2
                if factor > 8 and next_jump < max_distance:
                    key = int.from_bytes(peer.node_id, 'big') ^ next_jump
                    if key.bit_length() > 384:
                        break
                    key = key.to_bytes(48, 'big')
                else:
                    break
            else:
                key = far_key
                factor = 2048
        if peers:
            log.info("Done querying %s:%d in %.2f seconds: %d peers found over %d requests.",
                     host, port, (time.time() - start), len(peers), i)
        if peers:
            self.connections_found_metric.observe(len(peers))
            known_peers = 0
            reachable_connections = 0
            for peer in peers:
                known_peer = self.get_from_peer(peer)
                known_peers += 1 if known_peer else 0
                reachable_connections += 1 if known_peer and (known_peer.latency or 0) > 0 else 0
            self.known_connections_found_metric.observe(known_peers)
            self.reachable_connections_found_metric.observe(reachable_connections)
        self.add_peers(*peers)
        self.associate_peers(peer, peers)
        return peers

    async def process(self):
        to_process = {}

        def submit(_peer):
            f = asyncio.ensure_future(
                self.crawl_routing_table(_peer.address, _peer.udp_port, bytes.fromhex(_peer.node_id)))
            to_process[_peer.peer_id] = f
            f.add_done_callback(lambda _: to_process.pop(_peer.peer_id))

        to_check = self.get_peers_needing_check()
        last_flush = datetime.datetime.utcnow()
        while True:
            for peer in to_check[:200]:
                if peer.peer_id not in to_process:
                    submit(peer)
                    await asyncio.sleep(.05)
            await asyncio.sleep(0)
            self.unique_total_hosts_metric.set(self.checked_peers_count)
            self.reachable_hosts_metric.set(self.checked_peers_count - self.unreachable_peers_count)
            self.total_historic_hosts_metric.set(len(self._memory_peers))
            self.pending_check_hosts_metric.set(len(to_check))
            self.hosts_with_errors_metric.set(self.peers_with_errors_count)
            log.info("%d known, %d contacted recently, %d unreachable, %d error, %d processing, %d on queue",
                     self.active_peers_count, self.checked_peers_count, self.unreachable_peers_count,
                     self.peers_with_errors_count, len(to_process), len(to_check))
            if to_process:
                await asyncio.wait(to_process.values(), return_when=asyncio.FIRST_COMPLETED)
            to_check = self.get_peers_needing_check()
            if (datetime.datetime.utcnow() - last_flush).seconds > 60:
                log.info("flushing to db")
                await self.flush_to_db()
                last_flush = datetime.datetime.utcnow()
            while not to_check and not to_process:
                port = self.node.listening_port.get_extra_info('socket').getsockname()[1]
                self.node.stop()
                await self.node.start_listening()
                log.info("Idle, sleeping a minute. Port changed to %d", port)
                await asyncio.sleep(60.0)
                to_check = self.get_peers_needing_check()


class SimpleMetrics:
    def __init__(self, port):
        self.prometheus_port = port

    async def handle_metrics_get_request(self, _):
        try:
            return web.Response(
                text=prom_generate_latest().decode(),
                content_type='text/plain; version=0.0.4'
            )
        except Exception:
            log.exception('could not generate prometheus data')
            raise

    async def start(self):
        prom_app = web.Application()
        prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
        metrics_runner = web.AppRunner(prom_app)
        await metrics_runner.setup()
        prom_site = web.TCPSite(metrics_runner, "0.0.0.0", self.prometheus_port)
        await prom_site.start()


def dict_row_factory(cursor, row):
    d = {}
    for idx, col in enumerate(cursor.description):
        if col[0] in ('added_on', 'first_online', 'last_seen', 'last_check'):
            d[col[0]] = datetime.datetime.fromisoformat(row[idx]) if row[idx] else None
        else:
            d[col[0]] = row[idx]
    return d
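
For context, dict_row_factory has the (cursor, row) signature that sqlite3 expects of a row factory, returning each row as a dict with the timestamp columns parsed back into datetime objects. A minimal usage sketch, not part of this change; the database path mirrors test() below, and the 'peer' table name is an assumption for illustration:

import sqlite3

conn = sqlite3.connect("/tmp/peers.db")
conn.row_factory = dict_row_factory  # every fetched row becomes a dict
for row in conn.execute("SELECT * FROM peer LIMIT 5"):  # table name assumed
    print(row.get('address'), row.get('last_check'))  # last_check is a datetime or None
conn.close()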


async def test():
    db_path = "/tmp/peers.db" if len(sys.argv) == 1 else sys.argv[-1]
    asyncio.get_event_loop().set_debug(True)
    metrics = SimpleMetrics('8080')
    await metrics.start()
    conf = Config()
    hosting_samples = SDHashSamples("test.sample") if os.path.isfile("test.sample") else None
    crawler = Crawler(db_path, hosting_samples)
    await crawler.open()
    await crawler.flush_to_db()
    await crawler.node.start_listening()
    if crawler.active_peers_count < 100:
        probes = []
        for (host, port) in conf.known_dht_nodes:
            probes.append(asyncio.create_task(crawler.crawl_routing_table(host, port)))
        await asyncio.gather(*probes)
        await crawler.flush_to_db()
    await asyncio.gather(crawler.process(), crawler.probe_files())


if __name__ == '__main__':
    asyncio.run(test())
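
Once test() is running, SimpleMetrics serves the Prometheus text format over aiohttp on the port passed above ('8080'). A minimal sketch of polling that endpoint from another process, assuming the crawler is reachable on localhost:

import asyncio
import aiohttp


async def dump_crawler_metrics():
    # GET the /metrics route registered by SimpleMetrics.start()
    async with aiohttp.ClientSession() as session:
        async with session.get("http://127.0.0.1:8080/metrics") as resp:
            print(await resp.text())


asyncio.run(dump_crawler_metrics())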

View file

@ -83,7 +83,7 @@ async def main(host: str, port: int, db_file_path: str, bootstrap_node: Optional
await storage.open() await storage.open()
node = Node( node = Node(
loop, PeerManager(loop), node_id, port, port, 3333, None, loop, PeerManager(loop), node_id, port, port, 3333, None,
storage=storage, is_bootstrap_node=True storage=storage
) )
if prometheus_port > 0: if prometheus_port > 0:
metrics = SimpleMetrics(prometheus_port, node if export else None) metrics = SimpleMetrics(prometheus_port, node if export else None)

View file

@ -0,0 +1,46 @@
import asyncio
from pprint import pprint

from elasticsearch import AsyncElasticsearch
from elasticsearch._async.helpers import async_scan, async_bulk

DB = {}
INDEX = 'claims'


async def generate_support_amounts(client: AsyncElasticsearch):
    async for doc in async_scan(client):
        DB[doc['_id']] = doc['_source']['support_amount']
        if len(DB) > 10:
            break
    pprint(DB)


def generate_support_to_trending():
    for claim_id, amount in DB.items():
        yield {'doc': {"trending_mixed": amount}, '_id': claim_id, '_index': INDEX, '_op_type': 'update'}


async def write_trending(client: AsyncElasticsearch):
    await async_bulk(client, generate_support_to_trending())


def get_client(host='localhost', port=9201):
    hosts = [{'host': host, 'port': port}]
    return AsyncElasticsearch(hosts, timeout=port)


async def run():
    client = get_client()
    await generate_support_amounts(client)
    await write_trending(client)
    for claim_id, value in DB.items():
        if value > 0:
            break
    doc = await client.get(INDEX, claim_id)
    pprint(doc)
    pprint(DB[claim_id])
    await client.close()


asyncio.get_event_loop().run_until_complete(run())
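
Each action yielded by generate_support_to_trending() is a partial-document update. As a rough point of reference, a single such action is equivalent to the sketch below; the claim id and amount are placeholders, and the host/port mirror get_client() above:

import asyncio
from elasticsearch import AsyncElasticsearch


async def set_trending_for_one(claim_id: str, amount: int):
    # One-off equivalent of a single bulk 'update' action produced above (sketch only)
    client = AsyncElasticsearch([{'host': 'localhost', 'port': 9201}])
    try:
        await client.update(index='claims', id=claim_id, body={'doc': {'trending_mixed': amount}})
    finally:
        await client.close()


# e.g. asyncio.run(set_trending_for_one('<claim id>', 1))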

View file

@ -1,24 +0,0 @@
"""
Hook for libtorrent.
"""
import os
import glob
import os.path
from PyInstaller.utils.hooks import get_module_file_attribute
from PyInstaller import compat
def get_binaries():
if compat.is_win:
files = ('c:/Windows/System32/libssl-1_1-x64.dll', 'c:/Windows/System32/libcrypto-1_1-x64.dll')
for file in files:
if not os.path.isfile(file):
print(f"MISSING {file}")
return [(file, '.') for file in files]
return []
binaries = get_binaries()
for file in glob.glob(os.path.join(get_module_file_attribute('libtorrent'), 'libtorrent*pyd*')):
binaries.append((file, 'libtorrent'))

View file

@ -2,7 +2,7 @@ import os
import re import re
import io import io
import sys import sys
import yaml import json
import argparse import argparse
import unittest import unittest
from datetime import date from datetime import date
@ -25,17 +25,25 @@ AREA_RENAME = {
def get_github(): def get_github():
config_path = os.path.expanduser('~/.config/gh/hosts.yml') config_path = os.path.expanduser('~/.lbry-release-tool.json')
if os.path.exists(config_path): if os.path.exists(config_path):
with open(config_path, 'r') as config_file: with open(config_path, 'r') as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader) config = json.load(config_file)
return github3.login(token=config['github.com']['oauth_token']) return github3.login(token=config['token'])
print('To run release tool you need to first login using the github cli:') token = os.environ.get("GH_TOKEN")
print('') if not token:
print(' $ gh auth login') print('GitHub Credentials')
print('') username = input('username: ')
sys.exit(1) password = getpass('password: ')
gh = github3.authorize(
username, password, ['repo'], 'lbry release tool',
two_factor_callback=lambda: input('Enter 2FA: ')
)
with open(config_path, 'w') as config_file:
json.dump({'token': gh.token}, config_file)
token = gh.token
return github3.login(token=token)
def get_labels(pr, prefix): def get_labels(pr, prefix):

View file

@ -1,44 +0,0 @@
import asyncio
from typing import Iterable

from lbry.extras.daemon.client import daemon_rpc
from lbry.conf import Config

conf = Config()


async def sample_prefix(prefix: bytes):
    result = await daemon_rpc(conf, "claim_search", sd_hash=prefix.hex(), page_size=50)
    total_pages = result['total_pages']
    print(total_pages)
    sd_hashes = set()
    for page in range(1, total_pages + 1):
        if page > 1:
            result = await daemon_rpc(conf, "claim_search", sd_hash=prefix.hex(), page=page, page_size=50)
        for item in result['items']:
            sd_hash = item.get('value', {}).get('source', {}).get('sd_hash')
            if not sd_hash:
                print('err', item)
                continue
            sd_hashes.add(sd_hash)
        print('page', page, len(sd_hashes))
    return sd_hashes


def save_sample(name: str, samples: Iterable[str]):
    with open(name, 'wb') as outfile:
        for sample in samples:
            outfile.write(bytes.fromhex(sample))
        outfile.flush()
        print(outfile.tell())


async def main():
    samples = set()
    futs = [asyncio.ensure_future(sample_prefix(bytes([i]))) for i in range(256)]
    for i, completed in enumerate(asyncio.as_completed(futs)):
        samples.update(await completed)
        print(i, len(samples))
    print(save_sample("test.sample", samples))


if __name__ == "__main__":
    asyncio.run(main())
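
save_sample() above writes each sd_hash as raw bytes, back to back; an sd_hash is 96 hex characters, so each record is 48 bytes. A minimal read-back sketch under that assumption:

def load_sample(name: str = "test.sample"):
    # Reverse of save_sample(): read fixed-size 48-byte records and return them as hex strings
    sd_hashes = set()
    with open(name, 'rb') as infile:
        while True:
            chunk = infile.read(48)
            if len(chunk) < 48:
                break
            sd_hashes.add(chunk.hex())
    return sd_hashes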

View file

@ -9,7 +9,7 @@ omit =
lbry/wallet/orchstr8/ lbry/wallet/orchstr8/
.tox/*/lib/python*/site-packages/lbry/wallet/orchstr8/node.py .tox/*/lib/python*/site-packages/lbry/wallet/orchstr8/node.py
[cryptography.*,coincurve.*,pbkdf2,libtorrent] [cryptography.*,coincurve.*,pbkdf2, libtorrent]
ignore_missing_imports = True ignore_missing_imports = True
[pylint] [pylint]
@ -18,7 +18,7 @@ ignore=words,server,rpc,schema,winpaths.py,migrator,undecorated.py
max-parents=10 max-parents=10
max-args=10 max-args=10
max-line-length=120 max-line-length=120
good-names=T,t,n,i,j,k,x,y,s,f,d,h,c,e,op,db,tx,io,cachedproperty,log,id,r,iv,ts,l,pk good-names=T,t,n,i,j,k,x,y,s,f,d,h,c,e,op,db,tx,io,cachedproperty,log,id,r,iv,ts,l
valid-metaclass-classmethod-first-arg=mcs valid-metaclass-classmethod-first-arg=mcs
disable= disable=
c-extension-no-member, c-extension-no-member,
@ -28,7 +28,6 @@ disable=
no-else-return, no-else-return,
cyclic-import, cyclic-import,
missing-docstring, missing-docstring,
consider-using-f-string,
duplicate-code, duplicate-code,
expression-not-assigned, expression-not-assigned,
inconsistent-return-statements, inconsistent-return-statements,

View file

@ -7,6 +7,12 @@ BASE = os.path.dirname(__file__)
with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh: with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh:
long_description = fh.read() long_description = fh.read()
ROCKSDB = []
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
ROCKSDB.append('lbry-rocksdb==0.8.2')
setup( setup(
name=__name__, name=__name__,
version=__version__, version=__version__,
@ -18,7 +24,7 @@ setup(
long_description_content_type="text/markdown", long_description_content_type="text/markdown",
keywords="lbry protocol media", keywords="lbry protocol media",
license='MIT', license='MIT',
python_requires='>=3.8', python_requires='>=3.7',
packages=find_packages(exclude=('tests',)), packages=find_packages(exclude=('tests',)),
zip_safe=False, zip_safe=False,
entry_points={ entry_points={
@ -36,29 +42,28 @@ setup(
'distro==1.4.0', 'distro==1.4.0',
'base58==1.0.0', 'base58==1.0.0',
'cffi==1.13.2', 'cffi==1.13.2',
'cryptography==3.4.7', 'cryptography==2.5',
'protobuf==3.17.2', 'protobuf==3.17.2',
'msgpack==0.6.1',
'prometheus_client==0.7.1', 'prometheus_client==0.7.1',
'ecdsa==0.13.3', 'ecdsa==0.13.3',
'pyyaml==5.3.1', 'pyyaml==5.3.1',
'docopt==0.6.2', 'docopt==0.6.2',
'hachoir==3.1.2', 'hachoir==3.1.2',
'multidict==4.6.1',
'coincurve==15.0.0', 'coincurve==15.0.0',
'pbkdf2==1.3', 'pbkdf2==1.3',
'attrs==18.2.0',
'pylru==1.1.0',
'elasticsearch==7.10.1',
'grpcio==1.38.0',
'filetype==1.0.9', 'filetype==1.0.9',
'libtorrent==2.0.6', ] + ROCKSDB,
],
extras_require={ extras_require={
'lint': [ 'torrent': ['lbry-libtorrent'],
'pylint==2.13.9' 'lint': ['pylint==2.10.0'],
], 'test': ['coverage'],
'test': [ 'scribe': ['scribe @ git+https://github.com/lbryio/scribe.git'],
'coverage',
'jsonschema==4.4.0',
],
'hub': [
'hub@git+https://github.com/lbryio/hub.git@929448d64bcbe6c5e476757ec78456beaa85e56a'
]
}, },
classifiers=[ classifiers=[
'Framework :: AsyncIO', 'Framework :: AsyncIO',

View file

@ -51,8 +51,7 @@ def mock_network_loop(loop: asyncio.AbstractEventLoop,
return rx.datagram_received(data, from_addr) return rx.datagram_received(data, from_addr)
protocol = proto_lam() protocol = proto_lam()
transport = mock.Mock(spec=asyncio.DatagramTransport) transport = asyncio.DatagramTransport(extra={'socket': mock_sock})
transport.get_extra_info = lambda k: {'socket': mock_sock}[k]
transport.is_closing = lambda: False transport.is_closing = lambda: False
transport.close = lambda: mock_sock.close() transport.close = lambda: mock_sock.close()
mock_sock.sendto = sendto mock_sock.sendto = sendto
@ -61,6 +60,7 @@ def mock_network_loop(loop: asyncio.AbstractEventLoop,
dht_network[from_addr] = protocol dht_network[from_addr] = protocol
return transport, protocol return transport, protocol
with mock.patch('socket.socket') as mock_socket:
mock_sock = mock.Mock(spec=socket.socket) mock_sock = mock.Mock(spec=socket.socket)
mock_sock.setsockopt = lambda *_: None mock_sock.setsockopt = lambda *_: None
mock_sock.bind = lambda *_: None mock_sock.bind = lambda *_: None
@ -70,5 +70,6 @@ def mock_network_loop(loop: asyncio.AbstractEventLoop,
mock_sock.close = lambda: None mock_sock.close = lambda: None
mock_sock.type = socket.SOCK_DGRAM mock_sock.type = socket.SOCK_DGRAM
mock_sock.fileno = lambda: 7 mock_sock.fileno = lambda: 7
mock_socket.return_value = mock_sock
loop.create_datagram_endpoint = create_datagram_endpoint loop.create_datagram_endpoint = create_datagram_endpoint
yield yield

View file

@ -1,11 +1,5 @@
from binascii import hexlify, unhexlify
from lbry.testcase import CommandTestCase from lbry.testcase import CommandTestCase
from lbry.wallet.script import InputScript
from lbry.wallet.dewies import dewies_to_lbc from lbry.wallet.dewies import dewies_to_lbc
from lbry.wallet.account import DeterministicChannelKeyManager
from lbry.crypto.hash import hash160
from lbry.crypto.base58 import Base58
def extract(d, keys): def extract(d, keys):
@ -66,30 +60,15 @@ class AccountManagement(CommandTestCase):
self.assertEqual(accounts['items'][0]['name'], 'recreated account') self.assertEqual(accounts['items'][0]['name'], 'recreated account')
async def test_wallet_migration(self): async def test_wallet_migration(self):
old_id, new_id, valid_key = (
'mi9E8KqFfW5ngktU22pN2jpgsdf81ZbsGY',
'mqs77XbdnuxWN4cXrjKbSoGLkvAHa4f4B8',
'-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEIBZRTZ7tHnYCH3IE9mCo95'
'466L/ShYFhXGrjmSMFJw8eoAcGBSuBBAAK\noUQDQgAEmucoPz9nI+ChZrfhnh'
'0RZ/bcX0r2G0pYBmoNKovtKzXGa8y07D66MWsW\nqXptakqO/9KddIkBu5eJNS'
'UZzQCxPQ==\n-----END EC PRIVATE KEY-----\n'
)
# null certificates should get deleted # null certificates should get deleted
self.account.channel_keys = { await self.channel_create('@foo1')
new_id: 'not valid key', await self.channel_create('@foo2')
'foo': 'bar', await self.channel_create('@foo3')
} keys = list(self.account.channel_keys.keys())
self.account.channel_keys[keys[0]] = None
self.account.channel_keys[keys[1]] = "some invalid junk"
await self.account.maybe_migrate_certificates() await self.account.maybe_migrate_certificates()
self.assertEqual(self.account.channel_keys, {}) self.assertEqual(list(self.account.channel_keys.keys()), [keys[2]])
self.account.channel_keys = {
new_id: 'not valid key',
'foo': 'bar',
'invalid address': valid_key,
}
await self.account.maybe_migrate_certificates()
self.assertEqual(self.account.channel_keys, {
new_id: valid_key
})
async def assertFindsClaims(self, claim_names, awaitable): async def assertFindsClaims(self, claim_names, awaitable):
self.assertEqual(claim_names, [txo.claim_name for txo in (await awaitable)['items']]) self.assertEqual(claim_names, [txo.claim_name for txo in (await awaitable)['items']])
@ -195,122 +174,3 @@ class AccountManagement(CommandTestCase):
bad_address = address[0:20] + '9999999' + address[27:] bad_address = address[0:20] + '9999999' + address[27:]
with self.assertRaisesRegex(Exception, f"'{bad_address}' is not a valid address"): with self.assertRaisesRegex(Exception, f"'{bad_address}' is not a valid address"):
await self.daemon.jsonrpc_account_send('0.1', addresses=[bad_address]) await self.daemon.jsonrpc_account_send('0.1', addresses=[bad_address])
async def test_hybrid_channel_keys(self):
# non-deterministic channel
self.account.channel_keys = {
'mqs77XbdnuxWN4cXrjKbSoGLkvAHa4f4B8':
'-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEIBZRTZ7tHnYCH3IE9mCo95'
'466L/ShYFhXGrjmSMFJw8eoAcGBSuBBAAK\noUQDQgAEmucoPz9nI+ChZrfhnh'
'0RZ/bcX0r2G0pYBmoNKovtKzXGa8y07D66MWsW\nqXptakqO/9KddIkBu5eJNS'
'UZzQCxPQ==\n-----END EC PRIVATE KEY-----\n'
}
channel1 = await self.create_nondeterministic_channel('@foo1', '1.0', unhexlify(
'3056301006072a8648ce3d020106052b8104000a034200049ae7283f3f6723e0a1'
'66b7e19e1d1167f6dc5f4af61b4a58066a0d2a8bed2b35c66bccb4ec3eba316b16'
'a97a6d6a4a8effd29d748901bb9789352519cd00b13d'
))
await self.confirm_tx(channel1['txid'])
# deterministic channel
channel2 = await self.channel_create('@foo2')
await self.stream_create('stream-in-channel1', '0.01', channel_id=self.get_claim_id(channel1))
await self.stream_create('stream-in-channel2', '0.01', channel_id=self.get_claim_id(channel2))
resolved_stream1 = await self.resolve('@foo1/stream-in-channel1')
self.assertEqual('stream-in-channel1', resolved_stream1['name'])
self.assertTrue(resolved_stream1['is_channel_signature_valid'])
resolved_stream2 = await self.resolve('@foo2/stream-in-channel2')
self.assertEqual('stream-in-channel2', resolved_stream2['name'])
self.assertTrue(resolved_stream2['is_channel_signature_valid'])
async def test_deterministic_channel_keys(self):
seed = self.account.seed
keys = self.account.deterministic_channel_keys
# create two channels and make sure they have different keys
channel1a = await self.channel_create('@foo1')
channel2a = await self.channel_create('@foo2')
self.assertNotEqual(
channel1a['outputs'][0]['value']['public_key'],
channel2a['outputs'][0]['value']['public_key'],
)
# start another daemon from the same seed
self.daemon2 = await self.add_daemon(seed=seed)
channel2b, channel1b = (await self.daemon2.jsonrpc_channel_list())['items']
# both daemons end up with the same channel signing keys automagically
self.assertTrue(channel1b.has_private_key)
self.assertEqual(
channel1a['outputs'][0]['value']['public_key_id'],
channel1b.private_key.address
)
self.assertTrue(channel2b.has_private_key)
self.assertEqual(
channel2a['outputs'][0]['value']['public_key_id'],
channel2b.private_key.address
)
# repeatedly calling next channel key returns the same key when not used
current_known = keys.last_known
next_key = await keys.generate_next_key()
self.assertEqual(current_known, keys.last_known)
self.assertEqual(next_key.address, (await keys.generate_next_key()).address)
# again, should be idempotent
next_key = await keys.generate_next_key()
self.assertEqual(current_known, keys.last_known)
self.assertEqual(next_key.address, (await keys.generate_next_key()).address)
# create third channel while both daemons running, second daemon should pick it up
channel3a = await self.channel_create('@foo3')
self.assertEqual(current_known+1, keys.last_known)
self.assertNotEqual(next_key.address, (await keys.generate_next_key()).address)
channel3b, = (await self.daemon2.jsonrpc_channel_list(name='@foo3'))['items']
self.assertTrue(channel3b.has_private_key)
self.assertEqual(
channel3a['outputs'][0]['value']['public_key_id'],
channel3b.private_key.address
)
# channel key cache re-populated after simulated restart
# reset cache
self.account.deterministic_channel_keys = DeterministicChannelKeyManager(self.account)
channel3c, channel2c, channel1c = (await self.daemon.jsonrpc_channel_list())['items']
self.assertFalse(channel1c.has_private_key)
self.assertFalse(channel2c.has_private_key)
self.assertFalse(channel3c.has_private_key)
# repopulate cache
await self.account.deterministic_channel_keys.ensure_cache_primed()
self.assertEqual(self.account.deterministic_channel_keys.last_known, keys.last_known)
channel3c, channel2c, channel1c = (await self.daemon.jsonrpc_channel_list())['items']
self.assertTrue(channel1c.has_private_key)
self.assertTrue(channel2c.has_private_key)
self.assertTrue(channel3c.has_private_key)
async def test_time_locked_transactions(self):
address = await self.account.receiving.get_or_create_usable_address()
private_key = await self.ledger.get_private_key_for_address(self.wallet, address)
script = InputScript(
template=InputScript.TIME_LOCK_SCRIPT,
values={'height': 210, 'pubkey_hash': self.ledger.address_to_hash160(address)}
)
script_address = self.ledger.hash160_to_script_address(hash160(script.source))
script_source = hexlify(script.source).decode()
await self.assertBalance(self.account, '10.0')
tx = await self.daemon.jsonrpc_account_send('4.0', script_address)
await self.confirm_tx(tx.id)
await self.generate(510)
await self.assertBalance(self.account, '5.999877')
tx = await self.daemon.jsonrpc_account_deposit(
tx.id, 0, script_source,
Base58.encode_check(self.ledger.private_key_to_wif(private_key.private_key_bytes))
)
await self.confirm_tx(tx.id)
await self.assertBalance(self.account, '9.9997545')

View file

@ -10,21 +10,20 @@ class BlockchainReorganizationTests(CommandTestCase):
async def assertBlockHash(self, height): async def assertBlockHash(self, height):
bp = self.conductor.spv_node.writer bp = self.conductor.spv_node.writer
reader = self.conductor.spv_node.server
def get_txids(): def get_txids():
return [ return [
reader.db.fs_tx_hash(tx_num)[0][::-1].hex() bp.db.fs_tx_hash(tx_num)[0][::-1].hex()
for tx_num in range(bp.db.tx_counts[height - 1], bp.db.tx_counts[height]) for tx_num in range(bp.db.tx_counts[height - 1], bp.db.tx_counts[height])
] ]
block_hash = await self.blockchain.get_block_hash(height) block_hash = await self.blockchain.get_block_hash(height)
self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode()) self.assertEqual(block_hash, (await self.ledger.headers.hash(height)).decode())
self.assertEqual(block_hash, (await reader.db.fs_block_hashes(height, 1))[0][::-1].hex()) self.assertEqual(block_hash, (await bp.db.fs_block_hashes(height, 1))[0][::-1].hex())
txids = await asyncio.get_event_loop().run_in_executor(None, get_txids) txids = await asyncio.get_event_loop().run_in_executor(None, get_txids)
txs = await reader.db.get_transactions_and_merkles(txids) txs = await bp.db.get_transactions_and_merkles(txids)
block_txs = (await bp.daemon.deserialised_block(block_hash))['tx'] block_txs = (await bp.daemon.deserialised_block(block_hash))['tx']
self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions') self.assertSetEqual(set(block_txs), set(txs.keys()), msg='leveldb/lbrycrd is missing transactions')
self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are out of order') self.assertListEqual(block_txs, list(txs.keys()), msg='leveldb/lbrycrd transactions are out of order')

View file

@ -1,12 +1,11 @@
import asyncio import asyncio
import hub import scribe
from unittest.mock import Mock from unittest.mock import Mock
from hub.herald import HUB_PROTOCOL_VERSION from scribe.blockchain.network import LBCRegTest
from hub.herald.udp import StatusServer from scribe.hub.udp import StatusServer
from hub.herald.session import LBRYElectrumX from scribe.hub.session import LBRYElectrumX
from hub.scribe.network import LBCRegTest
from lbry.wallet.network import Network from lbry.wallet.network import Network
from lbry.wallet.orchstr8 import Conductor from lbry.wallet.orchstr8 import Conductor
@ -36,7 +35,7 @@ class NetworkTests(IntegrationTestCase):
'payment_address': '', 'payment_address': '',
'donation_address': '', 'donation_address': '',
'daily_fee': '0', 'daily_fee': '0',
'server_version': HUB_PROTOCOL_VERSION, 'server_version': scribe.__version__,
'trending_algorithm': 'fast_ar', 'trending_algorithm': 'fast_ar',
}, await self.ledger.network.get_server_features()) }, await self.ledger.network.get_server_features())
# await self.conductor.spv_node.stop() # await self.conductor.spv_node.stop()
@ -66,7 +65,7 @@ class NetworkTests(IntegrationTestCase):
'payment_address': payment_address, 'payment_address': payment_address,
'donation_address': donation_address, 'donation_address': donation_address,
'daily_fee': '42', 'daily_fee': '42',
'server_version': HUB_PROTOCOL_VERSION, 'server_version': scribe.__version__,
'trending_algorithm': 'fast_ar', 'trending_algorithm': 'fast_ar',
}, await self.ledger.network.get_server_features()) }, await self.ledger.network.get_server_features())
@ -102,7 +101,7 @@ class ReconnectTests(IntegrationTestCase):
await self.ledger.stop() await self.ledger.stop()
initial_height = self.ledger.local_height_including_downloaded_height initial_height = self.ledger.local_height_including_downloaded_height
await self.blockchain.generate(100) await self.blockchain.generate(100)
while self.conductor.spv_node.server.session_manager.notified_height < initial_height + 100: while self.conductor.spv_node.server.session_manager.notified_height < initial_height + 99: # off by 1
await asyncio.sleep(0.1) await asyncio.sleep(0.1)
self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height) self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height)
await self.ledger.headers.open() await self.ledger.headers.open()
@ -116,7 +115,7 @@ class ReconnectTests(IntegrationTestCase):
# disconnect and send a new tx, should reconnect and get it # disconnect and send a new tx, should reconnect and get it
self.ledger.network.client.transport.close() self.ledger.network.client.transport.close()
self.assertFalse(self.ledger.network.is_connected) self.assertFalse(self.ledger.network.is_connected)
await self.ledger.resolve([], ['derp']) await self.ledger.resolve([], 'derp')
sendtxid = await self.send_to_address_and_wait(address1, 1.1337, 1) sendtxid = await self.send_to_address_and_wait(address1, 1.1337, 1)
self.assertLess(self.ledger.network.client.response_time, 1) # response time properly set lower, we are fine self.assertLess(self.ledger.network.client.response_time, 1) # response time properly set lower, we are fine
@ -139,7 +138,6 @@ class ReconnectTests(IntegrationTestCase):
if not self.ledger.network.is_connected: if not self.ledger.network.is_connected:
await asyncio.wait_for(self.ledger.network.on_connected.first, timeout=10.0) await asyncio.wait_for(self.ledger.network.on_connected.first, timeout=10.0)
# omg, the burned cable still works! torba is fire proof! # omg, the burned cable still works! torba is fire proof!
await self.ledger.on_header.where(self.blockchain.is_expected_block)
await self.ledger.network.get_transaction(sendtxid) await self.ledger.network.get_transaction(sendtxid)
async def test_timeout_then_reconnect(self): async def test_timeout_then_reconnect(self):

View file

@ -1,8 +1,5 @@
import asyncio import asyncio
import json import json
import string
from binascii import unhexlify
from random import Random
from lbry.wallet import ENCRYPT_ON_DISK from lbry.wallet import ENCRYPT_ON_DISK
from lbry.error import InvalidPasswordError from lbry.error import InvalidPasswordError
@ -24,7 +21,6 @@ class WalletCommands(CommandTestCase):
async def test_wallet_syncing_status(self): async def test_wallet_syncing_status(self):
address = await self.daemon.jsonrpc_address_unused() address = await self.daemon.jsonrpc_address_unused()
await self.ledger._update_tasks.done.wait()
self.assertFalse(self.daemon.jsonrpc_wallet_status()['is_syncing']) self.assertFalse(self.daemon.jsonrpc_wallet_status()['is_syncing'])
await self.send_to_address_and_wait(address, 1) await self.send_to_address_and_wait(address, 1)
await self.ledger._update_tasks.started.wait() await self.ledger._update_tasks.started.wait()
@ -285,19 +281,8 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
) )
# Channel Certificate # Channel Certificate
# non-deterministic channel channel = await daemon2.jsonrpc_channel_create('@foo', '0.1')
self.daemon2.wallet_manager.default_account.channel_keys['mqs77XbdnuxWN4cXrjKbSoGLkvAHa4f4B8'] = ( await self.confirm_tx(channel.id, self.daemon2.ledger)
'-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEIBZRTZ7tHnYCH3IE9mCo95'
'466L/ShYFhXGrjmSMFJw8eoAcGBSuBBAAK\noUQDQgAEmucoPz9nI+ChZrfhnh'
'0RZ/bcX0r2G0pYBmoNKovtKzXGa8y07D66MWsW\nqXptakqO/9KddIkBu5eJNS'
'UZzQCxPQ==\n-----END EC PRIVATE KEY-----\n'
)
channel = await self.create_nondeterministic_channel('@foo', '0.1', unhexlify(
'3056301006072a8648ce3d020106052b8104000a034200049ae7283f3f6723e0a1'
'66b7e19e1d1167f6dc5f4af61b4a58066a0d2a8bed2b35c66bccb4ec3eba316b16'
'a97a6d6a4a8effd29d748901bb9789352519cd00b13d'
), self.daemon2, blocking=True)
await self.confirm_tx(channel['txid'], self.daemon2.ledger)
# both daemons will have the channel but only one has the cert so far # both daemons will have the channel but only one has the cert so far
self.assertItemCount(await daemon.jsonrpc_channel_list(), 1) self.assertItemCount(await daemon.jsonrpc_channel_list(), 1)
@ -329,7 +314,7 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
with self.assertRaisesRegex(AssertionError, "Cannot lock an unencrypted wallet, encrypt first."): with self.assertRaisesRegex(AssertionError, "Cannot lock an unencrypted wallet, encrypt first."):
daemon.jsonrpc_wallet_lock() daemon.jsonrpc_wallet_lock()
# safe to call unlock and decrypt, they are no-ops at this point # safe to call unlock and decrypt, they are no-ops at this point
await daemon.jsonrpc_wallet_unlock('password') # already unlocked daemon.jsonrpc_wallet_unlock('password') # already unlocked
daemon.jsonrpc_wallet_decrypt() # already not encrypted daemon.jsonrpc_wallet_decrypt() # already not encrypted
daemon.jsonrpc_wallet_encrypt('password') daemon.jsonrpc_wallet_encrypt('password')
@ -345,7 +330,7 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
# can't sign transactions with locked wallet # can't sign transactions with locked wallet
with self.assertRaises(AssertionError): with self.assertRaises(AssertionError):
await daemon.jsonrpc_channel_create('@foo', '1.0') await daemon.jsonrpc_channel_create('@foo', '1.0')
await daemon.jsonrpc_wallet_unlock('password') daemon.jsonrpc_wallet_unlock('password')
self.assertEqual(daemon.jsonrpc_wallet_status(), {'is_locked': False, 'is_encrypted': True, self.assertEqual(daemon.jsonrpc_wallet_status(), {'is_locked': False, 'is_encrypted': True,
'is_syncing': False}) 'is_syncing': False})
await daemon.jsonrpc_channel_create('@foo', '1.0') await daemon.jsonrpc_channel_create('@foo', '1.0')
@ -363,17 +348,10 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
await daemon2.jsonrpc_channel_import(exported) await daemon2.jsonrpc_channel_import(exported)
self.assertTrue(daemon2.jsonrpc_wallet_encrypt('password')) self.assertTrue(daemon2.jsonrpc_wallet_encrypt('password'))
self.assertTrue(daemon2.jsonrpc_wallet_lock()) self.assertTrue(daemon2.jsonrpc_wallet_lock())
self.assertTrue(await daemon2.jsonrpc_wallet_unlock("password")) self.assertTrue(daemon2.jsonrpc_wallet_unlock("password"))
self.assertEqual(daemon2.jsonrpc_wallet_status(), self.assertEqual(daemon2.jsonrpc_wallet_status(),
{'is_locked': False, 'is_encrypted': True, 'is_syncing': False}) {'is_locked': False, 'is_encrypted': True, 'is_syncing': False})
async def test_locking_unlocking_does_not_break_deterministic_channels(self):
self.assertTrue(self.daemon.jsonrpc_wallet_encrypt("password"))
self.assertTrue(self.daemon.jsonrpc_wallet_lock())
self.account.deterministic_channel_keys._private_key = None
self.assertTrue(await self.daemon.jsonrpc_wallet_unlock("password"))
await self.channel_create()
async def test_sync_with_encryption_and_password_change(self): async def test_sync_with_encryption_and_password_change(self):
daemon, daemon2 = self.daemon, self.daemon2 daemon, daemon2 = self.daemon, self.daemon2
wallet, wallet2 = daemon.wallet_manager.default_wallet, daemon2.wallet_manager.default_wallet wallet, wallet2 = daemon.wallet_manager.default_wallet, daemon2.wallet_manager.default_wallet
@ -387,16 +365,9 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
data = await daemon2.jsonrpc_sync_apply('password2') data = await daemon2.jsonrpc_sync_apply('password2')
# sync_apply doesn't save password if encrypt-on-disk is False # sync_apply doesn't save password if encrypt-on-disk is False
self.assertEqual(wallet2.encryption_password, None) self.assertEqual(wallet2.encryption_password, None)
# need to use new password2 in sync_apply
# Need to use new password2 in sync_apply. Attempts with other passwords
# should fail consistently with InvalidPasswordError.
random = Random('password')
for i in range(200):
bad_guess = ''.join(random.choices(string.digits + string.ascii_letters + string.punctuation, k=40))
self.assertNotEqual(bad_guess, 'password2')
with self.assertRaises(InvalidPasswordError): with self.assertRaises(InvalidPasswordError):
await daemon.jsonrpc_sync_apply(bad_guess, data=data['data'], blocking=True) await daemon.jsonrpc_sync_apply('password', data=data['data'], blocking=True)
await daemon.jsonrpc_sync_apply('password2', data=data['data'], blocking=True) await daemon.jsonrpc_sync_apply('password2', data=data['data'], blocking=True)
# sync_apply with new password2 also sets it as new local password # sync_apply with new password2 also sets it as new local password
self.assertEqual(wallet.encryption_password, 'password2') self.assertEqual(wallet.encryption_password, 'password2')
@ -407,8 +378,8 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
# check new password is active # check new password is active
daemon.jsonrpc_wallet_lock() daemon.jsonrpc_wallet_lock()
self.assertFalse(await daemon.jsonrpc_wallet_unlock('password')) self.assertFalse(daemon.jsonrpc_wallet_unlock('password'))
self.assertTrue(await daemon.jsonrpc_wallet_unlock('password2')) self.assertTrue(daemon.jsonrpc_wallet_unlock('password2'))
# propagate disk encryption to daemon2 # propagate disk encryption to daemon2
data = await daemon.jsonrpc_sync_apply('password3') data = await daemon.jsonrpc_sync_apply('password3')
@ -424,88 +395,4 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
self.assertWalletEncrypted(wallet2.storage.path, True) self.assertWalletEncrypted(wallet2.storage.path, True)
daemon2.jsonrpc_wallet_lock() daemon2.jsonrpc_wallet_lock()
self.assertTrue(await daemon2.jsonrpc_wallet_unlock('password3')) self.assertTrue(daemon2.jsonrpc_wallet_unlock('password3'))
async def test_wallet_import_and_export(self):
daemon, daemon2 = self.daemon, self.daemon2
# Preferences
self.assertFalse(daemon.jsonrpc_preference_get())
self.assertFalse(daemon2.jsonrpc_preference_get())
daemon.jsonrpc_preference_set("fruit", '["peach", "apricot"]')
daemon.jsonrpc_preference_set("one", "1")
daemon.jsonrpc_preference_set("conflict", "1")
daemon2.jsonrpc_preference_set("another", "A")
await asyncio.sleep(1)
# these preferences will win after merge since they are "newer"
daemon2.jsonrpc_preference_set("two", "2")
daemon2.jsonrpc_preference_set("conflict", "2")
daemon.jsonrpc_preference_set("another", "B")
self.assertDictEqual(daemon.jsonrpc_preference_get(), {
"one": "1", "conflict": "1", "another": "B", "fruit": ["peach", "apricot"]
})
self.assertDictEqual(daemon2.jsonrpc_preference_get(), {
"two": "2", "conflict": "2", "another": "A"
})
self.assertItemCount(await daemon.jsonrpc_account_list(), 1)
data = await daemon2.jsonrpc_wallet_export(password='password')
await daemon.jsonrpc_wallet_import(data=data, password='password', blocking=True)
self.assertItemCount(await daemon.jsonrpc_account_list(), 2)
self.assertDictEqual(
# "two" key added and "conflict" value changed to "2"
daemon.jsonrpc_preference_get(),
{"one": "1", "two": "2", "conflict": "2", "another": "B", "fruit": ["peach", "apricot"]}
)
# Channel Certificate
# non-deterministic channel
self.daemon2.wallet_manager.default_account.channel_keys['mqs77XbdnuxWN4cXrjKbSoGLkvAHa4f4B8'] = (
'-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEIBZRTZ7tHnYCH3IE9mCo95'
'466L/ShYFhXGrjmSMFJw8eoAcGBSuBBAAK\noUQDQgAEmucoPz9nI+ChZrfhnh'
'0RZ/bcX0r2G0pYBmoNKovtKzXGa8y07D66MWsW\nqXptakqO/9KddIkBu5eJNS'
'UZzQCxPQ==\n-----END EC PRIVATE KEY-----\n'
)
channel = await self.create_nondeterministic_channel('@foo', '0.1', unhexlify(
'3056301006072a8648ce3d020106052b8104000a034200049ae7283f3f6723e0a1'
'66b7e19e1d1167f6dc5f4af61b4a58066a0d2a8bed2b35c66bccb4ec3eba316b16'
'a97a6d6a4a8effd29d748901bb9789352519cd00b13d'
), self.daemon2, blocking=True)
await self.confirm_tx(channel['txid'], self.daemon2.ledger)
# both daemons will have the channel but only one has the cert so far
self.assertItemCount(await daemon.jsonrpc_channel_list(), 1)
self.assertEqual(len(daemon.wallet_manager.default_wallet.accounts[1].channel_keys), 0)
self.assertItemCount(await daemon2.jsonrpc_channel_list(), 1)
self.assertEqual(len(daemon2.wallet_manager.default_account.channel_keys), 1)
data = await daemon2.jsonrpc_wallet_export(password='password')
await daemon.jsonrpc_wallet_import(data=data, password='password', blocking=True)
# both daemons have the cert after sync'ing
self.assertEqual(
daemon2.wallet_manager.default_account.channel_keys,
daemon.wallet_manager.default_wallet.accounts[1].channel_keys
)
# test without passwords
data = await daemon2.jsonrpc_wallet_export()
json_data = json.loads(data)
self.assertEqual(json_data["name"], "Wallet")
self.assertNotIn("four", json_data["preferences"])
json_data["preferences"]["four"] = {"value": 4, "ts": 0}
await daemon.jsonrpc_wallet_import(data=json.dumps(json_data), blocking=True)
self.assertEqual(daemon.jsonrpc_preference_get("four"), {"four": 4})
# if password is empty string, export is encrypted
data = await daemon2.jsonrpc_wallet_export(password="")
self.assertNotEqual(data[0], "{")
# if password is empty string, import is decrypted
await daemon.jsonrpc_wallet_import(data, password="")

View file

@ -1,9 +1,9 @@
import asyncio import asyncio
from hub.herald import HUB_PROTOCOL_VERSION import scribe
from hub.herald.session import LBRYElectrumX from scribe.hub.session import LBRYElectrumX
from lbry.error import InsufficientFundsError, ServerPaymentFeeAboveMaxAllowedError from lbry.error import ServerPaymentFeeAboveMaxAllowedError
from lbry.wallet.network import ClientSession from lbry.wallet.network import ClientSession
from lbry.wallet.rpc import RPCError from lbry.wallet.rpc import RPCError
from lbry.testcase import IntegrationTestCase, CommandTestCase from lbry.testcase import IntegrationTestCase, CommandTestCase
@ -34,7 +34,7 @@ class TestSessions(IntegrationTestCase):
async def test_proper_version(self): async def test_proper_version(self):
info = await self.ledger.network.get_server_features() info = await self.ledger.network.get_server_features()
self.assertEqual(HUB_PROTOCOL_VERSION, info['server_version']) self.assertEqual(scribe.__version__, info['server_version'])
async def test_client_errors(self): async def test_client_errors(self):
# Goal is ensuring those are raised and not trapped accidentally # Goal is ensuring those are raised and not trapped accidentally
@ -47,8 +47,7 @@ class TestSessions(IntegrationTestCase):
class TestUsagePayment(CommandTestCase): class TestUsagePayment(CommandTestCase):
async def test_single_server_payment(self): async def test_single_server_payment(self):
wallet_pay_service = self.daemon.component_manager.get_component('wallet_server_payments') wallet_pay_service = self.daemon.component_manager.get_component('wallet_server_payments')
self.assertFalse(wallet_pay_service.running) wallet_pay_service.payment_period = 1
wallet_pay_service.payment_period = 0.5
# only starts with a positive max key fee # only starts with a positive max key fee
wallet_pay_service.max_fee = "0.0" wallet_pay_service.max_fee = "0.0"
await wallet_pay_service.start(ledger=self.ledger, wallet=self.wallet) await wallet_pay_service.start(ledger=self.ledger, wallet=self.wallet)
@ -87,11 +86,6 @@ class TestUsagePayment(CommandTestCase):
self.assertEqual(tx.outputs[0].amount, 100000000) self.assertEqual(tx.outputs[0].amount, 100000000)
self.assertEqual(tx.outputs[0].get_address(self.ledger), address) self.assertEqual(tx.outputs[0].get_address(self.ledger), address)
# continue paying until account is out of funds
with self.assertRaises(InsufficientFundsError):
for i in range(10):
await asyncio.wait_for(wallet_pay_service.on_payment.first, timeout=30)
self.assertTrue(wallet_pay_service.running)
class TestESSync(CommandTestCase): class TestESSync(CommandTestCase):
async def test_es_sync_utility(self): async def test_es_sync_utility(self):
@ -132,28 +126,12 @@ class TestESSync(CommandTestCase):
# stop the es writer and advance the chain by 1, adding a new claim. upon resuming the es writer, it should # stop the es writer and advance the chain by 1, adding a new claim. upon resuming the es writer, it should
# add the new claim # add the new claim
await es_writer.stop() await es_writer.stop()
await self.stream_create(f"stream11", bid='0.001', confirm=False)
stream11 = self.get_claim_id(await self.stream_create(f"stream11", bid='0.001', confirm=False))
current_height = self.conductor.spv_node.writer.height
generate_block_task = asyncio.create_task(self.generate(1)) generate_block_task = asyncio.create_task(self.generate(1))
await self.conductor.spv_node.writer.wait_until_block(current_height + 1)
await es_writer.start() await es_writer.start()
await generate_block_task await generate_block_task
self.assertEqual(11, len(await self.claim_search(order_by=['height']))) self.assertEqual(11, len(await self.claim_search(order_by=['height'])))
# stop/delete es and advance the chain by 1, removing stream11
await es_writer.delete_index()
await es_writer.stop()
server_search_client.clear_caches()
await self.stream_abandon(stream11, confirm=False)
current_height = self.conductor.spv_node.writer.height
generate_block_task = asyncio.create_task(self.generate(1))
await self.conductor.spv_node.writer.wait_until_block(current_height + 1)
await es_writer.start(reindex=True)
await generate_block_task
self.assertEqual(10, len(await self.claim_search(order_by=['height'])))
# # this time we will test a migration from unversioned to v1 # # this time we will test a migration from unversioned to v1
# await db.search_index.sync_client.indices.delete_template(db.search_index.index) # await db.search_index.sync_client.indices.delete_template(db.search_index.index)

View file

@ -18,6 +18,12 @@ from lbry.crypto.hash import sha256
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
def get_encoded_signature(signature):
signature = signature.encode() if isinstance(signature, str) else signature
r = int(signature[:int(len(signature) / 2)], 16)
s = int(signature[int(len(signature) / 2):], 16)
return ecdsa.util.sigencode_der(r, s, len(signature) * 4)
STREAM_TYPES = { STREAM_TYPES = {
'video': 1, 'video': 1,
@ -31,12 +37,12 @@ STREAM_TYPES = {
def verify(channel, data, signature, channel_hash=None): def verify(channel, data, signature, channel_hash=None):
pieces = [ pieces = [
signature['salt'].encode(), signature['signing_ts'].encode(),
channel_hash or channel.claim_hash, channel_hash or channel.claim_hash,
data data
] ]
return Output.is_signature_valid( return Output.is_signature_valid(
unhexlify(signature['signature']), get_encoded_signature(signature['signature']),
sha256(b''.join(pieces)), sha256(b''.join(pieces)),
channel.claim.channel.public_key_bytes channel.claim.channel.public_key_bytes
) )
@ -148,10 +154,10 @@ class ClaimSearchCommand(ClaimTestCase):
await self.assertFindsClaim(self.channel, txid=self.channel['txid'], nout=0) await self.assertFindsClaim(self.channel, txid=self.channel['txid'], nout=0)
await self.assertFindsClaim(channel2, claim_id=channel_id2) await self.assertFindsClaim(channel2, claim_id=channel_id2)
await self.assertFindsClaim(channel2, txid=channel2['txid'], nout=0) await self.assertFindsClaim(channel2, txid=channel2['txid'], nout=0)
await self.assertFindsClaim( #await self.assertFindsClaim(
channel2, public_key_id=channel_txo2['value']['public_key_id']) # channel2, public_key_id=self.ledger.public_key_to_address(channel_txo2['value']['public_key_id']))
await self.assertFindsClaim( #await self.assertFindsClaim(
self.channel, public_key_id=channel_txo['value']['public_key_id']) # self.channel, public_key_id=channel_txo['value']['public_key_id'])
signed = await self.stream_create('on-channel-claim', '0.001', channel_id=self.channel_id) signed = await self.stream_create('on-channel-claim', '0.001', channel_id=self.channel_id)
signed2 = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel_id2, signed2 = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel_id2,
@ -208,7 +214,7 @@ class ClaimSearchCommand(ClaimTestCase):
# resolve by sd hash # resolve by sd hash
two_sd_hash = two['outputs'][0]['value']['source']['sd_hash'] two_sd_hash = two['outputs'][0]['value']['source']['sd_hash']
await self.assertFindsClaims([two], sd_hash=two_sd_hash) await self.assertFindsClaims([two], sd_hash=two_sd_hash)
await self.assertFindsClaims([two], sd_hash=two_sd_hash[:4]) await self.assertFindsClaims([two], sd_hash=two_sd_hash[:2])
async def test_source_filter(self): async def test_source_filter(self):
channel = await self.channel_create('@abc') channel = await self.channel_create('@abc')
@ -1129,17 +1135,17 @@ class ChannelCommands(CommandTestCase):
tx = await self.channel_update(claim_id, bid='4.0') tx = await self.channel_update(claim_id, bid='4.0')
self.assertEqual(tx['outputs'][0]['amount'], '4.0') self.assertEqual(tx['outputs'][0]['amount'], '4.0')
await self.assertBalance(self.account, '5.991503') await self.assertBalance(self.account, '5.991447')
# not enough funds # not enough funds
with self.assertRaisesRegex( with self.assertRaisesRegex(
InsufficientFundsError, "Not enough funds to cover this transaction."): InsufficientFundsError, "Not enough funds to cover this transaction."):
await self.channel_create('@foo2', '9.0') await self.channel_create('@foo2', '9.0')
self.assertItemCount(await self.daemon.jsonrpc_channel_list(), 1) self.assertItemCount(await self.daemon.jsonrpc_channel_list(), 1)
await self.assertBalance(self.account, '5.991503') await self.assertBalance(self.account, '5.991447')
# spend exactly amount available, no change # spend exactly amount available, no change
tx = await self.channel_create('@foo3', '5.981322') tx = await self.channel_create('@foo3', '5.981266')
await self.assertBalance(self.account, '0.0') await self.assertBalance(self.account, '0.0')
self.assertEqual(len(tx['outputs']), 1) # no change self.assertEqual(len(tx['outputs']), 1) # no change
self.assertItemCount(await self.daemon.jsonrpc_channel_list(), 2) self.assertItemCount(await self.daemon.jsonrpc_channel_list(), 2)
@ -1239,13 +1245,8 @@ class ChannelCommands(CommandTestCase):
channel = channel_tx.outputs[0] channel = channel_tx.outputs[0]
signature1 = await self.out(self.daemon.jsonrpc_channel_sign(channel_name='@signer', hexdata=data_to_sign)) signature1 = await self.out(self.daemon.jsonrpc_channel_sign(channel_name='@signer', hexdata=data_to_sign))
signature2 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign)) signature2 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign))
signature3 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign, salt='beef'))
signature4 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign, salt='beef'))
self.assertNotEqual(signature2, signature3)
self.assertEqual(signature3, signature4)
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature1)) self.assertTrue(verify(channel, unhexlify(data_to_sign), signature1))
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature2)) self.assertTrue(verify(channel, unhexlify(data_to_sign), signature2))
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature3))
signature3 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=99)) signature3 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=99))
self.assertTrue(verify(channel, unhexlify('99'), signature3)) self.assertTrue(verify(channel, unhexlify('99'), signature3))
@ -1262,7 +1263,7 @@ class ChannelCommands(CommandTestCase):
await daemon2.jsonrpc_channel_import(exported_data) await daemon2.jsonrpc_channel_import(exported_data)
channels = (await daemon2.jsonrpc_channel_list())['items'] channels = (await daemon2.jsonrpc_channel_list())['items']
self.assertEqual(1, len(channels)) self.assertEqual(1, len(channels))
self.assertEqual(channel_private_key.private_key_bytes, channels[0].private_key.private_key_bytes) self.assertEqual(channel_private_key.to_string(), channels[0].private_key.to_string())
# second wallet can't update until channel is sent to it # second wallet can't update until channel is sent to it
with self.assertRaisesRegex(AssertionError, 'Cannot find private key for signing output.'): with self.assertRaisesRegex(AssertionError, 'Cannot find private key for signing output.'):
@ -1460,31 +1461,17 @@ class StreamCommands(ClaimTestCase):
self.assertItemCount(await self.daemon.jsonrpc_txo_list(reposted_claim_id=claim_id), 0) self.assertItemCount(await self.daemon.jsonrpc_txo_list(reposted_claim_id=claim_id), 0)
self.assertItemCount(await self.daemon.jsonrpc_txo_list(type='repost'), 0) self.assertItemCount(await self.daemon.jsonrpc_txo_list(type='repost'), 0)
tx = await self.stream_repost( tx = await self.stream_repost(claim_id, 'newstuff-again', '1.1', channel_name='@spam')
claim_id, 'newstuff-again', '1.1', channel_name='@spam',
title="repost title", description="repost desc", tags=["tag1", "tag2"]
)
repost_id = self.get_claim_id(tx) repost_id = self.get_claim_id(tx)
# test inflating reposted channels works # test inflating reposted channels works
repost_url = f'newstuff-again:{repost_id}' repost_url = f'newstuff-again:{repost_id}'
self.ledger._tx_cache.clear() self.ledger._tx_cache.clear()
repost_resolve = await self.out(self.daemon.jsonrpc_resolve(repost_url)) self.assertEqual(
repost = repost_resolve[repost_url] goodies_claim_id,
self.assertEqual(goodies_claim_id, repost['reposted_claim']['signing_channel']['claim_id']) (await self.out(self.daemon.jsonrpc_resolve(repost_url))
self.assertEqual("repost title", repost["value"]["title"]) )[repost_url]['reposted_claim']['signing_channel']['claim_id']
self.assertEqual("repost desc", repost["value"]["description"])
self.assertEqual(["tag1", "tag2"], repost["value"]["tags"])
await self.stream_update(
repost_id, title="title 2", description="desc 2", tags=["tag3"]
) )
repost_resolve = await self.out(self.daemon.jsonrpc_resolve(repost_url))
repost = repost_resolve[repost_url]
self.assertEqual(goodies_claim_id, repost['reposted_claim']['signing_channel']['claim_id'])
self.assertEqual("title 2", repost["value"]["title"])
self.assertEqual("desc 2", repost["value"]["description"])
self.assertEqual(["tag1", "tag2", "tag3"], repost["value"]["tags"])
self.assertItemCount(await self.daemon.jsonrpc_claim_list(claim_type='repost'), 1) self.assertItemCount(await self.daemon.jsonrpc_claim_list(claim_type='repost'), 1)
self.assertEqual((await self.claim_search(name='newstuff'))[0]['meta']['reposted'], 1) self.assertEqual((await self.claim_search(name='newstuff'))[0]['meta']['reposted'], 1)
@ -1532,7 +1519,7 @@ class StreamCommands(ClaimTestCase):
self.assertEqual(searched_repost['signing_channel']['claim_id'], spam_claim_id) self.assertEqual(searched_repost['signing_channel']['claim_id'], spam_claim_id)
async def test_filtering_channels_for_removing_content(self): async def test_filtering_channels_for_removing_content(self):
some_channel_id = self.get_claim_id(await self.channel_create('@some_channel', '0.1')) await self.channel_create('@some_channel', '0.1')
await self.stream_create('good_content', '0.1', channel_name='@some_channel', tags=['good']) await self.stream_create('good_content', '0.1', channel_name='@some_channel', tags=['good'])
bad_content_id = self.get_claim_id( bad_content_id = self.get_claim_id(
await self.stream_create('bad_content', '0.1', channel_name='@some_channel', tags=['bad']) await self.stream_create('bad_content', '0.1', channel_name='@some_channel', tags=['bad'])
@ -1602,16 +1589,9 @@ class StreamCommands(ClaimTestCase):
# blocked content is not resolveable # blocked content is not resolveable
error = (await self.resolve('lbry://@some_channel/bad_content'))['error'] error = (await self.resolve('lbry://@some_channel/bad_content'))['error']
self.assertEqual(error['name'], 'BLOCKED') self.assertEqual(error['name'], 'BLOCKED')
self.assertTrue(error['text'].startswith(f"Resolve of 'lbry://@some_channel#{some_channel_id[:1]}/bad_content#{bad_content_id[:1]}' was blocked")) self.assertTrue(error['text'].startswith("Resolve of 'lbry://@some_channel/bad_content' was censored"))
self.assertTrue(error['censor']['short_url'].startswith('lbry://@blocking#')) self.assertTrue(error['censor']['short_url'].startswith('lbry://@blocking#'))
# local claim list still finds local reposted content that's blocked
claims = await self.claim_list(reposted_claim_id=bad_content_id)
self.assertEqual(claims[0]['name'], 'block1')
self.assertEqual(claims[0]['value']['claim_id'], bad_content_id)
self.assertEqual(claims[1]['name'], 'filter1')
self.assertEqual(claims[1]['value']['claim_id'], bad_content_id)
# a filtered/blocked channel impacts all content inside it # a filtered/blocked channel impacts all content inside it
bad_channel_id = self.get_claim_id( bad_channel_id = self.get_claim_id(
await self.channel_create('@bad_channel', '0.1', tags=['bad-stuff']) await self.channel_create('@bad_channel', '0.1', tags=['bad-stuff'])
@ -1693,6 +1673,7 @@ class StreamCommands(ClaimTestCase):
self.assertEqual(tx['txid'], files[0]['txid']) self.assertEqual(tx['txid'], files[0]['txid'])
self.assertEqual(expected, files[0]['metadata']) self.assertEqual(expected, files[0]['metadata'])
@skip
async def test_setting_stream_fields(self): async def test_setting_stream_fields(self):
values = { values = {
'title': "Cool Content", 'title': "Cool Content",

View file

@ -10,7 +10,6 @@ from lbry.stream.descriptor import StreamDescriptor
from lbry.testcase import CommandTestCase from lbry.testcase import CommandTestCase
from lbry.extras.daemon.components import TorrentSession, BACKGROUND_DOWNLOADER_COMPONENT from lbry.extras.daemon.components import TorrentSession, BACKGROUND_DOWNLOADER_COMPONENT
from lbry.wallet import Transaction from lbry.wallet import Transaction
from lbry.torrent.tracker import UDPTrackerServerProtocol
class FileCommands(CommandTestCase): class FileCommands(CommandTestCase):
@ -18,17 +17,11 @@ class FileCommands(CommandTestCase):
super().__init__(*a, **kw) super().__init__(*a, **kw)
self.skip_libtorrent = False self.skip_libtorrent = False
async def add_forever(self):
while True:
for handle in self.client_session._handles.values():
handle._handle.connect_peer(('127.0.0.1', 4040))
await asyncio.sleep(.1)
async def initialize_torrent(self, tx_to_update=None): async def initialize_torrent(self, tx_to_update=None):
if not hasattr(self, 'seeder_session'): if not hasattr(self, 'seeder_session'):
self.seeder_session = TorrentSession(self.loop, None) self.seeder_session = TorrentSession(self.loop, None)
self.addCleanup(self.seeder_session.stop) self.addCleanup(self.seeder_session.stop)
await self.seeder_session.bind('127.0.0.1', port=4040) await self.seeder_session.bind(port=4040)
btih = await self.seeder_session.add_fake_torrent() btih = await self.seeder_session.add_fake_torrent()
address = await self.account.receiving.get_or_create_usable_address() address = await self.account.receiving.get_or_create_usable_address()
if not tx_to_update: if not tx_to_update:
@ -46,9 +39,8 @@ class FileCommands(CommandTestCase):
await tx.sign([self.account]) await tx.sign([self.account])
await self.broadcast_and_confirm(tx) await self.broadcast_and_confirm(tx)
self.client_session = self.daemon.file_manager.source_managers['torrent'].torrent_session self.client_session = self.daemon.file_manager.source_managers['torrent'].torrent_session
self.client_session._session.add_dht_node(('localhost', 4040))
self.client_session.wait_start = False # fixme: this is super slow on tests self.client_session.wait_start = False # fixme: this is super slow on tests
task = asyncio.create_task(self.add_forever())
self.addCleanup(task.cancel)
return tx, btih return tx, btih
@skipIf(TorrentSession is None, "libtorrent not installed") @skipIf(TorrentSession is None, "libtorrent not installed")
@ -89,21 +81,6 @@ class FileCommands(CommandTestCase):
await self.reflector.blob_manager.delete_blobs(all_except_sd) await self.reflector.blob_manager.delete_blobs(all_except_sd)
self.assertEqual(all_except_sd, await self.daemon.jsonrpc_file_reflect(sd_hash=sd_hash)) self.assertEqual(all_except_sd, await self.daemon.jsonrpc_file_reflect(sd_hash=sd_hash))
async def test_sd_blob_fields_fallback(self):
claim_id = self.get_claim_id(await self.stream_create('foo', '0.01', suffix='.txt'))
stream = (await self.daemon.jsonrpc_file_list())["items"][0]
stream.descriptor.suggested_file_name = ' '
stream.descriptor.stream_name = ' '
stream.descriptor.stream_hash = stream.descriptor.get_stream_hash()
sd_hash = stream.descriptor.sd_hash = stream.descriptor.calculate_sd_hash()
await stream.descriptor.make_sd_blob()
await self.daemon.jsonrpc_file_delete(claim_name='foo')
await self.stream_update(claim_id=claim_id, sd_hash=sd_hash)
file_dict = await self.out(self.daemon.jsonrpc_get('lbry://foo', save_file=True))
self.assertEqual(file_dict['suggested_file_name'], stream.file_name)
self.assertEqual(file_dict['stream_name'], stream.file_name)
self.assertEqual(file_dict['mime_type'], 'text/plain')
async def test_file_management(self): async def test_file_management(self):
await self.stream_create('foo', '0.01') await self.stream_create('foo', '0.01')
await self.stream_create('foo2', '0.01') await self.stream_create('foo2', '0.01')
@ -125,32 +102,6 @@ class FileCommands(CommandTestCase):
await self.daemon.jsonrpc_get('lbry://foo') await self.daemon.jsonrpc_get('lbry://foo')
self.assertItemCount(await self.daemon.jsonrpc_file_list(), 1) self.assertItemCount(await self.daemon.jsonrpc_file_list(), 1)
async def test_tracker_discovery(self):
port = 50990
server = UDPTrackerServerProtocol()
transport, _ = await self.loop.create_datagram_endpoint(lambda: server, local_addr=("127.0.0.1", port))
self.addCleanup(transport.close)
self.daemon.conf.fixed_peers = []
self.daemon.conf.tracker_servers = [("127.0.0.1", port)]
tx = await self.stream_create('foo', '0.01')
sd_hash = tx['outputs'][0]['value']['source']['sd_hash']
self.assertNotIn(bytes.fromhex(sd_hash)[:20], server.peers)
server.add_peer(bytes.fromhex(sd_hash)[:20], "127.0.0.1", 5567)
self.assertEqual(1, len(server.peers[bytes.fromhex(sd_hash)[:20]]))
self.assertTrue(await self.daemon.jsonrpc_file_delete(delete_all=True))
stream = await self.daemon.jsonrpc_get('foo', save_file=True)
await self.wait_files_to_complete()
self.assertEqual(0, stream.blobs_remaining)
self.assertEqual(2, len(server.peers[bytes.fromhex(sd_hash)[:20]]))
self.assertEqual([{'address': '127.0.0.1',
'node_id': None,
'tcp_port': 5567,
'udp_port': None},
{'address': '127.0.0.1',
'node_id': None,
'tcp_port': 4444,
'udp_port': None}], (await self.daemon.jsonrpc_peer_list(sd_hash))['items'])
async def test_announces(self): async def test_announces(self):
# announces on publish # announces on publish
self.assertEqual(await self.daemon.storage.get_blobs_to_announce(), []) self.assertEqual(await self.daemon.storage.get_blobs_to_announce(), [])
@ -354,7 +305,7 @@ class FileCommands(CommandTestCase):
await self.daemon.jsonrpc_get('lbry://foo') await self.daemon.jsonrpc_get('lbry://foo')
with open(original_path, 'wb') as handle: with open(original_path, 'wb') as handle:
handle.write(b'some other stuff was there instead') handle.write(b'some other stuff was there instead')
await self.daemon.file_manager.stop() self.daemon.file_manager.stop()
await self.daemon.file_manager.start() await self.daemon.file_manager.start()
await asyncio.wait_for(self.wait_files_to_complete(), timeout=5) # if this hangs, file didn't get set completed await asyncio.wait_for(self.wait_files_to_complete(), timeout=5) # if this hangs, file didn't get set completed
# check that internal state got through up to the file list API # check that internal state got through up to the file list API
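Several hunks in this diff differ only in whether file_manager.stop() is awaited before file_manager.start() runs again. A minimal sketch of what an awaitable stop can look like, assuming it only needs to let in-flight tasks unwind before a restart; the FileManager class here is illustrative, not the SDK's implementation:

    import asyncio

    class FileManager:
        def __init__(self):
            self._tasks = set()

        async def stop(self):
            # Cancel outstanding work and wait for it to finish unwinding so a
            # following start() begins from a clean state.
            for task in self._tasks:
                task.cancel()
            await asyncio.gather(*self._tasks, return_exceptions=True)
            self._tasks.clear()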
@ -382,7 +333,8 @@ class FileCommands(CommandTestCase):
resp = await self.out(self.daemon.jsonrpc_get('lbry://foo', timeout=2)) resp = await self.out(self.daemon.jsonrpc_get('lbry://foo', timeout=2))
self.assertNotIn('error', resp) self.assertNotIn('error', resp)
self.assertTrue(os.path.isfile(path)) self.assertTrue(os.path.isfile(path))
await self.daemon.file_manager.stop() self.daemon.file_manager.stop()
await asyncio.sleep(0.01, loop=self.loop) # FIXME: this sleep should not be needed
self.assertFalse(os.path.isfile(path)) self.assertFalse(os.path.isfile(path))
async def test_incomplete_downloads_retry(self): async def test_incomplete_downloads_retry(self):
@ -477,7 +429,7 @@ class FileCommands(CommandTestCase):
# restart the daemon and make sure the fee is still there # restart the daemon and make sure the fee is still there
await self.daemon.file_manager.stop() self.daemon.file_manager.stop()
await self.daemon.file_manager.start() await self.daemon.file_manager.start()
self.assertItemCount(await self.daemon.jsonrpc_file_list(), 1) self.assertItemCount(await self.daemon.jsonrpc_file_list(), 1)
self.assertEqual((await self.daemon.jsonrpc_file_list())['items'][0].content_fee.raw, raw_content_fee) self.assertEqual((await self.daemon.jsonrpc_file_list())['items'][0].content_fee.raw, raw_content_fee)
@ -620,18 +572,13 @@ class DiskSpaceManagement(CommandTestCase):
self.assertTrue(blobs2.issubset(blobs)) self.assertTrue(blobs2.issubset(blobs))
self.assertFalse(blobs3.issubset(blobs)) self.assertFalse(blobs3.issubset(blobs))
self.assertTrue(blobs4.issubset(blobs)) self.assertTrue(blobs4.issubset(blobs))
# check that pending blobs are not accounted (#3617)
await self.daemon.storage.db.execute_fetchall("update blob set status='pending'")
await self.blob_clean() # just to refresh caches, has no effect
self.assertEqual(0, (await self.status())['disk_space']['total_used_mb'])
self.assertEqual(0, (await self.status())['disk_space']['content_blobs_storage_used_mb'])
self.assertEqual(0, (await self.status())['disk_space']['published_blobs_storage_used_mb'])
# check that added_on gets set on downloads (was a bug) # check that added_on gets set on downloads (was a bug)
self.assertLess(0, await self.daemon.storage.run_and_return_one_or_none("select min(added_on) from blob")) self.assertLess(0, await self.daemon.storage.run_and_return_one_or_none("select min(added_on) from blob"))
await self.daemon.jsonrpc_file_delete(delete_all=True) await self.daemon.jsonrpc_file_delete(delete_all=True)
await self.daemon.jsonrpc_get("foo4", save_file=False) await self.daemon.jsonrpc_get("foo4", save_file=False)
self.assertLess(0, await self.daemon.storage.run_and_return_one_or_none("select min(added_on) from blob")) self.assertLess(0, await self.daemon.storage.run_and_return_one_or_none("select min(added_on) from blob"))
class TestBackgroundDownloaderComponent(CommandTestCase): class TestBackgroundDownloaderComponent(CommandTestCase):
async def get_blobs_from_sd_blob(self, sd_blob): async def get_blobs_from_sd_blob(self, sd_blob):
descriptor = await StreamDescriptor.from_stream_descriptor_blob( descriptor = await StreamDescriptor.from_stream_descriptor_blob(

View file

@ -3,9 +3,7 @@ import hashlib
import aiohttp import aiohttp
import aiohttp.web import aiohttp.web
import asyncio import asyncio
import contextlib
from lbry.file.source import ManagedDownloadSource
from lbry.utils import aiohttp_request from lbry.utils import aiohttp_request
from lbry.blob.blob_file import MAX_BLOB_SIZE from lbry.blob.blob_file import MAX_BLOB_SIZE
from lbry.testcase import CommandTestCase from lbry.testcase import CommandTestCase
@ -23,7 +21,7 @@ def get_random_bytes(n: int) -> bytes:
class RangeRequests(CommandTestCase): class RangeRequests(CommandTestCase):
async def _restart_stream_manager(self): async def _restart_stream_manager(self):
await self.daemon.file_manager.stop() self.daemon.file_manager.stop()
await self.daemon.file_manager.start() await self.daemon.file_manager.start()
return return
@ -133,7 +131,7 @@ class RangeRequests(CommandTestCase):
self.assertTrue(os.path.isfile(self.daemon.blob_manager.get_blob(stream.sd_hash).file_path)) self.assertTrue(os.path.isfile(self.daemon.blob_manager.get_blob(stream.sd_hash).file_path))
self.assertIsNone(stream.download_directory) self.assertIsNone(stream.download_directory)
self.assertIsNone(stream.full_path) self.assertIsNone(stream.full_path)
files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
# test that repeated range requests do not create duplicate files # test that repeated range requests do not create duplicate files
for _ in range(3): for _ in range(3):
@ -142,7 +140,7 @@ class RangeRequests(CommandTestCase):
self.assertTrue(os.path.isfile(self.daemon.blob_manager.get_blob(stream.sd_hash).file_path)) self.assertTrue(os.path.isfile(self.daemon.blob_manager.get_blob(stream.sd_hash).file_path))
self.assertIsNone(stream.download_directory) self.assertIsNone(stream.download_directory)
self.assertIsNone(stream.full_path) self.assertIsNone(stream.full_path)
current_files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) current_files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
self.assertEqual( self.assertEqual(
len(files_in_download_dir), len(current_files_in_download_dir) len(files_in_download_dir), len(current_files_in_download_dir)
) )
@ -150,7 +148,7 @@ class RangeRequests(CommandTestCase):
# test that a range request after restart does not create a duplicate file # test that a range request after restart does not create a duplicate file
await self._restart_stream_manager() await self._restart_stream_manager()
current_files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) current_files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
self.assertEqual( self.assertEqual(
len(files_in_download_dir), len(current_files_in_download_dir) len(files_in_download_dir), len(current_files_in_download_dir)
) )
@ -164,7 +162,7 @@ class RangeRequests(CommandTestCase):
self.assertTrue(os.path.isfile(self.daemon.blob_manager.get_blob(stream.sd_hash).file_path)) self.assertTrue(os.path.isfile(self.daemon.blob_manager.get_blob(stream.sd_hash).file_path))
self.assertIsNone(stream.download_directory) self.assertIsNone(stream.download_directory)
self.assertIsNone(stream.full_path) self.assertIsNone(stream.full_path)
current_files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) current_files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
self.assertEqual( self.assertEqual(
len(files_in_download_dir), len(current_files_in_download_dir) len(files_in_download_dir), len(current_files_in_download_dir)
) )
@ -176,7 +174,7 @@ class RangeRequests(CommandTestCase):
stream = (await self.daemon.jsonrpc_file_list())['items'][0] stream = (await self.daemon.jsonrpc_file_list())['items'][0]
self.assertIsNone(stream.download_directory) self.assertIsNone(stream.download_directory)
self.assertIsNone(stream.full_path) self.assertIsNone(stream.full_path)
files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
# test that repeated range requests do not create duplicate files # test that repeated range requests do not create duplicate files
for _ in range(3): for _ in range(3):
@ -184,7 +182,7 @@ class RangeRequests(CommandTestCase):
stream = (await self.daemon.jsonrpc_file_list())['items'][0] stream = (await self.daemon.jsonrpc_file_list())['items'][0]
self.assertIsNone(stream.download_directory) self.assertIsNone(stream.download_directory)
self.assertIsNone(stream.full_path) self.assertIsNone(stream.full_path)
current_files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) current_files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
self.assertEqual( self.assertEqual(
len(files_in_download_dir), len(current_files_in_download_dir) len(files_in_download_dir), len(current_files_in_download_dir)
) )
@ -192,7 +190,7 @@ class RangeRequests(CommandTestCase):
# test that a range request after restart does not create a duplicate file # test that a range request after restart does not create a duplicate file
await self._restart_stream_manager() await self._restart_stream_manager()
current_files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) current_files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
self.assertEqual( self.assertEqual(
len(files_in_download_dir), len(current_files_in_download_dir) len(files_in_download_dir), len(current_files_in_download_dir)
) )
@ -204,7 +202,7 @@ class RangeRequests(CommandTestCase):
stream = (await self.daemon.jsonrpc_file_list())['items'][0] stream = (await self.daemon.jsonrpc_file_list())['items'][0]
self.assertIsNone(stream.download_directory) self.assertIsNone(stream.download_directory)
self.assertIsNone(stream.full_path) self.assertIsNone(stream.full_path)
current_files_in_download_dir = list(os.scandir(self.daemon.conf.data_dir)) current_files_in_download_dir = list(os.scandir(os.path.dirname(self.daemon.conf.data_dir)))
self.assertEqual( self.assertEqual(
len(files_in_download_dir), len(current_files_in_download_dir) len(files_in_download_dir), len(current_files_in_download_dir)
) )
@ -354,20 +352,13 @@ class RangeRequests(CommandTestCase):
path = stream.full_path path = stream.full_path
self.assertIsNotNone(path) self.assertIsNotNone(path)
if wait_for_start_writing: if wait_for_start_writing:
with contextlib.suppress(asyncio.CancelledError):
await stream.started_writing.wait() await stream.started_writing.wait()
self.assertTrue(os.path.isfile(path)) self.assertTrue(os.path.isfile(path))
await self.daemon.file_manager.stop() await self._restart_stream_manager()
# while stopped, we get no response to query and no file is present
self.assertEqual((await self.daemon.jsonrpc_file_list())['items'], [])
self.assertEqual(os.path.isfile(path), stream.status == ManagedDownloadSource.STATUS_FINISHED)
await self.daemon.file_manager.start()
# after restart, we get a response to query and same file path
stream = (await self.daemon.jsonrpc_file_list())['items'][0] stream = (await self.daemon.jsonrpc_file_list())['items'][0]
self.assertIsNotNone(stream.full_path) self.assertIsNotNone(stream.full_path)
self.assertEqual(stream.full_path, path) self.assertFalse(os.path.isfile(path))
if wait_for_start_writing: if wait_for_start_writing:
with contextlib.suppress(asyncio.CancelledError):
await stream.started_writing.wait() await stream.started_writing.wait()
self.assertTrue(os.path.isfile(path)) self.assertTrue(os.path.isfile(path))
@ -423,6 +414,6 @@ class RangeRequestsLRUCache(CommandTestCase):
# running with cache size 0 gets through without errors without # running with cache size 0 gets through without errors without
# this since the server doesn't stop immediately # this since the server doesn't stop immediately
await asyncio.sleep(1) await asyncio.sleep(1, loop=self.loop)
await self._request_stream() await self._request_stream()
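Here and in several hunks below, the two sides differ only by the explicit loop= argument to asyncio.sleep, asyncio.gather, asyncio.wait_for and asyncio.Queue. That keyword was deprecated in Python 3.8 and removed in 3.10, so only the loop-free spelling runs on current interpreters. A minimal sketch of the loop-free style:

    import asyncio

    async def main():
        # Current asyncio: no loop= anywhere; everything runs on the loop that
        # is already driving this coroutine.
        queue = asyncio.Queue()
        await asyncio.gather(asyncio.sleep(0), queue.put(1))
        await asyncio.wait_for(queue.get(), timeout=1)

    asyncio.run(main())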

View file

@ -10,7 +10,7 @@ from lbry.extras.daemon.components import (
DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT,
HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT,
LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT, TRACKER_ANNOUNCER_COMPONENT LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT
) )
from lbry.extras.daemon.daemon import Daemon from lbry.extras.daemon.daemon import Daemon
@ -26,7 +26,7 @@ class CLIIntegrationTest(AsyncioTestCase):
DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT,
HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT,
LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT, TRACKER_ANNOUNCER_COMPONENT LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT
) )
Daemon.component_attributes = {} Daemon.component_attributes = {}
self.daemon = Daemon(conf) self.daemon = Daemon(conf)

View file

@ -1,9 +1,8 @@
import asyncio import asyncio
import logging
from decimal import Decimal from decimal import Decimal
from lbry.testcase import AsyncioTestCase from lbry.testcase import AsyncioTestCase
from lbry.extras.daemon.exchange_rate_manager import ( from lbry.extras.daemon.exchange_rate_manager import ExchangeRate, ExchangeRateManager, FEEDS, MarketFeed
ExchangeRate, ExchangeRateManager, FEEDS, MarketFeed
)
class TestExchangeRateManager(AsyncioTestCase): class TestExchangeRateManager(AsyncioTestCase):
@ -25,10 +24,10 @@ class TestExchangeRateManager(AsyncioTestCase):
self.assertLessEqual(len(failures), 1, f"feed failures: {failures}. Please check exchange rate feeds!") self.assertLessEqual(len(failures), 1, f"feed failures: {failures}. Please check exchange rate feeds!")
lbc = manager.convert_currency('USD', 'LBC', Decimal('1.0')) lbc = manager.convert_currency('USD', 'LBC', Decimal('1.0'))
self.assertGreaterEqual(lbc, 2.0) self.assertGreaterEqual(lbc, 2.0)
self.assertLessEqual(lbc, 120.0) self.assertLessEqual(lbc, 60.0)
lbc = manager.convert_currency('BTC', 'LBC', Decimal('0.01')) lbc = manager.convert_currency('BTC', 'LBC', Decimal('0.01'))
self.assertGreaterEqual(lbc, 1_000) self.assertGreaterEqual(lbc, 1_000)
self.assertLessEqual(lbc, 30_000) self.assertLessEqual(lbc, 20_000)
async def test_it_handles_feed_being_offline(self): async def test_it_handles_feed_being_offline(self):
class FakeFeed(MarketFeed): class FakeFeed(MarketFeed):

View file

@ -215,21 +215,6 @@ class ResolveCommand(BaseResolveTestCase):
await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:17]}', colliding_claim_ids[1]) await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1][:17]}', colliding_claim_ids[1])
await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1]}', colliding_claim_ids[1]) await self.assertResolvesToClaimId(f'@abc#{colliding_claim_ids[1]}', colliding_claim_ids[1])
# test resolving different streams for a channel using short urls
self.get_claim_id(
await self.stream_create('foo1', '0.01', channel_id=colliding_claim_ids[0])
)
self.get_claim_id(
await self.stream_create('foo2', '0.01', channel_id=colliding_claim_ids[0])
)
duplicated_resolved = list((
await self.ledger.resolve([], [
f'@abc#{colliding_claim_ids[0][:2]}/foo1', f'@abc#{colliding_claim_ids[0][:2]}/foo2'
])
).values())
self.assertEqual('foo1', duplicated_resolved[0].normalized_name)
self.assertEqual('foo2', duplicated_resolved[1].normalized_name)
async def test_abandon_channel_and_claims_in_same_tx(self): async def test_abandon_channel_and_claims_in_same_tx(self):
channel_id = self.get_claim_id( channel_id = self.get_claim_id(
await self.channel_create('@abc', '0.01') await self.channel_create('@abc', '0.01')
@ -341,30 +326,6 @@ class ResolveCommand(BaseResolveTestCase):
await self.support_abandon(claim_id1) await self.support_abandon(claim_id1)
await self.assertResolvesToClaimId('@foo', claim_id2) await self.assertResolvesToClaimId('@foo', claim_id2)
async def test_resolve_duplicate_name_in_channel(self):
db_resolve = self.conductor.spv_node.server.db.resolve
# first one remains winner unless something else changes
channel_id = self.get_claim_id(await self.channel_create('@foo'))
file_path = self.create_upload_file(data=b'hi!')
tx = await self.daemon.jsonrpc_stream_create('duplicate', '0.1', file_path=file_path, allow_duplicate_name=True, channel_id=channel_id)
await self.ledger.wait(tx)
first_claim = tx.outputs[0].claim_id
file_path = self.create_upload_file(data=b'hi!')
tx = await self.daemon.jsonrpc_stream_create('duplicate', '0.1', file_path=file_path, allow_duplicate_name=True, channel_id=channel_id)
await self.ledger.wait(tx)
duplicate_claim = tx.outputs[0].claim_id
await self.generate(1)
stream, channel, _, _ = await db_resolve(f"@foo:{channel_id}/duplicate:{first_claim}")
self.assertEqual(stream.claim_hash.hex(), first_claim)
self.assertEqual(channel.claim_hash.hex(), channel_id)
stream, channel, _, _ = await db_resolve(f"@foo:{channel_id}/duplicate:{duplicate_claim}")
self.assertEqual(stream.claim_hash.hex(), duplicate_claim)
self.assertEqual(channel.claim_hash.hex(), channel_id)
async def test_advanced_resolve(self): async def test_advanced_resolve(self):
claim_id1 = self.get_claim_id( claim_id1 = self.get_claim_id(
await self.stream_create('foo', '0.7', allow_duplicate_name=True)) await self.stream_create('foo', '0.7', allow_duplicate_name=True))
@ -1508,27 +1469,27 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
COIN = int(1E8) COIN = int(1E8)
self.assertEqual(self.conductor.spv_node.writer.height, 207) self.assertEqual(self.conductor.spv_node.writer.height, 207)
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put( self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
(208, bytes.fromhex(claim_id1)), (0, 10 * COIN) (208, bytes.fromhex(claim_id1)), (0, 10 * COIN)
) )
await self.generate(1) await self.generate(1)
self.assertEqual(self.conductor.spv_node.writer.height, 208) self.assertEqual(self.conductor.spv_node.writer.height, 208)
self.assertEqual(1.7090807854206793, await get_trending_score(claim_id1)) self.assertEqual(1.7090807854206793, await get_trending_score(claim_id1))
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put( self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
(209, bytes.fromhex(claim_id1)), (10 * COIN, 100 * COIN) (209, bytes.fromhex(claim_id1)), (10 * COIN, 100 * COIN)
) )
await self.generate(1) await self.generate(1)
self.assertEqual(self.conductor.spv_node.writer.height, 209) self.assertEqual(self.conductor.spv_node.writer.height, 209)
self.assertEqual(2.2437974397778886, await get_trending_score(claim_id1)) self.assertEqual(2.2437974397778886, await get_trending_score(claim_id1))
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put( self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
(309, bytes.fromhex(claim_id1)), (100 * COIN, 1000000 * COIN) (309, bytes.fromhex(claim_id1)), (100 * COIN, 1000000 * COIN)
) )
await self.generate(100) await self.generate(100)
self.assertEqual(self.conductor.spv_node.writer.height, 309) self.assertEqual(self.conductor.spv_node.writer.height, 309)
self.assertEqual(5.157053472135866, await get_trending_score(claim_id1)) self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))
self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put( self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
(409, bytes.fromhex(claim_id1)), (1000000 * COIN, 1 * COIN) (409, bytes.fromhex(claim_id1)), (1000000 * COIN, 1 * COIN)
) )
@ -1550,10 +1511,18 @@ class ResolveAfterReorg(BaseResolveTestCase):
blocks = self.ledger.headers.height - start blocks = self.ledger.headers.height - start
self.blockchain.block_expected = start - 1 self.blockchain.block_expected = start - 1
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
self.conductor.spv_node.server.synchronized.clear()
# go back to start # go back to start
await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode()) await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode())
# go to previous + 1 # go to previous + 1
await self.generate(blocks + 2) await self.blockchain.generate(blocks + 2)
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
await self.conductor.spv_node.server.synchronized.wait()
# await asyncio.wait_for(self.on_header(self.blockchain.block_expected), 30.0)
async def assertBlockHash(self, height): async def assertBlockHash(self, height):
reader_db = self.conductor.spv_node.server.db reader_db = self.conductor.spv_node.server.db
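The reorg helper above clears the server's synchronized event and sets up its waiter before invalidating and regenerating blocks, because the notification might otherwise fire while nobody is listening. A small generic sketch of that arm-before-trigger pattern (names here are illustrative, not the SDK's):

    import asyncio

    async def main():
        done = asyncio.Event()

        async def reorg():
            # Stand-in for invalidate_block/generate completing very quickly.
            done.set()

        done.clear()                          # arm the signal first
        task = asyncio.create_task(reorg())   # then trigger the work
        await done.wait()                     # a fast completion is still seen
        await task

    asyncio.run(main())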
@ -1827,7 +1796,7 @@ def generate_signed_legacy(address: bytes, output: Output):
claim.SerializeToString(), claim.SerializeToString(),
output.claim_hash[::-1] output.claim_hash[::-1]
])) ]))
signature = output.private_key.sign_compact(digest) signature = output.private_key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
claim.publisherSignature.version = 1 claim.publisherSignature.version = 1
claim.publisherSignature.signatureType = 1 claim.publisherSignature.signatureType = 1
claim.publisherSignature.signature = signature claim.publisherSignature.signature = signature
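The signing change above swaps between a compact signature over the digest and RFC 6979 deterministic ECDSA via sign_digest_deterministic, under which the same digest always yields the same signature. A minimal sketch of the deterministic call, assuming the key behaves like an ecdsa.SigningKey on secp256k1 rather than the SDK's own key wrapper:

    import hashlib
    from ecdsa import SigningKey, SECP256k1

    key = SigningKey.generate(curve=SECP256k1)
    digest = hashlib.sha256(b'serialized claim and claim hash').digest()
    # RFC 6979: the nonce is derived from the key and digest, so re-signing
    # the same digest produces byte-identical output.
    sig1 = key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
    sig2 = key.sign_digest_deterministic(digest, hashfunc=hashlib.sha256)
    assert sig1 == sig2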

View file

@ -31,9 +31,7 @@ class BasicTransactionTest(IntegrationTestCase):
channel_txo = Output.pay_claim_name_pubkey_hash( channel_txo = Output.pay_claim_name_pubkey_hash(
l2d('1.0'), '@bar', channel, self.account.ledger.address_to_hash160(address1) l2d('1.0'), '@bar', channel, self.account.ledger.address_to_hash160(address1)
) )
channel_txo.set_channel_private_key( await channel_txo.generate_channel_private_key()
await self.account.generate_channel_private_key()
)
channel_txo.script.generate() channel_txo.script.generate()
channel_tx = await Transaction.create([], [channel_txo], [self.account], self.account) channel_tx = await Transaction.create([], [channel_txo], [self.account], self.account)

View file

@ -1,8 +1,7 @@
import asyncio
import unittest import unittest
from lbry.testcase import CommandTestCase from lbry.testcase import CommandTestCase
from lbry.wallet import Transaction
class TransactionCommandsTestCase(CommandTestCase): class TransactionCommandsTestCase(CommandTestCase):
@ -29,42 +28,16 @@ class TransactionCommandsTestCase(CommandTestCase):
# someone's tx # someone's tx
change_address = await self.blockchain.get_raw_change_address() change_address = await self.blockchain.get_raw_change_address()
sendtxid = await self.blockchain.send_to_address(change_address, 10) sendtxid = await self.blockchain.send_to_address(change_address, 10)
# After a few tries, Hub should have the transaction (in mempool).
for i in range(5):
tx = await self.daemon.jsonrpc_transaction_show(sendtxid) tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
# Retry if Hub is not aware of the transaction. self.assertEqual(tx.id, sendtxid)
if isinstance(tx, dict): self.assertEqual(tx.height, -1)
# Fields: 'success', 'code', 'message'
self.assertFalse(tx['success'], tx)
self.assertEqual(tx['code'], 404, tx)
self.assertEqual(tx['message'], "transaction not found", tx)
await asyncio.sleep(0.1)
continue
break
# verify transaction show (in mempool)
self.assertTrue(isinstance(tx, Transaction), str(tx))
# Fields: 'txid', 'raw', 'height', 'position', 'is_verified', and more.
self.assertEqual(tx.id, sendtxid, vars(tx))
self.assertEqual(tx.height, -1, vars(tx))
self.assertEqual(tx.is_verified, False, vars(tx))
# transaction is confirmed and leaves mempool
await self.generate(1) await self.generate(1)
# verify transaction show
tx = await self.daemon.jsonrpc_transaction_show(sendtxid) tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
self.assertTrue(isinstance(tx, Transaction), str(tx)) self.assertEqual(tx.height, self.ledger.headers.height)
self.assertEqual(tx.id, sendtxid, vars(tx))
self.assertEqual(tx.height, self.ledger.headers.height, vars(tx))
self.assertEqual(tx.is_verified, True, vars(tx))
# inexistent # inexistent
result = await self.daemon.jsonrpc_transaction_show('0'*64) result = await self.daemon.jsonrpc_transaction_show('0'*64)
self.assertTrue(isinstance(result, dict), result) self.assertFalse(result['success'])
# Fields: 'success', 'code', 'message'
self.assertFalse(result['success'], result)
self.assertEqual(result['code'], 404, result)
self.assertEqual(result['message'], "transaction not found", result)
async def test_utxo_release(self): async def test_utxo_release(self):
await self.send_to_address_and_wait( await self.send_to_address_and_wait(

View file

@ -36,7 +36,7 @@ class TestBlob(AsyncioTestCase):
writer.write(self.blob_bytes) writer.write(self.blob_bytes)
await blob.verified.wait() await blob.verified.wait()
self.assertTrue(blob.get_is_verified()) self.assertTrue(blob.get_is_verified())
await asyncio.sleep(0) # wait for the db save task await asyncio.sleep(0, loop=self.loop) # wait for the db save task
return blob return blob
async def _test_close_writers_on_finished(self, blob_class=AbstractBlob, blob_directory=None): async def _test_close_writers_on_finished(self, blob_class=AbstractBlob, blob_directory=None):
@ -48,7 +48,7 @@ class TestBlob(AsyncioTestCase):
with self.assertRaises(InvalidDataError): with self.assertRaises(InvalidDataError):
writers[1].write(self.blob_bytes * 2) writers[1].write(self.blob_bytes * 2)
await writers[1].finished await writers[1].finished
await asyncio.sleep(0) await asyncio.sleep(0, loop=self.loop)
self.assertEqual(4, len(blob.writers)) self.assertEqual(4, len(blob.writers))
# write the blob # write the blob
@ -208,7 +208,7 @@ class TestBlob(AsyncioTestCase):
async def read_blob_buffer(): async def read_blob_buffer():
with reader as read_handle: with reader as read_handle:
self.assertEqual(1, len(blob.readers)) self.assertEqual(1, len(blob.readers))
await asyncio.sleep(2) await asyncio.sleep(2, loop=self.loop)
self.assertEqual(0, len(blob.readers)) self.assertEqual(0, len(blob.readers))
return read_handle.read() return read_handle.read()

View file

@ -54,8 +54,7 @@ class BlobExchangeTestBase(AsyncioTestCase):
download_dir=self.client_dir, download_dir=self.client_dir,
wallet=self.client_wallet_dir, wallet=self.client_wallet_dir,
save_files=True, save_files=True,
fixed_peers=[], fixed_peers=[]
tracker_servers=[]
) )
self.client_config.transaction_cache_size = 10000 self.client_config.transaction_cache_size = 10000
self.client_storage = SQLiteStorage(self.client_config, os.path.join(self.client_dir, "lbrynet.sqlite")) self.client_storage = SQLiteStorage(self.client_config, os.path.join(self.client_dir, "lbrynet.sqlite"))
@ -183,7 +182,7 @@ class TestBlobExchange(BlobExchangeTestBase):
writer.write(mock_blob_bytes) writer.write(mock_blob_bytes)
return self.loop.create_task(_inner()) return self.loop.create_task(_inner())
await asyncio.gather(write_task(writer1), write_task(writer2)) await asyncio.gather(write_task(writer1), write_task(writer2), loop=self.loop)
self.assertDictEqual({1: mock_blob_bytes, 2: mock_blob_bytes}, results) self.assertDictEqual({1: mock_blob_bytes, 2: mock_blob_bytes}, results)
self.assertEqual(1, write_called_count) self.assertEqual(1, write_called_count)
@ -239,8 +238,7 @@ class TestBlobExchange(BlobExchangeTestBase):
async def test_server_chunked_request(self): async def test_server_chunked_request(self):
blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed" blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
server_protocol = BlobServerProtocol(self.loop, self.server_blob_manager, self.server.lbrycrd_address) server_protocol = BlobServerProtocol(self.loop, self.server_blob_manager, self.server.lbrycrd_address)
transport = mock.Mock(spec=asyncio.Transport) transport = asyncio.Transport(extra={'peername': ('ip', 90)})
transport.get_extra_info = lambda k: {'peername': ('ip', 90)}[k]
received_data = BytesIO() received_data = BytesIO()
transport.is_closing = lambda: received_data.closed transport.is_closing = lambda: received_data.closed
transport.write = received_data.write transport.write = received_data.write
@ -270,7 +268,7 @@ class TestBlobExchange(BlobExchangeTestBase):
client_blob.delete() client_blob.delete()
# wait for less than the idle timeout # wait for less than the idle timeout
await asyncio.sleep(0.5) await asyncio.sleep(0.5, loop=self.loop)
# download the blob again # download the blob again
downloaded, protocol2 = await request_blob(self.loop, client_blob, self.server_from_client.address, downloaded, protocol2 = await request_blob(self.loop, client_blob, self.server_from_client.address,
@ -284,10 +282,10 @@ class TestBlobExchange(BlobExchangeTestBase):
client_blob.delete() client_blob.delete()
# check that the connection times out from the server side # check that the connection times out from the server side
await asyncio.sleep(0.9) await asyncio.sleep(0.9, loop=self.loop)
self.assertFalse(protocol.transport.is_closing()) self.assertFalse(protocol.transport.is_closing())
self.assertIsNotNone(protocol.transport._sock) self.assertIsNotNone(protocol.transport._sock)
await asyncio.sleep(0.1) await asyncio.sleep(0.1, loop=self.loop)
self.assertIsNone(protocol.transport) self.assertIsNone(protocol.transport)
def test_max_request_size(self): def test_max_request_size(self):
@ -323,7 +321,7 @@ class TestBlobExchange(BlobExchangeTestBase):
server_blob = self.server_blob_manager.get_blob(blob_hash) server_blob = self.server_blob_manager.get_blob(blob_hash)
async def sendfile(writer): async def sendfile(writer):
await asyncio.sleep(2) await asyncio.sleep(2, loop=self.loop)
return 0 return 0
server_blob.sendfile = sendfile server_blob.sendfile = sendfile
@ -347,7 +345,7 @@ class TestBlobExchange(BlobExchangeTestBase):
def _mock_accumulate_peers(q1, q2=None): def _mock_accumulate_peers(q1, q2=None):
async def _task(): async def _task():
pass pass
q2 = q2 or asyncio.Queue() q2 = q2 or asyncio.Queue(loop=self.loop)
return q2, self.loop.create_task(_task()) return q2, self.loop.create_task(_task())
mock_node.accumulate_peers = _mock_accumulate_peers mock_node.accumulate_peers = _mock_accumulate_peers

View file

@ -34,7 +34,6 @@ class TestComponentManager(AsyncioTestCase):
], ],
[ [
components.BackgroundDownloaderComponent, components.BackgroundDownloaderComponent,
components.TrackerAnnouncerComponent
] ]
] ]
self.component_manager = ComponentManager(Config()) self.component_manager = ComponentManager(Config())
@ -151,9 +150,6 @@ class FakeDelayedFileManager(FakeComponent):
async def start(self): async def start(self):
await asyncio.sleep(1) await asyncio.sleep(1)
def get_filtered(self):
return []
class TestComponentManagerProperStart(AdvanceTimeTestCase): class TestComponentManagerProperStart(AdvanceTimeTestCase):

View file

@ -72,14 +72,14 @@ class CacheConcurrentDecoratorTests(AsyncioTestCase):
@utils.cache_concurrent @utils.cache_concurrent
async def foo(self, arg1, arg2=None, delay=1): async def foo(self, arg1, arg2=None, delay=1):
self.called.append((arg1, arg2, delay)) self.called.append((arg1, arg2, delay))
await asyncio.sleep(delay) await asyncio.sleep(delay, loop=self.loop)
self.counter += 1 self.counter += 1
self.finished.append((arg1, arg2, delay)) self.finished.append((arg1, arg2, delay))
return object() return object()
async def test_gather_duplicates(self): async def test_gather_duplicates(self):
result = await asyncio.gather( result = await asyncio.gather(
self.loop.create_task(self.foo(1)), self.loop.create_task(self.foo(1)) self.loop.create_task(self.foo(1)), self.loop.create_task(self.foo(1)), loop=self.loop
) )
self.assertEqual(1, len(self.called)) self.assertEqual(1, len(self.called))
self.assertEqual(1, len(self.finished)) self.assertEqual(1, len(self.finished))
@ -93,7 +93,7 @@ class CacheConcurrentDecoratorTests(AsyncioTestCase):
with self.assertRaises(asyncio.CancelledError): with self.assertRaises(asyncio.CancelledError):
await asyncio.gather( await asyncio.gather(
t1, self.loop.create_task(self.foo(1)) t1, self.loop.create_task(self.foo(1)), loop=self.loop
) )
self.assertEqual(1, len(self.called)) self.assertEqual(1, len(self.called))
self.assertEqual(0, len(self.finished)) self.assertEqual(0, len(self.finished))

View file

@ -128,7 +128,7 @@ class TestBlobAnnouncer(AsyncioTestCase):
await self.chain_peer(constants.generate_id(current + 4), '1.2.3.13') await self.chain_peer(constants.generate_id(current + 4), '1.2.3.13')
last = await self.chain_peer(constants.generate_id(current + 5), '1.2.3.14') last = await self.chain_peer(constants.generate_id(current + 5), '1.2.3.14')
search_q, peer_q = asyncio.Queue(), asyncio.Queue() search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(loop=self.loop)
search_q.put_nowait(blob1) search_q.put_nowait(blob1)
_, task = last.accumulate_peers(search_q, peer_q) _, task = last.accumulate_peers(search_q, peer_q)

View file

@ -11,35 +11,6 @@ from lbry.dht.peer import PeerManager, make_kademlia_peer
from lbry.extras.daemon.storage import SQLiteStorage from lbry.extras.daemon.storage import SQLiteStorage
class TestBootstrapNode(AsyncioTestCase):
TIMEOUT = 10.0 # do not increase. Hitting a timeout is a real failure
async def test_bootstrap_node_adds_all_peers(self):
loop = asyncio.get_event_loop()
loop.set_debug(False)
with dht_mocks.mock_network_loop(loop):
advance = dht_mocks.get_time_accelerator(loop)
self.bootstrap_node = Node(self.loop, PeerManager(loop), constants.generate_id(),
4444, 4444, 3333, '1.2.3.4', is_bootstrap_node=True)
self.bootstrap_node.start('1.2.3.4', [])
self.bootstrap_node.protocol.ping_queue._default_delay = 0
self.addCleanup(self.bootstrap_node.stop)
# start the nodes
nodes = {}
futs = []
for i in range(100):
nodes[i] = Node(loop, PeerManager(loop), constants.generate_id(i), 4444, 4444, 3333, f'1.3.3.{i}')
nodes[i].start(f'1.3.3.{i}', [('1.2.3.4', 4444)])
self.addCleanup(nodes[i].stop)
futs.append(nodes[i].joined.wait())
await asyncio.gather(*futs)
while self.bootstrap_node.protocol.ping_queue.busy:
await advance(1)
self.assertEqual(100, len(self.bootstrap_node.protocol.routing_table.get_peers()))
class TestNodePingQueueDiscover(AsyncioTestCase): class TestNodePingQueueDiscover(AsyncioTestCase):
async def test_ping_queue_discover(self): async def test_ping_queue_discover(self):
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()

View file

@ -2,6 +2,7 @@ import unittest
from unittest import mock from unittest import mock
import json import json
from lbry.conf import Config
from lbry.extras.daemon.storage import SQLiteStorage from lbry.extras.daemon.storage import SQLiteStorage
from lbry.extras.daemon.componentmanager import ComponentManager from lbry.extras.daemon.componentmanager import ComponentManager
from lbry.extras.daemon.components import DATABASE_COMPONENT, DHT_COMPONENT, WALLET_COMPONENT from lbry.extras.daemon.components import DATABASE_COMPONENT, DHT_COMPONENT, WALLET_COMPONENT
@ -10,7 +11,6 @@ from lbry.extras.daemon.components import UPNP_COMPONENT, BLOB_COMPONENT
from lbry.extras.daemon.components import PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT from lbry.extras.daemon.components import PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
from lbry.extras.daemon.daemon import Daemon as LBRYDaemon from lbry.extras.daemon.daemon import Daemon as LBRYDaemon
from lbry.wallet import WalletManager, Wallet from lbry.wallet import WalletManager, Wallet
from lbry.conf import Config
from tests import test_utils from tests import test_utils
# from tests.mocks import mock_conf_settings, FakeNetwork, FakeFileManager # from tests.mocks import mock_conf_settings, FakeNetwork, FakeFileManager

View file

@ -11,7 +11,7 @@ from lbry.extras.daemon.components import (
DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT,
HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT,
LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT, TRACKER_ANNOUNCER_COMPONENT LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT
) )
from lbry.extras.daemon.daemon import Daemon from lbry.extras.daemon.daemon import Daemon
@ -72,7 +72,7 @@ class TestAccessHeaders(AsyncioTestCase):
DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, DATABASE_COMPONENT, DISK_SPACE_COMPONENT, BLOB_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT,
HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT, HASH_ANNOUNCER_COMPONENT, FILE_MANAGER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, WALLET_SERVER_PAYMENTS_COMPONENT,
LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT, TRACKER_ANNOUNCER_COMPONENT LIBTORRENT_COMPONENT, BACKGROUND_DOWNLOADER_COMPONENT
) )
Daemon.component_attributes = {} Daemon.component_attributes = {}
self.daemon = Daemon(conf) self.daemon = Daemon(conf)

View file

@ -138,7 +138,9 @@ class TestTypesV1Compatibility(TestCase):
channel = cert.channel channel = cert.channel
self.assertEqual( self.assertEqual(
channel.public_key, channel.public_key,
'033878b1edd4a1373149909ef03f4339f6da9c2bd2214c040fd2e530463ffe6609' '3056301006072a8648ce3d020106052b8104000a034200043878b1edd4a1373149909ef03f4339f6da9c2b'
'd2214c040fd2e530463ffe66098eca14fc70b50ff3aefd106049a815f595ed5a13eda7419ad78d9ed7ae47'
'3f17'
) )
def test_unsigned_with_fee(self): def test_unsigned_with_fee(self):
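The two expected channel.public_key values above are the same secp256k1 key in two encodings: a 33-byte compressed point on one side and a DER SubjectPublicKeyInfo on the other. A small sketch of that relationship using the ecdsa package (an assumption about tooling, not the SDK's own code path):

    from ecdsa import VerifyingKey

    der_hex = (
        '3056301006072a8648ce3d020106052b8104000a034200043878b1edd4a1373149909ef03f4339f6da9c2b'
        'd2214c040fd2e530463ffe66098eca14fc70b50ff3aefd106049a815f595ed5a13eda7419ad78d9ed7ae47'
        '3f17'
    )
    vk = VerifyingKey.from_der(bytes.fromhex(der_hex))
    # Compressing the same point reproduces the short form asserted above.
    assert vk.to_string('compressed').hex() == (
        '033878b1edd4a1373149909ef03f4339f6da9c2bd2214c040fd2e530463ffe6609'
    )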

View file

@ -8,13 +8,18 @@ from lbry.blob_exchange.serialization import BlobResponse
from lbry.blob_exchange.server import BlobServerProtocol from lbry.blob_exchange.server import BlobServerProtocol
from lbry.dht.node import Node from lbry.dht.node import Node
from lbry.dht.peer import make_kademlia_peer from lbry.dht.peer import make_kademlia_peer
from lbry.extras.daemon.storage import StoredContentClaim
from lbry.schema import Claim
from lbry.stream.managed_stream import ManagedStream from lbry.stream.managed_stream import ManagedStream
from lbry.stream.descriptor import StreamDescriptor from lbry.stream.descriptor import StreamDescriptor
from tests.unit.blob_exchange.test_transfer_blob import BlobExchangeTestBase from tests.unit.blob_exchange.test_transfer_blob import BlobExchangeTestBase
def get_mock_node(loop):
mock_node = mock.Mock(spec=Node)
mock_node.joined = asyncio.Event(loop=loop)
mock_node.joined.set()
return mock_node
class TestManagedStream(BlobExchangeTestBase): class TestManagedStream(BlobExchangeTestBase):
async def create_stream(self, blob_count: int = 10, file_name='test_file'): async def create_stream(self, blob_count: int = 10, file_name='test_file'):
self.stream_bytes = b'' self.stream_bytes = b''
@ -25,10 +30,7 @@ class TestManagedStream(BlobExchangeTestBase):
with open(file_path, 'wb') as f: with open(file_path, 'wb') as f:
f.write(self.stream_bytes) f.write(self.stream_bytes)
descriptor = await StreamDescriptor.create_stream(self.loop, self.server_blob_manager.blob_dir, file_path) descriptor = await StreamDescriptor.create_stream(self.loop, self.server_blob_manager.blob_dir, file_path)
descriptor.suggested_file_name = file_name self.sd_hash = descriptor.calculate_sd_hash()
descriptor.stream_hash = descriptor.get_stream_hash()
self.sd_hash = descriptor.sd_hash = descriptor.calculate_sd_hash()
await descriptor.make_sd_blob()
return descriptor return descriptor
async def setup_stream(self, blob_count: int = 10): async def setup_stream(self, blob_count: int = 10):
@ -52,21 +54,6 @@ class TestManagedStream(BlobExchangeTestBase):
self.assertEqual(self.stream.full_path, os.path.join(self.client_dir, 'tt_f')) self.assertEqual(self.stream.full_path, os.path.join(self.client_dir, 'tt_f'))
self.assertTrue(os.path.isfile(os.path.join(self.client_dir, 'tt_f'))) self.assertTrue(os.path.isfile(os.path.join(self.client_dir, 'tt_f')))
async def test_empty_name_fallback(self):
descriptor = await self.create_stream(file_name=" ")
descriptor.suggested_file_name = " "
claim = Claim()
claim.stream.source.name = "cool.mp4"
self.stream = ManagedStream(
self.loop, self.client_config, self.client_blob_manager, self.sd_hash, self.client_dir,
claim=StoredContentClaim(serialized=claim.to_bytes().hex())
)
await self._test_transfer_stream(10, skip_setup=True)
self.assertTrue(self.stream.completed)
self.assertEqual(self.stream.suggested_file_name, "cool.mp4")
self.assertEqual(self.stream.stream_name, "cool.mp4")
self.assertEqual(self.stream.mime_type, "video/mp4")
async def test_status_file_completed(self): async def test_status_file_completed(self):
await self._test_transfer_stream(10) await self._test_transfer_stream(10)
self.assertTrue(self.stream.output_file_exists) self.assertTrue(self.stream.output_file_exists)
@ -109,9 +96,9 @@ class TestManagedStream(BlobExchangeTestBase):
await self._test_transfer_stream(10, stop_when_done=False) await self._test_transfer_stream(10, stop_when_done=False)
self.assertEqual(self.stream.status, "finished") self.assertEqual(self.stream.status, "finished")
self.assertTrue(self.stream._running.is_set()) self.assertTrue(self.stream._running.is_set())
await asyncio.sleep(0.5) await asyncio.sleep(0.5, loop=self.loop)
self.assertTrue(self.stream._running.is_set()) self.assertTrue(self.stream._running.is_set())
await asyncio.sleep(2) await asyncio.sleep(2, loop=self.loop)
self.assertEqual(self.stream.status, "finished") self.assertEqual(self.stream.status, "finished")
self.assertFalse(self.stream._running.is_set()) self.assertFalse(self.stream._running.is_set())

View file

@ -86,13 +86,13 @@ class TestReflector(AsyncioTestCase):
self.assertListEqual(sent, []) self.assertListEqual(sent, [])
async def test_reflect_stream(self): async def test_reflect_stream(self):
return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=50), 3) return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=50), 3, loop=self.loop)
async def test_reflect_stream_but_reflector_changes_its_mind(self): async def test_reflect_stream_but_reflector_changes_its_mind(self):
return await asyncio.wait_for(self._test_reflect_stream(partial_needs=True), 3) return await asyncio.wait_for(self._test_reflect_stream(partial_needs=True), 3, loop=self.loop)
async def test_reflect_stream_small_response_chunks(self): async def test_reflect_stream_small_response_chunks(self):
return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=30), 3) return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=30), 3, loop=self.loop)
async def test_announces(self): async def test_announces(self):
to_announce = await self.storage.get_blobs_to_announce() to_announce = await self.storage.get_blobs_to_announce()

View file

@ -174,7 +174,7 @@ class TestStreamManager(BlobExchangeTestBase):
await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager) await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
else: else:
await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager) await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
await asyncio.sleep(0) await asyncio.sleep(0, loop=self.loop)
self.assertTrue(checked_analytics_event) self.assertTrue(checked_analytics_event)
async def test_time_to_first_bytes(self): async def test_time_to_first_bytes(self):
@ -317,7 +317,7 @@ class TestStreamManager(BlobExchangeTestBase):
stream.downloader.node = self.stream_manager.node stream.downloader.node = self.stream_manager.node
await stream.save_file() await stream.save_file()
await stream.finished_writing.wait() await stream.finished_writing.wait()
await asyncio.sleep(0) await asyncio.sleep(0, loop=self.loop)
self.assertTrue(stream.finished) self.assertTrue(stream.finished)
self.assertFalse(stream.running) self.assertFalse(stream.running)
self.assertTrue(os.path.isfile(os.path.join(self.client_dir, "test_file"))) self.assertTrue(os.path.isfile(os.path.join(self.client_dir, "test_file")))
@ -355,7 +355,7 @@ class TestStreamManager(BlobExchangeTestBase):
self.stream_manager.analytics_manager._post = check_post self.stream_manager.analytics_manager._post = check_post
await self._test_download_error_on_start(expected_error, timeout) await self._test_download_error_on_start(expected_error, timeout)
await asyncio.sleep(0) await asyncio.sleep(0, loop=self.loop)
self.assertListEqual([expected_error.__name__], received) self.assertListEqual([expected_error.__name__], received)
async def test_insufficient_funds(self): async def test_insufficient_funds(self):
@ -424,7 +424,7 @@ class TestStreamManager(BlobExchangeTestBase):
self.assertIsNone(stream.full_path) self.assertIsNone(stream.full_path)
self.assertEqual(0, stream.written_bytes) self.assertEqual(0, stream.written_bytes)
await self.stream_manager.stop() self.stream_manager.stop()
await self.stream_manager.start() await self.stream_manager.start()
self.assertEqual(1, len(self.stream_manager.streams)) self.assertEqual(1, len(self.stream_manager.streams))
stream = list(self.stream_manager.streams.values())[0] stream = list(self.stream_manager.streams.values())[0]
@ -448,8 +448,8 @@ class TestStreamManager(BlobExchangeTestBase):
self.assertDictEqual(self.stream_manager.streams, {}) self.assertDictEqual(self.stream_manager.streams, {})
stream = await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager) stream = await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
await stream.finished_writing.wait() await stream.finished_writing.wait()
await asyncio.sleep(0) await asyncio.sleep(0, loop=self.loop)
await self.stream_manager.stop() self.stream_manager.stop()
self.client_blob_manager.stop() self.client_blob_manager.stop()
# partial removal, only sd blob is missing. # partial removal, only sd blob is missing.
# in this case, we recover the sd blob while the other blobs are kept untouched as 'finished' # in this case, we recover the sd blob while the other blobs are kept untouched as 'finished'

View file

@ -1,92 +0,0 @@
import asyncio
import random
from lbry.testcase import AsyncioTestCase
from lbry.dht.peer import KademliaPeer
from lbry.torrent.tracker import CompactIPv4Peer, TrackerClient, enqueue_tracker_search, UDPTrackerServerProtocol, encode_peer
class UDPTrackerClientTestCase(AsyncioTestCase):
async def asyncSetUp(self):
self.client_servers_list = []
self.servers = {}
self.client = TrackerClient(b"\x00" * 48, 4444, lambda: self.client_servers_list, timeout=1)
await self.client.start()
self.addCleanup(self.client.stop)
await self.add_server()
async def add_server(self, port=None, add_to_client=True):
port = port or len(self.servers) + 59990
assert port not in self.servers
server = UDPTrackerServerProtocol()
self.servers[port] = server
transport, _ = await self.loop.create_datagram_endpoint(lambda: server, local_addr=("127.0.0.1", port))
self.addCleanup(transport.close)
if add_to_client:
self.client_servers_list.append(("127.0.0.1", port))
async def test_announce(self):
info_hash = random.getrandbits(160).to_bytes(20, "big", signed=False)
announcement = (await self.client.get_peer_list(info_hash))[0]
self.assertEqual(announcement.seeders, 1)
self.assertEqual(announcement.peers,
[CompactIPv4Peer(int.from_bytes(bytes([127, 0, 0, 1]), "big", signed=False), 4444)])
async def test_announce_many_info_hashes_to_many_servers_with_bad_one_and_dns_error(self):
await asyncio.gather(*[self.add_server() for _ in range(3)])
self.client_servers_list.append(("no.it.does.not.exist", 7070))
self.client_servers_list.append(("127.0.0.2", 7070))
info_hashes = [random.getrandbits(160).to_bytes(20, "big", signed=False) for _ in range(5)]
await self.client.announce_many(*info_hashes)
for server in self.servers.values():
self.assertDictEqual(
server.peers, {
info_hash: [encode_peer("127.0.0.1", self.client.announce_port)] for info_hash in info_hashes
})
async def test_announce_using_helper_function(self):
info_hash = random.getrandbits(160).to_bytes(20, "big", signed=False)
queue = asyncio.Queue()
enqueue_tracker_search(info_hash, queue)
peers = await queue.get()
self.assertEqual(peers, [KademliaPeer('127.0.0.1', None, None, 4444, allow_localhost=True)])
async def test_error(self):
info_hash = random.getrandbits(160).to_bytes(20, "big", signed=False)
await self.client.get_peer_list(info_hash)
list(self.servers.values())[0].known_conns.clear()
self.client.results.clear()
with self.assertRaises(Exception) as err:
await self.client.get_peer_list(info_hash)
self.assertEqual(err.exception.args[0], b'Connection ID missmatch.\x00')
async def test_multiple_servers(self):
await asyncio.gather(*[self.add_server() for _ in range(10)])
info_hash = random.getrandbits(160).to_bytes(20, "big", signed=False)
await self.client.get_peer_list(info_hash)
for server in self.servers.values():
self.assertEqual(server.peers, {info_hash: [encode_peer("127.0.0.1", self.client.announce_port)]})
async def test_multiple_servers_with_bad_one(self):
await asyncio.gather(*[self.add_server() for _ in range(10)])
self.client_servers_list.append(("127.0.0.2", 7070))
info_hash = random.getrandbits(160).to_bytes(20, "big", signed=False)
await self.client.get_peer_list(info_hash)
for server in self.servers.values():
self.assertEqual(server.peers, {info_hash: [encode_peer("127.0.0.1", self.client.announce_port)]})
async def test_multiple_servers_with_different_peers_across_helper_function(self):
# this is how the downloader uses it
await asyncio.gather(*[self.add_server() for _ in range(10)])
info_hash = random.getrandbits(160).to_bytes(20, "big", signed=False)
fake_peers = []
for server in self.servers.values():
for _ in range(10):
peer = (f"127.0.0.{random.randint(1, 255)}", random.randint(2000, 65500))
fake_peers.append(peer)
server.add_peer(info_hash, *peer)
peer_q = asyncio.Queue()
enqueue_tracker_search(info_hash, peer_q)
await asyncio.sleep(0)
await asyncio.gather(*self.client.tasks.values())
self.assertEqual(11, peer_q.qsize())

View file

@ -1,11 +1,7 @@
import asyncio import asyncio
from binascii import hexlify from binascii import hexlify
from lbry.testcase import AsyncioTestCase from lbry.testcase import AsyncioTestCase
from lbry.wallet import ( from lbry.wallet import Wallet, Ledger, Database, Headers, Account, SingleKey, HierarchicalDeterministic
Wallet, Ledger, Database, Headers,
Account, SingleKey, HierarchicalDeterministic,
DeterministicChannelKeyManager
)
class TestAccount(AsyncioTestCase): class TestAccount(AsyncioTestCase):

Some files were not shown because too many files have changed in this diff.