Compare commits

28 commits: master...rocksdb-hu

Commits (SHA1; author and date columns were not captured):

a586266831
bd284be69d
d6035c1ead
876a72f18d
b82dc8e45f
31144a490e
a2e7afa87f
6f6faef3dc
607d7d9cc7
3c7307a2f4
bbfe263591
7fe34ebb78
ff8c08b289
d7707d0053
a7d64de361
8a02796b37
4f4ecd64cc
e3a4dab6cb
03f888f787
d072b9f70b
8616fb96b1
ebec12522b
611ad5c655
e06c8e8303
776dea58c2
28be7d8993
24622103cf
2d8ed77806

156 changed files with 15694 additions and 4296 deletions

.github/workflows/main.yml (vendored): 74 changes

@@ -1,24 +1,24 @@
 name: ci
-on: ["push", "pull_request", "workflow_dispatch"]
+on: ["push", "pull_request"]

 jobs:

   lint:
     name: lint
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
        with:
-          python-version: '3.9'
+          python-version: '3.7'
      - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - run: pip install --user --upgrade pip wheel
-      - run: pip install -e .[lint]
+      - run: pip install -e .[torrent,lint]
      - run: make lint

  tests-unit:

@@ -26,31 +26,31 @@ jobs:
    strategy:
      matrix:
        os:
-          - ubuntu-20.04
+          - ubuntu-latest
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
        with:
-          python-version: '3.9'
+          python-version: '3.7'
      - name: set pip cache dir
-        shell: bash
-        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
+        id: pip-cache
+        run: echo "::set-output name=dir::$(pip cache dir)"
      - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
-          path: ${{ env.PIP_CACHE_DIR }}
+          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - id: os-name
-        uses: ASzc/change-string-case-action@v5
+        uses: ASzc/change-string-case-action@v1
        with:
          string: ${{ runner.os }}
-      - run: python -m pip install --user --upgrade pip wheel
+      - run: pip install --user --upgrade pip wheel
      - if: startsWith(runner.os, 'linux')
-        run: pip install -e .[test]
+        run: pip install -e .[torrent,test]
      - if: startsWith(runner.os, 'linux')
        env:
          HOME: /tmp

@@ -72,7 +72,7 @@ jobs:

  tests-integration:
    name: "tests / integration"
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
    strategy:
      matrix:
        test:

@@ -81,6 +81,8 @@ jobs:
          - claims
          - takeovers
          - transactions
+          - claims_legacy_search
+          - takeovers_legacy_search
          - other
    steps:
      - name: Configure sysctl limits

@@ -93,16 +95,16 @@ jobs:
        uses: elastic/elastic-github-actions/elasticsearch@master
        with:
          stack-version: 7.12.1
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
        with:
-          python-version: '3.9'
+          python-version: '3.7'
      - if: matrix.test == 'other'
        run: |
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends ffmpeg
      - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
          path: ./.tox
          key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}

@@ -123,7 +125,7 @@ jobs:

  coverage:
    needs: ["tests-unit", "tests-integration"]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
    steps:
      - name: finalize coverage report submission
        env:

@@ -138,29 +140,29 @@ jobs:
    strategy:
      matrix:
        os:
-          - ubuntu-20.04
+          - ubuntu-18.04
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v1
        with:
-          python-version: '3.9'
+          python-version: '3.7'
      - id: os-name
-        uses: ASzc/change-string-case-action@v5
+        uses: ASzc/change-string-case-action@v1
        with:
          string: ${{ runner.os }}
      - name: set pip cache dir
-        shell: bash
-        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
+        id: pip-cache
+        run: echo "::set-output name=dir::$(pip cache dir)"
      - name: extract pip cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
        with:
-          path: ${{ env.PIP_CACHE_DIR }}
+          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
-      - run: pip install pyinstaller==4.6
+      - run: pip install pyinstaller==4.4
      - run: pip install -e .
      - if: startsWith(github.ref, 'refs/tags/v')
        run: python docker/set_build.py

@@ -175,7 +177,7 @@ jobs:
          pip install pywin32==301
          pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet.exe --version
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v2
        with:
          name: lbrynet-${{ steps.os-name.outputs.lowercase }}
          path: dist/

@@ -184,7 +186,7 @@ jobs:
    name: "release"
    if: startsWith(github.ref, 'refs/tags/v')
    needs: ["build"]
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - uses: actions/download-artifact@v2

.github/workflows/release.yml (vendored): 2 changes

@@ -7,7 +7,7 @@ on:
 jobs:
   release:
     name: "slack notification"
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     steps:
       - uses: LoveToKnow/slackify-markdown-action@v1.0.0
         id: markdown

LICENSE: 2 changes

@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2015-2022 LBRY Inc
+Copyright (c) 2015-2020 LBRY Inc

 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,

@@ -2,7 +2,6 @@ FROM debian:10-slim

 ARG user=lbry
 ARG projects_dir=/home/$user
-ARG db_dir=/database

 ARG DOCKER_TAG
 ARG DOCKER_COMMIT=docker

@@ -28,16 +27,12 @@ RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user

 COPY . $projects_dir
 RUN chown -R $user:$user $projects_dir
-RUN mkdir -p $db_dir
-RUN chown -R $user:$user $db_dir

 USER $user
 WORKDIR $projects_dir

 RUN python3 -m pip install -U setuptools pip
 RUN make install
 RUN python3 docker/set_build.py
 RUN rm ~/.cache -rf
-VOLUME $db_dir
 ENTRYPOINT ["python3", "scripts/dht_node.py"]

@@ -15,7 +15,6 @@ RUN apt-get update && \
     build-essential \
     automake libtool \
     pkg-config \
     libleveldb-dev \
     python3.7 \
     python3-dev \
     python3-pip \

docs/api.json: 330 changes
File diff suppressed because one or more lines are too long.

@@ -1,2 +1,2 @@
-__version__ = "0.113.0"
+__version__ = "0.106.0"
 version = tuple(map(int, __version__.split('.')))  # pylint: disable=invalid-name
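
The version module keeps __version__ as a string and derives a comparable tuple from it, which is why the single string change above is the whole version bump. A tiny illustration of why the tuple form is used:

__version__ = "0.106.0"
version = tuple(map(int, __version__.split('.')))
assert version == (0, 106, 0)
assert (0, 106, 0) < (0, 113, 0)  # tuples compare element-wise, so newer releases sort higher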

@@ -87,8 +87,8 @@ class AbstractBlob:
         self.blob_completed_callback = blob_completed_callback
         self.blob_directory = blob_directory
         self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
-        self.verified: asyncio.Event = asyncio.Event()
-        self.writing: asyncio.Event = asyncio.Event()
+        self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
+        self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
         self.readers: typing.List[typing.BinaryIO] = []
         self.added_on = added_on or time.time()
         self.is_mine = is_mine

@@ -201,7 +201,7 @@ class AbstractBlob:
         writer = blob.get_blob_writer()
         writer.write(blob_bytes)
         await blob.verified.wait()
-        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
+        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash, added_on, is_mine)

     def save_verified_blob(self, verified_bytes: bytes):
         if self.verified.is_set():

@@ -222,7 +222,7 @@ class AbstractBlob:
                         peer_port: typing.Optional[int] = None) -> HashBlobWriter:
         if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
             raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
-        fut = asyncio.Future()
+        fut = asyncio.Future(loop=self.loop)
         writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
         self.writers[(peer_address, peer_port)] = writer
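
A recurring pattern in this compare: the master side drops the explicit loop= argument from asyncio primitives such as Event, because that argument was deprecated in Python 3.8 and removed for these classes in 3.10, while the older branch still passes it. A minimal sketch (my own illustration, not the repo's class) of why the loop-less form is required on modern Python:

import asyncio

class Download:
    """Minimal sketch: synchronization primitives bind to the running loop automatically on 3.10+."""
    def __init__(self):
        # Deprecated since 3.8 and a TypeError on 3.10+:
        #   self.verified = asyncio.Event(loop=asyncio.get_event_loop())
        self.verified = asyncio.Event()  # binds lazily to the loop that awaits it

    async def finish(self):
        self.verified.set()

async def main():
    d = Download()
    waiter = asyncio.create_task(d.verified.wait())
    await d.finish()
    await waiter

asyncio.run(main())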

@@ -12,8 +12,8 @@ class BlobInfo:
     ]

     def __init__(
-            self, blob_num: int, length: int, iv: str, added_on,
-            blob_hash: typing.Optional[str] = None, is_mine=False):
+            self, blob_num: int, length: int, iv: str,
+            blob_hash: typing.Optional[str] = None, added_on=0, is_mine=False):
         self.blob_hash = blob_hash
         self.blob_num = blob_num
         self.length = length
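
On the master side added_on becomes the fourth positional parameter of BlobInfo, and the call in AbstractBlob.create_from_unencrypted (previous file) swaps blob_hash and added_on to match, since positional arguments are bound strictly by order. A sketch of the hazard, with simplified stand-in classes rather than the real ones:

# Why the constructor change and the call-site change must land together.
class BlobInfoOld:
    def __init__(self, blob_num, length, iv, blob_hash=None, added_on=0, is_mine=False):
        self.blob_hash, self.added_on = blob_hash, added_on

class BlobInfoNew:
    def __init__(self, blob_num, length, iv, added_on, blob_hash=None, is_mine=False):
        self.blob_hash, self.added_on = blob_hash, added_on

# Old-style positional call: (..., iv, blob_hash, added_on, ...)
old = BlobInfoOld(0, 16, "aa" * 8, "deadbeef", 1650000000, False)
# The same argument order against the new signature would silently put the blob
# hash into added_on, so the updated call passes added_on before blob_hash:
new = BlobInfoNew(0, 16, "aa" * 8, 1650000000, "deadbeef", False)
assert old.blob_hash == new.blob_hash == "deadbeef"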

@@ -83,8 +83,6 @@ class BlobManager:
         to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
         if to_add:
             self.completed_blob_hashes.update(to_add)
-        # check blobs that aren't set as finished but were seen on disk
-        await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
         if self.config.track_bandwidth:
             self.connection_manager.start()
         return True

@@ -115,18 +113,9 @@ class BlobManager:
                 (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
             )

-    async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
-        """Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
-        to_add = []
-        for blob_hash in blob_hashes:
-            if not self.is_blob_verified(blob_hash):
-                continue
-            blob = self.get_blob(blob_hash)
-            to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
-            if len(to_add) > 500:
-                await self.storage.add_blobs(*to_add, finished=True)
-                to_add.clear()
-        return await self.storage.add_blobs(*to_add, finished=True)
-
     def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
         """Returns of the blobhashes_to_check, which are valid"""
         return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]

     def delete_blob(self, blob_hash: str):
         if not is_valid_blobhash(blob_hash):
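
The ensure_completed_blobs_status method, present only on the master side of this diff, flushes to the database in chunks of 500 rows instead of building one giant statement. A hedged sketch of that batching pattern, with a hypothetical add_rows coroutine standing in for SQLiteStorage.add_blobs:

import asyncio
from typing import Iterable, Tuple

async def add_rows(*rows: Tuple, finished: bool = True) -> None:
    """Hypothetical stand-in for the storage call: one bulk write per invocation."""
    await asyncio.sleep(0)  # pretend to touch the database

async def mark_finished_in_batches(rows: Iterable[Tuple], batch_size: int = 500) -> None:
    # Accumulate rows and flush every `batch_size`, so a directory with hundreds of
    # thousands of verified blobs never builds a single huge parameter list.
    batch = []
    for row in rows:
        batch.append(row)
        if len(batch) > batch_size:
            await add_rows(*batch, finished=True)
            batch.clear()
    if batch:
        await add_rows(*batch, finished=True)

asyncio.run(mark_finished_in_batches((("hash%d" % i, 2 ** 21, 0, False) for i in range(1200))))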

@@ -36,29 +36,31 @@ class DiskSpaceManager:
         await self._clean(True)

     async def _clean(self, is_network_blob=False):
-        space_used_mb = await self.get_space_used_mb(cached=False)
+        space_used_bytes = await self.get_space_used_bytes()
         if is_network_blob:
-            space_used_mb = space_used_mb['network_storage']
+            space_used_bytes = space_used_bytes['network_storage']
         else:
-            space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
+            space_used_bytes = space_used_bytes['content_storage'] + space_used_bytes['private_storage']
         storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
+        storage_limit = storage_limit_mb*1024*1024 if storage_limit_mb else None
         if self.analytics:
             asyncio.create_task(
-                self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
+                self.analytics.send_disk_space_used(space_used_bytes, storage_limit, is_network_blob)
             )
+        if not storage_limit:
+            return 0
         delete = []
-        available = storage_limit_mb - space_used_mb
-        if storage_limit_mb == 0 if not is_network_blob else available >= 0:
+        available = storage_limit - space_used_bytes
+        if available > 0:
             return 0
         for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
             delete.append(blob_hash)
-            available += int(file_size/1024.0/1024.0)
-            if available >= 0:
+            available += file_size
+            if available > 0:
                 break
         if delete:
             await self.db.stop_all_files()
             await self.blob_manager.delete_blobs(delete, delete_from_db=True)
         self._used_space_bytes = None
         return len(delete)

     async def cleaning_loop(self):
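
The master side of this file accounts for usage and limits in megabytes, while the branch converts the MB limit to bytes and sums raw file sizes; either way the invariant is the same, the limit and the running total must stay in one unit. A small sketch of the MB-based bookkeeping (my own, not the class itself):

def blobs_to_delete_mb(space_used_mb: int, storage_limit_mb: int, blob_sizes_bytes: list) -> int:
    """Count how many blobs must be freed to get back under the limit, in MB accounting."""
    available = storage_limit_mb - space_used_mb
    if available >= 0:
        return 0
    deleted = 0
    for size in blob_sizes_bytes:
        deleted += 1
        available += int(size / 1024.0 / 1024.0)  # convert each freed blob to MB
        if available >= 0:
            break
    return deleted

# 5120 MB used against a 5000 MB limit, freeing 2 MB blobs, needs 60 deletions
assert blobs_to_delete_mb(5120, 5000, [2 * 1024 * 1024] * 1000) == 60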

@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.buf = b''

         # this is here to handle the race when the downloader is closed right as response_fut gets a result
-        self.closed = asyncio.Event()
+        self.closed = asyncio.Event(loop=self.loop)

     def data_received(self, data: bytes):
         if self.connection_manager:

@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.transport.write(msg)
         if self.connection_manager:
             self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
-        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
+        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
         availability_response = response.get_availability_response()
         price_response = response.get_price_response()
         blob_response = response.get_blob_response()

@@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
                   f" timeout in {self.peer_timeout}"
             log.debug(msg)
             msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
-            await asyncio.wait_for(self.writer.finished, self.peer_timeout)
+            await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
             # wait for the io to finish
             await self.blob.verified.wait()
             log.info("%s at %fMB/s", msg,

@@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         try:
             self._blob_bytes_received = 0
             self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
-            self._response_fut = asyncio.Future()
+            self._response_fut = asyncio.Future(loop=self.loop)
             return await self._download_blob()
         except OSError:
             # i'm not sure how to fix this race condition - jack

@@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['AbstractBlob'],
     try:
         if not connected_protocol:
             await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
-                                   peer_connect_timeout)
+                                   peer_connect_timeout, loop=loop)
             connected_protocol = protocol
         if blob is None or blob.get_is_verified() or not blob.is_writeable():
             # blob is None happens when we are just opening a connection

@@ -30,7 +30,7 @@ class BlobDownloader:
         self.failures: typing.Dict['KademliaPeer', int] = {}
         self.connection_failures: typing.Set['KademliaPeer'] = set()
         self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
-        self.is_running = asyncio.Event()
+        self.is_running = asyncio.Event(loop=self.loop)

     def should_race_continue(self, blob: 'AbstractBlob'):
         max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)

@@ -64,8 +64,8 @@ class BlobDownloader:
         self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1

     async def new_peer_or_finished(self):
-        active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
-        await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
+        active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
+        await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')

     def cleanup_active(self):
         if not self.active_connections and not self.connections:

@@ -97,15 +97,21 @@ class BlobDownloader:
                "%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
                len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
            )
+            re_add: typing.Set['KademliaPeer'] = set()
            for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
                if peer in self.ignored:
                    continue
-                if peer in self.active_connections or not self.should_race_continue(blob):
+                if peer in self.active_connections:
+                    if peer not in re_add:
+                        re_add.add(peer)
                    continue
+                if not self.should_race_continue(blob):
+                    break
                log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
                t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
                self.active_connections[peer] = t
-            self.peer_queue.put_nowait(list(batch))
+            if not re_add:
+                self.peer_queue.put_nowait(list(batch))
            await self.new_peer_or_finished()
            self.cleanup_active()
        log.debug("downloaded %s", blob_hash[:8])

@@ -126,7 +132,7 @@ class BlobDownloader:

 async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
                         blob_hash: str) -> 'AbstractBlob':
-    search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
+    search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
     search_queue.put_nowait(blob_hash)
     peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
     fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
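
In new_peer_or_finished the master side wraps the one-second sleep sentinel in asyncio.create_task() before handing it to asyncio.wait(), since passing bare coroutines to asyncio.wait() was deprecated in Python 3.8 and rejected from 3.11 on. A hedged sketch of the same race-against-a-timer pattern:

import asyncio

async def watch(tasks: list) -> None:
    # A one-second "tick" races against the real work; everything handed to
    # asyncio.wait() must already be a Task or Future on modern Python.
    sentinel = asyncio.create_task(asyncio.sleep(1))
    done, pending = await asyncio.wait(tasks + [sentinel], return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()

async def main():
    work = [asyncio.create_task(asyncio.sleep(0.1)), asyncio.create_task(asyncio.sleep(5))]
    await watch(work)

asyncio.run(main())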

@@ -1,7 +1,6 @@
 import asyncio
 import binascii
 import logging
-import socket
 import typing
 from json.decoder import JSONDecodeError
 from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types

@@ -25,19 +24,19 @@ class BlobServerProtocol(asyncio.Protocol):
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event()
+        self.started_listening = asyncio.Event(loop=self.loop)
         self.buf = b''
         self.transport: typing.Optional[asyncio.Transport] = None
         self.lbrycrd_address = lbrycrd_address
         self.peer_address_and_port: typing.Optional[str] = None
-        self.started_transfer = asyncio.Event()
-        self.transfer_finished = asyncio.Event()
+        self.started_transfer = asyncio.Event(loop=self.loop)
+        self.transfer_finished = asyncio.Event(loop=self.loop)
         self.close_on_idle_task: typing.Optional[asyncio.Task] = None

     async def close_on_idle(self):
         while self.transport:
             try:
-                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
+                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
             except asyncio.TimeoutError:
                 log.debug("closing idle connection from %s", self.peer_address_and_port)
                 return self.close()

@@ -101,7 +100,7 @@ class BlobServerProtocol(asyncio.Protocol):
                 log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
                 self.started_transfer.set()
                 try:
-                    sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
+                    sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
                     if sent and sent > 0:
                         self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
                         log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)

@@ -138,7 +137,7 @@ class BlobServerProtocol(asyncio.Protocol):
             try:
                 request = BlobRequest.deserialize(self.buf + data)
                 self.buf = remainder
-            except (UnicodeDecodeError, JSONDecodeError):
+            except JSONDecodeError:
                 log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
                           len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
                 self.close()

@@ -157,7 +156,7 @@ class BlobServer:
         self.loop = loop
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event()
+        self.started_listening = asyncio.Event(loop=self.loop)
         self.lbrycrd_address = lbrycrd_address
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout

@@ -168,13 +167,6 @@ class BlobServer:
             raise Exception("already running")

         async def _start_server():
-            # checking if the port is in use
-            # thx https://stackoverflow.com/a/52872579
-            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-                if s.connect_ex(('localhost', port)) == 0:
-                    # the port is already in use!
-                    log.error("Failed to bind TCP %s:%d", interface, port)
-
             server = await self.loop.create_server(
                 lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
                                                    self.idle_timeout, self.transfer_timeout),
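
On the master side of this diff, _start_server first probes whether the TCP port is already taken using socket.connect_ex (the Stack Overflow trick cited in the removed comment) and logs an error before attempting to bind; the branch predates that check. A hedged sketch of the same best-effort probe:

import socket

def port_in_use(port: int, host: str = "localhost") -> bool:
    """Best-effort check: if something accepts a TCP connection, the port is taken."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1.0)
        return s.connect_ex((host, port)) == 0  # connect_ex returns 0 on success

if port_in_use(4444):
    print("TCP 4444 is already in use; stop the other process or choose another port")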

lbry/conf.py: 22 changes

@@ -622,11 +622,7 @@ class Config(CLIConfig):
         "Routing table bucket index below which we always split the bucket if given a new key to add to it and "
         "the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
         "will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
-        "use.", 2
-    )
-    is_bootstrap_node = Toggle(
-        "When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
-        "add as many peers as possible and better help first-runs.", False
+        "use.", 1
     )

     # protocol timeouts

@@ -685,14 +681,6 @@ class Config(CLIConfig):
         ('cdn.reflector.lbry.com', 5567)
     ])

-    tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
-        ('tracker.lbry.com', 9252),
-        ('tracker.lbry.grin.io', 9252),
-        ('tracker.lbry.pigg.es', 9252),
-        ('tracker.lizard.technology', 9252),
-        ('s1.lbry.network', 9252),
-    ])
-
     lbryum_servers = Servers("SPV wallet servers", [
         ('spv11.lbry.com', 50001),
         ('spv12.lbry.com', 50001),

@@ -703,20 +691,14 @@ class Config(CLIConfig):
         ('spv17.lbry.com', 50001),
         ('spv18.lbry.com', 50001),
         ('spv19.lbry.com', 50001),
-        ('hub.lbry.grin.io', 50001),
-        ('hub.lizard.technology', 50001),
-        ('s1.lbry.network', 50001),
     ])
     known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
         ('dht.lbry.grin.io', 4444),  # Grin
         ('dht.lbry.madiator.com', 4444),  # Madiator
-        ('dht.lbry.pigg.es', 4444),  # Pigges
         ('lbrynet1.lbry.com', 4444),  # US EAST
         ('lbrynet2.lbry.com', 4444),  # US WEST
         ('lbrynet3.lbry.com', 4444),  # EU
-        ('lbrynet4.lbry.com', 4444),  # ASIA
-        ('dht.lizard.technology', 4444),  # Jack
-        ('s2.lbry.network', 4444),
+        ('lbrynet4.lbry.com', 4444)  # ASIA
     ])

     # blockchain

@@ -67,7 +67,7 @@ class ConnectionManager:

         while True:
             last = time.perf_counter()
-            await asyncio.sleep(0.1)
+            await asyncio.sleep(0.1, loop=self.loop)
             self._status['incoming_bps'].clear()
             self._status['outgoing_bps'].clear()
             now = time.perf_counter()

@@ -1,9 +1,6 @@
 import asyncio
 import typing
 import logging
-
-from prometheus_client import Counter, Gauge
-
 if typing.TYPE_CHECKING:
     from lbry.dht.node import Node
     from lbry.extras.daemon.storage import SQLiteStorage

@@ -12,59 +9,45 @@ log = logging.getLogger(__name__)


 class BlobAnnouncer:
-    announcements_sent_metric = Counter(
-        "announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
-        labelnames=("peers", "error"),
-    )
-    announcement_queue_size_metric = Gauge(
-        "announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
-        labelnames=("scope",)
-    )
-
     def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
         self.loop = loop
         self.node = node
         self.storage = storage
         self.announce_task: asyncio.Task = None
         self.announce_queue: typing.List[str] = []
         self._done = asyncio.Event()
-        self.announced = set()

-    async def _run_consumer(self):
-        while self.announce_queue:
-            try:
-                blob_hash = self.announce_queue.pop()
-                peers = len(await self.node.announce_blob(blob_hash))
-                self.announcements_sent_metric.labels(peers=peers, error=False).inc()
-                if peers > 4:
-                    self.announced.add(blob_hash)
-                else:
-                    log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
-            except Exception as err:
-                self.announcements_sent_metric.labels(peers=0, error=True).inc()
-                log.warning("error announcing %s: %s", blob_hash[:8], str(err))
+    async def _submit_announcement(self, blob_hash):
+        try:
+            peers = len(await self.node.announce_blob(blob_hash))
+            if peers > 4:
+                return blob_hash
+            else:
+                log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
+        except Exception as err:
+            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
+                raise err
+            log.warning("error announcing %s: %s", blob_hash[:8], str(err))

     async def _announce(self, batch_size: typing.Optional[int] = 10):
         while batch_size:
             if not self.node.joined.is_set():
                 await self.node.joined.wait()
-            await asyncio.sleep(60)
+            await asyncio.sleep(60, loop=self.loop)
             if not self.node.protocol.routing_table.get_peers():
                 log.warning("No peers in DHT, announce round skipped")
                 continue
             self.announce_queue.extend(await self.storage.get_blobs_to_announce())
-            self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
             log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
             while len(self.announce_queue) > 0:
                 log.info("%i blobs to announce", len(self.announce_queue))
-                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
-                announced = list(filter(None, self.announced))
+                announced = await asyncio.gather(*[
+                    self._submit_announcement(
+                        self.announce_queue.pop()) for _ in range(batch_size) if self.announce_queue
+                ], loop=self.loop)
+                announced = list(filter(None, announced))
                 if announced:
                     await self.storage.update_last_announced_blobs(announced)
                     log.info("announced %i blobs", len(announced))
-                self.announced.clear()
             self._done.set()
             self._done.clear()

     def start(self, batch_size: typing.Optional[int] = 10):
         assert not self.announce_task or self.announce_task.done(), "already running"

@@ -73,6 +56,3 @@ class BlobAnnouncer:
     def stop(self):
         if self.announce_task and not self.announce_task.done():
             self.announce_task.cancel()
-
-    def wait(self):
-        return self._done.wait()
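
The master side replaces the per-hash _submit_announcement gather with a small pool of _run_consumer coroutines that drain one shared queue, so the batch no longer waits for its slowest member before pulling more work. A hedged sketch of that consumer-pool pattern, with a fake announce() standing in for Node.announce_blob:

import asyncio, random

async def announce(blob_hash: str) -> int:
    """Stand-in for the DHT announce: returns how many peers accepted the announcement."""
    await asyncio.sleep(random.random() / 10)
    return random.randint(0, 8)

async def run_consumer(queue: list, announced: set) -> None:
    # Each consumer keeps pulling hashes until the shared queue is empty.
    while queue:
        blob_hash = queue.pop()
        if await announce(blob_hash) > 4:
            announced.add(blob_hash)

async def announce_all(hashes: list, batch_size: int = 10) -> set:
    queue, announced = list(hashes), set()
    # A fixed pool of consumers drains the queue concurrently; one slow hash only
    # delays its own consumer instead of gating the next batch of `batch_size` hashes.
    await asyncio.gather(*[run_consumer(queue, announced) for _ in range(batch_size)])
    return announced

print(len(asyncio.run(announce_all([f"hash{i}" for i in range(100)]))))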

@@ -20,6 +20,7 @@ MAYBE_PING_DELAY = 300  # 5 minutes
 CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
 RPC_ID_LENGTH = 20
 PROTOCOL_VERSION = 1
+BOTTOM_OUT_LIMIT = 3
 MSG_SIZE_LIMIT = 1400

lbry/dht/node.py: 106 changes

@@ -5,7 +5,7 @@ import socket

 from prometheus_client import Gauge

-from lbry.utils import aclosing, resolve_host
+from lbry.utils import resolve_host
 from lbry.dht import constants
 from lbry.dht.peer import make_kademlia_peer
 from lbry.dht.protocol.distance import Distance

@@ -30,14 +30,14 @@ class Node:
     )
     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
                  internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
                  storage: typing.Optional['SQLiteStorage'] = None):
         self.loop = loop
         self.internal_udp_port = internal_udp_port
         self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
-                                         split_buckets_under_index, is_bootstrap_node)
+                                         split_buckets_under_index)
         self.listening_port: asyncio.DatagramTransport = None
-        self.joined = asyncio.Event()
+        self.joined = asyncio.Event(loop=self.loop)
         self._join_task: asyncio.Task = None
         self._refresh_task: asyncio.Task = None
         self._storage = storage

@@ -70,6 +70,13 @@ class Node:

         # get ids falling in the midpoint of each bucket that hasn't been recently updated
         node_ids = self.protocol.routing_table.get_refresh_list(0, True)
+        # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
+        # populate/split the buckets further
+        buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
+        if buckets_with_contacts <= 3:
+            for i in range(buckets_with_contacts):
+                node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
+                node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))

         if self.protocol.routing_table.get_peers():
             # if we have node ids to look up, perform the iterative search until we have k results

@@ -79,7 +86,7 @@ class Node:
             else:
                 if force_once:
                     break
-                fut = asyncio.Future()
+                fut = asyncio.Future(loop=self.loop)
                 self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
                 await fut
                 continue

@@ -93,7 +100,7 @@ class Node:
             if force_once:
                 break

-            fut = asyncio.Future()
+            fut = asyncio.Future(loop=self.loop)
             self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
             await fut

@@ -108,7 +115,7 @@ class Node:
         for peer in peers:
             log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
         stored_to_tup = await asyncio.gather(
-            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
+            *(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
         )
         stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
         if stored_to:

@@ -182,36 +189,39 @@ class Node:
                     for address, udp_port in known_node_urls or []
                 ]))
             except socket.gaierror:
-                await asyncio.sleep(30)
+                await asyncio.sleep(30, loop=self.loop)
                 continue

             self.protocol.peer_manager.reset()
             self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
             await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)

-            await asyncio.sleep(1)
+            await asyncio.sleep(1, loop=self.loop)

     def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
         self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))

     def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
+                                  bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
                                   max_results: int = constants.K) -> IterativeNodeFinder:
-        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
-        return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)
+        return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
+                                   key, bottom_out_limit, max_results, None, shortlist)

     def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
+                                   bottom_out_limit: int = 40,
                                    max_results: int = -1) -> IterativeValueFinder:
-        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
-        return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)
+        return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
+                                    key, bottom_out_limit, max_results, None, shortlist)

     async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
-                          shortlist: typing.Optional[typing.List['KademliaPeer']] = None
+                          bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
                           ) -> typing.List['KademliaPeer']:
         peers = []
-        async with aclosing(self.get_iterative_node_finder(
-                node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
-            async for iteration_peers in node_finder:
-                peers.extend(iteration_peers)
+        async for iteration_peers in self.get_iterative_node_finder(
+                node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
+            peers.extend(iteration_peers)
         distance = Distance(node_id)
         peers.sort(key=lambda peer: distance(peer.node_id))
         return peers[:count]

@@ -237,41 +247,41 @@ class Node:

         # prioritize peers who reply to a dht ping first
         # this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
-        async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
-            async for results in value_finder:
-                to_put = []
-                for peer in results:
-                    if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
-                        continue
-                    is_good = self.protocol.peer_manager.peer_is_good(peer)
-                    if is_good:
-                        # the peer has replied recently over UDP, it can probably be reached on the TCP port
-                        to_put.append(peer)
-                    elif is_good is None:
-                        if not peer.udp_port:
-                            # TODO: use the same port for TCP and UDP
-                            # the udp port must be guessed
-                            # default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
-                            # including on a network with several nodes, then assume the udp port is proportionately
-                            # based on a starting port of 4444
-                            udp_port_to_try = peer.tcp_port
-                            if 3400 > peer.tcp_port > 3332:
-                                udp_port_to_try = (peer.tcp_port - 3333) + 4444
-                            self.loop.create_task(put_into_result_queue_after_pong(
-                                make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
-                            ))
-                        else:
-                            self.loop.create_task(put_into_result_queue_after_pong(peer))
-                    else:
-                        # the peer is known to be bad/unreachable, skip trying to connect to it over TCP
-                        log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
-                if to_put:
-                    result_queue.put_nowait(to_put)
+        async for results in self.get_iterative_value_finder(bytes.fromhex(blob_hash)):
+            to_put = []
+            for peer in results:
+                if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
+                    continue
+                is_good = self.protocol.peer_manager.peer_is_good(peer)
+                if is_good:
+                    # the peer has replied recently over UDP, it can probably be reached on the TCP port
+                    to_put.append(peer)
+                elif is_good is None:
+                    if not peer.udp_port:
+                        # TODO: use the same port for TCP and UDP
+                        # the udp port must be guessed
+                        # default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
+                        # including on a network with several nodes, then assume the udp port is proportionately
+                        # based on a starting port of 4444
+                        udp_port_to_try = peer.tcp_port
+                        if 3400 > peer.tcp_port > 3332:
+                            udp_port_to_try = (peer.tcp_port - 3333) + 4444
+                        self.loop.create_task(put_into_result_queue_after_pong(
+                            make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
+                        ))
+                    else:
+                        self.loop.create_task(put_into_result_queue_after_pong(peer))
+                else:
+                    # the peer is known to be bad/unreachable, skip trying to connect to it over TCP
+                    log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
+            if to_put:
+                result_queue.put_nowait(to_put)

     def accumulate_peers(self, search_queue: asyncio.Queue,
                          peer_queue: typing.Optional[asyncio.Queue] = None
                          ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
-        queue = peer_queue or asyncio.Queue()
+        queue = peer_queue or asyncio.Queue(loop=self.loop)
         return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
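
The master side wraps the iterative node and value finders in aclosing(...) (imported from lbry.utils, which appears to mirror contextlib.aclosing from Python 3.10) so the async generator is always closed even when the consumer stops iterating early. A hedged sketch of that cleanup guarantee using the standard-library helper:

import asyncio
from contextlib import aclosing  # Python 3.10+; the lbry.utils helper plays the same role

async def find_peers():
    try:
        for i in range(100):
            await asyncio.sleep(0)
            yield [f"peer{i}"]
    finally:
        # always runs, even if the consumer breaks out of the loop early
        print("finder cleaned up")

async def main():
    peers = []
    async with aclosing(find_peers()) as finder:
        async for batch in finder:
            peers.extend(batch)
            if len(peers) >= 3:  # stop early; aclosing() still closes the generator
                break
    print(peers)

asyncio.run(main())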

@@ -11,7 +11,7 @@ from lbry.dht import constants
 from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address

 ALLOW_LOCALHOST = False
-CACHE_SIZE = 16384
+CACHE_SIZE = 2048
 log = logging.getLogger(__name__)

@@ -100,9 +100,6 @@ class PeerManager:
         self._node_id_reverse_mapping[node_id] = (address, udp_port)
         self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())

-    def get_node_id_for_endpoint(self, address, port):
-        return self._node_id_mapping.get((address, port))
-
     def prune(self):  # TODO: periodically call this
         now = self._loop.time()
         to_pop = []

@@ -153,10 +150,9 @@ class PeerManager:
     def peer_is_good(self, peer: 'KademliaPeer'):
         return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)

-
-def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
-    node_id, address, tcp_port = decode_compact_address(compact_address)
-    return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
+    def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
+        node_id, address, tcp_port = decode_compact_address(compact_address)
+        return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)


 @dataclass(unsafe_hash=True)

@@ -194,6 +190,3 @@ class KademliaPeer:
     def compact_ip(self):
         return make_compact_ip(self.address)

-    def __str__(self):
-        return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"
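
CACHE_SIZE bounds the LRU caches used for peer objects and drops from 16384 on master to 2048 on the branch. A hedged sketch of what such a bound controls, using functools.lru_cache for illustration only (the repository uses its own caching, not necessarily this decorator):

from functools import lru_cache

CACHE_SIZE = 16384  # the constant under discussion; 2048 on the older branch

@lru_cache(maxsize=CACHE_SIZE)
def make_peer(node_id: bytes, address: str, udp_port: int, tcp_port: int) -> tuple:
    # Repeated lookups of the same (node_id, address, ports) return the same object
    # instead of allocating a new one; maxsize caps memory on busy nodes.
    return (node_id, address, udp_port, tcp_port)

a = make_peer(b"\x01" * 48, "1.2.3.4", 4444, 3333)
b = make_peer(b"\x01" * 48, "1.2.3.4", 4444, 3333)
assert a is b                  # served from the cache
print(make_peer.cache_info())  # hits=1, misses=1, maxsize=16384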
@ -1,17 +1,17 @@
|
|||
import asyncio
|
||||
from itertools import chain
|
||||
from collections import defaultdict, OrderedDict
|
||||
from collections.abc import AsyncIterator
|
||||
from collections import defaultdict
|
||||
import typing
|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
from lbry.dht import constants
|
||||
from lbry.dht.error import RemoteException, TransportNotConnected
|
||||
from lbry.dht.protocol.distance import Distance
|
||||
from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
|
||||
from lbry.dht.peer import make_kademlia_peer
|
||||
from lbry.dht.serialization.datagram import PAGE_KEY
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from lbry.dht.protocol.routing_table import TreeRoutingTable
|
||||
from lbry.dht.protocol.protocol import KademliaProtocol
|
||||
from lbry.dht.peer import PeerManager, KademliaPeer
|
||||
|
||||
|
@ -26,15 +26,6 @@ class FindResponse:
|
|||
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
|
||||
for contact_triple in self.get_close_triples():
|
||||
node_id, address, udp_port = contact_triple
|
||||
try:
|
||||
yield make_kademlia_peer(node_id, address, udp_port)
|
||||
except ValueError:
|
||||
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
|
||||
peer_info.udp_port, address, udp_port)
|
||||
|
||||
|
||||
class FindNodeResponse(FindResponse):
|
||||
def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
|
||||
|
@ -65,33 +56,57 @@ class FindValueResponse(FindResponse):
|
|||
return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]
|
||||
|
||||
|
||||
class IterativeFinder(AsyncIterator):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
|
||||
"""
|
||||
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
|
||||
|
||||
:param routing_table: a TreeRoutingTable
|
||||
:param key: a 48 byte hash
|
||||
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
|
||||
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
|
||||
"""
|
||||
if len(key) != constants.HASH_LENGTH:
|
||||
raise ValueError("invalid key length: %i" % len(key))
|
||||
return shortlist or routing_table.find_close_peers(key)
|
||||
|
||||
|
||||
class IterativeFinder:
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
|
||||
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
|
||||
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
|
||||
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
if len(key) != constants.HASH_LENGTH:
|
||||
raise ValueError("invalid key length: %i" % len(key))
|
||||
self.loop = loop
|
||||
self.peer_manager = protocol.peer_manager
|
||||
self.peer_manager = peer_manager
|
||||
self.routing_table = routing_table
|
||||
self.protocol = protocol
|
||||
|
||||
self.key = key
|
||||
self.max_results = max(constants.K, max_results)
|
||||
self.bottom_out_limit = bottom_out_limit
|
||||
self.max_results = max_results
|
||||
self.exclude = exclude or []
|
||||
|
||||
self.active: typing.Dict['KademliaPeer', int] = OrderedDict() # peer: distance, sorted
|
||||
self.active: typing.Set['KademliaPeer'] = set()
|
||||
self.contacted: typing.Set['KademliaPeer'] = set()
|
||||
self.distance = Distance(key)
|
||||
|
||||
self.iteration_queue = asyncio.Queue()
|
||||
self.closest_peer: typing.Optional['KademliaPeer'] = None
|
||||
self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
|
||||
|
||||
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
|
||||
self.iteration_queue = asyncio.Queue(loop=self.loop)
|
||||
|
||||
self.running_probes: typing.Set[asyncio.Task] = set()
|
||||
self.iteration_count = 0
|
||||
self.bottom_out_count = 0
|
||||
self.running = False
|
||||
self.tasks: typing.List[asyncio.Task] = []
|
||||
for peer in shortlist:
|
||||
self.delayed_calls: typing.List[asyncio.Handle] = []
|
||||
for peer in get_shortlist(routing_table, key, shortlist):
|
||||
if peer.node_id:
|
||||
self._add_active(peer, force=True)
|
||||
self._add_active(peer)
|
||||
else:
|
||||
# seed nodes
|
||||
self._schedule_probe(peer)
|
||||
|
@ -123,79 +138,66 @@ class IterativeFinder(AsyncIterator):
|
|||
"""
|
||||
return []
|
||||
|
||||
def _add_active(self, peer, force=False):
|
||||
if not force and self.peer_manager.peer_is_good(peer) is False:
|
||||
return
|
||||
if peer in self.contacted:
|
||||
return
|
||||
def _is_closer(self, peer: 'KademliaPeer') -> bool:
|
||||
return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
|
||||
|
||||
def _add_active(self, peer):
|
||||
if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
|
||||
self.active[peer] = self.distance(peer.node_id)
|
||||
self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))
|
||||
self.active.add(peer)
|
||||
if self._is_closer(peer):
|
||||
self.prev_closest_peer = self.closest_peer
|
||||
self.closest_peer = peer
|
||||
|
||||
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
|
||||
self._add_active(peer)
|
||||
for new_peer in response.get_close_kademlia_peers(peer):
|
||||
self._add_active(new_peer)
|
||||
for contact_triple in response.get_close_triples():
|
||||
node_id, address, udp_port = contact_triple
|
||||
try:
|
||||
self._add_active(make_kademlia_peer(node_id, address, udp_port))
|
||||
except ValueError:
|
||||
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
|
||||
peer.udp_port, address, udp_port)
|
||||
self.check_result_ready(response)
|
||||
self._log_state(reason="check result")
|
||||
|
||||
def _reset_closest(self, peer):
|
||||
if peer in self.active:
|
||||
del self.active[peer]
|
||||
|
||||
async def _send_probe(self, peer: 'KademliaPeer'):
|
||||
try:
|
||||
response = await self.send_probe(peer)
|
||||
except asyncio.TimeoutError:
|
||||
self._reset_closest(peer)
|
||||
self.active.discard(peer)
|
||||
return
|
||||
except asyncio.CancelledError:
|
||||
log.debug("%s[%x] cancelled probe",
|
||||
type(self).__name__, id(self))
|
||||
raise
|
||||
except ValueError as err:
|
||||
log.warning(str(err))
|
||||
self._reset_closest(peer)
|
||||
self.active.discard(peer)
|
||||
return
|
||||
except TransportNotConnected:
|
||||
await self._aclose(reason="not connected")
|
||||
return
|
||||
return self.aclose()
|
||||
except RemoteException:
|
||||
self._reset_closest(peer)
|
||||
return
|
||||
return await self._handle_probe_result(peer, response)
|
||||
|
||||
def _search_round(self):
|
||||
async def _search_round(self):
|
||||
"""
|
||||
Send up to constants.alpha (5) probes to closest active peers
|
||||
"""
|
||||
|
||||
added = 0
|
||||
for index, peer in enumerate(self.active.keys()):
|
||||
if index == 0:
|
||||
log.debug("%s[%x] closest to probe: %s",
|
||||
type(self).__name__, id(self),
|
||||
peer.node_id.hex()[:8])
|
||||
if peer in self.contacted:
|
||||
continue
|
||||
if len(self.running_probes) >= constants.ALPHA:
|
||||
break
|
||||
if index > (constants.K + len(self.running_probes)):
|
||||
to_probe = list(self.active - self.contacted)
|
||||
to_probe.sort(key=lambda peer: self.distance(self.key))
|
||||
for peer in to_probe:
|
||||
if added >= constants.ALPHA:
|
||||
break
|
||||
origin_address = (peer.address, peer.udp_port)
|
||||
if origin_address in self.exclude:
|
||||
continue
|
||||
if peer.node_id == self.protocol.node_id:
|
||||
continue
|
||||
if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
|
||||
continue
|
||||
self._schedule_probe(peer)
|
||||
added += 1
|
||||
log.debug("%s[%x] running %d probes for key %s",
|
||||
type(self).__name__, id(self),
|
||||
len(self.running_probes), self.key.hex()[:8])
|
||||
log.debug("running %d probes for key %s", len(self.running_probes), self.key.hex()[:8])
|
||||
if not added and not self.running_probes:
|
||||
log.debug("%s[%x] search for %s exhausted",
|
||||
type(self).__name__, id(self),
|
||||
self.key.hex()[:8])
|
||||
log.debug("search for %s exhausted", self.key.hex()[:8])
|
||||
self.search_exhausted()
|
||||
|
||||
def _schedule_probe(self, peer: 'KademliaPeer'):
|
||||
|
@ -204,24 +206,33 @@ class IterativeFinder(AsyncIterator):
|
|||
t = self.loop.create_task(self._send_probe(peer))
|
||||
|
||||
def callback(_):
|
||||
self.running_probes.pop(peer, None)
|
||||
if self.running:
|
||||
self._search_round()
|
||||
self.running_probes.difference_update({
|
||||
probe for probe in self.running_probes if probe.done() or probe == t
|
||||
})
|
||||
if not self.running_probes:
|
||||
self.tasks.append(self.loop.create_task(self._search_task(0.0)))
|
||||
|
||||
t.add_done_callback(callback)
|
||||
self.running_probes[peer] = t
|
||||
self.running_probes.add(t)
|
||||
|
||||
def _log_state(self, reason="?"):
|
||||
log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
|
||||
type(self).__name__, id(self), self.key.hex()[:8],
|
||||
reason, len(self.active), len(self.contacted),
|
||||
self.iteration_count, self.iteration_queue.qsize())
|
||||
async def _search_task(self, delay: typing.Optional[float] = constants.ITERATIVE_LOOKUP_DELAY):
|
||||
try:
|
||||
if self.running:
|
||||
await self._search_round()
|
||||
if self.running:
|
||||
self.delayed_calls.append(self.loop.call_later(delay, self._search))
|
||||
except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
|
||||
if self.running:
|
||||
self.loop.call_soon(self.aclose)
|
||||
|
||||
def _search(self):
|
||||
self.tasks.append(self.loop.create_task(self._search_task()))
|
||||
|
||||
def __aiter__(self):
|
||||
if self.running:
|
||||
raise Exception("already running")
|
||||
self.running = True
|
||||
self.loop.call_soon(self._search_round)
|
||||
self._search()
|
||||
return self
|
||||
|
||||
async def __anext__(self) -> typing.List['KademliaPeer']:
|
||||
|
@ -234,37 +245,28 @@ class IterativeFinder(AsyncIterator):
|
|||
raise StopAsyncIteration
|
||||
self.iteration_count += 1
|
||||
return result
|
||||
except asyncio.CancelledError:
|
||||
await self._aclose(reason="cancelled")
|
||||
raise
|
||||
except StopAsyncIteration:
|
||||
await self._aclose(reason="no more results")
|
||||
except (asyncio.CancelledError, StopAsyncIteration):
|
||||
self.loop.call_soon(self.aclose)
|
||||
raise
|
||||
|
||||
async def _aclose(self, reason="?"):
|
||||
log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
|
||||
type(self).__name__, id(self), self.key.hex()[:8],
|
||||
reason, len(self.active), len(self.contacted),
|
||||
self.iteration_count, self.iteration_queue.qsize())
|
||||
def aclose(self):
|
||||
self.running = False
|
||||
self.iteration_queue.put_nowait(None)
|
||||
for task in chain(self.tasks, self.running_probes.values()):
|
||||
for task in chain(self.tasks, self.running_probes, self.delayed_calls):
|
||||
task.cancel()
|
||||
self.tasks.clear()
|
||||
self.running_probes.clear()
|
||||
self.delayed_calls.clear()
|
||||
|
||||
async def aclose(self):
|
||||
if self.running:
|
||||
await self._aclose(reason="aclose")
|
||||
log.debug("%s[%x] [%s] async close completed",
|
||||
type(self).__name__, id(self), self.key.hex()[:8])
|
||||
|
||||
class IterativeNodeFinder(IterativeFinder):
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop,
|
||||
protocol: 'KademliaProtocol', key: bytes,
|
||||
max_results: typing.Optional[int] = constants.K,
|
||||
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
|
||||
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
|
||||
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
|
||||
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
|
||||
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
|
||||
super().__init__(loop, protocol, key, max_results, shortlist)
|
||||
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
|
||||
shortlist)
|
||||
self.yielded_peers: typing.Set['KademliaPeer'] = set()
|
||||
|
||||
async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
|
||||
|
@ -274,14 +276,14 @@ class IterativeNodeFinder(IterativeFinder):
|
|||
return FindNodeResponse(self.key, response)
|
||||
|
||||
def search_exhausted(self):
|
||||
self.put_result(self.active.keys(), finish=True)
|
||||
self.put_result(self.active, finish=True)
|
||||
|
||||
def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
|
||||
not_yet_yielded = [
|
||||
peer for peer in from_iter
|
||||
if peer not in self.yielded_peers
|
||||
and peer.node_id != self.protocol.node_id
|
||||
and self.peer_manager.peer_is_good(peer) is True # return only peers who answered
|
||||
and self.peer_manager.peer_is_good(peer) is not False
|
||||
]
|
||||
not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
|
||||
to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
|
||||
|
@ -296,15 +298,27 @@ class IterativeNodeFinder(IterativeFinder):
|
|||
|
||||
if found:
|
||||
log.debug("found")
|
||||
return self.put_result(self.active.keys(), finish=True)
|
||||
return self.put_result(self.active, finish=True)
|
||||
if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
|
||||
# log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
|
||||
# self.bottom_out_count, self.iteration_count)
|
||||
self.bottom_out_count = 0
|
||||
elif self.prev_closest_peer and self.closest_peer:
|
||||
self.bottom_out_count += 1
|
||||
log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
|
||||
if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
|
||||
log.info("limit hit")
|
||||
self.put_result(self.active, True)
|
||||
|
||||
|
||||
class IterativeValueFinder(IterativeFinder):
def __init__(self, loop: asyncio.AbstractEventLoop,
protocol: 'KademliaProtocol', key: bytes,
max_results: typing.Optional[int] = constants.K,
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
super().__init__(loop, protocol, key, max_results, shortlist)
super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
shortlist)
self.blob_peers: typing.Set['KademliaPeer'] = set()
# this tracks the index of the most recent page we requested from each peer
self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)

@@ -323,7 +337,7 @@ class IterativeValueFinder(IterativeFinder):
decoded_peers = set()
for compact_addr in parsed.found_compact_addresses:
try:
decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
except ValueError:
log.warning("misbehaving peer %s:%i returned invalid peer for blob",
peer.address, peer.udp_port)

@@ -345,15 +359,26 @@ class IterativeValueFinder(IterativeFinder):
def check_result_ready(self, response: FindValueResponse):
if response.found:
blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
for compact_addr in response.found_compact_addresses]
to_yield = []
self.bottom_out_count = 0
for blob_peer in blob_peers:
if blob_peer not in self.blob_peers:
self.blob_peers.add(blob_peer)
to_yield.append(blob_peer)
if to_yield:
# log.info("found %i new peers for blob", len(to_yield))
self.iteration_queue.put_nowait(to_yield)
# if self.max_results and len(self.blob_peers) >= self.max_results:
# log.info("enough blob peers found")
# if not self.finished.is_set():
# self.finished.set()
elif self.prev_closest_peer and self.closest_peer:
self.bottom_out_count += 1
if self.bottom_out_count >= self.bottom_out_limit:
log.info("blob peer search bottomed out")
self.iteration_queue.put_nowait(None)

def get_initial_result(self) -> typing.List['KademliaPeer']:
if self.protocol.data_store.has_peers_for_blob(self.key):
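The hunks above move compact-address decoding onto the peer manager. As an illustration of what a compact TCP peer entry generally looks like (a 4-byte IPv4 address plus a 2-byte big-endian port, with a trailing node id in LBRY's case; the exact layout here is an assumption, not taken from this diff):

```python
# Illustrative decoder for a compact peer entry; not the SDK's implementation.
import socket

def decode_compact_address(compact: bytes):
    if len(compact) < 6:
        raise ValueError("compact address too short")
    address = socket.inet_ntoa(compact[:4])      # dotted-quad IPv4
    port = int.from_bytes(compact[4:6], 'big')   # network byte order
    if not 0 < port < 65536:
        raise ValueError("invalid port")
    return address, port, compact[6:] or None    # trailing node id bytes, if any

print(decode_compact_address(bytes([127, 0, 0, 1, 0x1F, 0x90])))  # ('127.0.0.1', 8080, None)
```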
@@ -218,10 +218,6 @@ class PingQueue:
def running(self):
return self._running

@property
def busy(self):
return self._running and (any(self._running_pings) or any(self._pending_contacts))

def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
delay = delay if delay is not None else self._default_delay
now = self._loop.time()

@@ -233,7 +229,7 @@ class PingQueue:
async def ping_task():
try:
if self._protocol.peer_manager.peer_is_good(peer):
if not self._protocol.routing_table.get_peer(peer.node_id):
if peer not in self._protocol.routing_table.get_peers():
self._protocol.add_peer(peer)
return
await self._protocol.get_rpc_peer(peer).ping()

@@ -253,7 +249,7 @@ class PingQueue:
del self._pending_contacts[peer]
self.maybe_ping(peer)
break
await asyncio.sleep(1)
await asyncio.sleep(1, loop=self._loop)

def start(self):
assert not self._running

@@ -298,7 +294,7 @@ class KademliaProtocol(DatagramProtocol):
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False):
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
self.peer_manager = peer_manager
self.loop = loop
self.node_id = node_id

@@ -313,16 +309,15 @@ class KademliaProtocol(DatagramProtocol):
self.transport: DatagramTransport = None
self.old_token_secret = constants.generate_id()
self.token_secret = constants.generate_id()
self.routing_table = TreeRoutingTable(
self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
self.data_store = DictDataStore(self.loop, self.peer_manager)
self.ping_queue = PingQueue(self.loop, self)
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
self.rpc_timeout = rpc_timeout
self._split_lock = asyncio.Lock()
self._split_lock = asyncio.Lock(loop=self.loop)
self._to_remove: typing.Set['KademliaPeer'] = set()
self._to_add: typing.Set['KademliaPeer'] = set()
self._wakeup_routing_task = asyncio.Event()
self._wakeup_routing_task = asyncio.Event(loop=self.loop)
self.maintaing_routing_task: typing.Optional[asyncio.Task] = None

@functools.lru_cache(128)
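Several lines in this hunk differ only in dropping the explicit `loop=` argument from `asyncio.Lock`, `asyncio.Event` and `asyncio.sleep`; that parameter was deprecated in Python 3.8 and removed in 3.10, and primitives now bind to the running loop. A small sketch of the modern pattern:

```python
# Sketch of creating asyncio primitives without loop=; requires Python 3.8+.
import asyncio

class RoutingTask:
    def __init__(self):
        self.wakeup = asyncio.Event()  # no loop argument; uses the running loop

    async def run_once(self):
        await asyncio.gather(self.wakeup.wait(), asyncio.sleep(0.1))
        self.wakeup.clear()

async def main():
    task = RoutingTask()
    task.wakeup.set()
    await task.run_once()

asyncio.run(main())
```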
@@ -361,10 +356,72 @@ class KademliaProtocol(DatagramProtocol):
return args, {}

async def _add_peer(self, peer: 'KademliaPeer'):
async def probe(some_peer: 'KademliaPeer'):
rpc_peer = self.get_rpc_peer(some_peer)
await rpc_peer.ping()
return await self.routing_table.add_peer(peer, probe)
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.routing_table.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.routing_table.remove_peer(my_peer)
self.routing_table.join_buckets()
bucket_index = self.routing_table.kbucket_index(peer.node_id)
if self.routing_table.buckets[bucket_index].add_peer(peer):
return True

# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self.routing_table.should_split(bucket_index, peer.node_id):
self.routing_table.split_bucket(bucket_index)
# Retry the insertion attempt
result = await self._add_peer(peer)
self.routing_table.join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.

not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self.peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self.loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.routing_table.buckets[bucket_index].peers[0]
last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self.loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
to_replace_rpc = self.get_rpc_peer(to_replace)
await to_replace_rpc.ping()
return False
except asyncio.TimeoutError:
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.routing_table.buckets[bucket_index]:
self.routing_table.buckets[bucket_index].remove_peer(to_replace)
return await self._add_peer(peer)

def add_peer(self, peer: 'KademliaPeer'):
if peer.node_id == self.node_id:
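The comment block above restates the BEP 0005 node states (good, questionable, bad) that drive the replacement choice. A compact, hypothetical classifier for those states, with the 15-minute window taken from that text; the function and its inputs are illustrative, not the PeerManager API:

```python
# Rough sketch of the BEP 0005 node states described above; illustrative only.
import time

FIFTEEN_MINUTES = 15 * 60

def classify_node(last_replied, last_queried_us, consecutive_failures, now=None):
    now = time.time() if now is None else now
    if consecutive_failures >= 2:
        return "bad"          # failed several queries in a row
    if last_replied and now - last_replied < FIFTEEN_MINUTES:
        return "good"         # replied to one of our queries recently
    if last_replied and last_queried_us and now - last_queried_us < FIFTEEN_MINUTES:
        return "good"         # has replied before and queried us recently
    return "questionable"     # silent for fifteen minutes

print(classify_node(time.time() - 60, 0, 0))  # good
```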
@@ -382,10 +439,11 @@ class KademliaProtocol(DatagramProtocol):
async with self._split_lock:
peer = self._to_remove.pop()
self.routing_table.remove_peer(peer)
self.routing_table.join_buckets()
while self._to_add:
async with self._split_lock:
await self._add_peer(self._to_add.pop())
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
self._wakeup_routing_task.clear()

def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):

@@ -424,8 +482,9 @@ class KademliaProtocol(DatagramProtocol):
# This is an RPC method request
self.received_request_metric.labels(method=request_datagram.method).inc()
self.peer_manager.report_last_requested(address[0], address[1])
peer = self.routing_table.get_peer(request_datagram.node_id)
if not peer:
try:
peer = self.routing_table.get_peer(request_datagram.node_id)
except IndexError:
try:
peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
except ValueError as err:
@@ -6,9 +6,7 @@ import itertools

from prometheus_client import Gauge

from lbry import utils
from lbry.dht import constants
from lbry.dht.error import RemoteException
from lbry.dht.protocol.distance import Distance
if typing.TYPE_CHECKING:
from lbry.dht.peer import KademliaPeer, PeerManager

@@ -29,8 +27,7 @@ class KBucket:
namespace="dht_node", labelnames=("amount",)
)

def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
node_id: bytes, capacity: int = constants.K):
def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
"""
@param range_min: The lower boundary for the range in the n-bit ID
space covered by this k-bucket

@@ -38,12 +35,12 @@ class KBucket:
covered by this k-bucket
"""
self._peer_manager = peer_manager
self.last_accessed = 0
self.range_min = range_min
self.range_max = range_max
self.peers: typing.List['KademliaPeer'] = []
self._node_id = node_id
self._distance_to_self = Distance(node_id)
self.capacity = capacity

def add_peer(self, peer: 'KademliaPeer') -> bool:
""" Add contact to _contact list in the right order. This will move the

@@ -70,19 +67,22 @@ class KBucket:
self.peers.remove(local_peer)
self.peers.append(peer)
return True
if len(self.peers) < self.capacity:
if len(self.peers) < constants.K:
self.peers.append(peer)
self.peer_in_routing_table_metric.labels("global").inc()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
if peer.node_id[0] == self._node_id[0]:
bits_colliding = 8 - (peer.node_id[1] ^ self._node_id[1]).bit_length()
self.peer_with_x_bit_colliding_metric.labels(amount=(bits_colliding + 8)).inc()
return True
else:
return False
# raise BucketFull("No space in bucket to insert contact")

def get_peer(self, node_id: bytes) -> 'KademliaPeer':
for peer in self.peers:
if peer.node_id == node_id:
return peer
raise IndexError(node_id)

def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
""" Returns a list containing up to the first count number of contacts
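The `add_peer` hunk above swaps an inline two-byte computation for `utils.get_colliding_prefix_bits` when labelling the colliding-bits metric. A sketch of what that quantity is, the number of shared leading bits between two equal-length ids (this mirrors the helper's purpose; it is not copied from `lbry.utils`):

```python
# Count shared most-significant bits of two ids via the XOR's leading zeros.
def colliding_prefix_bits(a: bytes, b: bytes) -> int:
    assert len(a) == len(b)
    total_bits = len(a) * 8
    xored = int.from_bytes(a, 'big') ^ int.from_bytes(b, 'big')
    return total_bits if xored == 0 else total_bits - xored.bit_length()

print(colliding_prefix_bits(b'\xff\x00', b'\xff\x80'))  # 8: first byte matches, bit 9 differs
```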
@@ -140,8 +140,9 @@ class KBucket:
def remove_peer(self, peer: 'KademliaPeer') -> None:
self.peers.remove(peer)
self.peer_in_routing_table_metric.labels("global").dec()
bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()
if peer.node_id[0] == self._node_id[0]:
bits_colliding = 8 - (peer.node_id[1] ^ self._node_id[1]).bit_length()
self.peer_with_x_bit_colliding_metric.labels(amount=(bits_colliding + 8)).dec()

def key_in_range(self, key: bytes) -> bool:
""" Tests whether the specified key (i.e. node ID) is in the range

@@ -179,13 +180,6 @@ class TreeRoutingTable:
version of the Kademlia paper, in section 2.4. It does, however, use the
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
that paper.

BOOTSTRAP MODE: if set to True, we always add all peers. This is so a
bootstrap node does not get a bias towards its own node id and replies are
the best it can provide (joining peer knows its neighbors immediately).
Over time, this will need to be optimized so we use the disk as holding
everything in memory won't be feasible anymore.
See: https://github.com/bittorrent/bootstrap-dht
"""
bucket_in_routing_table_metric = Gauge(
"buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",

@@ -193,22 +187,21 @@ class TreeRoutingTable:
)

def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
self._loop = loop
self._peer_manager = peer_manager
self._parent_node_id = parent_node_id
self._split_buckets_under_index = split_buckets_under_index
self.buckets: typing.List[KBucket] = [
KBucket(
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
capacity=1 << 32 if is_bootstrap_node else constants.K
self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id
)
]

def get_peers(self) -> typing.List['KademliaPeer']:
return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))

def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
def should_split(self, bucket_index: int, to_add: bytes) -> bool:
# https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
if bucket_index < self._split_buckets_under_index:
return True
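`should_split` above first honours the always-split threshold for the lowest bucket indexes; the earlier comment ("by checking if its range includes the host node's node_id") describes the classic second rule. A sketch of that combined test, assuming 48-byte ids; the branch's version may apply further distance-based checks not shown in this hunk:

```python
# Illustrative split test: split low-index buckets unconditionally, otherwise
# only a bucket whose key range contains our own node id.
def should_split(bucket_index, range_min, range_max, own_node_id, split_buckets_under_index):
    if bucket_index < split_buckets_under_index:
        return True
    own = int.from_bytes(own_node_id, 'big')
    return range_min <= own < range_max

print(should_split(3, 0, 2 ** 384, b'\x01' * 48, split_buckets_under_index=1))  # True: own id in range
```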
|
@ -233,32 +226,39 @@ class TreeRoutingTable:
|
|||
return []
|
||||
|
||||
def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
|
||||
return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
|
||||
"""
|
||||
@raise IndexError: No contact with the specified contact ID is known
|
||||
by this node
|
||||
"""
|
||||
return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
|
||||
|
||||
def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
|
||||
bucket_index = start_index
|
||||
refresh_ids = []
|
||||
for offset, _ in enumerate(self.buckets[start_index:]):
|
||||
refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
|
||||
# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
|
||||
# populate/split the buckets further
|
||||
buckets_with_contacts = self.buckets_with_contacts()
|
||||
if buckets_with_contacts <= 3:
|
||||
for i in range(buckets_with_contacts):
|
||||
refresh_ids.append(self._random_id_in_bucket_range(i))
|
||||
refresh_ids.append(self._random_id_in_bucket_range(i))
|
||||
now = int(self._loop.time())
|
||||
for bucket in self.buckets[start_index:]:
|
||||
if force or now - bucket.last_accessed >= constants.REFRESH_INTERVAL:
|
||||
to_search = self.midpoint_id_in_bucket_range(bucket_index)
|
||||
refresh_ids.append(to_search)
|
||||
bucket_index += 1
|
||||
return refresh_ids
|
||||
|
||||
def remove_peer(self, peer: 'KademliaPeer') -> None:
|
||||
if not peer.node_id:
|
||||
return
|
||||
bucket_index = self._kbucket_index(peer.node_id)
|
||||
bucket_index = self.kbucket_index(peer.node_id)
|
||||
try:
|
||||
self.buckets[bucket_index].remove_peer(peer)
|
||||
self._join_buckets()
|
||||
except ValueError:
|
||||
return
|
||||
|
||||
def _kbucket_index(self, key: bytes) -> int:
|
||||
def touch_kbucket(self, key: bytes) -> None:
|
||||
self.touch_kbucket_by_index(self.kbucket_index(key))
|
||||
|
||||
def touch_kbucket_by_index(self, bucket_index: int):
|
||||
self.buckets[bucket_index].last_accessed = int(self._loop.time())
|
||||
|
||||
def kbucket_index(self, key: bytes) -> int:
|
||||
i = 0
|
||||
for bucket in self.buckets:
|
||||
if bucket.key_in_range(key):
|
||||
|
@ -267,19 +267,19 @@ class TreeRoutingTable:
|
|||
i += 1
|
||||
return i
|
||||
|
||||
def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
|
||||
return Distance(
|
||||
self._parent_node_id
|
||||
)(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')
|
||||
|
||||
def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
|
||||
half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
|
||||
return Distance(self._parent_node_id)(
|
||||
int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
|
||||
).to_bytes(constants.HASH_LENGTH, 'big')
|
||||
|
||||
def _split_bucket(self, old_bucket_index: int) -> None:
|
||||
def split_bucket(self, old_bucket_index: int) -> None:
|
||||
""" Splits the specified k-bucket into two new buckets which together
|
||||
cover the same range in the key/ID space
|
||||
|
||||
|
@ -304,7 +304,7 @@ class TreeRoutingTable:
|
|||
old_bucket.remove_peer(contact)
|
||||
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
|
||||
|
||||
def _join_buckets(self):
|
||||
def join_buckets(self):
|
||||
if len(self.buckets) == 1:
|
||||
return
|
||||
to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
|
||||
|
@ -327,7 +327,14 @@ class TreeRoutingTable:
|
|||
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
|
||||
self.buckets.remove(bucket)
|
||||
self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
|
||||
return self._join_buckets()
|
||||
return self.join_buckets()
|
||||
|
||||
def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
|
||||
for bucket in self.buckets:
|
||||
for contact in bucket.get_peers(sort_distance_to=False):
|
||||
if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
|
||||
return True
|
||||
return False
|
||||
|
||||
def buckets_with_contacts(self) -> int:
|
||||
count = 0
|
||||
|
@ -335,70 +342,3 @@ class TreeRoutingTable:
|
|||
if len(bucket) > 0:
|
||||
count += 1
|
||||
return count
|
||||
|
||||
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
|
||||
if not peer.node_id:
|
||||
log.warning("Tried adding a peer with no node id!")
|
||||
return False
|
||||
for my_peer in self.get_peers():
|
||||
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
|
||||
self.remove_peer(my_peer)
|
||||
self._join_buckets()
|
||||
bucket_index = self._kbucket_index(peer.node_id)
|
||||
if self.buckets[bucket_index].add_peer(peer):
|
||||
return True
|
||||
|
||||
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
|
||||
if self._should_split(bucket_index, peer.node_id):
|
||||
self._split_bucket(bucket_index)
|
||||
# Retry the insertion attempt
|
||||
result = await self.add_peer(peer, probe)
|
||||
self._join_buckets()
|
||||
return result
|
||||
else:
|
||||
# We can't split the k-bucket
|
||||
#
|
||||
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
|
||||
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
|
||||
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
|
||||
#
|
||||
# A reasonable extension to this is BEP 0005, which extends the above:
|
||||
#
|
||||
# Not all nodes that we learn about are equal. Some are "good" and some are not.
|
||||
# Many nodes using the DHT are able to send queries and receive responses,
|
||||
# but are not able to respond to queries from other nodes. It is important that
|
||||
# each node's routing table must contain only known good nodes. A good node is
|
||||
# a node has responded to one of our queries within the last 15 minutes. A node
|
||||
# is also good if it has ever responded to one of our queries and has sent us a
|
||||
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
|
||||
# questionable. Nodes become bad when they fail to respond to multiple queries
|
||||
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
|
||||
#
|
||||
# When there are bad or questionable nodes in the bucket, the least recent is selected for
|
||||
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
|
||||
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
|
||||
# is ignored if the pinged node replies.
|
||||
|
||||
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
|
||||
not_recently_replied = []
|
||||
for my_peer in not_good_contacts:
|
||||
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
|
||||
if not last_replied or last_replied + 60 < self._loop.time():
|
||||
not_recently_replied.append(my_peer)
|
||||
if not_recently_replied:
|
||||
to_replace = not_recently_replied[0]
|
||||
else:
|
||||
to_replace = self.buckets[bucket_index].peers[0]
|
||||
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
|
||||
if last_replied and last_replied + 60 > self._loop.time():
|
||||
return False
|
||||
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
|
||||
try:
|
||||
await probe(to_replace)
|
||||
return False
|
||||
except (asyncio.TimeoutError, RemoteException):
|
||||
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
|
||||
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
|
||||
if to_replace in self.buckets[bucket_index]:
|
||||
self.buckets[bucket_index].remove_peer(to_replace)
|
||||
return await self.add_peer(peer, probe)
|
||||
|
|
|
@@ -1,5 +1,5 @@
from lbry.extras.cli import execute_command
from lbry.conf import Config
from lbry.extras.cli import execute_command


def daemon_rpc(conf: Config, method: str, **kwargs):


@@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
def running(self):
return self._running

async def get_status(self): # pylint: disable=no-self-use
async def get_status(self):
return

async def start(self):


@@ -42,7 +42,7 @@ class ComponentManager:
self.analytics_manager = analytics_manager
self.component_classes = {}
self.components = set()
self.started = asyncio.Event()
self.started = asyncio.Event(loop=self.loop)
self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())

for component_name, component_class in self.default_component_classes.items():

@@ -118,7 +118,7 @@ class ComponentManager:
component._setup() for component in stage if not component.running
]
if needing_start:
await asyncio.wait(map(asyncio.create_task, needing_start))
await asyncio.wait(needing_start)
self.started.set()

async def stop(self):

@@ -131,7 +131,7 @@ class ComponentManager:
component._stop() for component in stage if component.running
]
if needing_stop:
await asyncio.wait(map(asyncio.create_task, needing_stop))
await asyncio.wait(needing_stop)

def all_components_running(self, *component_names):
"""
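The ComponentManager hunks wrap the per-component coroutines in `asyncio.create_task` before handing them to `asyncio.wait`; passing bare coroutines there was deprecated in Python 3.8 and is rejected by newer releases. A minimal sketch of the pattern:

```python
# Sketch of why coroutines are wrapped in tasks before asyncio.wait.
import asyncio

async def fake_setup(name):
    await asyncio.sleep(0)   # stand-in for component._setup()
    return name

async def start_all(names):
    coros = [fake_setup(name) for name in names]
    done, _ = await asyncio.wait(map(asyncio.create_task, coros))
    return sorted(task.result() for task in done)

print(asyncio.run(start_all(["blob", "dht", "wallet"])))  # ['blob', 'dht', 'wallet']
```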
@@ -27,8 +27,10 @@ from lbry.extras.daemon.storage import SQLiteStorage
from lbry.torrent.torrent_manager import TorrentManager
from lbry.wallet import WalletManager
from lbry.wallet.usage_payment import WalletServerPayer
from lbry.torrent.tracker import TrackerClient
from lbry.torrent.session import TorrentSession
try:
from lbry.torrent.session import TorrentSession
except ImportError:
TorrentSession = None

log = logging.getLogger(__name__)

@@ -46,7 +48,6 @@ BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
UPNP_COMPONENT = "upnp"
EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
LIBTORRENT_COMPONENT = "libtorrent_component"
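The import hunk above makes libtorrent support optional by catching `ImportError` and leaving `TorrentSession` as `None`, which later code checks before constructing a session. A generic sketch of that pattern (the module name below is a placeholder):

```python
# Generic optional-dependency pattern; "optional_backend" is a placeholder name.
try:
    import optional_backend          # may not be installed
except ImportError:
    optional_backend = None

def make_session():
    if optional_backend is None:
        return None                  # feature quietly disabled
    return optional_backend.Session()  # hypothetical constructor
```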
|
@ -293,7 +294,6 @@ class DHTComponent(Component):
|
|||
peer_port=self.external_peer_port,
|
||||
rpc_timeout=self.conf.node_rpc_timeout,
|
||||
split_buckets_under_index=self.conf.split_buckets_under_index,
|
||||
is_bootstrap_node=self.conf.is_bootstrap_node,
|
||||
storage=storage
|
||||
)
|
||||
self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
|
||||
|
@ -357,6 +357,10 @@ class FileManagerComponent(Component):
|
|||
wallet = self.component_manager.get_component(WALLET_COMPONENT)
|
||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
||||
try:
|
||||
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT) if TorrentSession else None
|
||||
except NameError:
|
||||
torrent = None
|
||||
log.info('Starting the file manager')
|
||||
loop = asyncio.get_event_loop()
|
||||
self.file_manager = FileManager(
|
||||
|
@ -365,8 +369,7 @@ class FileManagerComponent(Component):
|
|||
self.file_manager.source_managers['stream'] = StreamManager(
|
||||
loop, self.conf, blob_manager, wallet, storage, node,
|
||||
)
|
||||
if self.component_manager.has_component(LIBTORRENT_COMPONENT):
|
||||
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
|
||||
if TorrentSession and LIBTORRENT_COMPONENT not in self.conf.components_to_skip:
|
||||
self.file_manager.source_managers['torrent'] = TorrentManager(
|
||||
loop, self.conf, torrent, storage, self.component_manager.analytics_manager
|
||||
)
|
||||
|
@ -374,11 +377,10 @@ class FileManagerComponent(Component):
|
|||
log.info('Done setting up file manager')
|
||||
|
||||
async def stop(self):
|
||||
await self.file_manager.stop()
|
||||
self.file_manager.stop()
|
||||
|
||||
|
||||
class BackgroundDownloaderComponent(Component):
|
||||
MIN_PREFIX_COLLIDING_BITS = 8
|
||||
component_name = BACKGROUND_DOWNLOADER_COMPONENT
|
||||
depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
|
||||
|
||||
|
@ -410,18 +412,12 @@ class BackgroundDownloaderComponent(Component):
|
|||
while True:
|
||||
self.space_available = await self.space_manager.get_free_space_mb(True)
|
||||
if not self.is_busy and self.space_available > 10:
|
||||
self._download_next_close_blob_hash()
|
||||
blob_hash = next((key.hex() for key in self.dht_node.stored_blob_hashes if
|
||||
key.hex() not in self.blob_manager.completed_blob_hashes), None)
|
||||
if blob_hash:
|
||||
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash))
|
||||
await asyncio.sleep(self.download_loop_delay_seconds)
|
||||
|
||||
def _download_next_close_blob_hash(self):
|
||||
node_id = self.dht_node.protocol.node_id
|
||||
for blob_hash in self.dht_node.stored_blob_hashes:
|
||||
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
|
||||
continue
|
||||
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
|
||||
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
|
||||
return
|
||||
|
||||
async def start(self):
|
||||
self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
|
||||
if not self.component_manager.has_component(DHT_COMPONENT):
|
||||
|
@ -495,8 +491,9 @@ class TorrentComponent(Component):
|
|||
}
|
||||
|
||||
async def start(self):
|
||||
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
|
||||
await self.torrent_session.bind() # TODO: specify host/port
|
||||
if TorrentSession:
|
||||
self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
|
||||
await self.torrent_session.bind() # TODO: specify host/port
|
||||
|
||||
async def stop(self):
|
||||
if self.torrent_session:
|
||||
|
@ -551,7 +548,7 @@ class UPnPComponent(Component):
|
|||
while True:
|
||||
if now:
|
||||
await self._maintain_redirects()
|
||||
await asyncio.sleep(360)
|
||||
await asyncio.sleep(360, loop=self.component_manager.loop)
|
||||
|
||||
async def _maintain_redirects(self):
|
||||
# setup the gateway if necessary
|
||||
|
@ -560,6 +557,8 @@ class UPnPComponent(Component):
|
|||
self.upnp = await UPnP.discover(loop=self.component_manager.loop)
|
||||
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
|
||||
raise
|
||||
log.warning("upnp discovery failed: %s", err)
|
||||
self.upnp = None
|
||||
|
||||
|
@ -671,7 +670,7 @@ class UPnPComponent(Component):
|
|||
log.info("Removing upnp redirects: %s", self.upnp_redirects)
|
||||
await asyncio.wait([
|
||||
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
|
||||
])
|
||||
], loop=self.component_manager.loop)
|
||||
if self._maintain_redirects_task and not self._maintain_redirects_task.done():
|
||||
self._maintain_redirects_task.cancel()
|
||||
|
||||
|
@ -702,49 +701,3 @@ class ExchangeRateManagerComponent(Component):
|
|||
|
||||
async def stop(self):
|
||||
self.exchange_rate_manager.stop()
|
||||
|
||||
|
||||
class TrackerAnnouncerComponent(Component):
|
||||
component_name = TRACKER_ANNOUNCER_COMPONENT
|
||||
depends_on = [FILE_MANAGER_COMPONENT]
|
||||
|
||||
def __init__(self, component_manager):
|
||||
super().__init__(component_manager)
|
||||
self.file_manager = None
|
||||
self.announce_task = None
|
||||
self.tracker_client: typing.Optional[TrackerClient] = None
|
||||
|
||||
@property
|
||||
def component(self):
|
||||
return self.tracker_client
|
||||
|
||||
@property
|
||||
def running(self):
|
||||
return self._running and self.announce_task and not self.announce_task.done()
|
||||
|
||||
async def announce_forever(self):
|
||||
while True:
|
||||
sleep_seconds = 60.0
|
||||
announce_sd_hashes = []
|
||||
for file in self.file_manager.get_filtered():
|
||||
if not file.downloader:
|
||||
continue
|
||||
announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
|
||||
await self.tracker_client.announce_many(*announce_sd_hashes)
|
||||
await asyncio.sleep(sleep_seconds)
|
||||
|
||||
async def start(self):
|
||||
node = self.component_manager.get_component(DHT_COMPONENT) \
|
||||
if self.component_manager.has_component(DHT_COMPONENT) else None
|
||||
node_id = node.protocol.node_id if node else None
|
||||
self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
|
||||
await self.tracker_client.start()
|
||||
self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
|
||||
self.announce_task = asyncio.create_task(self.announce_forever())
|
||||
|
||||
async def stop(self):
|
||||
self.file_manager = None
|
||||
if self.announce_task and not self.announce_task.done():
|
||||
self.announce_task.cancel()
|
||||
self.announce_task = None
|
||||
self.tracker_client.stop()
|
||||
|
|
|
@@ -9,7 +9,7 @@ import inspect
import typing
import random
import tracemalloc
import itertools
from decimal import Decimal
from urllib.parse import urlencode, quote
from typing import Callable, Optional, List
from binascii import hexlify, unhexlify

@@ -28,7 +28,6 @@ from lbry.wallet import (
from lbry.wallet.dewies import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPE_NAMES
from lbry.wallet.bip32 import PrivateKey
from lbry.crypto.base58 import Base58

from lbry import utils
from lbry.conf import Config, Setting, NOT_SET

@@ -38,13 +37,12 @@ from lbry.dht.peer import make_kademlia_peer
from lbry.error import (
DownloadSDTimeoutError, ComponentsNotStartedError, ComponentStartConditionNotMetError,
CommandDoesNotExistError, BaseError, WalletNotFoundError, WalletAlreadyLoadedError, WalletAlreadyExistsError,
ConflictingInputValueError, AlreadyPurchasedError, PrivateKeyNotFoundError, InputStringIsBlankError,
InputValueError
ConflictingInputValueError, AlreadyPurchasedError, PrivateKeyNotFoundError, InputStringIsBlankError
)
from lbry.extras import system_info
from lbry.extras.daemon import analytics
from lbry.extras.daemon.components import WALLET_COMPONENT, DATABASE_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT
from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT, TRACKER_ANNOUNCER_COMPONENT
from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT
from lbry.extras.daemon.components import EXCHANGE_RATE_MANAGER_COMPONENT, UPNP_COMPONENT
from lbry.extras.daemon.componentmanager import RequiredCondition
from lbry.extras.daemon.componentmanager import ComponentManager

@@ -53,8 +51,9 @@ from lbry.extras.daemon.undecorated import undecorated
from lbry.extras.daemon.security import ensure_request_allowed
from lbry.file_analysis import VideoFileAnalyzer
from lbry.schema.claim import Claim
from lbry.schema.url import URL

from lbry.schema.url import URL, normalize_name
from lbry.wallet.server.db.elasticsearch.constants import RANGE_FIELDS, REPLACEMENTS
MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}

if typing.TYPE_CHECKING:
from lbry.blob.blob_manager import BlobManager

@@ -67,29 +66,6 @@ if typing.TYPE_CHECKING:

log = logging.getLogger(__name__)

RANGE_FIELDS = {
'height', 'creation_height', 'activation_height', 'expiration_height',
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
'tx_position', 'repost_count', 'limit_claims_per_channel',
'amount', 'effective_amount', 'support_amount',
'trending_score', 'censor_type', 'tx_num'
}
MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}
REPLACEMENTS = {
'claim_name': 'normalized_name',
'name': 'normalized_name',
'txid': 'tx_id',
'nout': 'tx_nout',
'trending_group': 'trending_score',
'trending_mixed': 'trending_score',
'trending_global': 'trending_score',
'trending_local': 'trending_score',
'reposted': 'repost_count',
'stream_types': 'stream_type',
'media_types': 'media_type',
'valid_channel_signature': 'is_signature_valid'
}


def is_transactional_function(name):
for action in ('create', 'update', 'abandon', 'send', 'fund'):

@@ -195,6 +171,61 @@ def paginate_list(items: List, page: Optional[int], page_size: Optional[int]):
}


def fix_kwargs_for_hub(**kwargs):
repeated_fields = {"media_type", "stream_type", "claim_type"}
value_fields = {"tx_nout", "has_source", "is_signature_valid"}
opcodes = {'=': 0, '<=': 1, '>=': 2, '<': 3, '>': 4}
for key, value in list(kwargs.items()):
if value in (None, [], False):
kwargs.pop(key)
continue
if key in REPLACEMENTS:
kwargs[REPLACEMENTS[key]] = kwargs.pop(key)
key = REPLACEMENTS[key]

if key == "normalized_name":
kwargs[key] = normalize_name(value)
if key == "limit_claims_per_channel":
value = kwargs.pop("limit_claims_per_channel") or 0
if value > 0:
kwargs["limit_claims_per_channel"] = value
elif key == "invalid_channel_signature":
kwargs["is_signature_valid"] = {"value": not kwargs.pop("invalid_channel_signature")}
elif key == "has_no_source":
kwargs["has_source"] = {"value": not kwargs.pop("has_no_source")}
elif key in value_fields:
kwargs[key] = {"value": value} if not isinstance(value, dict) else value
elif key in repeated_fields and isinstance(value, str):
kwargs[key] = [value]
elif key in ("claim_id", "channel_id"):
kwargs[key] = {"invert": False, "value": [kwargs[key]]}
elif key in ("claim_ids", "channel_ids"):
kwargs[key[:-1]] = {"invert": False, "value": kwargs.pop(key)}
elif key == "not_channel_ids":
kwargs["channel_id"] = {"invert": True, "value": kwargs.pop("not_channel_ids")}
elif key in MY_RANGE_FIELDS:
constraints = []
for val in value if isinstance(value, list) else [value]:
operator = '='
if isinstance(val, str) and val[0] in opcodes:
operator_length = 2 if val[:2] in opcodes else 1
operator, val = val[:operator_length], val[operator_length:]
val = [int(val if key != 'fee_amount' else Decimal(val)*1000)]
constraints.append({"op": opcodes[operator], "value": val})
kwargs[key] = constraints
elif key == 'order_by': # TODO: remove this after removing support for old trending args from the api
value = value if isinstance(value, list) else [value]
new_value = []
for new_v in value:
migrated = new_v if new_v not in (
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
) else 'trending_score'
if migrated not in new_value:
new_value.append(migrated)
kwargs[key] = new_value
return kwargs


DHT_HAS_CONTACTS = "dht_has_contacts"
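`fix_kwargs_for_hub` above turns comparison strings such as ">=400000" into the hub's `{op, value}` constraints using the opcode table. A standalone sketch of just that translation step (same parsing rules as the loop above; the helper name is mine):

```python
# Standalone sketch of the range-field translation: ">=400000" -> {"op": 2, "value": [400000]}.
from decimal import Decimal

OPCODES = {'=': 0, '<=': 1, '>=': 2, '<': 3, '>': 4}

def to_constraint(key, val):
    operator = '='
    if isinstance(val, str) and val[0] in OPCODES:
        operator_length = 2 if val[:2] in OPCODES else 1
        operator, val = val[:operator_length], val[operator_length:]
    value = [int(val if key != 'fee_amount' else Decimal(val) * 1000)]
    return {"op": OPCODES[operator], "value": value}

print(to_constraint("height", ">=400000"))  # {'op': 2, 'value': [400000]}
print(to_constraint("fee_amount", "<1.5"))  # {'op': 3, 'value': [1500]}, fees scaled by 1000
```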
@@ -614,8 +645,7 @@ class Daemon(metaclass=JSONRPCServerType):
content_type='application/json'
)

@staticmethod
async def handle_metrics_get_request(request: web.Request):
async def handle_metrics_get_request(self, request: web.Request):
try:
return web.Response(
text=prom_generate_latest().decode(),

@@ -1328,65 +1358,6 @@ class Daemon(metaclass=JSONRPCServerType):
c.wallets += [wallet_id]
return wallet

@requires("wallet")
async def jsonrpc_wallet_export(self, password=None, wallet_id=None):
"""
Exports encrypted wallet data if password is supplied; otherwise plain JSON.

Wallet must be unlocked to perform this operation.

Usage:
wallet_export [--password=<password>] [--wallet_id=<wallet_id>]

Options:
--password=<password> : (str) password to encrypt outgoing data
--wallet_id=<wallet_id> : (str) wallet being exported

Returns:
(str) data: base64-encoded encrypted wallet, or cleartext JSON

"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if password is None:
return wallet.to_json()
return wallet.pack(password).decode()

@requires("wallet")
async def jsonrpc_wallet_import(self, data, password=None, wallet_id=None, blocking=False):
"""
Import wallet data and merge accounts and preferences. Data is expected to be JSON if
password is not supplied.

Wallet must be unlocked to perform this operation.

Usage:
wallet_import (<data> | --data=<data>) [<password> | --password=<password>]
[--wallet_id=<wallet_id>] [--blocking]

Options:
--data=<data> : (str) incoming wallet data
--password=<password> : (str) password to decrypt incoming data
--wallet_id=<wallet_id> : (str) wallet being merged into
--blocking : (bool) wait until any new accounts have merged

Returns:
(str) base64-encoded encrypted wallet, or cleartext JSON
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
for new_account in itertools.chain(added_accounts, merged_accounts):
await new_account.maybe_migrate_certificates()
if added_accounts and self.ledger.network.is_connected:
if blocking:
await asyncio.wait([
a.ledger.subscribe_account(a) for a in added_accounts
])
else:
for new_account in added_accounts:
asyncio.create_task(self.ledger.subscribe_account(new_account))
wallet.save()
return await self.jsonrpc_wallet_export(password=password, wallet_id=wallet_id)
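The restored `wallet_export` / `wallet_import` commands round-trip either cleartext JSON (no password) or an encrypted, base64-encoded blob. A minimal usage sketch over the daemon's JSON-RPC interface; the localhost:5279 endpoint and the `requests` call are assumptions about a default local setup, not part of this diff:

```python
# Hypothetical client-side round trip for wallet_export / wallet_import.
import requests

API = "http://localhost:5279"  # assumed default daemon API address

def daemon_call(method, **params):
    response = requests.post(API, json={"method": method, "params": params})
    response.raise_for_status()
    return response.json()["result"]

exported = daemon_call("wallet_export", password="hunter2")                    # encrypted blob
daemon_call("wallet_import", data=exported, password="hunter2", blocking=True)
```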
@requires("wallet")
async def jsonrpc_wallet_add(self, wallet_id):
"""

@@ -1855,7 +1826,7 @@ class Daemon(metaclass=JSONRPCServerType):
Options:
--to_account=<to_account> : (str) send to this account
--from_account=<from_account> : (str) spend from this account
--amount=<amount> : (decimal) the amount to transfer lbc
--amount=<amount> : (str) the amount to transfer lbc
--everything : (bool) transfer everything (excluding claims), default: false.
--outputs=<outputs> : (int) split payment across many outputs, default: 1.
--wallet_id=<wallet_id> : (str) limit operation to specific wallet.

@@ -1878,48 +1849,6 @@ class Daemon(metaclass=JSONRPCServerType):
outputs=outputs, broadcast=broadcast
)

@requires("wallet")
async def jsonrpc_account_deposit(
self, txid, nout, redeem_script, private_key,
to_account=None, wallet_id=None, preview=False, blocking=False
):
"""
Spend a time locked transaction into your account.

Usage:
account_deposit <txid> <nout> <redeem_script> <private_key>
[<to_account> | --to_account=<to_account>]
[--wallet_id=<wallet_id>] [--preview] [--blocking]

Options:
--txid=<txid> : (str) id of the transaction
--nout=<nout> : (int) output number in the transaction
--redeem_script=<redeem_script> : (str) redeem script for output
--private_key=<private_key> : (str) private key to sign transaction
--to_account=<to_account> : (str) deposit to this account
--wallet_id=<wallet_id> : (str) limit operation to specific wallet.
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until tx has synced

Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(to_account)
other_tx = await self.wallet_manager.get_transaction(txid)
tx = await Transaction.spend_time_lock(
other_tx.outputs[nout], unhexlify(redeem_script), account
)
pk = PrivateKey.from_bytes(
account.ledger, Base58.decode_check(private_key)[1:-1]
)
await tx.sign([account], {pk.address: pk})
if not preview:
await self.broadcast_or_release(tx, blocking)
self.component_manager.loop.create_task(self.analytics_manager.send_credits_sent())
else:
await self.ledger.release_tx(tx)
return tx

@requires(WALLET_COMPONENT)
def jsonrpc_account_send(self, amount, addresses, account_id=None, wallet_id=None, preview=False, blocking=False):
"""

@@ -1991,9 +1920,7 @@ class Daemon(metaclass=JSONRPCServerType):
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
wallet_changed = False
if data is not None:
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
for new_account in itertools.chain(added_accounts, merged_accounts):
await new_account.maybe_migrate_certificates()
added_accounts = wallet.merge(self.wallet_manager, password, data)
if added_accounts and self.ledger.network.is_connected:
if blocking:
await asyncio.wait([

@@ -2410,7 +2337,6 @@ class Daemon(metaclass=JSONRPCServerType):

Usage:
claim_list [--claim_type=<claim_type>...] [--claim_id=<claim_id>...] [--name=<name>...] [--is_spent]
[--reposted_claim_id=<reposted_claim_id>...]
[--channel_id=<channel_id>...] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--has_source | --has_no_source] [--page=<page>] [--page_size=<page_size>]
[--resolve] [--order_by=<order_by>] [--no_totals] [--include_received_tips]

@@ -2421,7 +2347,6 @@ class Daemon(metaclass=JSONRPCServerType):
--channel_id=<channel_id> : (str or list) streams in this channel
--name=<name> : (str or list) claim name
--is_spent : (bool) shows previous claim updates and abandons
--reposted_claim_id=<reposted_claim_id> : (str or list) reposted claim id
--account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet
--has_source : (bool) list claims containing a source field
@@ -2619,27 +2544,42 @@ class Daemon(metaclass=JSONRPCServerType):

Returns: {Paginated[Output]}
"""
if "claim_ids" in kwargs and not kwargs["claim_ids"]:
kwargs.pop("claim_ids")
if {'claim_id', 'claim_ids'}.issubset(kwargs):
raise ConflictingInputValueError('claim_id', 'claim_ids')
if kwargs.pop('valid_channel_signature', False):
kwargs['signature_valid'] = 1
if kwargs.pop('invalid_channel_signature', False):
kwargs['signature_valid'] = 0
if 'has_no_source' in kwargs:
kwargs['has_source'] = not kwargs.pop('has_no_source')
if 'order_by' in kwargs: # TODO: remove this after removing support for old trending args from the api
value = kwargs.pop('order_by')
value = value if isinstance(value, list) else [value]
new_value = []
for new_v in value:
migrated = new_v if new_v not in (
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
) else 'trending_score'
if migrated not in new_value:
new_value.append(migrated)
kwargs['order_by'] = new_value
if self.ledger.config.get('use_go_hub'):
host = self.ledger.network.client.server[0]
port = "50051"
kwargs['new_sdk_server'] = f"{host}:{port}"
if kwargs.get("channel"):
channel = kwargs.pop("channel")
channel_obj = (await self.jsonrpc_resolve(channel))[channel]
if isinstance(channel_obj, dict):
# This happens when the channel doesn't exist
kwargs["channel_id"] = ""
else:
kwargs["channel_id"] = channel_obj.claim_id
kwargs = fix_kwargs_for_hub(**kwargs)
else:
# Don't do this if using the hub server, it screws everything up
if "claim_ids" in kwargs and not kwargs["claim_ids"]:
kwargs.pop("claim_ids")
if {'claim_id', 'claim_ids'}.issubset(kwargs):
raise ConflictingInputValueError('claim_id', 'claim_ids')
if kwargs.pop('valid_channel_signature', False):
kwargs['signature_valid'] = 1
if kwargs.pop('invalid_channel_signature', False):
kwargs['signature_valid'] = 0
if 'has_no_source' in kwargs:
kwargs['has_source'] = not kwargs.pop('has_no_source')
if 'order_by' in kwargs: # TODO: remove this after removing support for old trending args from the api
value = kwargs.pop('order_by')
value = value if isinstance(value, list) else [value]
new_value = []
for new_v in value:
migrated = new_v if new_v not in (
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
) else 'trending_score'
if migrated not in new_value:
new_value.append(migrated)
kwargs['order_by'] = new_value
page_num, page_size = abs(kwargs.pop('page', 1)), min(abs(kwargs.pop('page_size', DEFAULT_PAGE_SIZE)), 50)
wallet = self.wallet_manager.get_wallet_or_default(kwargs.pop('wallet_id', None))
kwargs.update({'offset': page_size * (page_num - 1), 'limit': page_size})

@@ -2775,7 +2715,7 @@ class Daemon(metaclass=JSONRPCServerType):
wallet.save()
await self.broadcast_or_release(tx, blocking)
self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
tx, txo, claim_address, claim, name
tx, txo, claim_address, claim, name, dewies_to_lbc(amount)
)]))
self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
else:

@@ -2934,7 +2874,7 @@ class Daemon(metaclass=JSONRPCServerType):
wallet.save()
await self.broadcast_or_release(tx, blocking)
self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
)]))
self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
else:

@@ -2944,21 +2884,19 @@ class Daemon(metaclass=JSONRPCServerType):

@requires(WALLET_COMPONENT)
async def jsonrpc_channel_sign(
self, channel_name=None, channel_id=None, hexdata=None, salt=None,
channel_account_id=None, wallet_id=None):
self, channel_name=None, channel_id=None, hexdata=None, channel_account_id=None, wallet_id=None):
"""
Signs data using the specified channel signing key.

Usage:
channel_sign [<channel_name> | --channel_name=<channel_name>] [<channel_id> | --channel_id=<channel_id>]
[<hexdata> | --hexdata=<hexdata>] [<salt> | --salt=<salt>]
channel_sign [<channel_name> | --channel_name=<channel_name>]
[<channel_id> | --channel_id=<channel_id>] [<hexdata> | --hexdata=<hexdata>]
[--channel_account_id=<channel_account_id>...] [--wallet_id=<wallet_id>]

Options:
--channel_name=<channel_name> : (str) name of channel used to sign (or use channel id)
--channel_id=<channel_id> : (str) claim id of channel used to sign (or use channel name)
--hexdata=<hexdata> : (str) data to sign, encoded as hexadecimal
--salt=<salt> : (str) salt to use for signing, default is to use timestamp
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet

@@ -2975,13 +2913,11 @@ class Daemon(metaclass=JSONRPCServerType):
signing_channel = await self.get_channel_or_error(
wallet, channel_account_id, channel_id, channel_name, for_signing=True
)
if salt is None:
salt = str(int(time.time()))
signature = signing_channel.sign_data(unhexlify(str(hexdata)), salt)
timestamp = str(int(time.time()))
signature = signing_channel.sign_data(unhexlify(str(hexdata)), timestamp)
return {
'signature': signature,
'signing_ts': salt, # DEPRECATED
'salt': salt,
'signing_ts': timestamp
}

@requires(WALLET_COMPONENT)
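`channel_sign` above accepts an optional `--salt`, defaults it to a timestamp, and echoes it back alongside the signature (keeping `signing_ts` for older callers). A small usage sketch against the same assumed local endpoint; the channel name and payload are illustrative:

```python
# Hypothetical channel_sign call; payload is "hello" hex-encoded.
import requests

result = requests.post("http://localhost:5279", json={
    "method": "channel_sign",
    "params": {"channel_name": "@example", "hexdata": "68656c6c6f", "salt": "12345"},
}).json()["result"]

print(result["signature"], result["salt"])  # signature hex plus the salt that was used
```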
@ -3299,17 +3235,15 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
)
|
||||
|
||||
@requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT)
|
||||
async def jsonrpc_stream_repost(
|
||||
self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
|
||||
channel_name=None, channel_account_id=None, account_id=None, wallet_id=None,
|
||||
claim_address=None, funding_account_ids=None, preview=False, blocking=False, **kwargs):
|
||||
async def jsonrpc_stream_repost(self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
|
||||
channel_name=None, channel_account_id=None, account_id=None, wallet_id=None,
|
||||
claim_address=None, funding_account_ids=None, preview=False, blocking=False):
|
||||
"""
|
||||
Creates a claim that references an existing stream by its claim id.
|
||||
|
||||
Usage:
|
||||
stream_repost (<name> | --name=<name>) (<bid> | --bid=<bid>) (<claim_id> | --claim_id=<claim_id>)
|
||||
[--allow_duplicate_name=<allow_duplicate_name>]
|
||||
[--title=<title>] [--description=<description>] [--tags=<tags>...]
|
||||
[--channel_id=<channel_id> | --channel_name=<channel_name>]
|
||||
[--channel_account_id=<channel_account_id>...]
|
||||
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
|
||||
|
@ -3322,9 +3256,6 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
--claim_id=<claim_id> : (str) id of the claim being reposted
|
||||
--allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
|
||||
given name. default: false.
|
||||
--title=<title> : (str) title of the repost
|
||||
--description=<description> : (str) description of the repost
|
||||
--tags=<tags> : (list) add repost tags
|
||||
--channel_id=<channel_id> : (str) claim id of the publisher channel
|
||||
--channel_name=<channel_name> : (str) name of the publisher channel
|
||||
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
|
||||
|
@ -3359,7 +3290,6 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
raise Exception('Invalid claim id. It is expected to be a 40 characters long hexadecimal string.')
|
||||
|
||||
claim = Claim()
|
||||
claim.repost.update(**kwargs)
|
||||
claim.repost.reference.claim_id = claim_id
|
||||
tx = await Transaction.claim_create(
|
||||
name, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
|
||||
|
@ -3537,7 +3467,7 @@ class Daemon(metaclass=JSONRPCServerType):
|
|||
|
||||
async def save_claims():
|
||||
await self.storage.save_claims([self._old_get_temp_claim_info(
|
||||
tx, new_txo, claim_address, claim, name
|
||||
tx, new_txo, claim_address, claim, name, dewies_to_lbc(amount)
|
||||
)])
|
||||
if file_path is not None:
|
||||
await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id)
|
||||
|
@@ -3684,17 +3614,15 @@ class Daemon(metaclass=JSONRPCServerType):
)
if len(existing_claims) != 1:
account_ids = ', '.join(f"'{account.id}'" for account in accounts)
raise InputValueError(
# TODO: use error from lbry.error
raise Exception(
f"Can't find the stream '{claim_id}' in account(s) {account_ids}."
)

old_txo = existing_claims[0]
if not old_txo.claim.is_stream and not old_txo.claim.is_repost:
# in principle it should work with any type of claim, but its safer to
# limit it to ones we know won't be broken. in the future we can expand
# this if we have a test case for e.g. channel or support claims
raise InputValueError(
f"A claim with id '{claim_id}' was found but it is not a stream or repost claim."
if not old_txo.claim.is_stream:
# TODO: use error from lbry.error
raise Exception(
f"A claim with id '{claim_id}' was found but it is not a stream claim."
)

if bid is not None:
@@ -3725,34 +3653,28 @@ class Daemon(metaclass=JSONRPCServerType):
if replace:
claim = Claim()
if old_txo.claim.is_stream:
if old_txo.claim.stream.has_source:
claim.stream.message.source.CopyFrom(
old_txo.claim.stream.message.source
)
stream_type = old_txo.claim.stream.stream_type
if stream_type:
old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
new_stream_type = getattr(claim.stream.message, stream_type)
new_stream_type.CopyFrom(old_stream_type)
if old_txo.claim.stream.has_source:
claim.stream.message.source.CopyFrom(
old_txo.claim.stream.message.source
)
stream_type = old_txo.claim.stream.stream_type
if stream_type:
old_stream_type = getattr(old_txo.claim.stream.message, stream_type)
new_stream_type = getattr(claim.stream.message, stream_type)
new_stream_type.CopyFrom(old_stream_type)
claim.stream.update(file_path=file_path, **kwargs)
else:
claim = Claim.from_bytes(old_txo.claim.to_bytes())

if old_txo.claim.is_stream:
claim.stream.update(file_path=file_path, **kwargs)
elif old_txo.claim.is_repost:
claim.repost.update(**kwargs)

if clear_channel:
claim.clear_signature()
tx = await Transaction.claim_update(
old_txo, claim, amount, claim_address, funding_accounts, funding_accounts[0],
channel if not clear_channel else None
)

new_txo = tx.outputs[0]
stream_hash = None
if not preview and old_txo.claim.is_stream:
if not preview:
old_stream = self.file_manager.get_filtered(sd_hash=old_txo.claim.stream.source.sd_hash)
old_stream = old_stream[0] if old_stream else None
if file_path is not None:
@@ -3774,7 +3696,7 @@ class Daemon(metaclass=JSONRPCServerType):
async def save_claims():
await self.storage.save_claims([self._old_get_temp_claim_info(
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name
tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
)])
if stream_hash:
await self.storage.save_content_claim(stream_hash, new_txo.id)
@@ -4036,8 +3958,6 @@ class Daemon(metaclass=JSONRPCServerType):
[--languages=<languages>...] [--clear_languages]
[--locations=<locations>...] [--clear_locations]
[--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
[--channel_id=<channel_id> | --channel_name=<channel_name>]
[--channel_account_id=<channel_account_id>...]
[--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--claim_address=<claim_address>]
[--funding_account_ids=<funding_account_ids>...]
@@ -4093,10 +4013,6 @@ class Daemon(metaclass=JSONRPCServerType):
--clear_locations : (bool) clear existing locations (prior to adding new ones)
--thumbnail_url=<thumbnail_url>: (str) thumbnail url
--channel_id=<channel_id> : (str) claim id of the publisher channel
--channel_name=<channel_name> : (str) name of the publisher channel
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--account_id=<account_id> : (str) account in which to look for collection (default: all)
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet
--funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
@@ -4344,7 +4260,7 @@ class Daemon(metaclass=JSONRPCServerType):
'nout': tx.position,
'address': claim_address,
'claim_id': claim_id,
'amount': dewies_to_lbc(new_txo.amount)
'amount': dewies_to_lbc(amount)
}]})
self.component_manager.loop.create_task(self.analytics_manager.send_claim_action('new_support'))
else:
@@ -4961,16 +4877,21 @@ class Daemon(metaclass=JSONRPCServerType):
DHT / Blob Exchange peer commands.
"""

async def jsonrpc_peer_list(self, blob_hash, page=None, page_size=None):
@requires(DHT_COMPONENT)
async def jsonrpc_peer_list(self, blob_hash, search_bottom_out_limit=None, page=None, page_size=None):
"""
Get peers for blob hash

Usage:
peer_list (<blob_hash> | --blob_hash=<blob_hash>)
[<search_bottom_out_limit> | --search_bottom_out_limit=<search_bottom_out_limit>]
[--page=<page>] [--page_size=<page_size>]

Options:
--blob_hash=<blob_hash> : (str) find available peers for this blob hash
--search_bottom_out_limit=<search_bottom_out_limit> : (int) the number of search probes in a row
that don't find any new peers
before giving up and returning
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
@@ -4982,29 +4903,28 @@ class Daemon(metaclass=JSONRPCServerType):
if not is_valid_blobhash(blob_hash):
# TODO: use error from lbry.error
raise Exception("invalid blob hash")
peer_q = asyncio.Queue(loop=self.component_manager.loop)
if self.component_manager.has_component(TRACKER_ANNOUNCER_COMPONENT):
tracker = self.component_manager.get_component(TRACKER_ANNOUNCER_COMPONENT)
tracker_peers = await tracker.get_kademlia_peer_list(bytes.fromhex(blob_hash))
log.info("Found %d peers for %s from trackers.", len(tracker_peers), blob_hash[:8])
peer_q.put_nowait(tracker_peers)
elif not self.component_manager.has_component(DHT_COMPONENT):
raise Exception("Peer list needs, at least, either a DHT component or a Tracker component for discovery.")
if search_bottom_out_limit is not None:
search_bottom_out_limit = int(search_bottom_out_limit)
if search_bottom_out_limit <= 0:
# TODO: use error from lbry.error
raise Exception("invalid bottom out limit")
else:
search_bottom_out_limit = 4
peers = []
if self.component_manager.has_component(DHT_COMPONENT):
await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
peer_q = asyncio.Queue(loop=self.component_manager.loop)
await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
while not peer_q.empty():
peers.extend(peer_q.get_nowait())
results = {
(peer.address, peer.tcp_port): {
"node_id": hexlify(peer.node_id).decode() if peer.node_id else None,
results = [
{
"node_id": hexlify(peer.node_id).decode(),
"address": peer.address,
"udp_port": peer.udp_port,
"tcp_port": peer.tcp_port,
}
for peer in peers
}
return paginate_list(list(results.values()), page, page_size)
]
return paginate_list(results, page, page_size)

@requires(DATABASE_COMPONENT)
async def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
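As a side note, a minimal sketch (hypothetical Peer tuple, not the project's DHT classes) of why the two result shapes above differ: keying results by (address, tcp_port) collapses peers that share a TCP endpoint before pagination, while the plain list keeps one entry per DHT record.

    from collections import namedtuple

    # hypothetical stand-in for the peer objects used above
    Peer = namedtuple("Peer", "node_id address udp_port tcp_port")

    peers = [
        Peer("aa", "10.0.0.1", 4444, 3333),
        Peer("bb", "10.0.0.1", 4445, 3333),  # same address/TCP port, different node
    ]

    deduped = {
        (p.address, p.tcp_port): {
            "node_id": p.node_id, "address": p.address,
            "udp_port": p.udp_port, "tcp_port": p.tcp_port,
        }
        for p in peers
    }
    as_list = [p._asdict() for p in peers]

    print(len(deduped))  # 1: duplicates collapsed, as in the dict-keyed variant
    print(len(as_list))  # 2: every record kept, as in the list variant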
@@ -5474,11 +5394,11 @@ class Daemon(metaclass=JSONRPCServerType):
return results

@staticmethod
def _old_get_temp_claim_info(tx, txo, address, claim_dict, name):
def _old_get_temp_claim_info(tx, txo, address, claim_dict, name, bid):
return {
"claim_id": txo.claim_id,
"name": name,
"amount": dewies_to_lbc(txo.amount),
"amount": bid,
"address": address,
"txid": tx.id,
"nout": txo.position,
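Both versions of this helper report the amount as an LBC string; the difference is whether it is recomputed from txo.amount or passed in pre-formatted as bid. A minimal, self-contained sketch of the assumed unit conversion (hypothetical helper, not the project's dewies_to_lbc; it assumes 1 LBC == 10**8 dewies):

    from decimal import Decimal

    COIN = 10 ** 8  # assumption: 1 LBC is stored as 10**8 "dewies"

    def dewies_to_lbc_sketch(dewies: int) -> str:
        # hypothetical stand-in for lbry.wallet.dewies.dewies_to_lbc
        return str(Decimal(dewies) / COIN)

    print(dewies_to_lbc_sketch(150_000_000))  # 1.5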
@@ -80,6 +80,8 @@ class MarketFeed:
self.rate = ExchangeRate(self.market, rate, int(time.time()))
self.last_check = time.time()
return self.rate
except asyncio.CancelledError:
raise
except asyncio.TimeoutError:
log.warning("Timed out fetching exchange rate from %s.", self.name)
except json.JSONDecodeError as e:

@@ -194,9 +196,9 @@ FEEDS: Iterable[Type[MarketFeed]] = (
BittrexUSDFeed,
CoinExBTCFeed,
CoinExUSDFeed,
# HotbitBTCFeed,
# HotbitUSDFeed,
# UPbitBTCFeed,
HotbitBTCFeed,
HotbitUSDFeed,
UPbitBTCFeed,
)
@@ -328,8 +328,8 @@ class JSONResponseEncoder(JSONEncoder):
result.update({
'streaming_url': managed_stream.stream_url,
'stream_hash': managed_stream.stream_hash,
'stream_name': managed_stream.stream_name,
'suggested_file_name': managed_stream.suggested_file_name,
'stream_name': managed_stream.descriptor.stream_name,
'suggested_file_name': managed_stream.descriptor.suggested_file_name,
'sd_hash': managed_stream.descriptor.sd_hash,
'mime_type': managed_stream.mime_type,
'key': managed_stream.descriptor.key,
@@ -37,8 +37,6 @@ def migrate_db(conf, start, end):
from .migrate13to14 import do_migration
elif current == 14:
from .migrate14to15 import do_migration
elif current == 15:
from .migrate15to16 import do_migration
else:
raise Exception(f"DB migration of version {current} to {current+1} is not available")
try:
@@ -1,17 +0,0 @@
import os
import sqlite3


def do_migration(conf):
db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
connection = sqlite3.connect(db_path)
cursor = connection.cursor()

cursor.executescript("""
update blob set should_announce=0
where should_announce=1 and
blob.blob_hash in (select stream_blob.blob_hash from stream_blob where position=0);
""")

connection.commit()
connection.close()
@@ -20,7 +20,7 @@ def do_migration(conf):
"left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
blobs_by_stream = {}
for stream_hash, position, iv, blob_hash, blob_length in blobs:
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, 0, blob_hash))
blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))

for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,
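The only difference between the two BlobInfo calls above is an extra added_on argument inserted before the blob hash. A small sketch of the assumed field order (hypothetical dataclass mirroring the five-argument call site, not the project's BlobInfo):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class BlobInfoSketch:
        blob_num: int            # position of the blob within the stream
        length: int              # encrypted blob length in bytes
        iv: str                  # per-blob initialization vector
        added_on: float          # only present in the five-argument variant
        blob_hash: Optional[str] = None

    # mirrors BlobInfo(position, blob_length or 0, iv, 0, blob_hash)
    BlobInfoSketch(0, 2097152, "00" * 16, 0, "ab" * 48)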
@@ -187,8 +187,8 @@ def store_stream(transaction: sqlite3.Connection, sd_blob: 'BlobFile', descripto
).fetchall()
# ensure should_announce is set regardless if insert was ignored
transaction.execute(
"update blob set should_announce=1 where blob_hash in (?)",
(sd_blob.blob_hash,)
"update blob set should_announce=1 where blob_hash in (?, ?)",
(sd_blob.blob_hash, descriptor.blobs[0].blob_hash,)
).fetchall()
@@ -449,8 +449,7 @@ class SQLiteStorage(SQLiteMixin):
return await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob left join stream_blob using (blob_hash) "
"where stream_blob.stream_hash is null and blob.is_mine=? and blob.status='finished'"
"order by blob.blob_length desc, blob.added_on asc",
"where stream_blob.stream_hash is null and blob.is_mine=? order by blob.added_on asc",
(is_mine,)
)
@@ -463,8 +462,7 @@ class SQLiteStorage(SQLiteMixin):
content_blobs = await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)"
"cross join file using (stream_hash)"
"where blob.is_mine=? and blob.status='finished' order by blob.added_on asc, blob.blob_length asc",
"cross join file using (stream_hash) where blob.is_mine=? order by blob.added_on asc",
(is_mine,)
)
return content_blobs + sd_blobs
@@ -482,7 +480,6 @@ class SQLiteStorage(SQLiteMixin):
is_mine=1
then blob_length else 0 end), 0) as private_storage
from blob left join stream_blob using (blob_hash)
where blob_hash not in (select sd_hash from stream) and blob.status="finished"
""")
return {
'network_storage': network_size,
@@ -534,8 +531,7 @@ class SQLiteStorage(SQLiteMixin):
def _get_blobs_for_stream(transaction):
crypt_blob_infos = []
stream_blobs = transaction.execute(
"select s.blob_hash, s.position, s.iv, b.added_on "
"from stream_blob s left outer join blob b on b.blob_hash=s.blob_hash where stream_hash=? "
"select blob_hash, position, iv from stream_blob where stream_hash=? "
"order by position asc", (stream_hash, )
).fetchall()
if only_completed:
@@ -555,10 +551,9 @@ class SQLiteStorage(SQLiteMixin):
for blob_hash, length in lengths:
blob_length_dict[blob_hash] = length

current_time = time.time()
for blob_hash, position, iv, added_on in stream_blobs:
for blob_hash, position, iv in stream_blobs:
blob_length = blob_length_dict.get(blob_hash, 0)
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, added_on or current_time, blob_hash))
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash))
if not blob_hash:
break
return crypt_blob_infos
@@ -793,7 +788,7 @@ class SQLiteStorage(SQLiteMixin):
await self.db.run(_save_claims)
if update_file_callbacks:
await asyncio.wait(map(asyncio.create_task, update_file_callbacks))
await asyncio.wait(update_file_callbacks)
if claim_id_to_supports:
await self.save_supports(claim_id_to_supports)
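One side of the hunk above wraps the pending callbacks in tasks before awaiting them. That matches newer asyncio behaviour: passing bare coroutines to asyncio.wait() was deprecated in Python 3.8 and rejected from 3.11 on. A minimal, self-contained sketch of the pattern:

    import asyncio

    async def update_file(i):
        await asyncio.sleep(0)  # stand-in for a storage update callback
        return i

    async def main():
        callbacks = [update_file(i) for i in range(3)]
        # Wrap coroutines in tasks explicitly; required on Python 3.11+.
        done, _ = await asyncio.wait(map(asyncio.create_task, callbacks))
        print(sorted(task.result() for task in done))  # [0, 1, 2]

    asyncio.run(main())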
@@ -13,12 +13,11 @@ from lbry.schema.url import URL
from lbry.wallet.dewies import dewies_to_lbc
from lbry.file.source_manager import SourceManager
from lbry.file.source import ManagedDownloadSource
from lbry.extras.daemon.storage import StoredContentClaim
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet import WalletManager
from lbry.wallet import WalletManager, Output
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager

log = logging.getLogger(__name__)
@@ -50,10 +49,10 @@ class FileManager:
await manager.started.wait()
self.started.set()

async def stop(self):
def stop(self):
for manager in self.source_managers.values():
# fixme: pop or not?
await manager.stop()
manager.stop()
self.started.clear()

@cache_concurrent
@@ -99,6 +98,8 @@ class FileManager:
except asyncio.TimeoutError:
raise ResolveTimeoutError(uri)
except Exception as err:
if isinstance(err, asyncio.CancelledError):
raise
log.exception("Unexpected error resolving stream:")
raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
if 'error' in resolved_result:
@@ -193,24 +194,21 @@ class FileManager:
####################
# make downloader and wait for start
####################
# temporary with fields we know so downloader can start. Missing fields are populated later.
stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
amount=txo.amount, height=txo.tx_ref.height,
serialized=claim.to_bytes().hex())

if not claim.stream.source.bt_infohash:
# fixme: this shouldnt be here
stream = ManagedStream(
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
analytics_manager=self.analytics_manager, claim=stored_claim
analytics_manager=self.analytics_manager
)
stream.downloader.node = source_manager.node
else:
stream = TorrentSource(
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
file_name=file_name, download_directory=download_directory or self.config.download_dir,
status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager,
status=ManagedStream.STATUS_RUNNING,
analytics_manager=self.analytics_manager,
torrent_session=source_manager.torrent_session
)
log.info("starting download for %s", uri)
@@ -242,12 +240,13 @@ class FileManager:
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
stream.set_claim(claim_info, claim)
if save_file:
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download),
loop=self.loop)
return stream
except asyncio.TimeoutError:
error = DownloadDataTimeoutError(stream.sd_hash)
raise error
except (Exception, asyncio.CancelledError) as err: # forgive data timeout, don't delete stream
except Exception as err: # forgive data timeout, don't delete stream
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
if isinstance(err, expected):
@@ -45,12 +45,11 @@ class ManagedDownloadSource:
self.purchase_receipt = None
self._added_on = added_on
self.analytics_manager = analytics_manager
self.downloader = None

self.saving = asyncio.Event()
self.finished_writing = asyncio.Event()
self.started_writing = asyncio.Event()
self.finished_write_attempt = asyncio.Event()
self.saving = asyncio.Event(loop=self.loop)
self.finished_writing = asyncio.Event(loop=self.loop)
self.started_writing = asyncio.Event(loop=self.loop)
self.finished_write_attempt = asyncio.Event(loop=self.loop)

# @classmethod
# async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
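The loop= keyword seen on one side of this hunk was deprecated in Python 3.8 and removed in Python 3.10, which is why the other side creates the events without it. A short sketch of the loop-free style (hypothetical class, not ManagedDownloadSource itself):

    import asyncio

    class WriterState:
        def __init__(self):
            # On Python 3.10+ asyncio.Event() takes no loop argument;
            # the event binds to the running loop when first used.
            self.saving = asyncio.Event()
            self.finished_writing = asyncio.Event()

    async def main():
        state = WriterState()
        state.finished_writing.set()
        await state.finished_writing.wait()
        print("done")

    asyncio.run(main())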
@@ -67,7 +66,7 @@ class ManagedDownloadSource:
async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
raise NotImplementedError()

async def stop_tasks(self):
def stop_tasks(self):
raise NotImplementedError()

def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
@@ -54,16 +54,16 @@ class SourceManager:
self.storage = storage
self.analytics_manager = analytics_manager
self._sources: typing.Dict[str, ManagedDownloadSource] = {}
self.started = asyncio.Event()
self.started = asyncio.Event(loop=self.loop)

def add(self, source: ManagedDownloadSource):
self._sources[source.identifier] = source

async def remove(self, source: ManagedDownloadSource):
def remove(self, source: ManagedDownloadSource):
if source.identifier not in self._sources:
return
self._sources.pop(source.identifier)
await source.stop_tasks()
source.stop_tasks()

async def initialize_from_database(self):
raise NotImplementedError()
@@ -72,10 +72,10 @@ class SourceManager:
await self.initialize_from_database()
self.started.set()

async def stop(self):
def stop(self):
while self._sources:
_, source = self._sources.popitem()
await source.stop_tasks()
source.stop_tasks()
self.started.clear()

async def create(self, file_path: str, key: Optional[bytes] = None,
@@ -83,7 +83,7 @@ class SourceManager:
raise NotImplementedError()

async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
await self.remove(source)
self.remove(source)
if delete_file and source.output_file_exists:
os.remove(source.full_path)
@@ -2,5 +2,4 @@ build:
rm types/v2/* -rf
touch types/v2/__init__.py
cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto
cd types/v2/ && cp ../../../../../types/jsonschema/* ./
sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py
@@ -398,12 +398,6 @@ class Repost(BaseClaim):
claim_type = Claim.REPOST

def to_dict(self):
claim = super().to_dict()
if claim.pop('claim_hash', None):
claim['claim_id'] = self.reference.claim_id
return claim

@property
def reference(self) -> ClaimReference:
return ClaimReference(self.message)
@@ -1,6 +1,4 @@
import os
import filetype
import logging

types_map = {
# http://www.iana.org/assignments/media-types
@@ -168,38 +166,10 @@ types_map = {
'.wmv': ('video/x-ms-wmv', 'video')
}

# maps detected extensions to the possible analogs
# i.e. .cbz file is actually a .zip
synonyms_map = {
'.zip': ['.cbz'],
'.rar': ['.cbr'],
'.ar': ['.a']
}

log = logging.getLogger(__name__)


def guess_media_type(path):
_, ext = os.path.splitext(path)
extension = ext.strip().lower()

try:
kind = filetype.guess(path)
if kind:
real_extension = f".{kind.extension}"

if extension != real_extension:
if extension:
log.warning(f"file extension does not match it's contents: {path}, identified as {real_extension}")
else:
log.debug(f"file {path} does not have extension, identified by it's contents as {real_extension}")

if extension not in synonyms_map.get(real_extension, []):
extension = real_extension

except OSError as error:
pass

if extension[1:]:
if extension in types_map:
return types_map[extension]
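The guess_media_type variant above cross-checks the file name extension against the magic bytes via the third-party filetype package (filetype.guess returns an object with .extension and .mime, or None for unknown signatures). A standalone sketch of that check, assuming only the filetype API:

    import os
    import filetype  # third-party package, the same one imported above

    def sniff_extension(path: str) -> str:
        _, ext = os.path.splitext(path)
        declared = ext.strip().lower()
        kind = filetype.guess(path)  # None when the magic bytes are not recognized
        detected = f".{kind.extension}" if kind else ""
        if detected and detected != declared:
            # mismatch: prefer what the file contents say over the file name
            return detected
        return declared or detected

    # e.g. sniff_extension("movie.mkv") would return ".mkv",
    # or the detected extension if the file is mislabeled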
@@ -1,11 +1,13 @@
import base64
from typing import List, Union, Optional, NamedTuple
from typing import List, TYPE_CHECKING, Union, Optional
from binascii import hexlify
from itertools import chain

from lbry.error import ResolveCensoredError
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage
if TYPE_CHECKING:
from lbry.wallet.server.db.common import ResolveResult

INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
@@ -22,31 +24,6 @@ def set_reference(reference, claim_hash, rows):
return


class ResolveResult(NamedTuple):
name: str
normalized_name: str
claim_hash: bytes
tx_num: int
position: int
tx_hash: bytes
height: int
amount: int
short_url: str
is_controlling: bool
canonical_url: str
creation_height: int
activation_height: int
expiration_height: int
effective_amount: int
support_amount: int
reposted: int
last_takeover_height: Optional[int]
claims_in_channel: Optional[int]
channel_hash: Optional[bytes]
reposted_claim_hash: Optional[bytes]
signature_valid: Optional[bool]


class Censor:

NOT_CENSORED = 0
@@ -177,6 +154,19 @@ class Outputs:
outputs.blocked, outputs.blocked_total
)

@classmethod
def from_grpc(cls, outputs: OutputsMessage) -> 'Outputs':
txs = set()
for txo_message in chain(outputs.txos, outputs.extra_txos):
if txo_message.WhichOneof('meta') == 'error':
continue
txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
return cls(
outputs.txos, outputs.extra_txos, txs,
outputs.offset, outputs.total,
outputs.blocked, outputs.blocked_total
)

@classmethod
def to_base64(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked=None) -> str:
return base64.b64encode(cls.to_bytes(txo_rows, extra_txo_rows, offset, total, blocked)).decode()
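In from_grpc above, each transaction is recorded as a (txid, height) pair, where the txid is the byte-reversed tx_hash rendered as hex (the usual display order for transaction ids). A tiny standalone illustration of that reversal, using made-up bytes:

    from binascii import hexlify, unhexlify

    tx_hash = unhexlify("11" * 16 + "22" * 16)  # made-up 32-byte hash, internal byte order
    txid = hexlify(tx_hash[::-1]).decode()      # display order: bytes reversed, hex encoded
    print(txid)                                 # the "22" bytes now come first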
960 lbry/schema/types/v2/hub_pb2.py Normal file

@@ -0,0 +1,960 @@
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: hub.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
from . import result_pb2 as result__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor.FileDescriptor(
|
||||
name='hub.proto',
|
||||
package='pb',
|
||||
syntax='proto3',
|
||||
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
|
||||
create_key=_descriptor._internal_create_key,
|
||||
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"\x0e\n\x0c\x45mptyMessage\".\n\rServerMessage\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\"N\n\x0cHelloMessage\x12\x0c\n\x04port\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\"\n\x07servers\x18\x03 \x03(\x0b\x32\x11.pb.ServerMessage\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\x05\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x8e\x0c\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\x05\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x03(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x03(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x03(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x03(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x03(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x03(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x03(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_score\x18\' \x03(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\x05\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x12\x0f\n\x07sd_hash\x18; \x01(\t2\x88\x03\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x12+\n\x04Ping\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12-\n\x05Hello\x12\x10.pb.HelloMessage\x1a\x10.pb.HelloMessage\"\x00\x12/\n\x07\x41\x64\x64Peer\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12\x35\n\rPeerSubscribe\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12.\n\x07Version\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12/\n\x08\x46\x65\x61tures\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12\x30\n\tBroadcast\x12\x10.pb.EmptyMessage\x1a\x0f.pb.UInt32Value\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
|
||||
,
|
||||
dependencies=[result__pb2.DESCRIPTOR,])
|
||||
|
||||
|
||||
|
||||
_RANGEFIELD_OP = _descriptor.EnumDescriptor(
|
||||
name='Op',
|
||||
full_name='pb.RangeField.Op',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
values=[
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='EQ', index=0, number=0,
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='LTE', index=1, number=1,
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='GTE', index=2, number=2,
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='LT', index=3, number=3,
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
_descriptor.EnumValueDescriptor(
|
||||
name='GT', index=4, number=4,
|
||||
serialized_options=None,
|
||||
type=None,
|
||||
create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
containing_type=None,
|
||||
serialized_options=None,
|
||||
serialized_start=373,
|
||||
serialized_end=419,
|
||||
)
|
||||
_sym_db.RegisterEnumDescriptor(_RANGEFIELD_OP)
|
||||
|
||||
|
||||
_EMPTYMESSAGE = _descriptor.Descriptor(
|
||||
name='EmptyMessage',
|
||||
full_name='pb.EmptyMessage',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=31,
|
||||
serialized_end=45,
|
||||
)
|
||||
|
||||
|
||||
_SERVERMESSAGE = _descriptor.Descriptor(
|
||||
name='ServerMessage',
|
||||
full_name='pb.ServerMessage',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='address', full_name='pb.ServerMessage.address', index=0,
|
||||
number=1, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='port', full_name='pb.ServerMessage.port', index=1,
|
||||
number=2, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=47,
|
||||
serialized_end=93,
|
||||
)
|
||||
|
||||
|
||||
_HELLOMESSAGE = _descriptor.Descriptor(
|
||||
name='HelloMessage',
|
||||
full_name='pb.HelloMessage',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='port', full_name='pb.HelloMessage.port', index=0,
|
||||
number=1, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='host', full_name='pb.HelloMessage.host', index=1,
|
||||
number=2, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='servers', full_name='pb.HelloMessage.servers', index=2,
|
||||
number=3, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=95,
|
||||
serialized_end=173,
|
||||
)
|
||||
|
||||
|
||||
_INVERTIBLEFIELD = _descriptor.Descriptor(
|
||||
name='InvertibleField',
|
||||
full_name='pb.InvertibleField',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='invert', full_name='pb.InvertibleField.invert', index=0,
|
||||
number=1, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='value', full_name='pb.InvertibleField.value', index=1,
|
||||
number=2, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=175,
|
||||
serialized_end=223,
|
||||
)
|
||||
|
||||
|
||||
_STRINGVALUE = _descriptor.Descriptor(
|
||||
name='StringValue',
|
||||
full_name='pb.StringValue',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='value', full_name='pb.StringValue.value', index=0,
|
||||
number=1, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=225,
|
||||
serialized_end=253,
|
||||
)
|
||||
|
||||
|
||||
_BOOLVALUE = _descriptor.Descriptor(
|
||||
name='BoolValue',
|
||||
full_name='pb.BoolValue',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='value', full_name='pb.BoolValue.value', index=0,
|
||||
number=1, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=255,
|
||||
serialized_end=281,
|
||||
)
|
||||
|
||||
|
||||
_UINT32VALUE = _descriptor.Descriptor(
|
||||
name='UInt32Value',
|
||||
full_name='pb.UInt32Value',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='value', full_name='pb.UInt32Value.value', index=0,
|
||||
number=1, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=283,
|
||||
serialized_end=311,
|
||||
)
|
||||
|
||||
|
||||
_RANGEFIELD = _descriptor.Descriptor(
|
||||
name='RangeField',
|
||||
full_name='pb.RangeField',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='op', full_name='pb.RangeField.op', index=0,
|
||||
number=1, type=14, cpp_type=8, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='value', full_name='pb.RangeField.value', index=1,
|
||||
number=2, type=5, cpp_type=1, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
_RANGEFIELD_OP,
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=313,
|
||||
serialized_end=419,
|
||||
)
|
||||
|
||||
|
||||
_SEARCHREQUEST = _descriptor.Descriptor(
|
||||
name='SearchRequest',
|
||||
full_name='pb.SearchRequest',
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claim_id', full_name='pb.SearchRequest.claim_id', index=0,
|
||||
number=1, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='channel_id', full_name='pb.SearchRequest.channel_id', index=1,
|
||||
number=2, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='text', full_name='pb.SearchRequest.text', index=2,
|
||||
number=3, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='limit', full_name='pb.SearchRequest.limit', index=3,
|
||||
number=4, type=5, cpp_type=1, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='order_by', full_name='pb.SearchRequest.order_by', index=4,
|
||||
number=5, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='offset', full_name='pb.SearchRequest.offset', index=5,
|
||||
number=6, type=13, cpp_type=3, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='is_controlling', full_name='pb.SearchRequest.is_controlling', index=6,
|
||||
number=7, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='last_take_over_height', full_name='pb.SearchRequest.last_take_over_height', index=7,
|
||||
number=8, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claim_name', full_name='pb.SearchRequest.claim_name', index=8,
|
||||
number=9, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='normalized_name', full_name='pb.SearchRequest.normalized_name', index=9,
|
||||
number=10, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_position', full_name='pb.SearchRequest.tx_position', index=10,
|
||||
number=11, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='amount', full_name='pb.SearchRequest.amount', index=11,
|
||||
number=12, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='timestamp', full_name='pb.SearchRequest.timestamp', index=12,
|
||||
number=13, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='creation_timestamp', full_name='pb.SearchRequest.creation_timestamp', index=13,
|
||||
number=14, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='height', full_name='pb.SearchRequest.height', index=14,
|
||||
number=15, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='creation_height', full_name='pb.SearchRequest.creation_height', index=15,
|
||||
number=16, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='activation_height', full_name='pb.SearchRequest.activation_height', index=16,
|
||||
number=17, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='expiration_height', full_name='pb.SearchRequest.expiration_height', index=17,
|
||||
number=18, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='release_time', full_name='pb.SearchRequest.release_time', index=18,
|
||||
number=19, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='short_url', full_name='pb.SearchRequest.short_url', index=19,
|
||||
number=20, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='canonical_url', full_name='pb.SearchRequest.canonical_url', index=20,
|
||||
number=21, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='title', full_name='pb.SearchRequest.title', index=21,
|
||||
number=22, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='author', full_name='pb.SearchRequest.author', index=22,
|
||||
number=23, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='description', full_name='pb.SearchRequest.description', index=23,
|
||||
number=24, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claim_type', full_name='pb.SearchRequest.claim_type', index=24,
|
||||
number=25, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='repost_count', full_name='pb.SearchRequest.repost_count', index=25,
|
||||
number=26, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='stream_type', full_name='pb.SearchRequest.stream_type', index=26,
|
||||
number=27, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='media_type', full_name='pb.SearchRequest.media_type', index=27,
|
||||
number=28, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='fee_amount', full_name='pb.SearchRequest.fee_amount', index=28,
|
||||
number=29, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='fee_currency', full_name='pb.SearchRequest.fee_currency', index=29,
|
||||
number=30, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='duration', full_name='pb.SearchRequest.duration', index=30,
|
||||
number=31, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='reposted_claim_id', full_name='pb.SearchRequest.reposted_claim_id', index=31,
|
||||
number=32, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='censor_type', full_name='pb.SearchRequest.censor_type', index=32,
|
||||
number=33, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='claims_in_channel', full_name='pb.SearchRequest.claims_in_channel', index=33,
|
||||
number=34, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='is_signature_valid', full_name='pb.SearchRequest.is_signature_valid', index=34,
|
||||
number=36, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='effective_amount', full_name='pb.SearchRequest.effective_amount', index=35,
|
||||
number=37, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='support_amount', full_name='pb.SearchRequest.support_amount', index=36,
|
||||
number=38, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='trending_score', full_name='pb.SearchRequest.trending_score', index=37,
|
||||
number=39, type=11, cpp_type=10, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_id', full_name='pb.SearchRequest.tx_id', index=38,
|
||||
number=43, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=39,
|
||||
number=44, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='signature', full_name='pb.SearchRequest.signature', index=40,
|
||||
number=45, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=41,
|
||||
number=46, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=42,
|
||||
number=47, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=43,
|
||||
number=48, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='any_tags', full_name='pb.SearchRequest.any_tags', index=44,
|
||||
number=49, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='all_tags', full_name='pb.SearchRequest.all_tags', index=45,
|
||||
number=50, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='not_tags', full_name='pb.SearchRequest.not_tags', index=46,
|
||||
number=51, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=47,
|
||||
number=52, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='has_source', full_name='pb.SearchRequest.has_source', index=48,
|
||||
number=53, type=11, cpp_type=10, label=1,
|
||||
has_default_value=False, default_value=None,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=49,
|
||||
number=54, type=5, cpp_type=1, label=1,
|
||||
has_default_value=False, default_value=0,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='any_languages', full_name='pb.SearchRequest.any_languages', index=50,
|
||||
number=55, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='all_languages', full_name='pb.SearchRequest.all_languages', index=51,
|
||||
number=56, type=9, cpp_type=9, label=3,
|
||||
has_default_value=False, default_value=[],
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=52,
|
||||
number=57, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='no_totals', full_name='pb.SearchRequest.no_totals', index=53,
|
||||
number=58, type=8, cpp_type=7, label=1,
|
||||
has_default_value=False, default_value=False,
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
_descriptor.FieldDescriptor(
|
||||
name='sd_hash', full_name='pb.SearchRequest.sd_hash', index=54,
|
||||
number=59, type=9, cpp_type=9, label=1,
|
||||
has_default_value=False, default_value=b"".decode('utf-8'),
|
||||
message_type=None, enum_type=None, containing_type=None,
|
||||
is_extension=False, extension_scope=None,
|
||||
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
|
||||
],
|
||||
extensions=[
|
||||
],
|
||||
nested_types=[],
|
||||
enum_types=[
|
||||
],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax='proto3',
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
],
|
||||
serialized_start=422,
|
||||
serialized_end=1972,
|
||||
)
|
||||
|
||||
_HELLOMESSAGE.fields_by_name['servers'].message_type = _SERVERMESSAGE
|
||||
_RANGEFIELD.fields_by_name['op'].enum_type = _RANGEFIELD_OP
|
||||
_RANGEFIELD_OP.containing_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['claim_id'].message_type = _INVERTIBLEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['channel_id'].message_type = _INVERTIBLEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['tx_position'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['amount'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['timestamp'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['creation_timestamp'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['height'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['creation_height'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['activation_height'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['expiration_height'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['release_time'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['repost_count'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['fee_amount'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['duration'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['censor_type'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['is_signature_valid'].message_type = _BOOLVALUE
|
||||
_SEARCHREQUEST.fields_by_name['effective_amount'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['support_amount'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['trending_score'].message_type = _RANGEFIELD
|
||||
_SEARCHREQUEST.fields_by_name['tx_nout'].message_type = _UINT32VALUE
|
||||
_SEARCHREQUEST.fields_by_name['has_source'].message_type = _BOOLVALUE
|
||||
DESCRIPTOR.message_types_by_name['EmptyMessage'] = _EMPTYMESSAGE
|
||||
DESCRIPTOR.message_types_by_name['ServerMessage'] = _SERVERMESSAGE
|
||||
DESCRIPTOR.message_types_by_name['HelloMessage'] = _HELLOMESSAGE
|
||||
DESCRIPTOR.message_types_by_name['InvertibleField'] = _INVERTIBLEFIELD
|
||||
DESCRIPTOR.message_types_by_name['StringValue'] = _STRINGVALUE
|
||||
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
|
||||
DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE
|
||||
DESCRIPTOR.message_types_by_name['RangeField'] = _RANGEFIELD
|
||||
DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST
|
||||
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
||||
|
||||
EmptyMessage = _reflection.GeneratedProtocolMessageType('EmptyMessage', (_message.Message,), {
|
||||
'DESCRIPTOR' : _EMPTYMESSAGE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.EmptyMessage)
|
||||
})
|
||||
_sym_db.RegisterMessage(EmptyMessage)
|
||||
|
||||
ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), {
|
||||
'DESCRIPTOR' : _SERVERMESSAGE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.ServerMessage)
|
||||
})
|
||||
_sym_db.RegisterMessage(ServerMessage)
|
||||
|
||||
HelloMessage = _reflection.GeneratedProtocolMessageType('HelloMessage', (_message.Message,), {
|
||||
'DESCRIPTOR' : _HELLOMESSAGE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.HelloMessage)
|
||||
})
|
||||
_sym_db.RegisterMessage(HelloMessage)
|
||||
|
||||
InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_message.Message,), {
|
||||
'DESCRIPTOR' : _INVERTIBLEFIELD,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.InvertibleField)
|
||||
})
|
||||
_sym_db.RegisterMessage(InvertibleField)
|
||||
|
||||
StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), {
|
||||
'DESCRIPTOR' : _STRINGVALUE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.StringValue)
|
||||
})
|
||||
_sym_db.RegisterMessage(StringValue)
|
||||
|
||||
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), {
|
||||
'DESCRIPTOR' : _BOOLVALUE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.BoolValue)
|
||||
})
|
||||
_sym_db.RegisterMessage(BoolValue)
|
||||
|
||||
UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), {
|
||||
'DESCRIPTOR' : _UINT32VALUE,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.UInt32Value)
|
||||
})
|
||||
_sym_db.RegisterMessage(UInt32Value)
|
||||
|
||||
RangeField = _reflection.GeneratedProtocolMessageType('RangeField', (_message.Message,), {
|
||||
'DESCRIPTOR' : _RANGEFIELD,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.RangeField)
|
||||
})
|
||||
_sym_db.RegisterMessage(RangeField)
|
||||
|
||||
SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), {
|
||||
'DESCRIPTOR' : _SEARCHREQUEST,
|
||||
'__module__' : 'hub_pb2'
|
||||
# @@protoc_insertion_point(class_scope:pb.SearchRequest)
|
||||
})
|
||||
_sym_db.RegisterMessage(SearchRequest)
|
||||
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
|
||||
_HUB = _descriptor.ServiceDescriptor(
|
||||
name='Hub',
|
||||
full_name='pb.Hub',
|
||||
file=DESCRIPTOR,
|
||||
index=0,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
serialized_start=1975,
|
||||
serialized_end=2367,
|
||||
methods=[
|
||||
_descriptor.MethodDescriptor(
|
||||
name='Search',
|
||||
full_name='pb.Hub.Search',
|
||||
index=0,
|
||||
containing_service=None,
|
||||
input_type=_SEARCHREQUEST,
|
||||
output_type=result__pb2._OUTPUTS,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.MethodDescriptor(
|
||||
name='Ping',
|
||||
full_name='pb.Hub.Ping',
|
||||
index=1,
|
||||
containing_service=None,
|
||||
input_type=_EMPTYMESSAGE,
|
||||
output_type=_STRINGVALUE,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.MethodDescriptor(
|
||||
name='Hello',
|
||||
full_name='pb.Hub.Hello',
|
||||
index=2,
|
||||
containing_service=None,
|
||||
input_type=_HELLOMESSAGE,
|
||||
output_type=_HELLOMESSAGE,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.MethodDescriptor(
|
||||
name='AddPeer',
|
||||
full_name='pb.Hub.AddPeer',
|
||||
index=3,
|
||||
containing_service=None,
|
||||
input_type=_SERVERMESSAGE,
|
||||
output_type=_STRINGVALUE,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.MethodDescriptor(
|
||||
name='PeerSubscribe',
|
||||
full_name='pb.Hub.PeerSubscribe',
|
||||
index=4,
|
||||
containing_service=None,
|
||||
input_type=_SERVERMESSAGE,
|
||||
output_type=_STRINGVALUE,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.MethodDescriptor(
|
||||
name='Version',
|
||||
full_name='pb.Hub.Version',
|
||||
index=5,
|
||||
containing_service=None,
|
||||
input_type=_EMPTYMESSAGE,
|
||||
output_type=_STRINGVALUE,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.MethodDescriptor(
|
||||
name='Features',
|
||||
full_name='pb.Hub.Features',
|
||||
index=6,
|
||||
containing_service=None,
|
||||
input_type=_EMPTYMESSAGE,
|
||||
output_type=_STRINGVALUE,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.MethodDescriptor(
|
||||
name='Broadcast',
|
||||
full_name='pb.Hub.Broadcast',
|
||||
index=7,
|
||||
containing_service=None,
|
||||
input_type=_EMPTYMESSAGE,
|
||||
output_type=_UINT32VALUE,
|
||||
serialized_options=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
])
|
||||
_sym_db.RegisterServiceDescriptor(_HUB)
|
||||
|
||||
DESCRIPTOR.services_by_name['Hub'] = _HUB
|
||||
|
||||
# @@protoc_insertion_point(module_scope)
|
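A minimal sketch of building a query with the generated module above; the field names and numbers (any_tags, not_tags, limit_claims_per_channel, no_totals) come from the descriptor definitions in this diff, the import path mirrors the file location shown here, and every value is purely illustrative.

from lbry.schema.types.v2 import hub_pb2

# Build a request using a few of the scalar and repeated-string fields declared above.
request = hub_pb2.SearchRequest(
    any_tags=['science', 'technology'],   # repeated string, field number 49
    not_tags=['mature'],                  # repeated string, field number 51
    limit_claims_per_channel=2,           # int32, field number 54
    no_totals=True,                       # bool, field number 58
)
wire_bytes = request.SerializeToString()  # what a client would put on the wire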
298 lbry/schema/types/v2/hub_pb2_grpc.py Normal file
|
@ -0,0 +1,298 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
||||
|
||||
from . import hub_pb2 as hub__pb2
|
||||
from . import result_pb2 as result__pb2
|
||||
|
||||
|
||||
class HubStub(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
def __init__(self, channel):
|
||||
"""Constructor.
|
||||
|
||||
Args:
|
||||
channel: A grpc.Channel.
|
||||
"""
|
||||
self.Search = channel.unary_unary(
|
||||
'/pb.Hub/Search',
|
||||
request_serializer=hub__pb2.SearchRequest.SerializeToString,
|
||||
response_deserializer=result__pb2.Outputs.FromString,
|
||||
)
|
||||
self.Ping = channel.unary_unary(
|
||||
'/pb.Hub/Ping',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Hello = channel.unary_unary(
|
||||
'/pb.Hub/Hello',
|
||||
request_serializer=hub__pb2.HelloMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.HelloMessage.FromString,
|
||||
)
|
||||
self.AddPeer = channel.unary_unary(
|
||||
'/pb.Hub/AddPeer',
|
||||
request_serializer=hub__pb2.ServerMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.PeerSubscribe = channel.unary_unary(
|
||||
'/pb.Hub/PeerSubscribe',
|
||||
request_serializer=hub__pb2.ServerMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Version = channel.unary_unary(
|
||||
'/pb.Hub/Version',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Features = channel.unary_unary(
|
||||
'/pb.Hub/Features',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.StringValue.FromString,
|
||||
)
|
||||
self.Broadcast = channel.unary_unary(
|
||||
'/pb.Hub/Broadcast',
|
||||
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
|
||||
response_deserializer=hub__pb2.UInt32Value.FromString,
|
||||
)
|
||||
|
||||
|
||||
class HubServicer(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
def Search(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Ping(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Hello(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def AddPeer(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def PeerSubscribe(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Version(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Features(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
def Broadcast(self, request, context):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details('Method not implemented!')
|
||||
raise NotImplementedError('Method not implemented!')
|
||||
|
||||
|
||||
def add_HubServicer_to_server(servicer, server):
|
||||
rpc_method_handlers = {
|
||||
'Search': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Search,
|
||||
request_deserializer=hub__pb2.SearchRequest.FromString,
|
||||
response_serializer=result__pb2.Outputs.SerializeToString,
|
||||
),
|
||||
'Ping': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Ping,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Hello': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Hello,
|
||||
request_deserializer=hub__pb2.HelloMessage.FromString,
|
||||
response_serializer=hub__pb2.HelloMessage.SerializeToString,
|
||||
),
|
||||
'AddPeer': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.AddPeer,
|
||||
request_deserializer=hub__pb2.ServerMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'PeerSubscribe': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.PeerSubscribe,
|
||||
request_deserializer=hub__pb2.ServerMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Version': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Version,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Features': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Features,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.StringValue.SerializeToString,
|
||||
),
|
||||
'Broadcast': grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Broadcast,
|
||||
request_deserializer=hub__pb2.EmptyMessage.FromString,
|
||||
response_serializer=hub__pb2.UInt32Value.SerializeToString,
|
||||
),
|
||||
}
|
||||
generic_handler = grpc.method_handlers_generic_handler(
|
||||
'pb.Hub', rpc_method_handlers)
|
||||
server.add_generic_rpc_handlers((generic_handler,))
|
||||
|
||||
|
||||
# This class is part of an EXPERIMENTAL API.
|
||||
class Hub(object):
|
||||
"""Missing associated documentation comment in .proto file."""
|
||||
|
||||
@staticmethod
|
||||
def Search(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Search',
|
||||
hub__pb2.SearchRequest.SerializeToString,
|
||||
result__pb2.Outputs.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Ping(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Ping',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Hello(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Hello',
|
||||
hub__pb2.HelloMessage.SerializeToString,
|
||||
hub__pb2.HelloMessage.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def AddPeer(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/AddPeer',
|
||||
hub__pb2.ServerMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def PeerSubscribe(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/PeerSubscribe',
|
||||
hub__pb2.ServerMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Version(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Version',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Features(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Features',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.StringValue.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
||||
|
||||
@staticmethod
|
||||
def Broadcast(request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
insecure=False,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None):
|
||||
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Broadcast',
|
||||
hub__pb2.EmptyMessage.SerializeToString,
|
||||
hub__pb2.UInt32Value.FromString,
|
||||
options, channel_credentials,
|
||||
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
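A minimal sketch of driving the stub and servicer defined above; the localhost address, port and thread-pool size are assumptions, and StringValue is assumed to expose its payload as a `value` attribute.

from concurrent import futures
import grpc
from lbry.schema.types.v2 import hub_pb2, hub_pb2_grpc

def ping_hub(address: str = 'localhost:50051') -> str:
    # Client side: open a plaintext channel and call the '/pb.Hub/Ping' RPC.
    with grpc.insecure_channel(address) as channel:
        stub = hub_pb2_grpc.HubStub(channel)
        reply = stub.Ping(hub_pb2.EmptyMessage())
        return reply.value

def serve(port: int = 50051) -> grpc.Server:
    # Server side: register a (still unimplemented) servicer with a grpc server.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    hub_pb2_grpc.add_HubServicer_to_server(hub_pb2_grpc.HubServicer(), server)
    server.add_insecure_port(f'[::]:{port}')
    server.start()
    return server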
4 lbry/schema/types/v2/result_pb2_grpc.py Normal file
|
@ -0,0 +1,4 @@
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
|
@ -1,139 +0,0 @@
|
|||
{
|
||||
"title": "Wallet",
|
||||
"description": "An LBC wallet",
|
||||
"type": "object",
|
||||
"required": ["name", "version", "accounts", "preferences"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "Human readable name for this wallet",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Wallet spec version",
|
||||
"type": "integer",
|
||||
"$comment": "Should this be a string? We may need some sort of decimal type if we want exact decimal versions."
|
||||
},
|
||||
"accounts": {
|
||||
"description": "Accounts associated with this wallet",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"required": ["address_generator", "certificates", "encrypted", "ledger", "modified_on", "name", "private_key", "public_key", "seed"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"address_generator": {
|
||||
"description": "Higher level manager of either singular or deterministically generated addresses",
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{
|
||||
"required": ["name", "change", "receiving"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "type of address generator: a deterministic chain of addresses",
|
||||
"enum": ["deterministic-chain"],
|
||||
"type": "string"
|
||||
},
|
||||
"change": {
|
||||
"$ref": "#/$defs/address_manager",
|
||||
"description": "Manager for deterministically generated change address (not used for single address)"
|
||||
},
|
||||
"receiving": {
|
||||
"$ref": "#/$defs/address_manager",
|
||||
"description": "Manager for deterministically generated receiving address (not used for single address)"
|
||||
}
|
||||
}
|
||||
}, {
|
||||
"required": ["name"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "type of address generator: a single address",
|
||||
"enum": ["single-address"],
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"certificates": {
|
||||
"type": "object",
|
||||
"description": "Channel keys. Mapping from public key address to pem-formatted private key.",
|
||||
"additionalProperties": {"type": "string"}
|
||||
},
|
||||
"encrypted": {
|
||||
"type": "boolean",
|
||||
"description": "Whether private key and seed are encrypted with a password"
|
||||
},
|
||||
"ledger": {
|
||||
"description": "Which network to use",
|
||||
"type": "string",
|
||||
"examples": [
|
||||
"lbc_mainnet",
|
||||
"lbc_testnet"
|
||||
]
|
||||
},
|
||||
"modified_on": {
|
||||
"description": "last modified time in Unix Time",
|
||||
"type": "integer"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name for account, possibly human readable",
|
||||
"type": "string"
|
||||
},
|
||||
"private_key": {
|
||||
"description": "Private key for address if `address_generator` is a single address. Root of chain of private keys for addresses if `address_generator` is a deterministic chain of addresses. Encrypted if `encrypted` is true.",
|
||||
"type": "string"
|
||||
},
|
||||
"public_key": {
|
||||
"description": "Public key for address if `address_generator` is a single address. Root of chain of public keys for addresses if `address_generator` is a deterministic chain of addresses.",
|
||||
"type": "string"
|
||||
},
|
||||
"seed": {
|
||||
"description": "Human readable representation of `private_key`. encrypted if `encrypted` is set to `true`",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"preferences": {
|
||||
"description": "Timestamped application-level preferences. Values can be objects or of a primitive type.",
|
||||
"$comment": "enable-sync is seen in example wallet. encrypt-on-disk is seen in example wallet. they both have a boolean `value` field. Do we want them explicitly defined here? local and shared seem to have at least a similar structure (type, value [yes, again], version), value being the free-form part. Should we define those here? Or can there be any key under preferences, and `value` be literally be anything in any form?",
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"required": ["ts", "value"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"ts": {
|
||||
"type": "number",
|
||||
"description": "When the item was set, in Unix time format.",
|
||||
"$comment": "Do we want a string (decimal)?"
|
||||
},
|
||||
"value": {
|
||||
"$comment": "Sometimes this has been an object, sometimes just a boolean. I don't want to prescribe anything."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"$defs": {
|
||||
"address_manager": {
|
||||
"description": "Manager for deterministically generated addresses",
|
||||
"type": "object",
|
||||
"required": ["gap", "maximum_uses_per_address"],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"gap": {
|
||||
"description": "Maximum allowed consecutive generated addresses with no transactions",
|
||||
"type": "integer"
|
||||
},
|
||||
"maximum_uses_per_address": {
|
||||
"description": "Maximum number of uses for each generated address",
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
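For reference, a sketch (Python, placeholders only) of a wallet document shaped to satisfy the schema removed above; the gap sizes, timestamps and key strings are made-up values, not real material.

example_wallet = {
    "name": "My Wallet",                      # human readable wallet name
    "version": 1,                             # wallet spec version
    "preferences": {
        "enable-sync": {"ts": 1600000000, "value": True},
    },
    "accounts": [{
        "name": "default",
        "ledger": "lbc_mainnet",
        "modified_on": 1600000000,
        "encrypted": False,
        "seed": "<placeholder seed words>",
        "private_key": "<placeholder root private key>",
        "public_key": "<placeholder root public key>",
        "certificates": {},                   # public key address -> pem private key
        "address_generator": {
            "name": "deterministic-chain",
            "change": {"gap": 6, "maximum_uses_per_address": 1},
            "receiving": {"gap": 20, "maximum_uses_per_address": 1},
        },
    }],
}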
|
@ -23,7 +23,6 @@ class BackgroundDownloader:
|
|||
except ValueError:
|
||||
return
|
||||
except asyncio.CancelledError:
|
||||
log.debug("Cancelled background downloader")
|
||||
raise
|
||||
except Exception:
|
||||
log.error("Unexpected download error on background downloader")
|
||||
|
|
|
@ -194,13 +194,12 @@ class StreamDescriptor:
|
|||
raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
|
||||
if any(i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])):
|
||||
raise InvalidStreamDescriptorError("Stream contains out of order or skipped blobs")
|
||||
added_on = time.time()
|
||||
descriptor = cls(
|
||||
loop, blob_dir,
|
||||
binascii.unhexlify(decoded['stream_name']).decode(),
|
||||
decoded['key'],
|
||||
binascii.unhexlify(decoded['suggested_file_name']).decode(),
|
||||
[BlobInfo(info['blob_num'], info['length'], info['iv'], added_on, info.get('blob_hash'))
|
||||
[BlobInfo(info['blob_num'], info['length'], info['iv'], info.get('blob_hash'))
|
||||
for info in decoded['blobs']],
|
||||
decoded['stream_hash'],
|
||||
blob.blob_hash
|
||||
|
@ -267,7 +266,7 @@ class StreamDescriptor:
|
|||
blobs.append(blob_info)
|
||||
blobs.append(
|
||||
# add the stream terminator
|
||||
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), added_on, None, True)
|
||||
BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), None, added_on, True)
|
||||
)
|
||||
file_name = os.path.basename(file_path)
|
||||
suggested_file_name = sanitize_file_name(file_name)
|
||||
|
|
|
@ -8,8 +8,6 @@ from lbry.error import DownloadSDTimeoutError
|
|||
from lbry.utils import lru_cache_concurrent
|
||||
from lbry.stream.descriptor import StreamDescriptor
|
||||
from lbry.blob_exchange.downloader import BlobDownloader
|
||||
from lbry.torrent.tracker import enqueue_tracker_search
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.dht.node import Node
|
||||
|
@ -27,8 +25,8 @@ class StreamDownloader:
|
|||
self.config = config
|
||||
self.blob_manager = blob_manager
|
||||
self.sd_hash = sd_hash
|
||||
self.search_queue = asyncio.Queue() # blob hashes to feed into the iterative finder
|
||||
self.peer_queue = asyncio.Queue() # new peers to try
|
||||
self.search_queue = asyncio.Queue(loop=loop) # blob hashes to feed into the iterative finder
|
||||
self.peer_queue = asyncio.Queue(loop=loop) # new peers to try
|
||||
self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
|
||||
self.descriptor: typing.Optional[StreamDescriptor] = descriptor
|
||||
self.node: typing.Optional['Node'] = None
|
||||
|
@ -72,7 +70,7 @@ class StreamDownloader:
|
|||
now = self.loop.time()
|
||||
sd_blob = await asyncio.wait_for(
|
||||
self.blob_downloader.download_blob(self.sd_hash, connection_id),
|
||||
self.config.blob_download_timeout
|
||||
self.config.blob_download_timeout, loop=self.loop
|
||||
)
|
||||
log.info("downloaded sd blob %s", self.sd_hash)
|
||||
self.time_to_descriptor = self.loop.time() - now
|
||||
|
@ -93,7 +91,6 @@ class StreamDownloader:
|
|||
self.accumulate_task.cancel()
|
||||
_, self.accumulate_task = self.node.accumulate_peers(self.search_queue, self.peer_queue)
|
||||
await self.add_fixed_peers()
|
||||
enqueue_tracker_search(bytes.fromhex(self.sd_hash), self.peer_queue)
|
||||
# start searching for peers for the sd hash
|
||||
self.search_queue.put_nowait(self.sd_hash)
|
||||
log.info("searching for peers for stream %s", self.sd_hash)
|
||||
|
@ -101,6 +98,10 @@ class StreamDownloader:
|
|||
if not self.descriptor:
|
||||
await self.load_descriptor(connection_id)
|
||||
|
||||
# add the head blob to the peer search
|
||||
self.search_queue.put_nowait(self.descriptor.blobs[0].blob_hash)
|
||||
log.info("added head blob to peer search for stream %s", self.sd_hash)
|
||||
|
||||
if not await self.blob_manager.storage.stream_exists(self.sd_hash) and save_stream:
|
||||
await self.blob_manager.storage.store_stream(
|
||||
self.blob_manager.get_blob(self.sd_hash, length=self.descriptor.length), self.descriptor
|
||||
|
@ -111,7 +112,7 @@ class StreamDownloader:
|
|||
raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
|
||||
blob = await asyncio.wait_for(
|
||||
self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
|
||||
self.config.blob_download_timeout * 10
|
||||
self.config.blob_download_timeout * 10, loop=self.loop
|
||||
)
|
||||
return blob
|
||||
|
||||
|
|
|
@ -16,8 +16,10 @@ from lbry.file.source import ManagedDownloadSource
|
|||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.conf import Config
|
||||
from lbry.schema.claim import Claim
|
||||
from lbry.blob.blob_manager import BlobManager
|
||||
from lbry.blob.blob_info import BlobInfo
|
||||
from lbry.dht.node import Node
|
||||
from lbry.extras.daemon.analytics import AnalyticsManager
|
||||
from lbry.wallet.transaction import Transaction
|
||||
|
||||
|
@ -60,9 +62,9 @@ class ManagedStream(ManagedDownloadSource):
|
|||
self.file_output_task: typing.Optional[asyncio.Task] = None
|
||||
self.delayed_stop_task: typing.Optional[asyncio.Task] = None
|
||||
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
|
||||
self.fully_reflected = asyncio.Event()
|
||||
self.streaming = asyncio.Event()
|
||||
self._running = asyncio.Event()
|
||||
self.fully_reflected = asyncio.Event(loop=self.loop)
|
||||
self.streaming = asyncio.Event(loop=self.loop)
|
||||
self._running = asyncio.Event(loop=self.loop)
|
||||
|
||||
@property
|
||||
def sd_hash(self) -> str:
|
||||
|
@ -82,19 +84,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
|
||||
@property
|
||||
def file_name(self) -> Optional[str]:
|
||||
return self._file_name or self.suggested_file_name
|
||||
|
||||
@property
|
||||
def suggested_file_name(self) -> Optional[str]:
|
||||
first_option = ((self.descriptor and self.descriptor.suggested_file_name) or '').strip()
|
||||
return sanitize_file_name(first_option or (self.stream_claim_info and self.stream_claim_info.claim and
|
||||
self.stream_claim_info.claim.stream.source.name))
|
||||
|
||||
@property
|
||||
def stream_name(self) -> Optional[str]:
|
||||
first_option = ((self.descriptor and self.descriptor.stream_name) or '').strip()
|
||||
return first_option or (self.stream_claim_info and self.stream_claim_info.claim and
|
||||
self.stream_claim_info.claim.stream.source.name)
|
||||
return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None)
|
||||
|
||||
@property
|
||||
def written_bytes(self) -> int:
|
||||
|
@ -128,7 +118,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
|
||||
@property
|
||||
def mime_type(self):
|
||||
return guess_media_type(os.path.basename(self.suggested_file_name))[0]
|
||||
return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0]
|
||||
|
||||
@property
|
||||
def download_path(self):
|
||||
|
@ -161,7 +151,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
|
||||
self._running.set()
|
||||
try:
|
||||
await asyncio.wait_for(self.downloader.start(), timeout)
|
||||
await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop)
|
||||
except asyncio.TimeoutError:
|
||||
self._running.clear()
|
||||
raise DownloadSDTimeoutError(self.sd_hash)
|
||||
|
@ -174,7 +164,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
if not self._file_name:
|
||||
self._file_name = await get_next_available_file_name(
|
||||
self.loop, self.download_directory,
|
||||
self._file_name or sanitize_file_name(self.suggested_file_name)
|
||||
self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
|
||||
)
|
||||
file_name, download_dir = self._file_name, self.download_directory
|
||||
else:
|
||||
|
@ -191,7 +181,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
Stop any running save/stream tasks as well as the downloader and update the status in the database
|
||||
"""
|
||||
|
||||
await self.stop_tasks()
|
||||
self.stop_tasks()
|
||||
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
|
||||
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
|
||||
|
||||
|
@ -279,7 +269,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
|
||||
self.sd_hash[:6], self.full_path)
|
||||
await self.blob_manager.storage.set_saved_file(self.stream_hash)
|
||||
except (Exception, asyncio.CancelledError) as err:
|
||||
except Exception as err:
|
||||
if os.path.isfile(output_path):
|
||||
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
|
||||
os.remove(output_path)
|
||||
|
@ -306,14 +296,14 @@ class ManagedStream(ManagedDownloadSource):
|
|||
self.download_directory = download_directory or self.download_directory or self.config.download_dir
|
||||
if not self.download_directory:
|
||||
raise ValueError("no directory to download to")
|
||||
if not (file_name or self._file_name or self.suggested_file_name):
|
||||
if not (file_name or self._file_name or self.descriptor.suggested_file_name):
|
||||
raise ValueError("no file name to download to")
|
||||
if not os.path.isdir(self.download_directory):
|
||||
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
|
||||
os.mkdir(self.download_directory)
|
||||
self._file_name = await get_next_available_file_name(
|
||||
self.loop, self.download_directory,
|
||||
file_name or self._file_name or sanitize_file_name(self.suggested_file_name)
|
||||
file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
|
||||
)
|
||||
await self.blob_manager.storage.change_file_download_dir_and_file_name(
|
||||
self.stream_hash, self.download_directory, self.file_name
|
||||
|
@ -321,16 +311,15 @@ class ManagedStream(ManagedDownloadSource):
|
|||
await self.update_status(ManagedStream.STATUS_RUNNING)
|
||||
self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
|
||||
try:
|
||||
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout)
|
||||
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
|
||||
except asyncio.TimeoutError:
|
||||
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
|
||||
await self.stop_tasks()
|
||||
self.stop_tasks()
|
||||
await self.update_status(ManagedStream.STATUS_STOPPED)
|
||||
|
||||
async def stop_tasks(self):
|
||||
def stop_tasks(self):
|
||||
if self.file_output_task and not self.file_output_task.done():
|
||||
self.file_output_task.cancel()
|
||||
await asyncio.gather(self.file_output_task, return_exceptions=True)
|
||||
self.file_output_task = None
|
||||
while self.streaming_responses:
|
||||
req, response = self.streaming_responses.pop()
|
||||
|
@ -367,7 +356,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
return sent
|
||||
except ConnectionError:
|
||||
return sent
|
||||
except (OSError, Exception, asyncio.CancelledError) as err:
|
||||
except (OSError, Exception) as err:
|
||||
if isinstance(err, asyncio.CancelledError):
|
||||
log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id)
|
||||
elif isinstance(err, OSError):
|
||||
|
@ -402,7 +391,7 @@ class ManagedStream(ManagedDownloadSource):
|
|||
self.sd_hash[:6])
|
||||
await self.stop()
|
||||
return
|
||||
await asyncio.sleep(1)
|
||||
await asyncio.sleep(1, loop=self.loop)
|
||||
|
||||
def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
|
||||
if '=' in get_range:
|
||||
|
|
|
@ -21,7 +21,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.loop = asyncio.get_event_loop()
|
||||
self.blob_manager = blob_manager
|
||||
self.server_task: asyncio.Task = None
|
||||
self.started_listening = asyncio.Event()
|
||||
self.started_listening = asyncio.Event(loop=self.loop)
|
||||
self.buf = b''
|
||||
self.transport: asyncio.StreamWriter = None
|
||||
self.writer: typing.Optional['HashBlobWriter'] = None
|
||||
|
@ -29,9 +29,9 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.descriptor: typing.Optional['StreamDescriptor'] = None
|
||||
self.sd_blob: typing.Optional['BlobFile'] = None
|
||||
self.received = []
|
||||
self.incoming = incoming_event or asyncio.Event()
|
||||
self.not_incoming = not_incoming_event or asyncio.Event()
|
||||
self.stop_event = stop_event or asyncio.Event()
|
||||
self.incoming = incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.stop_event = stop_event or asyncio.Event(loop=self.loop)
|
||||
self.chunk_size = response_chunk_size
|
||||
self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
|
||||
self.partial_event = partial_event
|
||||
|
@ -94,7 +94,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.incoming.set()
|
||||
self.send_response({"send_sd_blob": True})
|
||||
try:
|
||||
await asyncio.wait_for(self.sd_blob.verified.wait(), 30)
|
||||
await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop)
|
||||
self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
|
||||
self.loop, self.blob_manager.blob_dir, self.sd_blob
|
||||
)
|
||||
|
@ -140,7 +140,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
|
|||
self.incoming.set()
|
||||
self.send_response({"send_blob": True})
|
||||
try:
|
||||
await asyncio.wait_for(blob.verified.wait(), 30)
|
||||
await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop)
|
||||
self.send_response({"received_blob": True})
|
||||
except asyncio.TimeoutError:
|
||||
self.send_response({"received_blob": False})
|
||||
|
@ -162,10 +162,10 @@ class ReflectorServer:
|
|||
self.loop = asyncio.get_event_loop()
|
||||
self.blob_manager = blob_manager
|
||||
self.server_task: typing.Optional[asyncio.Task] = None
|
||||
self.started_listening = asyncio.Event()
|
||||
self.stopped_listening = asyncio.Event()
|
||||
self.incoming_event = incoming_event or asyncio.Event()
|
||||
self.not_incoming_event = not_incoming_event or asyncio.Event()
|
||||
self.started_listening = asyncio.Event(loop=self.loop)
|
||||
self.stopped_listening = asyncio.Event(loop=self.loop)
|
||||
self.incoming_event = incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop)
|
||||
self.response_chunk_size = response_chunk_size
|
||||
self.stop_event = stop_event
|
||||
self.partial_needs = partial_needs # for testing cases where it doesn't know what it wants
|
||||
|
|
|
@ -54,7 +54,7 @@ class StreamManager(SourceManager):
|
|||
self.re_reflect_task: Optional[asyncio.Task] = None
|
||||
self.update_stream_finished_futs: typing.List[asyncio.Future] = []
|
||||
self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
|
||||
self.started = asyncio.Event()
|
||||
self.started = asyncio.Event(loop=self.loop)
|
||||
|
||||
@property
|
||||
def streams(self):
|
||||
|
@ -70,7 +70,6 @@ class StreamManager(SourceManager):
|
|||
|
||||
async def recover_streams(self, file_infos: typing.List[typing.Dict]):
|
||||
to_restore = []
|
||||
to_check = []
|
||||
|
||||
async def recover_stream(sd_hash: str, stream_hash: str, stream_name: str,
|
||||
suggested_file_name: str, key: str,
|
||||
|
@ -83,7 +82,6 @@ class StreamManager(SourceManager):
|
|||
if not descriptor:
|
||||
return
|
||||
to_restore.append((descriptor, sd_blob, content_fee))
|
||||
to_check.extend([sd_blob.blob_hash] + [blob.blob_hash for blob in descriptor.blobs[:-1]])
|
||||
|
||||
await asyncio.gather(*[
|
||||
recover_stream(
|
||||
|
@ -95,8 +93,6 @@ class StreamManager(SourceManager):
|
|||
|
||||
if to_restore:
|
||||
await self.storage.recover_streams(to_restore, self.config.download_dir)
|
||||
if to_check:
|
||||
await self.blob_manager.ensure_completed_blobs_status(to_check)
|
||||
|
||||
# if self.blob_manager._save_blobs:
|
||||
# log.info("Recovered %i/%i attempted streams", len(to_restore), len(file_infos))
|
||||
|
@ -150,7 +146,7 @@ class StreamManager(SourceManager):
|
|||
file_info['added_on'], file_info['fully_reflected']
|
||||
)))
|
||||
if add_stream_tasks:
|
||||
await asyncio.gather(*add_stream_tasks)
|
||||
await asyncio.gather(*add_stream_tasks, loop=self.loop)
|
||||
log.info("Started stream manager with %i files", len(self._sources))
|
||||
if not self.node:
|
||||
log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
|
||||
|
@ -159,11 +155,14 @@ class StreamManager(SourceManager):
|
|||
self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
|
||||
*(self._sources[sd_hash].save_file(file_name, download_directory)
|
||||
for (file_name, download_directory, sd_hash) in to_resume_saving),
|
||||
loop=self.loop
|
||||
))
|
||||
|
||||
async def reflect_streams(self):
|
||||
try:
|
||||
return await self._reflect_streams()
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except Exception:
|
||||
log.exception("reflector task encountered an unexpected error!")
|
||||
|
||||
|
@ -183,21 +182,21 @@ class StreamManager(SourceManager):
|
|||
batch.append(self.reflect_stream(stream))
|
||||
if len(batch) >= self.config.concurrent_reflector_uploads:
|
||||
log.debug("waiting for batch of %s reflecting streams", len(batch))
|
||||
await asyncio.gather(*batch)
|
||||
await asyncio.gather(*batch, loop=self.loop)
|
||||
log.debug("done processing %s streams", len(batch))
|
||||
batch = []
|
||||
if batch:
|
||||
log.debug("waiting for batch of %s reflecting streams", len(batch))
|
||||
await asyncio.gather(*batch)
|
||||
await asyncio.gather(*batch, loop=self.loop)
|
||||
log.debug("done processing %s streams", len(batch))
|
||||
await asyncio.sleep(300)
|
||||
await asyncio.sleep(300, loop=self.loop)
|
||||
|
||||
async def start(self):
|
||||
await super().start()
|
||||
self.re_reflect_task = self.loop.create_task(self.reflect_streams())
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
def stop(self):
|
||||
super().stop()
|
||||
if self.resume_saving_task and not self.resume_saving_task.done():
|
||||
self.resume_saving_task.cancel()
|
||||
if self.re_reflect_task and not self.re_reflect_task.done():
|
||||
|
@ -224,8 +223,7 @@ class StreamManager(SourceManager):
|
|||
)
|
||||
return task
|
||||
|
||||
@staticmethod
|
||||
async def _retriable_reflect_stream(stream, host, port):
|
||||
async def _retriable_reflect_stream(self, stream, host, port):
|
||||
sent = await stream.upload_to_reflector(host, port)
|
||||
while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
|
||||
stream.reflector_progress = 0
|
||||
|
@ -260,7 +258,7 @@ class StreamManager(SourceManager):
|
|||
return
|
||||
if source.identifier in self.running_reflector_uploads:
|
||||
self.running_reflector_uploads[source.identifier].cancel()
|
||||
await source.stop_tasks()
|
||||
source.stop_tasks()
|
||||
if source.identifier in self.streams:
|
||||
del self.streams[source.identifier]
|
||||
blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]]
|
||||
|
|
|
@ -19,7 +19,7 @@ from lbry.conf import Config
|
|||
from lbry.wallet.util import satoshis_to_coins
|
||||
from lbry.wallet.dewies import lbc_to_dewies
|
||||
from lbry.wallet.orchstr8 import Conductor
|
||||
from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode
|
||||
from lbry.wallet.orchstr8.node import BlockchainNode, WalletNode, HubNode
|
||||
from lbry.schema.claim import Claim
|
||||
|
||||
from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty
|
||||
|
@ -204,13 +204,7 @@ class AsyncioTestCase(unittest.TestCase):
|
|||
|
||||
def add_timeout(self):
|
||||
if self.TIMEOUT:
|
||||
self.loop.call_later(self.TIMEOUT, self.check_timeout, time())
|
||||
|
||||
def check_timeout(self, started):
|
||||
if time() - started >= self.TIMEOUT:
|
||||
self.cancel()
|
||||
else:
|
||||
self.loop.call_later(self.TIMEOUT, self.check_timeout, started)
|
||||
self.loop.call_later(self.TIMEOUT, self.cancel)
|
||||
|
||||
|
||||
class AdvanceTimeTestCase(AsyncioTestCase):
|
||||
|
@ -236,7 +230,8 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.conductor: Optional[Conductor] = None
|
||||
self.blockchain: Optional[LBCWalletNode] = None
|
||||
self.blockchain: Optional[BlockchainNode] = None
|
||||
self.hub: Optional[HubNode] = None
|
||||
self.wallet_node: Optional[WalletNode] = None
|
||||
self.manager: Optional[WalletManager] = None
|
||||
self.ledger: Optional[Ledger] = None
|
||||
|
@ -245,15 +240,16 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
|
||||
async def asyncSetUp(self):
|
||||
self.conductor = Conductor(seed=self.SEED)
|
||||
await self.conductor.start_lbcd()
|
||||
self.addCleanup(self.conductor.stop_lbcd)
|
||||
await self.conductor.start_lbcwallet()
|
||||
self.addCleanup(self.conductor.stop_lbcwallet)
|
||||
await self.conductor.start_blockchain()
|
||||
self.addCleanup(self.conductor.stop_blockchain)
|
||||
await self.conductor.start_spv()
|
||||
self.addCleanup(self.conductor.stop_spv)
|
||||
await self.conductor.start_wallet()
|
||||
self.addCleanup(self.conductor.stop_wallet)
|
||||
self.blockchain = self.conductor.lbcwallet_node
|
||||
await self.conductor.start_hub()
|
||||
self.addCleanup(self.conductor.stop_hub)
|
||||
self.blockchain = self.conductor.blockchain_node
|
||||
self.hub = self.conductor.hub_node
|
||||
self.wallet_node = self.conductor.wallet_node
|
||||
self.manager = self.wallet_node.manager
|
||||
self.ledger = self.wallet_node.ledger
|
||||
|
@ -267,13 +263,6 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
def broadcast(self, tx):
|
||||
return self.ledger.broadcast(tx)
|
||||
|
||||
async def broadcast_and_confirm(self, tx, ledger=None):
|
||||
ledger = ledger or self.ledger
|
||||
notifications = asyncio.create_task(ledger.wait(tx))
|
||||
await ledger.broadcast(tx)
|
||||
await notifications
|
||||
await self.generate_and_wait(1, [tx.id], ledger)
|
||||
|
||||
async def on_header(self, height):
|
||||
if self.ledger.headers.height < height:
|
||||
await self.ledger.on_header.where(
|
||||
|
@ -281,29 +270,11 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
)
|
||||
return True
|
||||
|
||||
async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None):
|
||||
tx_watch = []
|
||||
txid = None
|
||||
done = False
|
||||
watcher = (ledger or self.ledger).on_transaction.where(
|
||||
lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id)
|
||||
def on_transaction_id(self, txid, ledger=None):
|
||||
return (ledger or self.ledger).on_transaction.where(
|
||||
lambda e: e.tx.id == txid
|
||||
)
|
||||
|
||||
txid = await self.blockchain.send_to_address(address, amount)
|
||||
done = txid in tx_watch
|
||||
await watcher
|
||||
|
||||
await self.generate_and_wait(blocks_to_generate, [txid], ledger)
|
||||
return txid
|
||||
|
||||
async def generate_and_wait(self, blocks_to_generate, txids, ledger=None):
|
||||
if blocks_to_generate > 0:
|
||||
watcher = (ledger or self.ledger).on_transaction.where(
|
||||
lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda
|
||||
)
|
||||
await self.generate(blocks_to_generate)
|
||||
await watcher
|
||||
|
||||
def on_address_update(self, address):
|
||||
return self.ledger.on_transaction.where(
|
||||
lambda e: e.address == address
|
||||
|
@ -319,16 +290,8 @@ class IntegrationTestCase(AsyncioTestCase):
|
|||
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
|
||||
self.conductor.spv_node.server.synchronized.clear()
|
||||
await self.blockchain.generate(blocks)
|
||||
height = self.blockchain.block_expected
|
||||
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
|
||||
while True:
|
||||
await self.conductor.spv_node.server.synchronized.wait()
|
||||
self.conductor.spv_node.server.synchronized.clear()
|
||||
if self.conductor.spv_node.server.db.db_height < height:
|
||||
continue
|
||||
if self.conductor.spv_node.server._es_height < height:
|
||||
continue
|
||||
break
|
||||
await self.conductor.spv_node.server.synchronized.wait()
|
||||
|
||||
|
||||
class FakeExchangeRateManager(ExchangeRateManager):
|
||||
|
@ -390,19 +353,20 @@ class CommandTestCase(IntegrationTestCase):
|
|||
self.skip_libtorrent = True
|
||||
|
||||
async def asyncSetUp(self):
|
||||
await super().asyncSetUp()
|
||||
|
||||
logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
|
||||
logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
|
||||
logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
|
||||
logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)
|
||||
|
||||
await super().asyncSetUp()
|
||||
|
||||
self.daemon = await self.add_daemon(self.wallet_node)
|
||||
|
||||
await self.account.ensure_address_gap()
|
||||
address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
|
||||
await self.send_to_address_and_wait(address, 10, 6)
|
||||
sendtxid = await self.blockchain.send_to_address(address, 10)
|
||||
await self.confirm_tx(sendtxid)
|
||||
await self.generate(5)
|
||||
|
||||
server_tmp_dir = tempfile.mkdtemp()
|
||||
self.addCleanup(shutil.rmtree, server_tmp_dir)
|
||||
|
@ -499,14 +463,9 @@ class CommandTestCase(IntegrationTestCase):
|
|||
|
||||
async def confirm_tx(self, txid, ledger=None):
|
||||
""" Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
|
||||
# await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
|
||||
on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
|
||||
await self.on_transaction_id(txid, ledger)
|
||||
on_tx = self.on_transaction_id(txid, ledger)
|
||||
await asyncio.wait([self.generate(1), on_tx], timeout=5)
|
||||
|
||||
# # actually, if it's in the mempool or in the block we're fine
|
||||
# await self.generate_and_wait(1, [txid], ledger=ledger)
|
||||
# return txid
|
||||
|
||||
return txid
|
||||
|
||||
async def on_transaction_dict(self, tx):
|
||||
|
@ -551,7 +510,7 @@ class CommandTestCase(IntegrationTestCase):
|
|||
return self.sout(tx)
|
||||
return tx
|
||||
|
||||
async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
|
||||
async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None):
|
||||
account = (daemon or self.daemon).wallet_manager.default_account
|
||||
claim_address = await account.receiving.get_or_create_usable_address()
|
||||
claim = Claim()
|
||||
|
@ -561,7 +520,7 @@ class CommandTestCase(IntegrationTestCase):
|
|||
claim_address, [self.account], self.account
|
||||
)
|
||||
await tx.sign([self.account])
|
||||
await (daemon or self.daemon).broadcast_or_release(tx, blocking)
|
||||
await (daemon or self.daemon).broadcast_or_release(tx, False)
|
||||
return self.sout(tx)
|
||||
|
||||
def create_upload_file(self, data, prefix=None, suffix=None):
|
||||
|
|
|
@ -10,13 +10,47 @@ from typing import Optional
|
|||
import libtorrent
|
||||
|
||||
|
||||
NOTIFICATION_MASKS = [
|
||||
"error",
|
||||
"peer",
|
||||
"port_mapping",
|
||||
"storage",
|
||||
"tracker",
|
||||
"debug",
|
||||
"status",
|
||||
"progress",
|
||||
"ip_block",
|
||||
"dht",
|
||||
"stats",
|
||||
"session_log",
|
||||
"torrent_log",
|
||||
"peer_log",
|
||||
"incoming_request",
|
||||
"dht_log",
|
||||
"dht_operation",
|
||||
"port_mapping_log",
|
||||
"picker_log",
|
||||
"file_progress",
|
||||
"piece_progress",
|
||||
"upload",
|
||||
"block_progress"
|
||||
]
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted?
|
||||
libtorrent.add_torrent_params_flags_t.flag_auto_managed
|
||||
| libtorrent.add_torrent_params_flags_t.flag_update_subscribe
|
||||
)
|
||||
|
||||
|
||||
def get_notification_type(notification) -> str:
|
||||
for i, notification_type in enumerate(NOTIFICATION_MASKS):
|
||||
if (1 << i) & notification:
|
||||
return notification_type
|
||||
raise ValueError("unrecognized notification type")
|
||||
|
||||
|
||||
class TorrentHandle:
|
||||
def __init__(self, loop, executor, handle):
|
||||
self._loop = loop
|
||||
|
@ -87,7 +121,7 @@ class TorrentHandle:
|
|||
self._show_status()
|
||||
if self.finished.is_set():
|
||||
break
|
||||
await asyncio.sleep(0.1)
|
||||
await asyncio.sleep(0.1, loop=self._loop)
|
||||
|
||||
async def pause(self):
|
||||
await self._loop.run_in_executor(
|
||||
|
@ -122,8 +156,10 @@ class TorrentSession:
|
|||
async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
|
||||
settings = {
|
||||
'listen_interfaces': f"{interface}:{port}",
|
||||
'enable_natpmp': False,
|
||||
'enable_upnp': False
|
||||
'enable_outgoing_utp': True,
|
||||
'enable_incoming_utp': True,
|
||||
'enable_outgoing_tcp': False,
|
||||
'enable_incoming_tcp': False
|
||||
}
|
||||
self._session = await self._loop.run_in_executor(
|
||||
self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member
|
||||
|
@ -150,7 +186,7 @@ class TorrentSession:
|
|||
await self._loop.run_in_executor(
|
||||
self._executor, self._pop_alerts
|
||||
)
|
||||
await asyncio.sleep(1)
|
||||
await asyncio.sleep(1, loop=self._loop)
|
||||
|
||||
async def pause(self):
|
||||
await self._loop.run_in_executor(
|
||||
|
|
|
@ -36,7 +36,7 @@ class Torrent:
|
|||
def __init__(self, loop, handle):
|
||||
self._loop = loop
|
||||
self._handle = handle
|
||||
self.finished = asyncio.Event()
|
||||
self.finished = asyncio.Event(loop=loop)
|
||||
|
||||
def _threaded_update_status(self):
|
||||
status = self._handle.status()
|
||||
|
@ -58,7 +58,7 @@ class Torrent:
|
|||
log.info("finished downloading torrent!")
|
||||
await self.pause()
|
||||
break
|
||||
await asyncio.sleep(1)
|
||||
await asyncio.sleep(1, loop=self._loop)
|
||||
|
||||
async def pause(self):
|
||||
log.info("pause torrent")
|
||||
|
|
|
@ -74,7 +74,7 @@ class TorrentSource(ManagedDownloadSource):
|
|||
def bt_infohash(self):
|
||||
return self.identifier
|
||||
|
||||
async def stop_tasks(self):
|
||||
def stop_tasks(self):
|
||||
pass
|
||||
|
||||
@property
|
||||
|
@ -118,8 +118,8 @@ class TorrentManager(SourceManager):
|
|||
async def start(self):
|
||||
await super().start()
|
||||
|
||||
async def stop(self):
|
||||
await super().stop()
|
||||
def stop(self):
|
||||
super().stop()
|
||||
log.info("finished stopping the torrent manager")
|
||||
|
||||
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
|
||||
|
|
|
@ -1,285 +0,0 @@
|
|||
import random
|
||||
import socket
|
||||
import string
|
||||
import struct
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
import ipaddress
|
||||
from collections import namedtuple
|
||||
from functools import reduce
|
||||
from typing import Optional
|
||||
|
||||
from lbry.dht.node import get_kademlia_peers_from_hosts
|
||||
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
|
||||
from lbry.wallet.stream import StreamController
|
||||
from lbry import version
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
CONNECTION_EXPIRES_AFTER_SECONDS = 50
|
||||
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
|
||||
DEFAULT_TIMEOUT_SECONDS = 10.0
|
||||
DEFAULT_CONCURRENCY_LIMIT = 100
|
||||
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
|
||||
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
|
||||
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
|
||||
AnnounceRequest = namedtuple("AnnounceRequest",
|
||||
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
|
||||
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
|
||||
AnnounceResponse = namedtuple("AnnounceResponse",
|
||||
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
|
||||
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
|
||||
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
|
||||
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
|
||||
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
|
||||
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
|
||||
structs = {
|
||||
ConnectRequest: struct.Struct(">QII"),
|
||||
ConnectResponse: struct.Struct(">IIQ"),
|
||||
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
|
||||
AnnounceResponse: struct.Struct(">IIIII"),
|
||||
CompactIPv4Peer: struct.Struct(">IH"),
|
||||
ScrapeRequest: struct.Struct(">QII"),
|
||||
ScrapeResponse: struct.Struct(">II"),
|
||||
ScrapeResponseItem: struct.Struct(">III"),
|
||||
ErrorResponse: struct.Struct(">II")
|
||||
}
|
||||
|
||||
|
||||
def decode(cls, data, offset=0):
|
||||
decoder = structs[cls]
|
||||
if cls is AnnounceResponse:
|
||||
return AnnounceResponse(*decoder.unpack_from(data, offset),
|
||||
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
|
||||
elif cls is ScrapeResponse:
|
||||
return ScrapeResponse(*decoder.unpack_from(data, offset),
|
||||
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
|
||||
elif cls is ErrorResponse:
|
||||
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
|
||||
return cls(*decoder.unpack_from(data, offset))
|
||||
|
||||
|
||||
def encode(obj):
|
||||
if isinstance(obj, ScrapeRequest):
|
||||
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
|
||||
elif isinstance(obj, ErrorResponse):
|
||||
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
|
||||
elif isinstance(obj, AnnounceResponse):
|
||||
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
|
||||
return structs[type(obj)].pack(*obj)
|
||||
|
||||
|
||||
def make_peer_id(random_part: Optional[str] = None) -> bytes:
|
||||
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
|
||||
# not to confuse with node id; peer id identifies uniquely the software, version and instance
|
||||
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
|
||||
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
|
||||
|
||||
|
||||
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
|
||||
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
|
||||
self.transport = None
|
||||
self.data_queue = {}
|
||||
self.timeout = timeout
|
||||
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
|
||||
|
||||
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
|
||||
self.transport = transport
|
||||
|
||||
async def request(self, obj, tracker_ip, tracker_port):
|
||||
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
|
||||
try:
|
||||
async with self.semaphore:
|
||||
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
|
||||
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
|
||||
finally:
|
||||
self.data_queue.pop(obj.transaction_id, None)
|
||||
|
||||
async def connect(self, tracker_ip, tracker_port):
|
||||
transaction_id = random.getrandbits(32)
|
||||
return decode(ConnectResponse,
|
||||
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
|
||||
|
||||
@cache_concurrent
|
||||
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
|
||||
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
|
||||
# peer_id is just to ensure cache coherency
|
||||
return (await self.connect(tracker_ip, tracker_port)).connection_id
|
||||
|
||||
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
|
||||
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
|
||||
# this should make the key deterministic but unique per info hash + peer id
|
||||
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
|
||||
transaction_id = random.getrandbits(32)
|
||||
req = AnnounceRequest(
|
||||
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
|
||||
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
|
||||
|
||||
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
|
||||
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
|
||||
transaction_id = random.getrandbits(32)
|
||||
reply = await self.request(
|
||||
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
|
||||
return decode(ScrapeResponse, reply), connection_id
|
||||
|
||||
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
|
||||
if len(data) < 8:
|
||||
return
|
||||
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
|
||||
if transaction_id in self.data_queue:
|
||||
if not self.data_queue[transaction_id].done():
|
||||
if data[3] == 3:
|
||||
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
|
||||
return self.data_queue[transaction_id].set_result(data)
|
||||
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
|
||||
|
||||
def connection_lost(self, exc: Exception = None) -> None:
|
||||
self.transport = None
|
||||
|
||||
|
||||
class TrackerClient:
|
||||
event_controller = StreamController()
|
||||
|
||||
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
|
||||
self.client = UDPTrackerClientProtocol(timeout=timeout)
|
||||
self.transport = None
|
||||
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
|
||||
self.announce_port = announce_port
|
||||
self._get_servers = get_servers
|
||||
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
|
||||
self.tasks = {}
|
||||
|
||||
async def start(self):
|
||||
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
|
||||
lambda: self.client, local_addr=("0.0.0.0", 0))
|
||||
self.event_controller.stream.listen(
|
||||
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
|
||||
|
||||
def stop(self):
|
||||
while self.tasks:
|
||||
self.tasks.popitem()[1].cancel()
|
||||
if self.transport is not None:
|
||||
self.transport.close()
|
||||
self.client = None
|
||||
self.transport = None
|
||||
self.event_controller.close()
|
||||
|
||||
def on_hash(self, info_hash, on_announcement=None):
|
||||
if info_hash not in self.tasks:
|
||||
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
|
||||
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
|
||||
self.tasks[info_hash] = task
|
||||
|
||||
async def announce_many(self, *info_hashes, stopped=False):
|
||||
await asyncio.gather(
|
||||
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
|
||||
return_exceptions=True)
|
||||
|
||||
async def _announce_many(self, server, info_hashes, stopped=False):
|
||||
tracker_ip = await resolve_host(*server, 'udp')
|
||||
still_good_info_hashes = {
|
||||
info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
|
||||
if time.time() < next_announcement
|
||||
}
|
||||
results = await asyncio.gather(
|
||||
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
|
||||
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
|
||||
return_exceptions=True)
|
||||
if results:
|
||||
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
|
||||
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
|
||||
|
||||
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
|
||||
found = []
|
||||
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
|
||||
for done in asyncio.as_completed(probes):
|
||||
result = await done
|
||||
if result is not None:
|
||||
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
|
||||
found.append(result)
|
||||
return found
|
||||
|
||||
async def get_kademlia_peer_list(self, info_hash):
|
||||
responses = await self.get_peer_list(info_hash, no_port=True)
|
||||
return await announcement_to_kademlia_peers(*responses)
|
||||
|
||||
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
|
||||
result = None
|
||||
try:
|
||||
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
|
||||
except socket.error:
|
||||
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
|
||||
return
|
||||
self.results.setdefault(tracker_host, {})
|
||||
if info_hash in self.results[tracker_host]:
|
||||
next_announcement, result = self.results[tracker_host][info_hash]
|
||||
if time.time() < next_announcement:
|
||||
return result
|
||||
try:
|
||||
result = await self.client.announce(
|
||||
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
|
||||
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
|
||||
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
|
||||
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
|
||||
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
|
||||
return None
|
||||
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
|
||||
return result
|
||||
|
||||
|
||||
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
|
||||
async def on_announcement(announcement: AnnounceResponse):
|
||||
peers = await announcement_to_kademlia_peers(announcement)
|
||||
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
|
||||
peer_q.put_nowait(peers)
|
||||
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
|
||||
|
||||
|
||||
def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
|
||||
peers = [
|
||||
(str(ipaddress.ip_address(peer.address)), peer.port)
|
||||
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
|
||||
]
|
||||
return get_kademlia_peers_from_hosts(peers)
|
||||
|
||||
|
||||
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
|
||||
def __init__(self):
|
||||
self.transport = None
|
||||
self.known_conns = set()
|
||||
self.peers = {}
|
||||
|
||||
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
|
||||
self.transport = transport
|
||||
|
||||
def add_peer(self, info_hash, ip_address: str, port: int):
|
||||
self.peers.setdefault(info_hash, [])
|
||||
self.peers[info_hash].append(encode_peer(ip_address, port))
|
||||
|
||||
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
|
||||
if len(data) < 16:
|
||||
return
|
||||
action = int.from_bytes(data[8:12], "big", signed=False)
|
||||
if action == 0:
|
||||
req = decode(ConnectRequest, data)
|
||||
connection_id = random.getrandbits(32)
|
||||
self.known_conns.add(connection_id)
|
||||
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
|
||||
elif action == 1:
|
||||
req = decode(AnnounceRequest, data)
|
||||
if req.connection_id not in self.known_conns:
|
||||
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
|
||||
else:
|
||||
compact_address = encode_peer(addr[0], req.port)
|
||||
if req.event != 3:
|
||||
self.add_peer(req.info_hash, addr[0], req.port)
|
||||
elif compact_address in self.peers.get(req.info_hash, []):
|
||||
self.peers[req.info_hash].remove(compact_address)
|
||||
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
|
||||
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
|
||||
return self.transport.sendto(resp, addr)
|
||||
|
||||
|
||||
def encode_peer(ip_address: str, port: int):
|
||||
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
|
||||
return compact_ip + port.to_bytes(2, "big", signed=False)
|
|
@ -130,16 +130,21 @@ def get_sd_hash(stream_info):
|
|||
def json_dumps_pretty(obj, **kwargs):
|
||||
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '), **kwargs)
|
||||
|
||||
try:
|
||||
# the standard contextlib.aclosing() is available in 3.10+
|
||||
from contextlib import aclosing # pylint: disable=unused-import
|
||||
except ImportError:
|
||||
@contextlib.asynccontextmanager
|
||||
async def aclosing(thing):
|
||||
try:
|
||||
yield thing
|
||||
finally:
|
||||
await thing.aclose()
|
||||
|
||||
def cancel_task(task: typing.Optional[asyncio.Task]):
|
||||
if task and not task.done():
|
||||
task.cancel()
|
||||
|
||||
|
||||
def cancel_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
|
||||
for task in tasks:
|
||||
cancel_task(task)
|
||||
|
||||
|
||||
def drain_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
|
||||
while tasks:
|
||||
cancel_task(tasks.pop())
|
||||
|
||||
|
||||
def async_timed_cache(duration: int):
|
||||
def wrapper(func):
|
||||
|
@ -400,7 +405,7 @@ async def fallback_get_external_ip(): # used if spv servers can't be used for i
|
|||
|
||||
async def _get_external_ip(default_servers) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
|
||||
# used if upnp is disabled or non-functioning
|
||||
from lbry.wallet.udp import SPVStatusClientProtocol # pylint: disable=C0415
|
||||
from lbry.wallet.server.udp import SPVStatusClientProtocol # pylint: disable=C0415
|
||||
|
||||
hostname_to_ip = {}
|
||||
ip_to_hostnames = collections.defaultdict(list)
|
||||
|
@ -450,8 +455,8 @@ def is_running_from_bundle():
|
|||
|
||||
|
||||
class LockWithMetrics(asyncio.Lock):
|
||||
def __init__(self, acquire_metric, held_time_metric):
|
||||
super().__init__()
|
||||
def __init__(self, acquire_metric, held_time_metric, loop=None):
|
||||
super().__init__(loop=loop)
|
||||
self._acquire_metric = acquire_metric
|
||||
self._lock_held_time_metric = held_time_metric
|
||||
self._lock_acquired_time = None
|
||||
|
@ -469,18 +474,3 @@ class LockWithMetrics(asyncio.Lock):
|
|||
return super().release()
|
||||
finally:
|
||||
self._lock_held_time_metric.observe(time.perf_counter() - self._lock_acquired_time)
|
||||
|
||||
|
||||
def get_colliding_prefix_bits(first_value: bytes, second_value: bytes):
|
||||
"""
|
||||
Calculates the amount of colliding prefix bits between <first_value> and <second_value>.
|
||||
This is given by the amount of bits that are the same until the first different one (via XOR),
|
||||
starting from the most significant bit to the least significant bit.
|
||||
:param first_value: first value to compare, bigger than size.
|
||||
:param second_value: second value to compare, bigger than size.
|
||||
:return: amount of prefix colliding bits.
|
||||
"""
|
||||
assert len(first_value) == len(second_value), "length should be the same"
|
||||
size = len(first_value) * 8
|
||||
first_value, second_value = int.from_bytes(first_value, "big"), int.from_bytes(second_value, "big")
|
||||
return size - (first_value ^ second_value).bit_length()
|
||||
|
|
|
@ -1,13 +1,8 @@
|
|||
__lbcd__ = 'lbcd'
|
||||
__lbcctl__ = 'lbcctl'
|
||||
__lbcwallet__ = 'lbcwallet'
|
||||
__lbcd_url__ = (
|
||||
'https://github.com/lbryio/lbcd/releases/download/' +
|
||||
'v0.22.100-rc.0/lbcd_0.22.100-rc.0_TARGET_PLATFORM.tar.gz'
|
||||
)
|
||||
__lbcwallet_url__ = (
|
||||
'https://github.com/lbryio/lbcwallet/releases/download/' +
|
||||
'v0.13.100-alpha.0/lbcwallet_0.13.100-alpha.0_TARGET_PLATFORM.tar.gz'
|
||||
__node_daemon__ = 'lbrycrdd'
|
||||
__node_cli__ = 'lbrycrd-cli'
|
||||
__node_bin__ = ''
|
||||
__node_url__ = (
|
||||
'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.6/lbrycrd-linux-1746.zip'
|
||||
)
|
||||
__spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'
|
||||
|
||||
|
|
|
@ -39,14 +39,9 @@ class DeterministicChannelKeyManager:
|
|||
self.account = account
|
||||
self.last_known = 0
|
||||
self.cache = {}
|
||||
self._private_key: Optional[PrivateKey] = None
|
||||
|
||||
@property
|
||||
def private_key(self):
|
||||
if self._private_key is None:
|
||||
if self.account.private_key is not None:
|
||||
self._private_key = self.account.private_key.child(KeyPath.CHANNEL)
|
||||
return self._private_key
|
||||
self.private_key: Optional[PrivateKey] = None
|
||||
if account.private_key is not None:
|
||||
self.private_key = account.private_key.child(KeyPath.CHANNEL)
|
||||
|
||||
def maybe_generate_deterministic_key_for_channel(self, txo):
|
||||
if self.private_key is None:
|
||||
|
|
|
@ -215,10 +215,6 @@ class PrivateKey(_KeyBase):
|
|||
private_key = cPrivateKey.from_int(key_int)
|
||||
return cls(ledger, private_key, bytes((0,)*32), 0, 0)
|
||||
|
||||
@classmethod
|
||||
def from_bytes(cls, ledger, key_bytes) -> 'PrivateKey':
|
||||
return cls(ledger, cPrivateKey(key_bytes), bytes((0,)*32), 0, 0)
|
||||
|
||||
@cachedproperty
|
||||
def private_key_bytes(self):
|
||||
""" Return the serialized private key (no leading zero byte). """
|
||||
|
|
|
@ -1064,182 +1064,4 @@ HASHES = {
|
|||
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
|
||||
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
|
||||
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
|
||||
1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
|
||||
1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
|
||||
1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
|
||||
1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
|
||||
1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
|
||||
1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
|
||||
1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
|
||||
1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
|
||||
1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
|
||||
1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
|
||||
1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
|
||||
1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
|
||||
1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
|
||||
1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
|
||||
1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
|
||||
1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
|
||||
1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
|
||||
1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
|
||||
1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
|
||||
1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
|
||||
1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
|
||||
1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
|
||||
1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
|
||||
1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
|
||||
1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
|
||||
1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
|
||||
1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
|
||||
1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
|
||||
1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
|
||||
1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
|
||||
1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
|
||||
1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
|
||||
1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
|
||||
1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
|
||||
1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
|
||||
1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
|
||||
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
|
||||
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
|
||||
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
|
||||
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
|
||||
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
|
||||
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
|
||||
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
|
||||
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
|
||||
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
|
||||
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
|
||||
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
|
||||
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
|
||||
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
|
||||
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
|
||||
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
|
||||
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
|
||||
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
|
||||
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
|
||||
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
|
||||
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
|
||||
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
|
||||
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
|
||||
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
|
||||
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
|
||||
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
|
||||
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
|
||||
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
|
||||
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
|
||||
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
|
||||
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
|
||||
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
|
||||
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
|
||||
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
|
||||
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
|
||||
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
|
||||
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
|
||||
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
|
||||
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
|
||||
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
|
||||
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
|
||||
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
|
||||
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
|
||||
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
|
||||
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
|
||||
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
|
||||
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
|
||||
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
|
||||
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
|
||||
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
|
||||
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
|
||||
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
|
||||
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
|
||||
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
|
||||
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
|
||||
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
|
||||
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
|
||||
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
|
||||
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
|
||||
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
|
||||
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
|
||||
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
|
||||
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
|
||||
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
|
||||
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
|
||||
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
|
||||
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
|
||||
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
|
||||
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
|
||||
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
|
||||
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
|
||||
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
|
||||
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
|
||||
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
|
||||
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
|
||||
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
|
||||
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
|
||||
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
|
||||
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
|
||||
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
|
||||
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
|
||||
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
|
||||
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
|
||||
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
|
||||
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
|
||||
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
|
||||
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
|
||||
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
|
||||
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
|
||||
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
|
||||
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
|
||||
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
|
||||
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
|
||||
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
|
||||
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
|
||||
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
|
||||
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
|
||||
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
|
||||
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
|
||||
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
|
||||
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
|
||||
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
|
||||
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
|
||||
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
|
||||
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
|
||||
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
|
||||
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
|
||||
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
|
||||
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
|
||||
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
|
||||
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
|
||||
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
|
||||
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
|
||||
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
|
||||
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
|
||||
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
|
||||
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
|
||||
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
|
||||
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
|
||||
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
|
||||
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
|
||||
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
|
||||
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
|
||||
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
|
||||
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
|
||||
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
|
||||
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
|
||||
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
|
||||
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
|
||||
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
|
||||
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
|
||||
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
|
||||
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
|
||||
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
|
||||
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
|
||||
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
|
||||
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
|
||||
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
|
||||
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
|
||||
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
|
||||
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
|
||||
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
|
||||
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
|
||||
}
|
||||
|
|
|
@ -141,7 +141,7 @@ class CoinSelector:
|
|||
_) -> List[OutputEffectiveAmountEstimator]:
|
||||
""" Accumulate UTXOs at random until there is enough to cover the target. """
|
||||
target = self.target + self.cost_of_change
|
||||
self.random.shuffle(txos, random=self.random.random) # pylint: disable=deprecated-argument
|
||||
self.random.shuffle(txos, self.random.random)
|
||||
selection = []
|
||||
amount = 0
|
||||
for coin in txos:
|
||||
|
|
|
@ -2,7 +2,6 @@ NULL_HASH32 = b'\x00'*32
|
|||
|
||||
CENT = 1000000
|
||||
COIN = 100*CENT
|
||||
DUST = 1000
|
||||
|
||||
TIMEOUT = 30.0
|
||||
|
||||
|
|
|
@ -1211,7 +1211,6 @@ class Database(SQLiteMixin):
|
|||
return addresses
|
||||
|
||||
async def get_address_count(self, cols=None, read_only=False, **constraints):
|
||||
self._clean_txo_constraints_for_aggregation(constraints)
|
||||
count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
|
||||
return count[0]['total'] or 0
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
target_timespan = 150
|
||||
|
||||
default_fee_per_byte = 50
|
||||
default_fee_per_name_char = 0
|
||||
default_fee_per_name_char = 200000
|
||||
|
||||
checkpoints = HASHES
|
||||
|
||||
|
@ -329,10 +329,10 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
async def start(self):
|
||||
if not os.path.exists(self.path):
|
||||
os.mkdir(self.path)
|
||||
await asyncio.wait(map(asyncio.create_task, [
|
||||
await asyncio.wait([
|
||||
self.db.open(),
|
||||
self.headers.open()
|
||||
]))
|
||||
])
|
||||
fully_synced = self.on_ready.first
|
||||
asyncio.create_task(self.network.start())
|
||||
await self.network.on_connected.first
|
||||
|
@ -365,10 +365,6 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
await self.db.close()
|
||||
await self.headers.close()
|
||||
|
||||
async def tasks_are_done(self):
|
||||
await self._update_tasks.done.wait()
|
||||
await self._other_tasks.done.wait()
|
||||
|
||||
@property
|
||||
def local_height_including_downloaded_height(self):
|
||||
return max(self.headers.height, self._download_height)
|
||||
|
@ -466,9 +462,9 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
async def subscribe_accounts(self):
|
||||
if self.network.is_connected and self.accounts:
|
||||
log.info("Subscribe to %i accounts", len(self.accounts))
|
||||
await asyncio.wait(map(asyncio.create_task, [
|
||||
await asyncio.wait([
|
||||
self.subscribe_account(a) for a in self.accounts
|
||||
]))
|
||||
])
|
||||
|
||||
async def subscribe_account(self, account: Account):
|
||||
for address_manager in account.address_managers.values():
|
||||
|
@ -722,15 +718,6 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
return account.address_managers[details['chain']]
|
||||
return None
|
||||
|
||||
async def broadcast_or_release(self, tx, blocking=False):
|
||||
try:
|
||||
await self.broadcast(tx)
|
||||
except:
|
||||
await self.release_tx(tx)
|
||||
raise
|
||||
if blocking:
|
||||
await self.wait(tx, timeout=None)
|
||||
|
||||
def broadcast(self, tx):
|
||||
# broadcast can't be a retriable call yet
|
||||
return self.network.broadcast(hexlify(tx.raw).decode())
|
||||
|
@ -789,9 +776,13 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
include_is_my_output=False,
|
||||
include_sent_supports=False,
|
||||
include_sent_tips=False,
|
||||
include_received_tips=False) -> Tuple[List[Output], dict, int, int]:
|
||||
include_received_tips=False,
|
||||
hub_server=False) -> Tuple[List[Output], dict, int, int]:
|
||||
encoded_outputs = await query
|
||||
outputs = Outputs.from_base64(encoded_outputs or '') # TODO: why is the server returning None?
|
||||
if hub_server:
|
||||
outputs = Outputs.from_grpc(encoded_outputs)
|
||||
else:
|
||||
outputs = Outputs.from_base64(encoded_outputs or b'') # TODO: why is the server returning None?
|
||||
txs: List[Transaction] = []
|
||||
if len(outputs.txs) > 0:
|
||||
async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
|
||||
|
@ -867,10 +858,13 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
txo.received_tips = tips
|
||||
return txos, blocked, outputs.offset, outputs.total
|
||||
|
||||
async def resolve(self, accounts, urls, **kwargs):
|
||||
async def resolve(self, accounts, urls, new_sdk_server=None, **kwargs):
|
||||
txos = []
|
||||
urls_copy = list(urls)
|
||||
resolve = partial(self.network.retriable_call, self.network.resolve)
|
||||
if new_sdk_server:
|
||||
resolve = partial(self.network.new_resolve, new_sdk_server)
|
||||
else:
|
||||
resolve = partial(self.network.retriable_call, self.network.resolve)
|
||||
while urls_copy:
|
||||
batch, urls_copy = urls_copy[:100], urls_copy[100:]
|
||||
txos.extend(
|
||||
|
@ -895,14 +889,17 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
return await self.network.sum_supports(new_sdk_server, **kwargs)
|
||||
|
||||
async def claim_search(
|
||||
self, accounts,
|
||||
include_purchase_receipt=False,
|
||||
include_is_my_output=False,
|
||||
**kwargs) -> Tuple[List[Output], dict, int, int]:
|
||||
self, accounts, include_purchase_receipt=False, include_is_my_output=False,
|
||||
new_sdk_server=None, **kwargs) -> Tuple[List[Output], dict, int, int]:
|
||||
if new_sdk_server:
|
||||
claim_search = partial(self.network.new_claim_search, new_sdk_server)
|
||||
else:
|
||||
claim_search = self.network.claim_search
|
||||
return await self._inflate_outputs(
|
||||
self.network.claim_search(**kwargs), accounts,
|
||||
claim_search(**kwargs), accounts,
|
||||
include_purchase_receipt=include_purchase_receipt,
|
||||
include_is_my_output=include_is_my_output
|
||||
include_is_my_output=include_is_my_output,
|
||||
hub_server=new_sdk_server is not None
|
||||
)
|
||||
|
||||
# async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
|
||||
|
@ -938,7 +935,9 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
"%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
|
||||
account.id, balance, total_receiving, account.receiving.gap, total_change,
|
||||
account.change.gap, channel_count, len(account.channel_keys), claim_count)
|
||||
except Exception:
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
|
||||
raise
|
||||
log.exception(
|
||||
'Failed to display wallet state, please file issue '
|
||||
'for this bug along with the traceback you see below:')
|
||||
|
@ -961,7 +960,9 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
claim_ids = [p.purchased_claim_id for p in purchases]
|
||||
try:
|
||||
resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
|
||||
except Exception:
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
|
||||
raise
|
||||
log.exception("Resolve failed while looking up purchased claim ids:")
|
||||
resolved = []
|
||||
lookup = {claim.claim_id: claim for claim in resolved}
|
||||
|
@ -1041,7 +1042,9 @@ class Ledger(metaclass=LedgerRegistry):
|
|||
claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
|
||||
try:
|
||||
resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
|
||||
except Exception:
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
|
||||
raise
|
||||
log.exception("Resolve failed while looking up collection claim ids:")
|
||||
return []
|
||||
claims = []
|
||||
|
|
|
@ -3,6 +3,7 @@ import json
|
|||
import typing
|
||||
import logging
|
||||
import asyncio
|
||||
from distutils.util import strtobool
|
||||
|
||||
from binascii import unhexlify
|
||||
from decimal import Decimal
|
||||
|
@ -182,6 +183,7 @@ class WalletManager:
|
|||
}[config.blockchain_name]
|
||||
|
||||
ledger_config = {
|
||||
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
|
||||
'auto_connect': True,
|
||||
'explicit_servers': [],
|
||||
'hub_timeout': config.hub_timeout,
|
||||
|
@ -236,6 +238,7 @@ class WalletManager:
|
|||
|
||||
async def reset(self):
|
||||
self.ledger.config = {
|
||||
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
|
||||
'auto_connect': True,
|
||||
'explicit_servers': [],
|
||||
'default_servers': Config.lbryum_servers.default,
|
||||
|
@ -317,4 +320,10 @@ class WalletManager:
|
|||
)
|
||||
|
||||
async def broadcast_or_release(self, tx, blocking=False):
|
||||
await self.ledger.broadcast_or_release(tx, blocking=blocking)
|
||||
try:
|
||||
await self.ledger.broadcast(tx)
|
||||
except:
|
||||
await self.ledger.release_tx(tx)
|
||||
raise
|
||||
if blocking:
|
||||
await self.ledger.wait(tx, timeout=None)
|
||||
|
|
|
@ -7,13 +7,16 @@ from time import perf_counter
|
|||
from collections import defaultdict
|
||||
from typing import Dict, Optional, Tuple
|
||||
import aiohttp
|
||||
import grpc
|
||||
from lbry.schema.types.v2 import hub_pb2_grpc
|
||||
from lbry.schema.types.v2.hub_pb2 import SearchRequest
|
||||
|
||||
from lbry import __version__
|
||||
from lbry.utils import resolve_host
|
||||
from lbry.error import IncompatibleWalletServerError
|
||||
from lbry.wallet.rpc import RPCSession as BaseClientSession, Connector, RPCError, ProtocolError
|
||||
from lbry.wallet.stream import StreamController
|
||||
from lbry.wallet.udp import SPVStatusClientProtocol, SPVPong
|
||||
from lbry.wallet.server.udp import SPVStatusClientProtocol, SPVPong
|
||||
from lbry.conf import KnownHubsList
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
@ -117,9 +120,9 @@ class ClientSession(BaseClientSession):
|
|||
)
|
||||
else:
|
||||
await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
|
||||
except (Exception, asyncio.CancelledError) as err:
|
||||
except Exception as err:
|
||||
if isinstance(err, asyncio.CancelledError):
|
||||
log.info("closing connection to %s:%i", *self.server)
|
||||
log.warning("closing connection to %s:%i", *self.server)
|
||||
else:
|
||||
log.exception("lost connection to spv")
|
||||
finally:
|
||||
|
@ -137,7 +140,7 @@ class ClientSession(BaseClientSession):
|
|||
controller.add(request.args)
|
||||
|
||||
def connection_lost(self, exc):
|
||||
log.debug("Connection lost: %s:%d", *self.server)
|
||||
log.warning("Connection lost: %s:%d", *self.server)
|
||||
super().connection_lost(exc)
|
||||
self.response_time = None
|
||||
self.connection_latency = None
|
||||
|
@ -214,7 +217,7 @@ class Network:
|
|||
def loop_task_done_callback(f):
|
||||
try:
|
||||
f.result()
|
||||
except (Exception, asyncio.CancelledError):
|
||||
except Exception:
|
||||
if self.running:
|
||||
log.exception("wallet server connection loop crashed")
|
||||
|
||||
|
@ -300,7 +303,7 @@ class Network:
|
|||
concurrency=self.config.get('concurrent_hub_requests', 30))
|
||||
try:
|
||||
await client.create_connection()
|
||||
log.info("Connected to spv server %s:%i", host, port)
|
||||
log.warning("Connected to spv server %s:%i", host, port)
|
||||
await client.ensure_server_version()
|
||||
return client
|
||||
except (asyncio.TimeoutError, ConnectionError, OSError, IncompatibleWalletServerError, RPCError):
|
||||
|
@ -312,8 +315,7 @@ class Network:
|
|||
sleep_delay = 30
|
||||
while self.running:
|
||||
await asyncio.wait(
|
||||
map(asyncio.create_task, [asyncio.sleep(30), self._urgent_need_reconnect.wait()]),
|
||||
return_when=asyncio.FIRST_COMPLETED
|
||||
[asyncio.sleep(30), self._urgent_need_reconnect.wait()], return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
if self._urgent_need_reconnect.is_set():
|
||||
sleep_delay = 30
|
||||
|
@ -339,13 +341,14 @@ class Network:
|
|||
try:
|
||||
if not self._urgent_need_reconnect.is_set():
|
||||
await asyncio.wait(
|
||||
[self._keepalive_task, asyncio.create_task(self._urgent_need_reconnect.wait())],
|
||||
[self._keepalive_task, self._urgent_need_reconnect.wait()],
|
||||
return_when=asyncio.FIRST_COMPLETED
|
||||
)
|
||||
else:
|
||||
await self._keepalive_task
|
||||
if self._urgent_need_reconnect.is_set():
|
||||
log.warning("urgent reconnect needed")
|
||||
self._urgent_need_reconnect.clear()
|
||||
if self._keepalive_task and not self._keepalive_task.done():
|
||||
self._keepalive_task.cancel()
|
||||
except asyncio.CancelledError:
|
||||
|
@ -354,7 +357,7 @@ class Network:
|
|||
self._keepalive_task = None
|
||||
self.client = None
|
||||
self.server_features = None
|
||||
log.info("connection lost to %s", server_str)
|
||||
log.warning("connection lost to %s", server_str)
|
||||
log.info("network loop finished")
|
||||
|
||||
async def stop(self):
|
||||
|
@ -391,6 +394,7 @@ class Network:
|
|||
log.warning("Wallet server call timed out, retrying.")
|
||||
except ConnectionError:
|
||||
log.warning("connection error")
|
||||
|
||||
raise asyncio.CancelledError() # if we got here, we are shutting down
|
||||
|
||||
def _update_remote_height(self, header_args):
|
||||
|
@ -473,6 +477,21 @@ class Network:
|
|||
def claim_search(self, session_override=None, **kwargs):
|
||||
return self.rpc('blockchain.claimtrie.search', kwargs, False, session_override)
|
||||
|
||||
async def new_resolve(self, server, urls):
|
||||
message = {"method": "resolve", "params": {"urls": urls, "protobuf": True}}
|
||||
async with self.aiohttp_session.post(server, json=message) as r:
|
||||
result = await r.json()
|
||||
return result['result']
|
||||
|
||||
async def new_claim_search(self, server, **kwargs):
|
||||
async with grpc.aio.insecure_channel(server) as channel:
|
||||
stub = hub_pb2_grpc.HubStub(channel)
|
||||
try:
|
||||
response = await stub.Search(SearchRequest(**kwargs))
|
||||
except grpc.aio.AioRpcError as error:
|
||||
raise RPCError(error.code(), error.details())
|
||||
return response
|
||||
|
||||
async def sum_supports(self, server, **kwargs):
|
||||
message = {"method": "support_sum", "params": kwargs}
|
||||
async with self.aiohttp_session.post(server, json=message) as r:
|
||||
|
|
|
@ -1,2 +1,5 @@
__hub_url__ = (
"https://github.com/lbryio/hub/releases/download/v0.2021.12.18.1/hub"
)
from lbry.wallet.orchstr8.node import Conductor
from lbry.wallet.orchstr8.service import ConductorService

@ -5,9 +5,7 @@ import aiohttp

from lbry import wallet
from lbry.wallet.orchstr8.node import (
Conductor,
get_lbcd_node_from_ledger,
get_lbcwallet_node_from_ledger
Conductor, get_blockchain_node_from_ledger
)
from lbry.wallet.orchstr8.service import ConductorService

@ -18,11 +16,10 @@ def get_argument_parser():
)
subparsers = parser.add_subparsers(dest='command', help='sub-command help')

subparsers.add_parser("download", help="Download lbcd and lbcwallet node binaries.")
subparsers.add_parser("download", help="Download blockchain node binary.")

start = subparsers.add_parser("start", help="Start orchstr8 service.")
start.add_argument("--lbcd", help="Hostname to start lbcd node.")
start.add_argument("--lbcwallet", help="Hostname to start lbcwallet node.")
start.add_argument("--blockchain", help="Hostname to start blockchain node.")
start.add_argument("--spv", help="Hostname to start SPV server.")
start.add_argument("--wallet", help="Hostname to start wallet daemon.")

@ -50,8 +47,7 @@ def main():

if command == 'download':
logging.getLogger('blockchain').setLevel(logging.INFO)
get_lbcd_node_from_ledger(wallet).ensure()
get_lbcwallet_node_from_ledger(wallet).ensure()
get_blockchain_node_from_ledger(wallet).ensure()

elif command == 'generate':
loop.run_until_complete(run_remote_command(

@ -61,12 +57,9 @@ def main():
elif command == 'start':

conductor = Conductor()
if getattr(args, 'lbcd', False):
conductor.lbcd_node.hostname = args.lbcd
loop.run_until_complete(conductor.start_lbcd())
if getattr(args, 'lbcwallet', False):
conductor.lbcwallet_node.hostname = args.lbcwallet
loop.run_until_complete(conductor.start_lbcwallet())
if getattr(args, 'blockchain', False):
conductor.blockchain_node.hostname = args.blockchain
loop.run_until_complete(conductor.start_blockchain())
if getattr(args, 'spv', False):
conductor.spv_node.hostname = args.spv
loop.run_until_complete(conductor.start_spv())
@ -1,4 +1,3 @@
# pylint: disable=import-error
import os
import json
import shutil

@ -8,44 +7,37 @@ import tarfile
import logging
import tempfile
import subprocess
import platform
import importlib
from distutils.util import strtobool

from binascii import hexlify
from typing import Type, Optional
import urllib.request
from uuid import uuid4

import lbry
from lbry.wallet.server.env import Env
from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
from lbry.conf import KnownHubsList, Config
from lbry.wallet.orchstr8 import __hub_url__
from lbry.wallet.server.block_processor import BlockProcessor
from lbry.wallet.server.chain_reader import BlockchainReaderServer
from lbry.wallet.server.db.elasticsearch.sync import ElasticWriter

log = logging.getLogger(__name__)

try:
from hub.herald.env import ServerEnv
from hub.scribe.env import BlockchainEnv
from hub.elastic_sync.env import ElasticEnv
from hub.herald.service import HubServerService
from hub.elastic_sync.service import ElasticSyncService
from hub.scribe.service import BlockchainProcessorService
except ImportError:
pass

def get_spvserver_from_ledger(ledger_module):
spvserver_path, regtest_class_name = ledger_module.__spvserver__.rsplit('.', 1)
spvserver_module = importlib.import_module(spvserver_path)
return getattr(spvserver_module, regtest_class_name)

def get_lbcd_node_from_ledger(ledger_module):
return LBCDNode(
ledger_module.__lbcd_url__,
ledger_module.__lbcd__,
ledger_module.__lbcctl__
)

def get_lbcwallet_node_from_ledger(ledger_module):
return LBCWalletNode(
ledger_module.__lbcwallet_url__,
ledger_module.__lbcwallet__,
ledger_module.__lbcctl__
def get_blockchain_node_from_ledger(ledger_module):
return BlockchainNode(
ledger_module.__node_url__,
os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__)
)
@ -53,37 +45,53 @@ class Conductor:
|
|||
|
||||
def __init__(self, seed=None):
|
||||
self.manager_module = WalletManager
|
||||
self.lbcd_node = get_lbcd_node_from_ledger(lbry.wallet)
|
||||
self.lbcwallet_node = get_lbcwallet_node_from_ledger(lbry.wallet)
|
||||
self.spv_node = SPVNode()
|
||||
self.spv_module = get_spvserver_from_ledger(lbry.wallet)
|
||||
|
||||
self.blockchain_node = get_blockchain_node_from_ledger(lbry.wallet)
|
||||
self.spv_node = SPVNode(self.spv_module)
|
||||
self.wallet_node = WalletNode(
|
||||
self.manager_module, RegTestLedger, default_seed=seed
|
||||
)
|
||||
self.lbcd_started = False
|
||||
self.lbcwallet_started = False
|
||||
self.hub_node = HubNode(__hub_url__, "hub", self.spv_node)
|
||||
|
||||
self.blockchain_started = False
|
||||
self.spv_started = False
|
||||
self.wallet_started = False
|
||||
self.hub_started = False
|
||||
|
||||
self.log = log.getChild('conductor')
|
||||
|
||||
async def start_lbcd(self):
|
||||
if not self.lbcd_started:
|
||||
await self.lbcd_node.start()
|
||||
self.lbcd_started = True
|
||||
async def start_blockchain(self):
|
||||
if not self.blockchain_started:
|
||||
asyncio.create_task(self.blockchain_node.start())
|
||||
await self.blockchain_node.running.wait()
|
||||
await self.blockchain_node.generate(200)
|
||||
self.blockchain_started = True
|
||||
|
||||
async def stop_lbcd(self, cleanup=True):
|
||||
if self.lbcd_started:
|
||||
await self.lbcd_node.stop(cleanup)
|
||||
self.lbcd_started = False
|
||||
async def stop_blockchain(self):
|
||||
if self.blockchain_started:
|
||||
await self.blockchain_node.stop(cleanup=True)
|
||||
self.blockchain_started = False
|
||||
|
||||
async def start_hub(self):
|
||||
if not self.hub_started:
|
||||
asyncio.create_task(self.hub_node.start())
|
||||
await self.blockchain_node.running.wait()
|
||||
self.hub_started = True
|
||||
|
||||
async def stop_hub(self):
|
||||
if self.hub_started:
|
||||
await self.hub_node.stop(cleanup=True)
|
||||
self.hub_started = False
|
||||
|
||||
async def start_spv(self):
|
||||
if not self.spv_started:
|
||||
await self.spv_node.start(self.lbcwallet_node)
|
||||
await self.spv_node.start(self.blockchain_node)
|
||||
self.spv_started = True
|
||||
|
||||
async def stop_spv(self, cleanup=True):
|
||||
async def stop_spv(self):
|
||||
if self.spv_started:
|
||||
await self.spv_node.stop(cleanup)
|
||||
await self.spv_node.stop(cleanup=True)
|
||||
self.spv_started = False
|
||||
|
||||
async def start_wallet(self):
|
||||
|
@ -91,30 +99,13 @@ class Conductor:
|
|||
await self.wallet_node.start(self.spv_node)
|
||||
self.wallet_started = True
|
||||
|
||||
async def stop_wallet(self, cleanup=True):
|
||||
async def stop_wallet(self):
|
||||
if self.wallet_started:
|
||||
await self.wallet_node.stop(cleanup)
|
||||
await self.wallet_node.stop(cleanup=True)
|
||||
self.wallet_started = False
|
||||
|
||||
async def start_lbcwallet(self, clean=True):
|
||||
if not self.lbcwallet_started:
|
||||
await self.lbcwallet_node.start()
|
||||
if clean:
|
||||
mining_addr = await self.lbcwallet_node.get_new_address()
|
||||
self.lbcwallet_node.mining_addr = mining_addr
|
||||
await self.lbcwallet_node.generate(200)
|
||||
# unlock the wallet for the next 1 hour
|
||||
await self.lbcwallet_node.wallet_passphrase("password", 3600)
|
||||
self.lbcwallet_started = True
|
||||
|
||||
async def stop_lbcwallet(self, cleanup=True):
|
||||
if self.lbcwallet_started:
|
||||
await self.lbcwallet_node.stop(cleanup)
|
||||
self.lbcwallet_started = False
|
||||
|
||||
async def start(self):
|
||||
await self.start_lbcd()
|
||||
await self.start_lbcwallet()
|
||||
await self.start_blockchain()
|
||||
await self.start_spv()
|
||||
await self.start_wallet()
|
||||
|
||||
|
@ -122,8 +113,7 @@ class Conductor:
|
|||
all_the_stops = [
|
||||
self.stop_wallet,
|
||||
self.stop_spv,
|
||||
self.stop_lbcwallet,
|
||||
self.stop_lbcd
|
||||
self.stop_blockchain
|
||||
]
|
||||
for stop in all_the_stops:
|
||||
try:
|
||||
|
@ -131,12 +121,6 @@ class Conductor:
|
|||
except Exception as e:
|
||||
log.exception('Exception raised while stopping services:', exc_info=e)
|
||||
|
||||
async def clear_mempool(self):
|
||||
await self.stop_lbcwallet(cleanup=False)
|
||||
await self.stop_lbcd(cleanup=False)
|
||||
await self.start_lbcd()
|
||||
await self.start_lbcwallet(clean=False)
|
||||
|
||||
|
||||
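The Conductor being reshaped above is the orchstr8 integration-test harness that wires the lbcd, lbcwallet, SPV and wallet nodes together. A minimal sketch of driving it directly, using only the start/stop methods visible in this diff (the event-loop wrapper around them is an assumption):

import asyncio
from lbry.wallet.orchstr8.node import Conductor

async def run_stack():
    conductor = Conductor()
    # bring the stack up in dependency order, mirroring Conductor.start()
    await conductor.start_lbcd()
    await conductor.start_lbcwallet()   # also mines 200 blocks and unlocks the wallet
    await conductor.start_spv()
    await conductor.start_wallet()
    try:
        pass  # the regtest stack is now usable by integration tests
    finally:
        # tear down in reverse order, as in the all_the_stops list below
        await conductor.stop_wallet()
        await conductor.stop_spv()
        await conductor.stop_lbcwallet()
        await conductor.stop_lbcd()

# asyncio.run(run_stack())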
class WalletNode:
|
||||
|
||||
|
@ -157,14 +141,14 @@ class WalletNode:
|
|||
|
||||
async def start(self, spv_node: 'SPVNode', seed=None, connect=True, config=None):
|
||||
wallets_dir = os.path.join(self.data_path, 'wallets')
|
||||
os.mkdir(wallets_dir)
|
||||
wallet_file_name = os.path.join(wallets_dir, 'my_wallet.json')
|
||||
if not os.path.isdir(wallets_dir):
|
||||
os.mkdir(wallets_dir)
|
||||
with open(wallet_file_name, 'w') as wallet_file:
|
||||
wallet_file.write('{"version": 1, "accounts": []}\n')
|
||||
with open(wallet_file_name, 'w') as wallet_file:
|
||||
wallet_file.write('{"version": 1, "accounts": []}\n')
|
||||
self.manager = self.manager_class.from_config({
|
||||
'ledgers': {
|
||||
self.ledger_class.get_id(): {
|
||||
'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
|
||||
'api_port': self.port,
|
||||
'explicit_servers': [(spv_node.hostname, spv_node.port)],
|
||||
'default_servers': Config.lbryum_servers.default,
|
||||
|
@ -172,7 +156,6 @@ class WalletNode:
|
|||
'known_hubs': config.known_hubs if config else KnownHubsList(),
|
||||
'hub_timeout': 30,
|
||||
'concurrent_hub_requests': 32,
|
||||
'fee_per_name_char': 200000
|
||||
}
|
||||
},
|
||||
'wallets': [wallet_file_name]
|
||||
|
@ -203,83 +186,55 @@ class WalletNode:
|
|||
|
||||
|
||||
class SPVNode:
|
||||
def __init__(self, node_number=1):
|
||||
self.node_number = node_number
|
||||
|
||||
def __init__(self, coin_class, node_number=1):
|
||||
self.coin_class = coin_class
|
||||
self.controller = None
|
||||
self.data_path = None
|
||||
self.server: Optional[HubServerService] = None
|
||||
self.writer: Optional[BlockchainProcessorService] = None
|
||||
self.es_writer: Optional[ElasticSyncService] = None
|
||||
self.server: Optional[BlockchainReaderServer] = None
|
||||
self.writer: Optional[BlockProcessor] = None
|
||||
self.es_writer: Optional[ElasticWriter] = None
|
||||
self.hostname = 'localhost'
|
||||
self.port = 50001 + node_number # avoid conflict with default daemon
|
||||
self.udp_port = self.port
|
||||
self.elastic_notifier_port = 19080 + node_number
|
||||
self.elastic_services = f'localhost:9200/localhost:{self.elastic_notifier_port}'
|
||||
self.session_timeout = 600
|
||||
self.stopped = True
|
||||
self.stopped = False
|
||||
self.index_name = uuid4().hex
|
||||
|
||||
async def start(self, lbcwallet_node: 'LBCWalletNode', extraconf=None):
|
||||
if not self.stopped:
|
||||
log.warning("spv node is already running")
|
||||
return
|
||||
self.stopped = False
|
||||
try:
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
conf = {
|
||||
'description': '',
|
||||
'payment_address': '',
|
||||
'daily_fee': '0',
|
||||
'db_dir': self.data_path,
|
||||
'daemon_url': lbcwallet_node.rpc_url,
|
||||
'reorg_limit': 100,
|
||||
'host': self.hostname,
|
||||
'tcp_port': self.port,
|
||||
'udp_port': self.udp_port,
|
||||
'elastic_services': self.elastic_services,
|
||||
'session_timeout': self.session_timeout,
|
||||
'max_query_workers': 0,
|
||||
'es_index_prefix': self.index_name,
|
||||
'chain': 'regtest',
|
||||
'index_address_status': False
|
||||
}
|
||||
if extraconf:
|
||||
conf.update(extraconf)
|
||||
self.writer = BlockchainProcessorService(
|
||||
BlockchainEnv(db_dir=self.data_path, daemon_url=lbcwallet_node.rpc_url,
|
||||
reorg_limit=100, max_query_workers=0, chain='regtest', index_address_status=False)
|
||||
)
|
||||
self.server = HubServerService(ServerEnv(**conf))
|
||||
self.es_writer = ElasticSyncService(
|
||||
ElasticEnv(
|
||||
db_dir=self.data_path, reorg_limit=100, max_query_workers=0, chain='regtest',
|
||||
elastic_notifier_port=self.elastic_notifier_port,
|
||||
es_index_prefix=self.index_name,
|
||||
filtering_channel_ids=(extraconf or {}).get('filtering_channel_ids'),
|
||||
blocking_channel_ids=(extraconf or {}).get('blocking_channel_ids')
|
||||
)
|
||||
)
|
||||
await self.writer.start()
|
||||
await self.es_writer.start()
|
||||
await self.server.start()
|
||||
except Exception as e:
|
||||
self.stopped = True
|
||||
log.exception("failed to start spv node")
|
||||
raise e
|
||||
async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
conf = {
|
||||
'description': '',
|
||||
'payment_address': '',
|
||||
'daily_fee': '0',
|
||||
'db_dir': self.data_path,
|
||||
'daemon_url': blockchain_node.rpc_url,
|
||||
'reorg_limit': 100,
|
||||
'host': self.hostname,
|
||||
'tcp_port': self.port,
|
||||
'udp_port': self.udp_port,
|
||||
'session_timeout': self.session_timeout,
|
||||
'max_query_workers': 0,
|
||||
'es_index_prefix': self.index_name,
|
||||
}
|
||||
if extraconf:
|
||||
conf.update(extraconf)
|
||||
env = Env(self.coin_class, **conf)
|
||||
self.writer = BlockProcessor(env)
|
||||
self.server = BlockchainReaderServer(env)
|
||||
self.es_writer = ElasticWriter(env)
|
||||
await self.writer.open()
|
||||
await self.writer.start()
|
||||
await asyncio.wait([self.server.start(), self.es_writer.start()])
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
if self.stopped:
|
||||
log.warning("spv node is already stopped")
|
||||
return
|
||||
try:
|
||||
await self.es_writer.stop(delete_index=True)
|
||||
await self.server.stop()
|
||||
await self.es_writer.delete_index()
|
||||
await self.es_writer.stop()
|
||||
await self.writer.stop()
|
||||
self.stopped = True
|
||||
except Exception as e:
|
||||
log.exception("failed to stop spv node")
|
||||
raise e
|
||||
finally:
|
||||
cleanup and self.cleanup()
|
||||
|
||||
|
@ -287,19 +242,18 @@ class SPVNode:
|
|||
shutil.rmtree(self.data_path, ignore_errors=True)
|
||||
|
||||
|
||||
class LBCDProcess(asyncio.SubprocessProtocol):
|
||||
class BlockchainProcess(asyncio.SubprocessProtocol):
|
||||
|
||||
IGNORE_OUTPUT = [
|
||||
b'keypool keep',
|
||||
b'keypool reserve',
|
||||
b'keypool return',
|
||||
b'Block submitted',
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
self.ready = asyncio.Event()
|
||||
self.stopped = asyncio.Event()
|
||||
self.log = log.getChild('lbcd')
|
||||
self.log = log.getChild('blockchain')
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
||||
|
@ -310,7 +264,7 @@ class LBCDProcess(asyncio.SubprocessProtocol):
|
|||
if b'Error:' in data:
|
||||
self.ready.set()
|
||||
raise SystemError(data.decode())
|
||||
if b'RPCS: RPC server listening on' in data:
|
||||
if b'Done loading' in data:
|
||||
self.ready.set()
|
||||
|
||||
def process_exited(self):
|
||||
|
@ -318,57 +272,39 @@ class LBCDProcess(asyncio.SubprocessProtocol):
|
|||
self.ready.set()
|
||||
|
||||
|
||||
class WalletProcess(asyncio.SubprocessProtocol):
|
||||
class BlockchainNode:
|
||||
|
||||
IGNORE_OUTPUT = [
|
||||
]
|
||||
P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
|
||||
BECH32_ADDRESS = "bech32"
|
||||
|
||||
def __init__(self):
|
||||
self.ready = asyncio.Event()
|
||||
self.stopped = asyncio.Event()
|
||||
self.log = log.getChild('lbcwallet')
|
||||
self.transport: Optional[asyncio.transports.SubprocessTransport] = None
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
|
||||
if b'Error:' in data:
|
||||
self.log.error(data.decode())
|
||||
else:
|
||||
self.log.info(data.decode())
|
||||
if b'Error:' in data:
|
||||
self.ready.set()
|
||||
raise SystemError(data.decode())
|
||||
if b'WLLT: Finished rescan' in data:
|
||||
self.ready.set()
|
||||
|
||||
def process_exited(self):
|
||||
self.stopped.set()
|
||||
self.ready.set()
|
||||
|
||||
|
||||
class LBCDNode:
|
||||
def __init__(self, url, daemon, cli):
|
||||
self.latest_release_url = url
|
||||
self.project_dir = os.path.dirname(os.path.dirname(__file__))
|
||||
self.bin_dir = os.path.join(self.project_dir, 'bin')
|
||||
self.daemon_bin = os.path.join(self.bin_dir, daemon)
|
||||
self.cli_bin = os.path.join(self.bin_dir, cli)
|
||||
self.log = log.getChild('lbcd')
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
self.log = log.getChild('blockchain')
|
||||
self.data_path = None
|
||||
self.protocol = None
|
||||
self.transport = None
|
||||
self.block_expected = 0
|
||||
self.hostname = 'localhost'
|
||||
self.peerport = 29246
|
||||
self.rpcport = 29245
|
||||
self.peerport = 9246 + 2 # avoid conflict with default peer port
|
||||
self.rpcport = 9245 + 2 # avoid conflict with default rpc port
|
||||
self.rpcuser = 'rpcuser'
|
||||
self.rpcpassword = 'rpcpassword'
|
||||
self.stopped = True
|
||||
self.stopped = False
|
||||
self.restart_ready = asyncio.Event()
|
||||
self.restart_ready.set()
|
||||
self.running = asyncio.Event()
|
||||
|
||||
@property
|
||||
def rpc_url(self):
|
||||
return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.rpcport}/'
|
||||
|
||||
def is_expected_block(self, e: BlockHeightEvent):
|
||||
return self.block_expected == e.height
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
return (
|
||||
|
@ -377,12 +313,6 @@ class LBCDNode:
|
|||
)
|
||||
|
||||
def download(self):
|
||||
uname = platform.uname()
|
||||
target_os = str.lower(uname.system)
|
||||
target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
|
||||
target_platform = target_os + '_' + target_arch
|
||||
self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
|
||||
|
||||
downloaded_file = os.path.join(
|
||||
self.bin_dir,
|
||||
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
|
||||
|
@ -416,106 +346,174 @@ class LBCDNode:
|
|||
return self.exists or self.download()
|
||||
|
||||
async def start(self):
|
||||
if not self.stopped:
|
||||
return
|
||||
self.stopped = False
|
||||
try:
|
||||
assert self.ensure()
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
command = [
|
||||
self.daemon_bin,
|
||||
'--notls',
|
||||
f'--datadir={self.data_path}',
|
||||
'--regtest', f'--listen=127.0.0.1:{self.peerport}', f'--rpclisten=127.0.0.1:{self.rpcport}',
|
||||
'--txindex', f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}'
|
||||
]
|
||||
self.log.info(' '.join(command))
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
LBCDProcess, *command
|
||||
)
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
self.stopped = True
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
self.stopped = True
|
||||
log.exception('failed to start lbcd', exc_info=e)
|
||||
raise
|
||||
assert self.ensure()
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
command = [
|
||||
self.daemon_bin,
|
||||
f'-datadir={self.data_path}', '-printtoconsole', '-regtest', '-server', '-txindex',
|
||||
f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}',
|
||||
f'-port={self.peerport}'
|
||||
]
|
||||
self.log.info(' '.join(command))
|
||||
while not self.stopped:
|
||||
if self.running.is_set():
|
||||
await asyncio.sleep(1)
|
||||
continue
|
||||
await self.restart_ready.wait()
|
||||
try:
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
BlockchainProcess, *command
|
||||
)
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
log.exception('failed to start lbrycrdd', exc_info=e)
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
if self.stopped:
|
||||
return
|
||||
self.stopped = True
|
||||
try:
|
||||
if self.transport:
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
except Exception as e:
|
||||
log.exception('failed to stop lbcd', exc_info=e)
|
||||
raise
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
finally:
|
||||
self.log.info("Done shutting down " + self.daemon_bin)
|
||||
self.stopped = True
|
||||
if cleanup:
|
||||
self.cleanup()
|
||||
self.running.clear()
|
||||
|
||||
async def clear_mempool(self):
|
||||
self.restart_ready.clear()
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
self.running.clear()
|
||||
os.remove(os.path.join(self.data_path, 'regtest', 'mempool.dat'))
|
||||
self.restart_ready.set()
|
||||
await self.running.wait()
|
||||
|
||||
def cleanup(self):
|
||||
assert self.stopped
|
||||
shutil.rmtree(self.data_path, ignore_errors=True)
|
||||
|
||||
async def _cli_cmnd(self, *args):
|
||||
cmnd_args = [
|
||||
self.cli_bin, f'-datadir={self.data_path}', '-regtest',
|
||||
f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}'
|
||||
] + list(args)
|
||||
self.log.info(' '.join(cmnd_args))
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
process = await asyncio.create_subprocess_exec(
|
||||
*cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
|
||||
)
|
||||
out, _ = await process.communicate()
|
||||
result = out.decode().strip()
|
||||
self.log.info(result)
|
||||
if result.startswith('error code'):
|
||||
raise Exception(result)
|
||||
return result
|
||||
|
||||
class LBCWalletNode:
|
||||
P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
|
||||
BECH32_ADDRESS = "bech32"
|
||||
def generate(self, blocks):
|
||||
self.block_expected += blocks
|
||||
return self._cli_cmnd('generate', str(blocks))
|
||||
|
||||
def invalidate_block(self, blockhash):
|
||||
return self._cli_cmnd('invalidateblock', blockhash)
|
||||
|
||||
def get_block_hash(self, block):
|
||||
return self._cli_cmnd('getblockhash', str(block))
|
||||
|
||||
def sendrawtransaction(self, tx):
|
||||
return self._cli_cmnd('sendrawtransaction', tx)
|
||||
|
||||
async def get_block(self, block_hash):
|
||||
return json.loads(await self._cli_cmnd('getblock', block_hash, '1'))
|
||||
|
||||
def get_raw_change_address(self):
|
||||
return self._cli_cmnd('getrawchangeaddress')
|
||||
|
||||
def get_new_address(self, address_type):
|
||||
return self._cli_cmnd('getnewaddress', "", address_type)
|
||||
|
||||
async def get_balance(self):
|
||||
return await self._cli_cmnd('getbalance')
|
||||
|
||||
def send_to_address(self, address, amount):
|
||||
return self._cli_cmnd('sendtoaddress', address, str(amount))
|
||||
|
||||
def send_raw_transaction(self, tx):
|
||||
return self._cli_cmnd('sendrawtransaction', tx.decode())
|
||||
|
||||
def create_raw_transaction(self, inputs, outputs):
|
||||
return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))
|
||||
|
||||
async def sign_raw_transaction_with_wallet(self, tx):
|
||||
return json.loads(await self._cli_cmnd('signrawtransactionwithwallet', tx))['hex'].encode()
|
||||
|
||||
def decode_raw_transaction(self, tx):
|
||||
return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())
|
||||
|
||||
def get_raw_transaction(self, txid):
|
||||
return self._cli_cmnd('getrawtransaction', txid, '1')
|
||||
|
||||
|
||||
class HubProcess(asyncio.SubprocessProtocol):
|
||||
def __init__(self):
|
||||
self.ready = asyncio.Event()
|
||||
self.stopped = asyncio.Event()
|
||||
self.log = log.getChild('hub')
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
if self.log:
|
||||
self.log.info(data.decode())
|
||||
if b'error' in data.lower():
|
||||
self.ready.set()
|
||||
raise SystemError(data.decode())
|
||||
if b'listening on' in data:
|
||||
self.ready.set()
|
||||
str_lines = str(data.decode()).split("\n")
|
||||
for line in str_lines:
|
||||
if 'releaseTime' in line:
|
||||
print(line)
|
||||
|
||||
def process_exited(self):
|
||||
self.stopped.set()
|
||||
self.ready.set()
|
||||
|
||||
|
||||
class HubNode:
|
||||
|
||||
def __init__(self, url, daemon, spv_node):
|
||||
self.spv_node = spv_node
|
||||
self.debug = False
|
||||
|
||||
def __init__(self, url, lbcwallet, cli):
|
||||
self.latest_release_url = url
|
||||
self.project_dir = os.path.dirname(os.path.dirname(__file__))
|
||||
self.bin_dir = os.path.join(self.project_dir, 'bin')
|
||||
self.lbcwallet_bin = os.path.join(self.bin_dir, lbcwallet)
|
||||
self.cli_bin = os.path.join(self.bin_dir, cli)
|
||||
self.log = log.getChild('lbcwallet')
|
||||
self.protocol = None
|
||||
self.daemon_bin = os.path.join(self.bin_dir, daemon)
|
||||
self.cli_bin = os.path.join(self.bin_dir, daemon)
|
||||
self.log = log.getChild('hub')
|
||||
self.transport = None
|
||||
self.protocol = None
|
||||
self.hostname = 'localhost'
|
||||
self.lbcd_rpcport = 29245
|
||||
self.lbcwallet_rpcport = 29244
|
||||
self.rpcuser = 'rpcuser'
|
||||
self.rpcpassword = 'rpcpassword'
|
||||
self.data_path = tempfile.mkdtemp()
|
||||
self.stopped = True
|
||||
self.rpcport = 50051 # avoid conflict with default rpc port
|
||||
self.stopped = False
|
||||
self.restart_ready = asyncio.Event()
|
||||
self.restart_ready.set()
|
||||
self.running = asyncio.Event()
|
||||
self.block_expected = 0
|
||||
self.mining_addr = ''
|
||||
|
||||
@property
|
||||
def rpc_url(self):
|
||||
# FIXME: somehow the hub/sdk doesn't learn the blocks through the Wallet RPC port, why?
|
||||
# return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcwallet_rpcport}/'
|
||||
return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcd_rpcport}/'
|
||||
|
||||
def is_expected_block(self, e: BlockHeightEvent):
|
||||
return self.block_expected == e.height
|
||||
|
||||
@property
|
||||
def exists(self):
|
||||
return (
|
||||
os.path.exists(self.lbcwallet_bin)
|
||||
os.path.exists(self.cli_bin) and
|
||||
os.path.exists(self.daemon_bin)
|
||||
)
|
||||
|
||||
def download(self):
|
||||
uname = platform.uname()
|
||||
target_os = str.lower(uname.system)
|
||||
target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
|
||||
target_platform = target_os + '_' + target_arch
|
||||
self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
|
||||
|
||||
downloaded_file = os.path.join(
|
||||
self.bin_dir,
|
||||
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
|
||||
|
@ -536,12 +534,15 @@ class LBCWalletNode:
|
|||
with zipfile.ZipFile(downloaded_file) as dotzip:
|
||||
dotzip.extractall(self.bin_dir)
|
||||
# zipfile bug https://bugs.python.org/issue15795
|
||||
os.chmod(self.lbcwallet_bin, 0o755)
|
||||
os.chmod(self.cli_bin, 0o755)
|
||||
os.chmod(self.daemon_bin, 0o755)
|
||||
|
||||
elif downloaded_file.endswith('.tar.gz'):
|
||||
with tarfile.open(downloaded_file) as tar:
|
||||
tar.extractall(self.bin_dir)
|
||||
|
||||
os.chmod(self.daemon_bin, 0o755)
|
||||
|
||||
return self.exists
|
||||
|
||||
def ensure(self):
|
||||
|
@ -551,125 +552,40 @@ class LBCWalletNode:
|
|||
assert self.ensure()
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
|
||||
command = [
|
||||
self.lbcwallet_bin,
|
||||
'--noservertls', '--noclienttls',
|
||||
'--regtest',
|
||||
f'--rpcconnect=127.0.0.1:{self.lbcd_rpcport}', f'--rpclisten=127.0.0.1:{self.lbcwallet_rpcport}',
|
||||
'--createtemp', f'--appdata={self.data_path}',
|
||||
f'--username={self.rpcuser}', f'--password={self.rpcpassword}'
|
||||
self.daemon_bin, 'serve', '--esindex', self.spv_node.index_name + 'claims', '--debug'
|
||||
]
|
||||
self.log.info(' '.join(command))
|
||||
try:
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
WalletProcess, *command
|
||||
)
|
||||
self.protocol.transport = self.transport
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
self.stopped = False
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
log.exception('failed to start lbcwallet', exc_info=e)
|
||||
|
||||
def cleanup(self):
|
||||
assert self.stopped
|
||||
shutil.rmtree(self.data_path, ignore_errors=True)
|
||||
while not self.stopped:
|
||||
if self.running.is_set():
|
||||
await asyncio.sleep(1)
|
||||
continue
|
||||
await self.restart_ready.wait()
|
||||
try:
|
||||
if not self.debug:
|
||||
self.transport, self.protocol = await loop.subprocess_exec(
|
||||
HubProcess, *command
|
||||
)
|
||||
await self.protocol.ready.wait()
|
||||
assert not self.protocol.stopped.is_set()
|
||||
self.running.set()
|
||||
except asyncio.CancelledError:
|
||||
self.running.clear()
|
||||
raise
|
||||
except Exception as e:
|
||||
self.running.clear()
|
||||
log.exception('failed to start hub', exc_info=e)
|
||||
|
||||
async def stop(self, cleanup=True):
|
||||
if self.stopped:
|
||||
return
|
||||
self.stopped = True
|
||||
try:
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
except Exception as e:
|
||||
log.exception('failed to stop lbcwallet', exc_info=e)
|
||||
raise
|
||||
if not self.debug:
|
||||
self.transport.terminate()
|
||||
await self.protocol.stopped.wait()
|
||||
self.transport.close()
|
||||
finally:
|
||||
self.log.info("Done shutting down " + self.lbcwallet_bin)
|
||||
self.stopped = True
|
||||
if cleanup:
|
||||
self.cleanup()
|
||||
self.running.clear()
|
||||
|
||||
async def _cli_cmnd(self, *args):
|
||||
cmnd_args = [
|
||||
self.cli_bin,
|
||||
f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}', '--notls', '--regtest', '--wallet'
|
||||
] + list(args)
|
||||
self.log.info(' '.join(cmnd_args))
|
||||
loop = asyncio.get_event_loop()
|
||||
asyncio.get_child_watcher().attach_loop(loop)
|
||||
process = await asyncio.create_subprocess_exec(
|
||||
*cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
|
||||
)
|
||||
out, err = await process.communicate()
|
||||
result = out.decode().strip()
|
||||
err = err.decode().strip()
|
||||
if len(result) <= 0 and err.startswith('-'):
|
||||
raise Exception(err)
|
||||
if err and 'creating a default config file' not in err:
|
||||
log.warning(err)
|
||||
self.log.info(result)
|
||||
if result.startswith('error code'):
|
||||
raise Exception(result)
|
||||
return result
|
||||
|
||||
def generate(self, blocks):
|
||||
self.block_expected += blocks
|
||||
return self._cli_cmnd('generatetoaddress', str(blocks), self.mining_addr)
|
||||
|
||||
def generate_to_address(self, blocks, addr):
|
||||
self.block_expected += blocks
|
||||
return self._cli_cmnd('generatetoaddress', str(blocks), addr)
|
||||
|
||||
def wallet_passphrase(self, passphrase, timeout):
|
||||
return self._cli_cmnd('walletpassphrase', passphrase, str(timeout))
|
||||
|
||||
def invalidate_block(self, blockhash):
|
||||
return self._cli_cmnd('invalidateblock', blockhash)
|
||||
|
||||
def get_block_hash(self, block):
|
||||
return self._cli_cmnd('getblockhash', str(block))
|
||||
|
||||
def sendrawtransaction(self, tx):
|
||||
return self._cli_cmnd('sendrawtransaction', tx)
|
||||
|
||||
async def get_block(self, block_hash):
|
||||
return json.loads(await self._cli_cmnd('getblock', block_hash, '1'))
|
||||
|
||||
def get_raw_change_address(self):
|
||||
return self._cli_cmnd('getrawchangeaddress')
|
||||
|
||||
def get_new_address(self, address_type='legacy'):
|
||||
return self._cli_cmnd('getnewaddress', "", address_type)
|
||||
|
||||
async def get_balance(self):
|
||||
return await self._cli_cmnd('getbalance')
|
||||
|
||||
def send_to_address(self, address, amount):
|
||||
return self._cli_cmnd('sendtoaddress', address, str(amount))
|
||||
|
||||
def send_raw_transaction(self, tx):
|
||||
return self._cli_cmnd('sendrawtransaction', tx.decode())
|
||||
|
||||
def create_raw_transaction(self, inputs, outputs):
|
||||
return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))
|
||||
|
||||
async def sign_raw_transaction_with_wallet(self, tx):
|
||||
# the "withwallet" portion should only come into play if we are doing segwit.
|
||||
# and "withwallet" doesn't exist on lbcd yet.
|
||||
result = await self._cli_cmnd('signrawtransaction', tx)
|
||||
return json.loads(result)['hex'].encode()
|
||||
|
||||
def decode_raw_transaction(self, tx):
|
||||
return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())
|
||||
|
||||
def get_raw_transaction(self, txid):
|
||||
return self._cli_cmnd('getrawtransaction', txid, '1')
|
||||
def cleanup(self):
|
||||
pass
|
||||
|
|
|
@ -61,10 +61,8 @@ class ConductorService:
|
|||
#set_logging(
|
||||
# self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
|
||||
#)
|
||||
self.stack.lbcd_started or await self.stack.start_lbcd()
|
||||
self.send_message({'type': 'service', 'name': 'lbcd', 'port': self.stack.lbcd_node.port})
|
||||
self.stack.lbcwallet_started or await self.stack.start_lbcwallet()
|
||||
self.send_message({'type': 'service', 'name': 'lbcwallet', 'port': self.stack.lbcwallet_node.port})
|
||||
self.stack.blockchain_started or await self.stack.start_blockchain()
|
||||
self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
|
||||
self.stack.spv_started or await self.stack.start_spv()
|
||||
self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
|
||||
self.stack.wallet_started or await self.stack.start_wallet()
|
||||
|
@ -76,7 +74,7 @@ class ConductorService:
|
|||
async def generate(self, request):
|
||||
data = await request.post()
|
||||
blocks = data.get('blocks', 1)
|
||||
await self.stack.lbcwallet_node.generate(int(blocks))
|
||||
await self.stack.blockchain_node.generate(int(blocks))
|
||||
return json_response({'blocks': blocks})
|
||||
|
||||
async def transfer(self, request):
|
||||
|
@ -87,14 +85,11 @@ class ConductorService:
|
|||
if not address:
|
||||
raise ValueError("No address was provided.")
|
||||
amount = data.get('amount', 1)
|
||||
txid = await self.stack.blockchain_node.send_to_address(address, amount)
|
||||
if self.stack.wallet_started:
|
||||
watcher = self.stack.wallet_node.ledger.on_transaction.where(
|
||||
lambda e: e.address == address # and e.tx.id == txid -- might stall; see send_to_address_and_wait
|
||||
await self.stack.wallet_node.ledger.on_transaction.where(
|
||||
lambda e: e.tx.id == txid and e.address == address
|
||||
)
|
||||
txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
|
||||
await watcher
|
||||
else:
|
||||
txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
|
||||
return json_response({
|
||||
'address': address,
|
||||
'amount': amount,
|
||||
|
@ -103,7 +98,7 @@ class ConductorService:
|
|||
|
||||
async def balance(self, _):
|
||||
return json_response({
|
||||
'balance': await self.stack.lbcwallet_node.get_balance()
|
||||
'balance': await self.stack.blockchain_node.get_balance()
|
||||
})
|
||||
|
||||
async def log(self, request):
|
||||
|
@ -134,7 +129,7 @@ class ConductorService:
|
|||
'type': 'status',
|
||||
'height': self.stack.wallet_node.ledger.headers.height,
|
||||
'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
|
||||
'miner': await self.stack.lbcwallet_node.get_balance()
|
||||
'miner': await self.stack.blockchain_node.get_balance()
|
||||
})
|
||||
|
||||
def send_message(self, msg):
|
||||
|
|
|
@ -395,8 +395,8 @@ class RPCSession(SessionBase):
|
|||
namespace=NAMESPACE, labelnames=("version",)
|
||||
)
|
||||
|
||||
def __init__(self, *, framer=None, connection=None):
|
||||
super().__init__(framer=framer)
|
||||
def __init__(self, *, framer=None, loop=None, connection=None):
|
||||
super().__init__(framer=framer, loop=loop)
|
||||
self.connection = connection or self.default_connection()
|
||||
self.client_version = 'unknown'
|
||||
|
||||
|
|
|
@ -17,7 +17,6 @@ OP_HASH160 = 0xa9
OP_EQUALVERIFY = 0x88
OP_CHECKSIG = 0xac
OP_CHECKMULTISIG = 0xae
OP_CHECKLOCKTIMEVERIFY = 0xb1
OP_EQUAL = 0x87
OP_PUSHDATA1 = 0x4c
OP_PUSHDATA2 = 0x4d

@ -277,7 +276,7 @@ class Template:
elif isinstance(opcode, PUSH_INTEGER):
data = values[opcode.name]
source.write_many(push_data(
data.to_bytes((data.bit_length() + 8) // 8, byteorder='little', signed=True)
data.to_bytes((data.bit_length() + 7) // 8, byteorder='little')
))
elif isinstance(opcode, PUSH_SUBSCRIPT):
data = values[opcode.name]
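The two to_bytes calls in the hunk above differ only for values whose top bit is set: the signed form reserves a bit for the sign and so pads with an extra zero byte, keeping the pushed integer from being read back as negative, while the unsigned form emits the minimal little-endian bytes. A small worked comparison of the two encodings:

def encode_signed(data: int) -> bytes:
    # minimal signed encoding: one spare bit for the sign
    return data.to_bytes((data.bit_length() + 8) // 8, byteorder='little', signed=True)

def encode_unsigned(data: int) -> bytes:
    return data.to_bytes((data.bit_length() + 7) // 8, byteorder='little')

print(encode_signed(500).hex())    # 'f401'
print(encode_unsigned(500).hex())  # 'f401'  -- identical while the top bit is clear
print(encode_signed(128).hex())    # '8000'  -- padding byte keeps the value positive
print(encode_unsigned(128).hex())  # '80'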
@ -358,27 +357,19 @@ class InputScript(Script):
|
|||
REDEEM_PUBKEY_HASH = Template('pubkey_hash', (
|
||||
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey')
|
||||
))
|
||||
MULTI_SIG_SCRIPT = Template('multi_sig', (
|
||||
REDEEM_SCRIPT = Template('script', (
|
||||
SMALL_INTEGER('signatures_count'), PUSH_MANY('pubkeys'), SMALL_INTEGER('pubkeys_count'),
|
||||
OP_CHECKMULTISIG
|
||||
))
|
||||
REDEEM_SCRIPT_HASH_MULTI_SIG = Template('script_hash+multi_sig', (
|
||||
OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', MULTI_SIG_SCRIPT)
|
||||
))
|
||||
TIME_LOCK_SCRIPT = Template('timelock', (
|
||||
PUSH_INTEGER('height'), OP_CHECKLOCKTIMEVERIFY, OP_DROP,
|
||||
# rest is identical to OutputScript.PAY_PUBKEY_HASH:
|
||||
OP_DUP, OP_HASH160, PUSH_SINGLE('pubkey_hash'), OP_EQUALVERIFY, OP_CHECKSIG
|
||||
))
|
||||
REDEEM_SCRIPT_HASH_TIME_LOCK = Template('script_hash+timelock', (
|
||||
PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey'), PUSH_SUBSCRIPT('script', TIME_LOCK_SCRIPT)
|
||||
REDEEM_SCRIPT_HASH = Template('script_hash', (
|
||||
OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', REDEEM_SCRIPT)
|
||||
))
|
||||
|
||||
templates = [
|
||||
REDEEM_PUBKEY,
|
||||
REDEEM_PUBKEY_HASH,
|
||||
REDEEM_SCRIPT_HASH_TIME_LOCK,
|
||||
REDEEM_SCRIPT_HASH_MULTI_SIG,
|
||||
REDEEM_SCRIPT_HASH,
|
||||
REDEEM_SCRIPT
|
||||
]
|
||||
|
||||
@classmethod
|
||||
|
@ -389,38 +380,20 @@ class InputScript(Script):
|
|||
})
|
||||
|
||||
@classmethod
|
||||
def redeem_multi_sig_script_hash(cls, signatures, pubkeys):
|
||||
return cls(template=cls.REDEEM_SCRIPT_HASH_MULTI_SIG, values={
|
||||
def redeem_script_hash(cls, signatures, pubkeys):
|
||||
return cls(template=cls.REDEEM_SCRIPT_HASH, values={
|
||||
'signatures': signatures,
|
||||
'script': cls(template=cls.MULTI_SIG_SCRIPT, values={
|
||||
'signatures_count': len(signatures),
|
||||
'pubkeys': pubkeys,
|
||||
'pubkeys_count': len(pubkeys)
|
||||
})
|
||||
'script': cls.redeem_script(signatures, pubkeys)
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def redeem_time_lock_script_hash(cls, signature, pubkey, height=None, pubkey_hash=None, script_source=None):
|
||||
if height and pubkey_hash:
|
||||
script = cls(template=cls.TIME_LOCK_SCRIPT, values={
|
||||
'height': height,
|
||||
'pubkey_hash': pubkey_hash
|
||||
})
|
||||
elif script_source:
|
||||
script = cls(source=script_source, template=cls.TIME_LOCK_SCRIPT)
|
||||
script.parse(script.template)
|
||||
else:
|
||||
raise ValueError("script_source or both height and pubkey_hash are required.")
|
||||
return cls(template=cls.REDEEM_SCRIPT_HASH_TIME_LOCK, values={
|
||||
'signature': signature,
|
||||
'pubkey': pubkey,
|
||||
'script': script
|
||||
def redeem_script(cls, signatures, pubkeys):
|
||||
return cls(template=cls.REDEEM_SCRIPT, values={
|
||||
'signatures_count': len(signatures),
|
||||
'pubkeys': pubkeys,
|
||||
'pubkeys_count': len(pubkeys)
|
||||
})
|
||||
|
||||
@property
|
||||
def is_script_hash(self):
|
||||
return self.template.name.startswith('script_hash+')
|
||||
|
||||
|
||||
class OutputScript(Script):
|
||||
|
||||
|
@ -487,6 +460,21 @@ class OutputScript(Script):
|
|||
UPDATE_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
|
||||
))
|
||||
|
||||
SELL_SCRIPT = Template('sell_script', (
|
||||
OP_VERIFY, OP_DROP, OP_DROP, OP_DROP, PUSH_INTEGER('price'), OP_PRICECHECK
|
||||
))
|
||||
SELL_CLAIM = Template('sell_claim+pay_script_hash', (
|
||||
OP_SELL_CLAIM, PUSH_SINGLE('claim_id'), PUSH_SUBSCRIPT('sell_script', SELL_SCRIPT),
|
||||
PUSH_SUBSCRIPT('receive_script', InputScript.REDEEM_SCRIPT), OP_2DROP, OP_2DROP
|
||||
) + PAY_SCRIPT_HASH.opcodes)
|
||||
|
||||
BUY_CLAIM = Template('buy_claim+pay_script_hash', (
|
||||
OP_BUY_CLAIM, PUSH_SINGLE('sell_id'),
|
||||
PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim_version'),
|
||||
PUSH_SINGLE('owner_pubkey_hash'), PUSH_SINGLE('negotiation_signature'),
|
||||
OP_2DROP, OP_2DROP, OP_2DROP,
|
||||
) + PAY_SCRIPT_HASH.opcodes)
|
||||
|
||||
templates = [
|
||||
PAY_PUBKEY_FULL,
|
||||
PAY_PUBKEY_HASH,
|
||||
|
@ -501,6 +489,8 @@ class OutputScript(Script):
|
|||
SUPPORT_CLAIM_DATA_SCRIPT,
|
||||
UPDATE_CLAIM_PUBKEY,
|
||||
UPDATE_CLAIM_SCRIPT,
|
||||
SELL_CLAIM, SELL_SCRIPT,
|
||||
BUY_CLAIM,
|
||||
]
|
||||
|
||||
@classmethod
|
||||
|
@ -560,6 +550,30 @@ class OutputScript(Script):
|
|||
'pubkey_hash': pubkey_hash
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def sell_script(cls, price):
|
||||
return cls(template=cls.SELL_SCRIPT, values={
|
||||
'price': price,
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def sell_claim(cls, claim_id, price, signatures, pubkeys):
|
||||
return cls(template=cls.SELL_CLAIM, values={
|
||||
'claim_id': claim_id,
|
||||
'sell_script': OutputScript.sell_script(price),
|
||||
'receive_script': InputScript.redeem_script(signatures, pubkeys)
|
||||
})
|
||||
|
||||
@classmethod
|
||||
def buy_claim(cls, sell_id, claim_id, claim_version, owner_pubkey_hash, negotiation_signature):
|
||||
return cls(template=cls.BUY_CLAIM, values={
|
||||
'sell_id': sell_id,
|
||||
'claim_id': claim_id,
|
||||
'claim_version': claim_version,
|
||||
'owner_pubkey_hash': owner_pubkey_hash,
|
||||
'negotiation_signature': negotiation_signature,
|
||||
})
|
||||
|
||||
@property
|
||||
def is_pay_pubkey_hash(self):
|
||||
return self.template.name.endswith('pay_pubkey_hash')
|
||||
|
@ -588,6 +602,17 @@ class OutputScript(Script):
|
|||
def is_support_claim_data(self):
|
||||
return self.template.name.startswith('support_claim+data+')
|
||||
|
||||
@property
|
||||
def is_sell_claim(self):
|
||||
return self.template.name.startswith('sell_claim+')
|
||||
|
||||
@property
|
||||
def is_buy_claim(self):
|
||||
return self.template.name.startswith('buy_claim+')
|
||||
|
||||
@property
|
||||
def is_claim_involved(self):
|
||||
return any((self.is_claim_name, self.is_support_claim, self.is_update_claim))
|
||||
return any((
|
||||
self.is_claim_name, self.is_support_claim, self.is_update_claim,
|
||||
self.is_sell_claim, self.is_buy_claim
|
||||
))
|
||||
|
|
1694
lbry/wallet/server/block_processor.py
Normal file
File diff suppressed because it is too large
236
lbry/wallet/server/chain_reader.py
Normal file
@ -0,0 +1,236 @@
|
|||
import signal
|
||||
import logging
|
||||
import asyncio
|
||||
from concurrent.futures.thread import ThreadPoolExecutor
|
||||
import typing
|
||||
|
||||
import lbry
|
||||
from lbry.wallet.server.mempool import MemPool
|
||||
from lbry.wallet.server.db.prefixes import DBState
|
||||
from lbry.wallet.server.udp import StatusServer
|
||||
from lbry.wallet.server.db.db import HubDB
|
||||
from lbry.wallet.server.db.elasticsearch.notifier import ElasticNotifierClientProtocol
|
||||
from lbry.wallet.server.session import LBRYSessionManager
|
||||
from lbry.prometheus import PrometheusServer
|
||||
|
||||
|
||||
class BlockchainReader:
|
||||
def __init__(self, env, secondary_name: str, thread_workers: int = 1, thread_prefix: str = 'blockchain-reader'):
|
||||
self.env = env
|
||||
self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)
|
||||
self.shutdown_event = asyncio.Event()
|
||||
self.cancellable_tasks = []
|
||||
self._executor = ThreadPoolExecutor(thread_workers, thread_name_prefix=thread_prefix)
|
||||
|
||||
self.db = HubDB(
|
||||
env.coin, env.db_dir, env.cache_MB, env.reorg_limit, env.cache_all_claim_txos, env.cache_all_tx_hashes,
|
||||
secondary_name=secondary_name, max_open_files=-1, blocking_channel_ids=env.blocking_channel_ids,
|
||||
filtering_channel_ids=env.filtering_channel_ids, executor=self._executor
|
||||
)
|
||||
self.last_state: typing.Optional[DBState] = None
|
||||
self._refresh_interval = 0.1
|
||||
self._lock = asyncio.Lock()
|
||||
|
||||
def _detect_changes(self):
|
||||
try:
|
||||
self.db.prefix_db.try_catch_up_with_primary()
|
||||
except:
|
||||
self.log.exception('failed to update secondary db')
|
||||
raise
|
||||
state = self.db.prefix_db.db_state.get()
|
||||
if not state or state.height <= 0:
|
||||
return
|
||||
# if state and self.last_state and self.db.headers and self.last_state.tip == self.db.coin.header_hash(self.db.headers[-1]):
|
||||
# return
|
||||
if self.last_state and self.last_state.height > state.height:
|
||||
self.log.warning("reorg detected, waiting until the writer has flushed the new blocks to advance")
|
||||
return
|
||||
last_height = 0 if not self.last_state else self.last_state.height
|
||||
if self.last_state:
|
||||
while True:
|
||||
if self.db.headers[-1] == self.db.prefix_db.header.get(last_height, deserialize_value=False):
|
||||
self.log.info("connects to block %i", last_height)
|
||||
break
|
||||
else:
|
||||
self.log.warning("disconnect block %i", last_height)
|
||||
self.unwind()
|
||||
last_height -= 1
|
||||
self.db.read_db_state()
|
||||
if not self.last_state or last_height < state.height:
|
||||
for height in range(last_height + 1, state.height + 1):
|
||||
self.log.info("advancing to %i", height)
|
||||
self.advance(height)
|
||||
self.clear_caches()
|
||||
self.last_state = state
|
||||
|
||||
async def poll_for_changes(self):
|
||||
await asyncio.get_event_loop().run_in_executor(self._executor, self._detect_changes)
|
||||
|
||||
async def refresh_blocks_forever(self, synchronized: asyncio.Event):
|
||||
while True:
|
||||
try:
|
||||
async with self._lock:
|
||||
await self.poll_for_changes()
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except:
|
||||
self.log.exception("blockchain reader main loop encountered an unexpected error")
|
||||
raise
|
||||
await asyncio.sleep(self._refresh_interval)
|
||||
synchronized.set()
|
||||
|
||||
def clear_caches(self):
|
||||
pass
|
||||
|
||||
def advance(self, height: int):
|
||||
tx_count = self.db.prefix_db.tx_count.get(height).tx_count
|
||||
assert tx_count not in self.db.tx_counts, f'boom {tx_count} in {len(self.db.tx_counts)} tx counts'
|
||||
assert len(self.db.tx_counts) == height, f"{len(self.db.tx_counts)} != {height}"
|
||||
self.db.tx_counts.append(tx_count)
|
||||
self.db.headers.append(self.db.prefix_db.header.get(height, deserialize_value=False))
|
||||
|
||||
def unwind(self):
|
||||
self.db.tx_counts.pop()
|
||||
self.db.headers.pop()
|
||||
|
||||
|
||||
class BlockchainReaderServer(BlockchainReader):
|
||||
def __init__(self, env):
|
||||
super().__init__(env, 'lbry-reader', thread_workers=1, thread_prefix='hub-worker')
|
||||
self.history_cache = {}
|
||||
self.resolve_outputs_cache = {}
|
||||
self.resolve_cache = {}
|
||||
self.notifications_to_send = []
|
||||
self.status_server = StatusServer()
|
||||
self.daemon = env.coin.DAEMON(env.coin, env.daemon_url) # only needed for broadcasting txs
|
||||
self.prometheus_server: typing.Optional[PrometheusServer] = None
|
||||
self.mempool = MemPool(self.env.coin, self.db)
|
||||
self.session_manager = LBRYSessionManager(
|
||||
env, self.db, self.mempool, self.history_cache, self.resolve_cache,
|
||||
self.resolve_outputs_cache, self.daemon,
|
||||
self.shutdown_event,
|
||||
on_available_callback=self.status_server.set_available,
|
||||
on_unavailable_callback=self.status_server.set_unavailable
|
||||
)
|
||||
self.mempool.session_manager = self.session_manager
|
||||
self.es_notifications = asyncio.Queue()
|
||||
self.es_notification_client = ElasticNotifierClientProtocol(self.es_notifications)
|
||||
self.synchronized = asyncio.Event()
|
||||
self._es_height = None
|
||||
self._es_block_hash = None
|
||||
|
||||
def clear_caches(self):
|
||||
self.history_cache.clear()
|
||||
self.resolve_outputs_cache.clear()
|
||||
self.resolve_cache.clear()
|
||||
# self.clear_search_cache()
|
||||
# self.mempool.notified_mempool_txs.clear()
|
||||
|
||||
def clear_search_cache(self):
|
||||
self.session_manager.search_index.clear_caches()
|
||||
|
||||
def advance(self, height: int):
|
||||
super().advance(height)
|
||||
touched_hashXs = self.db.prefix_db.touched_hashX.get(height).touched_hashXs
|
||||
self.notifications_to_send.append((set(touched_hashXs), height))
|
||||
|
||||
def _detect_changes(self):
|
||||
super()._detect_changes()
|
||||
self.mempool.raw_mempool.clear()
|
||||
self.mempool.raw_mempool.update(
|
||||
{k.tx_hash: v.raw_tx for k, v in self.db.prefix_db.mempool_tx.iterate()}
|
||||
)
|
||||
|
||||
async def poll_for_changes(self):
|
||||
await super().poll_for_changes()
|
||||
self.status_server.set_height(self.db.fs_height, self.db.db_tip)
|
||||
if self.notifications_to_send:
|
||||
for (touched, height) in self.notifications_to_send:
|
||||
await self.mempool.on_block(touched, height)
|
||||
self.log.info("reader advanced to %i", height)
|
||||
if self._es_height == self.db.db_height:
|
||||
self.synchronized.set()
|
||||
await self.mempool.refresh_hashes(self.db.db_height)
|
||||
self.notifications_to_send.clear()
|
||||
|
||||
async def receive_es_notifications(self, synchronized: asyncio.Event):
|
||||
await asyncio.get_event_loop().create_connection(
|
||||
lambda: self.es_notification_client, '127.0.0.1', self.env.elastic_notifier_port
|
||||
)
|
||||
synchronized.set()
|
||||
try:
|
||||
while True:
|
||||
self._es_height, self._es_block_hash = await self.es_notifications.get()
|
||||
self.clear_search_cache()
|
||||
if self.last_state and self._es_block_hash == self.last_state.tip:
|
||||
self.synchronized.set()
|
||||
self.log.info("es and reader are in sync")
|
||||
else:
|
||||
self.log.info("es and reader are not yet in sync %s vs %s", self._es_height, self.db.db_height)
|
||||
finally:
|
||||
self.es_notification_client.close()
|
||||
|
||||
async def start(self):
|
||||
env = self.env
|
||||
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
|
||||
self.log.info(f'software version: {lbry.__version__}')
|
||||
self.log.info(f'supported protocol versions: {min_str}-{max_str}')
|
||||
self.log.info(f'event loop policy: {env.loop_policy}')
|
||||
self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks')
|
||||
await self.daemon.height()
|
||||
|
||||
def _start_cancellable(run, *args):
|
||||
_flag = asyncio.Event()
|
||||
self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
|
||||
return _flag.wait()
|
||||
|
||||
self.db.open_db()
|
||||
await self.db.initialize_caches()
|
||||
|
||||
self.last_state = self.db.read_db_state()
|
||||
|
||||
await self.start_prometheus()
|
||||
if self.env.udp_port:
|
||||
await self.status_server.start(
|
||||
0, bytes.fromhex(self.env.coin.GENESIS_HASH)[::-1], self.env.country,
|
||||
self.env.host, self.env.udp_port, self.env.allow_lan_udp
|
||||
)
|
||||
await _start_cancellable(self.receive_es_notifications)
|
||||
await _start_cancellable(self.refresh_blocks_forever)
|
||||
await self.session_manager.search_index.start()
|
||||
await _start_cancellable(self.session_manager.serve, self.mempool)
|
||||
|
||||
async def stop(self):
|
||||
self.status_server.stop()
|
||||
async with self._lock:
|
||||
for task in reversed(self.cancellable_tasks):
|
||||
task.cancel()
|
||||
await asyncio.wait(self.cancellable_tasks)
|
||||
self.session_manager.search_index.stop()
|
||||
self.db.close()
|
||||
if self.prometheus_server:
|
||||
await self.prometheus_server.stop()
|
||||
self.prometheus_server = None
|
||||
await self.daemon.close()
|
||||
self._executor.shutdown(wait=True)
|
||||
self.shutdown_event.set()
|
||||
|
||||
def run(self):
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
def __exit():
|
||||
raise SystemExit()
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
||||
loop.run_until_complete(self.start())
|
||||
loop.run_until_complete(self.shutdown_event.wait())
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
pass
|
||||
finally:
|
||||
loop.run_until_complete(self.stop())
|
||||
|
||||
async def start_prometheus(self):
|
||||
if not self.prometheus_server and self.env.prometheus_port:
|
||||
self.prometheus_server = PrometheusServer()
|
||||
await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port)
|
56
lbry/wallet/server/cli.py
Normal file
@ -0,0 +1,56 @@
import logging
import traceback
import argparse
from lbry.wallet.server.env import Env
from lbry.wallet.server.block_processor import BlockProcessor
from lbry.wallet.server.chain_reader import BlockchainReaderServer
from lbry.wallet.server.db.elasticsearch.sync import ElasticWriter

def get_args_and_setup_logging(name):
parser = argparse.ArgumentParser(
prog=name
)
Env.contribute_to_arg_parser(parser)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
logging.getLogger('aiohttp').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
return args

def run_writer_forever():
args = get_args_and_setup_logging('lbry-hub-writer')
try:
block_processor = BlockProcessor(Env.from_arg_parser(args))
block_processor.run()
except Exception:
traceback.print_exc()
logging.critical('block processor terminated abnormally')
else:
logging.info('block processor terminated normally')

def run_server_forever():
args = get_args_and_setup_logging('lbry-hub-server')

try:
server = BlockchainReaderServer(Env.from_arg_parser(args))
server.run()
except Exception:
traceback.print_exc()
logging.critical('server terminated abnormally')
else:
logging.info('server terminated normally')

def run_es_sync_forever():
args = get_args_and_setup_logging('lbry-hub-elastic-sync')
try:
server = ElasticWriter(Env.from_arg_parser(args))
server.run()
except Exception:
traceback.print_exc()
logging.critical('es writer terminated abnormally')
else:
logging.info('es writer terminated normally')
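These three entry points are intended to run as separate, long-lived processes over the same database directory: the writer owns the primary database while the reader and the elastic sync open it as secondaries (see chain_reader.py above). A rough sketch of launching two of them from one Python script, purely for illustration; in practice each runs from its own console command with the flags contributed by Env (the child processes here simply inherit and re-parse the parent's command line):

from multiprocessing import Process
from lbry.wallet.server import cli

if __name__ == '__main__':
    # both functions parse the same Env-contributed arguments from sys.argv
    writer = Process(target=cli.run_writer_forever)
    reader = Process(target=cli.run_server_forever)
    writer.start()
    reader.start()
    writer.join()
    reader.join()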
381
lbry/wallet/server/coin.py
Normal file
@ -0,0 +1,381 @@
|
|||
import re
|
||||
import struct
|
||||
from typing import List
|
||||
from hashlib import sha256
|
||||
from decimal import Decimal
|
||||
from collections import namedtuple
|
||||
|
||||
import lbry.wallet.server.tx as lib_tx
|
||||
from lbry.wallet.script import OutputScript, OP_CLAIM_NAME, OP_UPDATE_CLAIM, OP_SUPPORT_CLAIM
|
||||
from lbry.wallet.server.tx import DeserializerSegWit
|
||||
from lbry.wallet.server.util import cachedproperty, subclasses
|
||||
from lbry.wallet.server.hash import Base58, hash160, double_sha256, hash_to_hex_str, HASHX_LEN
|
||||
from lbry.wallet.server.daemon import Daemon, LBCDaemon
|
||||
from lbry.wallet.server.script import ScriptPubKey, OpCodes
|
||||
from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
|
||||
|
||||
|
||||
Block = namedtuple("Block", "raw header transactions")
|
||||
OP_RETURN = OpCodes.OP_RETURN
|
||||
|
||||
|
||||
class CoinError(Exception):
|
||||
"""Exception raised for coin-related errors."""
|
||||
|
||||
|
||||
class Coin:
|
||||
"""Base class of coin hierarchy."""
|
||||
|
||||
REORG_LIMIT = 200
|
||||
# Not sure if these are coin-specific
|
||||
RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?')
|
||||
VALUE_PER_COIN = 100000000
|
||||
CHUNK_SIZE = 2016
|
||||
BASIC_HEADER_SIZE = 80
|
||||
STATIC_BLOCK_HEADERS = True
|
||||
SESSIONCLS = LBRYElectrumX
|
||||
DESERIALIZER = lib_tx.Deserializer
|
||||
DAEMON = Daemon
|
||||
SESSION_MANAGER = LBRYSessionManager
|
||||
HEADER_VALUES = [
|
||||
'version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce'
|
||||
]
|
||||
HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from
|
||||
MEMPOOL_HISTOGRAM_REFRESH_SECS = 500
|
||||
XPUB_VERBYTES = bytes('????', 'utf-8')
|
||||
XPRV_VERBYTES = bytes('????', 'utf-8')
|
||||
ENCODE_CHECK = Base58.encode_check
|
||||
DECODE_CHECK = Base58.decode_check
|
||||
# Peer discovery
|
||||
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
|
||||
PEERS: List[str] = []
|
||||
|
||||
@classmethod
|
||||
def lookup_coin_class(cls, name, net):
|
||||
"""Return a coin class given name and network.
|
||||
|
||||
Raise an exception if unrecognised."""
|
||||
req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
|
||||
for coin in subclasses(Coin):
|
||||
if (coin.NAME.lower() == name.lower() and
|
||||
coin.NET.lower() == net.lower()):
|
||||
coin_req_attrs = req_attrs.copy()
|
||||
missing = [attr for attr in coin_req_attrs
|
||||
if not hasattr(coin, attr)]
|
||||
if missing:
|
||||
raise CoinError(f'coin {name} missing {missing} attributes')
|
||||
return coin
|
||||
raise CoinError(f'unknown coin {name} and network {net} combination')
|
||||
|
||||
@classmethod
|
||||
def sanitize_url(cls, url):
|
||||
# Remove surrounding ws and trailing /s
|
||||
url = url.strip().rstrip('/')
|
||||
match = cls.RPC_URL_REGEX.match(url)
|
||||
if not match:
|
||||
raise CoinError(f'invalid daemon URL: "{url}"')
|
||||
if match.groups()[1] is None:
|
||||
url += f':{cls.RPC_PORT:d}'
|
||||
if not url.startswith('http://') and not url.startswith('https://'):
|
||||
url = 'http://' + url
|
||||
return url + '/'
|
||||
|
||||
@classmethod
|
||||
def genesis_block(cls, block):
|
||||
"""Check the Genesis block is the right one for this coin.
|
||||
|
||||
Return the block less its unspendable coinbase.
|
||||
"""
|
||||
header = cls.block_header(block, 0)
|
||||
header_hex_hash = hash_to_hex_str(cls.header_hash(header))
|
||||
if header_hex_hash != cls.GENESIS_HASH:
|
||||
raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')
|
||||
|
||||
return header + bytes(1)
|
||||
|
||||
@classmethod
|
||||
def hashX_from_script(cls, script):
|
||||
"""Returns a hashX from a script, or None if the script is provably
|
||||
unspendable so the output can be dropped.
|
||||
"""
|
||||
if script and script[0] == OP_RETURN:
|
||||
return None
|
||||
return sha256(script).digest()[:HASHX_LEN]
|
||||
|
||||
@staticmethod
|
||||
def lookup_xverbytes(verbytes):
|
||||
"""Return a (is_xpub, coin_class) pair given xpub/xprv verbytes."""
|
||||
# Order means BTC testnet will override NMC testnet
|
||||
for coin in subclasses(Coin):
|
||||
if verbytes == coin.XPUB_VERBYTES:
|
||||
return True, coin
|
||||
if verbytes == coin.XPRV_VERBYTES:
|
||||
return False, coin
|
||||
raise CoinError('version bytes unrecognised')
|
||||
|
||||
@classmethod
|
||||
def address_to_hashX(cls, address):
|
||||
"""Return a hashX given a coin address."""
|
||||
return cls.hashX_from_script(cls.pay_to_address_script(address))
|
||||
|
||||
@classmethod
|
||||
def P2PKH_address_from_hash160(cls, hash160):
|
||||
"""Return a P2PKH address given a public key."""
|
||||
assert len(hash160) == 20
|
||||
return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)
|
||||
|
||||
@classmethod
|
||||
def P2PKH_address_from_pubkey(cls, pubkey):
|
||||
"""Return a coin address given a public key."""
|
||||
return cls.P2PKH_address_from_hash160(hash160(pubkey))
|
||||
|
||||
@classmethod
|
||||
def P2SH_address_from_hash160(cls, hash160):
|
||||
"""Return a coin address given a hash160."""
|
||||
assert len(hash160) == 20
|
||||
return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160)
|
||||
|
||||
@classmethod
|
||||
def hash160_to_P2PKH_script(cls, hash160):
|
||||
return ScriptPubKey.P2PKH_script(hash160)
|
||||
|
||||
@classmethod
|
||||
def hash160_to_P2PKH_hashX(cls, hash160):
|
||||
return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160))
|
||||
|
||||
@classmethod
|
||||
def pay_to_address_script(cls, address):
|
||||
"""Return a pubkey script that pays to a pubkey hash.
|
||||
|
||||
Pass the address (either P2PKH or P2SH) in base58 form.
|
||||
"""
|
||||
raw = cls.DECODE_CHECK(address)
|
||||
|
||||
# Require version byte(s) plus hash160.
|
||||
verbyte = -1
|
||||
verlen = len(raw) - 20
|
||||
if verlen > 0:
|
||||
verbyte, hash160 = raw[:verlen], raw[verlen:]
|
||||
|
||||
if verbyte == cls.P2PKH_VERBYTE:
|
||||
return cls.hash160_to_P2PKH_script(hash160)
|
||||
if verbyte in cls.P2SH_VERBYTES:
|
||||
return ScriptPubKey.P2SH_script(hash160)
|
||||
|
||||
raise CoinError(f'invalid address: {address}')
|
||||
|
||||
@classmethod
|
||||
def privkey_WIF(cls, privkey_bytes, compressed):
|
||||
"""Return the private key encoded in Wallet Import Format."""
|
||||
payload = bytearray(cls.WIF_BYTE) + privkey_bytes
|
||||
if compressed:
|
||||
payload.append(0x01)
|
||||
return cls.ENCODE_CHECK(payload)
|
||||
|
||||
@classmethod
|
||||
def header_hash(cls, header):
|
||||
"""Given a header return hash"""
|
||||
return double_sha256(header)
|
||||
|
||||
@classmethod
|
||||
def header_prevhash(cls, header):
|
||||
"""Given a header return previous hash"""
|
||||
return header[4:36]
|
||||
|
||||
@classmethod
|
||||
def static_header_offset(cls, height):
|
||||
"""Given a header height return its offset in the headers file.
|
||||
|
||||
If header sizes change at some point, this is the only code
|
||||
that needs updating."""
|
||||
assert cls.STATIC_BLOCK_HEADERS
|
||||
return height * cls.BASIC_HEADER_SIZE
|
||||
|
||||
@classmethod
|
||||
def static_header_len(cls, height):
|
||||
"""Given a header height return its length."""
|
||||
return (cls.static_header_offset(height + 1)
|
||||
- cls.static_header_offset(height))
|
||||
|
||||
@classmethod
|
||||
def block_header(cls, block, height):
|
||||
"""Returns the block header given a block and its height."""
|
||||
return block[:cls.static_header_len(height)]
|
||||
|
||||
@classmethod
|
||||
def block(cls, raw_block, height):
|
||||
"""Return a Block namedtuple given a raw block and its height."""
|
||||
header = cls.block_header(raw_block, height)
|
||||
txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
|
||||
return Block(raw_block, header, txs)
|
||||
|
||||
@classmethod
|
||||
def transaction(cls, raw_tx: bytes):
|
||||
"""Return a Block namedtuple given a raw block and its height."""
|
||||
return cls.DESERIALIZER(raw_tx).read_tx()
|
||||
|
||||
@classmethod
|
||||
def decimal_value(cls, value):
|
||||
"""Return the number of standard coin units as a Decimal given a
|
||||
quantity of smallest units.
|
||||
|
||||
For example 1 BTC is returned for 100 million satoshis.
|
||||
"""
|
||||
return Decimal(value) / cls.VALUE_PER_COIN
|
||||
|
||||
@classmethod
|
||||
def electrum_header(cls, header, height):
|
||||
h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header)))
|
||||
# Add the height that is not present in the header itself
|
||||
h['block_height'] = height
|
||||
# Convert bytes to str
|
||||
h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash'])
|
||||
h['merkle_root'] = hash_to_hex_str(h['merkle_root'])
|
||||
return h
|
||||
|
||||
|
||||
class LBC(Coin):
|
||||
DAEMON = LBCDaemon
|
||||
SESSIONCLS = LBRYElectrumX
|
||||
SESSION_MANAGER = LBRYSessionManager
|
||||
DESERIALIZER = DeserializerSegWit
|
||||
NAME = "LBRY"
|
||||
SHORTNAME = "LBC"
|
||||
NET = "mainnet"
|
||||
BASIC_HEADER_SIZE = 112
|
||||
CHUNK_SIZE = 96
|
||||
XPUB_VERBYTES = bytes.fromhex("0488b21e")
|
||||
XPRV_VERBYTES = bytes.fromhex("0488ade4")
|
||||
P2PKH_VERBYTE = bytes.fromhex("55")
|
||||
P2SH_VERBYTES = bytes.fromhex("7A")
|
||||
WIF_BYTE = bytes.fromhex("1C")
|
||||
GENESIS_HASH = ('9c89283ba0f3227f6c03b70216b9f665'
|
||||
'f0118d5e0fa729cedf4fb34d6a34f463')
|
||||
TX_COUNT = 2716936
|
||||
TX_COUNT_HEIGHT = 329554
|
||||
TX_PER_BLOCK = 1
|
||||
RPC_PORT = 9245
|
||||
REORG_LIMIT = 200
|
||||
|
||||
nOriginalClaimExpirationTime = 262974
|
||||
nExtendedClaimExpirationTime = 2102400
|
||||
nExtendedClaimExpirationForkHeight = 400155
|
||||
nNormalizedNameForkHeight = 539940 # targeting 21 March 2019
|
||||
nMinTakeoverWorkaroundHeight = 496850
|
||||
nMaxTakeoverWorkaroundHeight = 658300 # targeting 30 Oct 2019
|
||||
nWitnessForkHeight = 680770 # targeting 11 Dec 2019
|
||||
nAllClaimsInMerkleForkHeight = 658310 # targeting 30 Oct 2019
|
||||
proportionalDelayFactor = 32
|
||||
maxTakeoverDelay = 4032
|
||||
|
||||
PEERS = [
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def genesis_block(cls, block):
|
||||
'''Check the Genesis block is the right one for this coin.
|
||||
|
||||
Return the block less its unspendable coinbase.
|
||||
'''
|
||||
header = cls.block_header(block, 0)
|
||||
header_hex_hash = hash_to_hex_str(cls.header_hash(header))
|
||||
if header_hex_hash != cls.GENESIS_HASH:
|
||||
raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')
|
||||
|
||||
return block
|
||||
|
||||
@classmethod
|
||||
def electrum_header(cls, header, height):
|
||||
version, = struct.unpack('<I', header[:4])
|
||||
timestamp, bits, nonce = struct.unpack('<III', header[100:112])
|
||||
return {
|
||||
'version': version,
|
||||
'prev_block_hash': hash_to_hex_str(header[4:36]),
|
||||
'merkle_root': hash_to_hex_str(header[36:68]),
|
||||
'claim_trie_root': hash_to_hex_str(header[68:100]),
|
||||
'timestamp': timestamp,
|
||||
'bits': bits,
|
||||
'nonce': nonce,
|
||||
'block_height': height,
|
||||
}
|
||||
|
||||
@cachedproperty
|
||||
def address_handlers(self):
|
||||
return ScriptPubKey.PayToHandlers(
|
||||
address=self.P2PKH_address_from_hash160,
|
||||
script_hash=self.P2SH_address_from_hash160,
|
||||
pubkey=self.P2PKH_address_from_pubkey,
|
||||
unspendable=lambda: None,
|
||||
strange=self.claim_address_handler,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def address_from_script(cls, script):
|
||||
'''Given a pk_script, return the address it pays to, or None.'''
|
||||
return ScriptPubKey.pay_to(cls.address_handlers, script)
|
||||
|
||||
@classmethod
|
||||
def claim_address_handler(cls, script):
|
||||
'''Parse a claim script, returns the address
|
||||
'''
|
||||
output = OutputScript(script)
|
||||
if output.is_pay_pubkey_hash:
|
||||
return cls.P2PKH_address_from_hash160(output.values['pubkey_hash'])
|
||||
if output.is_pay_script_hash:
|
||||
return cls.P2SH_address_from_hash160(output.values['script_hash'])
|
||||
if output.is_pay_pubkey:
|
||||
return cls.P2PKH_address_from_pubkey(output.values['pubkey'])
|
||||
if output.is_return_data:
|
||||
return None
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def hashX_from_script(cls, script):
|
||||
'''
|
||||
Overrides electrumx hashX from script by extracting addresses from claim scripts.
|
||||
'''
|
||||
if script and script[0] == OpCodes.OP_RETURN or not script:
|
||||
return None
|
||||
if script[0] in [
|
||||
OP_CLAIM_NAME,
|
||||
OP_UPDATE_CLAIM,
|
||||
OP_SUPPORT_CLAIM,
|
||||
]:
|
||||
return cls.address_to_hashX(cls.claim_address_handler(script))
|
||||
else:
|
||||
return sha256(script).digest()[:HASHX_LEN]
|
||||
|
||||
@classmethod
|
||||
def get_expiration_height(cls, last_updated_height: int, extended: bool = False) -> int:
|
||||
if extended:
|
||||
return last_updated_height + cls.nExtendedClaimExpirationTime
|
||||
if last_updated_height < cls.nExtendedClaimExpirationForkHeight:
|
||||
return last_updated_height + cls.nOriginalClaimExpirationTime
|
||||
return last_updated_height + cls.nExtendedClaimExpirationTime
|
||||
|
||||
@classmethod
|
||||
def get_delay_for_name(cls, blocks_of_continuous_ownership: int) -> int:
|
||||
return min(blocks_of_continuous_ownership // cls.proportionalDelayFactor, cls.maxTakeoverDelay)
|
||||
|
||||
|
||||
class LBCRegTest(LBC):
|
||||
NET = "regtest"
|
||||
GENESIS_HASH = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
|
||||
XPUB_VERBYTES = bytes.fromhex('043587cf')
|
||||
XPRV_VERBYTES = bytes.fromhex('04358394')
|
||||
P2PKH_VERBYTE = bytes.fromhex("6f")
|
||||
P2SH_VERBYTES = bytes.fromhex("c4")
|
||||
|
||||
nOriginalClaimExpirationTime = 500
|
||||
nExtendedClaimExpirationTime = 600
|
||||
nExtendedClaimExpirationForkHeight = 800
|
||||
nNormalizedNameForkHeight = 250
|
||||
nMinTakeoverWorkaroundHeight = -1
|
||||
nMaxTakeoverWorkaroundHeight = -1
|
||||
nWitnessForkHeight = 150
|
||||
nAllClaimsInMerkleForkHeight = 350
|
||||
|
||||
|
||||
class LBCTestNet(LBCRegTest):
|
||||
NET = "testnet"
|
||||
GENESIS_HASH = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
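The expiration and takeover constants defined on LBC above feed get_expiration_height and get_delay_for_name; a small sanity check using only those class attributes (an illustrative sketch that assumes the module and its imports resolve as written):

from lbry.wallet.server.coin import LBC

# Claims last updated before nExtendedClaimExpirationForkHeight keep the
# original expiration window; later ones get the extended window.
assert LBC.get_expiration_height(100000) == 100000 + LBC.nOriginalClaimExpirationTime
assert LBC.get_expiration_height(500000) == 500000 + LBC.nExtendedClaimExpirationTime

# One block of takeover delay per proportionalDelayFactor (32) blocks of
# continuous ownership, capped at maxTakeoverDelay (4032).
assert LBC.get_delay_for_name(320) == 10
assert LBC.get_delay_for_name(10 ** 6) == 4032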
375
lbry/wallet/server/daemon.py
Normal file
@@ -0,0 +1,375 @@
import asyncio
|
||||
import itertools
|
||||
import json
|
||||
import time
|
||||
from functools import wraps
|
||||
|
||||
import aiohttp
|
||||
from prometheus_client import Gauge, Histogram
|
||||
from lbry.utils import LRUCacheWithMetrics
|
||||
from lbry.wallet.rpc.jsonrpc import RPCError
|
||||
from lbry.wallet.server.util import hex_to_bytes, class_logger
|
||||
from lbry.wallet.rpc import JSONRPC
|
||||
|
||||
|
||||
class DaemonError(Exception):
|
||||
"""Raised when the daemon returns an error in its results."""
|
||||
|
||||
|
||||
class WarmingUpError(Exception):
|
||||
"""Internal - when the daemon is warming up."""
|
||||
|
||||
|
||||
class WorkQueueFullError(Exception):
|
||||
"""Internal - when the daemon's work queue is full."""
|
||||
|
||||
|
||||
NAMESPACE = "wallet_server"
|
||||
|
||||
|
||||
class Daemon:
|
||||
"""Handles connections to a daemon at the given URL."""
|
||||
|
||||
WARMING_UP = -28
|
||||
id_counter = itertools.count()
|
||||
|
||||
lbrycrd_request_time_metric = Histogram(
|
||||
"lbrycrd_request", "lbrycrd requests count", namespace=NAMESPACE, labelnames=("method",)
|
||||
)
|
||||
lbrycrd_pending_count_metric = Gauge(
|
||||
"lbrycrd_pending_count", "Number of lbrycrd rpcs that are in flight", namespace=NAMESPACE,
|
||||
labelnames=("method",)
|
||||
)
|
||||
|
||||
def __init__(self, coin, url, max_workqueue=10, init_retry=0.25,
|
||||
max_retry=4.0):
|
||||
self.coin = coin
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.set_url(url)
|
||||
# Limit concurrent RPC calls to this number.
|
||||
# See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
|
||||
self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
|
||||
self.init_retry = init_retry
|
||||
self.max_retry = max_retry
|
||||
self._height = None
|
||||
self.available_rpcs = {}
|
||||
self.connector = aiohttp.TCPConnector()
|
||||
self._block_hash_cache = LRUCacheWithMetrics(100000)
|
||||
self._block_cache = LRUCacheWithMetrics(2 ** 13, metric_name='block', namespace=NAMESPACE)
|
||||
|
||||
async def close(self):
|
||||
if self.connector:
|
||||
await self.connector.close()
|
||||
self.connector = None
|
||||
|
||||
def set_url(self, url):
|
||||
"""Set the URLS to the given list, and switch to the first one."""
|
||||
urls = url.split(',')
|
||||
urls = [self.coin.sanitize_url(url) for url in urls]
|
||||
for n, url in enumerate(urls):
|
||||
status = '' if n else ' (current)'
|
||||
logged_url = self.logged_url(url)
|
||||
self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
|
||||
self.url_index = 0
|
||||
self.urls = urls
|
||||
|
||||
def current_url(self):
|
||||
"""Returns the current daemon URL."""
|
||||
return self.urls[self.url_index]
|
||||
|
||||
def logged_url(self, url=None):
|
||||
"""The host and port part, for logging."""
|
||||
url = url or self.current_url()
|
||||
return url[url.rindex('@') + 1:]
|
||||
|
||||
def failover(self):
|
||||
"""Call to fail-over to the next daemon URL.
|
||||
|
||||
Returns False if there is only one, otherwise True.
|
||||
"""
|
||||
if len(self.urls) > 1:
|
||||
self.url_index = (self.url_index + 1) % len(self.urls)
|
||||
self.logger.info(f'failing over to {self.logged_url()}')
|
||||
return True
|
||||
return False
|
||||
|
||||
def client_session(self):
|
||||
"""An aiohttp client session."""
|
||||
return aiohttp.ClientSession(connector=self.connector, connector_owner=False)
|
||||
|
||||
async def _send_data(self, data):
|
||||
if not self.connector:
|
||||
raise asyncio.CancelledError('Tried to send request during shutdown.')
|
||||
async with self.workqueue_semaphore:
|
||||
async with self.client_session() as session:
|
||||
async with session.post(self.current_url(), data=data) as resp:
|
||||
kind = resp.headers.get('Content-Type', None)
|
||||
if kind == 'application/json':
|
||||
return await resp.json()
|
||||
# bitcoind's HTTP protocol "handling" is a bad joke
|
||||
text = await resp.text()
|
||||
if 'Work queue depth exceeded' in text:
|
||||
raise WorkQueueFullError
|
||||
text = text.strip() or resp.reason
|
||||
self.logger.error(text)
|
||||
raise DaemonError(text)
|
||||
|
||||
async def _send(self, payload, processor):
|
||||
"""Send a payload to be converted to JSON.
|
||||
|
||||
Handles temporary connection issues. Daemon response errors
|
||||
are raised through DaemonError.
|
||||
"""
|
||||
|
||||
def log_error(error):
|
||||
nonlocal last_error_log, retry
|
||||
now = time.time()
|
||||
if now - last_error_log > 60:
|
||||
last_error_log = now
|
||||
self.logger.error(f'{error} Retrying occasionally...')
|
||||
if retry == self.max_retry and self.failover():
|
||||
retry = 0
|
||||
|
||||
on_good_message = None
|
||||
last_error_log = 0
|
||||
data = json.dumps(payload)
|
||||
retry = self.init_retry
|
||||
methods = tuple(
|
||||
[payload['method']] if isinstance(payload, dict) else [request['method'] for request in payload]
|
||||
)
|
||||
while True:
|
||||
try:
|
||||
for method in methods:
|
||||
self.lbrycrd_pending_count_metric.labels(method=method).inc()
|
||||
result = await self._send_data(data)
|
||||
result = processor(result)
|
||||
if on_good_message:
|
||||
self.logger.info(on_good_message)
|
||||
return result
|
||||
except asyncio.TimeoutError:
|
||||
log_error('timeout error.')
|
||||
except aiohttp.ServerDisconnectedError:
|
||||
log_error('disconnected.')
|
||||
on_good_message = 'connection restored'
|
||||
except aiohttp.ClientConnectionError:
|
||||
log_error('connection problem - is your daemon running?')
|
||||
on_good_message = 'connection restored'
|
||||
except aiohttp.ClientError as e:
|
||||
log_error(f'daemon error: {e}')
|
||||
on_good_message = 'running normally'
|
||||
except WarmingUpError:
|
||||
log_error('starting up checking blocks.')
|
||||
on_good_message = 'running normally'
|
||||
except WorkQueueFullError:
|
||||
log_error('work queue full.')
|
||||
on_good_message = 'running normally'
|
||||
finally:
|
||||
for method in methods:
|
||||
self.lbrycrd_pending_count_metric.labels(method=method).dec()
|
||||
await asyncio.sleep(retry)
|
||||
retry = max(min(self.max_retry, retry * 2), self.init_retry)
|
||||
|
||||
async def _send_single(self, method, params=None):
|
||||
"""Send a single request to the daemon."""
|
||||
|
||||
start = time.perf_counter()
|
||||
|
||||
def processor(result):
|
||||
err = result['error']
|
||||
if not err:
|
||||
return result['result']
|
||||
if err.get('code') == self.WARMING_UP:
|
||||
raise WarmingUpError
|
||||
raise DaemonError(err)
|
||||
|
||||
payload = {'method': method, 'id': next(self.id_counter)}
|
||||
if params:
|
||||
payload['params'] = params
|
||||
result = await self._send(payload, processor)
|
||||
self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
|
||||
return result
|
||||
|
||||
async def _send_vector(self, method, params_iterable, replace_errs=False):
|
||||
"""Send several requests of the same method.
|
||||
|
||||
The result will be an array of the same length as params_iterable.
|
||||
If replace_errs is true, any item with an error is returned as None,
|
||||
otherwise an exception is raised."""
|
||||
|
||||
start = time.perf_counter()
|
||||
|
||||
def processor(result):
|
||||
errs = [item['error'] for item in result if item['error']]
|
||||
if any(err.get('code') == self.WARMING_UP for err in errs):
|
||||
raise WarmingUpError
|
||||
if not errs or replace_errs:
|
||||
return [item['result'] for item in result]
|
||||
raise DaemonError(errs)
|
||||
|
||||
payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
|
||||
for p in params_iterable]
|
||||
result = []
|
||||
if payload:
|
||||
result = await self._send(payload, processor)
|
||||
self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
|
||||
return result
|
||||
|
||||
async def _is_rpc_available(self, method):
|
||||
"""Return whether given RPC method is available in the daemon.
|
||||
|
||||
Results are cached and the daemon will generally not be queried with
|
||||
the same method more than once."""
|
||||
available = self.available_rpcs.get(method)
|
||||
if available is None:
|
||||
available = True
|
||||
try:
|
||||
await self._send_single(method)
|
||||
except DaemonError as e:
|
||||
err = e.args[0]
|
||||
error_code = err.get("code")
|
||||
available = error_code != JSONRPC.METHOD_NOT_FOUND
|
||||
self.available_rpcs[method] = available
|
||||
return available
|
||||
|
||||
async def block_hex_hashes(self, first, count):
|
||||
"""Return the hex hashes of count block starting at height first."""
|
||||
if first + count < (self.cached_height() or 0) - 200:
|
||||
return await self._cached_block_hex_hashes(first, count)
|
||||
params_iterable = ((h, ) for h in range(first, first + count))
|
||||
return await self._send_vector('getblockhash', params_iterable)
|
||||
|
||||
async def _cached_block_hex_hashes(self, first, count):
|
||||
"""Return the hex hashes of count block starting at height first."""
|
||||
cached = self._block_hash_cache.get((first, count))
|
||||
if cached:
|
||||
return cached
|
||||
params_iterable = ((h, ) for h in range(first, first + count))
|
||||
self._block_hash_cache[(first, count)] = await self._send_vector('getblockhash', params_iterable)
|
||||
return self._block_hash_cache[(first, count)]
|
||||
|
||||
async def deserialised_block(self, hex_hash):
|
||||
"""Return the deserialised block with the given hex hash."""
|
||||
if hex_hash not in self._block_cache:
|
||||
block = await self._send_single('getblock', (hex_hash, True))
|
||||
self._block_cache[hex_hash] = block
|
||||
return block
|
||||
return self._block_cache[hex_hash]
|
||||
|
||||
async def raw_blocks(self, hex_hashes):
|
||||
"""Return the raw binary blocks with the given hex hashes."""
|
||||
params_iterable = ((h, False) for h in hex_hashes)
|
||||
blocks = await self._send_vector('getblock', params_iterable)
|
||||
# Convert hex string to bytes
|
||||
return [hex_to_bytes(block) for block in blocks]
|
||||
|
||||
async def mempool_hashes(self):
|
||||
"""Update our record of the daemon's mempool hashes."""
|
||||
return await self._send_single('getrawmempool')
|
||||
|
||||
async def estimatefee(self, block_count):
|
||||
"""Return the fee estimate for the block count. Units are whole
|
||||
currency units per KB, e.g. 0.00000995, or -1 if no estimate
|
||||
is available.
|
||||
"""
|
||||
args = (block_count, )
|
||||
if await self._is_rpc_available('estimatesmartfee'):
|
||||
estimate = await self._send_single('estimatesmartfee', args)
|
||||
return estimate.get('feerate', -1)
|
||||
return await self._send_single('estimatefee', args)
|
||||
|
||||
async def getnetworkinfo(self):
|
||||
"""Return the result of the 'getnetworkinfo' RPC call."""
|
||||
return await self._send_single('getnetworkinfo')
|
||||
|
||||
async def relayfee(self):
|
||||
"""The minimum fee a low-priority tx must pay in order to be accepted
|
||||
to the daemon's memory pool."""
|
||||
network_info = await self.getnetworkinfo()
|
||||
return network_info['relayfee']
|
||||
|
||||
async def getrawtransaction(self, hex_hash, verbose=False):
|
||||
"""Return the serialized raw transaction with the given hash."""
|
||||
# Cast to int because some coin daemons are old and require it
|
||||
return await self._send_single('getrawtransaction',
|
||||
(hex_hash, int(verbose)))
|
||||
|
||||
async def getrawtransactions(self, hex_hashes, replace_errs=True):
|
||||
"""Return the serialized raw transactions with the given hashes.
|
||||
|
||||
Replaces errors with None by default."""
|
||||
params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
|
||||
txs = await self._send_vector('getrawtransaction', params_iterable,
|
||||
replace_errs=replace_errs)
|
||||
# Convert hex strings to bytes
|
||||
return [hex_to_bytes(tx) if tx else None for tx in txs]
|
||||
|
||||
async def broadcast_transaction(self, raw_tx):
|
||||
"""Broadcast a transaction to the network."""
|
||||
return await self._send_single('sendrawtransaction', (raw_tx, ))
|
||||
|
||||
async def height(self):
|
||||
"""Query the daemon for its current height."""
|
||||
self._height = await self._send_single('getblockcount')
|
||||
return self._height
|
||||
|
||||
def cached_height(self):
|
||||
"""Return the cached daemon height.
|
||||
|
||||
If the daemon has not been queried yet this returns None."""
|
||||
return self._height
|
||||
|
||||
|
||||
def handles_errors(decorated_function):
|
||||
@wraps(decorated_function)
|
||||
async def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return await decorated_function(*args, **kwargs)
|
||||
except DaemonError as daemon_error:
|
||||
raise RPCError(1, daemon_error.args[0])
|
||||
return wrapper
|
||||
|
||||
|
||||
class LBCDaemon(Daemon):
|
||||
@handles_errors
|
||||
async def getrawtransaction(self, hex_hash, verbose=False):
|
||||
return await super().getrawtransaction(hex_hash=hex_hash, verbose=verbose)
|
||||
|
||||
@handles_errors
|
||||
async def getclaimbyid(self, claim_id):
|
||||
'''Given a claim id, retrieves claim information.'''
|
||||
return await self._send_single('getclaimbyid', (claim_id,))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsbyids(self, claim_ids):
|
||||
'''Given a list of claim ids, batches calls to retrieve claim information.'''
|
||||
return await self._send_vector('getclaimbyid', ((claim_id,) for claim_id in claim_ids))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsforname(self, name):
|
||||
'''Given a name, retrieves all claims matching that name.'''
|
||||
return await self._send_single('getclaimsforname', (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getclaimsfortx(self, txid):
|
||||
'''Given a txid, returns the claims it makes.'''
|
||||
return await self._send_single('getclaimsfortx', (txid,)) or []
|
||||
|
||||
@handles_errors
|
||||
async def getnameproof(self, name, block_hash=None):
|
||||
'''Given a name and optional block_hash, returns a name proof and winner, if any.'''
|
||||
return await self._send_single('getnameproof', (name, block_hash,) if block_hash else (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getvalueforname(self, name):
|
||||
'''Given a name, returns the winning claim value.'''
|
||||
return await self._send_single('getvalueforname', (name,))
|
||||
|
||||
@handles_errors
|
||||
async def getnamesintrie(self):
|
||||
'''Returns the names in the claim trie.'''
|
||||
return await self._send_single('getnamesintrie')
|
||||
|
||||
@handles_errors
|
||||
async def claimname(self, name, hexvalue, amount):
|
||||
'''Claim a name, used for functional tests only.'''
|
||||
return await self._send_single('claimname', (name, hexvalue, float(amount)))
|
45
lbry/wallet/server/db/__init__.py
Normal file
@@ -0,0 +1,45 @@
import enum


@enum.unique
class DB_PREFIXES(enum.Enum):
    claim_to_support = b'K'
    support_to_claim = b'L'

    claim_to_txo = b'E'
    txo_to_claim = b'G'

    claim_to_channel = b'I'
    channel_to_claim = b'J'

    claim_short_id_prefix = b'F'
    effective_amount = b'D'
    claim_expiration = b'O'

    claim_takeover = b'P'
    pending_activation = b'Q'
    activated_claim_and_support = b'R'
    active_amount = b'S'

    repost = b'V'
    reposted_claim = b'W'

    undo = b'M'
    touched_or_deleted = b'Y'

    tx = b'B'
    block_hash = b'C'
    header = b'H'
    tx_num = b'N'
    tx_count = b'T'
    tx_hash = b'X'
    utxo = b'u'
    hashx_utxo = b'h'
    hashx_history = b'x'
    db_state = b's'
    channel_count = b'Z'
    support_amount = b'a'
    block_tx = b'b'
    trending_notifications = b'c'
    mempool_tx = b'd'
    touched_hashX = b'e'
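Each DB_PREFIXES member is a single namespace byte prepended to keys in the underlying key-value store. A minimal sketch of composing such a key (the struct layout after the prefix byte is an assumption for illustration; the real row formats are defined by the prefix classes elsewhere in this branch):

import struct
from lbry.wallet.server.db import DB_PREFIXES

def make_tx_hash_key(tx_num: int) -> bytes:
    # prefix byte followed by a big-endian transaction number (illustrative layout)
    return DB_PREFIXES.tx_hash.value + struct.pack('>L', tx_num)

key = make_tx_hash_key(42)
assert key[:1] == b'X' and len(key) == 5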
477
lbry/wallet/server/db/common.py
Normal file
@@ -0,0 +1,477 @@
import typing
|
||||
from typing import Optional
|
||||
from lbry.error import ResolveCensoredError
|
||||
|
||||
CLAIM_TYPES = {
|
||||
'stream': 1,
|
||||
'channel': 2,
|
||||
'repost': 3,
|
||||
'collection': 4,
|
||||
}
|
||||
|
||||
STREAM_TYPES = {
|
||||
'video': 1,
|
||||
'audio': 2,
|
||||
'image': 3,
|
||||
'document': 4,
|
||||
'binary': 5,
|
||||
'model': 6,
|
||||
}
|
||||
|
||||
# 9/21/2020
|
||||
MOST_USED_TAGS = {
|
||||
"gaming",
|
||||
"people & blogs",
|
||||
"entertainment",
|
||||
"music",
|
||||
"pop culture",
|
||||
"education",
|
||||
"technology",
|
||||
"blockchain",
|
||||
"news",
|
||||
"funny",
|
||||
"science & technology",
|
||||
"learning",
|
||||
"gameplay",
|
||||
"news & politics",
|
||||
"comedy",
|
||||
"bitcoin",
|
||||
"beliefs",
|
||||
"nature",
|
||||
"art",
|
||||
"economics",
|
||||
"film & animation",
|
||||
"lets play",
|
||||
"games",
|
||||
"sports",
|
||||
"howto & style",
|
||||
"game",
|
||||
"cryptocurrency",
|
||||
"playstation 4",
|
||||
"automotive",
|
||||
"crypto",
|
||||
"mature",
|
||||
"sony interactive entertainment",
|
||||
"walkthrough",
|
||||
"tutorial",
|
||||
"video game",
|
||||
"weapons",
|
||||
"playthrough",
|
||||
"pc",
|
||||
"anime",
|
||||
"how to",
|
||||
"btc",
|
||||
"fun",
|
||||
"ethereum",
|
||||
"food",
|
||||
"travel & events",
|
||||
"minecraft",
|
||||
"science",
|
||||
"autos & vehicles",
|
||||
"play",
|
||||
"politics",
|
||||
"commentary",
|
||||
"twitch",
|
||||
"ps4live",
|
||||
"love",
|
||||
"ps4",
|
||||
"nonprofits & activism",
|
||||
"ps4share",
|
||||
"fortnite",
|
||||
"xbox",
|
||||
"porn",
|
||||
"video games",
|
||||
"trump",
|
||||
"español",
|
||||
"money",
|
||||
"music video",
|
||||
"nintendo",
|
||||
"movie",
|
||||
"coronavirus",
|
||||
"donald trump",
|
||||
"steam",
|
||||
"trailer",
|
||||
"android",
|
||||
"podcast",
|
||||
"xbox one",
|
||||
"survival",
|
||||
"audio",
|
||||
"linux",
|
||||
"travel",
|
||||
"funny moments",
|
||||
"litecoin",
|
||||
"animation",
|
||||
"gamer",
|
||||
"lets",
|
||||
"playstation",
|
||||
"bitcoin news",
|
||||
"history",
|
||||
"xxx",
|
||||
"fox news",
|
||||
"dance",
|
||||
"god",
|
||||
"adventure",
|
||||
"liberal",
|
||||
"2020",
|
||||
"horror",
|
||||
"government",
|
||||
"freedom",
|
||||
"reaction",
|
||||
"meme",
|
||||
"photography",
|
||||
"truth",
|
||||
"health",
|
||||
"lbry",
|
||||
"family",
|
||||
"online",
|
||||
"eth",
|
||||
"crypto news",
|
||||
"diy",
|
||||
"trading",
|
||||
"gold",
|
||||
"memes",
|
||||
"world",
|
||||
"space",
|
||||
"lol",
|
||||
"covid-19",
|
||||
"rpg",
|
||||
"humor",
|
||||
"democrat",
|
||||
"film",
|
||||
"call of duty",
|
||||
"tech",
|
||||
"religion",
|
||||
"conspiracy",
|
||||
"rap",
|
||||
"cnn",
|
||||
"hangoutsonair",
|
||||
"unboxing",
|
||||
"fiction",
|
||||
"conservative",
|
||||
"cars",
|
||||
"hoa",
|
||||
"epic",
|
||||
"programming",
|
||||
"progressive",
|
||||
"cryptocurrency news",
|
||||
"classical",
|
||||
"jesus",
|
||||
"movies",
|
||||
"book",
|
||||
"ps3",
|
||||
"republican",
|
||||
"fitness",
|
||||
"books",
|
||||
"multiplayer",
|
||||
"animals",
|
||||
"pokemon",
|
||||
"bitcoin price",
|
||||
"facebook",
|
||||
"sharefactory",
|
||||
"criptomonedas",
|
||||
"cod",
|
||||
"bible",
|
||||
"business",
|
||||
"stream",
|
||||
"comics",
|
||||
"how",
|
||||
"fail",
|
||||
"nsfw",
|
||||
"new music",
|
||||
"satire",
|
||||
"pets & animals",
|
||||
"computer",
|
||||
"classical music",
|
||||
"indie",
|
||||
"musica",
|
||||
"msnbc",
|
||||
"fps",
|
||||
"mod",
|
||||
"sport",
|
||||
"sony",
|
||||
"ripple",
|
||||
"auto",
|
||||
"rock",
|
||||
"marvel",
|
||||
"complete",
|
||||
"mining",
|
||||
"political",
|
||||
"mobile",
|
||||
"pubg",
|
||||
"hip hop",
|
||||
"flat earth",
|
||||
"xbox 360",
|
||||
"reviews",
|
||||
"vlogging",
|
||||
"latest news",
|
||||
"hack",
|
||||
"tarot",
|
||||
"iphone",
|
||||
"media",
|
||||
"cute",
|
||||
"christian",
|
||||
"free speech",
|
||||
"trap",
|
||||
"war",
|
||||
"remix",
|
||||
"ios",
|
||||
"xrp",
|
||||
"spirituality",
|
||||
"song",
|
||||
"league of legends",
|
||||
"cat"
|
||||
}
|
||||
|
||||
MATURE_TAGS = [
|
||||
'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
|
||||
]
|
||||
|
||||
|
||||
def normalize_tag(tag):
|
||||
return tag.replace(" ", "_").replace("&", "and").replace("-", "_")
|
||||
|
||||
|
||||
COMMON_TAGS = {
|
||||
tag: normalize_tag(tag) for tag in list(MOST_USED_TAGS)
|
||||
}
|
||||
|
||||
INDEXED_LANGUAGES = [
|
||||
'none',
|
||||
'en',
|
||||
'aa',
|
||||
'ab',
|
||||
'ae',
|
||||
'af',
|
||||
'ak',
|
||||
'am',
|
||||
'an',
|
||||
'ar',
|
||||
'as',
|
||||
'av',
|
||||
'ay',
|
||||
'az',
|
||||
'ba',
|
||||
'be',
|
||||
'bg',
|
||||
'bh',
|
||||
'bi',
|
||||
'bm',
|
||||
'bn',
|
||||
'bo',
|
||||
'br',
|
||||
'bs',
|
||||
'ca',
|
||||
'ce',
|
||||
'ch',
|
||||
'co',
|
||||
'cr',
|
||||
'cs',
|
||||
'cu',
|
||||
'cv',
|
||||
'cy',
|
||||
'da',
|
||||
'de',
|
||||
'dv',
|
||||
'dz',
|
||||
'ee',
|
||||
'el',
|
||||
'eo',
|
||||
'es',
|
||||
'et',
|
||||
'eu',
|
||||
'fa',
|
||||
'ff',
|
||||
'fi',
|
||||
'fj',
|
||||
'fo',
|
||||
'fr',
|
||||
'fy',
|
||||
'ga',
|
||||
'gd',
|
||||
'gl',
|
||||
'gn',
|
||||
'gu',
|
||||
'gv',
|
||||
'ha',
|
||||
'he',
|
||||
'hi',
|
||||
'ho',
|
||||
'hr',
|
||||
'ht',
|
||||
'hu',
|
||||
'hy',
|
||||
'hz',
|
||||
'ia',
|
||||
'id',
|
||||
'ie',
|
||||
'ig',
|
||||
'ii',
|
||||
'ik',
|
||||
'io',
|
||||
'is',
|
||||
'it',
|
||||
'iu',
|
||||
'ja',
|
||||
'jv',
|
||||
'ka',
|
||||
'kg',
|
||||
'ki',
|
||||
'kj',
|
||||
'kk',
|
||||
'kl',
|
||||
'km',
|
||||
'kn',
|
||||
'ko',
|
||||
'kr',
|
||||
'ks',
|
||||
'ku',
|
||||
'kv',
|
||||
'kw',
|
||||
'ky',
|
||||
'la',
|
||||
'lb',
|
||||
'lg',
|
||||
'li',
|
||||
'ln',
|
||||
'lo',
|
||||
'lt',
|
||||
'lu',
|
||||
'lv',
|
||||
'mg',
|
||||
'mh',
|
||||
'mi',
|
||||
'mk',
|
||||
'ml',
|
||||
'mn',
|
||||
'mr',
|
||||
'ms',
|
||||
'mt',
|
||||
'my',
|
||||
'na',
|
||||
'nb',
|
||||
'nd',
|
||||
'ne',
|
||||
'ng',
|
||||
'nl',
|
||||
'nn',
|
||||
'no',
|
||||
'nr',
|
||||
'nv',
|
||||
'ny',
|
||||
'oc',
|
||||
'oj',
|
||||
'om',
|
||||
'or',
|
||||
'os',
|
||||
'pa',
|
||||
'pi',
|
||||
'pl',
|
||||
'ps',
|
||||
'pt',
|
||||
'qu',
|
||||
'rm',
|
||||
'rn',
|
||||
'ro',
|
||||
'ru',
|
||||
'rw',
|
||||
'sa',
|
||||
'sc',
|
||||
'sd',
|
||||
'se',
|
||||
'sg',
|
||||
'si',
|
||||
'sk',
|
||||
'sl',
|
||||
'sm',
|
||||
'sn',
|
||||
'so',
|
||||
'sq',
|
||||
'sr',
|
||||
'ss',
|
||||
'st',
|
||||
'su',
|
||||
'sv',
|
||||
'sw',
|
||||
'ta',
|
||||
'te',
|
||||
'tg',
|
||||
'th',
|
||||
'ti',
|
||||
'tk',
|
||||
'tl',
|
||||
'tn',
|
||||
'to',
|
||||
'tr',
|
||||
'ts',
|
||||
'tt',
|
||||
'tw',
|
||||
'ty',
|
||||
'ug',
|
||||
'uk',
|
||||
'ur',
|
||||
'uz',
|
||||
've',
|
||||
'vi',
|
||||
'vo',
|
||||
'wa',
|
||||
'wo',
|
||||
'xh',
|
||||
'yi',
|
||||
'yo',
|
||||
'za',
|
||||
'zh',
|
||||
'zu'
|
||||
]
|
||||
|
||||
|
||||
class ResolveResult(typing.NamedTuple):
|
||||
name: str
|
||||
normalized_name: str
|
||||
claim_hash: bytes
|
||||
tx_num: int
|
||||
position: int
|
||||
tx_hash: bytes
|
||||
height: int
|
||||
amount: int
|
||||
short_url: str
|
||||
is_controlling: bool
|
||||
canonical_url: str
|
||||
creation_height: int
|
||||
activation_height: int
|
||||
expiration_height: int
|
||||
effective_amount: int
|
||||
support_amount: int
|
||||
reposted: int
|
||||
last_takeover_height: typing.Optional[int]
|
||||
claims_in_channel: typing.Optional[int]
|
||||
channel_hash: typing.Optional[bytes]
|
||||
reposted_claim_hash: typing.Optional[bytes]
|
||||
signature_valid: typing.Optional[bool]
|
||||
|
||||
|
||||
class TrendingNotification(typing.NamedTuple):
|
||||
height: int
|
||||
prev_amount: int
|
||||
new_amount: int
|
||||
|
||||
|
||||
class UTXO(typing.NamedTuple):
|
||||
tx_num: int
|
||||
tx_pos: int
|
||||
tx_hash: bytes
|
||||
height: int
|
||||
value: int
|
||||
|
||||
|
||||
OptionalResolveResultOrError = Optional[typing.Union[ResolveResult, ResolveCensoredError, LookupError, ValueError]]
|
||||
|
||||
|
||||
class ExpandedResolveResult(typing.NamedTuple):
|
||||
stream: OptionalResolveResultOrError
|
||||
channel: OptionalResolveResultOrError
|
||||
repost: OptionalResolveResultOrError
|
||||
reposted_channel: OptionalResolveResultOrError
|
||||
|
||||
|
||||
class DBError(Exception):
|
||||
"""Raised on general DB errors generally indicating corruption."""
1159
lbry/wallet/server/db/db.py
Normal file
File diff suppressed because it is too large
1
lbry/wallet/server/db/elasticsearch/__init__.py
Normal file
@@ -0,0 +1 @@
from .search import SearchIndex
150
lbry/wallet/server/db/elasticsearch/common.py
Normal file
@@ -0,0 +1,150 @@
from decimal import Decimal
|
||||
from typing import Iterable
|
||||
|
||||
from lbry.error import TooManyClaimSearchParametersError
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.url import normalize_name
|
||||
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
|
||||
from lbry.wallet.server.db.elasticsearch.constants import REPLACEMENTS, FIELDS, TEXT_FIELDS, RANGE_FIELDS
|
||||
|
||||
|
||||
def expand_query(**kwargs):
|
||||
if "amount_order" in kwargs:
|
||||
kwargs["limit"] = 1
|
||||
kwargs["order_by"] = "effective_amount"
|
||||
kwargs["offset"] = int(kwargs["amount_order"]) - 1
|
||||
if 'name' in kwargs:
|
||||
kwargs['name'] = normalize_name(kwargs.pop('name'))
|
||||
if kwargs.get('is_controlling') is False:
|
||||
kwargs.pop('is_controlling')
|
||||
query = {'must': [], 'must_not': []}
|
||||
collapse = None
|
||||
if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
|
||||
kwargs['fee_currency'] = kwargs['fee_currency'].upper()
|
||||
for key, value in kwargs.items():
|
||||
key = key.replace('claim.', '')
|
||||
many = key.endswith('__in') or isinstance(value, list)
|
||||
if many and len(value) > 2048:
|
||||
raise TooManyClaimSearchParametersError(key, 2048)
|
||||
if many:
|
||||
key = key.replace('__in', '')
|
||||
value = list(filter(None, value))
|
||||
if value is None or isinstance(value, list) and len(value) == 0:
|
||||
continue
|
||||
key = REPLACEMENTS.get(key, key)
|
||||
if key in FIELDS:
|
||||
partial_id = False
|
||||
if key == 'claim_type':
|
||||
if isinstance(value, str):
|
||||
value = CLAIM_TYPES[value]
|
||||
else:
|
||||
value = [CLAIM_TYPES[claim_type] for claim_type in value]
|
||||
elif key == 'stream_type':
|
||||
value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
|
||||
if key == '_id':
|
||||
if isinstance(value, Iterable):
|
||||
value = [item[::-1].hex() for item in value]
|
||||
else:
|
||||
value = value[::-1].hex()
|
||||
if not many and key in ('_id', 'claim_id') and len(value) < 20:
|
||||
partial_id = True
|
||||
if key in ('signature_valid', 'has_source'):
|
||||
continue # handled later
|
||||
if key in TEXT_FIELDS:
|
||||
key += '.keyword'
|
||||
ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
|
||||
if partial_id:
|
||||
query['must'].append({"prefix": {"claim_id": value}})
|
||||
elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
|
||||
operator_length = 2 if value[:2] in ops else 1
|
||||
operator, value = value[:operator_length], value[operator_length:]
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"range": {key: {ops[operator]: value}}})
|
||||
elif many:
|
||||
query['must'].append({"terms": {key: value}})
|
||||
else:
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"term": {key: {"value": value}}})
|
||||
elif key == 'not_channel_ids':
|
||||
for channel_id in value:
|
||||
query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
|
||||
query['must_not'].append({"term": {'_id': channel_id}})
|
||||
elif key == 'channel_ids':
|
||||
query['must'].append({"terms": {'channel_id.keyword': value}})
|
||||
elif key == 'claim_ids':
|
||||
query['must'].append({"terms": {'claim_id.keyword': value}})
|
||||
elif key == 'media_types':
|
||||
query['must'].append({"terms": {'media_type.keyword': value}})
|
||||
elif key == 'any_languages':
|
||||
query['must'].append({"terms": {'languages': clean_tags(value)}})
|
||||
elif key == 'any_languages':
|
||||
query['must'].append({"terms": {'languages': value}})
|
||||
elif key == 'all_languages':
|
||||
query['must'].extend([{"term": {'languages': tag}} for tag in value])
|
||||
elif key == 'any_tags':
|
||||
query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
|
||||
elif key == 'all_tags':
|
||||
query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_tags':
|
||||
query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_claim_id':
|
||||
query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
|
||||
elif key == 'limit_claims_per_channel':
|
||||
collapse = ('channel_id.keyword', value)
|
||||
if kwargs.get('has_channel_signature'):
|
||||
query['must'].append({"exists": {"field": "signature"}})
|
||||
if 'signature_valid' in kwargs:
|
||||
query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
elif 'signature_valid' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
|
||||
query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
if 'has_source' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
|
||||
query['should'].append(
|
||||
{"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
|
||||
if kwargs.get('text'):
|
||||
query['must'].append(
|
||||
{"simple_query_string":
|
||||
{"query": kwargs["text"], "fields": [
|
||||
"claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
|
||||
]}})
|
||||
query = {
|
||||
"_source": {"excludes": ["description", "title"]},
|
||||
'query': {'bool': query},
|
||||
"sort": [],
|
||||
}
|
||||
if "limit" in kwargs:
|
||||
query["size"] = kwargs["limit"]
|
||||
if 'offset' in kwargs:
|
||||
query["from"] = kwargs["offset"]
|
||||
if 'order_by' in kwargs:
|
||||
if isinstance(kwargs["order_by"], str):
|
||||
kwargs["order_by"] = [kwargs["order_by"]]
|
||||
for value in kwargs['order_by']:
|
||||
if 'trending_group' in value:
|
||||
# fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
|
||||
continue
|
||||
is_asc = value.startswith('^')
|
||||
value = value[1:] if is_asc else value
|
||||
value = REPLACEMENTS.get(value, value)
|
||||
if value in TEXT_FIELDS:
|
||||
value += '.keyword'
|
||||
query['sort'].append({value: "asc" if is_asc else "desc"})
|
||||
if collapse:
|
||||
query["collapse"] = {
|
||||
"field": collapse[0],
|
||||
"inner_hits": {
|
||||
"name": collapse[0],
|
||||
"size": collapse[1],
|
||||
"sort": query["sort"]
|
||||
}
|
||||
}
|
||||
return query
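expand_query above turns claim-search keyword arguments into an Elasticsearch bool query body. A quick sketch of calling it directly (argument values are made up; assumes the module and its lbry dependencies import cleanly):

from lbry.wallet.server.db.elasticsearch.common import expand_query

query = expand_query(
    name='what',           # normalized and matched against normalized_name
    claim_type='stream',   # mapped through CLAIM_TYPES to its byte value
    fee_amount='<1',       # leading operator turns this into a range filter
    order_by=['^height'],  # leading ^ requests ascending order
    limit=20,
)
assert query['size'] == 20
assert query['sort'] == [{'height': 'asc'}]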
100
lbry/wallet/server/db/elasticsearch/constants.py
Normal file
@@ -0,0 +1,100 @@
INDEX_DEFAULT_SETTINGS = {
    "settings":
        {"analysis":
            {"analyzer": {
                "default": {"tokenizer": "whitespace", "filter": ["lowercase", "porter_stem"]}}},
         "index":
            {"refresh_interval": -1,
             "number_of_shards": 1,
             "number_of_replicas": 0,
             "sort": {
                 "field": ["trending_score", "release_time"],
                 "order": ["desc", "desc"]
             }}
         },
    "mappings": {
        "properties": {
            "claim_id": {
                "fields": {
                    "keyword": {
                        "ignore_above": 256,
                        "type": "keyword"
                    }
                },
                "type": "text",
                "index_prefixes": {
                    "min_chars": 1,
                    "max_chars": 10
                }
            },
            "sd_hash": {
                "fields": {
                    "keyword": {
                        "ignore_above": 96,
                        "type": "keyword"
                    }
                },
                "type": "text",
                "index_prefixes": {
                    "min_chars": 1,
                    "max_chars": 4
                }
            },
            "height": {"type": "integer"},
            "claim_type": {"type": "byte"},
            "censor_type": {"type": "byte"},
            "trending_score": {"type": "double"},
            "release_time": {"type": "long"}
        }
    }
}

FIELDS = {
    '_id',
    'claim_id', 'claim_type', 'claim_name', 'normalized_name',
    'tx_id', 'tx_nout', 'tx_position',
    'short_url', 'canonical_url',
    'is_controlling', 'last_take_over_height',
    'public_key_bytes', 'public_key_id', 'claims_in_channel',
    'channel_id', 'signature', 'signature_digest', 'is_signature_valid',
    'amount', 'effective_amount', 'support_amount',
    'fee_amount', 'fee_currency',
    'height', 'creation_height', 'activation_height', 'expiration_height',
    'stream_type', 'media_type', 'censor_type',
    'title', 'author', 'description',
    'timestamp', 'creation_timestamp',
    'duration', 'release_time',
    'tags', 'languages', 'has_source', 'reposted_claim_type',
    'reposted_claim_id', 'repost_count', 'sd_hash',
    'trending_score', 'tx_num'
}

TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'description', 'claim_id', 'censoring_channel_id',
               'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature',
               'claim_name', 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id',
               'tags', 'sd_hash'}

RANGE_FIELDS = {
    'height', 'creation_height', 'activation_height', 'expiration_height',
    'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
    'tx_position', 'repost_count', 'limit_claims_per_channel',
    'amount', 'effective_amount', 'support_amount',
    'trending_score', 'censor_type', 'tx_num'
}

ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS

REPLACEMENTS = {
    'claim_name': 'normalized_name',
    'name': 'normalized_name',
    'txid': 'tx_id',
    'nout': 'tx_nout',
    'trending_group': 'trending_score',
    'trending_mixed': 'trending_score',
    'trending_global': 'trending_score',
    'trending_local': 'trending_score',
    'reposted': 'repost_count',
    'stream_types': 'stream_type',
    'media_types': 'media_type',
    'valid_channel_signature': 'is_signature_valid'
}
117
lbry/wallet/server/db/elasticsearch/fast_ar_trending.py
Normal file
@@ -0,0 +1,117 @@
FAST_AR_TRENDING_SCRIPT = """
|
||||
double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }
|
||||
|
||||
double logsumexp(double x, double y)
|
||||
{
|
||||
double top;
|
||||
if(x > y)
|
||||
top = x;
|
||||
else
|
||||
top = y;
|
||||
double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
|
||||
return(result);
|
||||
}
|
||||
|
||||
double logdiffexp(double big, double small)
|
||||
{
|
||||
return big + Math.log(1.0 - Math.exp(small - big));
|
||||
}
|
||||
|
||||
double squash(double x)
|
||||
{
|
||||
if(x < 0.0)
|
||||
return -Math.log(1.0 - x);
|
||||
else
|
||||
return Math.log(x + 1.0);
|
||||
}
|
||||
|
||||
double unsquash(double x)
|
||||
{
|
||||
if(x < 0.0)
|
||||
return 1.0 - Math.exp(-x);
|
||||
else
|
||||
return Math.exp(x) - 1.0;
|
||||
}
|
||||
|
||||
double log_to_squash(double x)
|
||||
{
|
||||
return logsumexp(x, 0.0);
|
||||
}
|
||||
|
||||
double squash_to_log(double x)
|
||||
{
|
||||
//assert x > 0.0;
|
||||
return logdiffexp(x, 0.0);
|
||||
}
|
||||
|
||||
double squashed_add(double x, double y)
|
||||
{
|
||||
// squash(unsquash(x) + unsquash(y)) but avoiding overflow.
|
||||
// Cases where the signs are the same
|
||||
if (x < 0.0 && y < 0.0)
|
||||
return -logsumexp(-x, logdiffexp(-y, 0.0));
|
||||
if (x >= 0.0 && y >= 0.0)
|
||||
return logsumexp(x, logdiffexp(y, 0.0));
|
||||
// Where the signs differ
|
||||
if (x >= 0.0 && y < 0.0)
|
||||
if (Math.abs(x) >= Math.abs(y))
|
||||
return logsumexp(0.0, logdiffexp(x, -y));
|
||||
else
|
||||
return -logsumexp(0.0, logdiffexp(-y, x));
|
||||
if (x < 0.0 && y >= 0.0)
|
||||
{
|
||||
// Addition is commutative, hooray for new math
|
||||
return squashed_add(y, x);
|
||||
}
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
double squashed_multiply(double x, double y)
|
||||
{
|
||||
// squash(unsquash(x)*unsquash(y)) but avoiding overflow.
|
||||
int sign;
|
||||
if(x*y >= 0.0)
|
||||
sign = 1;
|
||||
else
|
||||
sign = -1;
|
||||
return sign*logsumexp(squash_to_log(Math.abs(x))
|
||||
+ squash_to_log(Math.abs(y)), 0.0);
|
||||
}
|
||||
|
||||
// Squashed inflated units
|
||||
double inflateUnits(int height) {
|
||||
double timescale = 576.0; // Half life of 400 = e-folding time of a day
|
||||
// by coincidence, so may as well go with it
|
||||
return log_to_squash(height / timescale);
|
||||
}
|
||||
|
||||
double spikePower(double newAmount) {
|
||||
if (newAmount < 50.0) {
|
||||
return(0.5);
|
||||
} else if (newAmount < 85.0) {
|
||||
return(newAmount / 100.0);
|
||||
} else {
|
||||
return(0.85);
|
||||
}
|
||||
}
|
||||
|
||||
double spikeMass(double oldAmount, double newAmount) {
|
||||
double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
|
||||
double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
|
||||
double power = spikePower(newAmount);
|
||||
if (oldAmount > newAmount) {
|
||||
-1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
|
||||
} else {
|
||||
Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power)
|
||||
}
|
||||
}
|
||||
|
||||
for (i in params.src.changes) {
|
||||
double units = inflateUnits(i.height);
|
||||
if (ctx._source.trending_score == null) {
|
||||
ctx._source.trending_score = 0.0;
|
||||
}
|
||||
double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
|
||||
ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
|
||||
}
|
||||
"""
55
lbry/wallet/server/db/elasticsearch/notifier.py
Normal file
@@ -0,0 +1,55 @@
import struct
import typing
import asyncio
import logging


log = logging.getLogger(__name__)


class ElasticNotifierProtocol(asyncio.Protocol):
    """notifies the reader when ES has written updates"""

    def __init__(self, listeners):
        self._listeners = listeners
        self.transport: typing.Optional[asyncio.Transport] = None

    def connection_made(self, transport):
        self.transport = transport
        self._listeners.append(self)
        log.info("got es notifier connection")

    def connection_lost(self, exc) -> None:
        self._listeners.remove(self)
        self.transport = None

    def send_height(self, height: int, block_hash: bytes):
        log.info("notify es update '%s'", height)
        self.transport.write(struct.pack(b'>Q32s', height, block_hash))


class ElasticNotifierClientProtocol(asyncio.Protocol):
    """notifies the reader when ES has written updates"""

    def __init__(self, notifications: asyncio.Queue):
        self.notifications = notifications
        self.transport: typing.Optional[asyncio.Transport] = None

    def close(self):
        if self.transport and not self.transport.is_closing():
            self.transport.close()

    def connection_made(self, transport):
        self.transport = transport
        log.info("connected to es notifier")

    def connection_lost(self, exc) -> None:
        self.transport = None

    def data_received(self, data: bytes) -> None:
        try:
            height, block_hash = struct.unpack(b'>Q32s', data)
        except:
            log.exception("failed to decode %s", (data or b'').hex())
            raise
        self.notifications.put_nowait((height, block_hash))
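Both protocols exchange a fixed-size frame: an unsigned 64-bit big-endian height followed by the 32-byte block hash, as the struct.pack(b'>Q32s', ...) calls above show. A tiny round-trip check of that wire format:

import struct

height, block_hash = 1000000, bytes(32)
frame = struct.pack(b'>Q32s', height, block_hash)
assert len(frame) == 40
assert struct.unpack(b'>Q32s', frame) == (height, block_hash)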
726
lbry/wallet/server/db/elasticsearch/search.py
Normal file
@@ -0,0 +1,726 @@
import time
|
||||
import asyncio
|
||||
import struct
|
||||
from binascii import unhexlify
|
||||
from collections import Counter, deque
|
||||
from decimal import Decimal
|
||||
from operator import itemgetter
|
||||
from typing import Optional, List, Iterable, Union
|
||||
|
||||
from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError
|
||||
from elasticsearch.helpers import async_streaming_bulk
|
||||
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
|
||||
from lbry.schema.result import Outputs, Censor
|
||||
from lbry.schema.tags import clean_tags
|
||||
from lbry.schema.url import URL, normalize_name
|
||||
from lbry.utils import LRUCache
|
||||
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
|
||||
from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \
|
||||
RANGE_FIELDS, ALL_FIELDS
|
||||
from lbry.wallet.server.util import class_logger
|
||||
from lbry.wallet.server.db.common import ResolveResult
|
||||
|
||||
|
||||
class ChannelResolution(str):
|
||||
@classmethod
|
||||
def lookup_error(cls, url):
|
||||
return LookupError(f'Could not find channel in "{url}".')
|
||||
|
||||
|
||||
class StreamResolution(str):
|
||||
@classmethod
|
||||
def lookup_error(cls, url):
|
||||
return LookupError(f'Could not find claim at "{url}".')
|
||||
|
||||
|
||||
class IndexVersionMismatch(Exception):
|
||||
def __init__(self, got_version, expected_version):
|
||||
self.got_version = got_version
|
||||
self.expected_version = expected_version
|
||||
|
||||
|
||||
class SearchIndex:
|
||||
VERSION = 1
|
||||
|
||||
def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200):
|
||||
self.search_timeout = search_timeout
|
||||
self.sync_timeout = 600  # won't hit that 99% of the time, but can hit on a fresh import
|
||||
self.search_client: Optional[AsyncElasticsearch] = None
|
||||
self.sync_client: Optional[AsyncElasticsearch] = None
|
||||
self.index = index_prefix + 'claims'
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.claim_cache = LRUCache(2 ** 15)
|
||||
self.search_cache = LRUCache(2 ** 17)
|
||||
self._elastic_host = elastic_host
|
||||
self._elastic_port = elastic_port
|
||||
|
||||
async def get_index_version(self) -> int:
|
||||
try:
|
||||
template = await self.sync_client.indices.get_template(self.index)
|
||||
return template[self.index]['version']
|
||||
except NotFoundError:
|
||||
return 0
|
||||
|
||||
async def set_index_version(self, version):
|
||||
await self.sync_client.indices.put_template(
|
||||
self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
|
||||
)
|
||||
|
||||
async def start(self) -> bool:
|
||||
if self.sync_client:
|
||||
return False
|
||||
hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
|
||||
self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
|
||||
self.search_client = AsyncElasticsearch(hosts, timeout=self.search_timeout)
|
||||
while True:
|
||||
try:
|
||||
await self.sync_client.cluster.health(wait_for_status='yellow')
|
||||
break
|
||||
except ConnectionError:
|
||||
self.logger.warning("Failed to connect to Elasticsearch. Waiting for it!")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
|
||||
acked = res.get('acknowledged', False)
|
||||
if acked:
|
||||
await self.set_index_version(self.VERSION)
|
||||
return acked
|
||||
index_version = await self.get_index_version()
|
||||
if index_version != self.VERSION:
|
||||
self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
|
||||
raise IndexVersionMismatch(index_version, self.VERSION)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
return acked
|
||||
|
||||
def stop(self):
|
||||
clients = [self.sync_client, self.search_client]
|
||||
self.sync_client, self.search_client = None, None
|
||||
return asyncio.ensure_future(asyncio.gather(*(client.close() for client in clients)))
|
||||
|
||||
def delete_index(self):
|
||||
return self.sync_client.indices.delete(self.index, ignore_unavailable=True)
|
||||
|
||||
async def _consume_claim_producer(self, claim_producer):
|
||||
count = 0
|
||||
async for op, doc in claim_producer:
|
||||
if op == 'delete':
|
||||
yield {
|
||||
'_index': self.index,
|
||||
'_op_type': 'delete',
|
||||
'_id': doc
|
||||
}
|
||||
else:
|
||||
yield {
|
||||
'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS},
|
||||
'_id': doc['claim_id'],
|
||||
'_index': self.index,
|
||||
'_op_type': 'update',
|
||||
'doc_as_upsert': True
|
||||
}
|
||||
count += 1
|
||||
if count % 100 == 0:
|
||||
self.logger.info("Indexing in progress, %d claims.", count)
|
||||
if count:
|
||||
self.logger.info("Indexing done for %d claims.", count)
|
||||
else:
|
||||
self.logger.debug("Indexing done for %d claims.", count)
|
||||
|
||||
async def claim_consumer(self, claim_producer):
|
||||
touched = set()
|
||||
async for ok, item in async_streaming_bulk(self.sync_client, self._consume_claim_producer(claim_producer),
|
||||
raise_on_error=False):
|
||||
if not ok:
|
||||
self.logger.warning("indexing failed for an item: %s", item)
|
||||
else:
|
||||
item = item.popitem()[1]
|
||||
touched.add(item['_id'])
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.logger.debug("Indexing done.")
|
||||
|
||||
def update_filter_query(self, censor_type, blockdict, channels=False):
|
||||
blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
|
||||
if channels:
|
||||
update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
else:
|
||||
update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
key = 'channel_id' if channels else 'claim_id'
|
||||
update['script'] = {
|
||||
"source": f"ctx._source.censor_type={censor_type}; "
|
||||
f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
|
||||
"lang": "painless",
|
||||
"params": blockdict
|
||||
}
|
||||
return update
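# Illustration (not part of the module): the body built above is meant for update_by_query,
# pairing a claim_id/channel_id filter with the Painless script that stamps censor_type and
# censoring_channel_id. The hex ids are made up and search_index is an existing SearchIndex.
#
#     blocked, blocker = bytes.fromhex('aa' * 20), bytes.fromhex('bb' * 20)
#     body = search_index.update_filter_query(Censor.SEARCH, {blocked: blocker})
#     # body['query'] matches claims with claim_id 'aaaa...' whose censor_type is still below SEARCH
#     # body['script']['params'] maps 'aaaa...' -> 'bbbb...'
#     await search_index.sync_client.update_by_query(search_index.index, body=body, slices=4)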
|
||||
|
||||
async def update_trending_score(self, params):
|
||||
update_trending_score_script = """
|
||||
double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }
|
||||
|
||||
double logsumexp(double x, double y)
|
||||
{
|
||||
double top;
|
||||
if(x > y)
|
||||
top = x;
|
||||
else
|
||||
top = y;
|
||||
double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
|
||||
return(result);
|
||||
}
|
||||
|
||||
double logdiffexp(double big, double small)
|
||||
{
|
||||
return big + Math.log(1.0 - Math.exp(small - big));
|
||||
}
|
||||
|
||||
double squash(double x)
|
||||
{
|
||||
if(x < 0.0)
|
||||
return -Math.log(1.0 - x);
|
||||
else
|
||||
return Math.log(x + 1.0);
|
||||
}
|
||||
|
||||
double unsquash(double x)
|
||||
{
|
||||
if(x < 0.0)
|
||||
return 1.0 - Math.exp(-x);
|
||||
else
|
||||
return Math.exp(x) - 1.0;
|
||||
}
|
||||
|
||||
double log_to_squash(double x)
|
||||
{
|
||||
return logsumexp(x, 0.0);
|
||||
}
|
||||
|
||||
double squash_to_log(double x)
|
||||
{
|
||||
//assert x > 0.0;
|
||||
return logdiffexp(x, 0.0);
|
||||
}
|
||||
|
||||
double squashed_add(double x, double y)
|
||||
{
|
||||
// squash(unsquash(x) + unsquash(y)) but avoiding overflow.
|
||||
// Cases where the signs are the same
|
||||
if (x < 0.0 && y < 0.0)
|
||||
return -logsumexp(-x, logdiffexp(-y, 0.0));
|
||||
if (x >= 0.0 && y >= 0.0)
|
||||
return logsumexp(x, logdiffexp(y, 0.0));
|
||||
// Where the signs differ
|
||||
if (x >= 0.0 && y < 0.0)
|
||||
if (Math.abs(x) >= Math.abs(y))
|
||||
return logsumexp(0.0, logdiffexp(x, -y));
|
||||
else
|
||||
return -logsumexp(0.0, logdiffexp(-y, x));
|
||||
if (x < 0.0 && y >= 0.0)
|
||||
{
|
||||
// Addition is commutative, hooray for new math
|
||||
return squashed_add(y, x);
|
||||
}
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
double squashed_multiply(double x, double y)
|
||||
{
|
||||
// squash(unsquash(x)*unsquash(y)) but avoiding overflow.
|
||||
int sign;
|
||||
if(x*y >= 0.0)
|
||||
sign = 1;
|
||||
else
|
||||
sign = -1;
|
||||
return sign*logsumexp(squash_to_log(Math.abs(x))
|
||||
+ squash_to_log(Math.abs(y)), 0.0);
|
||||
}
|
||||
|
||||
// Squashed inflated units
|
||||
double inflateUnits(int height) {
|
||||
double timescale = 576.0; // Half life of 400 = e-folding time of a day
|
||||
// by coincidence, so may as well go with it
|
||||
return log_to_squash(height / timescale);
|
||||
}
|
||||
|
||||
double spikePower(double newAmount) {
|
||||
if (newAmount < 50.0) {
|
||||
return(0.5);
|
||||
} else if (newAmount < 85.0) {
|
||||
return(newAmount / 100.0);
|
||||
} else {
|
||||
return(0.85);
|
||||
}
|
||||
}
|
||||
|
||||
double spikeMass(double oldAmount, double newAmount) {
|
||||
double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
|
||||
double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
|
||||
double power = spikePower(newAmount);
|
||||
if (oldAmount > newAmount) {
|
||||
return -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
|
||||
} else {
|
||||
return Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
|
||||
}
|
||||
}
|
||||
for (i in params.src.changes) {
|
||||
double units = inflateUnits(i.height);
|
||||
if (ctx._source.trending_score == null) {
|
||||
ctx._source.trending_score = 0.0;
|
||||
}
|
||||
double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
|
||||
ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
|
||||
}
|
||||
"""
|
||||
start = time.perf_counter()
|
||||
|
||||
def producer():
|
||||
for claim_id, claim_updates in params.items():
|
||||
yield {
|
||||
'_id': claim_id,
|
||||
'_index': self.index,
|
||||
'_op_type': 'update',
|
||||
'script': {
|
||||
'lang': 'painless',
|
||||
'source': update_trending_score_script,
|
||||
'params': {'src': {
|
||||
'changes': [
|
||||
{
|
||||
'height': p.height,
|
||||
'prev_amount': p.prev_amount / 1E8,
|
||||
'new_amount': p.new_amount / 1E8,
|
||||
} for p in claim_updates
|
||||
]
|
||||
}}
|
||||
},
|
||||
}
|
||||
if not params:
|
||||
return
|
||||
async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False):
|
||||
if not ok:
|
||||
self.logger.warning("updating trending failed for an item: %s", item)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000))
|
||||
|
||||
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
|
||||
if filtered_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if filtered_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.clear_caches()
|
||||
|
||||
def clear_caches(self):
|
||||
self.search_cache.clear()
|
||||
self.claim_cache.clear()
|
||||
|
||||
async def cached_search(self, kwargs):
|
||||
total_referenced = []
|
||||
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
|
||||
if cache_item.result is not None:
|
||||
return cache_item.result
|
||||
async with cache_item.lock:
|
||||
if cache_item.result:
|
||||
return cache_item.result
|
||||
censor = Censor(Censor.SEARCH)
|
||||
if kwargs.get('no_totals'):
|
||||
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
else:
|
||||
response, offset, total = await self.search(**kwargs)
|
||||
censor.apply(response)
|
||||
total_referenced.extend(response)
|
||||
|
||||
if censor.censored:
|
||||
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
|
||||
total_referenced.extend(response)
|
||||
response = [
|
||||
ResolveResult(
|
||||
name=r['claim_name'],
|
||||
normalized_name=r['normalized_name'],
|
||||
claim_hash=r['claim_hash'],
|
||||
tx_num=r['tx_num'],
|
||||
position=r['tx_nout'],
|
||||
tx_hash=r['tx_hash'],
|
||||
height=r['height'],
|
||||
amount=r['amount'],
|
||||
short_url=r['short_url'],
|
||||
is_controlling=r['is_controlling'],
|
||||
canonical_url=r['canonical_url'],
|
||||
creation_height=r['creation_height'],
|
||||
activation_height=r['activation_height'],
|
||||
expiration_height=r['expiration_height'],
|
||||
effective_amount=r['effective_amount'],
|
||||
support_amount=r['support_amount'],
|
||||
last_takeover_height=r['last_take_over_height'],
|
||||
claims_in_channel=r['claims_in_channel'],
|
||||
channel_hash=r['channel_hash'],
|
||||
reposted_claim_hash=r['reposted_claim_hash'],
|
||||
reposted=r['reposted'],
|
||||
signature_valid=r['signature_valid']
|
||||
) for r in response
|
||||
]
|
||||
extra = [
|
||||
ResolveResult(
|
||||
name=r['claim_name'],
|
||||
normalized_name=r['normalized_name'],
|
||||
claim_hash=r['claim_hash'],
|
||||
tx_num=r['tx_num'],
|
||||
position=r['tx_nout'],
|
||||
tx_hash=r['tx_hash'],
|
||||
height=r['height'],
|
||||
amount=r['amount'],
|
||||
short_url=r['short_url'],
|
||||
is_controlling=r['is_controlling'],
|
||||
canonical_url=r['canonical_url'],
|
||||
creation_height=r['creation_height'],
|
||||
activation_height=r['activation_height'],
|
||||
expiration_height=r['expiration_height'],
|
||||
effective_amount=r['effective_amount'],
|
||||
support_amount=r['support_amount'],
|
||||
last_takeover_height=r['last_take_over_height'],
|
||||
claims_in_channel=r['claims_in_channel'],
|
||||
channel_hash=r['channel_hash'],
|
||||
reposted_claim_hash=r['reposted_claim_hash'],
|
||||
reposted=r['reposted'],
|
||||
signature_valid=r['signature_valid']
|
||||
) for r in await self._get_referenced_rows(total_referenced)
|
||||
]
|
||||
result = Outputs.to_base64(
|
||||
response, extra, offset, total, censor
|
||||
)
|
||||
cache_item.result = result
|
||||
return result
|
||||
|
||||
async def get_many(self, *claim_ids):
|
||||
await self.populate_claim_cache(*claim_ids)
|
||||
return filter(None, map(self.claim_cache.get, claim_ids))
|
||||
|
||||
async def populate_claim_cache(self, *claim_ids):
|
||||
missing = [claim_id for claim_id in claim_ids if self.claim_cache.get(claim_id) is None]
|
||||
if missing:
|
||||
results = await self.search_client.mget(
|
||||
index=self.index, body={"ids": missing}
|
||||
)
|
||||
for result in expand_result(filter(lambda doc: doc['found'], results["docs"])):
|
||||
self.claim_cache.set(result['claim_id'], result)
|
||||
|
||||
|
||||
async def search(self, **kwargs):
|
||||
try:
|
||||
return await self.search_ahead(**kwargs)
|
||||
except NotFoundError:
|
||||
return [], 0, 0
|
||||
# return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
|
||||
|
||||
async def search_ahead(self, **kwargs):
|
||||
# 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return
|
||||
per_channel_per_page = kwargs.pop('limit_claims_per_channel', 0) or 0
|
||||
remove_duplicates = kwargs.pop('remove_duplicates', False)
|
||||
page_size = kwargs.pop('limit', 10)
|
||||
offset = kwargs.pop('offset', 0)
|
||||
kwargs['limit'] = 1000
|
||||
cache_item = ResultCacheItem.from_cache(f"ahead{per_channel_per_page}{kwargs}", self.search_cache)
|
||||
if cache_item.result is not None:
|
||||
reordered_hits = cache_item.result
|
||||
else:
|
||||
async with cache_item.lock:
|
||||
if cache_item.result:
|
||||
reordered_hits = cache_item.result
|
||||
else:
|
||||
query = expand_query(**kwargs)
|
||||
search_hits = deque((await self.search_client.search(
|
||||
query, index=self.index, track_total_hits=False,
|
||||
_source_includes=['_id', 'channel_id', 'reposted_claim_id', 'creation_height']
|
||||
))['hits']['hits'])
|
||||
if remove_duplicates:
|
||||
search_hits = self.__remove_duplicates(search_hits)
|
||||
if per_channel_per_page > 0:
|
||||
reordered_hits = self.__search_ahead(search_hits, page_size, per_channel_per_page)
|
||||
else:
|
||||
reordered_hits = [(hit['_id'], hit['_source']['channel_id']) for hit in search_hits]
|
||||
cache_item.result = reordered_hits
|
||||
result = list(await self.get_many(*(claim_id for claim_id, _ in reordered_hits[offset:(offset + page_size)])))
|
||||
return result, 0, len(reordered_hits)
|
||||
|
||||
def __remove_duplicates(self, search_hits: deque) -> deque:
|
||||
known_ids = {} # claim_id -> (creation_height, hit_id), where hit_id is either reposted claim id or original
|
||||
dropped = set()
|
||||
for hit in search_hits:
|
||||
hit_height, hit_id = hit['_source']['creation_height'], hit['_source']['reposted_claim_id'] or hit['_id']
|
||||
if hit_id not in known_ids:
|
||||
known_ids[hit_id] = (hit_height, hit['_id'])
|
||||
else:
|
||||
previous_height, previous_id = known_ids[hit_id]
|
||||
if hit_height < previous_height:
|
||||
known_ids[hit_id] = (hit_height, hit['_id'])
|
||||
dropped.add(previous_id)
|
||||
else:
|
||||
dropped.add(hit['_id'])
|
||||
return deque(hit for hit in search_hits if hit['_id'] not in dropped)
|
||||
|
||||
def __search_ahead(self, search_hits: list, page_size: int, per_channel_per_page: int):
|
||||
reordered_hits = []
|
||||
channel_counters = Counter()
|
||||
next_page_hits_maybe_check_later = deque()
|
||||
while search_hits or next_page_hits_maybe_check_later:
|
||||
if reordered_hits and len(reordered_hits) % page_size == 0:
|
||||
channel_counters.clear()
|
||||
elif not reordered_hits:
|
||||
pass
|
||||
else:
|
||||
break # means last page was incomplete and we are left with bad replacements
|
||||
for _ in range(len(next_page_hits_maybe_check_later)):
|
||||
claim_id, channel_id = next_page_hits_maybe_check_later.popleft()
|
||||
if per_channel_per_page > 0 and channel_counters[channel_id] < per_channel_per_page:
|
||||
reordered_hits.append((claim_id, channel_id))
|
||||
channel_counters[channel_id] += 1
|
||||
else:
|
||||
next_page_hits_maybe_check_later.append((claim_id, channel_id))
|
||||
while search_hits:
|
||||
hit = search_hits.popleft()
|
||||
hit_id, hit_channel_id = hit['_id'], hit['_source']['channel_id']
|
||||
if hit_channel_id is None or per_channel_per_page <= 0:
|
||||
reordered_hits.append((hit_id, hit_channel_id))
|
||||
elif channel_counters[hit_channel_id] < per_channel_per_page:
|
||||
reordered_hits.append((hit_id, hit_channel_id))
|
||||
channel_counters[hit_channel_id] += 1
|
||||
if len(reordered_hits) % page_size == 0:
|
||||
break
|
||||
else:
|
||||
next_page_hits_maybe_check_later.append((hit_id, hit_channel_id))
|
||||
return reordered_hits
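# Concrete illustration of the reordering above (hits and channel ids are made up): with
# page_size=2 and one claim per channel per page, the second hit from channel A is deferred
# to the next page and the per-channel counters reset at each page boundary.
#
#     hits = deque([
#         {'_id': 'a1', '_source': {'channel_id': 'A'}},
#         {'_id': 'a2', '_source': {'channel_id': 'A'}},
#         {'_id': 'b1', '_source': {'channel_id': 'B'}},
#         {'_id': 'c1', '_source': {'channel_id': 'C'}},
#     ])
#     self.__search_ahead(hits, 2, 1)
#     # -> [('a1', 'A'), ('b1', 'B'), ('a2', 'A'), ('c1', 'C')]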
|
||||
|
||||
async def _get_referenced_rows(self, txo_rows: List[dict]):
|
||||
txo_rows = [row for row in txo_rows if isinstance(row, dict)]
|
||||
referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows)))
|
||||
referenced_ids |= set(filter(None, (row['channel_id'] for row in txo_rows)))
|
||||
referenced_ids |= set(filter(None, (row['censoring_channel_id'] for row in txo_rows)))
|
||||
|
||||
referenced_txos = []
|
||||
if referenced_ids:
|
||||
referenced_txos.extend(await self.get_many(*referenced_ids))
|
||||
referenced_ids = set(filter(None, (row['channel_id'] for row in referenced_txos)))
|
||||
|
||||
if referenced_ids:
|
||||
referenced_txos.extend(await self.get_many(*referenced_ids))
|
||||
|
||||
return referenced_txos
|
||||
|
||||
|
||||
def expand_query(**kwargs):
|
||||
if "amount_order" in kwargs:
|
||||
kwargs["limit"] = 1
|
||||
kwargs["order_by"] = "effective_amount"
|
||||
kwargs["offset"] = int(kwargs["amount_order"]) - 1
|
||||
if 'name' in kwargs:
|
||||
kwargs['name'] = normalize_name(kwargs.pop('name'))
|
||||
if kwargs.get('is_controlling') is False:
|
||||
kwargs.pop('is_controlling')
|
||||
query = {'must': [], 'must_not': []}
|
||||
collapse = None
|
||||
if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
|
||||
kwargs['fee_currency'] = kwargs['fee_currency'].upper()
|
||||
for key, value in kwargs.items():
|
||||
key = key.replace('claim.', '')
|
||||
many = key.endswith('__in') or isinstance(value, list)
|
||||
if many and len(value) > 2048:
|
||||
raise TooManyClaimSearchParametersError(key, 2048)
|
||||
if many:
|
||||
key = key.replace('__in', '')
|
||||
value = list(filter(None, value))
|
||||
if value is None or isinstance(value, list) and len(value) == 0:
|
||||
continue
|
||||
key = REPLACEMENTS.get(key, key)
|
||||
if key in FIELDS:
|
||||
partial_id = False
|
||||
if key == 'claim_type':
|
||||
if isinstance(value, str):
|
||||
value = CLAIM_TYPES[value]
|
||||
else:
|
||||
value = [CLAIM_TYPES[claim_type] for claim_type in value]
|
||||
elif key == 'stream_type':
|
||||
value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
|
||||
if key == '_id':
|
||||
if isinstance(value, Iterable):
|
||||
value = [item[::-1].hex() for item in value]
|
||||
else:
|
||||
value = value[::-1].hex()
|
||||
if not many and key in ('_id', 'claim_id', 'sd_hash') and len(value) < 20:
|
||||
partial_id = True
|
||||
if key in ('signature_valid', 'has_source'):
|
||||
continue # handled later
|
||||
if key in TEXT_FIELDS:
|
||||
key += '.keyword'
|
||||
ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
|
||||
if partial_id:
|
||||
query['must'].append({"prefix": {key: value}})
|
||||
elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
|
||||
operator_length = 2 if value[:2] in ops else 1
|
||||
operator, value = value[:operator_length], value[operator_length:]
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"range": {key: {ops[operator]: value}}})
|
||||
elif key in RANGE_FIELDS and isinstance(value, list) and all(v[0] in ops for v in value):
|
||||
range_constraints = []
|
||||
for v in value:
|
||||
operator_length = 2 if v[:2] in ops else 1
|
||||
operator, stripped_op_v = v[:operator_length], v[operator_length:]
|
||||
if key == 'fee_amount':
|
||||
stripped_op_v = str(Decimal(stripped_op_v)*1000)
|
||||
range_constraints.append((operator, stripped_op_v))
|
||||
query['must'].append({"range": {key: {ops[operator]: v for operator, v in range_constraints}}})
|
||||
elif many:
|
||||
query['must'].append({"terms": {key: value}})
|
||||
else:
|
||||
if key == 'fee_amount':
|
||||
value = str(Decimal(value)*1000)
|
||||
query['must'].append({"term": {key: {"value": value}}})
|
||||
elif key == 'not_channel_ids':
|
||||
for channel_id in value:
|
||||
query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
|
||||
query['must_not'].append({"term": {'_id': channel_id}})
|
||||
elif key == 'channel_ids':
|
||||
query['must'].append({"terms": {'channel_id.keyword': value}})
|
||||
elif key == 'claim_ids':
|
||||
query['must'].append({"terms": {'claim_id.keyword': value}})
|
||||
elif key == 'media_types':
|
||||
query['must'].append({"terms": {'media_type.keyword': value}})
|
||||
elif key == 'any_languages':
query['must'].append({"terms": {'languages': clean_tags(value)}})
|
||||
elif key == 'all_languages':
|
||||
query['must'].extend([{"term": {'languages': tag}} for tag in value])
|
||||
elif key == 'any_tags':
|
||||
query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
|
||||
elif key == 'all_tags':
|
||||
query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_tags':
|
||||
query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
|
||||
elif key == 'not_claim_id':
|
||||
query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
|
||||
elif key == 'limit_claims_per_channel':
|
||||
collapse = ('channel_id.keyword', value)
|
||||
if kwargs.get('has_channel_signature'):
|
||||
query['must'].append({"exists": {"field": "signature"}})
|
||||
if 'signature_valid' in kwargs:
|
||||
query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
elif 'signature_valid' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
|
||||
query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
|
||||
if 'has_source' in kwargs:
|
||||
query.setdefault('should', [])
|
||||
query["minimum_should_match"] = 1
|
||||
is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
|
||||
query['should'].append(
|
||||
{"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
|
||||
query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
|
||||
if kwargs.get('text'):
|
||||
query['must'].append(
|
||||
{"simple_query_string":
|
||||
{"query": kwargs["text"], "fields": [
|
||||
"claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
|
||||
]}})
|
||||
query = {
|
||||
"_source": {"excludes": ["description", "title"]},
|
||||
'query': {'bool': query},
|
||||
"sort": [],
|
||||
}
|
||||
if "limit" in kwargs:
|
||||
query["size"] = kwargs["limit"]
|
||||
if 'offset' in kwargs:
|
||||
query["from"] = kwargs["offset"]
|
||||
if 'order_by' in kwargs:
|
||||
if isinstance(kwargs["order_by"], str):
|
||||
kwargs["order_by"] = [kwargs["order_by"]]
|
||||
for value in kwargs['order_by']:
|
||||
if 'trending_group' in value:
|
||||
# fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
|
||||
continue
|
||||
is_asc = value.startswith('^')
|
||||
value = value[1:] if is_asc else value
|
||||
value = REPLACEMENTS.get(value, value)
|
||||
if value in TEXT_FIELDS:
|
||||
value += '.keyword'
|
||||
query['sort'].append({value: "asc" if is_asc else "desc"})
|
||||
if collapse:
|
||||
query["collapse"] = {
|
||||
"field": collapse[0],
|
||||
"inner_hits": {
|
||||
"name": collapse[0],
|
||||
"size": collapse[1],
|
||||
"sort": query["sort"]
|
||||
}
|
||||
}
|
||||
return query
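# Illustration (not part of the module) of how claim-search kwargs expand, assuming the usual
# field constants: '*_ids' kwargs become terms clauses on '.keyword' sub-fields, range fields
# accept a leading comparison operator, and a '^' prefix in order_by means ascending.
#
#     q = expand_query(channel_ids=['abcdef'], height='>=1000000', order_by=['^height'], limit=5)
#     # q['query']['bool']['must'] includes {"terms": {"channel_id.keyword": ["abcdef"]}}
#     #                         and includes {"range": {"height": {"gte": "1000000"}}}
#     # q['sort'] == [{"height": "asc"}] and q['size'] == 5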
|
||||
|
||||
|
||||
def expand_result(results):
|
||||
inner_hits = []
|
||||
expanded = []
|
||||
for result in results:
|
||||
if result.get("inner_hits"):
|
||||
for _, inner_hit in result["inner_hits"].items():
|
||||
inner_hits.extend(inner_hit["hits"]["hits"])
|
||||
continue
|
||||
result = result['_source']
|
||||
result['claim_hash'] = unhexlify(result['claim_id'])[::-1]
|
||||
if result['reposted_claim_id']:
|
||||
result['reposted_claim_hash'] = unhexlify(result['reposted_claim_id'])[::-1]
|
||||
else:
|
||||
result['reposted_claim_hash'] = None
|
||||
result['channel_hash'] = unhexlify(result['channel_id'])[::-1] if result['channel_id'] else None
|
||||
result['txo_hash'] = unhexlify(result['tx_id'])[::-1] + struct.pack('<I', result['tx_nout'])
|
||||
result['tx_hash'] = unhexlify(result['tx_id'])[::-1]
|
||||
result['reposted'] = result.pop('repost_count')
|
||||
result['signature_valid'] = result.pop('is_signature_valid')
|
||||
# result['normalized'] = result.pop('normalized_name')
|
||||
# if result['censoring_channel_hash']:
|
||||
# result['censoring_channel_hash'] = unhexlify(result['censoring_channel_hash'])[::-1]
|
||||
expanded.append(result)
|
||||
if inner_hits:
|
||||
return expand_result(inner_hits)
|
||||
return expanded
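# Illustration (not part of the module): ES stores ids as big-endian hex strings, while the
# rest of the server wants little-endian hashes, so expand_result reverses the unhexlified
# bytes and appends the 4-byte little-endian output index to form txo_hash.
#
#     tx_id = 'beef' * 16                                   # made-up 32-byte txid in hex
#     txo_hash = unhexlify(tx_id)[::-1] + struct.pack('<I', 1)
#     assert len(txo_hash) == 36                            # 32-byte tx hash + 4-byte nout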
|
||||
|
||||
|
||||
class ResultCacheItem:
    __slots__ = '_result', 'lock', 'has_result'

    def __init__(self):
        self.has_result = asyncio.Event()
        self.lock = asyncio.Lock()
        self._result = None

    @property
    def result(self) -> str:
        return self._result

    @result.setter
    def result(self, result: str):
        self._result = result
        if result is not None:
            self.has_result.set()

    @classmethod
    def from_cache(cls, cache_key, cache):
        cache_item = cache.get(cache_key)
        if cache_item is None:
            cache_item = cache[cache_key] = ResultCacheItem()
        return cache_item
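# Illustration (not part of the module) of the per-key de-duplication pattern cached_search and
# search_ahead build on: the first caller to miss takes the lock and fills in the result, while
# concurrent callers for the same key re-check under the lock so the ES query only runs once.
# 'fetch' is a hypothetical coroutine performing the actual query.
#
#     async def cached(key, fetch, cache):
#         item = ResultCacheItem.from_cache(key, cache)
#         if item.result is not None:
#             return item.result
#         async with item.lock:
#             if item.result is None:
#                 item.result = await fetch()
#         return item.result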
lbry/wallet/server/db/elasticsearch/sync.py (new file, 318 additions)
@@ -0,0 +1,318 @@
import os
|
||||
import time
|
||||
import signal
|
||||
import json
|
||||
import typing
|
||||
from collections import defaultdict
|
||||
import asyncio
|
||||
import logging
|
||||
from elasticsearch import AsyncElasticsearch, NotFoundError
|
||||
from elasticsearch.helpers import async_streaming_bulk
|
||||
|
||||
from lbry.schema.result import Censor
|
||||
from lbry.wallet.server.db.elasticsearch.search import IndexVersionMismatch
|
||||
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS, INDEX_DEFAULT_SETTINGS
|
||||
from lbry.wallet.server.db.elasticsearch.common import expand_query
|
||||
from lbry.wallet.server.db.elasticsearch.notifier import ElasticNotifierProtocol
|
||||
from lbry.wallet.server.db.elasticsearch.fast_ar_trending import FAST_AR_TRENDING_SCRIPT
|
||||
from lbry.wallet.server.chain_reader import BlockchainReader
|
||||
from lbry.wallet.server.db.revertable import RevertableOp
|
||||
from lbry.wallet.server.db import DB_PREFIXES
|
||||
|
||||
|
||||
log = logging.getLogger()
|
||||
|
||||
|
||||
class ElasticWriter(BlockchainReader):
|
||||
VERSION = 1
|
||||
|
||||
def __init__(self, env):
|
||||
super().__init__(env, 'lbry-elastic-writer', thread_workers=1, thread_prefix='lbry-elastic-writer')
|
||||
# self._refresh_interval = 0.1
|
||||
self._task = None
|
||||
self.index = self.env.es_index_prefix + 'claims'
|
||||
self._elastic_host = env.elastic_host
|
||||
self._elastic_port = env.elastic_port
|
||||
self.sync_timeout = 1800
|
||||
self.sync_client = AsyncElasticsearch(
|
||||
[{'host': self._elastic_host, 'port': self._elastic_port}], timeout=self.sync_timeout
|
||||
)
|
||||
self._es_info_path = os.path.join(env.db_dir, 'es_info')
|
||||
self._last_wrote_height = 0
|
||||
self._last_wrote_block_hash = None
|
||||
|
||||
self._touched_claims = set()
|
||||
self._deleted_claims = set()
|
||||
|
||||
self._removed_during_undo = set()
|
||||
|
||||
self._trending = defaultdict(list)
|
||||
self._advanced = True
|
||||
self.synchronized = asyncio.Event()
|
||||
self._listeners: typing.List[ElasticNotifierProtocol] = []
|
||||
|
||||
async def run_es_notifier(self, synchronized: asyncio.Event):
|
||||
server = await asyncio.get_event_loop().create_server(
|
||||
lambda: ElasticNotifierProtocol(self._listeners), '127.0.0.1', self.env.elastic_notifier_port
|
||||
)
|
||||
self.log.info("ES notifier server listening on TCP localhost:%i", self.env.elastic_notifier_port)
|
||||
synchronized.set()
|
||||
async with server:
|
||||
await server.serve_forever()
|
||||
|
||||
def notify_es_notification_listeners(self, height: int, block_hash: bytes):
|
||||
for p in self._listeners:
|
||||
p.send_height(height, block_hash)
|
||||
self.log.info("notify listener %i", height)
|
||||
|
||||
def _read_es_height(self):
|
||||
with open(self._es_info_path, 'r') as f:
|
||||
info = json.loads(f.read())
|
||||
self._last_wrote_height = int(info.get('height', 0))
|
||||
self._last_wrote_block_hash = info.get('block_hash', None)
|
||||
|
||||
async def read_es_height(self):
|
||||
await asyncio.get_event_loop().run_in_executor(self._executor, self._read_es_height)
|
||||
|
||||
def write_es_height(self, height: int, block_hash: str):
|
||||
with open(self._es_info_path, 'w') as f:
|
||||
f.write(json.dumps({'height': height, 'block_hash': block_hash}, indent=2))
|
||||
self._last_wrote_height = height
|
||||
self._last_wrote_block_hash = block_hash
|
||||
|
||||
async def get_index_version(self) -> int:
|
||||
try:
|
||||
template = await self.sync_client.indices.get_template(self.index)
|
||||
return template[self.index]['version']
|
||||
except NotFoundError:
|
||||
return 0
|
||||
|
||||
async def set_index_version(self, version):
|
||||
await self.sync_client.indices.put_template(
|
||||
self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
|
||||
)
|
||||
|
||||
async def start_index(self) -> bool:
|
||||
if self.sync_client:
|
||||
return False
|
||||
hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
|
||||
self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
|
||||
while True:
|
||||
try:
|
||||
await self.sync_client.cluster.health(wait_for_status='yellow')
|
||||
break
|
||||
except ConnectionError:
|
||||
self.log.warning("Failed to connect to Elasticsearch. Waiting for it!")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
|
||||
acked = res.get('acknowledged', False)
|
||||
if acked:
|
||||
await self.set_index_version(self.VERSION)
|
||||
return acked
|
||||
index_version = await self.get_index_version()
|
||||
if index_version != self.VERSION:
|
||||
self.log.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
|
||||
raise IndexVersionMismatch(index_version, self.VERSION)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
return acked
|
||||
|
||||
async def stop_index(self):
|
||||
if self.sync_client:
|
||||
await self.sync_client.close()
|
||||
self.sync_client = None
|
||||
|
||||
def delete_index(self):
|
||||
return self.sync_client.indices.delete(self.index, ignore_unavailable=True)
|
||||
|
||||
def update_filter_query(self, censor_type, blockdict, channels=False):
|
||||
blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
|
||||
if channels:
|
||||
update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
else:
|
||||
update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
|
||||
key = 'channel_id' if channels else 'claim_id'
|
||||
update['script'] = {
|
||||
"source": f"ctx._source.censor_type={censor_type}; "
|
||||
f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
|
||||
"lang": "painless",
|
||||
"params": blockdict
|
||||
}
|
||||
return update
|
||||
|
||||
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
|
||||
if filtered_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if filtered_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_streams:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
if blocked_channels:
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
await self.sync_client.update_by_query(
|
||||
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
|
||||
async def _claim_producer(self):
|
||||
for deleted in self._deleted_claims:
|
||||
yield {
|
||||
'_index': self.index,
|
||||
'_op_type': 'delete',
|
||||
'_id': deleted.hex()
|
||||
}
|
||||
for touched in self._touched_claims:
|
||||
claim = self.db.claim_producer(touched)
|
||||
if claim:
|
||||
yield {
|
||||
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
|
||||
'_id': claim['claim_id'],
|
||||
'_index': self.index,
|
||||
'_op_type': 'update',
|
||||
'doc_as_upsert': True
|
||||
}
|
||||
for claim_hash, notifications in self._trending.items():
|
||||
yield {
|
||||
'_id': claim_hash.hex(),
|
||||
'_index': self.index,
|
||||
'_op_type': 'update',
|
||||
'script': {
|
||||
'lang': 'painless',
|
||||
'source': FAST_AR_TRENDING_SCRIPT,
|
||||
'params': {'src': {
|
||||
'changes': [
|
||||
{
|
||||
'height': notify_height,
|
||||
'prev_amount': trending_v.previous_amount / 1E8,
|
||||
'new_amount': trending_v.new_amount / 1E8,
|
||||
} for (notify_height, trending_v) in notifications
|
||||
]
|
||||
}}
|
||||
},
|
||||
}
|
||||
|
||||
def advance(self, height: int):
|
||||
super().advance(height)
|
||||
|
||||
touched_or_deleted = self.db.prefix_db.touched_or_deleted.get(height)
|
||||
for k, v in self.db.prefix_db.trending_notification.iterate((height,)):
|
||||
self._trending[k.claim_hash].append((k.height, v))
|
||||
if touched_or_deleted:
|
||||
readded_after_reorg = self._removed_during_undo.intersection(touched_or_deleted.touched_claims)
|
||||
self._deleted_claims.difference_update(readded_after_reorg)
|
||||
self._touched_claims.update(touched_or_deleted.touched_claims)
|
||||
self._deleted_claims.update(touched_or_deleted.deleted_claims)
|
||||
self._touched_claims.difference_update(self._deleted_claims)
|
||||
for to_del in touched_or_deleted.deleted_claims:
|
||||
if to_del in self._trending:
|
||||
self._trending.pop(to_del)
|
||||
self.log.info("advanced to %i, %i touched %i to delete (%i %i)", height, len(touched_or_deleted.touched_claims), len(touched_or_deleted.deleted_claims),
|
||||
len(self._touched_claims), len(self._deleted_claims))
|
||||
self._advanced = True
|
||||
|
||||
def unwind(self):
|
||||
self.db.tx_counts.pop()
|
||||
reverted_block_hash = self.db.coin.header_hash(self.db.headers.pop())
|
||||
packed = self.db.prefix_db.undo.get(len(self.db.tx_counts), reverted_block_hash)
|
||||
touched_or_deleted = None
|
||||
claims_to_delete = []
|
||||
# find and apply the touched_or_deleted items in the undos for the reverted blocks
|
||||
assert packed, f'missing undo information for block {len(self.db.tx_counts)}'
|
||||
while packed:
|
||||
op, packed = RevertableOp.unpack(packed)
|
||||
if op.is_delete and op.key.startswith(DB_PREFIXES.touched_or_deleted.value):
|
||||
assert touched_or_deleted is None, 'only should have one match'
|
||||
touched_or_deleted = self.db.prefix_db.touched_or_deleted.unpack_value(op.value)
|
||||
elif op.is_delete and op.key.startswith(DB_PREFIXES.claim_to_txo.value):
|
||||
v = self.db.prefix_db.claim_to_txo.unpack_value(op.value)
|
||||
if v.root_tx_num == v.tx_num and v.root_tx_num > self.db.tx_counts[-1]:
|
||||
claims_to_delete.append(self.db.prefix_db.claim_to_txo.unpack_key(op.key).claim_hash)
|
||||
if touched_or_deleted:
|
||||
self._touched_claims.update(set(touched_or_deleted.deleted_claims).union(
|
||||
touched_or_deleted.touched_claims.difference(set(claims_to_delete))))
|
||||
self._deleted_claims.update(claims_to_delete)
|
||||
self._removed_during_undo.update(claims_to_delete)
|
||||
self._advanced = True
|
||||
self.log.warning("delete %i claim and upsert %i from reorg", len(self._deleted_claims), len(self._touched_claims))
|
||||
|
||||
async def poll_for_changes(self):
|
||||
await super().poll_for_changes()
|
||||
cnt = 0
|
||||
success = 0
|
||||
if self._advanced:
|
||||
if self._touched_claims or self._deleted_claims or self._trending:
|
||||
async for ok, item in async_streaming_bulk(
|
||||
self.sync_client, self._claim_producer(),
|
||||
raise_on_error=False):
|
||||
cnt += 1
|
||||
if not ok:
|
||||
self.log.warning("indexing failed for an item: %s", item)
|
||||
else:
|
||||
success += 1
|
||||
await self.sync_client.indices.refresh(self.index)
|
||||
self.write_es_height(self.db.db_height, self.db.db_tip[::-1].hex())
|
||||
self.log.info("Indexing block %i done. %i/%i successful", self._last_wrote_height, success, cnt)
|
||||
self._touched_claims.clear()
|
||||
self._deleted_claims.clear()
|
||||
self._removed_during_undo.clear()
|
||||
self._trending.clear()
|
||||
self._advanced = False
|
||||
self.synchronized.set()
|
||||
self.notify_es_notification_listeners(self._last_wrote_height, self.db.db_tip)
|
||||
|
||||
@property
|
||||
def last_synced_height(self) -> int:
|
||||
return self._last_wrote_height
|
||||
|
||||
async def start(self):
|
||||
env = self.env
|
||||
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
|
||||
|
||||
def _start_cancellable(run, *args):
|
||||
_flag = asyncio.Event()
|
||||
self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
|
||||
return _flag.wait()
|
||||
|
||||
self.db.open_db()
|
||||
await self.db.initialize_caches()
|
||||
await self.start_index()
|
||||
self.last_state = self.db.read_db_state()
|
||||
|
||||
await _start_cancellable(self.run_es_notifier)
|
||||
await _start_cancellable(self.refresh_blocks_forever)
|
||||
|
||||
async def stop(self, delete_index=False):
|
||||
async with self._lock:
|
||||
while self.cancellable_tasks:
|
||||
t = self.cancellable_tasks.pop()
|
||||
if not t.done():
|
||||
t.cancel()
|
||||
if delete_index:
|
||||
await self.delete_index()
|
||||
await self.stop_index()
|
||||
self._executor.shutdown(wait=True)
|
||||
|
||||
def run(self):
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
def __exit():
|
||||
raise SystemExit()
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, __exit)
|
||||
loop.add_signal_handler(signal.SIGTERM, __exit)
|
||||
loop.run_until_complete(self.start())
|
||||
loop.run_until_complete(self.shutdown_event.wait())
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
pass
|
||||
finally:
|
||||
loop.run_until_complete(self.stop())
|
lbry/wallet/server/db/interface.py (new file, 141 additions)
@@ -0,0 +1,141 @@
import struct
|
||||
import typing
|
||||
|
||||
import rocksdb
|
||||
from typing import Optional
|
||||
from lbry.wallet.server.db import DB_PREFIXES
|
||||
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
|
||||
|
||||
|
||||
class PrefixDB:
|
||||
"""
|
||||
Base class for a revertable rocksdb database (a rocksdb db where each set of applied changes can be undone)
|
||||
"""
|
||||
UNDO_KEY_STRUCT = struct.Struct(b'>Q32s')
|
||||
PARTIAL_UNDO_KEY_STRUCT = struct.Struct(b'>Q')
|
||||
|
||||
def __init__(self, path, max_open_files=64, secondary_path='', max_undo_depth: int = 200, unsafe_prefixes=None):
|
||||
column_family_options = {
|
||||
prefix.value: rocksdb.ColumnFamilyOptions() for prefix in DB_PREFIXES
|
||||
} if secondary_path else {}
|
||||
self.column_families: typing.Dict[bytes, 'rocksdb.ColumnFamilyHandle'] = {}
|
||||
self._db = rocksdb.DB(
|
||||
path, rocksdb.Options(
|
||||
create_if_missing=True, use_fsync=True, target_file_size_base=33554432,
|
||||
max_open_files=max_open_files if not secondary_path else -1
|
||||
), secondary_name=secondary_path, column_families=column_family_options
|
||||
)
|
||||
for prefix in DB_PREFIXES:
|
||||
cf = self._db.get_column_family(prefix.value)
|
||||
if cf is None and not secondary_path:
|
||||
self._db.create_column_family(prefix.value, rocksdb.ColumnFamilyOptions())
|
||||
cf = self._db.get_column_family(prefix.value)
|
||||
self.column_families[prefix.value] = cf
|
||||
|
||||
self._op_stack = RevertableOpStack(self.get, unsafe_prefixes=unsafe_prefixes)
|
||||
self._max_undo_depth = max_undo_depth
|
||||
|
||||
def unsafe_commit(self):
|
||||
"""
|
||||
Write staged changes to the database without keeping undo information
|
||||
Changes written cannot be undone
|
||||
"""
|
||||
try:
|
||||
if not len(self._op_stack):
|
||||
return
|
||||
with self._db.write_batch(sync=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
get_column_family = self.column_families.__getitem__
|
||||
for staged_change in self._op_stack:
|
||||
column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
|
||||
if staged_change.is_put:
|
||||
batch_put((column_family, staged_change.key), staged_change.value)
|
||||
else:
|
||||
batch_delete((column_family, staged_change.key))
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def commit(self, height: int, block_hash: bytes):
|
||||
"""
|
||||
Write changes for a block height to the database and keep undo information so that the changes can be reverted
|
||||
"""
|
||||
undo_ops = self._op_stack.get_undo_ops()
|
||||
delete_undos = []
|
||||
if height > self._max_undo_depth:
|
||||
delete_undos.extend(self._db.iterator(
|
||||
start=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(0),
|
||||
iterate_upper_bound=DB_PREFIXES.undo.value + self.PARTIAL_UNDO_KEY_STRUCT.pack(height - self._max_undo_depth),
|
||||
include_value=False
|
||||
))
|
||||
try:
|
||||
undo_c_f = self.column_families[DB_PREFIXES.undo.value]
|
||||
with self._db.write_batch(sync=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
get_column_family = self.column_families.__getitem__
|
||||
for staged_change in self._op_stack:
|
||||
column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
|
||||
if staged_change.is_put:
|
||||
batch_put((column_family, staged_change.key), staged_change.value)
|
||||
else:
|
||||
batch_delete((column_family, staged_change.key))
|
||||
for undo_to_delete in delete_undos:
|
||||
batch_delete((undo_c_f, undo_to_delete))
|
||||
batch_put((undo_c_f, DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash)), undo_ops)
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def rollback(self, height: int, block_hash: bytes):
|
||||
"""
|
||||
Revert changes for a block height
|
||||
"""
|
||||
undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height, block_hash)
|
||||
undo_c_f = self.column_families[DB_PREFIXES.undo.value]
|
||||
undo_info = self._db.get((undo_c_f, undo_key))
|
||||
self._op_stack.apply_packed_undo_ops(undo_info)
|
||||
try:
|
||||
with self._db.write_batch(sync=True) as batch:
|
||||
batch_put = batch.put
|
||||
batch_delete = batch.delete
|
||||
get_column_family = self.column_families.__getitem__
|
||||
for staged_change in self._op_stack:
|
||||
column_family = get_column_family(DB_PREFIXES(staged_change.key[:1]).value)
|
||||
if staged_change.is_put:
|
||||
batch_put((column_family, staged_change.key), staged_change.value)
|
||||
else:
|
||||
batch_delete((column_family, staged_change.key))
|
||||
# batch_delete(undo_key)
|
||||
finally:
|
||||
self._op_stack.clear()
|
||||
|
||||
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
|
||||
cf = self.column_families[key[:1]]
|
||||
return self._db.get((cf, key), fill_cache=fill_cache)
|
||||
|
||||
def iterator(self, start: bytes, column_family: 'rocksdb.ColumnFamilyHandle' = None,
|
||||
iterate_lower_bound: bytes = None, iterate_upper_bound: bytes = None,
|
||||
reverse: bool = False, include_key: bool = True, include_value: bool = True,
|
||||
fill_cache: bool = True, prefix_same_as_start: bool = True, auto_prefix_mode: bool = True):
|
||||
return self._db.iterator(
|
||||
start=start, column_family=column_family, iterate_lower_bound=iterate_lower_bound,
|
||||
iterate_upper_bound=iterate_upper_bound, reverse=reverse, include_key=include_key,
|
||||
include_value=include_value, fill_cache=fill_cache, prefix_same_as_start=prefix_same_as_start,
|
||||
auto_prefix_mode=auto_prefix_mode
|
||||
)
|
||||
|
||||
def close(self):
|
||||
self._db.close()
|
||||
|
||||
def try_catch_up_with_primary(self):
|
||||
self._db.try_catch_up_with_primary()
|
||||
|
||||
@property
|
||||
def closed(self) -> bool:
|
||||
return self._db.is_closed
|
||||
|
||||
def stage_raw_put(self, key: bytes, value: bytes):
|
||||
self._op_stack.append_op(RevertablePut(key, value))
|
||||
|
||||
def stage_raw_delete(self, key: bytes, value: bytes):
|
||||
self._op_stack.append_op(RevertableDelete(key, value))
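# Illustration (not part of the module) of the intended lifecycle: changes for a block are
# staged, commit() writes them in one batch together with a packed undo record keyed by
# height and block hash, and rollback() replays those inverse ops. Paths, keys and block
# hashes below are made up.
#
#     db = PrefixDB('/tmp/claims.db', max_undo_depth=200)
#     key = DB_PREFIXES.claim_to_txo.value + b'some-made-up-claim-h'
#
#     db.stage_raw_put(key, b'value@1000')
#     db.commit(1000, b'\x00' * 32)                 # batch write + undo record for height 1000
#
#     db.stage_raw_delete(key, b'value@1000')       # an update is a delete of the old value...
#     db.stage_raw_put(key, b'value@1001')          # ...followed by a put of the new one
#     db.commit(1001, b'\x11' * 32)
#
#     db.rollback(1001, b'\x11' * 32)               # replays the packed inverse ops
#     assert db.get(key) == b'value@1000'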
|
lbry/wallet/server/db/prefixes.py (new file, 1790 additions; diff suppressed because it is too large)
lbry/wallet/server/db/revertable.py (new file, 175 additions)
@@ -0,0 +1,175 @@
import struct
|
||||
import logging
|
||||
from string import printable
|
||||
from collections import defaultdict
|
||||
from typing import Tuple, Iterable, Callable, Optional
|
||||
from lbry.wallet.server.db import DB_PREFIXES
|
||||
|
||||
_OP_STRUCT = struct.Struct('>BLL')
|
||||
log = logging.getLogger()
|
||||
|
||||
|
||||
class RevertableOp:
|
||||
__slots__ = [
|
||||
'key',
|
||||
'value',
|
||||
]
|
||||
is_put = 0
|
||||
|
||||
def __init__(self, key: bytes, value: bytes):
|
||||
self.key = key
|
||||
self.value = value
|
||||
|
||||
@property
|
||||
def is_delete(self) -> bool:
|
||||
return not self.is_put
|
||||
|
||||
def invert(self) -> 'RevertableOp':
|
||||
raise NotImplementedError()
|
||||
|
||||
def pack(self) -> bytes:
|
||||
"""
|
||||
Serialize to bytes
|
||||
"""
|
||||
return struct.pack(
|
||||
f'>BLL{len(self.key)}s{len(self.value)}s', int(self.is_put), len(self.key), len(self.value), self.key,
|
||||
self.value
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def unpack(cls, packed: bytes) -> Tuple['RevertableOp', bytes]:
|
||||
"""
|
||||
Deserialize from bytes
|
||||
|
||||
:param packed: bytes containing at least one packed revertable op
|
||||
:return: tuple of the deserialized op (a put or a delete) and the remaining serialized bytes
|
||||
"""
|
||||
is_put, key_len, val_len = _OP_STRUCT.unpack(packed[:9])
|
||||
key = packed[9:9 + key_len]
|
||||
value = packed[9 + key_len:9 + key_len + val_len]
|
||||
if is_put == 1:
|
||||
return RevertablePut(key, value), packed[9 + key_len + val_len:]
|
||||
return RevertableDelete(key, value), packed[9 + key_len + val_len:]
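# Illustration (not part of the module): each op serializes to a 9-byte '>BLL' header
# (put flag, key length, value length) followed by the raw key and value, and undo blobs
# are simply these frames concatenated, which is why unpack() hands back the remainder.
#
#     packed = RevertablePut(b'k' * 12, b'some value').pack() + RevertableDelete(b'x', b'y').pack()
#     first, rest = RevertableOp.unpack(packed)     # -> RevertablePut(b'kkkkkkkkkkkk', b'some value')
#     second, rest = RevertableOp.unpack(rest)      # -> RevertableDelete(b'x', b'y'), rest == b''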
|
||||
|
||||
def __eq__(self, other: 'RevertableOp') -> bool:
|
||||
return (self.is_put, self.key, self.value) == (other.is_put, other.key, other.value)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return str(self)
|
||||
|
||||
def __str__(self) -> str:
|
||||
from lbry.wallet.server.db.prefixes import auto_decode_item
|
||||
k, v = auto_decode_item(self.key, self.value)
|
||||
key = ''.join(c if c in printable else '.' for c in str(k))
|
||||
val = ''.join(c if c in printable else '.' for c in str(v))
|
||||
return f"{'PUT' if self.is_put else 'DELETE'} {DB_PREFIXES(self.key[:1]).name}: {key} | {val}"
|
||||
|
||||
|
||||
class RevertableDelete(RevertableOp):
    def invert(self):
        return RevertablePut(self.key, self.value)


class RevertablePut(RevertableOp):
    is_put = True

    def invert(self):
        return RevertableDelete(self.key, self.value)


class OpStackIntegrity(Exception):
    pass
|
||||
|
||||
|
||||
class RevertableOpStack:
|
||||
def __init__(self, get_fn: Callable[[bytes], Optional[bytes]], unsafe_prefixes=None):
|
||||
"""
|
||||
This represents a sequence of revertable puts and deletes to a key-value database that checks for integrity
|
||||
violations when applying the puts and deletes. The integrity checks assure that keys that do not exist
|
||||
are not deleted, and that when keys are deleted the current value is correctly known so that the delete
|
||||
may be undone. When putting values, the integrity checks assure that existing values are not overwritten
|
||||
without first being deleted. Updates are performed by applying a delete op for the old value and a put op
|
||||
for the new value.
|
||||
|
||||
:param get_fn: getter function from an object implementing `KeyValueStorage`
|
||||
:param unsafe_prefixes: optional set of prefixes to ignore integrity errors for, violations are still logged
|
||||
"""
|
||||
self._get = get_fn
|
||||
self._items = defaultdict(list)
|
||||
self._unsafe_prefixes = unsafe_prefixes or set()
|
||||
|
||||
def append_op(self, op: RevertableOp):
|
||||
"""
|
||||
Apply a put or delete op, checking that it introduces no integrity errors
|
||||
"""
|
||||
|
||||
inverted = op.invert()
|
||||
if self._items[op.key] and inverted == self._items[op.key][-1]:
|
||||
self._items[op.key].pop() # if the new op is the inverse of the last op, we can safely null both
|
||||
return
|
||||
elif self._items[op.key] and self._items[op.key][-1] == op: # duplicate of last op
|
||||
return # raise an error?
|
||||
stored_val = self._get(op.key)
|
||||
has_stored_val = stored_val is not None
|
||||
delete_stored_op = None if not has_stored_val else RevertableDelete(op.key, stored_val)
|
||||
will_delete_existing_stored = False if delete_stored_op is None else (delete_stored_op in self._items[op.key])
|
||||
try:
|
||||
if op.is_put and has_stored_val and not will_delete_existing_stored:
|
||||
raise OpStackIntegrity(
|
||||
f"db op tries to add on top of existing key without deleting first: {op}"
|
||||
)
|
||||
elif op.is_delete and has_stored_val and stored_val != op.value and not will_delete_existing_stored:
|
||||
# there is a value and we're not deleting it in this op
|
||||
# check that a delete for the stored value is in the stack
|
||||
raise OpStackIntegrity(f"db op tries to delete with incorrect existing value {op}")
|
||||
elif op.is_delete and not has_stored_val:
|
||||
raise OpStackIntegrity(f"db op tries to delete nonexistent key: {op}")
|
||||
elif op.is_delete and stored_val != op.value:
|
||||
raise OpStackIntegrity(f"db op tries to delete with incorrect value: {op}")
|
||||
except OpStackIntegrity as err:
|
||||
if op.key[:1] in self._unsafe_prefixes:
|
||||
log.debug(f"skipping over integrity error: {err}")
|
||||
else:
|
||||
raise err
|
||||
self._items[op.key].append(op)
|
||||
|
||||
def extend_ops(self, ops: Iterable[RevertableOp]):
|
||||
"""
|
||||
Apply a sequence of put or delete ops, checking that they introduce no integrity errors
|
||||
"""
|
||||
for op in ops:
|
||||
self.append_op(op)
|
||||
|
||||
def clear(self):
|
||||
self._items.clear()
|
||||
|
||||
def __len__(self):
|
||||
return sum(map(len, self._items.values()))
|
||||
|
||||
def __iter__(self):
|
||||
for key, ops in self._items.items():
|
||||
for op in ops:
|
||||
yield op
|
||||
|
||||
def __reversed__(self):
|
||||
for key, ops in self._items.items():
|
||||
for op in reversed(ops):
|
||||
yield op
|
||||
|
||||
def get_undo_ops(self) -> bytes:
|
||||
"""
|
||||
Get the serialized bytes to undo all of the changes made by the pending ops
|
||||
"""
|
||||
return b''.join(op.invert().pack() for op in reversed(self))
|
||||
|
||||
def apply_packed_undo_ops(self, packed: bytes):
|
||||
"""
|
||||
Unpack and apply a sequence of undo ops from serialized undo bytes
|
||||
"""
|
||||
while packed:
|
||||
op, packed = RevertableOp.unpack(packed)
|
||||
self.append_op(op)
|
||||
|
||||
def get_last_op_for_key(self, key: bytes) -> Optional[RevertableOp]:
|
||||
if key in self._items and self._items[key]:
|
||||
return self._items[key][-1]
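# Illustration (not part of the module): an update is always staged as a delete quoting the
# value currently in the database followed by a put of the new value; the stack rejects puts
# over live keys and deletes of stale values. A plain dict stands in for the rocksdb getter,
# and the key bytes are made up.
#
#     backing = {b'Kexample-key': b'old-value'}
#     stack = RevertableOpStack(backing.get)
#     stack.append_op(RevertableDelete(b'Kexample-key', b'old-value'))
#     stack.append_op(RevertablePut(b'Kexample-key', b'new-value'))
#     undo = stack.get_undo_ops()    # packed inverse ops: delete b'new-value', re-put b'old-value'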
lbry/wallet/server/env.py (new file, 388 additions)
@@ -0,0 +1,388 @@
# Copyright (c) 2016, Neil Booth
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# See the file "LICENCE" for information about the copyright
|
||||
# and warranty status of this software.
|
||||
|
||||
import math
|
||||
import re
|
||||
import resource
|
||||
from os import environ
|
||||
from collections import namedtuple
|
||||
from ipaddress import ip_address
|
||||
|
||||
from lbry.wallet.server.util import class_logger
|
||||
from lbry.wallet.server.coin import Coin, LBC, LBCTestNet, LBCRegTest
|
||||
import lbry.wallet.server.util as lib_util
|
||||
|
||||
|
||||
NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
|
||||
|
||||
|
||||
class Env:
|
||||
|
||||
# Peer discovery
|
||||
PD_OFF, PD_SELF, PD_ON = range(3)
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
def __init__(self, coin=None, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None,
|
||||
elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None, websocket_port=None,
|
||||
chain=None, es_index_prefix=None, cache_MB=None, reorg_limit=None, tcp_port=None,
|
||||
udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None, rpc_port=None,
|
||||
prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None,
|
||||
allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None,
|
||||
payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None,
|
||||
session_timeout=None, drop_client=None, description=None, daily_fee=None,
|
||||
database_query_timeout=None, db_max_open_files=512, elastic_notifier_port=None,
|
||||
blocking_channel_ids=None, filtering_channel_ids=None):
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
|
||||
self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
|
||||
self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL')
|
||||
self.db_max_open_files = db_max_open_files
|
||||
|
||||
self.host = host if host is not None else self.default('HOST', 'localhost')
|
||||
self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost')
|
||||
self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
|
||||
self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
|
||||
self.elastic_notifier_port = elastic_notifier_port if elastic_notifier_port is not None else self.integer('ELASTIC_NOTIFIER_PORT', 19080)
|
||||
|
||||
self.loop_policy = self.set_event_loop_policy(
|
||||
loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None)
|
||||
)
|
||||
self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
|
||||
self.max_query_workers = max_query_workers if max_query_workers is not None else self.integer('MAX_QUERY_WORKERS', 4)
|
||||
self.websocket_host = websocket_host if websocket_host is not None else self.default('WEBSOCKET_HOST', self.host)
|
||||
self.websocket_port = websocket_port if websocket_port is not None else self.integer('WEBSOCKET_PORT', None)
|
||||
if coin is not None:
|
||||
assert issubclass(coin, Coin)
|
||||
self.coin = coin
|
||||
else:
|
||||
chain = chain if chain is not None else self.default('NET', 'mainnet').strip().lower()
|
||||
if chain == 'mainnet':
|
||||
self.coin = LBC
|
||||
elif chain == 'testnet':
|
||||
self.coin = LBCTestNet
|
||||
else:
|
||||
self.coin = LBCRegTest
|
||||
self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
|
||||
self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
|
||||
self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
|
||||
# Server stuff
|
||||
self.tcp_port = tcp_port if tcp_port is not None else self.integer('TCP_PORT', None)
|
||||
self.udp_port = udp_port if udp_port is not None else self.integer('UDP_PORT', self.tcp_port)
|
||||
self.ssl_port = ssl_port if ssl_port is not None else self.integer('SSL_PORT', None)
|
||||
if self.ssl_port:
|
||||
self.ssl_certfile = ssl_certfile if ssl_certfile is not None else self.required('SSL_CERTFILE')
|
||||
self.ssl_keyfile = ssl_keyfile if ssl_keyfile is not None else self.required('SSL_KEYFILE')
|
||||
self.rpc_port = rpc_port if rpc_port is not None else self.integer('RPC_PORT', 8000)
|
||||
self.prometheus_port = prometheus_port if prometheus_port is not None else self.integer('PROMETHEUS_PORT', 0)
|
||||
self.max_subscriptions = max_subscriptions if max_subscriptions is not None else self.integer('MAX_SUBSCRIPTIONS', 10000)
|
||||
self.banner_file = banner_file if banner_file is not None else self.default('BANNER_FILE', None)
|
||||
# self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
|
||||
self.anon_logs = anon_logs if anon_logs is not None else self.boolean('ANON_LOGS', False)
|
||||
self.log_sessions = log_sessions if log_sessions is not None else self.integer('LOG_SESSIONS', 3600)
|
||||
self.allow_lan_udp = allow_lan_udp if allow_lan_udp is not None else self.boolean('ALLOW_LAN_UDP', False)
|
||||
self.cache_all_tx_hashes = cache_all_tx_hashes if cache_all_tx_hashes is not None else self.boolean('CACHE_ALL_TX_HASHES', False)
|
||||
self.cache_all_claim_txos = cache_all_claim_txos if cache_all_claim_txos is not None else self.boolean('CACHE_ALL_CLAIM_TXOS', False)
|
||||
self.country = country if country is not None else self.default('COUNTRY', 'US')
|
||||
# Peer discovery
|
||||
self.peer_discovery = self.peer_discovery_enum()
|
||||
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
|
||||
self.peer_hubs = self.extract_peer_hubs()
|
||||
# self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
|
||||
# self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
|
||||
# The electrum client takes the empty string as unspecified
|
||||
self.payment_address = payment_address if payment_address is not None else self.default('PAYMENT_ADDRESS', '')
|
||||
self.donation_address = donation_address if donation_address is not None else self.default('DONATION_ADDRESS', '')
|
||||
# Server limits to help prevent DoS
|
||||
self.max_send = max_send if max_send is not None else self.integer('MAX_SEND', 1000000)
|
||||
self.max_receive = max_receive if max_receive is not None else self.integer('MAX_RECEIVE', 1000000)
|
||||
# self.max_subs = self.integer('MAX_SUBS', 250000)
|
||||
self.max_sessions = max_sessions if max_sessions is not None else self.sane_max_sessions()
|
||||
# self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
|
||||
self.session_timeout = session_timeout if session_timeout is not None else self.integer('SESSION_TIMEOUT', 600)
|
||||
self.drop_client = drop_client if drop_client is not None else self.custom("DROP_CLIENT", None, re.compile)
|
||||
self.description = description if description is not None else self.default('DESCRIPTION', '')
|
||||
self.daily_fee = daily_fee if daily_fee is not None else self.string_amount('DAILY_FEE', '0')
|
||||
|
||||
# Identities
|
||||
clearnet_identity = self.clearnet_identity()
|
||||
tor_identity = self.tor_identity(clearnet_identity)
|
||||
self.identities = [identity
|
||||
for identity in (clearnet_identity, tor_identity)
|
||||
if identity is not None]
|
||||
self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
|
||||
(float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)
|
||||
|
||||
# Filtering / Blocking
|
||||
self.blocking_channel_ids = (blocking_channel_ids if blocking_channel_ids is not None else self.default('BLOCKING_CHANNEL_IDS', '')).split(' ')
|
||||
self.filtering_channel_ids = (filtering_channel_ids if filtering_channel_ids is not None else self.default('FILTERING_CHANNEL_IDS', '')).split(' ')
|
||||
|
||||
@classmethod
|
||||
def default(cls, envvar, default):
|
||||
return environ.get(envvar, default)
|
||||
|
||||
@classmethod
|
||||
def boolean(cls, envvar, default):
|
||||
default = 'Yes' if default else ''
|
||||
return bool(cls.default(envvar, default).strip())
|
||||
|
||||
@classmethod
|
||||
def required(cls, envvar):
|
||||
value = environ.get(envvar)
|
||||
if value is None:
|
||||
raise cls.Error(f'required envvar {envvar} not set')
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def string_amount(cls, envvar, default):
|
||||
value = environ.get(envvar, default)
|
||||
amount_pattern = re.compile(r"[0-9]{0,10}(\.[0-9]{1,8})?")
|
||||
if len(value) > 0 and not amount_pattern.fullmatch(value):
|
||||
raise cls.Error(f'{value} is not a valid amount for {envvar}')
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def integer(cls, envvar, default):
|
||||
value = environ.get(envvar)
|
||||
if value is None:
|
||||
return default
|
||||
try:
|
||||
return int(value)
|
||||
except Exception:
|
||||
raise cls.Error(f'cannot convert envvar {envvar} value {value} to an integer')
|
||||
|
||||
@classmethod
|
||||
def custom(cls, envvar, default, parse):
|
||||
value = environ.get(envvar)
|
||||
if value is None:
|
||||
return default
|
||||
try:
|
||||
return parse(value)
|
||||
except Exception as e:
|
||||
raise cls.Error(f'cannot parse envvar {envvar} value {value}') from e
|
||||
|
||||
@classmethod
|
||||
def obsolete(cls, envvars):
|
||||
bad = [envvar for envvar in envvars if environ.get(envvar)]
|
||||
if bad:
|
||||
raise cls.Error(f'remove obsolete environment variables {bad}')
|
||||
|
||||
@classmethod
|
||||
def set_event_loop_policy(cls, policy_name: str = None):
|
||||
if not policy_name or policy_name == 'default':
|
||||
import asyncio
|
||||
return asyncio.get_event_loop_policy()
|
||||
elif policy_name == 'uvloop':
|
||||
import uvloop
|
||||
import asyncio
|
||||
loop_policy = uvloop.EventLoopPolicy()
|
||||
asyncio.set_event_loop_policy(loop_policy)
|
||||
return loop_policy
|
||||
raise cls.Error(f'unknown event loop policy "{policy_name}"')
|
||||
|
||||
def cs_host(self, *, for_rpc):
|
||||
"""Returns the 'host' argument to pass to asyncio's create_server
|
||||
call. The result can be a single host name string, a list of
|
||||
host name strings, or an empty string to bind to all interfaces.
|
||||
|
||||
If for_rpc is True the host to use for the RPC server is returned.
|
||||
Otherwise the host to use for SSL/TCP servers is returned.
|
||||
"""
|
||||
host = self.rpc_host if for_rpc else self.host
|
||||
result = [part.strip() for part in host.split(',')]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
# An empty result indicates all interfaces, which we do not
|
||||
# permit for an RPC server.
|
||||
if for_rpc and not result:
|
||||
result = 'localhost'
|
||||
if result == 'localhost':
|
||||
# 'localhost' resolves to ::1 (ipv6) on many systems, which fails on default setup of
|
||||
# docker, using 127.0.0.1 instead forces ipv4
|
||||
result = '127.0.0.1'
|
||||
return result
|
||||
|
||||
def sane_max_sessions(self):
|
||||
"""Return the maximum number of sessions to permit. Normally this
|
||||
is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
|
||||
downwards if running with a small open file rlimit."""
|
||||
env_value = self.integer('MAX_SESSIONS', 1000)
|
||||
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
|
||||
# We give the DB 250 files; allow ElectrumX 100 for itself
|
||||
value = max(0, min(env_value, nofile_limit - 350))
|
||||
if value < env_value:
|
||||
self.logger.warning(f'lowered maximum sessions from {env_value:,d} to {value:,d} '
|
||||
f'because your open file limit is {nofile_limit:,d}')
|
||||
return value
|
||||
|
||||
def clearnet_identity(self):
|
||||
host = self.default('REPORT_HOST', None)
|
||||
if host is None:
|
||||
return None
|
||||
try:
|
||||
ip = ip_address(host)
|
||||
except ValueError:
|
||||
bad = (not lib_util.is_valid_hostname(host)
|
||||
or host.lower() == 'localhost')
|
||||
else:
|
||||
bad = (ip.is_multicast or ip.is_unspecified
|
||||
or (ip.is_private and self.peer_announce))
|
||||
if bad:
|
||||
raise self.Error(f'"{host}" is not a valid REPORT_HOST')
|
||||
tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
|
||||
ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
|
||||
if tcp_port == ssl_port:
|
||||
raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
|
||||
f'both resolve to {tcp_port}')
|
||||
return NetIdentity(
|
||||
host,
|
||||
tcp_port,
|
||||
ssl_port,
|
||||
''
|
||||
)
|
||||
|
||||
def tor_identity(self, clearnet):
|
||||
host = self.default('REPORT_HOST_TOR', None)
|
||||
if host is None:
|
||||
return None
|
||||
if not host.endswith('.onion'):
|
||||
raise self.Error(f'tor host "{host}" must end with ".onion"')
|
||||
|
||||
def port(port_kind):
|
||||
"""Returns the clearnet identity port, if any and not zero,
|
||||
otherwise the listening port."""
|
||||
result = 0
|
||||
if clearnet:
|
||||
result = getattr(clearnet, port_kind)
|
||||
return result or getattr(self, port_kind)
|
||||
|
||||
tcp_port = self.integer('REPORT_TCP_PORT_TOR',
|
||||
port('tcp_port')) or None
|
||||
ssl_port = self.integer('REPORT_SSL_PORT_TOR',
|
||||
port('ssl_port')) or None
|
||||
if tcp_port == ssl_port:
|
||||
raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
|
||||
f'both resolve to {tcp_port}')
|
||||
|
||||
return NetIdentity(
|
||||
host,
|
||||
tcp_port,
|
||||
ssl_port,
|
||||
'_tor',
|
||||
)
|
||||
|
||||
def hosts_dict(self):
|
||||
return {identity.host: {'tcp_port': identity.tcp_port,
|
||||
'ssl_port': identity.ssl_port}
|
||||
for identity in self.identities}
|
||||
|
||||
def peer_discovery_enum(self):
|
||||
pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
|
||||
if pd in ('off', ''):
|
||||
return self.PD_OFF
|
||||
elif pd == 'self':
|
||||
return self.PD_SELF
|
||||
else:
|
||||
return self.PD_ON
|
||||
|
||||
def extract_peer_hubs(self):
|
||||
return [hub.strip() for hub in self.default('PEER_HUBS', '').split(',') if hub.strip()]
|
||||
|
||||
@classmethod
|
||||
def contribute_to_arg_parser(cls, parser):
|
||||
parser.add_argument('--db_dir', type=str, help='path of the directory containing lbry-leveldb',
|
||||
default=cls.default('DB_DIRECTORY', None))
|
||||
parser.add_argument('--daemon_url',
|
||||
help='URL for rpc from lbrycrd, <rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>',
|
||||
default=cls.default('DAEMON_URL', None))
|
||||
parser.add_argument('--db_max_open_files', type=int, default=512,
|
||||
help='number of files leveldb can have open at a time')
|
||||
parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
|
||||
help='Interface for hub server to listen on')
|
||||
parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
|
||||
help='TCP port to listen on for hub server')
|
||||
parser.add_argument('--udp_port', type=int, default=cls.integer('UDP_PORT', 50001),
|
||||
help='UDP port to listen on for hub server')
|
||||
parser.add_argument('--rpc_host', default=cls.default('RPC_HOST', 'localhost'), type=str,
|
||||
help='Listening interface for admin rpc')
|
||||
parser.add_argument('--rpc_port', default=cls.integer('RPC_PORT', 8000), type=int,
|
||||
help='Listening port for admin rpc')
|
||||
parser.add_argument('--websocket_host', default=cls.default('WEBSOCKET_HOST', 'localhost'), type=str,
|
||||
help='Listening interface for websocket')
|
||||
parser.add_argument('--websocket_port', default=cls.integer('WEBSOCKET_PORT', None), type=int,
|
||||
help='Listening port for websocket')
|
||||
|
||||
parser.add_argument('--ssl_port', default=cls.integer('SSL_PORT', None), type=int,
|
||||
help='SSL port to listen on for hub server')
|
||||
parser.add_argument('--ssl_certfile', default=cls.default('SSL_CERTFILE', None), type=str,
|
||||
help='Path to SSL cert file')
|
||||
parser.add_argument('--ssl_keyfile', default=cls.default('SSL_KEYFILE', None), type=str,
|
||||
help='Path to SSL key file')
|
||||
parser.add_argument('--reorg_limit', default=cls.integer('REORG_LIMIT', 200), type=int, help='Max reorg depth')
|
||||
parser.add_argument('--elastic_host', default=cls.default('ELASTIC_HOST', 'localhost'), type=str,
|
||||
help='elasticsearch host')
|
||||
parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
|
||||
help='elasticsearch port')
|
||||
parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
|
||||
parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str,
|
||||
choices=['default', 'uvloop'])
|
||||
parser.add_argument('--max_query_workers', type=int, default=cls.integer('MAX_QUERY_WORKERS', 4),
|
||||
help='number of threads used by the request handler to read the database')
|
||||
parser.add_argument('--cache_MB', type=int, default=cls.integer('CACHE_MB', 1024),
|
||||
help='size of the leveldb lru cache, in megabytes')
|
||||
parser.add_argument('--cache_all_tx_hashes', type=bool,
|
||||
help='Load all tx hashes into memory. This will make address subscriptions and sync, '
|
||||
'resolve, transaction fetching, and block sync all faster at the expense of higher '
|
||||
'memory usage')
|
||||
parser.add_argument('--cache_all_claim_txos', type=bool,
|
||||
help='Load all claim txos into memory. This will make address subscriptions and sync, '
|
||||
'resolve, transaction fetching, and block sync all faster at the expense of higher '
|
||||
'memory usage')
|
||||
parser.add_argument('--prometheus_port', type=int, default=cls.integer('PROMETHEUS_PORT', 0),
|
||||
help='port for hub prometheus metrics to listen on, disabled by default')
|
||||
parser.add_argument('--max_subscriptions', type=int, default=cls.integer('MAX_SUBSCRIPTIONS', 10000),
|
||||
help='max subscriptions per connection')
|
||||
parser.add_argument('--banner_file', type=str, default=cls.default('BANNER_FILE', None),
|
||||
help='path to file containing banner text')
|
||||
parser.add_argument('--anon_logs', type=bool, default=cls.boolean('ANON_LOGS', False),
|
||||
help="don't log ip addresses")
|
||||
parser.add_argument('--allow_lan_udp', type=bool, default=cls.boolean('ALLOW_LAN_UDP', False),
|
||||
help='reply to hub UDP ping messages from LAN ip addresses')
|
||||
parser.add_argument('--country', type=str, default=cls.default('COUNTRY', 'US'), help='')
|
||||
parser.add_argument('--max_send', type=int, default=cls.default('MAX_SEND', 1000000), help='')
|
||||
parser.add_argument('--max_receive', type=int, default=cls.default('MAX_RECEIVE', 1000000), help='')
|
||||
parser.add_argument('--max_sessions', type=int, default=cls.default('MAX_SESSIONS', 1000), help='')
|
||||
parser.add_argument('--session_timeout', type=int, default=cls.default('SESSION_TIMEOUT', 600), help='')
|
||||
parser.add_argument('--drop_client', type=str, default=cls.default('DROP_CLIENT', None), help='')
|
||||
parser.add_argument('--description', type=str, default=cls.default('DESCRIPTION', ''), help='')
|
||||
parser.add_argument('--daily_fee', type=float, default=cls.default('DAILY_FEE', 0.0), help='')
|
||||
parser.add_argument('--payment_address', type=str, default=cls.default('PAYMENT_ADDRESS', ''), help='')
|
||||
parser.add_argument('--donation_address', type=str, default=cls.default('DONATION_ADDRESS', ''), help='')
|
||||
parser.add_argument('--chain', type=str, default=cls.default('NET', 'mainnet'),
|
||||
help="Which chain to use, default is mainnet")
|
||||
parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
|
||||
help="elasticsearch query timeout")
|
||||
|
||||
@classmethod
|
||||
def from_arg_parser(cls, args):
|
||||
return cls(
|
||||
db_dir=args.db_dir, daemon_url=args.daemon_url, db_max_open_files=args.db_max_open_files,
|
||||
host=args.host, rpc_host=args.rpc_host, elastic_host=args.elastic_host, elastic_port=args.elastic_port,
|
||||
loop_policy=args.loop_policy, max_query_workers=args.max_query_workers, websocket_host=args.websocket_host,
|
||||
websocket_port=args.websocket_port, chain=args.chain, es_index_prefix=args.es_index_prefix,
|
||||
cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
|
||||
udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile,
|
||||
ssl_keyfile=args.ssl_keyfile, rpc_port=args.rpc_port, prometheus_port=args.prometheus_port,
|
||||
max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs,
|
||||
log_sessions=None, allow_lan_udp=args.allow_lan_udp,
|
||||
cache_all_tx_hashes=args.cache_all_tx_hashes, cache_all_claim_txos=args.cache_all_claim_txos,
|
||||
country=args.country, payment_address=args.payment_address, donation_address=args.donation_address,
|
||||
max_send=args.max_send, max_receive=args.max_receive, max_sessions=args.max_sessions,
|
||||
session_timeout=args.session_timeout, drop_client=args.drop_client, description=args.description,
|
||||
daily_fee=args.daily_fee, database_query_timeout=(args.query_timeout_ms / 1000)
|
||||
)
|
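Every setting above follows the same precedence: an explicit constructor keyword wins, then the corresponding environment variable, then the hard-coded default. A minimal sketch of that behaviour on a Unix-like system, assuming the lbry package is importable; the paths and credentials are hypothetical and the daemon URL is only recorded, never contacted:

import os
from lbry.wallet.server.env import Env

# hypothetical values; DB_DIRECTORY and DAEMON_URL are the two required settings
os.environ['DB_DIRECTORY'] = '/tmp/lbry-hub-db'
os.environ['DAEMON_URL'] = 'rpcuser:rpcpass@localhost:9245'
os.environ['TCP_PORT'] = '50001'

env = Env(max_query_workers=8)         # keyword argument beats MAX_QUERY_WORKERS
assert env.max_query_workers == 8
assert env.tcp_port == 50001           # falls back to the TCP_PORT envvar
assert env.udp_port == env.tcp_port    # UDP_PORT defaults to the TCP port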
160
lbry/wallet/server/hash.py
Normal file
|
@ -0,0 +1,160 @@
|
|||
# Copyright (c) 2016-2017, Neil Booth
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# The MIT License (MIT)
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
"""Cryptograph hash functions and related classes."""
|
||||
|
||||
|
||||
import hashlib
|
||||
import hmac
|
||||
|
||||
from lbry.wallet.server.util import bytes_to_int, int_to_bytes, hex_to_bytes
|
||||
|
||||
_sha256 = hashlib.sha256
|
||||
_sha512 = hashlib.sha512
|
||||
_new_hash = hashlib.new
|
||||
_new_hmac = hmac.new
|
||||
HASHX_LEN = 11
|
||||
CLAIM_HASH_LEN = 20
|
||||
|
||||
|
||||
def sha256(x):
|
||||
"""Simple wrapper of hashlib sha256."""
|
||||
return _sha256(x).digest()
|
||||
|
||||
|
||||
def ripemd160(x):
|
||||
"""Simple wrapper of hashlib ripemd160."""
|
||||
h = _new_hash('ripemd160')
|
||||
h.update(x)
|
||||
return h.digest()
|
||||
|
||||
|
||||
def double_sha256(x):
|
||||
"""SHA-256 of SHA-256, as used extensively in bitcoin."""
|
||||
return sha256(sha256(x))
|
||||
|
||||
|
||||
def hmac_sha512(key, msg):
|
||||
"""Use SHA-512 to provide an HMAC."""
|
||||
return _new_hmac(key, msg, _sha512).digest()
|
||||
|
||||
|
||||
def hash160(x):
|
||||
"""RIPEMD-160 of SHA-256.
|
||||
|
||||
Used to make bitcoin addresses from pubkeys."""
|
||||
return ripemd160(sha256(x))
|
||||
|
||||
|
||||
def hash_to_hex_str(x: bytes) -> str:
|
||||
"""Convert a big-endian binary hash to displayed hex string.
|
||||
|
||||
Display form of a binary hash is reversed and converted to hex.
|
||||
"""
|
||||
return x[::-1].hex()
|
||||
|
||||
|
||||
def hex_str_to_hash(x: str) -> bytes:
|
||||
"""Convert a displayed hex string to a binary hash."""
|
||||
return hex_to_bytes(x)[::-1]
|
||||
|
||||
|
||||
class Base58Error(Exception):
|
||||
"""Exception used for Base58 errors."""
|
||||
|
||||
|
||||
class Base58:
|
||||
"""Class providing base 58 functionality."""
|
||||
|
||||
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
|
||||
assert len(chars) == 58
|
||||
cmap = {c: n for n, c in enumerate(chars)}
|
||||
|
||||
@staticmethod
|
||||
def char_value(c):
|
||||
val = Base58.cmap.get(c)
|
||||
if val is None:
|
||||
raise Base58Error(f'invalid base 58 character "{c}"')
|
||||
return val
|
||||
|
||||
@staticmethod
|
||||
def decode(txt):
|
||||
"""Decodes txt into a big-endian bytearray."""
|
||||
if not isinstance(txt, str):
|
||||
raise TypeError('a string is required')
|
||||
|
||||
if not txt:
|
||||
raise Base58Error('string cannot be empty')
|
||||
|
||||
value = 0
|
||||
for c in txt:
|
||||
value = value * 58 + Base58.char_value(c)
|
||||
|
||||
result = int_to_bytes(value)
|
||||
|
||||
# Prepend leading zero bytes if necessary
|
||||
count = 0
|
||||
for c in txt:
|
||||
if c != '1':
|
||||
break
|
||||
count += 1
|
||||
if count:
|
||||
result = bytes(count) + result
|
||||
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def encode(be_bytes):
|
||||
"""Converts a big-endian bytearray into a base58 string."""
|
||||
value = bytes_to_int(be_bytes)
|
||||
|
||||
txt = ''
|
||||
while value:
|
||||
value, mod = divmod(value, 58)
|
||||
txt += Base58.chars[mod]
|
||||
|
||||
for byte in be_bytes:
|
||||
if byte != 0:
|
||||
break
|
||||
txt += '1'
|
||||
|
||||
return txt[::-1]
|
||||
|
||||
@staticmethod
|
||||
def decode_check(txt, *, hash_fn=double_sha256):
|
||||
"""Decodes a Base58Check-encoded string to a payload. The version
|
||||
prefixes it."""
|
||||
be_bytes = Base58.decode(txt)
|
||||
result, check = be_bytes[:-4], be_bytes[-4:]
|
||||
if check != hash_fn(result)[:4]:
|
||||
raise Base58Error(f'invalid base 58 checksum for {txt}')
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def encode_check(payload, *, hash_fn=double_sha256):
|
||||
"""Encodes a payload bytearray (which includes the version byte(s))
|
||||
into a Base58Check string."""
|
||||
be_bytes = payload + hash_fn(payload)[:4]
|
||||
return Base58.encode(be_bytes)
|
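As a quick illustration of the helpers above: Base58Check is just Base58 over the payload plus the first four bytes of its double_sha256, and displayed hashes are byte-reversed hex. A small sketch, assuming the module is importable; the version byte is hypothetical:

from lbry.wallet.server.hash import Base58, double_sha256, hash_to_hex_str, hex_str_to_hash

payload = bytes([0x55]) + bytes(20)             # hypothetical version byte + 20-byte hash160
encoded = Base58.encode_check(payload)
assert Base58.decode_check(encoded) == payload  # checksum verified, payload returned

tx_hash = double_sha256(b'example transaction bytes')
assert hex_str_to_hash(hash_to_hex_str(tx_hash)) == tx_hash  # display form is reversed hex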
233
lbry/wallet/server/mempool.py
Normal file
|
@ -0,0 +1,233 @@
|
|||
# Copyright (c) 2016-2018, Neil Booth
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# See the file "LICENCE" for information about the copyright
|
||||
# and warranty status of this software.
|
||||
|
||||
"""Mempool handling."""
|
||||
import asyncio
|
||||
import itertools
|
||||
import time
|
||||
import attr
|
||||
import typing
|
||||
from collections import defaultdict
|
||||
from prometheus_client import Histogram
|
||||
from lbry.wallet.server.util import class_logger
|
||||
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.server.session import LBRYSessionManager
|
||||
from lbry.wallet.server.db.db import LevelDB
|
||||
|
||||
|
||||
@attr.s(slots=True)
|
||||
class MemPoolTx:
|
||||
prevouts = attr.ib()
|
||||
# A pair is a (hashX, value) tuple
|
||||
in_pairs = attr.ib()
|
||||
out_pairs = attr.ib()
|
||||
fee = attr.ib()
|
||||
size = attr.ib()
|
||||
raw_tx = attr.ib()
|
||||
|
||||
|
||||
@attr.s(slots=True)
|
||||
class MemPoolTxSummary:
|
||||
hash = attr.ib()
|
||||
fee = attr.ib()
|
||||
has_unconfirmed_inputs = attr.ib()
|
||||
|
||||
|
||||
NAMESPACE = "wallet_server"
|
||||
HISTOGRAM_BUCKETS = (
|
||||
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
|
||||
)
|
||||
mempool_process_time_metric = Histogram(
|
||||
"processed_mempool", "Time to process mempool and notify touched addresses",
|
||||
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
|
||||
)
|
||||
|
||||
|
||||
class MemPool:
|
||||
def __init__(self, coin, db: 'LevelDB', refresh_secs=1.0):
|
||||
self.coin = coin
|
||||
self._db = db
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.txs = {}
|
||||
self.raw_mempool = {}
|
||||
self.touched_hashXs: typing.DefaultDict[bytes, typing.Set[bytes]] = defaultdict(set) # None can be a key
|
||||
self.refresh_secs = refresh_secs
|
||||
self.mempool_process_time_metric = mempool_process_time_metric
|
||||
self.session_manager: typing.Optional['LBRYSessionManager'] = None
|
||||
|
||||
async def refresh_hashes(self, height: int):
|
||||
start = time.perf_counter()
|
||||
new_touched = await self._process_mempool()
|
||||
await self.on_mempool(set(self.touched_hashXs), new_touched, height)
|
||||
duration = time.perf_counter() - start
|
||||
self.mempool_process_time_metric.observe(duration)
|
||||
|
||||
async def _process_mempool(self) -> typing.Set[bytes]:  # returns the set of new touched hashXs
|
||||
# Re-sync with the new set of hashes
|
||||
|
||||
# hashXs = self.hashXs # hashX: [tx_hash, ...]
|
||||
touched_hashXs = set()
|
||||
|
||||
# Remove txs that aren't in mempool anymore
|
||||
for tx_hash in set(self.txs).difference(self.raw_mempool.keys()):
|
||||
tx = self.txs.pop(tx_hash)
|
||||
tx_hashXs = {hashX for hashX, value in tx.in_pairs}.union({hashX for hashX, value in tx.out_pairs})
|
||||
for hashX in tx_hashXs:
|
||||
if hashX in self.touched_hashXs and tx_hash in self.touched_hashXs[hashX]:
|
||||
self.touched_hashXs[hashX].remove(tx_hash)
|
||||
if not self.touched_hashXs[hashX]:
|
||||
self.touched_hashXs.pop(hashX)
|
||||
touched_hashXs.update(tx_hashXs)
|
||||
|
||||
tx_map = {}
|
||||
for tx_hash, raw_tx in self.raw_mempool.items():
|
||||
if tx_hash in self.txs:
|
||||
continue
|
||||
tx, tx_size = self.coin.DESERIALIZER(raw_tx).read_tx_and_vsize()
|
||||
# Convert the inputs and outputs into (hashX, value) pairs
|
||||
# Drop generation-like inputs from MemPoolTx.prevouts
|
||||
txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
|
||||
for txin in tx.inputs
|
||||
if not txin.is_generation())
|
||||
txout_pairs = tuple((self.coin.hashX_from_script(txout.pk_script), txout.value)
|
||||
for txout in tx.outputs)
|
||||
|
||||
tx_map[tx_hash] = MemPoolTx(txin_pairs, None, txout_pairs, 0, tx_size, raw_tx)
|
||||
|
||||
# Determine all prevouts not in the mempool, and fetch the
|
||||
# UTXO information from the database. Failed prevout lookups
|
||||
# return None - concurrent database updates happen - which is
|
||||
# relied upon by _accept_transactions. Ignore prevouts that are
|
||||
# generation-like.
|
||||
# prevouts = tuple(prevout for tx in tx_map.values()
|
||||
# for prevout in tx.prevouts
|
||||
# if prevout[0] not in self.raw_mempool)
|
||||
# utxos = await self._db.lookup_utxos(prevouts)
|
||||
# utxo_map = dict(zip(prevouts, utxos))
|
||||
# unspent = set(utxo_map)
|
||||
|
||||
for tx_hash, tx in tx_map.items():
|
||||
in_pairs = []
|
||||
for prevout in tx.prevouts:
|
||||
# utxo = utxo_map.get(prevout)
|
||||
# if not utxo:
|
||||
prev_hash, prev_index = prevout
|
||||
if prev_hash in self.txs: # accepted mempool
|
||||
utxo = self.txs[prev_hash].out_pairs[prev_index]
|
||||
elif prev_hash in tx_map: # this set of changes
|
||||
utxo = tx_map[prev_hash].out_pairs[prev_index]
|
||||
else: # get it from the db
|
||||
prev_tx_num = self._db.prefix_db.tx_num.get(prev_hash)
|
||||
if not prev_tx_num:
|
||||
continue
|
||||
prev_tx_num = prev_tx_num.tx_num
|
||||
hashX_val = self._db.prefix_db.hashX_utxo.get(tx_hash[:4], prev_tx_num, prev_index)
|
||||
if not hashX_val:
|
||||
continue
|
||||
hashX = hashX_val.hashX
|
||||
utxo_value = self._db.prefix_db.utxo.get(hashX, prev_tx_num, prev_index)
|
||||
utxo = (hashX, utxo_value.amount)
|
||||
# if not prev_raw:
|
||||
# print("derp", prev_hash[::-1].hex())
|
||||
# print(self._db.get_tx_num(prev_hash))
|
||||
# prev_tx, prev_tx_size = self.coin.DESERIALIZER(prev_raw.raw_tx).read_tx_and_vsize()
|
||||
# prev_txo = prev_tx.outputs[prev_index]
|
||||
# utxo = (self.coin.hashX_from_script(prev_txo.pk_script), prev_txo.value)
|
||||
in_pairs.append(utxo)
|
||||
|
||||
# # Spend the prevouts
|
||||
# unspent.difference_update(tx.prevouts)
|
||||
|
||||
# Save the in_pairs, compute the fee and accept the TX
|
||||
tx.in_pairs = tuple(in_pairs)
|
||||
# Avoid negative fees if dealing with generation-like transactions
|
||||
'because some in_pairs would be missing
|
||||
tx.fee = max(0, (sum(v for _, v in tx.in_pairs) -
|
||||
sum(v for _, v in tx.out_pairs)))
|
||||
self.txs[tx_hash] = tx
|
||||
# print(f"added {tx_hash[::-1].hex()} reader to mempool")
|
||||
|
||||
for hashX, value in itertools.chain(tx.in_pairs, tx.out_pairs):
|
||||
self.touched_hashXs[hashX].add(tx_hash)
|
||||
touched_hashXs.add(hashX)
|
||||
# utxo_map = {prevout: utxo_map[prevout] for prevout in unspent}
|
||||
|
||||
return touched_hashXs
|
||||
|
||||
def transaction_summaries(self, hashX):
|
||||
"""Return a list of MemPoolTxSummary objects for the hashX."""
|
||||
result = []
|
||||
for tx_hash in self.touched_hashXs.get(hashX, ()):
|
||||
tx = self.txs[tx_hash]
|
||||
has_ui = any(hash in self.txs for hash, idx in tx.prevouts)
|
||||
result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui))
|
||||
return result
|
||||
|
||||
def get_mempool_height(self, tx_hash: bytes) -> int:
|
||||
# Height Progression
|
||||
# -2: not broadcast
|
||||
# -1: in mempool but has unconfirmed inputs
|
||||
# 0: in mempool and all inputs confirmed
|
||||
# +num: confirmed in a specific block (height)
|
||||
if tx_hash not in self.txs:
|
||||
return -2
|
||||
tx = self.txs[tx_hash]
|
||||
unspent_inputs = any(hash in self.raw_mempool for hash, idx in tx.prevouts)
|
||||
if unspent_inputs:
|
||||
return -1
|
||||
return 0
|
||||
|
||||
async def start(self, height, session_manager: 'LBRYSessionManager'):
|
||||
self.session_manager = session_manager
|
||||
await self._notify_sessions(height, set(), set())
|
||||
|
||||
async def on_mempool(self, touched, new_touched, height):
|
||||
await self._notify_sessions(height, touched, new_touched)
|
||||
|
||||
async def on_block(self, touched, height):
|
||||
await self._notify_sessions(height, touched, set())
|
||||
|
||||
async def _notify_sessions(self, height, touched, new_touched):
|
||||
"""Notify sessions about height changes and touched addresses."""
|
||||
height_changed = height != self.session_manager.notified_height
|
||||
if height_changed:
|
||||
await self.session_manager._refresh_hsub_results(height)
|
||||
|
||||
if not self.session_manager.sessions:
|
||||
return
|
||||
|
||||
if height_changed:
|
||||
header_tasks = [
|
||||
session.send_notification('blockchain.headers.subscribe', (self.session_manager.hsub_results[session.subscribe_headers_raw], ))
|
||||
for session in self.session_manager.sessions.values() if session.subscribe_headers
|
||||
]
|
||||
if header_tasks:
|
||||
self.logger.info(f'notify {len(header_tasks)} sessions of new header')
|
||||
asyncio.create_task(asyncio.wait(header_tasks))
|
||||
for hashX in touched.intersection(self.session_manager.mempool_statuses.keys()):
|
||||
self.session_manager.mempool_statuses.pop(hashX, None)
|
||||
# self.bp._chain_executor
|
||||
await asyncio.get_event_loop().run_in_executor(
|
||||
None, touched.intersection_update, self.session_manager.hashx_subscriptions_by_session.keys()
|
||||
)
|
||||
|
||||
if touched or new_touched or (height_changed and self.session_manager.mempool_statuses):
|
||||
notified_hashxs = 0
|
||||
session_hashxes_to_notify = defaultdict(list)
|
||||
to_notify = touched if height_changed else new_touched
|
||||
|
||||
for hashX in to_notify:
|
||||
if hashX not in self.session_manager.hashx_subscriptions_by_session:
|
||||
continue
|
||||
for session_id in self.session_manager.hashx_subscriptions_by_session[hashX]:
|
||||
session_hashxes_to_notify[session_id].append(hashX)
|
||||
notified_hashxs += 1
|
||||
for session_id, hashXes in session_hashxes_to_notify.items():
|
||||
asyncio.create_task(self.session_manager.sessions[session_id].send_history_notifications(*hashXes))
|
||||
if session_hashxes_to_notify:
|
||||
self.logger.info(f'notified {len(session_hashxes_to_notify)} sessions/{notified_hashxs:,d} touched addresses')
|
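The fee and height conventions used above are simple enough to state standalone. A sketch with hypothetical hashXs and values, mirroring the fee clamp in _process_mempool and the height progression documented in get_mempool_height:

in_pairs = ((b'hashX-a', 5000), (b'hashX-b', 3000))    # hypothetical (hashX, value) pairs
out_pairs = ((b'hashX-c', 7500),)

fee = max(0, sum(v for _, v in in_pairs) - sum(v for _, v in out_pairs))
assert fee == 500                                      # clamped at zero, never negative

def mempool_height(in_mempool: bool, has_unconfirmed_inputs: bool) -> int:
    if not in_mempool:
        return -2                                      # not broadcast
    return -1 if has_unconfirmed_inputs else 0         # unconfirmed inputs vs. all confirmed

assert mempool_height(True, True) == -1
assert mempool_height(True, False) == 0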
258
lbry/wallet/server/merkle.py
Normal file
|
@ -0,0 +1,258 @@
|
|||
# Copyright (c) 2018, Neil Booth
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# The MIT License (MIT)
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
# and warranty status of this software.
|
||||
|
||||
"""Merkle trees, branches, proofs and roots."""
|
||||
|
||||
from asyncio import Event
|
||||
from math import ceil, log
|
||||
|
||||
from lbry.wallet.server.hash import double_sha256
|
||||
|
||||
|
||||
class Merkle:
|
||||
"""Perform merkle tree calculations on binary hashes using a given hash
|
||||
function.
|
||||
|
||||
If the hash count is not even, the final hash is repeated when
|
||||
calculating the next merkle layer up the tree.
|
||||
"""
|
||||
|
||||
def __init__(self, hash_func=double_sha256):
|
||||
self.hash_func = hash_func
|
||||
|
||||
@staticmethod
|
||||
def tree_depth(hash_count):
|
||||
return Merkle.branch_length(hash_count) + 1
|
||||
|
||||
@staticmethod
|
||||
def branch_length(hash_count):
|
||||
"""Return the length of a merkle branch given the number of hashes."""
|
||||
if not isinstance(hash_count, int):
|
||||
raise TypeError('hash_count must be an integer')
|
||||
if hash_count < 1:
|
||||
raise ValueError('hash_count must be at least 1')
|
||||
return ceil(log(hash_count, 2))
|
||||
|
||||
@staticmethod
|
||||
def branch_and_root(hashes, index, length=None, hash_func=double_sha256):
|
||||
"""Return a (merkle branch, merkle_root) pair given hashes, and the
|
||||
index of one of those hashes.
|
||||
"""
|
||||
hashes = list(hashes)
|
||||
if not isinstance(index, int):
|
||||
raise TypeError('index must be an integer')
|
||||
# This also asserts hashes is not empty
|
||||
if not 0 <= index < len(hashes):
|
||||
raise ValueError(f"index '{index}/{len(hashes)}' out of range")
|
||||
natural_length = Merkle.branch_length(len(hashes))
|
||||
if length is None:
|
||||
length = natural_length
|
||||
else:
|
||||
if not isinstance(length, int):
|
||||
raise TypeError('length must be an integer')
|
||||
if length < natural_length:
|
||||
raise ValueError('length out of range')
|
||||
|
||||
branch = []
|
||||
for _ in range(length):
|
||||
if len(hashes) & 1:
|
||||
hashes.append(hashes[-1])
|
||||
branch.append(hashes[index ^ 1])
|
||||
index >>= 1
|
||||
hashes = [hash_func(hashes[n] + hashes[n + 1])
|
||||
for n in range(0, len(hashes), 2)]
|
||||
|
||||
return branch, hashes[0]
|
||||
|
||||
@staticmethod
|
||||
def root(hashes, length=None):
|
||||
"""Return the merkle root of a non-empty iterable of binary hashes."""
|
||||
branch, root = Merkle.branch_and_root(hashes, 0, length)
|
||||
return root
|
||||
|
||||
# @staticmethod
|
||||
# def root_from_proof(hash, branch, index, hash_func=double_sha256):
|
||||
# """Return the merkle root given a hash, a merkle branch to it, and
|
||||
# its index in the hashes array.
|
||||
#
|
||||
# branch is an iterable sorted deepest to shallowest. If the
|
||||
# returned root is the expected value then the merkle proof is
|
||||
# verified.
|
||||
#
|
||||
# The caller should have confirmed the length of the branch with
|
||||
# branch_length(). Unfortunately this is not easily done for
|
||||
# bitcoin transactions as the number of transactions in a block
|
||||
# is unknown to an SPV client.
|
||||
# """
|
||||
# for elt in branch:
|
||||
# if index & 1:
|
||||
# hash = hash_func(elt + hash)
|
||||
# else:
|
||||
# hash = hash_func(hash + elt)
|
||||
# index >>= 1
|
||||
# if index:
|
||||
# raise ValueError('index out of range for branch')
|
||||
# return hash
|
||||
|
||||
@staticmethod
|
||||
def level(hashes, depth_higher):
|
||||
"""Return a level of the merkle tree of hashes the given depth
|
||||
higher than the bottom row of the original tree."""
|
||||
size = 1 << depth_higher
|
||||
root = Merkle.root
|
||||
return [root(hashes[n: n + size], depth_higher)
|
||||
for n in range(0, len(hashes), size)]
|
||||
|
||||
@staticmethod
|
||||
def branch_and_root_from_level(level, leaf_hashes, index,
|
||||
depth_higher):
|
||||
"""Return a (merkle branch, merkle_root) pair when a merkle-tree has a
|
||||
level cached.
|
||||
|
||||
To maximally reduce the amount of data hashed in computing a
|
||||
merkle branch, cache a tree of depth N at level N // 2.
|
||||
|
||||
level is a list of hashes in the middle of the tree (returned
|
||||
by level())
|
||||
|
||||
leaf_hashes are the leaves needed to calculate a partial branch
|
||||
up to level.
|
||||
|
||||
depth_higher is how much higher level is than the leaves of the tree
|
||||
|
||||
index is the index in the full list of hashes of the hash whose
|
||||
merkle branch we want.
|
||||
"""
|
||||
if not isinstance(level, list):
|
||||
raise TypeError("level must be a list")
|
||||
if not isinstance(leaf_hashes, list):
|
||||
raise TypeError("leaf_hashes must be a list")
|
||||
leaf_index = (index >> depth_higher) << depth_higher
|
||||
leaf_branch, leaf_root = Merkle.branch_and_root(
|
||||
leaf_hashes, index - leaf_index, depth_higher)
|
||||
index >>= depth_higher
|
||||
level_branch, root = Merkle.branch_and_root(level, index)
|
||||
# Check last so that we know index is in-range
|
||||
if leaf_root != level[index]:
|
||||
raise ValueError('leaf hashes inconsistent with level')
|
||||
return leaf_branch + level_branch, root
|
||||
|
||||
|
||||
class MerkleCache:
|
||||
"""A cache to calculate merkle branches efficiently."""
|
||||
|
||||
def __init__(self, merkle, source_func):
|
||||
"""Initialise a cache hashes taken from source_func:
|
||||
|
||||
async def source_func(index, count):
|
||||
...
|
||||
"""
|
||||
self.merkle = merkle
|
||||
self.source_func = source_func
|
||||
self.length = 0
|
||||
self.depth_higher = 0
|
||||
self.initialized = Event()
|
||||
|
||||
def _segment_length(self):
|
||||
return 1 << self.depth_higher
|
||||
|
||||
def _leaf_start(self, index):
|
||||
"""Given a level's depth higher and a hash index, return the leaf
|
||||
of the segment containing it, needed to calculate a merkle branch.
|
||||
"""
|
||||
depth_higher = self.depth_higher
|
||||
return (index >> depth_higher) << depth_higher
|
||||
|
||||
def _level(self, hashes):
|
||||
return self.merkle.level(hashes, self.depth_higher)
|
||||
|
||||
async def _extend_to(self, length):
|
||||
"""Extend the length of the cache if necessary."""
|
||||
if length <= self.length:
|
||||
return
|
||||
# Start from the beginning of any final partial segment.
|
||||
# Retain the value of depth_higher; in practice this is fine
|
||||
start = self._leaf_start(self.length)
|
||||
hashes = await self.source_func(start, length - start)
|
||||
self.level[start >> self.depth_higher:] = self._level(hashes)
|
||||
self.length = length
|
||||
|
||||
async def _level_for(self, length):
|
||||
"""Return a (level_length, final_hash) pair for a truncation
|
||||
of the hashes to the given length."""
|
||||
if length == self.length:
|
||||
return self.level
|
||||
level = self.level[:length >> self.depth_higher]
|
||||
leaf_start = self._leaf_start(length)
|
||||
count = min(self._segment_length(), length - leaf_start)
|
||||
hashes = await self.source_func(leaf_start, count)
|
||||
level += self._level(hashes)
|
||||
return level
|
||||
|
||||
async def initialize(self, length):
|
||||
"""Call to initialize the cache to a source of given length."""
|
||||
self.length = length
|
||||
self.depth_higher = self.merkle.tree_depth(length) // 2
|
||||
self.level = self._level(await self.source_func(0, length))
|
||||
self.initialized.set()
|
||||
|
||||
def truncate(self, length):
|
||||
"""Truncate the cache so it covers no more than length underlying
|
||||
hashes."""
|
||||
if not isinstance(length, int):
|
||||
raise TypeError('length must be an integer')
|
||||
if length <= 0:
|
||||
raise ValueError('length must be positive')
|
||||
if length >= self.length:
|
||||
return
|
||||
length = self._leaf_start(length)
|
||||
self.length = length
|
||||
self.level[length >> self.depth_higher:] = []
|
||||
|
||||
async def branch_and_root(self, length, index):
|
||||
"""Return a merkle branch and root. Length is the number of
|
||||
hashes used to calculate the merkle root, index is the position
|
||||
of the hash to calculate the branch of.
|
||||
|
||||
index must be less than length, which must be at least 1."""
|
||||
if not isinstance(length, int):
|
||||
raise TypeError('length must be an integer')
|
||||
if not isinstance(index, int):
|
||||
raise TypeError('index must be an integer')
|
||||
if length <= 0:
|
||||
raise ValueError('length must be positive')
|
||||
if index >= length:
|
||||
raise ValueError('index must be less than length')
|
||||
await self.initialized.wait()
|
||||
await self._extend_to(length)
|
||||
leaf_start = self._leaf_start(index)
|
||||
count = min(self._segment_length(), length - leaf_start)
|
||||
leaf_hashes = await self.source_func(leaf_start, count)
|
||||
if length < self._segment_length():
|
||||
return self.merkle.branch_and_root(leaf_hashes, index)
|
||||
level = await self._level_for(length)
|
||||
return self.merkle.branch_and_root_from_level(
|
||||
level, leaf_hashes, index, self.depth_higher)
|
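A branch returned by Merkle.branch_and_root can be checked by folding it back into the root, exactly as the commented-out root_from_proof above describes. A self-contained sketch, assuming both modules are importable:

from lbry.wallet.server.hash import double_sha256
from lbry.wallet.server.merkle import Merkle

hashes = [double_sha256(bytes([n])) for n in range(5)]   # five leaf hashes
index = 3
branch, root = Merkle.branch_and_root(hashes, index)

running, i = hashes[index], index
for elt in branch:
    # odd index: sibling sits on the left; even index: sibling sits on the right
    running = double_sha256(elt + running) if i & 1 else double_sha256(running + elt)
    i >>= 1
assert running == root == Merkle.root(hashes)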
135
lbry/wallet/server/metrics.py
Normal file
|
@ -0,0 +1,135 @@
|
|||
import time
|
||||
import math
|
||||
from typing import Tuple
|
||||
|
||||
|
||||
def calculate_elapsed(start) -> int:
|
||||
return int((time.perf_counter() - start) * 1000)
|
||||
|
||||
|
||||
def calculate_avg_percentiles(data) -> Tuple[int, int, int, int, int, int, int, int]:
|
||||
if not data:
|
||||
return 0, 0, 0, 0, 0, 0, 0, 0
|
||||
data.sort()
|
||||
size = len(data)
|
||||
return (
|
||||
int(sum(data) / size),
|
||||
data[0],
|
||||
data[math.ceil(size * .05) - 1],
|
||||
data[math.ceil(size * .25) - 1],
|
||||
data[math.ceil(size * .50) - 1],
|
||||
data[math.ceil(size * .75) - 1],
|
||||
data[math.ceil(size * .95) - 1],
|
||||
data[-1]
|
||||
)
|
||||
|
||||
|
||||
def remove_select_list(sql) -> str:
|
||||
return sql[sql.index('FROM'):]
|
||||
|
||||
|
||||
class APICallMetrics:
|
||||
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
# total requests received
|
||||
self.receive_count = 0
|
||||
self.cache_response_count = 0
|
||||
|
||||
# millisecond timings for query based responses
|
||||
self.query_response_times = []
|
||||
self.query_intrp_times = []
|
||||
self.query_error_times = []
|
||||
|
||||
self.query_python_times = []
|
||||
self.query_wait_times = []
|
||||
self.query_sql_times = [] # aggregate total of multiple SQL calls made per request
|
||||
|
||||
self.individual_sql_times = [] # every SQL query run on server
|
||||
|
||||
# actual queries
|
||||
self.errored_queries = set()
|
||||
self.interrupted_queries = set()
|
||||
|
||||
def to_json(self):
|
||||
return {
|
||||
# total requests received
|
||||
"receive_count": self.receive_count,
|
||||
# sum of these is total responses made
|
||||
"cache_response_count": self.cache_response_count,
|
||||
"query_response_count": len(self.query_response_times),
|
||||
"intrp_response_count": len(self.query_intrp_times),
|
||||
"error_response_count": len(self.query_error_times),
|
||||
# millisecond timings for non-cache responses
|
||||
"response": calculate_avg_percentiles(self.query_response_times),
|
||||
"interrupt": calculate_avg_percentiles(self.query_intrp_times),
|
||||
"error": calculate_avg_percentiles(self.query_error_times),
|
||||
# response, interrupt and error each also report the python, wait and sql stats:
|
||||
"python": calculate_avg_percentiles(self.query_python_times),
|
||||
"wait": calculate_avg_percentiles(self.query_wait_times),
|
||||
"sql": calculate_avg_percentiles(self.query_sql_times),
|
||||
# extended timings for individual sql executions
|
||||
"individual_sql": calculate_avg_percentiles(self.individual_sql_times),
|
||||
"individual_sql_count": len(self.individual_sql_times),
|
||||
# actual queries
|
||||
"errored_queries": list(self.errored_queries),
|
||||
"interrupted_queries": list(self.interrupted_queries),
|
||||
}
|
||||
|
||||
def start(self):
|
||||
self.receive_count += 1
|
||||
|
||||
def cache_response(self):
|
||||
self.cache_response_count += 1
|
||||
|
||||
def _add_query_timings(self, request_total_time, metrics):
|
||||
if metrics and 'execute_query' in metrics:
|
||||
sub_process_total = metrics[self.name][0]['total']
|
||||
individual_query_times = [f['total'] for f in metrics['execute_query']]
|
||||
aggregated_query_time = sum(individual_query_times)
|
||||
self.individual_sql_times.extend(individual_query_times)
|
||||
self.query_sql_times.append(aggregated_query_time)
|
||||
self.query_python_times.append(sub_process_total - aggregated_query_time)
|
||||
self.query_wait_times.append(request_total_time - sub_process_total)
|
||||
|
||||
@staticmethod
|
||||
def _add_queries(query_set, metrics):
|
||||
if metrics and 'execute_query' in metrics:
|
||||
for execute_query in metrics['execute_query']:
|
||||
if 'sql' in execute_query:
|
||||
query_set.add(remove_select_list(execute_query['sql']))
|
||||
|
||||
def query_response(self, start, metrics):
|
||||
self.query_response_times.append(calculate_elapsed(start))
|
||||
self._add_query_timings(self.query_response_times[-1], metrics)
|
||||
|
||||
def query_interrupt(self, start, metrics):
|
||||
self.query_intrp_times.append(calculate_elapsed(start))
|
||||
self._add_queries(self.interrupted_queries, metrics)
|
||||
self._add_query_timings(self.query_intrp_times[-1], metrics)
|
||||
|
||||
def query_error(self, start, metrics):
|
||||
self.query_error_times.append(calculate_elapsed(start))
|
||||
self._add_queries(self.errored_queries, metrics)
|
||||
self._add_query_timings(self.query_error_times[-1], metrics)
|
||||
|
||||
|
||||
class ServerLoadData:
|
||||
|
||||
def __init__(self):
|
||||
self._apis = {}
|
||||
|
||||
def for_api(self, name) -> APICallMetrics:
|
||||
if name not in self._apis:
|
||||
self._apis[name] = APICallMetrics(name)
|
||||
return self._apis[name]
|
||||
|
||||
def to_json_and_reset(self, status):
|
||||
try:
|
||||
return {
|
||||
'api': {name: api.to_json() for name, api in self._apis.items()},
|
||||
'status': status
|
||||
}
|
||||
finally:
|
||||
self._apis = {}
|
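calculate_avg_percentiles packs eight numbers into one tuple: the average, the minimum, the 5th/25th/50th/75th/95th percentiles, and the maximum. A quick sketch with hypothetical millisecond timings, assuming the module is importable:

from lbry.wallet.server.metrics import calculate_avg_percentiles

timings = [12, 5, 30, 7, 22, 9, 41, 15, 18, 11]     # hypothetical response times in ms
avg, low, p5, p25, p50, p75, p95, high = calculate_avg_percentiles(timings)
assert (low, high) == (5, 41)
assert avg == sum(timings) // len(timings)          # 17 ms for this sample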
119
lbry/wallet/server/prefetcher.py
Normal file
|
@ -0,0 +1,119 @@
|
|||
import asyncio
|
||||
import typing
|
||||
|
||||
from lbry.wallet.server.util import chunks, class_logger
|
||||
if typing.TYPE_CHECKING:
|
||||
from lbry.wallet.server.daemon import LBCDaemon
|
||||
from lbry.wallet.server.coin import Coin
|
||||
|
||||
|
||||
class Prefetcher:
|
||||
"""Prefetches blocks (in the forward direction only)."""
|
||||
|
||||
def __init__(self, daemon: 'LBCDaemon', coin: 'Coin', blocks_event: asyncio.Event):
|
||||
self.logger = class_logger(__name__, self.__class__.__name__)
|
||||
self.daemon = daemon
|
||||
self.coin = coin
|
||||
self.blocks_event = blocks_event
|
||||
self.blocks = []
|
||||
self.caught_up = False
|
||||
# Access to fetched_height should be protected by the semaphore
|
||||
self.fetched_height = None
|
||||
self.semaphore = asyncio.Semaphore()
|
||||
self.refill_event = asyncio.Event()
|
||||
# The prefetched block cache size. The min cache size has
|
||||
# little effect on sync time.
|
||||
self.cache_size = 0
|
||||
self.min_cache_size = 10 * 1024 * 1024
|
||||
# This makes the first fetch be 10 blocks
|
||||
self.ave_size = self.min_cache_size // 10
|
||||
self.polling_delay = 0.5
|
||||
|
||||
async def main_loop(self, bp_height):
|
||||
"""Loop forever polling for more blocks."""
|
||||
await self.reset_height(bp_height)
|
||||
try:
|
||||
while True:
|
||||
# Sleep a while if there is nothing to prefetch
|
||||
await self.refill_event.wait()
|
||||
if not await self._prefetch_blocks():
|
||||
await asyncio.sleep(self.polling_delay)
|
||||
finally:
|
||||
self.logger.info("block pre-fetcher is shutting down")
|
||||
|
||||
def get_prefetched_blocks(self):
|
||||
"""Called by block processor when it is processing queued blocks."""
|
||||
blocks = self.blocks
|
||||
self.blocks = []
|
||||
self.cache_size = 0
|
||||
self.refill_event.set()
|
||||
return blocks
|
||||
|
||||
async def reset_height(self, height):
|
||||
"""Reset to prefetch blocks from the block processor's height.
|
||||
|
||||
Used in blockchain reorganisations. This coroutine can be
|
||||
called asynchronously to the _prefetch_blocks coroutine so we
|
||||
must synchronize with a semaphore.
|
||||
"""
|
||||
async with self.semaphore:
|
||||
self.blocks.clear()
|
||||
self.cache_size = 0
|
||||
self.fetched_height = height
|
||||
self.refill_event.set()
|
||||
|
||||
daemon_height = await self.daemon.height()
|
||||
behind = daemon_height - height
|
||||
if behind > 0:
|
||||
self.logger.info(f'catching up to daemon height {daemon_height:,d} '
|
||||
f'({behind:,d} blocks behind)')
|
||||
else:
|
||||
self.logger.info(f'caught up to daemon height {daemon_height:,d}')
|
||||
|
||||
async def _prefetch_blocks(self):
|
||||
"""Prefetch some blocks and put them on the queue.
|
||||
|
||||
Repeats until the queue is full or caught up.
|
||||
"""
|
||||
daemon = self.daemon
|
||||
daemon_height = await daemon.height()
|
||||
async with self.semaphore:
|
||||
while self.cache_size < self.min_cache_size:
|
||||
# Try and catch up all blocks but limit to room in cache.
|
||||
# Constrain fetch count to between 0 and 500 regardless;
|
||||
# testnet can be lumpy.
|
||||
cache_room = self.min_cache_size // self.ave_size
|
||||
count = min(daemon_height - self.fetched_height, cache_room)
|
||||
count = min(500, max(count, 0))
|
||||
if not count:
|
||||
self.caught_up = True
|
||||
return False
|
||||
|
||||
first = self.fetched_height + 1
|
||||
hex_hashes = await daemon.block_hex_hashes(first, count)
|
||||
if self.caught_up:
|
||||
self.logger.info('new block height {:,d} hash {}'
|
||||
.format(first + count-1, hex_hashes[-1]))
|
||||
blocks = await daemon.raw_blocks(hex_hashes)
|
||||
|
||||
assert count == len(blocks)
|
||||
|
||||
# Special handling for genesis block
|
||||
if first == 0:
|
||||
blocks[0] = self.coin.genesis_block(blocks[0])
|
||||
self.logger.info(f'verified genesis block with hash {hex_hashes[0]}')
|
||||
|
||||
# Update our recent average block size estimate
|
||||
size = sum(len(block) for block in blocks)
|
||||
if count >= 10:
|
||||
self.ave_size = size // count
|
||||
else:
|
||||
self.ave_size = (size + (10 - count) * self.ave_size) // 10
|
||||
|
||||
self.blocks.extend(blocks)
|
||||
self.cache_size += size
|
||||
self.fetched_height += count
|
||||
self.blocks_event.set()
|
||||
|
||||
self.refill_event.clear()
|
||||
return True
|
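The block-size estimate that drives the prefetch count is a simple blended average: batches of ten or more blocks replace the estimate outright, smaller batches only nudge it. A standalone sketch of that update rule; the function name and sizes are hypothetical:

def update_ave_size(prev_ave: int, batch_bytes: int, count: int) -> int:
    # mirrors the arithmetic at the end of _prefetch_blocks
    if count >= 10:
        return batch_bytes // count
    return (batch_bytes + (10 - count) * prev_ave) // 10

ave = 1024 * 1024                                        # 1 MiB starting estimate
ave = update_ave_size(ave, 3 * 1024 * 1024, 3)           # small batch: blended with old estimate
ave = update_ave_size(ave, 200 * 1024 * 1024, 500)       # large batch: plain per-block average
assert ave == (200 * 1024 * 1024) // 500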
289
lbry/wallet/server/script.py
Normal file
|
@ -0,0 +1,289 @@
|
|||
# Copyright (c) 2016-2017, Neil Booth
|
||||
#
|
||||
# All rights reserved.
|
||||
#
|
||||
# The MIT License (MIT)
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
# and warranty status of this software.
|
||||
|
||||
"""Script-related classes and functions."""
|
||||
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
from lbry.wallet.server.util import unpack_le_uint16_from, unpack_le_uint32_from, \
|
||||
pack_le_uint16, pack_le_uint32
|
||||
|
||||
|
||||
class EnumError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Enumeration:
|
||||
|
||||
def __init__(self, name, enumList):
|
||||
self.__doc__ = name
|
||||
|
||||
lookup = {}
|
||||
reverseLookup = {}
|
||||
i = 0
|
||||
uniqueNames = set()
|
||||
uniqueValues = set()
|
||||
for x in enumList:
|
||||
if isinstance(x, tuple):
|
||||
x, i = x
|
||||
if not isinstance(x, str):
|
||||
raise EnumError(f"enum name {x} not a string")
|
||||
if not isinstance(i, int):
|
||||
raise EnumError(f"enum value {i} not an integer")
|
||||
if x in uniqueNames:
|
||||
raise EnumError(f"enum name {x} not unique")
|
||||
if i in uniqueValues:
|
||||
raise EnumError(f"enum value {i} not unique")
|
||||
uniqueNames.add(x)
|
||||
uniqueValues.add(i)
|
||||
lookup[x] = i
|
||||
reverseLookup[i] = x
|
||||
i = i + 1
|
||||
self.lookup = lookup
|
||||
self.reverseLookup = reverseLookup
|
||||
|
||||
def __getattr__(self, attr):
|
||||
result = self.lookup.get(attr)
|
||||
if result is None:
|
||||
raise AttributeError(f'enumeration has no member {attr}')
|
||||
return result
|
||||
|
||||
def whatis(self, value):
|
||||
return self.reverseLookup[value]
|
||||
|
||||
|
||||
class ScriptError(Exception):
|
||||
"""Exception used for script errors."""
|
||||
|
||||
|
||||
OpCodes = Enumeration("Opcodes", [
    ("OP_0", 0), ("OP_PUSHDATA1", 76),
    "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE",
    "OP_RESERVED",
    "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7", "OP_8",
    "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
    "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF",
    "OP_ELSE", "OP_ENDIF", "OP_VERIFY", "OP_RETURN",
    "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP",
    "OP_2OVER", "OP_2ROT", "OP_2SWAP", "OP_IFDUP", "OP_DEPTH", "OP_DROP",
    "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
    "OP_SWAP", "OP_TUCK",
    "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE",
    "OP_INVERT", "OP_AND", "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY",
    "OP_RESERVED1", "OP_RESERVED2",
    "OP_1ADD", "OP_1SUB", "OP_2MUL", "OP_2DIV", "OP_NEGATE", "OP_ABS",
    "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV", "OP_MOD",
    "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR", "OP_NUMEQUAL",
    "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN", "OP_GREATERTHAN",
    "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
    "OP_WITHIN",
    "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160", "OP_HASH256",
    "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
    "OP_CHECKMULTISIGVERIFY",
    "OP_NOP1",
    "OP_CHECKLOCKTIMEVERIFY", "OP_CHECKSEQUENCEVERIFY"
])


# Paranoia to make it hard to create bad scripts
assert OpCodes.OP_DUP == 0x76
assert OpCodes.OP_HASH160 == 0xa9
assert OpCodes.OP_EQUAL == 0x87
assert OpCodes.OP_EQUALVERIFY == 0x88
assert OpCodes.OP_CHECKSIG == 0xac
assert OpCodes.OP_CHECKMULTISIG == 0xae
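
# Illustrative sketch (added example, not part of the original module): the
# Enumeration above gives OpCodes a forward name -> value lookup via attribute
# access and a reverse value -> name lookup via whatis().
def _example_opcode_lookup():
    assert OpCodes.OP_HASH160 == 0xa9          # __getattr__ -> lookup dict
    assert OpCodes.whatis(0x76) == 'OP_DUP'    # reverseLookup dict
    assert OpCodes.OP_0 == 0 and OpCodes.OP_PUSHDATA1 == 76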


def _match_ops(ops, pattern):
    if len(ops) != len(pattern):
        return False
    for op, pop in zip(ops, pattern):
        if pop != op:
            # -1 means 'data push', whose op is an (op, data) tuple
            if pop == -1 and isinstance(op, tuple):
                continue
            return False

    return True
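
# Illustrative sketch (added example, not part of the original module): in a
# pattern, -1 stands for "any data push", which get_ops represents as an
# (opcode, data) tuple; all other opcodes must match positionally.
def _example_match_ops():
    fake_hash160 = bytes(20)  # placeholder 20-byte hash for illustration
    ops = [OpCodes.OP_DUP, OpCodes.OP_HASH160, (20, fake_hash160),
           OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]
    assert _match_ops(ops, ScriptPubKey.TO_ADDRESS_OPS)
    assert not _match_ops(ops, ScriptPubKey.TO_P2SH_OPS)  # wrong length and opcodes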


class ScriptPubKey:
    """A class for handling a tx output script that gives conditions
    necessary for spending.
    """

    TO_ADDRESS_OPS = [OpCodes.OP_DUP, OpCodes.OP_HASH160, -1,
                      OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]
    TO_P2SH_OPS = [OpCodes.OP_HASH160, -1, OpCodes.OP_EQUAL]
    TO_PUBKEY_OPS = [-1, OpCodes.OP_CHECKSIG]

    PayToHandlers = namedtuple('PayToHandlers', 'address script_hash pubkey '
                               'unspendable strange')

    @classmethod
    def pay_to(cls, handlers, script):
        """Parse a script, invoke the appropriate handler and
        return the result.

        One of the following handlers is invoked:
           handlers.address(hash160)
           handlers.script_hash(hash160)
           handlers.pubkey(pubkey)
           handlers.unspendable()
           handlers.strange(script)
        """
        try:
            ops = Script.get_ops(script)
        except ScriptError:
            return handlers.unspendable()

        match = _match_ops

        if match(ops, cls.TO_ADDRESS_OPS):
            return handlers.address(ops[2][-1])
        if match(ops, cls.TO_P2SH_OPS):
            return handlers.script_hash(ops[1][-1])
        if match(ops, cls.TO_PUBKEY_OPS):
            return handlers.pubkey(ops[0][-1])
        if ops and ops[0] == OpCodes.OP_RETURN:
            return handlers.unspendable()
        return handlers.strange(script)
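
    # Illustrative sketch (added example, not part of the original class): the
    # PayToHandlers tuple lets callers classify an output script with plain
    # callables; a P2PKH script dispatches to handlers.address() with the hash160.
    @staticmethod
    def _example_pay_to():
        handlers = ScriptPubKey.PayToHandlers(
            address=lambda h160: ('address', h160),
            script_hash=lambda h160: ('p2sh', h160),
            pubkey=lambda pk: ('pubkey', pk),
            unspendable=lambda: ('unspendable',),
            strange=lambda script: ('strange', script),
        )
        fake_hash160 = bytes(20)  # placeholder for illustration
        script = ScriptPubKey.P2PKH_script(fake_hash160)
        assert ScriptPubKey.pay_to(handlers, script) == ('address', fake_hash160)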

    @classmethod
    def P2SH_script(cls, hash160):
        return (bytes([OpCodes.OP_HASH160])
                + Script.push_data(hash160)
                + bytes([OpCodes.OP_EQUAL]))

    @classmethod
    def P2PKH_script(cls, hash160):
        return (bytes([OpCodes.OP_DUP, OpCodes.OP_HASH160])
                + Script.push_data(hash160)
                + bytes([OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]))
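
    # Illustrative sketch (added example, not part of the original class): for a
    # 20-byte hash160 the two builders above produce the standard layouts
    #   P2PKH: OP_DUP OP_HASH160 <push 20> <h160> OP_EQUALVERIFY OP_CHECKSIG  (25 bytes)
    #   P2SH:  OP_HASH160 <push 20> <h160> OP_EQUAL                           (23 bytes)
    @staticmethod
    def _example_output_scripts():
        fake_hash160 = bytes.fromhex('00' * 20)  # placeholder for illustration
        p2pkh = ScriptPubKey.P2PKH_script(fake_hash160)
        p2sh = ScriptPubKey.P2SH_script(fake_hash160)
        assert p2pkh.hex() == '76a914' + '00' * 20 + '88ac'
        assert p2sh.hex() == 'a914' + '00' * 20 + '87'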

    @classmethod
    def validate_pubkey(cls, pubkey, req_compressed=False):
        if isinstance(pubkey, (bytes, bytearray)):
            if len(pubkey) == 33 and pubkey[0] in (2, 3):
                return    # Compressed
            if len(pubkey) == 65 and pubkey[0] == 4:
                if not req_compressed:
                    return
                raise PubKeyError('uncompressed pubkeys are invalid')
        raise PubKeyError(f'invalid pubkey {pubkey}')
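
    # Illustrative sketch (added example, not part of the original class):
    # 33-byte keys starting with 0x02/0x03 are accepted as compressed; 65-byte
    # keys starting with 0x04 pass only when req_compressed is False.
    @staticmethod
    def _example_validate_pubkey():
        compressed = b'\x02' + bytes(32)    # placeholder key for illustration
        uncompressed = b'\x04' + bytes(64)  # placeholder key for illustration
        ScriptPubKey.validate_pubkey(compressed, req_compressed=True)
        ScriptPubKey.validate_pubkey(uncompressed)
        try:
            ScriptPubKey.validate_pubkey(uncompressed, req_compressed=True)
        except PubKeyError:
            pass  # uncompressed keys are rejected when compression is required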

    @classmethod
    def pubkey_script(cls, pubkey):
        cls.validate_pubkey(pubkey)
        return Script.push_data(pubkey) + bytes([OpCodes.OP_CHECKSIG])

    @classmethod
    def multisig_script(cls, m, pubkeys):
        """Returns the script for a pay-to-multisig transaction."""
        n = len(pubkeys)
        if not 1 <= m <= n <= 15:
            raise ScriptError(f'{m:d} of {n:d} multisig script not possible')
        for pubkey in pubkeys:
            cls.validate_pubkey(pubkey, req_compressed=True)
        # See https://bitcoin.org/en/developer-guide
        # 2 of 3 is: OP_2 pubkey1 pubkey2 pubkey3 OP_3 OP_CHECKMULTISIG
        return (bytes([OpCodes.OP_1 + m - 1])
                + b''.join(Script.push_data(pubkey) for pubkey in pubkeys)
                + bytes([OpCodes.OP_1 + n - 1, OpCodes.OP_CHECKMULTISIG]))
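
# Illustrative sketch (added example, not part of the original module): a
# 2-of-3 multisig script is OP_2 <push pk1> <push pk2> <push pk3> OP_3
# OP_CHECKMULTISIG, so with 33-byte compressed keys it is 1 + 3*34 + 2 = 105 bytes.
def _example_multisig_script():
    pubkeys = [bytes([2]) + bytes(32)] * 3    # placeholder compressed keys
    script = ScriptPubKey.multisig_script(2, pubkeys)
    assert script[0] == OpCodes.OP_2
    assert script[-2:] == bytes([OpCodes.OP_3, OpCodes.OP_CHECKMULTISIG])
    assert len(script) == 105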


class Script:

    @classmethod
    def get_ops(cls, script):
        ops = []

        # The unpacks or script[n] below throw on truncated scripts
        try:
            n = 0
            while n < len(script):
                op = script[n]
                n += 1

                if op <= OpCodes.OP_PUSHDATA4:
                    # Raw bytes follow
                    if op < OpCodes.OP_PUSHDATA1:
                        dlen = op
                    elif op == OpCodes.OP_PUSHDATA1:
                        dlen = script[n]
                        n += 1
                    elif op == OpCodes.OP_PUSHDATA2:
                        dlen, = unpack_le_uint16_from(script[n: n + 2])
                        n += 2
                    else:
                        dlen, = unpack_le_uint32_from(script[n: n + 4])
                        n += 4
                    if n + dlen > len(script):
                        raise IndexError
                    op = (op, script[n:n + dlen])
                    n += dlen

                ops.append(op)
        except Exception:
            # Truncated script; e.g. tx_hash
            # ebc9fa1196a59e192352d76c0f6e73167046b9d37b8302b6bb6968dfd279b767
            raise ScriptError('truncated script')

        return ops
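
    # Illustrative sketch (added example, not part of the original class):
    # non-push opcodes come back as plain ints, data pushes as (opcode, data).
    @staticmethod
    def _example_get_ops():
        script = bytes([OpCodes.OP_RETURN]) + Script.push_data(b'hello')
        ops = Script.get_ops(script)
        assert ops == [OpCodes.OP_RETURN, (5, b'hello')]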

    @classmethod
    def push_data(cls, data):
        """Returns the opcodes to push the data on the stack."""
        assert isinstance(data, (bytes, bytearray))

        n = len(data)
        if n < OpCodes.OP_PUSHDATA1:
            return bytes([n]) + data
        if n < 256:
            return bytes([OpCodes.OP_PUSHDATA1, n]) + data
        if n < 65536:
            return bytes([OpCodes.OP_PUSHDATA2]) + pack_le_uint16(n) + data
        return bytes([OpCodes.OP_PUSHDATA4]) + pack_le_uint32(n) + data
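
    # Illustrative sketch (added example, not part of the original class): the
    # encoding chosen depends only on the payload length.
    @staticmethod
    def _example_push_data():
        assert Script.push_data(b'abc') == b'\x03abc'                         # direct push
        assert Script.push_data(bytes(80))[:2] == b'\x4c\x50'                 # OP_PUSHDATA1, 80
        assert Script.push_data(bytes(300))[:3] == b'\x4d\x2c\x01'            # OP_PUSHDATA2, LE 300
        assert Script.push_data(bytes(70000))[:5] == b'\x4e\x70\x11\x01\x00'  # OP_PUSHDATA4, LE 70000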

    @classmethod
    def opcode_name(cls, opcode):
        if OpCodes.OP_0 < opcode < OpCodes.OP_PUSHDATA1:
            return f'OP_{opcode:d}'
        try:
            return OpCodes.whatis(opcode)
        except KeyError:
            return f'OP_UNKNOWN:{opcode:d}'

    @classmethod
    def dump(cls, script):
        # get_ops() returns a single list: plain ints for opcodes and
        # (opcode, data) tuples for data pushes.
        for op in cls.get_ops(script):
            opcode, data = op if isinstance(op, tuple) else (op, None)
            name = cls.opcode_name(opcode)
            if data is None:
                print(name)
            else:
                print(f'{name} {data.hex()} ({len(data):d} bytes)')
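
# Illustrative sketch (added example, not part of the original module):
# dumping a P2PKH output script prints one line per opcode, e.g.
#   OP_DUP
#   OP_HASH160
#   OP_20 <hash160 hex> (20 bytes)
#   OP_EQUALVERIFY
#   OP_CHECKSIG
def _example_dump():
    Script.dump(ScriptPubKey.P2PKH_script(bytes(20)))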
1577
lbry/wallet/server/session.py
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff