Compare commits

..

149 commits

Author SHA1 Message Date
Jonathan Moody
eb5da9511e Revert "TEMP: Try python 3.8."
This reverts commit 8def4d5177.
2023-04-03 13:34:36 -04:00
Jonathan Moody
8722ef840e Bump python_requires >= 3.8.
Code to handle CancelledError (as subclass of Exception) was removed.
2023-04-03 13:34:36 -04:00
Jonathan Moody
6e75a1a89b TEMP: Try python 3.8. 2023-04-03 13:34:36 -04:00
Jonathan Moody
ef3189de1d Work on some DeprecationWarnings: The explicit passing of coroutine objects to asyncio.wait() is deprecated since Python 3.8. 2023-04-03 13:34:36 -04:00
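For context, a minimal sketch of the deprecation this commit works around (illustrative names such as fetch/main, not code from this changeset): since Python 3.8, coroutine objects should be wrapped in Tasks before being handed to asyncio.wait().

import asyncio

async def fetch(n):
    await asyncio.sleep(0.1)
    return n

async def main():
    # Deprecated since Python 3.8: passing bare coroutine objects to asyncio.wait().
    # await asyncio.wait([fetch(1), fetch(2)])
    # Preferred: wrap each coroutine in a Task first.
    tasks = [asyncio.create_task(fetch(n)) for n in (1, 2)]
    done, _pending = await asyncio.wait(tasks)
    return sorted(t.result() for t in done)

print(asyncio.run(main()))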
Jonathan Moody
c2d2080034 Try to suppress asyncio.CancelledError in a different way in test_streaming.py. 2023-04-03 13:34:36 -04:00
Jonathan Moody
d0b5a0a8fd TEMP: Add workflow_dispatch. 2023-04-03 13:34:36 -04:00
Jonathan Moody
1d0e17be21 Another place generalized to Exception or asyncio.CancelledError. 2023-04-03 13:34:36 -04:00
Jonathan Moody
4ef03bb1f4 Try separate file_manager.stop() and start() calls to better
control order of events in test.
While file_manager is stopped, we get no response to file_list().
2023-04-03 13:34:36 -04:00
Jonathan Moody
4bd4bcdc27 Try ubuntu-20.04 to resolve missing libffi.so.7 issue. 2023-04-03 13:34:36 -04:00
Jonathan Moody
e5ca967fa2 Make FileManager.stop() async because SourceManager.stop() is now async. 2023-04-03 13:34:36 -04:00
Jonathan Moody
eed7d02e8b Tweak aiohttp version to be compatible with hub repository. 2023-04-03 13:34:36 -04:00
Jonathan Moody
02aecad52b CancelledError derives from BaseException in Python >= 3.8. The significant functional
change here is in upload_to_reflector(). Unit tests in TestReflector were failing.
Deal with lint related to CancelledError cleanup.
2023-04-03 13:34:36 -04:00
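A brief sketch of the behavioral change this commit refers to (illustrative only, hypothetical worker/main names): on Python 3.8+, asyncio.CancelledError derives from BaseException, so a broad "except Exception" no longer swallows task cancellation.

import asyncio

async def worker():
    try:
        await asyncio.sleep(10)
    except Exception as err:
        # On Python < 3.8 a cancellation would land here; on 3.8+ it does not,
        # because CancelledError now derives from BaseException.
        print("caught", err)
        raise

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print("task was cancelled")

asyncio.run(main())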
Jonathan Moody
585962d930 Make stop(), stop_tasks() consistently async routines, and have stop_tasks()
wait for file_output_task completion. This fixes a problem with
test_download_stop_resume_delete.
2023-04-03 13:34:36 -04:00
Jonathan Moody
ea4fba39a6 Fix Transport, DatagramTransport mockup issues. 2023-04-03 13:34:36 -04:00
Jonathan Moody
7a86406746 Fix and enable lint no-self-use & try-except-raise. 2023-04-03 13:34:36 -04:00
Jonathan Moody
c8a3eb97a4 Bump pylint version. Old pylint did not find standard library stuff on 3.9.12. 2023-04-03 13:34:36 -04:00
Lex Berezhny
20213628d7 upgrade cryptography 2023-04-03 13:34:36 -04:00
Lex Berezhny
2d1649f972 pylint disable shuffle() arg check 2023-04-03 13:34:36 -04:00
Lex Berezhny
5cb04b86a0 shuffle() needs custom random, removed loop from Event()/Queue() 2023-04-03 13:34:36 -04:00
Lex Berezhny
93ab6b3be3 passing loop to asyncio functions is deprecated 2023-04-03 13:34:36 -04:00
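As a rough illustration of the loop-argument cleanup in these commits (a sketch, not code from the diff): primitives such as Event and Queue, and helpers such as sleep(), no longer take an explicit loop= keyword on modern Python; they pick up the running loop automatically.

import asyncio

async def main():
    # Old style, deprecated in 3.8 and removed in 3.10:
    # event = asyncio.Event(loop=asyncio.get_event_loop())
    # await asyncio.sleep(1, loop=asyncio.get_event_loop())

    # Current style: no loop argument needed inside a running event loop.
    event = asyncio.Event()
    queue = asyncio.Queue()
    queue.put_nowait("item")
    event.set()
    await asyncio.sleep(0.1)
    print(await queue.get(), event.is_set())

asyncio.run(main())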
Lex Berezhny
b9762c3e64 update plyvel 2023-04-03 13:34:36 -04:00
Lex Berezhny
82592d00ef try building 3.9 2023-04-03 13:34:36 -04:00
Jonathan Moody
c118174c1a Try shell: bash to simplify. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d284acd8b8 Remove "debug pip cache". 2023-02-02 14:16:07 -05:00
Jonathan Moody
235c98372d Fix syntax. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d2f5073ef4 Single "set pip cache dir" task with conditional inside. 2023-02-02 14:16:07 -05:00
Jonathan Moody
84e5e43117 Bump upload-artifact version too. 2023-02-02 14:16:07 -05:00
Jonathan Moody
7bd025ae54 Upgrade change-string-case. Use startsWith() to test runner.os.
Bump change-string-case-action version again.
2023-02-02 14:16:07 -05:00
Jonathan Moody
8f28ce65b0 Switch to environment vars in $GITHUB_ENV. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d36e305129 Functions save-state, set-output deprecated. Use new mechanism. 2023-02-02 14:16:07 -05:00
Jonathan Moody
2609dee8fb Bump checkout, setup-python, cache action versions. 2023-02-02 14:16:07 -05:00
Lex Berezhny
a2da86d4b5 v0.113.0 2023-01-23 10:43:02 -05:00
Alex Grin
aa16c7fee5 Update conf.py 2023-01-23 10:30:25 -05:00
Alex Grin
3266f72b82 add s1.lbry.network 2023-01-23 10:30:25 -05:00
Jack Robison
77cd2a3f8a add more non lbry.com hubs/bootstrap dht nodes 2023-01-23 10:30:25 -05:00
Alex Grin
308e586e9a add grin's domain to bootstrap hubs list 2023-01-23 10:30:25 -05:00
84beddfd77 Added tracker and dht from pigg.es
2023-01-22 19:09:17 -05:00
Victor Shyba
6258651650
Merge pull request #3716 from lbryio/dht_exceptions
handle remote exceptions on routing table ping
2022-12-13 17:18:47 -03:00
Victor Shyba
cc5f0b6630 handle remote exception on routing table ping 2022-12-13 16:56:58 -03:00
Jonathan Moody
f64d507d39 TEMP: Pin workflows to ubuntu-20.04 to work around missing ripemd160 issue. 2022-12-12 21:47:41 -05:00
Jonathan Moody
001819d5c2 Bump Hub to include fix for supports with wrong names. 2022-11-20 20:34:30 -05:00
Jonathan Moody
8b4c046d28 Try pyinstaller==4.6 to fix MacOS build failure. 2022-11-20 20:34:30 -05:00
Jonathan Moody
2c20ad6c43 Add another zlib.error mapped to InvalidPasswordError. 2022-11-20 20:34:30 -05:00
Jonathan Moody
9e610cc54c Update test for Hub rename of method stage_put() -> stash_put(). 2022-11-20 20:34:30 -05:00
Jonathan Moody
b9d25c6d01 Bump hub to latest, getting fix for TX negative caching issue and others. 2022-11-20 20:34:30 -05:00
Jonathan Moody
419b5b45f2 Allow a few initial "transaction not found" responses from Hub. 2022-11-20 20:34:30 -05:00
Jonathan Moody
516c2dd5d0 Bump hub to fix subscribe race + EADDRINUSE issue. 2022-11-20 20:34:30 -05:00
Jonathan Moody
b99102f9c9 Bump max_misuse_attempts by 50% to 120000. 2022-11-20 20:34:30 -05:00
Lex Berezhny
8c6c7b655c v0.112.0 2022-10-30 21:56:17 -04:00
Lex Berezhny
48c6873fc4 channel_sign command has customizable salt 2022-10-30 21:53:53 -04:00
Victor Shyba
15dc52bd9a
Merge pull request #3695 from lbryio/3690
Fix claim fields fallback raising errors before download is saved on database
2022-10-28 11:16:52 -03:00
Victor Shyba
52d555078f initialize stored claim field for fallback earlier 2022-10-19 15:13:47 -03:00
Victor Shyba
cc976bd010 test for early fallback of suggested_file_name 2022-10-19 15:13:47 -03:00
Lex Berezhny
9cc6992011 torrents needs loop 2022-10-18 17:23:56 -04:00
Lex Berezhny
a1b87460c5 passing loop to asyncio functions is deprecated 2022-10-18 17:23:56 -04:00
jessopb
007e1115c4 v0.111.0 2022-10-18 11:18:26 -04:00
jessopb
20ae51b949
Merge pull request #3692 from lbryio/fix-import/export-backwards-compat
fix backwards compatibility in wallet import/export
2022-10-18 11:16:34 -04:00
zeppi
24e9c7b435 fix backwards compatibility in wallet import/export 2022-10-18 10:53:51 -04:00
zeppi
b452e76e1d reverting version to 0.110.0 2022-10-18 10:27:44 -04:00
Lex Berezhny
341834c30d v0.111.0 2022-10-18 00:37:44 -04:00
Victor Shyba
12bac730bd tests: check mime type as well 2022-10-18 00:31:10 -04:00
Victor Shyba
1027337833 fallback for stream name and tests 2022-10-18 00:31:10 -04:00
Victor Shyba
97fef21f75 fallback for suggested file name and tests 2022-10-18 00:31:10 -04:00
Lex Berezhny
9dafd5f69b added claim_list filtering by reposted_claim_id and fix for claim_id of reposted claim in JSON output 2022-10-18 00:28:13 -04:00
Victor Shyba
fd4f0b2049 bump first-start checkpoint 2022-10-18 00:26:10 -04:00
Lex Berezhny
734f0651a4 minor refactor 2022-10-18 00:25:41 -04:00
zeppi
94deaf55df lint 2022-10-18 00:25:41 -04:00
zeppi
d957d46f96 lint 2022-10-18 00:25:41 -04:00
zeppi
0217aede3d update docs 2022-10-18 00:25:41 -04:00
zeppi
e4e1600f51 Enable unencrypted wallet import and export 2022-10-18 00:25:41 -04:00
Jonathan Moody
d0aad8ccaf Add zlib.error string just observed for the first time. 2022-09-29 22:18:54 -04:00
Jonathan Moody
ab50cfa5c1 Add test steps to repeatedly sync_apply() using a bad password. 2022-09-29 22:18:54 -04:00
Jonathan Moody
5a26aea398 Feedback: Reuse IntegrationTestcase.generate() in generate_and_wait(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
bd1cebdb4c Bump hub to include TaskGroup fix. 2022-09-29 22:18:54 -04:00
Jonathan Moody
ec433f069f Substitute InvalidPasswordError for zlib.error. 2022-09-29 22:18:54 -04:00
Jonathan Moody
cd6d3fec9c Wait for initial sync in test_wallet_syncing_status(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
8c474a69de Hub error message changed to include blocked/filtered. 2022-09-29 22:18:54 -04:00
Jonathan Moody
8903056648 Bump hub version to latest. 2022-09-29 22:18:54 -04:00
Jonathan Moody
749a92d0e5 Wait on block height too in generate_and_wait(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
a7d7efecc7 Logging level back to INFO. 2022-09-20 10:04:23 -04:00
Jonathan Moody
c88f0797a3 Log f.exception() if present instead of "Stopped". 2022-09-20 10:04:23 -04:00
Jonathan Moody
137ebd503d Test insufficient funds behavior. 2022-09-20 10:04:23 -04:00
Jonathan Moody
c3f5dd780e Revise exception handling. 2022-09-20 10:04:23 -04:00
Jonathan Moody
20b1865879 Don't use retriable_call(). Add handling for InsufficientFundsError. 2022-09-20 10:04:23 -04:00
Jonathan Moody
231b982422 Wait on usage payment TX to be processed. 2022-09-20 10:04:23 -04:00
Jonathan Moody
fd69401791 Catch and log exceptions coming from the pay() task.
Change test to reproduce failure.
2022-09-20 10:04:23 -04:00
Jonathan Moody
718d046833 Logging for test_single_server_payment debug. 2022-09-20 10:04:23 -04:00
Victor Shyba
e10f57d1ed
Merge pull request #3642 from lbryio/libtorrent
use official libtorrent, fix tests, make it a normal dependency
2022-09-09 19:58:38 -03:00
Victor Shyba
8a033d58df fix torrent component 2022-09-09 12:21:59 -03:00
Victor Shyba
c07c369a28 add libtorrent pyinstaller hook 2022-09-09 12:21:59 -03:00
Victor Shyba
5be990fc55 do not ignore libtorrent import error 2022-09-09 12:21:59 -03:00
Victor Shyba
8f26010c04 make libtorrent a normal dependency 2022-09-09 12:21:59 -03:00
Victor Shyba
3021962e3d tests: add peer directly instead of relying on torrent dht 2022-09-09 12:21:59 -03:00
Victor Shyba
84d89ce5af torrent: disable upnp, natpmp. 2022-09-09 12:21:59 -03:00
Victor Shyba
0961cad716 remove dead code 2022-09-09 12:21:59 -03:00
Jonathan Moody
5c543cb374 Wait for hub to update with all 100 new blocks
before proceeding with initial_headers_sync().
2022-09-08 22:40:09 -04:00
Jonathan Moody
f78d7896a5 Revert "Add more failure message details for debugging."
This reverts commit 5e00a79751.
2022-09-08 18:13:26 -04:00
Jonathan Moody
78a28de2aa Align style of generate() with generate_and_wait(). 2022-09-08 18:13:26 -04:00
Jonathan Moody
45a255e7a2 Reuse generate() logic to wait on hub
instead of half-baked reorg() logic.
2022-09-08 18:13:26 -04:00
Jonathan Moody
d2738c2e72 Add more failure message details for debugging. 2022-09-08 18:13:26 -04:00
Jonathan Moody
a7c7ab7f7b Correct the terminal height we wait for in generate(). 2022-09-08 18:13:26 -04:00
Jonathan Moody
988f288715 Lint fix for _es_height checks. 2022-09-08 18:13:26 -04:00
Jonathan Moody
38e9b5b432 Wait for _es_height in addition to db_height. 2022-09-08 18:13:26 -04:00
Victor Shyba
f7455600cc
Merge pull request #3625 from lbryio/dht_crawler
Add script to collect DHT metrics
2022-09-07 12:56:41 -03:00
Victor Shyba
c7c2d6fe5a collect connections reachability 2022-09-07 12:03:11 -03:00
Victor Shyba
c6c0228970 fix crawler startup query 2022-09-07 12:03:11 -03:00
Victor Shyba
8d9d2c76ae routing table sizes as histogram 2022-09-07 12:03:11 -03:00
Victor Shyba
0b059a5445 use a histogram for latency, remove labels 2022-09-07 12:03:11 -03:00
Victor Shyba
ab67f417ee dht_crawler: wait and retry during port switch 2022-09-07 12:03:11 -03:00
Victor Shyba
0e7a1aee0a dht_crawler: clean in memory set for expired peers 2022-09-07 12:03:11 -03:00
Victor Shyba
d0497cf6b5 dht_crawler: skip saving connections for now 2022-09-07 12:03:11 -03:00
Victor Shyba
c38573d5de dht_crawler: gather both loops, avoid task exceptions being hidden 2022-09-07 12:03:11 -03:00
Victor Shyba
f077e56cec dht_crawler:only count latency during findNode 2022-09-07 12:03:11 -03:00
Victor Shyba
5e58c2f224 fix hosting metrics, improve logging 2022-09-07 12:03:11 -03:00
Victor Shyba
cc64789e96 dht_crawler: fix logging for missing ports 2022-09-07 12:03:11 -03:00
Victor Shyba
b5c390ca04 docker: add volume declaration 2022-09-07 12:03:11 -03:00
Victor Shyba
da2ffb000e skip peers with bad ports without raising 2022-09-07 12:03:11 -03:00
Victor Shyba
df77392fe0 dht crawler:improve logging, metrics, make startup concurrent 2022-09-07 12:03:11 -03:00
Victor Shyba
9aa9ecdc0a add arg for db path 2022-09-07 12:03:11 -03:00
Victor Shyba
43b45a939b format logging 2022-09-07 12:03:11 -03:00
Victor Shyba
e2922a434f add script to generate probe dataset 2022-09-07 12:03:11 -03:00
Victor Shyba
0d6125de0b add sd_hash prober 2022-09-07 12:03:11 -03:00
Victor Shyba
13af7800c2 refactor script, remove dep 2022-09-07 12:03:11 -03:00
Victor Shyba
47a5d37d7c change default metric port, add sqlalchemy to dockerfile 2022-09-07 12:03:11 -03:00
Victor Shyba
4a3a7e318d update pip and setuptools on dht dockerfile 2022-09-07 12:03:11 -03:00
Victor Shyba
85ff487af5 dht_crawler: randomize port when idle 2022-09-07 12:03:11 -03:00
Victor Shyba
62eb9d5c75 dht_crawler: only count non zero connections 2022-09-07 12:03:11 -03:00
Victor Shyba
cfe5c8de8a dht_crawler: serve prometheus metrics at 7070 2022-09-07 12:03:11 -03:00
Victor Shyba
0497698c5b dht_crawler: skip ping if known node_id 2022-09-07 12:03:11 -03:00
Victor Shyba
508bdb8e94 dht_crawler: keep working set in memory, flush to db on intervals 2022-09-07 12:03:11 -03:00
Victor Shyba
cd42f0d726 dht_crawler: fix node id store 2022-09-07 12:03:11 -03:00
Victor Shyba
2706b66a92 dht_crawler: dont re-bootstrap. try known reachable even when they expire 2022-09-07 12:03:11 -03:00
Victor Shyba
29c2d5715d dht_crawler: fix last_seen update 2022-09-07 12:03:11 -03:00
Victor Shyba
965389b759 dht_crawler: process older first, avoid discarding 2022-09-07 12:03:11 -03:00
Victor Shyba
174439f517 dht_crawler: cleanup, try not to reset key 2022-09-07 12:03:11 -03:00
Victor Shyba
baf422fc03 dht_crawler: extract refresh_limit, bump to 1h 2022-09-07 12:03:11 -03:00
Victor Shyba
61f7fbe230 dht_crawler: avoid reads 2022-09-07 12:03:11 -03:00
Victor Shyba
c6c27925b7 dht_crawler: flush/commit only when finished 2022-09-07 12:03:11 -03:00
Victor Shyba
be4c62cf32 check membership instead of one update per peer 2022-09-07 12:03:11 -03:00
Victor Shyba
443a1c32fa dht_crawler: save a set of connections to avoid dupes, enable initial crawl 2022-09-07 12:03:11 -03:00
Victor Shyba
90c2a58470 dht_crawler: dont gather empty, fix crash 2022-09-07 12:03:11 -03:00
Victor Shyba
adc79ec404 dht_crawler: only warn for missing key if it replied 2022-09-07 12:03:11 -03:00
Victor Shyba
137d8ca4ac dht_crawler: enable WAL 2022-09-07 12:03:11 -03:00
Victor Shyba
abf4d888af dht_crawler: warn if we cannot get node id 2022-09-07 12:03:11 -03:00
Victor Shyba
6c350e57dd dht_crawler: query recently checked as stats 2022-09-07 12:03:11 -03:00
Victor Shyba
fb7a93096e only count checked unreachable 2022-09-07 12:03:11 -03:00
Victor Shyba
7ea88e7b31 dht_crawler: store data 2022-09-07 12:03:11 -03:00
Victor Shyba
2361e34541 dht crawler, initial version 2022-09-07 12:03:11 -03:00
Victor Shyba
be06378437 add method for getting the node_id from a known peer on peer manager 2022-09-07 12:03:11 -03:00
71 changed files with 1313 additions and 412 deletions

View file

@ -1,24 +1,24 @@
name: ci name: ci
on: ["push", "pull_request"] on: ["push", "pull_request", "workflow_dispatch"]
jobs: jobs:
lint: lint:
name: lint name: lint
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions/setup-python@v1 - uses: actions/setup-python@v4
with: with:
python-version: '3.7' python-version: '3.9'
- name: extract pip cache - name: extract pip cache
uses: actions/cache@v2 uses: actions/cache@v3
with: with:
path: ~/.cache/pip path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }} key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip- restore-keys: ${{ runner.os }}-pip-
- run: pip install --user --upgrade pip wheel - run: pip install --user --upgrade pip wheel
- run: pip install -e .[torrent,lint] - run: pip install -e .[lint]
- run: make lint - run: make lint
tests-unit: tests-unit:
@ -26,31 +26,31 @@ jobs:
strategy: strategy:
matrix: matrix:
os: os:
- ubuntu-latest - ubuntu-20.04
- macos-latest - macos-latest
- windows-latest - windows-latest
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions/setup-python@v1 - uses: actions/setup-python@v4
with: with:
python-version: '3.7' python-version: '3.9'
- name: set pip cache dir - name: set pip cache dir
id: pip-cache shell: bash
run: echo "::set-output name=dir::$(pip cache dir)" run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
- name: extract pip cache - name: extract pip cache
uses: actions/cache@v2 uses: actions/cache@v3
with: with:
path: ${{ steps.pip-cache.outputs.dir }} path: ${{ env.PIP_CACHE_DIR }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }} key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip- restore-keys: ${{ runner.os }}-pip-
- id: os-name - id: os-name
uses: ASzc/change-string-case-action@v1 uses: ASzc/change-string-case-action@v5
with: with:
string: ${{ runner.os }} string: ${{ runner.os }}
- run: python -m pip install --user --upgrade pip wheel - run: python -m pip install --user --upgrade pip wheel
- if: startsWith(runner.os, 'linux') - if: startsWith(runner.os, 'linux')
run: pip install -e .[torrent,test] run: pip install -e .[test]
- if: startsWith(runner.os, 'linux') - if: startsWith(runner.os, 'linux')
env: env:
HOME: /tmp HOME: /tmp
@ -72,7 +72,7 @@ jobs:
tests-integration: tests-integration:
name: "tests / integration" name: "tests / integration"
runs-on: ubuntu-latest runs-on: ubuntu-20.04
strategy: strategy:
matrix: matrix:
test: test:
@ -93,16 +93,16 @@ jobs:
uses: elastic/elastic-github-actions/elasticsearch@master uses: elastic/elastic-github-actions/elasticsearch@master
with: with:
stack-version: 7.12.1 stack-version: 7.12.1
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions/setup-python@v1 - uses: actions/setup-python@v4
with: with:
python-version: '3.7' python-version: '3.9'
- if: matrix.test == 'other' - if: matrix.test == 'other'
run: | run: |
sudo apt-get update sudo apt-get update
sudo apt-get install -y --no-install-recommends ffmpeg sudo apt-get install -y --no-install-recommends ffmpeg
- name: extract pip cache - name: extract pip cache
uses: actions/cache@v2 uses: actions/cache@v3
with: with:
path: ./.tox path: ./.tox
key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }} key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
@ -123,7 +123,7 @@ jobs:
coverage: coverage:
needs: ["tests-unit", "tests-integration"] needs: ["tests-unit", "tests-integration"]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- name: finalize coverage report submission - name: finalize coverage report submission
env: env:
@ -138,29 +138,29 @@ jobs:
strategy: strategy:
matrix: matrix:
os: os:
- ubuntu-18.04 - ubuntu-20.04
- macos-latest - macos-latest
- windows-latest - windows-latest
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions/setup-python@v1 - uses: actions/setup-python@v4
with: with:
python-version: '3.7' python-version: '3.9'
- id: os-name - id: os-name
uses: ASzc/change-string-case-action@v1 uses: ASzc/change-string-case-action@v5
with: with:
string: ${{ runner.os }} string: ${{ runner.os }}
- name: set pip cache dir - name: set pip cache dir
id: pip-cache shell: bash
run: echo "::set-output name=dir::$(pip cache dir)" run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
- name: extract pip cache - name: extract pip cache
uses: actions/cache@v2 uses: actions/cache@v3
with: with:
path: ${{ steps.pip-cache.outputs.dir }} path: ${{ env.PIP_CACHE_DIR }}
key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }} key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
restore-keys: ${{ runner.os }}-pip- restore-keys: ${{ runner.os }}-pip-
- run: pip install pyinstaller==4.4 - run: pip install pyinstaller==4.6
- run: pip install -e . - run: pip install -e .
- if: startsWith(github.ref, 'refs/tags/v') - if: startsWith(github.ref, 'refs/tags/v')
run: python docker/set_build.py run: python docker/set_build.py
@ -175,7 +175,7 @@ jobs:
pip install pywin32==301 pip install pywin32==301
pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
dist/lbrynet.exe --version dist/lbrynet.exe --version
- uses: actions/upload-artifact@v2 - uses: actions/upload-artifact@v3
with: with:
name: lbrynet-${{ steps.os-name.outputs.lowercase }} name: lbrynet-${{ steps.os-name.outputs.lowercase }}
path: dist/ path: dist/
@ -184,7 +184,7 @@ jobs:
name: "release" name: "release"
if: startsWith(github.ref, 'refs/tags/v') if: startsWith(github.ref, 'refs/tags/v')
needs: ["build"] needs: ["build"]
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v1 - uses: actions/checkout@v1
- uses: actions/download-artifact@v2 - uses: actions/download-artifact@v2

View file

@ -7,7 +7,7 @@ on:
jobs: jobs:
release: release:
name: "slack notification" name: "slack notification"
runs-on: ubuntu-latest runs-on: ubuntu-20.04
steps: steps:
- uses: LoveToKnow/slackify-markdown-action@v1.0.0 - uses: LoveToKnow/slackify-markdown-action@v1.0.0
id: markdown id: markdown

View file

@ -2,6 +2,7 @@ FROM debian:10-slim
ARG user=lbry ARG user=lbry
ARG projects_dir=/home/$user ARG projects_dir=/home/$user
ARG db_dir=/database
ARG DOCKER_TAG ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker ARG DOCKER_COMMIT=docker
@ -27,12 +28,16 @@ RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
COPY . $projects_dir COPY . $projects_dir
RUN chown -R $user:$user $projects_dir RUN chown -R $user:$user $projects_dir
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
USER $user USER $user
WORKDIR $projects_dir WORKDIR $projects_dir
RUN python3 -m pip install -U setuptools pip
RUN make install RUN make install
RUN python3 docker/set_build.py RUN python3 docker/set_build.py
RUN rm ~/.cache -rf RUN rm ~/.cache -rf
VOLUME $db_dir
ENTRYPOINT ["python3", "scripts/dht_node.py"] ENTRYPOINT ["python3", "scripts/dht_node.py"]

View file

@ -1,2 +1,2 @@
__version__ = "0.110.0" __version__ = "0.113.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name

View file

@ -87,8 +87,8 @@ class AbstractBlob:
self.blob_completed_callback = blob_completed_callback self.blob_completed_callback = blob_completed_callback
self.blob_directory = blob_directory self.blob_directory = blob_directory
self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {} self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
self.verified: asyncio.Event = asyncio.Event(loop=self.loop) self.verified: asyncio.Event = asyncio.Event()
self.writing: asyncio.Event = asyncio.Event(loop=self.loop) self.writing: asyncio.Event = asyncio.Event()
self.readers: typing.List[typing.BinaryIO] = [] self.readers: typing.List[typing.BinaryIO] = []
self.added_on = added_on or time.time() self.added_on = added_on or time.time()
self.is_mine = is_mine self.is_mine = is_mine
@ -222,7 +222,7 @@ class AbstractBlob:
peer_port: typing.Optional[int] = None) -> HashBlobWriter: peer_port: typing.Optional[int] = None) -> HashBlobWriter:
if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed(): if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}") raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
fut = asyncio.Future(loop=self.loop) fut = asyncio.Future()
writer = HashBlobWriter(self.blob_hash, self.get_length, fut) writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
self.writers[(peer_address, peer_port)] = writer self.writers[(peer_address, peer_port)] = writer

View file

@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.buf = b'' self.buf = b''
# this is here to handle the race when the downloader is closed right as response_fut gets a result # this is here to handle the race when the downloader is closed right as response_fut gets a result
self.closed = asyncio.Event(loop=self.loop) self.closed = asyncio.Event()
def data_received(self, data: bytes): def data_received(self, data: bytes):
if self.connection_manager: if self.connection_manager:
@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
self.transport.write(msg) self.transport.write(msg)
if self.connection_manager: if self.connection_manager:
self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg)) self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop) response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
availability_response = response.get_availability_response() availability_response = response.get_availability_response()
price_response = response.get_price_response() price_response = response.get_price_response()
blob_response = response.get_blob_response() blob_response = response.get_blob_response()
@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
f" timeout in {self.peer_timeout}" f" timeout in {self.peer_timeout}"
log.debug(msg) log.debug(msg)
msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}" msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop) await asyncio.wait_for(self.writer.finished, self.peer_timeout)
# wait for the io to finish # wait for the io to finish
await self.blob.verified.wait() await self.blob.verified.wait()
log.info("%s at %fMB/s", msg, log.info("%s at %fMB/s", msg,
@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
try: try:
self._blob_bytes_received = 0 self._blob_bytes_received = 0
self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port) self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
self._response_fut = asyncio.Future(loop=self.loop) self._response_fut = asyncio.Future()
return await self._download_blob() return await self._download_blob()
except OSError: except OSError:
# i'm not sure how to fix this race condition - jack # i'm not sure how to fix this race condition - jack
@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
try: try:
if not connected_protocol: if not connected_protocol:
await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port), await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
peer_connect_timeout, loop=loop) peer_connect_timeout)
connected_protocol = protocol connected_protocol = protocol
if blob is None or blob.get_is_verified() or not blob.is_writeable(): if blob is None or blob.get_is_verified() or not blob.is_writeable():
# blob is None happens when we are just opening a connection # blob is None happens when we are just opening a connection

View file

@ -30,7 +30,7 @@ class BlobDownloader:
self.failures: typing.Dict['KademliaPeer', int] = {} self.failures: typing.Dict['KademliaPeer', int] = {}
self.connection_failures: typing.Set['KademliaPeer'] = set() self.connection_failures: typing.Set['KademliaPeer'] = set()
self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {} self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
self.is_running = asyncio.Event(loop=self.loop) self.is_running = asyncio.Event()
def should_race_continue(self, blob: 'AbstractBlob'): def should_race_continue(self, blob: 'AbstractBlob'):
max_probes = self.config.max_connections_per_download * (1 if self.connections else 10) max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@ -64,8 +64,8 @@ class BlobDownloader:
self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1 self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
async def new_peer_or_finished(self): async def new_peer_or_finished(self):
active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)] active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED') await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
def cleanup_active(self): def cleanup_active(self):
if not self.active_connections and not self.connections: if not self.active_connections and not self.connections:
@ -126,7 +126,7 @@ class BlobDownloader:
async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node', async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
blob_hash: str) -> 'AbstractBlob': blob_hash: str) -> 'AbstractBlob':
search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download) search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
search_queue.put_nowait(blob_hash) search_queue.put_nowait(blob_hash)
peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue) peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers) fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)

View file

@ -25,19 +25,19 @@ class BlobServerProtocol(asyncio.Protocol):
self.idle_timeout = idle_timeout self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout self.transfer_timeout = transfer_timeout
self.server_task: typing.Optional[asyncio.Task] = None self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event(loop=self.loop) self.started_listening = asyncio.Event()
self.buf = b'' self.buf = b''
self.transport: typing.Optional[asyncio.Transport] = None self.transport: typing.Optional[asyncio.Transport] = None
self.lbrycrd_address = lbrycrd_address self.lbrycrd_address = lbrycrd_address
self.peer_address_and_port: typing.Optional[str] = None self.peer_address_and_port: typing.Optional[str] = None
self.started_transfer = asyncio.Event(loop=self.loop) self.started_transfer = asyncio.Event()
self.transfer_finished = asyncio.Event(loop=self.loop) self.transfer_finished = asyncio.Event()
self.close_on_idle_task: typing.Optional[asyncio.Task] = None self.close_on_idle_task: typing.Optional[asyncio.Task] = None
async def close_on_idle(self): async def close_on_idle(self):
while self.transport: while self.transport:
try: try:
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop) await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
except asyncio.TimeoutError: except asyncio.TimeoutError:
log.debug("closing idle connection from %s", self.peer_address_and_port) log.debug("closing idle connection from %s", self.peer_address_and_port)
return self.close() return self.close()
@ -101,7 +101,7 @@ class BlobServerProtocol(asyncio.Protocol):
log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port) log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
self.started_transfer.set() self.started_transfer.set()
try: try:
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop) sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
if sent and sent > 0: if sent and sent > 0:
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent) self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port) log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
@ -157,7 +157,7 @@ class BlobServer:
self.loop = loop self.loop = loop
self.blob_manager = blob_manager self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event(loop=self.loop) self.started_listening = asyncio.Event()
self.lbrycrd_address = lbrycrd_address self.lbrycrd_address = lbrycrd_address
self.idle_timeout = idle_timeout self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout self.transfer_timeout = transfer_timeout

View file

@ -574,9 +574,6 @@ class TranscodeConfig(BaseConfig):
class CLIConfig(TranscodeConfig): class CLIConfig(TranscodeConfig):
api = String('Host name and port for lbrynet daemon API.', 'localhost:5279', metavar='HOST:PORT') api = String('Host name and port for lbrynet daemon API.', 'localhost:5279', metavar='HOST:PORT')
exit_on_disconnect = Toggle(
'Shutdown daemon when connection to wallet server closes.', False
)
@property @property
def api_connection_url(self) -> str: def api_connection_url(self) -> str:
@ -691,6 +688,9 @@ class Config(CLIConfig):
tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [ tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
('tracker.lbry.com', 9252), ('tracker.lbry.com', 9252),
('tracker.lbry.grin.io', 9252), ('tracker.lbry.grin.io', 9252),
('tracker.lbry.pigg.es', 9252),
('tracker.lizard.technology', 9252),
('s1.lbry.network', 9252),
]) ])
lbryum_servers = Servers("SPV wallet servers", [ lbryum_servers = Servers("SPV wallet servers", [
@ -703,14 +703,20 @@ class Config(CLIConfig):
('spv17.lbry.com', 50001), ('spv17.lbry.com', 50001),
('spv18.lbry.com', 50001), ('spv18.lbry.com', 50001),
('spv19.lbry.com', 50001), ('spv19.lbry.com', 50001),
('hub.lbry.grin.io', 50001),
('hub.lizard.technology', 50001),
('s1.lbry.network', 50001),
]) ])
known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [ known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
('dht.lbry.grin.io', 4444), # Grin ('dht.lbry.grin.io', 4444), # Grin
('dht.lbry.madiator.com', 4444), # Madiator ('dht.lbry.madiator.com', 4444), # Madiator
('dht.lbry.pigg.es', 4444), # Pigges
('lbrynet1.lbry.com', 4444), # US EAST ('lbrynet1.lbry.com', 4444), # US EAST
('lbrynet2.lbry.com', 4444), # US WEST ('lbrynet2.lbry.com', 4444), # US WEST
('lbrynet3.lbry.com', 4444), # EU ('lbrynet3.lbry.com', 4444), # EU
('lbrynet4.lbry.com', 4444) # ASIA ('lbrynet4.lbry.com', 4444), # ASIA
('dht.lizard.technology', 4444), # Jack
('s2.lbry.network', 4444),
]) ])
# blockchain # blockchain

View file

@ -67,7 +67,7 @@ class ConnectionManager:
while True: while True:
last = time.perf_counter() last = time.perf_counter()
await asyncio.sleep(0.1, loop=self.loop) await asyncio.sleep(0.1)
self._status['incoming_bps'].clear() self._status['incoming_bps'].clear()
self._status['outgoing_bps'].clear() self._status['outgoing_bps'].clear()
now = time.perf_counter() now = time.perf_counter()

View file

@ -42,15 +42,13 @@ class BlobAnnouncer:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers) log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err: except Exception as err:
self.announcements_sent_metric.labels(peers=0, error=True).inc() self.announcements_sent_metric.labels(peers=0, error=True).inc()
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise err
log.warning("error announcing %s: %s", blob_hash[:8], str(err)) log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _announce(self, batch_size: typing.Optional[int] = 10): async def _announce(self, batch_size: typing.Optional[int] = 10):
while batch_size: while batch_size:
if not self.node.joined.is_set(): if not self.node.joined.is_set():
await self.node.joined.wait() await self.node.joined.wait()
await asyncio.sleep(60, loop=self.loop) await asyncio.sleep(60)
if not self.node.protocol.routing_table.get_peers(): if not self.node.protocol.routing_table.get_peers():
log.warning("No peers in DHT, announce round skipped") log.warning("No peers in DHT, announce round skipped")
continue continue
@ -59,7 +57,7 @@ class BlobAnnouncer:
log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue)) log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
while len(self.announce_queue) > 0: while len(self.announce_queue) > 0:
log.info("%i blobs to announce", len(self.announce_queue)) log.info("%i blobs to announce", len(self.announce_queue))
await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)], loop=self.loop) await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
announced = list(filter(None, self.announced)) announced = list(filter(None, self.announced))
if announced: if announced:
await self.storage.update_last_announced_blobs(announced) await self.storage.update_last_announced_blobs(announced)

View file

@ -37,7 +37,7 @@ class Node:
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout, self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
split_buckets_under_index, is_bootstrap_node) split_buckets_under_index, is_bootstrap_node)
self.listening_port: asyncio.DatagramTransport = None self.listening_port: asyncio.DatagramTransport = None
self.joined = asyncio.Event(loop=self.loop) self.joined = asyncio.Event()
self._join_task: asyncio.Task = None self._join_task: asyncio.Task = None
self._refresh_task: asyncio.Task = None self._refresh_task: asyncio.Task = None
self._storage = storage self._storage = storage
@ -79,7 +79,7 @@ class Node:
else: else:
if force_once: if force_once:
break break
fut = asyncio.Future(loop=self.loop) fut = asyncio.Future()
self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None) self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
await fut await fut
continue continue
@ -93,7 +93,7 @@ class Node:
if force_once: if force_once:
break break
fut = asyncio.Future(loop=self.loop) fut = asyncio.Future()
self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None) self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
await fut await fut
@ -108,7 +108,7 @@ class Node:
for peer in peers: for peer in peers:
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port) log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
stored_to_tup = await asyncio.gather( stored_to_tup = await asyncio.gather(
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop *(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
) )
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted] stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
if stored_to: if stored_to:
@ -182,14 +182,14 @@ class Node:
for address, udp_port in known_node_urls or [] for address, udp_port in known_node_urls or []
])) ]))
except socket.gaierror: except socket.gaierror:
await asyncio.sleep(30, loop=self.loop) await asyncio.sleep(30)
continue continue
self.protocol.peer_manager.reset() self.protocol.peer_manager.reset()
self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0) self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32) await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)
await asyncio.sleep(1, loop=self.loop) await asyncio.sleep(1)
def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None): def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls)) self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
@ -271,7 +271,7 @@ class Node:
def accumulate_peers(self, search_queue: asyncio.Queue, def accumulate_peers(self, search_queue: asyncio.Queue,
peer_queue: typing.Optional[asyncio.Queue] = None peer_queue: typing.Optional[asyncio.Queue] = None
) -> typing.Tuple[asyncio.Queue, asyncio.Task]: ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
queue = peer_queue or asyncio.Queue(loop=self.loop) queue = peer_queue or asyncio.Queue()
return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue)) return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))

View file

@ -100,6 +100,9 @@ class PeerManager:
self._node_id_reverse_mapping[node_id] = (address, udp_port) self._node_id_reverse_mapping[node_id] = (address, udp_port)
self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys()) self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
def get_node_id_for_endpoint(self, address, port):
return self._node_id_mapping.get((address, port))
def prune(self): # TODO: periodically call this def prune(self): # TODO: periodically call this
now = self._loop.time() now = self._loop.time()
to_pop = [] to_pop = []
@ -150,9 +153,10 @@ class PeerManager:
def peer_is_good(self, peer: 'KademliaPeer'): def peer_is_good(self, peer: 'KademliaPeer'):
return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port) return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)
-    def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
-        node_id, address, tcp_port = decode_compact_address(compact_address)
-        return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
+def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
+    node_id, address, tcp_port = decode_compact_address(compact_address)
+    return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
@dataclass(unsafe_hash=True) @dataclass(unsafe_hash=True)

View file

@ -8,7 +8,7 @@ from typing import TYPE_CHECKING
from lbry.dht import constants from lbry.dht import constants
from lbry.dht.error import RemoteException, TransportNotConnected from lbry.dht.error import RemoteException, TransportNotConnected
from lbry.dht.protocol.distance import Distance from lbry.dht.protocol.distance import Distance
from lbry.dht.peer import make_kademlia_peer from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
from lbry.dht.serialization.datagram import PAGE_KEY from lbry.dht.serialization.datagram import PAGE_KEY
if TYPE_CHECKING: if TYPE_CHECKING:
@ -26,6 +26,15 @@ class FindResponse:
def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]: def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
raise NotImplementedError() raise NotImplementedError()
def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
for contact_triple in self.get_close_triples():
node_id, address, udp_port = contact_triple
try:
yield make_kademlia_peer(node_id, address, udp_port)
except ValueError:
log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
peer_info.udp_port, address, udp_port)
class FindNodeResponse(FindResponse): class FindNodeResponse(FindResponse):
def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]): def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
@ -74,7 +83,7 @@ class IterativeFinder(AsyncIterator):
self.contacted: typing.Set['KademliaPeer'] = set() self.contacted: typing.Set['KademliaPeer'] = set()
self.distance = Distance(key) self.distance = Distance(key)
self.iteration_queue = asyncio.Queue(loop=self.loop) self.iteration_queue = asyncio.Queue()
self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {} self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
self.iteration_count = 0 self.iteration_count = 0
@ -125,13 +134,8 @@ class IterativeFinder(AsyncIterator):
async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse): async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
self._add_active(peer) self._add_active(peer)
-        for contact_triple in response.get_close_triples():
-            node_id, address, udp_port = contact_triple
-            try:
-                self._add_active(make_kademlia_peer(node_id, address, udp_port))
-            except ValueError:
-                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
-                            peer.udp_port, address, udp_port)
+        for new_peer in response.get_close_kademlia_peers(peer):
+            self._add_active(new_peer)
self.check_result_ready(response) self.check_result_ready(response)
self._log_state(reason="check result") self._log_state(reason="check result")
@ -319,7 +323,7 @@ class IterativeValueFinder(IterativeFinder):
decoded_peers = set() decoded_peers = set()
for compact_addr in parsed.found_compact_addresses: for compact_addr in parsed.found_compact_addresses:
try: try:
decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)) decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
except ValueError: except ValueError:
log.warning("misbehaving peer %s:%i returned invalid peer for blob", log.warning("misbehaving peer %s:%i returned invalid peer for blob",
peer.address, peer.udp_port) peer.address, peer.udp_port)
@ -341,7 +345,7 @@ class IterativeValueFinder(IterativeFinder):
def check_result_ready(self, response: FindValueResponse): def check_result_ready(self, response: FindValueResponse):
if response.found: if response.found:
blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr) blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
for compact_addr in response.found_compact_addresses] for compact_addr in response.found_compact_addresses]
to_yield = [] to_yield = []
for blob_peer in blob_peers: for blob_peer in blob_peers:

View file

@ -253,7 +253,7 @@ class PingQueue:
del self._pending_contacts[peer] del self._pending_contacts[peer]
self.maybe_ping(peer) self.maybe_ping(peer)
break break
await asyncio.sleep(1, loop=self._loop) await asyncio.sleep(1)
def start(self): def start(self):
assert not self._running assert not self._running
@ -319,10 +319,10 @@ class KademliaProtocol(DatagramProtocol):
self.ping_queue = PingQueue(self.loop, self) self.ping_queue = PingQueue(self.loop, self)
self.node_rpc = KademliaRPC(self, self.loop, self.peer_port) self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
self.rpc_timeout = rpc_timeout self.rpc_timeout = rpc_timeout
self._split_lock = asyncio.Lock(loop=self.loop) self._split_lock = asyncio.Lock()
self._to_remove: typing.Set['KademliaPeer'] = set() self._to_remove: typing.Set['KademliaPeer'] = set()
self._to_add: typing.Set['KademliaPeer'] = set() self._to_add: typing.Set['KademliaPeer'] = set()
self._wakeup_routing_task = asyncio.Event(loop=self.loop) self._wakeup_routing_task = asyncio.Event()
self.maintaing_routing_task: typing.Optional[asyncio.Task] = None self.maintaing_routing_task: typing.Optional[asyncio.Task] = None
@functools.lru_cache(128) @functools.lru_cache(128)
@ -385,7 +385,7 @@ class KademliaProtocol(DatagramProtocol):
while self._to_add: while self._to_add:
async with self._split_lock: async with self._split_lock:
await self._add_peer(self._to_add.pop()) await self._add_peer(self._to_add.pop())
await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop) await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
self._wakeup_routing_task.clear() self._wakeup_routing_task.clear()
def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram): def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):

View file

@ -8,6 +8,7 @@ from prometheus_client import Gauge
from lbry import utils from lbry import utils
from lbry.dht import constants from lbry.dht import constants
from lbry.dht.error import RemoteException
from lbry.dht.protocol.distance import Distance from lbry.dht.protocol.distance import Distance
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.dht.peer import KademliaPeer, PeerManager from lbry.dht.peer import KademliaPeer, PeerManager
@ -395,7 +396,7 @@ class TreeRoutingTable:
try: try:
await probe(to_replace) await probe(to_replace)
return False return False
except asyncio.TimeoutError: except (asyncio.TimeoutError, RemoteException):
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index, log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port) to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.buckets[bucket_index]: if to_replace in self.buckets[bucket_index]:

View file

@ -1,5 +1,5 @@
from lbry.conf import Config
from lbry.extras.cli import execute_command from lbry.extras.cli import execute_command
from lbry.conf import Config
def daemon_rpc(conf: Config, method: str, **kwargs): def daemon_rpc(conf: Config, method: str, **kwargs):

View file

@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
def running(self): def running(self):
return self._running return self._running
async def get_status(self): async def get_status(self): # pylint: disable=no-self-use
return return
async def start(self): async def start(self):

View file

@ -42,7 +42,7 @@ class ComponentManager:
self.analytics_manager = analytics_manager self.analytics_manager = analytics_manager
self.component_classes = {} self.component_classes = {}
self.components = set() self.components = set()
self.started = asyncio.Event(loop=self.loop) self.started = asyncio.Event()
self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop()) self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())
for component_name, component_class in self.default_component_classes.items(): for component_name, component_class in self.default_component_classes.items():
@ -118,7 +118,7 @@ class ComponentManager:
component._setup() for component in stage if not component.running component._setup() for component in stage if not component.running
] ]
if needing_start: if needing_start:
await asyncio.wait(needing_start) await asyncio.wait(map(asyncio.create_task, needing_start))
self.started.set() self.started.set()
async def stop(self): async def stop(self):
@ -131,7 +131,7 @@ class ComponentManager:
component._stop() for component in stage if component.running component._stop() for component in stage if component.running
] ]
if needing_stop: if needing_stop:
await asyncio.wait(needing_stop) await asyncio.wait(map(asyncio.create_task, needing_stop))
def all_components_running(self, *component_names): def all_components_running(self, *component_names):
""" """

View file

@ -28,11 +28,7 @@ from lbry.torrent.torrent_manager import TorrentManager
from lbry.wallet import WalletManager from lbry.wallet import WalletManager
from lbry.wallet.usage_payment import WalletServerPayer from lbry.wallet.usage_payment import WalletServerPayer
from lbry.torrent.tracker import TrackerClient from lbry.torrent.tracker import TrackerClient
from lbry.torrent.session import TorrentSession
try:
from lbry.torrent.session import TorrentSession
except ImportError:
TorrentSession = None
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -361,10 +357,6 @@ class FileManagerComponent(Component):
wallet = self.component_manager.get_component(WALLET_COMPONENT) wallet = self.component_manager.get_component(WALLET_COMPONENT)
node = self.component_manager.get_component(DHT_COMPONENT) \ node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None if self.component_manager.has_component(DHT_COMPONENT) else None
try:
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT) if TorrentSession else None
except NameError:
torrent = None
log.info('Starting the file manager') log.info('Starting the file manager')
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
self.file_manager = FileManager( self.file_manager = FileManager(
@ -373,7 +365,8 @@ class FileManagerComponent(Component):
self.file_manager.source_managers['stream'] = StreamManager( self.file_manager.source_managers['stream'] = StreamManager(
loop, self.conf, blob_manager, wallet, storage, node, loop, self.conf, blob_manager, wallet, storage, node,
) )
if TorrentSession and LIBTORRENT_COMPONENT not in self.conf.components_to_skip: if self.component_manager.has_component(LIBTORRENT_COMPONENT):
torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
self.file_manager.source_managers['torrent'] = TorrentManager( self.file_manager.source_managers['torrent'] = TorrentManager(
loop, self.conf, torrent, storage, self.component_manager.analytics_manager loop, self.conf, torrent, storage, self.component_manager.analytics_manager
) )
@ -381,7 +374,7 @@ class FileManagerComponent(Component):
log.info('Done setting up file manager') log.info('Done setting up file manager')
async def stop(self): async def stop(self):
self.file_manager.stop() await self.file_manager.stop()
class BackgroundDownloaderComponent(Component): class BackgroundDownloaderComponent(Component):
@ -502,9 +495,8 @@ class TorrentComponent(Component):
} }
async def start(self): async def start(self):
-        if TorrentSession:
-            self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
-            await self.torrent_session.bind()  # TODO: specify host/port
+        self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
+        await self.torrent_session.bind()  # TODO: specify host/port
async def stop(self): async def stop(self):
if self.torrent_session: if self.torrent_session:
@ -559,7 +551,7 @@ class UPnPComponent(Component):
while True: while True:
if now: if now:
await self._maintain_redirects() await self._maintain_redirects()
await asyncio.sleep(360, loop=self.component_manager.loop) await asyncio.sleep(360)
async def _maintain_redirects(self): async def _maintain_redirects(self):
# setup the gateway if necessary # setup the gateway if necessary
@ -568,8 +560,6 @@ class UPnPComponent(Component):
self.upnp = await UPnP.discover(loop=self.component_manager.loop) self.upnp = await UPnP.discover(loop=self.component_manager.loop)
log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string) log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
except Exception as err: except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.warning("upnp discovery failed: %s", err) log.warning("upnp discovery failed: %s", err)
self.upnp = None self.upnp = None
@ -681,7 +671,7 @@ class UPnPComponent(Component):
log.info("Removing upnp redirects: %s", self.upnp_redirects) log.info("Removing upnp redirects: %s", self.upnp_redirects)
await asyncio.wait([ await asyncio.wait([
self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items() self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
], loop=self.component_manager.loop) ])
if self._maintain_redirects_task and not self._maintain_redirects_task.done(): if self._maintain_redirects_task and not self._maintain_redirects_task.done():
self._maintain_redirects_task.cancel() self._maintain_redirects_task.cancel()

View file

@ -614,7 +614,8 @@ class Daemon(metaclass=JSONRPCServerType):
content_type='application/json' content_type='application/json'
) )
-    async def handle_metrics_get_request(self, request: web.Request):
+    @staticmethod
+    async def handle_metrics_get_request(request: web.Request):
try: try:
return web.Response( return web.Response(
text=prom_generate_latest().decode(), text=prom_generate_latest().decode(),
@ -1330,7 +1331,7 @@ class Daemon(metaclass=JSONRPCServerType):
@requires("wallet") @requires("wallet")
async def jsonrpc_wallet_export(self, password=None, wallet_id=None): async def jsonrpc_wallet_export(self, password=None, wallet_id=None):
""" """
Export wallet data Exports encrypted wallet data if password is supplied; otherwise plain JSON.
Wallet must be unlocked to perform this operation. Wallet must be unlocked to perform this operation.
@ -1342,18 +1343,19 @@ class Daemon(metaclass=JSONRPCServerType):
--wallet_id=<wallet_id> : (str) wallet being exported --wallet_id=<wallet_id> : (str) wallet being exported
Returns: Returns:
(str) data (str) data: base64-encoded encrypted wallet, or cleartext JSON
""" """
assert password is not None, "passwordless use is not implemented yet"
wallet = self.wallet_manager.get_wallet_or_default(wallet_id) wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
encrypted = wallet.pack(password) if password is None:
return encrypted.decode() return wallet.to_json()
return wallet.pack(password).decode()
@requires("wallet") @requires("wallet")
async def jsonrpc_wallet_import(self, data, password=None, wallet_id=None, blocking=False): async def jsonrpc_wallet_import(self, data, password=None, wallet_id=None, blocking=False):
""" """
Import wallet data and merge accounts, preferences. Import wallet data and merge accounts and preferences. Data is expected to be JSON if
password is not supplied.
Wallet must be unlocked to perform this operation. Wallet must be unlocked to perform this operation.
@ -1368,9 +1370,8 @@ class Daemon(metaclass=JSONRPCServerType):
--blocking : (bool) wait until any new accounts have merged --blocking : (bool) wait until any new accounts have merged
Returns: Returns:
(str) data (str) base64-encoded encrypted wallet, or cleartext JSON
""" """
assert not data.strip().startswith("{"), "unencrypted wallet import is not implemented yet"
wallet = self.wallet_manager.get_wallet_or_default(wallet_id) wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data) added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
for new_account in itertools.chain(added_accounts, merged_accounts): for new_account in itertools.chain(added_accounts, merged_accounts):
@ -1384,8 +1385,7 @@ class Daemon(metaclass=JSONRPCServerType):
for new_account in added_accounts: for new_account in added_accounts:
asyncio.create_task(self.ledger.subscribe_account(new_account)) asyncio.create_task(self.ledger.subscribe_account(new_account))
wallet.save() wallet.save()
encrypted = wallet.pack(password) return await self.jsonrpc_wallet_export(password=password, wallet_id=wallet_id)
return encrypted.decode()
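With the asserts gone, wallet_export and wallet_import now treat the password as optional and fall back to cleartext JSON. A minimal sketch of the new export branch, assuming only a wallet object that exposes to_json() and pack(password) as used in the hunk above:

def export_wallet(wallet, password=None) -> str:
    # no password: cleartext JSON, per the updated docstring
    if password is None:
        return wallet.to_json()
    # password given: packed (encrypted) bytes, decoded for the RPC response
    return wallet.pack(password).decode()

wallet_import takes the same shortcut at the end by delegating back to wallet_export, so both calls return data in the same format.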
@requires("wallet") @requires("wallet")
async def jsonrpc_wallet_add(self, wallet_id): async def jsonrpc_wallet_add(self, wallet_id):
@ -2410,6 +2410,7 @@ class Daemon(metaclass=JSONRPCServerType):
Usage: Usage:
claim_list [--claim_type=<claim_type>...] [--claim_id=<claim_id>...] [--name=<name>...] [--is_spent] claim_list [--claim_type=<claim_type>...] [--claim_id=<claim_id>...] [--name=<name>...] [--is_spent]
[--reposted_claim_id=<reposted_claim_id>...]
[--channel_id=<channel_id>...] [--account_id=<account_id>] [--wallet_id=<wallet_id>] [--channel_id=<channel_id>...] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
[--has_source | --has_no_source] [--page=<page>] [--page_size=<page_size>] [--has_source | --has_no_source] [--page=<page>] [--page_size=<page_size>]
[--resolve] [--order_by=<order_by>] [--no_totals] [--include_received_tips] [--resolve] [--order_by=<order_by>] [--no_totals] [--include_received_tips]
@ -2420,6 +2421,7 @@ class Daemon(metaclass=JSONRPCServerType):
--channel_id=<channel_id> : (str or list) streams in this channel --channel_id=<channel_id> : (str or list) streams in this channel
--name=<name> : (str or list) claim name --name=<name> : (str or list) claim name
--is_spent : (bool) shows previous claim updates and abandons --is_spent : (bool) shows previous claim updates and abandons
--reposted_claim_id=<reposted_claim_id> : (str or list) reposted claim id
--account_id=<account_id> : (str) id of the account to query --account_id=<account_id> : (str) id of the account to query
--wallet_id=<wallet_id> : (str) restrict results to specific wallet --wallet_id=<wallet_id> : (str) restrict results to specific wallet
--has_source : (bool) list claims containing a source field --has_source : (bool) list claims containing a source field
@ -2942,19 +2944,21 @@ class Daemon(metaclass=JSONRPCServerType):
@requires(WALLET_COMPONENT) @requires(WALLET_COMPONENT)
async def jsonrpc_channel_sign( async def jsonrpc_channel_sign(
self, channel_name=None, channel_id=None, hexdata=None, channel_account_id=None, wallet_id=None): self, channel_name=None, channel_id=None, hexdata=None, salt=None,
channel_account_id=None, wallet_id=None):
""" """
Signs data using the specified channel signing key. Signs data using the specified channel signing key.
Usage: Usage:
channel_sign [<channel_name> | --channel_name=<channel_name>] channel_sign [<channel_name> | --channel_name=<channel_name>] [<channel_id> | --channel_id=<channel_id>]
[<channel_id> | --channel_id=<channel_id>] [<hexdata> | --hexdata=<hexdata>] [<hexdata> | --hexdata=<hexdata>] [<salt> | --salt=<salt>]
[--channel_account_id=<channel_account_id>...] [--wallet_id=<wallet_id>] [--channel_account_id=<channel_account_id>...] [--wallet_id=<wallet_id>]
Options: Options:
--channel_name=<channel_name> : (str) name of channel used to sign (or use channel id) --channel_name=<channel_name> : (str) name of channel used to sign (or use channel id)
--channel_id=<channel_id> : (str) claim id of channel used to sign (or use channel name) --channel_id=<channel_id> : (str) claim id of channel used to sign (or use channel name)
--hexdata=<hexdata> : (str) data to sign, encoded as hexadecimal --hexdata=<hexdata> : (str) data to sign, encoded as hexadecimal
--salt=<salt> : (str) salt to use for signing, default is to use timestamp
--channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts. for channel certificates, defaults to all accounts.
--wallet_id=<wallet_id> : (str) restrict operation to specific wallet --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
@ -2971,11 +2975,13 @@ class Daemon(metaclass=JSONRPCServerType):
signing_channel = await self.get_channel_or_error( signing_channel = await self.get_channel_or_error(
wallet, channel_account_id, channel_id, channel_name, for_signing=True wallet, channel_account_id, channel_id, channel_name, for_signing=True
) )
timestamp = str(int(time.time())) if salt is None:
signature = signing_channel.sign_data(unhexlify(str(hexdata)), timestamp) salt = str(int(time.time()))
signature = signing_channel.sign_data(unhexlify(str(hexdata)), salt)
return { return {
'signature': signature, 'signature': signature,
'signing_ts': timestamp 'signing_ts': salt, # DEPRECATED
'salt': salt,
} }
@requires(WALLET_COMPONENT) @requires(WALLET_COMPONENT)
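channel_sign now accepts an optional salt and only falls back to a unix-timestamp string when it is omitted, so callers can make signatures reproducible. An illustrative call; the daemon handle here is hypothetical, while the parameter and response keys are the ones shown in the hunk:

async def sign_reproducibly(daemon):
    result = await daemon.jsonrpc_channel_sign(
        channel_name='@channel', hexdata='deadbeef', salt='12345'
    )
    # 'signing_ts' simply mirrors 'salt' and is kept for backwards compatibility
    assert result['salt'] == result['signing_ts'] == '12345'
    return result['signature']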


@ -80,8 +80,6 @@ class MarketFeed:
self.rate = ExchangeRate(self.market, rate, int(time.time())) self.rate = ExchangeRate(self.market, rate, int(time.time()))
self.last_check = time.time() self.last_check = time.time()
return self.rate return self.rate
except asyncio.CancelledError:
raise
except asyncio.TimeoutError: except asyncio.TimeoutError:
log.warning("Timed out fetching exchange rate from %s.", self.name) log.warning("Timed out fetching exchange rate from %s.", self.name)
except json.JSONDecodeError as e: except json.JSONDecodeError as e:


@ -328,8 +328,8 @@ class JSONResponseEncoder(JSONEncoder):
result.update({ result.update({
'streaming_url': managed_stream.stream_url, 'streaming_url': managed_stream.stream_url,
'stream_hash': managed_stream.stream_hash, 'stream_hash': managed_stream.stream_hash,
'stream_name': managed_stream.descriptor.stream_name, 'stream_name': managed_stream.stream_name,
'suggested_file_name': managed_stream.descriptor.suggested_file_name, 'suggested_file_name': managed_stream.suggested_file_name,
'sd_hash': managed_stream.descriptor.sd_hash, 'sd_hash': managed_stream.descriptor.sd_hash,
'mime_type': managed_stream.mime_type, 'mime_type': managed_stream.mime_type,
'key': managed_stream.descriptor.key, 'key': managed_stream.descriptor.key,


@ -793,7 +793,7 @@ class SQLiteStorage(SQLiteMixin):
await self.db.run(_save_claims) await self.db.run(_save_claims)
if update_file_callbacks: if update_file_callbacks:
await asyncio.wait(update_file_callbacks) await asyncio.wait(map(asyncio.create_task, update_file_callbacks))
if claim_id_to_supports: if claim_id_to_supports:
await self.save_supports(claim_id_to_supports) await self.save_supports(claim_id_to_supports)


@ -13,11 +13,12 @@ from lbry.schema.url import URL
from lbry.wallet.dewies import dewies_to_lbc from lbry.wallet.dewies import dewies_to_lbc
from lbry.file.source_manager import SourceManager from lbry.file.source_manager import SourceManager
from lbry.file.source import ManagedDownloadSource from lbry.file.source import ManagedDownloadSource
from lbry.extras.daemon.storage import StoredContentClaim
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.conf import Config from lbry.conf import Config
from lbry.extras.daemon.analytics import AnalyticsManager from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.extras.daemon.storage import SQLiteStorage from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet import WalletManager, Output from lbry.wallet import WalletManager
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -49,10 +50,10 @@ class FileManager:
await manager.started.wait() await manager.started.wait()
self.started.set() self.started.set()
def stop(self): async def stop(self):
for manager in self.source_managers.values(): for manager in self.source_managers.values():
# fixme: pop or not? # fixme: pop or not?
manager.stop() await manager.stop()
self.started.clear() self.started.clear()
@cache_concurrent @cache_concurrent
@ -98,8 +99,6 @@ class FileManager:
except asyncio.TimeoutError: except asyncio.TimeoutError:
raise ResolveTimeoutError(uri) raise ResolveTimeoutError(uri)
except Exception as err: except Exception as err:
if isinstance(err, asyncio.CancelledError):
raise
log.exception("Unexpected error resolving stream:") log.exception("Unexpected error resolving stream:")
raise ResolveError(f"Unexpected error resolving stream: {str(err)}") raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
if 'error' in resolved_result: if 'error' in resolved_result:
@ -194,21 +193,24 @@ class FileManager:
#################### ####################
# make downloader and wait for start # make downloader and wait for start
#################### ####################
# temporary with fields we know so downloader can start. Missing fields are populated later.
stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
amount=txo.amount, height=txo.tx_ref.height,
serialized=claim.to_bytes().hex())
if not claim.stream.source.bt_infohash: if not claim.stream.source.bt_infohash:
# fixme: this shouldnt be here # fixme: this shouldnt be here
stream = ManagedStream( stream = ManagedStream(
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash, self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment, download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
analytics_manager=self.analytics_manager analytics_manager=self.analytics_manager, claim=stored_claim
) )
stream.downloader.node = source_manager.node stream.downloader.node = source_manager.node
else: else:
stream = TorrentSource( stream = TorrentSource(
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash, self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
file_name=file_name, download_directory=download_directory or self.config.download_dir, file_name=file_name, download_directory=download_directory or self.config.download_dir,
status=ManagedStream.STATUS_RUNNING, status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager,
analytics_manager=self.analytics_manager,
torrent_session=source_manager.torrent_session torrent_session=source_manager.torrent_session
) )
log.info("starting download for %s", uri) log.info("starting download for %s", uri)
@ -240,13 +242,12 @@ class FileManager:
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier) claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
stream.set_claim(claim_info, claim) stream.set_claim(claim_info, claim)
if save_file: if save_file:
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download), await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
loop=self.loop)
return stream return stream
except asyncio.TimeoutError: except asyncio.TimeoutError:
error = DownloadDataTimeoutError(stream.sd_hash) error = DownloadDataTimeoutError(stream.sd_hash)
raise error raise error
except Exception as err: # forgive data timeout, don't delete stream except (Exception, asyncio.CancelledError) as err: # forgive data timeout, don't delete stream
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError, expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError) KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
if isinstance(err, expected): if isinstance(err, expected):
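Catching (Exception, asyncio.CancelledError) restores the pre-3.8 behaviour in which the cleanup path also runs when the download is cancelled, while still letting the error propagate. A reduced sketch of that shape, with a placeholder stream object:

import asyncio

async def download_with_cleanup(stream):
    try:
        await stream.start()
    except (Exception, asyncio.CancelledError):
        # cleanup runs for failures and for cancellation alike, then the
        # original error (including CancelledError) is re-raised
        await stream.stop()
        raise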


@ -47,10 +47,10 @@ class ManagedDownloadSource:
self.analytics_manager = analytics_manager self.analytics_manager = analytics_manager
self.downloader = None self.downloader = None
self.saving = asyncio.Event(loop=self.loop) self.saving = asyncio.Event()
self.finished_writing = asyncio.Event(loop=self.loop) self.finished_writing = asyncio.Event()
self.started_writing = asyncio.Event(loop=self.loop) self.started_writing = asyncio.Event()
self.finished_write_attempt = asyncio.Event(loop=self.loop) self.finished_write_attempt = asyncio.Event()
# @classmethod # @classmethod
# async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str, # async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
@ -67,7 +67,7 @@ class ManagedDownloadSource:
async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None): async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
raise NotImplementedError() raise NotImplementedError()
def stop_tasks(self): async def stop_tasks(self):
raise NotImplementedError() raise NotImplementedError()
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'): def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
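Dropping loop= from the Event constructors follows the Python 3.8 deprecation (and 3.10 removal) of that argument; the primitives now attach to the running loop when first used. A tiny runnable sketch:

import asyncio

class Download:
    def __init__(self):
        # no loop= argument: the events bind to the running loop lazily
        self.saving = asyncio.Event()
        self.finished_writing = asyncio.Event()

async def main():
    download = Download()
    download.saving.set()
    await download.saving.wait()   # returns immediately, already set

asyncio.run(main())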


@ -54,16 +54,16 @@ class SourceManager:
self.storage = storage self.storage = storage
self.analytics_manager = analytics_manager self.analytics_manager = analytics_manager
self._sources: typing.Dict[str, ManagedDownloadSource] = {} self._sources: typing.Dict[str, ManagedDownloadSource] = {}
self.started = asyncio.Event(loop=self.loop) self.started = asyncio.Event()
def add(self, source: ManagedDownloadSource): def add(self, source: ManagedDownloadSource):
self._sources[source.identifier] = source self._sources[source.identifier] = source
def remove(self, source: ManagedDownloadSource): async def remove(self, source: ManagedDownloadSource):
if source.identifier not in self._sources: if source.identifier not in self._sources:
return return
self._sources.pop(source.identifier) self._sources.pop(source.identifier)
source.stop_tasks() await source.stop_tasks()
async def initialize_from_database(self): async def initialize_from_database(self):
raise NotImplementedError() raise NotImplementedError()
@ -72,10 +72,10 @@ class SourceManager:
await self.initialize_from_database() await self.initialize_from_database()
self.started.set() self.started.set()
def stop(self): async def stop(self):
while self._sources: while self._sources:
_, source = self._sources.popitem() _, source = self._sources.popitem()
source.stop_tasks() await source.stop_tasks()
self.started.clear() self.started.clear()
async def create(self, file_path: str, key: Optional[bytes] = None, async def create(self, file_path: str, key: Optional[bytes] = None,
@ -83,7 +83,7 @@ class SourceManager:
raise NotImplementedError() raise NotImplementedError()
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False): async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
self.remove(source) await self.remove(source)
if delete_file and source.output_file_exists: if delete_file and source.output_file_exists:
os.remove(source.full_path) os.remove(source.full_path)


@ -398,6 +398,12 @@ class Repost(BaseClaim):
claim_type = Claim.REPOST claim_type = Claim.REPOST
def to_dict(self):
claim = super().to_dict()
if claim.pop('claim_hash', None):
claim['claim_id'] = self.reference.claim_id
return claim
@property @property
def reference(self) -> ClaimReference: def reference(self) -> ClaimReference:
return ClaimReference(self.message) return ClaimReference(self.message)
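The new to_dict() override drops the raw claim_hash field from API output and exposes the reposted claim's hex claim_id instead. The same substitution in isolation, with made-up values:

def replace_hash_with_id(claim: dict, claim_id: str) -> dict:
    # mirror of the override above: pop the hash, publish the readable id
    if claim.pop('claim_hash', None):
        claim['claim_id'] = claim_id
    return claim

replace_hash_with_id({'claim_hash': b'\x00', 'name': 'repost'}, 'abc123')
# -> {'name': 'repost', 'claim_id': 'abc123'}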


@ -23,6 +23,7 @@ class BackgroundDownloader:
except ValueError: except ValueError:
return return
except asyncio.CancelledError: except asyncio.CancelledError:
log.debug("Cancelled background downloader")
raise raise
except Exception: except Exception:
log.error("Unexpected download error on background downloader") log.error("Unexpected download error on background downloader")


@ -27,8 +27,8 @@ class StreamDownloader:
self.config = config self.config = config
self.blob_manager = blob_manager self.blob_manager = blob_manager
self.sd_hash = sd_hash self.sd_hash = sd_hash
self.search_queue = asyncio.Queue(loop=loop) # blob hashes to feed into the iterative finder self.search_queue = asyncio.Queue() # blob hashes to feed into the iterative finder
self.peer_queue = asyncio.Queue(loop=loop) # new peers to try self.peer_queue = asyncio.Queue() # new peers to try
self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue) self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
self.descriptor: typing.Optional[StreamDescriptor] = descriptor self.descriptor: typing.Optional[StreamDescriptor] = descriptor
self.node: typing.Optional['Node'] = None self.node: typing.Optional['Node'] = None
@ -72,7 +72,7 @@ class StreamDownloader:
now = self.loop.time() now = self.loop.time()
sd_blob = await asyncio.wait_for( sd_blob = await asyncio.wait_for(
self.blob_downloader.download_blob(self.sd_hash, connection_id), self.blob_downloader.download_blob(self.sd_hash, connection_id),
self.config.blob_download_timeout, loop=self.loop self.config.blob_download_timeout
) )
log.info("downloaded sd blob %s", self.sd_hash) log.info("downloaded sd blob %s", self.sd_hash)
self.time_to_descriptor = self.loop.time() - now self.time_to_descriptor = self.loop.time() - now
@ -111,7 +111,7 @@ class StreamDownloader:
raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}") raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
blob = await asyncio.wait_for( blob = await asyncio.wait_for(
self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id), self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
self.config.blob_download_timeout * 10, loop=self.loop self.config.blob_download_timeout * 10
) )
return blob return blob


@ -60,9 +60,9 @@ class ManagedStream(ManagedDownloadSource):
self.file_output_task: typing.Optional[asyncio.Task] = None self.file_output_task: typing.Optional[asyncio.Task] = None
self.delayed_stop_task: typing.Optional[asyncio.Task] = None self.delayed_stop_task: typing.Optional[asyncio.Task] = None
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = [] self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
self.fully_reflected = asyncio.Event(loop=self.loop) self.fully_reflected = asyncio.Event()
self.streaming = asyncio.Event(loop=self.loop) self.streaming = asyncio.Event()
self._running = asyncio.Event(loop=self.loop) self._running = asyncio.Event()
@property @property
def sd_hash(self) -> str: def sd_hash(self) -> str:
@ -82,7 +82,19 @@ class ManagedStream(ManagedDownloadSource):
@property @property
def file_name(self) -> Optional[str]: def file_name(self) -> Optional[str]:
return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None) return self._file_name or self.suggested_file_name
@property
def suggested_file_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.suggested_file_name) or '').strip()
return sanitize_file_name(first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name))
@property
def stream_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.stream_name) or '').strip()
return first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name)
@property @property
def written_bytes(self) -> int: def written_bytes(self) -> int:
@ -116,7 +128,7 @@ class ManagedStream(ManagedDownloadSource):
@property @property
def mime_type(self): def mime_type(self):
return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0] return guess_media_type(os.path.basename(self.suggested_file_name))[0]
@property @property
def download_path(self): def download_path(self):
@ -149,7 +161,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("start downloader for stream (sd hash: %s)", self.sd_hash) log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
self._running.set() self._running.set()
try: try:
await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop) await asyncio.wait_for(self.downloader.start(), timeout)
except asyncio.TimeoutError: except asyncio.TimeoutError:
self._running.clear() self._running.clear()
raise DownloadSDTimeoutError(self.sd_hash) raise DownloadSDTimeoutError(self.sd_hash)
@ -162,7 +174,7 @@ class ManagedStream(ManagedDownloadSource):
if not self._file_name: if not self._file_name:
self._file_name = await get_next_available_file_name( self._file_name = await get_next_available_file_name(
self.loop, self.download_directory, self.loop, self.download_directory,
self._file_name or sanitize_file_name(self.descriptor.suggested_file_name) self._file_name or sanitize_file_name(self.suggested_file_name)
) )
file_name, download_dir = self._file_name, self.download_directory file_name, download_dir = self._file_name, self.download_directory
else: else:
@ -179,7 +191,7 @@ class ManagedStream(ManagedDownloadSource):
Stop any running save/stream tasks as well as the downloader and update the status in the database Stop any running save/stream tasks as well as the downloader and update the status in the database
""" """
self.stop_tasks() await self.stop_tasks()
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING: if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED) await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
@ -267,7 +279,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id, log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
self.sd_hash[:6], self.full_path) self.sd_hash[:6], self.full_path)
await self.blob_manager.storage.set_saved_file(self.stream_hash) await self.blob_manager.storage.set_saved_file(self.stream_hash)
except Exception as err: except (Exception, asyncio.CancelledError) as err:
if os.path.isfile(output_path): if os.path.isfile(output_path):
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash) log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
os.remove(output_path) os.remove(output_path)
@ -294,14 +306,14 @@ class ManagedStream(ManagedDownloadSource):
self.download_directory = download_directory or self.download_directory or self.config.download_dir self.download_directory = download_directory or self.download_directory or self.config.download_dir
if not self.download_directory: if not self.download_directory:
raise ValueError("no directory to download to") raise ValueError("no directory to download to")
if not (file_name or self._file_name or self.descriptor.suggested_file_name): if not (file_name or self._file_name or self.suggested_file_name):
raise ValueError("no file name to download to") raise ValueError("no file name to download to")
if not os.path.isdir(self.download_directory): if not os.path.isdir(self.download_directory):
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory) log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
os.mkdir(self.download_directory) os.mkdir(self.download_directory)
self._file_name = await get_next_available_file_name( self._file_name = await get_next_available_file_name(
self.loop, self.download_directory, self.loop, self.download_directory,
file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name) file_name or self._file_name or sanitize_file_name(self.suggested_file_name)
) )
await self.blob_manager.storage.change_file_download_dir_and_file_name( await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, self.download_directory, self.file_name self.stream_hash, self.download_directory, self.file_name
@ -309,15 +321,16 @@ class ManagedStream(ManagedDownloadSource):
await self.update_status(ManagedStream.STATUS_RUNNING) await self.update_status(ManagedStream.STATUS_RUNNING)
self.file_output_task = self.loop.create_task(self._save_file(self.full_path)) self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
try: try:
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop) await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout)
except asyncio.TimeoutError: except asyncio.TimeoutError:
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id) log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
self.stop_tasks() await self.stop_tasks()
await self.update_status(ManagedStream.STATUS_STOPPED) await self.update_status(ManagedStream.STATUS_STOPPED)
def stop_tasks(self): async def stop_tasks(self):
if self.file_output_task and not self.file_output_task.done(): if self.file_output_task and not self.file_output_task.done():
self.file_output_task.cancel() self.file_output_task.cancel()
await asyncio.gather(self.file_output_task, return_exceptions=True)
self.file_output_task = None self.file_output_task = None
while self.streaming_responses: while self.streaming_responses:
req, response = self.streaming_responses.pop() req, response = self.streaming_responses.pop()
@ -354,7 +367,7 @@ class ManagedStream(ManagedDownloadSource):
return sent return sent
except ConnectionError: except ConnectionError:
return sent return sent
except (OSError, Exception) as err: except (OSError, Exception, asyncio.CancelledError) as err:
if isinstance(err, asyncio.CancelledError): if isinstance(err, asyncio.CancelledError):
log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id) log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id)
elif isinstance(err, OSError): elif isinstance(err, OSError):
@ -389,7 +402,7 @@ class ManagedStream(ManagedDownloadSource):
self.sd_hash[:6]) self.sd_hash[:6])
await self.stop() await self.stop()
return return
await asyncio.sleep(1, loop=self.loop) await asyncio.sleep(1)
def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]: def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
if '=' in get_range: if '=' in get_range:
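stop_tasks() becomes a coroutine so it can wait for the cancelled file_output_task to actually unwind (asyncio.gather with return_exceptions=True swallows the resulting CancelledError) before clearing the reference, making shutdown ordering deterministic. The cancel-then-await idiom on its own:

import asyncio

async def cancel_and_wait(task: asyncio.Task):
    # request cancellation, then wait until the task has really finished
    if task and not task.done():
        task.cancel()
        await asyncio.gather(task, return_exceptions=True)

async def main():
    task = asyncio.create_task(asyncio.sleep(3600))
    await cancel_and_wait(task)
    assert task.cancelled()

asyncio.run(main())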


@ -21,7 +21,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.loop = asyncio.get_event_loop() self.loop = asyncio.get_event_loop()
self.blob_manager = blob_manager self.blob_manager = blob_manager
self.server_task: asyncio.Task = None self.server_task: asyncio.Task = None
self.started_listening = asyncio.Event(loop=self.loop) self.started_listening = asyncio.Event()
self.buf = b'' self.buf = b''
self.transport: asyncio.StreamWriter = None self.transport: asyncio.StreamWriter = None
self.writer: typing.Optional['HashBlobWriter'] = None self.writer: typing.Optional['HashBlobWriter'] = None
@ -29,9 +29,9 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.descriptor: typing.Optional['StreamDescriptor'] = None self.descriptor: typing.Optional['StreamDescriptor'] = None
self.sd_blob: typing.Optional['BlobFile'] = None self.sd_blob: typing.Optional['BlobFile'] = None
self.received = [] self.received = []
self.incoming = incoming_event or asyncio.Event(loop=self.loop) self.incoming = incoming_event or asyncio.Event()
self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop) self.not_incoming = not_incoming_event or asyncio.Event()
self.stop_event = stop_event or asyncio.Event(loop=self.loop) self.stop_event = stop_event or asyncio.Event()
self.chunk_size = response_chunk_size self.chunk_size = response_chunk_size
self.wait_for_stop_task: typing.Optional[asyncio.Task] = None self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
self.partial_event = partial_event self.partial_event = partial_event
@ -94,7 +94,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.incoming.set() self.incoming.set()
self.send_response({"send_sd_blob": True}) self.send_response({"send_sd_blob": True})
try: try:
await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop) await asyncio.wait_for(self.sd_blob.verified.wait(), 30)
self.descriptor = await StreamDescriptor.from_stream_descriptor_blob( self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
self.loop, self.blob_manager.blob_dir, self.sd_blob self.loop, self.blob_manager.blob_dir, self.sd_blob
) )
@ -140,7 +140,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
self.incoming.set() self.incoming.set()
self.send_response({"send_blob": True}) self.send_response({"send_blob": True})
try: try:
await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop) await asyncio.wait_for(blob.verified.wait(), 30)
self.send_response({"received_blob": True}) self.send_response({"received_blob": True})
except asyncio.TimeoutError: except asyncio.TimeoutError:
self.send_response({"received_blob": False}) self.send_response({"received_blob": False})
@ -162,10 +162,10 @@ class ReflectorServer:
self.loop = asyncio.get_event_loop() self.loop = asyncio.get_event_loop()
self.blob_manager = blob_manager self.blob_manager = blob_manager
self.server_task: typing.Optional[asyncio.Task] = None self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event(loop=self.loop) self.started_listening = asyncio.Event()
self.stopped_listening = asyncio.Event(loop=self.loop) self.stopped_listening = asyncio.Event()
self.incoming_event = incoming_event or asyncio.Event(loop=self.loop) self.incoming_event = incoming_event or asyncio.Event()
self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop) self.not_incoming_event = not_incoming_event or asyncio.Event()
self.response_chunk_size = response_chunk_size self.response_chunk_size = response_chunk_size
self.stop_event = stop_event self.stop_event = stop_event
self.partial_needs = partial_needs # for testing cases where it doesn't know what it wants self.partial_needs = partial_needs # for testing cases where it doesn't know what it wants


@ -54,7 +54,7 @@ class StreamManager(SourceManager):
self.re_reflect_task: Optional[asyncio.Task] = None self.re_reflect_task: Optional[asyncio.Task] = None
self.update_stream_finished_futs: typing.List[asyncio.Future] = [] self.update_stream_finished_futs: typing.List[asyncio.Future] = []
self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {} self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
self.started = asyncio.Event(loop=self.loop) self.started = asyncio.Event()
@property @property
def streams(self): def streams(self):
@ -150,7 +150,7 @@ class StreamManager(SourceManager):
file_info['added_on'], file_info['fully_reflected'] file_info['added_on'], file_info['fully_reflected']
))) )))
if add_stream_tasks: if add_stream_tasks:
await asyncio.gather(*add_stream_tasks, loop=self.loop) await asyncio.gather(*add_stream_tasks)
log.info("Started stream manager with %i files", len(self._sources)) log.info("Started stream manager with %i files", len(self._sources))
if not self.node: if not self.node:
log.info("no DHT node given, resuming downloads trusting that we can contact reflector") log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
@ -159,14 +159,11 @@ class StreamManager(SourceManager):
self.resume_saving_task = asyncio.ensure_future(asyncio.gather( self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
*(self._sources[sd_hash].save_file(file_name, download_directory) *(self._sources[sd_hash].save_file(file_name, download_directory)
for (file_name, download_directory, sd_hash) in to_resume_saving), for (file_name, download_directory, sd_hash) in to_resume_saving),
loop=self.loop
)) ))
async def reflect_streams(self): async def reflect_streams(self):
try: try:
return await self._reflect_streams() return await self._reflect_streams()
except asyncio.CancelledError:
raise
except Exception: except Exception:
log.exception("reflector task encountered an unexpected error!") log.exception("reflector task encountered an unexpected error!")
@ -186,21 +183,21 @@ class StreamManager(SourceManager):
batch.append(self.reflect_stream(stream)) batch.append(self.reflect_stream(stream))
if len(batch) >= self.config.concurrent_reflector_uploads: if len(batch) >= self.config.concurrent_reflector_uploads:
log.debug("waiting for batch of %s reflecting streams", len(batch)) log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch, loop=self.loop) await asyncio.gather(*batch)
log.debug("done processing %s streams", len(batch)) log.debug("done processing %s streams", len(batch))
batch = [] batch = []
if batch: if batch:
log.debug("waiting for batch of %s reflecting streams", len(batch)) log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch, loop=self.loop) await asyncio.gather(*batch)
log.debug("done processing %s streams", len(batch)) log.debug("done processing %s streams", len(batch))
await asyncio.sleep(300, loop=self.loop) await asyncio.sleep(300)
async def start(self): async def start(self):
await super().start() await super().start()
self.re_reflect_task = self.loop.create_task(self.reflect_streams()) self.re_reflect_task = self.loop.create_task(self.reflect_streams())
def stop(self): async def stop(self):
super().stop() await super().stop()
if self.resume_saving_task and not self.resume_saving_task.done(): if self.resume_saving_task and not self.resume_saving_task.done():
self.resume_saving_task.cancel() self.resume_saving_task.cancel()
if self.re_reflect_task and not self.re_reflect_task.done(): if self.re_reflect_task and not self.re_reflect_task.done():
@ -227,7 +224,8 @@ class StreamManager(SourceManager):
) )
return task return task
async def _retriable_reflect_stream(self, stream, host, port): @staticmethod
async def _retriable_reflect_stream(stream, host, port):
sent = await stream.upload_to_reflector(host, port) sent = await stream.upload_to_reflector(host, port)
while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0: while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
stream.reflector_progress = 0 stream.reflector_progress = 0
@ -262,7 +260,7 @@ class StreamManager(SourceManager):
return return
if source.identifier in self.running_reflector_uploads: if source.identifier in self.running_reflector_uploads:
self.running_reflector_uploads[source.identifier].cancel() self.running_reflector_uploads[source.identifier].cancel()
source.stop_tasks() await source.stop_tasks()
if source.identifier in self.streams: if source.identifier in self.streams:
del self.streams[source.identifier] del self.streams[source.identifier]
blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]] blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]]
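The reflector loop keeps its batching but now relies on plain asyncio.gather and asyncio.sleep without a loop argument. A reduced sketch of that batching pattern with a stand-in upload coroutine:

import asyncio

async def upload(stream_id):
    await asyncio.sleep(0)   # stand-in for upload_to_reflector()

async def reflect_all(stream_ids, concurrent_uploads=2):
    batch = []
    for stream_id in stream_ids:
        batch.append(upload(stream_id))
        if len(batch) >= concurrent_uploads:
            await asyncio.gather(*batch)   # wait for the whole batch to finish
            batch = []
    if batch:
        await asyncio.gather(*batch)

asyncio.run(reflect_all(['a', 'b', 'c']))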


@ -301,15 +301,8 @@ class IntegrationTestCase(AsyncioTestCase):
watcher = (ledger or self.ledger).on_transaction.where( watcher = (ledger or self.ledger).on_transaction.where(
lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda
) )
self.conductor.spv_node.server.synchronized.clear() await self.generate(blocks_to_generate)
await self.blockchain.generate(blocks_to_generate)
height = self.blockchain.block_expected
await watcher await watcher
while True:
await self.conductor.spv_node.server.synchronized.wait()
self.conductor.spv_node.server.synchronized.clear()
if self.conductor.spv_node.server.db.db_height >= height:
break
def on_address_update(self, address): def on_address_update(self, address):
return self.ledger.on_transaction.where( return self.ledger.on_transaction.where(
@ -324,15 +317,18 @@ class IntegrationTestCase(AsyncioTestCase):
async def generate(self, blocks): async def generate(self, blocks):
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """ """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block) prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
height = self.blockchain.block_expected
self.conductor.spv_node.server.synchronized.clear() self.conductor.spv_node.server.synchronized.clear()
await self.blockchain.generate(blocks) await self.blockchain.generate(blocks)
height = self.blockchain.block_expected
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
while True: while True:
await self.conductor.spv_node.server.synchronized.wait() await self.conductor.spv_node.server.synchronized.wait()
self.conductor.spv_node.server.synchronized.clear() self.conductor.spv_node.server.synchronized.clear()
if self.conductor.spv_node.server.db.db_height >= height: if self.conductor.spv_node.server.db.db_height < height:
break continue
if self.conductor.spv_node.server._es_height < height:
continue
break
class FakeExchangeRateManager(ExchangeRateManager): class FakeExchangeRateManager(ExchangeRateManager):
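generate() now records the expected block height before mining and keeps waiting until both the wallet server's DB height and its Elasticsearch height reach it, instead of stopping at the first synchronized signal. A sketch of that double condition, assuming a server object with the attributes used in the hunk:

async def wait_until_synced(server, height):
    while True:
        await server.synchronized.wait()
        server.synchronized.clear()
        if server.db.db_height < height:
            continue
        if server._es_height < height:
            continue
        break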


@ -10,47 +10,13 @@ from typing import Optional
import libtorrent import libtorrent
NOTIFICATION_MASKS = [
"error",
"peer",
"port_mapping",
"storage",
"tracker",
"debug",
"status",
"progress",
"ip_block",
"dht",
"stats",
"session_log",
"torrent_log",
"peer_log",
"incoming_request",
"dht_log",
"dht_operation",
"port_mapping_log",
"picker_log",
"file_progress",
"piece_progress",
"upload",
"block_progress"
]
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted? DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted?
libtorrent.add_torrent_params_flags_t.flag_auto_managed libtorrent.add_torrent_params_flags_t.flag_auto_managed
| libtorrent.add_torrent_params_flags_t.flag_update_subscribe | libtorrent.add_torrent_params_flags_t.flag_update_subscribe
) )
def get_notification_type(notification) -> str:
for i, notification_type in enumerate(NOTIFICATION_MASKS):
if (1 << i) & notification:
return notification_type
raise ValueError("unrecognized notification type")
class TorrentHandle: class TorrentHandle:
def __init__(self, loop, executor, handle): def __init__(self, loop, executor, handle):
self._loop = loop self._loop = loop
@ -121,7 +87,7 @@ class TorrentHandle:
self._show_status() self._show_status()
if self.finished.is_set(): if self.finished.is_set():
break break
await asyncio.sleep(0.1, loop=self._loop) await asyncio.sleep(0.1)
async def pause(self): async def pause(self):
await self._loop.run_in_executor( await self._loop.run_in_executor(
@ -156,10 +122,8 @@ class TorrentSession:
async def bind(self, interface: str = '0.0.0.0', port: int = 10889): async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
settings = { settings = {
'listen_interfaces': f"{interface}:{port}", 'listen_interfaces': f"{interface}:{port}",
'enable_outgoing_utp': True, 'enable_natpmp': False,
'enable_incoming_utp': True, 'enable_upnp': False
'enable_outgoing_tcp': False,
'enable_incoming_tcp': False
} }
self._session = await self._loop.run_in_executor( self._session = await self._loop.run_in_executor(
self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member
@ -186,7 +150,7 @@ class TorrentSession:
await self._loop.run_in_executor( await self._loop.run_in_executor(
self._executor, self._pop_alerts self._executor, self._pop_alerts
) )
await asyncio.sleep(1, loop=self._loop) await asyncio.sleep(1)
async def pause(self): async def pause(self):
await self._loop.run_in_executor( await self._loop.run_in_executor(
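bind() now passes a much smaller settings pack (NAT-PMP and UPnP disabled, the uTP/TCP toggles dropped) and still constructs the session off the event loop in an executor, since libtorrent calls block. A hedged sketch of that construction, assuming libtorrent is importable and accepts a settings dict as in the hunk:

import asyncio
from concurrent.futures import ThreadPoolExecutor

import libtorrent

async def make_session(interface='0.0.0.0', port=10889):
    settings = {
        'listen_interfaces': f"{interface}:{port}",
        'enable_natpmp': False,
        'enable_upnp': False,
    }
    loop = asyncio.get_running_loop()
    # blocking constructor, so run it in a worker thread
    return await loop.run_in_executor(ThreadPoolExecutor(1), libtorrent.session, settings)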


@ -36,7 +36,7 @@ class Torrent:
def __init__(self, loop, handle): def __init__(self, loop, handle):
self._loop = loop self._loop = loop
self._handle = handle self._handle = handle
self.finished = asyncio.Event(loop=loop) self.finished = asyncio.Event()
def _threaded_update_status(self): def _threaded_update_status(self):
status = self._handle.status() status = self._handle.status()
@ -58,7 +58,7 @@ class Torrent:
log.info("finished downloading torrent!") log.info("finished downloading torrent!")
await self.pause() await self.pause()
break break
await asyncio.sleep(1, loop=self._loop) await asyncio.sleep(1)
async def pause(self): async def pause(self):
log.info("pause torrent") log.info("pause torrent")


@ -74,7 +74,7 @@ class TorrentSource(ManagedDownloadSource):
def bt_infohash(self): def bt_infohash(self):
return self.identifier return self.identifier
def stop_tasks(self): async def stop_tasks(self):
pass pass
@property @property
@ -118,8 +118,8 @@ class TorrentManager(SourceManager):
async def start(self): async def start(self):
await super().start() await super().start()
def stop(self): async def stop(self):
super().stop() await super().stop()
log.info("finished stopping the torrent manager") log.info("finished stopping the torrent manager")
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False): async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):


@ -450,8 +450,8 @@ def is_running_from_bundle():
class LockWithMetrics(asyncio.Lock): class LockWithMetrics(asyncio.Lock):
def __init__(self, acquire_metric, held_time_metric, loop=None): def __init__(self, acquire_metric, held_time_metric):
super().__init__(loop=loop) super().__init__()
self._acquire_metric = acquire_metric self._acquire_metric = acquire_metric
self._lock_held_time_metric = held_time_metric self._lock_held_time_metric = held_time_metric
self._lock_acquired_time = None self._lock_acquired_time = None
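LockWithMetrics loses its loop parameter for the same asyncio deprecation reason. A minimal, illustrative subclass that only times acquisition; the metric objects of the real class are replaced by a plain attribute here:

import asyncio
import time

class TimedLock(asyncio.Lock):
    def __init__(self):
        super().__init__()   # no loop= argument anymore
        self.last_acquire_seconds = None

    async def acquire(self):
        start = time.perf_counter()
        result = await super().acquire()
        self.last_acquire_seconds = time.perf_counter() - start
        return result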


@ -1064,4 +1064,182 @@ HASHES = {
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80', 1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465', 1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6', 1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
} }

View file

@ -141,7 +141,7 @@ class CoinSelector:
_) -> List[OutputEffectiveAmountEstimator]: _) -> List[OutputEffectiveAmountEstimator]:
""" Accumulate UTXOs at random until there is enough to cover the target. """ """ Accumulate UTXOs at random until there is enough to cover the target. """
target = self.target + self.cost_of_change target = self.target + self.cost_of_change
self.random.shuffle(txos, self.random.random) self.random.shuffle(txos, random=self.random.random) # pylint: disable=deprecated-argument
selection = [] selection = []
amount = 0 amount = 0
for coin in txos: for coin in txos:
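Context for the shuffle change above: Python 3.9 deprecated, and 3.11 removed, the second random argument to random.shuffle(), which is why the keyword form plus a pylint disable appears here. A minimal stand-alone sketch of the forward-compatible spelling, using an illustrative seeded generator rather than the project's self.random:

import random

rng = random.Random(1)      # illustrative seed; the real code uses self.random
txos = list(range(10))      # stand-in for OutputEffectiveAmountEstimator objects

# Deprecated since Python 3.9, removed in 3.11:
# rng.shuffle(txos, random=rng.random)

# Forward-compatible: shuffle through the generator instance itself. Note the
# exact permutation for a given seed may differ from the deprecated call.
rng.shuffle(txos)
print(txos)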

View file

@ -329,10 +329,10 @@ class Ledger(metaclass=LedgerRegistry):
async def start(self): async def start(self):
if not os.path.exists(self.path): if not os.path.exists(self.path):
os.mkdir(self.path) os.mkdir(self.path)
await asyncio.wait([ await asyncio.wait(map(asyncio.create_task, [
self.db.open(), self.db.open(),
self.headers.open() self.headers.open()
]) ]))
fully_synced = self.on_ready.first fully_synced = self.on_ready.first
asyncio.create_task(self.network.start()) asyncio.create_task(self.network.start())
await self.network.on_connected.first await self.network.on_connected.first
@ -466,9 +466,9 @@ class Ledger(metaclass=LedgerRegistry):
async def subscribe_accounts(self): async def subscribe_accounts(self):
if self.network.is_connected and self.accounts: if self.network.is_connected and self.accounts:
log.info("Subscribe to %i accounts", len(self.accounts)) log.info("Subscribe to %i accounts", len(self.accounts))
await asyncio.wait([ await asyncio.wait(map(asyncio.create_task, [
self.subscribe_account(a) for a in self.accounts self.subscribe_account(a) for a in self.accounts
]) ]))
async def subscribe_account(self, account: Account): async def subscribe_account(self, account: Account):
for address_manager in account.address_managers.values(): for address_manager in account.address_managers.values():
@ -722,6 +722,15 @@ class Ledger(metaclass=LedgerRegistry):
return account.address_managers[details['chain']] return account.address_managers[details['chain']]
return None return None
async def broadcast_or_release(self, tx, blocking=False):
try:
await self.broadcast(tx)
except:
await self.release_tx(tx)
raise
if blocking:
await self.wait(tx, timeout=None)
def broadcast(self, tx): def broadcast(self, tx):
# broadcast can't be a retriable call yet # broadcast can't be a retriable call yet
return self.network.broadcast(hexlify(tx.raw).decode()) return self.network.broadcast(hexlify(tx.raw).decode())
@ -929,9 +938,7 @@ class Ledger(metaclass=LedgerRegistry):
"%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ", "%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
account.id, balance, total_receiving, account.receiving.gap, total_change, account.id, balance, total_receiving, account.receiving.gap, total_change,
account.change.gap, channel_count, len(account.channel_keys), claim_count) account.change.gap, channel_count, len(account.channel_keys), claim_count)
except Exception as err: except Exception:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception( log.exception(
'Failed to display wallet state, please file issue ' 'Failed to display wallet state, please file issue '
'for this bug along with the traceback you see below:') 'for this bug along with the traceback you see below:')
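The removed isinstance(err, asyncio.CancelledError) guards above are redundant on Python >= 3.8, where CancelledError derives from BaseException and is therefore no longer caught by except Exception. A small self-contained sketch demonstrating that behaviour:

import asyncio

async def worker():
    try:
        await asyncio.sleep(10)
    except Exception:
        # On Python >= 3.8 this branch is NOT taken on cancellation:
        # CancelledError is a BaseException and propagates past it.
        print("unexpected error")
        raise

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print("cancelled cleanly")

asyncio.run(main())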
@ -954,9 +961,7 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = [p.purchased_claim_id for p in purchases] claim_ids = [p.purchased_claim_id for p in purchases]
try: try:
resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids) resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err: except Exception:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up purchased claim ids:") log.exception("Resolve failed while looking up purchased claim ids:")
resolved = [] resolved = []
lookup = {claim.claim_id: claim for claim in resolved} lookup = {claim.claim_id: claim for claim in resolved}
@ -1036,9 +1041,7 @@ class Ledger(metaclass=LedgerRegistry):
claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset] claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
try: try:
resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids) resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err: except Exception:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up collection claim ids:") log.exception("Resolve failed while looking up collection claim ids:")
return [] return []
claims = [] claims = []

View file

@ -190,8 +190,7 @@ class WalletManager:
'jurisdiction': config.jurisdiction, 'jurisdiction': config.jurisdiction,
'concurrent_hub_requests': config.concurrent_hub_requests, 'concurrent_hub_requests': config.concurrent_hub_requests,
'data_path': config.wallet_dir, 'data_path': config.wallet_dir,
'tx_cache_size': config.transaction_cache_size, 'tx_cache_size': config.transaction_cache_size
'exit_on_disconnect': config.exit_on_disconnect,
} }
if 'LBRY_FEE_PER_NAME_CHAR' in os.environ: if 'LBRY_FEE_PER_NAME_CHAR' in os.environ:
ledger_config['fee_per_name_char'] = int(os.environ.get('LBRY_FEE_PER_NAME_CHAR')) ledger_config['fee_per_name_char'] = int(os.environ.get('LBRY_FEE_PER_NAME_CHAR'))
@ -245,7 +244,6 @@ class WalletManager:
'hub_timeout': self.config.hub_timeout, 'hub_timeout': self.config.hub_timeout,
'concurrent_hub_requests': self.config.concurrent_hub_requests, 'concurrent_hub_requests': self.config.concurrent_hub_requests,
'data_path': self.config.wallet_dir, 'data_path': self.config.wallet_dir,
'exit_on_disconnect': self.config.exit_on_disconnect,
} }
if Config.lbryum_servers.is_set(self.config): if Config.lbryum_servers.is_set(self.config):
self.ledger.config['explicit_servers'] = self.config.lbryum_servers self.ledger.config['explicit_servers'] = self.config.lbryum_servers
@ -319,10 +317,4 @@ class WalletManager:
) )
async def broadcast_or_release(self, tx, blocking=False): async def broadcast_or_release(self, tx, blocking=False):
try: await self.ledger.broadcast_or_release(tx, blocking=blocking)
await self.ledger.broadcast(tx)
except:
await self.ledger.release_tx(tx)
raise
if blocking:
await self.ledger.wait(tx, timeout=None)

View file

@ -3,7 +3,6 @@ import asyncio
import json import json
import socket import socket
import random import random
import sys
from time import perf_counter from time import perf_counter
from collections import defaultdict from collections import defaultdict
from typing import Dict, Optional, Tuple from typing import Dict, Optional, Tuple
@ -118,7 +117,7 @@ class ClientSession(BaseClientSession):
) )
else: else:
await asyncio.sleep(max(0, max_idle - (now - self.last_send))) await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
except Exception as err: except (Exception, asyncio.CancelledError) as err:
if isinstance(err, asyncio.CancelledError): if isinstance(err, asyncio.CancelledError):
log.info("closing connection to %s:%i", *self.server) log.info("closing connection to %s:%i", *self.server)
else: else:
@ -198,10 +197,6 @@ class Network:
def jurisdiction(self): def jurisdiction(self):
return self.config.get("jurisdiction") return self.config.get("jurisdiction")
@property
def exit_on_disconnect(self):
return self.config["exit_on_disconnect"]
def disconnect(self): def disconnect(self):
if self._keepalive_task and not self._keepalive_task.done(): if self._keepalive_task and not self._keepalive_task.done():
self._keepalive_task.cancel() self._keepalive_task.cancel()
@ -219,7 +214,7 @@ class Network:
def loop_task_done_callback(f): def loop_task_done_callback(f):
try: try:
f.result() f.result()
except Exception: except (Exception, asyncio.CancelledError):
if self.running: if self.running:
log.exception("wallet server connection loop crashed") log.exception("wallet server connection loop crashed")
@ -317,7 +312,8 @@ class Network:
sleep_delay = 30 sleep_delay = 30
while self.running: while self.running:
await asyncio.wait( await asyncio.wait(
[asyncio.sleep(30), self._urgent_need_reconnect.wait()], return_when=asyncio.FIRST_COMPLETED map(asyncio.create_task, [asyncio.sleep(30), self._urgent_need_reconnect.wait()]),
return_when=asyncio.FIRST_COMPLETED
) )
if self._urgent_need_reconnect.is_set(): if self._urgent_need_reconnect.is_set():
sleep_delay = 30 sleep_delay = 30
@ -343,7 +339,7 @@ class Network:
try: try:
if not self._urgent_need_reconnect.is_set(): if not self._urgent_need_reconnect.is_set():
await asyncio.wait( await asyncio.wait(
[self._keepalive_task, self._urgent_need_reconnect.wait()], [self._keepalive_task, asyncio.create_task(self._urgent_need_reconnect.wait())],
return_when=asyncio.FIRST_COMPLETED return_when=asyncio.FIRST_COMPLETED
) )
else: else:
@ -378,13 +374,7 @@ class Network:
def rpc(self, list_or_method, args, restricted=True, session: Optional[ClientSession] = None): def rpc(self, list_or_method, args, restricted=True, session: Optional[ClientSession] = None):
if session or self.is_connected: if session or self.is_connected:
session = session or self.client session = session or self.client
try: return session.send_request(list_or_method, args)
return session.send_request(list_or_method, args)
except asyncio.TimeoutError:
if self.exit_on_disconnect:
log.error("exiting on server disconnect")
sys.exit(1)
raise
else: else:
self._urgent_need_reconnect.set() self._urgent_need_reconnect.set()
raise ConnectionError("Attempting to send rpc request when connection is not available.") raise ConnectionError("Attempting to send rpc request when connection is not available.")
@ -398,16 +388,9 @@ class Network:
try: try:
return await function(*args, **kwargs) return await function(*args, **kwargs)
except asyncio.TimeoutError: except asyncio.TimeoutError:
if self.exit_on_disconnect: log.warning("Wallet server call timed out, retrying.")
log.error("Wallet server call timed out, exiting on server disconnect.")
sys.exit(1)
else:
log.warning("Wallet server call timed out, retrying.")
except ConnectionError: except ConnectionError:
log.warning("connection error") log.warning("connection error")
if self.exit_on_disconnect:
log.error("exiting on server disconnect")
sys.exit(1)
raise asyncio.CancelledError() # if we got here, we are shutting down raise asyncio.CancelledError() # if we got here, we are shutting down
def _update_remote_height(self, header_args): def _update_remote_height(self, header_args):
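With exit_on_disconnect removed, timeouts and connection errors in the hunks above are logged and retried instead of terminating the process. A hedged sketch of that retry shape; the function name, signature, and running() callable are illustrative, not the project's exact API:

import asyncio
import logging

log = logging.getLogger(__name__)

async def retriable_call(function, *args, running=lambda: True, **kwargs):
    # Keep retrying transient failures while the network is marked as running.
    while running():
        try:
            return await function(*args, **kwargs)
        except asyncio.TimeoutError:
            log.warning("Wallet server call timed out, retrying.")
        except ConnectionError:
            log.warning("connection error")
    raise asyncio.CancelledError()  # if we got here, we are shutting down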

View file

@ -214,6 +214,7 @@ class SPVNode:
self.port = 50001 + node_number # avoid conflict with default daemon self.port = 50001 + node_number # avoid conflict with default daemon
self.udp_port = self.port self.udp_port = self.port
self.elastic_notifier_port = 19080 + node_number self.elastic_notifier_port = 19080 + node_number
self.elastic_services = f'localhost:9200/localhost:{self.elastic_notifier_port}'
self.session_timeout = 600 self.session_timeout = 600
self.stopped = True self.stopped = True
self.index_name = uuid4().hex self.index_name = uuid4().hex
@ -235,7 +236,7 @@ class SPVNode:
'host': self.hostname, 'host': self.hostname,
'tcp_port': self.port, 'tcp_port': self.port,
'udp_port': self.udp_port, 'udp_port': self.udp_port,
'elastic_notifier_port': self.elastic_notifier_port, 'elastic_services': self.elastic_services,
'session_timeout': self.session_timeout, 'session_timeout': self.session_timeout,
'max_query_workers': 0, 'max_query_workers': 0,
'es_index_prefix': self.index_name, 'es_index_prefix': self.index_name,
@ -263,8 +264,7 @@ class SPVNode:
await self.server.start() await self.server.start()
except Exception as e: except Exception as e:
self.stopped = True self.stopped = True
if not isinstance(e, asyncio.CancelledError): log.exception("failed to start spv node")
log.exception("failed to start spv node")
raise e raise e
async def stop(self, cleanup=True): async def stop(self, cleanup=True):

View file

@ -395,8 +395,8 @@ class RPCSession(SessionBase):
namespace=NAMESPACE, labelnames=("version",) namespace=NAMESPACE, labelnames=("version",)
) )
def __init__(self, *, framer=None, loop=None, connection=None): def __init__(self, *, framer=None, connection=None):
super().__init__(framer=framer, loop=loop) super().__init__(framer=framer)
self.connection = connection or self.default_connection() self.connection = connection or self.default_connection()
self.client_version = 'unknown' self.client_version = 'unknown'

View file

@ -2,6 +2,7 @@ import asyncio
import logging import logging
from lbry.error import ( from lbry.error import (
InsufficientFundsError,
ServerPaymentFeeAboveMaxAllowedError, ServerPaymentFeeAboveMaxAllowedError,
ServerPaymentInvalidAddressError, ServerPaymentInvalidAddressError,
ServerPaymentWalletLockedError ServerPaymentWalletLockedError
@ -24,41 +25,66 @@ class WalletServerPayer:
self.max_fee = max_fee self.max_fee = max_fee
self._on_payment_controller = StreamController() self._on_payment_controller = StreamController()
self.on_payment = self._on_payment_controller.stream self.on_payment = self._on_payment_controller.stream
self.on_payment.listen(None, on_error=lambda e: logging.warning(e.args[0])) self.on_payment.listen(None, on_error=lambda e: log.warning(e.args[0]))
async def pay(self): async def pay(self):
while self.running:
try:
await self._pay()
except (asyncio.TimeoutError, ConnectionError):
if not self.running:
break
delay = max(self.payment_period / 24, 10)
log.warning("Payement failed. Will retry after %g seconds.", delay)
asyncio.sleep(delay)
except BaseException as e:
if not isinstance(e, asyncio.CancelledError):
log.exception("Unexpected exception. Payment task exiting early.")
self.running = False
raise
async def _pay(self):
while self.running: while self.running:
await asyncio.sleep(self.payment_period) await asyncio.sleep(self.payment_period)
features = await self.ledger.network.retriable_call(self.ledger.network.get_server_features) features = await self.ledger.network.get_server_features()
log.debug("pay loop: received server features: %s", str(features))
address = features['payment_address'] address = features['payment_address']
amount = str(features['daily_fee']) amount = str(features['daily_fee'])
if not address or not amount: if not address or not amount:
log.debug("pay loop: no address or no amount")
continue continue
if not self.ledger.is_pubkey_address(address): if not self.ledger.is_pubkey_address(address):
log.info("pay loop: address not pubkey")
self._on_payment_controller.add_error(ServerPaymentInvalidAddressError(address)) self._on_payment_controller.add_error(ServerPaymentInvalidAddressError(address))
continue continue
if self.wallet.is_locked: if self.wallet.is_locked:
log.info("pay loop: wallet is locked")
self._on_payment_controller.add_error(ServerPaymentWalletLockedError()) self._on_payment_controller.add_error(ServerPaymentWalletLockedError())
continue continue
amount = lbc_to_dewies(features['daily_fee']) # check that this is in lbc and not dewies amount = lbc_to_dewies(features['daily_fee']) # check that this is in lbc and not dewies
limit = lbc_to_dewies(self.max_fee) limit = lbc_to_dewies(self.max_fee)
if amount > limit: if amount > limit:
log.info("pay loop: amount (%d) > limit (%d)", amount, limit)
self._on_payment_controller.add_error( self._on_payment_controller.add_error(
ServerPaymentFeeAboveMaxAllowedError(features['daily_fee'], self.max_fee) ServerPaymentFeeAboveMaxAllowedError(features['daily_fee'], self.max_fee)
) )
continue continue
tx = await Transaction.create( try:
[], tx = await Transaction.create(
[Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))], [],
self.wallet.get_accounts_or_all(None), [Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))],
self.wallet.get_account_or_default(None) self.wallet.get_accounts_or_all(None),
) self.wallet.get_account_or_default(None)
)
except InsufficientFundsError:
self._on_payment_controller.add_error(InsufficientFundsError())
continue
await self.ledger.broadcast(tx) await self.ledger.broadcast_or_release(tx, blocking=True)
if self.analytics_manager: if self.analytics_manager:
await self.analytics_manager.send_credits_sent() await self.analytics_manager.send_credits_sent()
self._on_payment_controller.add(tx) self._on_payment_controller.add(tx)
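The restructuring above splits the payment loop into pay(), a supervising retry loop, and _pay(), the actual payment pass. A minimal sketch of that supervisor pattern with illustrative names (step, period, running) rather than the class's attributes:

import asyncio
import logging

log = logging.getLogger(__name__)

async def supervise(step, period=86400, running=lambda: True):
    while running():
        try:
            await step()
        except (asyncio.TimeoutError, ConnectionError):
            if not running():
                break
            # Back off for a fraction of the payment period, but at least 10s.
            delay = max(period / 24, 10)
            log.warning("Payment failed. Will retry after %g seconds.", delay)
            await asyncio.sleep(delay)
        except asyncio.CancelledError:
            raise
        except Exception:
            log.exception("Unexpected exception. Payment task exiting early.")
            raise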
@ -70,7 +96,18 @@ class WalletServerPayer:
self.wallet = wallet self.wallet = wallet
self.running = True self.running = True
self.task = asyncio.ensure_future(self.pay()) self.task = asyncio.ensure_future(self.pay())
self.task.add_done_callback(lambda _: log.info("Stopping wallet server payments.")) self.task.add_done_callback(self._done_callback)
def _done_callback(self, f):
if f.cancelled():
reason = "Cancelled"
elif f.exception():
reason = f'Exception: {f.exception()}'
elif not self.running:
reason = "Stopped"
else:
reason = ""
log.info("Stopping wallet server payments. %s", reason)
async def stop(self): async def stop(self):
if self.running: if self.running:

View file

@ -10,6 +10,7 @@ from collections import UserDict
from hashlib import sha256 from hashlib import sha256
from operator import attrgetter from operator import attrgetter
from lbry.crypto.crypt import better_aes_encrypt, better_aes_decrypt from lbry.crypto.crypt import better_aes_encrypt, better_aes_decrypt
from lbry.error import InvalidPasswordError
from .account import Account from .account import Account
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
@ -138,6 +139,10 @@ class Wallet:
'accounts': [a.to_dict(encrypt_password) for a in self.accounts] 'accounts': [a.to_dict(encrypt_password) for a in self.accounts]
} }
def to_json(self):
assert not self.is_locked, "Cannot serialize a wallet with locked/encrypted accounts."
return json.dumps(self.to_dict())
def save(self): def save(self):
if self.preferences.get(ENCRYPT_ON_DISK, False): if self.preferences.get(ENCRYPT_ON_DISK, False):
if self.encryption_password is not None: if self.encryption_password is not None:
@ -164,21 +169,32 @@ class Wallet:
def pack(self, password): def pack(self, password):
assert not self.is_locked, "Cannot pack a wallet with locked/encrypted accounts." assert not self.is_locked, "Cannot pack a wallet with locked/encrypted accounts."
new_data = json.dumps(self.to_dict()) new_data_compressed = zlib.compress(self.to_json().encode())
new_data_compressed = zlib.compress(new_data.encode())
return better_aes_encrypt(password, new_data_compressed) return better_aes_encrypt(password, new_data_compressed)
@classmethod @classmethod
def unpack(cls, password, encrypted): def unpack(cls, password, encrypted):
decrypted = better_aes_decrypt(password, encrypted) decrypted = better_aes_decrypt(password, encrypted)
decompressed = zlib.decompress(decrypted) try:
decompressed = zlib.decompress(decrypted)
except zlib.error as e:
if "incorrect header check" in e.args[0].lower():
raise InvalidPasswordError()
if "unknown compression method" in e.args[0].lower():
raise InvalidPasswordError()
if "invalid window size" in e.args[0].lower():
raise InvalidPasswordError()
raise
return json.loads(decompressed) return json.loads(decompressed)
def merge(self, manager: 'WalletManager', def merge(self, manager: 'WalletManager',
password: str, data: str) -> (List['Account'], List['Account']): password: str, data: str) -> (List['Account'], List['Account']):
assert not self.is_locked, "Cannot sync apply on a locked wallet." assert not self.is_locked, "Cannot sync apply on a locked wallet."
added_accounts, merged_accounts = [], [] added_accounts, merged_accounts = [], []
decrypted_data = self.unpack(password, data) if password is None:
decrypted_data = json.loads(data)
else:
decrypted_data = self.unpack(password, data)
self.preferences.merge(decrypted_data.get('preferences', {})) self.preferences.merge(decrypted_data.get('preferences', {}))
for account_dict in decrypted_data['accounts']: for account_dict in decrypted_data['accounts']:
ledger = manager.get_or_create_ledger(account_dict['ledger']) ledger = manager.get_or_create_ledger(account_dict['ledger'])
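The unpack() change above maps the zlib failures produced by decrypting with a wrong password to InvalidPasswordError. A self-contained sketch of that mapping; the local InvalidPasswordError stub stands in for the one imported from lbry.error:

import json
import zlib

class InvalidPasswordError(Exception):   # stand-in for lbry.error.InvalidPasswordError
    pass

def unpack_payload(decrypted: bytes) -> dict:
    # Bytes decrypted with a wrong password are almost never a valid zlib
    # stream, so zlib reports one of a few recognizable errors.
    try:
        decompressed = zlib.decompress(decrypted)
    except zlib.error as e:
        msg = e.args[0].lower()
        if ("incorrect header check" in msg
                or "unknown compression method" in msg
                or "invalid window size" in msg):
            raise InvalidPasswordError() from e
        raise
    return json.loads(decompressed)

print(unpack_payload(zlib.compress(json.dumps({"accounts": []}).encode())))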

scripts/dht_crawler.py (new file, 520 lines)
View file

@ -0,0 +1,520 @@
import sys
import datetime
import logging
import asyncio
import os.path
import random
import time
import typing
from dataclasses import dataclass, astuple, replace
from aiohttp import web
from prometheus_client import Gauge, generate_latest as prom_generate_latest, Counter, Histogram
import lbry.dht.error
from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import make_kademlia_peer, PeerManager, decode_tcp_peer_from_compact_address
from lbry.dht.protocol.distance import Distance
from lbry.dht.protocol.iterative_find import FindValueResponse, FindNodeResponse, FindResponse
from lbry.extras.daemon.storage import SQLiteMixin
from lbry.conf import Config
from lbry.utils import resolve_host
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)
class SDHashSamples:
def __init__(self, samples_file_path):
with open(samples_file_path, "rb") as sample_file:
self._samples = sample_file.read()
assert len(self._samples) % 48 == 0
self.size = len(self._samples) // 48
def read_samples(self, count=1):
for _ in range(count):
offset = 48 * random.randrange(0, self.size)
yield self._samples[offset:offset + 48]
class PeerStorage(SQLiteMixin):
CREATE_TABLES_QUERY = """
PRAGMA JOURNAL_MODE=WAL;
CREATE TABLE IF NOT EXISTS peer (
peer_id INTEGER NOT NULL,
node_id VARCHAR(96),
address VARCHAR,
udp_port INTEGER,
tcp_port INTEGER,
first_online DATETIME,
errors INTEGER,
last_churn INTEGER,
added_on DATETIME NOT NULL,
last_check DATETIME,
last_seen DATETIME,
latency INTEGER,
PRIMARY KEY (peer_id)
);
CREATE TABLE IF NOT EXISTS connection (
from_peer_id INTEGER NOT NULL,
to_peer_id INTEGER NOT NULL,
PRIMARY KEY (from_peer_id, to_peer_id),
FOREIGN KEY(from_peer_id) REFERENCES peer (peer_id),
FOREIGN KEY(to_peer_id) REFERENCES peer (peer_id)
);
"""
async def open(self):
await super().open()
self.db.writer_connection.row_factory = dict_row_factory
async def all_peers(self):
return [
DHTPeer(**peer) for peer in await self.db.execute_fetchall(
"select * from peer where latency > 0 or last_seen > datetime('now', '-1 hour')")
]
async def save_peers(self, *peers):
log.info("Saving graph nodes (peers) to DB")
await self.db.executemany(
"INSERT OR REPLACE INTO peer("
"node_id, address, udp_port, tcp_port, first_online, errors, last_churn,"
"added_on, last_check, last_seen, latency, peer_id) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",
[astuple(peer) for peer in peers]
)
log.info("Finished saving graph nodes (peers) to DB")
async def save_connections(self, connections_map):
log.info("Saving graph edges (connections) to DB")
await self.db.executemany(
"DELETE FROM connection WHERE from_peer_id = ?", [(key,) for key in connections_map])
for from_peer_id in connections_map:
await self.db.executemany(
"INSERT INTO connection(from_peer_id, to_peer_id) VALUES(?,?)",
[(from_peer_id, to_peer_id) for to_peer_id in connections_map[from_peer_id]])
log.info("Finished saving graph edges (connections) to DB")
@dataclass(frozen=True)
class DHTPeer:
node_id: str
address: str
udp_port: int
tcp_port: int = None
first_online: datetime.datetime = None
errors: int = None
last_churn: int = None
added_on: datetime.datetime = None
last_check: datetime.datetime = None
last_seen: datetime.datetime = None
latency: int = None
peer_id: int = None
@classmethod
def from_kad_peer(cls, peer, peer_id):
node_id = peer.node_id.hex() if peer.node_id else None
return DHTPeer(
node_id=node_id, address=peer.address, udp_port=peer.udp_port, tcp_port=peer.tcp_port,
peer_id=peer_id, added_on=datetime.datetime.utcnow())
def to_kad_peer(self):
node_id = bytes.fromhex(self.node_id) if self.node_id else None
return make_kademlia_peer(node_id, self.address, self.udp_port, self.tcp_port)
def new_node(address="0.0.0.0", udp_port=0, node_id=None):
node_id = node_id or generate_id()
loop = asyncio.get_event_loop()
return Node(loop, PeerManager(loop), node_id, udp_port, udp_port, 3333, address)
class Crawler:
unique_total_hosts_metric = Gauge(
"unique_total_hosts", "Number of unique hosts seen in the last interval", namespace="dht_crawler_node",
)
reachable_hosts_metric = Gauge(
"reachable_hosts", "Number of hosts that replied in the last interval", namespace="dht_crawler_node",
)
total_historic_hosts_metric = Gauge(
"history_total_hosts", "Number of hosts seen since first run.", namespace="dht_crawler_node",
)
pending_check_hosts_metric = Gauge(
"pending_hosts", "Number of hosts on queue to be checked.", namespace="dht_crawler_node",
)
hosts_with_errors_metric = Gauge(
"error_hosts", "Number of hosts that raised errors during contact.", namespace="dht_crawler_node",
)
ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS = tuple(map(float, range(100))) + (
500., 1000., 2000., float('inf')
)
connections_found_metric = Histogram(
"connections_found", "Number of hosts returned by the last successful contact.", namespace="dht_crawler_node",
buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
known_connections_found_metric = Histogram(
"known_connections_found", "Number of already known hosts returned by last contact.",
namespace="dht_crawler_node", buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
reachable_connections_found_metric = Histogram(
"reachable_connections_found", "Number of reachable known hosts returned by last contact.",
namespace="dht_crawler_node", buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
LATENCY_HISTOGRAM_BUCKETS = (
0., 5., 10., 15., 30., 60., 120., 180., 240., 300., 600., 1200., 1800., 4000., 6000., float('inf')
)
host_latency_metric = Histogram(
"host_latency", "Time spent on the last request, in milliseconds.", namespace="dht_crawler_node",
buckets=LATENCY_HISTOGRAM_BUCKETS
)
probed_streams_metric = Counter(
"probed_streams", "Amount of streams probed.", namespace="dht_crawler_node",
)
announced_streams_metric = Counter(
"announced_streams", "Amount of streams where announcements were found.", namespace="dht_crawler_node",
)
working_streams_metric = Counter(
"working_streams", "Amount of streams with reachable hosts.", namespace="dht_crawler_node",
)
def __init__(self, db_path: str, sd_hash_samples: SDHashSamples):
self.node = new_node()
self.db = PeerStorage(db_path)
self.sd_hashes = sd_hash_samples
self._memory_peers = {}
self._reachable_by_node_id = {}
self._connections = {}
async def open(self):
await self.db.open()
self._memory_peers = {
(peer.address, peer.udp_port): peer for peer in await self.db.all_peers()
}
self.refresh_reachable_set()
def refresh_reachable_set(self):
self._reachable_by_node_id = {
bytes.fromhex(peer.node_id): peer for peer in self._memory_peers.values() if (peer.latency or 0) > 0
}
async def probe_files(self):
if not self.sd_hashes:
return
while True:
for sd_hash in self.sd_hashes.read_samples(10_000):
self.refresh_reachable_set()
distance = Distance(sd_hash)
node_ids = list(self._reachable_by_node_id.keys())
node_ids.sort(key=lambda node_id: distance(node_id))
k_closest = [self._reachable_by_node_id[node_id] for node_id in node_ids[:8]]
found = False
working = False
for response in asyncio.as_completed(
[self.request_peers(peer.address, peer.udp_port, peer.node_id, sd_hash) for peer in k_closest]):
response = await response
if response and response.found:
found = True
blob_peers = []
for compact_addr in response.found_compact_addresses:
try:
blob_peers.append(decode_tcp_peer_from_compact_address(compact_addr))
except ValueError as e:
log.error("Error decoding compact peers: %s", e)
for blob_peer in blob_peers:
response = await self.request_peers(blob_peer.address, blob_peer.tcp_port, blob_peer.node_id, sd_hash)
if response:
working = True
log.info("Found responsive peer for %s: %s:%d(%d)",
sd_hash.hex()[:8], blob_peer.address,
blob_peer.udp_port or -1, blob_peer.tcp_port or -1)
else:
log.info("Found dead peer for %s: %s:%d(%d)",
sd_hash.hex()[:8], blob_peer.address,
blob_peer.udp_port or -1, blob_peer.tcp_port or -1)
self.probed_streams_metric.inc()
if found:
self.announced_streams_metric.inc()
if working:
self.working_streams_metric.inc()
log.info("Done querying stream %s for peers. Found: %s, working: %s", sd_hash.hex()[:8], found, working)
await asyncio.sleep(.5)
@property
def refresh_limit(self):
return datetime.datetime.utcnow() - datetime.timedelta(hours=1)
@property
def all_peers(self):
return [
peer for peer in self._memory_peers.values()
if (peer.last_seen and peer.last_seen > self.refresh_limit) or (peer.latency or 0) > 0
]
@property
def active_peers_count(self):
return len(self.all_peers)
@property
def checked_peers_count(self):
return len([peer for peer in self.all_peers if peer.last_check and peer.last_check > self.refresh_limit])
@property
def unreachable_peers_count(self):
return len([peer for peer in self.all_peers
if peer.last_check and peer.last_check > self.refresh_limit and not peer.latency])
@property
def peers_with_errors_count(self):
return len([peer for peer in self.all_peers if (peer.errors or 0) > 0])
def get_peers_needing_check(self):
to_check = [peer for peer in self.all_peers if peer.last_check is None or peer.last_check < self.refresh_limit]
return to_check
def remove_expired_peers(self):
for key, peer in list(self._memory_peers.items()):
if (peer.latency or 0) < 1 and peer.last_seen < self.refresh_limit:
del self._memory_peers[key]
def add_peers(self, *peers):
for peer in peers:
db_peer = self.get_from_peer(peer)
if db_peer and db_peer.node_id is None and peer.node_id is not None:
db_peer = replace(db_peer, node_id=peer.node_id.hex())
elif not db_peer:
db_peer = DHTPeer.from_kad_peer(peer, len(self._memory_peers) + 1)
db_peer = replace(db_peer, last_seen=datetime.datetime.utcnow())
self._memory_peers[(peer.address, peer.udp_port)] = db_peer
async def flush_to_db(self):
await self.db.save_peers(*self._memory_peers.values())
connections_to_save = self._connections
self._connections = {}
# await self.db.save_connections(connections_to_save) heavy call
self.remove_expired_peers()
def get_from_peer(self, peer):
return self._memory_peers.get((peer.address, peer.udp_port), None)
def set_latency(self, peer, latency=None):
if latency:
self.host_latency_metric.observe(latency / 1_000_000.0)
db_peer = self.get_from_peer(peer)
if not db_peer:
return
db_peer = replace(db_peer, latency=latency)
if not db_peer.node_id and peer.node_id:
db_peer = replace(db_peer, node_id=peer.node_id.hex())
if db_peer.first_online and latency is None:
db_peer = replace(db_peer, last_churn=(datetime.datetime.utcnow() - db_peer.first_online).seconds)
elif latency is not None and db_peer.first_online is None:
db_peer = replace(db_peer, first_online=datetime.datetime.utcnow())
db_peer = replace(db_peer, last_check=datetime.datetime.utcnow())
self._memory_peers[(db_peer.address, db_peer.udp_port)] = db_peer
def inc_errors(self, peer):
db_peer = self.get_from_peer(peer)
self._memory_peers[(peer.address, peer.udp_port)] = replace(db_peer, errors=(db_peer.errors or 0) + 1)
def associate_peers(self, peer, other_peers):
self._connections[self.get_from_peer(peer).peer_id] = [
self.get_from_peer(other_peer).peer_id for other_peer in other_peers]
async def request_peers(self, host, port, node_id, key=None) -> typing.Optional[FindResponse]:
key = key or node_id
peer = make_kademlia_peer(key, await resolve_host(host, port, 'udp'), port)
for attempt in range(3):
try:
req_start = time.perf_counter_ns()
if key == node_id:
response = await self.node.protocol.get_rpc_peer(peer).find_node(key)
response = FindNodeResponse(key, response)
latency = time.perf_counter_ns() - req_start
self.set_latency(peer, latency)
else:
response = await self.node.protocol.get_rpc_peer(peer).find_value(key)
response = FindValueResponse(key, response)
await asyncio.sleep(0.05)
return response
except asyncio.TimeoutError:
if key == node_id:
self.set_latency(peer, None)
continue
except lbry.dht.error.TransportNotConnected:
log.info("Transport unavailable, waiting 1s to retry")
await asyncio.sleep(1)
except lbry.dht.error.RemoteException as e:
log.info('Peer errored: %s:%d attempt #%d - %s',
host, port, (attempt + 1), str(e))
if key == node_id:
self.inc_errors(peer)
self.set_latency(peer, None)
continue
async def crawl_routing_table(self, host, port, node_id=None):
start = time.time()
log.debug("querying %s:%d", host, port)
address = await resolve_host(host, port, 'udp')
key = node_id or self.node.protocol.peer_manager.get_node_id_for_endpoint(address, port)
peer = make_kademlia_peer(key, address, port)
self.add_peers(peer)
if not key:
latency = None
for _ in range(3):
try:
ping_start = time.perf_counter_ns()
await self.node.protocol.get_rpc_peer(peer).ping()
await asyncio.sleep(0.05)
key = key or self.node.protocol.peer_manager.get_node_id_for_endpoint(address, port)
peer = make_kademlia_peer(key, address, port)
latency = time.perf_counter_ns() - ping_start
break
except asyncio.TimeoutError:
pass
except lbry.dht.error.RemoteException:
self.inc_errors(peer)
pass
self.set_latency(peer, latency if peer.node_id else None)
if not latency or not peer.node_id:
if latency and not peer.node_id:
log.warning("No node id from %s:%d", host, port)
return set()
distance = Distance(key)
max_distance = int.from_bytes(bytes([0xff] * 48), 'big')
peers = set()
factor = 2048
for i in range(1000):
response = await self.request_peers(address, port, key)
new_peers = list(response.get_close_kademlia_peers(peer)) if response else None
if not new_peers:
break
new_peers.sort(key=lambda peer: distance(peer.node_id))
peers.update(new_peers)
far_key = new_peers[-1].node_id
if distance(far_key) <= distance(key):
current_distance = distance(key)
next_jump = current_distance + int(max_distance // factor) # jump closer
factor /= 2
if factor > 8 and next_jump < max_distance:
key = int.from_bytes(peer.node_id, 'big') ^ next_jump
if key.bit_length() > 384:
break
key = key.to_bytes(48, 'big')
else:
break
else:
key = far_key
factor = 2048
if peers:
log.info("Done querying %s:%d in %.2f seconds: %d peers found over %d requests.",
host, port, (time.time() - start), len(peers), i)
if peers:
self.connections_found_metric.observe(len(peers))
known_peers = 0
reachable_connections = 0
for peer in peers:
known_peer = self.get_from_peer(peer)
known_peers += 1 if known_peer else 0
reachable_connections += 1 if known_peer and (known_peer.latency or 0) > 0 else 0
self.known_connections_found_metric.observe(known_peers)
self.reachable_connections_found_metric.observe(reachable_connections)
self.add_peers(*peers)
self.associate_peers(peer, peers)
return peers
async def process(self):
to_process = {}
def submit(_peer):
f = asyncio.ensure_future(
self.crawl_routing_table(_peer.address, _peer.udp_port, bytes.fromhex(_peer.node_id)))
to_process[_peer.peer_id] = f
f.add_done_callback(lambda _: to_process.pop(_peer.peer_id))
to_check = self.get_peers_needing_check()
last_flush = datetime.datetime.utcnow()
while True:
for peer in to_check[:200]:
if peer.peer_id not in to_process:
submit(peer)
await asyncio.sleep(.05)
await asyncio.sleep(0)
self.unique_total_hosts_metric.set(self.checked_peers_count)
self.reachable_hosts_metric.set(self.checked_peers_count - self.unreachable_peers_count)
self.total_historic_hosts_metric.set(len(self._memory_peers))
self.pending_check_hosts_metric.set(len(to_check))
self.hosts_with_errors_metric.set(self.peers_with_errors_count)
log.info("%d known, %d contacted recently, %d unreachable, %d error, %d processing, %d on queue",
self.active_peers_count, self.checked_peers_count, self.unreachable_peers_count,
self.peers_with_errors_count, len(to_process), len(to_check))
if to_process:
await asyncio.wait(to_process.values(), return_when=asyncio.FIRST_COMPLETED)
to_check = self.get_peers_needing_check()
if (datetime.datetime.utcnow() - last_flush).seconds > 60:
log.info("flushing to db")
await self.flush_to_db()
last_flush = datetime.datetime.utcnow()
while not to_check and not to_process:
port = self.node.listening_port.get_extra_info('socket').getsockname()[1]
self.node.stop()
await self.node.start_listening()
log.info("Idle, sleeping a minute. Port changed to %d", port)
await asyncio.sleep(60.0)
to_check = self.get_peers_needing_check()
class SimpleMetrics:
def __init__(self, port):
self.prometheus_port = port
async def handle_metrics_get_request(self, _):
try:
return web.Response(
text=prom_generate_latest().decode(),
content_type='text/plain; version=0.0.4'
)
except Exception:
log.exception('could not generate prometheus data')
raise
async def start(self):
prom_app = web.Application()
prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
metrics_runner = web.AppRunner(prom_app)
await metrics_runner.setup()
prom_site = web.TCPSite(metrics_runner, "0.0.0.0", self.prometheus_port)
await prom_site.start()
def dict_row_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
if col[0] in ('added_on', 'first_online', 'last_seen', 'last_check'):
d[col[0]] = datetime.datetime.fromisoformat(row[idx]) if row[idx] else None
else:
d[col[0]] = row[idx]
return d
async def test():
db_path = "/tmp/peers.db" if len(sys.argv) == 1 else sys.argv[-1]
asyncio.get_event_loop().set_debug(True)
metrics = SimpleMetrics('8080')
await metrics.start()
conf = Config()
hosting_samples = SDHashSamples("test.sample") if os.path.isfile("test.sample") else None
crawler = Crawler(db_path, hosting_samples)
await crawler.open()
await crawler.flush_to_db()
await crawler.node.start_listening()
if crawler.active_peers_count < 100:
probes = []
for (host, port) in conf.known_dht_nodes:
probes.append(asyncio.create_task(crawler.crawl_routing_table(host, port)))
await asyncio.gather(*probes)
await crawler.flush_to_db()
await asyncio.gather(crawler.process(), crawler.probe_files())
if __name__ == '__main__':
asyncio.run(test())
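Usage note for the new crawler script: going by its __main__ block, it takes an optional SQLite path as the last argument (defaulting to /tmp/peers.db), starts a small metrics endpoint via SimpleMetrics('8080'), and only probes streams when a test.sample file of concatenated 48-byte sd_hash prefixes exists in the working directory; the sampling script further below builds a real one. A hedged sketch of producing a placeholder file with random prefixes, just to satisfy the format SDHashSamples asserts:

import os

# Write 100 fake 48-byte prefixes; SDHashSamples only requires len % 48 == 0.
with open("test.sample", "wb") as f:
    for _ in range(100):
        f.write(os.urandom(48))

print(os.path.getsize("test.sample") % 48 == 0)  # True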

View file

@ -0,0 +1,24 @@
"""
Hook for libtorrent.
"""
import os
import glob
import os.path
from PyInstaller.utils.hooks import get_module_file_attribute
from PyInstaller import compat
def get_binaries():
if compat.is_win:
files = ('c:/Windows/System32/libssl-1_1-x64.dll', 'c:/Windows/System32/libcrypto-1_1-x64.dll')
for file in files:
if not os.path.isfile(file):
print(f"MISSING {file}")
return [(file, '.') for file in files]
return []
binaries = get_binaries()
for file in glob.glob(os.path.join(get_module_file_attribute('libtorrent'), 'libtorrent*pyd*')):
binaries.append((file, 'libtorrent'))

View file

@ -0,0 +1,44 @@
import asyncio
from typing import Iterable
from lbry.extras.daemon.client import daemon_rpc
from lbry.conf import Config
conf = Config()
async def sample_prefix(prefix: bytes):
result = await daemon_rpc(conf, "claim_search", sd_hash=prefix.hex(), page_size=50)
total_pages = result['total_pages']
print(total_pages)
sd_hashes = set()
for page in range(1, total_pages + 1):
if page > 1:
result = await daemon_rpc(conf, "claim_search", sd_hash=prefix.hex(), page=page, page_size=50)
for item in result['items']:
sd_hash = item.get('value', {}).get('source', {}).get('sd_hash')
if not sd_hash:
print('err', item)
continue
sd_hashes.add(sd_hash)
print('page', page, len(sd_hashes))
return sd_hashes
def save_sample(name: str, samples: Iterable[str]):
with open(name, 'wb') as outfile:
for sample in samples:
outfile.write(bytes.fromhex(sample))
outfile.flush()
print(outfile.tell())
async def main():
samples = set()
futs = [asyncio.ensure_future(sample_prefix(bytes([i]))) for i in range(256)]
for i, completed in enumerate(asyncio.as_completed(futs)):
samples.update(await completed)
print(i, len(samples))
print(save_sample("test.sample", samples))
if __name__ == "__main__":
asyncio.run(main())

View file

@ -28,6 +28,7 @@ disable=
no-else-return, no-else-return,
cyclic-import, cyclic-import,
missing-docstring, missing-docstring,
consider-using-f-string,
duplicate-code, duplicate-code,
expression-not-assigned, expression-not-assigned,
inconsistent-return-statements, inconsistent-return-statements,

View file

@ -18,7 +18,7 @@ setup(
long_description_content_type="text/markdown", long_description_content_type="text/markdown",
keywords="lbry protocol media", keywords="lbry protocol media",
license='MIT', license='MIT',
python_requires='>=3.7', python_requires='>=3.8',
packages=find_packages(exclude=('tests',)), packages=find_packages(exclude=('tests',)),
zip_safe=False, zip_safe=False,
entry_points={ entry_points={
@ -36,7 +36,7 @@ setup(
'distro==1.4.0', 'distro==1.4.0',
'base58==1.0.0', 'base58==1.0.0',
'cffi==1.13.2', 'cffi==1.13.2',
'cryptography==2.5', 'cryptography==3.4.7',
'protobuf==3.17.2', 'protobuf==3.17.2',
'prometheus_client==0.7.1', 'prometheus_client==0.7.1',
'ecdsa==0.13.3', 'ecdsa==0.13.3',
@ -46,18 +46,18 @@ setup(
'coincurve==15.0.0', 'coincurve==15.0.0',
'pbkdf2==1.3', 'pbkdf2==1.3',
'filetype==1.0.9', 'filetype==1.0.9',
'libtorrent==2.0.6',
], ],
extras_require={ extras_require={
'torrent': ['lbry-libtorrent'],
'lint': [ 'lint': [
'pylint==2.10.0' 'pylint==2.13.9'
], ],
'test': [ 'test': [
'coverage', 'coverage',
'jsonschema==4.4.0', 'jsonschema==4.4.0',
], ],
'hub': [ 'hub': [
'hub@git+https://github.com/lbryio/hub.git@9e3963ba23abf2f9ace4654089a7df1fd553f121' 'hub@git+https://github.com/lbryio/hub.git@929448d64bcbe6c5e476757ec78456beaa85e56a'
] ]
}, },
classifiers=[ classifiers=[

View file

@ -51,7 +51,8 @@ def mock_network_loop(loop: asyncio.AbstractEventLoop,
return rx.datagram_received(data, from_addr) return rx.datagram_received(data, from_addr)
protocol = proto_lam() protocol = proto_lam()
transport = asyncio.DatagramTransport(extra={'socket': mock_sock}) transport = mock.Mock(spec=asyncio.DatagramTransport)
transport.get_extra_info = lambda k: {'socket': mock_sock}[k]
transport.is_closing = lambda: False transport.is_closing = lambda: False
transport.close = lambda: mock_sock.close() transport.close = lambda: mock_sock.close()
mock_sock.sendto = sendto mock_sock.sendto = sendto
@ -60,16 +61,14 @@ def mock_network_loop(loop: asyncio.AbstractEventLoop,
dht_network[from_addr] = protocol dht_network[from_addr] = protocol
return transport, protocol return transport, protocol
with mock.patch('socket.socket') as mock_socket: mock_sock = mock.Mock(spec=socket.socket)
mock_sock = mock.Mock(spec=socket.socket) mock_sock.setsockopt = lambda *_: None
mock_sock.setsockopt = lambda *_: None mock_sock.bind = lambda *_: None
mock_sock.bind = lambda *_: None mock_sock.setblocking = lambda *_: None
mock_sock.setblocking = lambda *_: None mock_sock.getsockname = lambda: "0.0.0.0"
mock_sock.getsockname = lambda: "0.0.0.0" mock_sock.getpeername = lambda: ""
mock_sock.getpeername = lambda: "" mock_sock.close = lambda: None
mock_sock.close = lambda: None mock_sock.type = socket.SOCK_DGRAM
mock_sock.type = socket.SOCK_DGRAM mock_sock.fileno = lambda: 7
mock_sock.fileno = lambda: 7 loop.create_datagram_endpoint = create_datagram_endpoint
mock_socket.return_value = mock_sock yield
loop.create_datagram_endpoint = create_datagram_endpoint
yield

View file

@ -102,7 +102,7 @@ class ReconnectTests(IntegrationTestCase):
await self.ledger.stop() await self.ledger.stop()
initial_height = self.ledger.local_height_including_downloaded_height initial_height = self.ledger.local_height_including_downloaded_height
await self.blockchain.generate(100) await self.blockchain.generate(100)
while self.conductor.spv_node.server.session_manager.notified_height < initial_height + 99: # off by 1 while self.conductor.spv_node.server.session_manager.notified_height < initial_height + 100:
await asyncio.sleep(0.1) await asyncio.sleep(0.1)
self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height) self.assertEqual(initial_height, self.ledger.local_height_including_downloaded_height)
await self.ledger.headers.open() await self.ledger.headers.open()

View file

@ -1,6 +1,8 @@
import asyncio import asyncio
import json import json
import string
from binascii import unhexlify from binascii import unhexlify
from random import Random
from lbry.wallet import ENCRYPT_ON_DISK from lbry.wallet import ENCRYPT_ON_DISK
from lbry.error import InvalidPasswordError from lbry.error import InvalidPasswordError
@ -22,6 +24,7 @@ class WalletCommands(CommandTestCase):
async def test_wallet_syncing_status(self): async def test_wallet_syncing_status(self):
address = await self.daemon.jsonrpc_address_unused() address = await self.daemon.jsonrpc_address_unused()
await self.ledger._update_tasks.done.wait()
self.assertFalse(self.daemon.jsonrpc_wallet_status()['is_syncing']) self.assertFalse(self.daemon.jsonrpc_wallet_status()['is_syncing'])
await self.send_to_address_and_wait(address, 1) await self.send_to_address_and_wait(address, 1)
await self.ledger._update_tasks.started.wait() await self.ledger._update_tasks.started.wait()
@ -384,9 +387,16 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
data = await daemon2.jsonrpc_sync_apply('password2') data = await daemon2.jsonrpc_sync_apply('password2')
# sync_apply doesn't save password if encrypt-on-disk is False # sync_apply doesn't save password if encrypt-on-disk is False
self.assertEqual(wallet2.encryption_password, None) self.assertEqual(wallet2.encryption_password, None)
# need to use new password2 in sync_apply
with self.assertRaises(InvalidPasswordError): # Need to use new password2 in sync_apply. Attempts with other passwords
await daemon.jsonrpc_sync_apply('password', data=data['data'], blocking=True) # should fail consistently with InvalidPasswordError.
random = Random('password')
for i in range(200):
bad_guess = ''.join(random.choices(string.digits + string.ascii_letters + string.punctuation, k=40))
self.assertNotEqual(bad_guess, 'password2')
with self.assertRaises(InvalidPasswordError):
await daemon.jsonrpc_sync_apply(bad_guess, data=data['data'], blocking=True)
await daemon.jsonrpc_sync_apply('password2', data=data['data'], blocking=True) await daemon.jsonrpc_sync_apply('password2', data=data['data'], blocking=True)
# sync_apply with new password2 also sets it as new local password # sync_apply with new password2 also sets it as new local password
self.assertEqual(wallet.encryption_password, 'password2') self.assertEqual(wallet.encryption_password, 'password2')
@ -481,3 +491,21 @@ class WalletEncryptionAndSynchronization(CommandTestCase):
daemon2.wallet_manager.default_account.channel_keys, daemon2.wallet_manager.default_account.channel_keys,
daemon.wallet_manager.default_wallet.accounts[1].channel_keys daemon.wallet_manager.default_wallet.accounts[1].channel_keys
) )
# test without passwords
data = await daemon2.jsonrpc_wallet_export()
json_data = json.loads(data)
self.assertEqual(json_data["name"], "Wallet")
self.assertNotIn("four", json_data["preferences"])
json_data["preferences"]["four"] = {"value": 4, "ts": 0}
await daemon.jsonrpc_wallet_import(data=json.dumps(json_data), blocking=True)
self.assertEqual(daemon.jsonrpc_preference_get("four"), {"four": 4})
# if password is empty string, export is encrypted
data = await daemon2.jsonrpc_wallet_export(password="")
self.assertNotEqual(data[0], "{")
# if password is empty string, import is decrypted
await daemon.jsonrpc_wallet_import(data, password="")
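The new export/import assertions above rely on the wallet changes earlier in this diff: to_json() serializes an unlocked wallet as plain JSON, and merge() parses the payload directly when no password is given. A minimal sketch of that branch with an illustrative function name:

import json

def load_sync_payload(data: str, password=None, unpack=None):
    # No password: the payload is plain JSON (wallet export without a password).
    # Otherwise it must be decrypted and decompressed first via unpack().
    if password is None:
        return json.loads(data)
    return unpack(password, data)

print(load_sync_payload(json.dumps({"preferences": {}, "accounts": []})))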

View file

@ -3,7 +3,7 @@ import asyncio
from hub.herald import HUB_PROTOCOL_VERSION from hub.herald import HUB_PROTOCOL_VERSION
from hub.herald.session import LBRYElectrumX from hub.herald.session import LBRYElectrumX
from lbry.error import ServerPaymentFeeAboveMaxAllowedError from lbry.error import InsufficientFundsError, ServerPaymentFeeAboveMaxAllowedError
from lbry.wallet.network import ClientSession from lbry.wallet.network import ClientSession
from lbry.wallet.rpc import RPCError from lbry.wallet.rpc import RPCError
from lbry.testcase import IntegrationTestCase, CommandTestCase from lbry.testcase import IntegrationTestCase, CommandTestCase
@ -47,7 +47,8 @@ class TestSessions(IntegrationTestCase):
class TestUsagePayment(CommandTestCase): class TestUsagePayment(CommandTestCase):
async def test_single_server_payment(self): async def test_single_server_payment(self):
wallet_pay_service = self.daemon.component_manager.get_component('wallet_server_payments') wallet_pay_service = self.daemon.component_manager.get_component('wallet_server_payments')
wallet_pay_service.payment_period = 1 self.assertFalse(wallet_pay_service.running)
wallet_pay_service.payment_period = 0.5
# only starts with a positive max key fee # only starts with a positive max key fee
wallet_pay_service.max_fee = "0.0" wallet_pay_service.max_fee = "0.0"
await wallet_pay_service.start(ledger=self.ledger, wallet=self.wallet) await wallet_pay_service.start(ledger=self.ledger, wallet=self.wallet)
@ -86,6 +87,11 @@ class TestUsagePayment(CommandTestCase):
self.assertEqual(tx.outputs[0].amount, 100000000) self.assertEqual(tx.outputs[0].amount, 100000000)
self.assertEqual(tx.outputs[0].get_address(self.ledger), address) self.assertEqual(tx.outputs[0].get_address(self.ledger), address)
# continue paying until account is out of funds
with self.assertRaises(InsufficientFundsError):
for i in range(10):
await asyncio.wait_for(wallet_pay_service.on_payment.first, timeout=30)
self.assertTrue(wallet_pay_service.running)
class TestESSync(CommandTestCase): class TestESSync(CommandTestCase):
async def test_es_sync_utility(self): async def test_es_sync_utility(self):

View file

@ -31,7 +31,7 @@ STREAM_TYPES = {
def verify(channel, data, signature, channel_hash=None): def verify(channel, data, signature, channel_hash=None):
pieces = [ pieces = [
signature['signing_ts'].encode(), signature['salt'].encode(),
channel_hash or channel.claim_hash, channel_hash or channel.claim_hash,
data data
] ]
@ -1239,8 +1239,13 @@ class ChannelCommands(CommandTestCase):
channel = channel_tx.outputs[0] channel = channel_tx.outputs[0]
signature1 = await self.out(self.daemon.jsonrpc_channel_sign(channel_name='@signer', hexdata=data_to_sign)) signature1 = await self.out(self.daemon.jsonrpc_channel_sign(channel_name='@signer', hexdata=data_to_sign))
signature2 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign)) signature2 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign))
signature3 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign, salt='beef'))
signature4 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=data_to_sign, salt='beef'))
self.assertNotEqual(signature2, signature3)
self.assertEqual(signature3, signature4)
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature1)) self.assertTrue(verify(channel, unhexlify(data_to_sign), signature1))
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature2)) self.assertTrue(verify(channel, unhexlify(data_to_sign), signature2))
self.assertTrue(verify(channel, unhexlify(data_to_sign), signature3))
signature3 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=99)) signature3 = await self.out(self.daemon.jsonrpc_channel_sign(channel_id=channel.claim_id, hexdata=99))
self.assertTrue(verify(channel, unhexlify('99'), signature3)) self.assertTrue(verify(channel, unhexlify('99'), signature3))
@ -1597,9 +1602,16 @@ class StreamCommands(ClaimTestCase):
# blocked content is not resolveable # blocked content is not resolveable
error = (await self.resolve('lbry://@some_channel/bad_content'))['error'] error = (await self.resolve('lbry://@some_channel/bad_content'))['error']
self.assertEqual(error['name'], 'BLOCKED') self.assertEqual(error['name'], 'BLOCKED')
self.assertTrue(error['text'].startswith(f"Resolve of 'lbry://@some_channel#{some_channel_id[:1]}/bad_content#{bad_content_id[:1]}' was censored")) self.assertTrue(error['text'].startswith(f"Resolve of 'lbry://@some_channel#{some_channel_id[:1]}/bad_content#{bad_content_id[:1]}' was blocked"))
self.assertTrue(error['censor']['short_url'].startswith('lbry://@blocking#')) self.assertTrue(error['censor']['short_url'].startswith('lbry://@blocking#'))
# local claim list still finds local reposted content that's blocked
claims = await self.claim_list(reposted_claim_id=bad_content_id)
self.assertEqual(claims[0]['name'], 'block1')
self.assertEqual(claims[0]['value']['claim_id'], bad_content_id)
self.assertEqual(claims[1]['name'], 'filter1')
self.assertEqual(claims[1]['value']['claim_id'], bad_content_id)
# a filtered/blocked channel impacts all content inside it # a filtered/blocked channel impacts all content inside it
bad_channel_id = self.get_claim_id( bad_channel_id = self.get_claim_id(
await self.channel_create('@bad_channel', '0.1', tags=['bad-stuff']) await self.channel_create('@bad_channel', '0.1', tags=['bad-stuff'])

View file

@ -18,11 +18,17 @@ class FileCommands(CommandTestCase):
super().__init__(*a, **kw) super().__init__(*a, **kw)
self.skip_libtorrent = False self.skip_libtorrent = False
async def add_forever(self):
while True:
for handle in self.client_session._handles.values():
handle._handle.connect_peer(('127.0.0.1', 4040))
await asyncio.sleep(.1)
async def initialize_torrent(self, tx_to_update=None): async def initialize_torrent(self, tx_to_update=None):
if not hasattr(self, 'seeder_session'): if not hasattr(self, 'seeder_session'):
self.seeder_session = TorrentSession(self.loop, None) self.seeder_session = TorrentSession(self.loop, None)
self.addCleanup(self.seeder_session.stop) self.addCleanup(self.seeder_session.stop)
await self.seeder_session.bind(port=4040) await self.seeder_session.bind('127.0.0.1', port=4040)
btih = await self.seeder_session.add_fake_torrent() btih = await self.seeder_session.add_fake_torrent()
address = await self.account.receiving.get_or_create_usable_address() address = await self.account.receiving.get_or_create_usable_address()
if not tx_to_update: if not tx_to_update:
@@ -40,8 +46,9 @@ class FileCommands(CommandTestCase):
await tx.sign([self.account])
await self.broadcast_and_confirm(tx)
self.client_session = self.daemon.file_manager.source_managers['torrent'].torrent_session
- self.client_session._session.add_dht_node(('localhost', 4040))
self.client_session.wait_start = False # fixme: this is super slow on tests
+ task = asyncio.create_task(self.add_forever())
+ self.addCleanup(task.cancel)
return tx, btih
@skipIf(TorrentSession is None, "libtorrent not installed")
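Note: the added add_forever() coroutine keeps nudging the torrent client toward the local seeder, started with asyncio.create_task() and cancelled via addCleanup(task.cancel). A minimal sketch of that run-a-background-task-for-the-test pattern, outside any lbry-specific API (the poke() body and interval are illustrative assumptions):

import asyncio
import unittest


class BackgroundTaskExample(unittest.IsolatedAsyncioTestCase):
    async def poke(self):
        # Illustrative stand-in for add_forever(): repeat some side effect
        # until the task is cancelled at cleanup.
        while True:
            await asyncio.sleep(0.1)

    async def test_something(self):
        task = asyncio.create_task(self.poke())
        # addCleanup runs after the test, cancelling the forever-loop so the
        # event loop can shut down cleanly.
        self.addCleanup(task.cancel)
        await asyncio.sleep(0)  # the real test body would go here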
@@ -82,6 +89,21 @@ class FileCommands(CommandTestCase):
await self.reflector.blob_manager.delete_blobs(all_except_sd)
self.assertEqual(all_except_sd, await self.daemon.jsonrpc_file_reflect(sd_hash=sd_hash))
+ async def test_sd_blob_fields_fallback(self):
+     claim_id = self.get_claim_id(await self.stream_create('foo', '0.01', suffix='.txt'))
+     stream = (await self.daemon.jsonrpc_file_list())["items"][0]
+     stream.descriptor.suggested_file_name = ' '
+     stream.descriptor.stream_name = ' '
+     stream.descriptor.stream_hash = stream.descriptor.get_stream_hash()
+     sd_hash = stream.descriptor.sd_hash = stream.descriptor.calculate_sd_hash()
+     await stream.descriptor.make_sd_blob()
+     await self.daemon.jsonrpc_file_delete(claim_name='foo')
+     await self.stream_update(claim_id=claim_id, sd_hash=sd_hash)
+     file_dict = await self.out(self.daemon.jsonrpc_get('lbry://foo', save_file=True))
+     self.assertEqual(file_dict['suggested_file_name'], stream.file_name)
+     self.assertEqual(file_dict['stream_name'], stream.file_name)
+     self.assertEqual(file_dict['mime_type'], 'text/plain')
async def test_file_management(self):
await self.stream_create('foo', '0.01')
await self.stream_create('foo2', '0.01')
@@ -332,7 +354,7 @@ class FileCommands(CommandTestCase):
await self.daemon.jsonrpc_get('lbry://foo')
with open(original_path, 'wb') as handle:
handle.write(b'some other stuff was there instead')
- self.daemon.file_manager.stop()
+ await self.daemon.file_manager.stop()
await self.daemon.file_manager.start()
await asyncio.wait_for(self.wait_files_to_complete(), timeout=5) # if this hangs, file didn't get set completed
# check that internal state got through up to the file list API
@@ -360,8 +382,7 @@ class FileCommands(CommandTestCase):
resp = await self.out(self.daemon.jsonrpc_get('lbry://foo', timeout=2))
self.assertNotIn('error', resp)
self.assertTrue(os.path.isfile(path))
- self.daemon.file_manager.stop()
+ await self.daemon.file_manager.stop()
- await asyncio.sleep(0.01, loop=self.loop) # FIXME: this sleep should not be needed
self.assertFalse(os.path.isfile(path))
async def test_incomplete_downloads_retry(self):
@@ -456,7 +477,7 @@ class FileCommands(CommandTestCase):
# restart the daemon and make sure the fee is still there
- self.daemon.file_manager.stop()
+ await self.daemon.file_manager.stop()
await self.daemon.file_manager.start()
self.assertItemCount(await self.daemon.jsonrpc_file_list(), 1)
self.assertEqual((await self.daemon.jsonrpc_file_list())['items'][0].content_fee.raw, raw_content_fee)
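Note: the recurring change in this file is that self.daemon.file_manager.stop() is now awaited, i.e. stop() became a coroutine. A hedged sketch of what such a conversion typically looks like for a manager that owns a background task (the Manager class and its fields are illustrative, not the actual FileManager code):

import asyncio


class Manager:
    def __init__(self):
        self._task = None

    async def start(self):
        self._task = asyncio.create_task(self._run())

    async def _run(self):
        while True:
            await asyncio.sleep(1)

    async def stop(self):
        # Async stop: cancel the worker and wait for it to actually finish,
        # so callers can rely on everything being torn down once stop() returns.
        if self._task is not None:
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                pass
            self._task = None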

View file

@@ -3,7 +3,9 @@ import hashlib
import aiohttp
import aiohttp.web
import asyncio
+ import contextlib
+ from lbry.file.source import ManagedDownloadSource
from lbry.utils import aiohttp_request
from lbry.blob.blob_file import MAX_BLOB_SIZE
from lbry.testcase import CommandTestCase
@@ -21,7 +23,7 @@ def get_random_bytes(n: int) -> bytes:
class RangeRequests(CommandTestCase):
async def _restart_stream_manager(self):
- self.daemon.file_manager.stop()
+ await self.daemon.file_manager.stop()
await self.daemon.file_manager.start()
return
@@ -352,14 +354,21 @@ class RangeRequests(CommandTestCase):
path = stream.full_path
self.assertIsNotNone(path)
if wait_for_start_writing:
- await stream.started_writing.wait()
+ with contextlib.suppress(asyncio.CancelledError):
+     await stream.started_writing.wait()
self.assertTrue(os.path.isfile(path))
- await self._restart_stream_manager()
+ await self.daemon.file_manager.stop()
+ # while stopped, we get no response to query and no file is present
+ self.assertEqual((await self.daemon.jsonrpc_file_list())['items'], [])
+ self.assertEqual(os.path.isfile(path), stream.status == ManagedDownloadSource.STATUS_FINISHED)
+ await self.daemon.file_manager.start()
+ # after restart, we get a response to query and same file path
stream = (await self.daemon.jsonrpc_file_list())['items'][0]
self.assertIsNotNone(stream.full_path)
- self.assertFalse(os.path.isfile(path))
+ self.assertEqual(stream.full_path, path)
if wait_for_start_writing:
- await stream.started_writing.wait()
+ with contextlib.suppress(asyncio.CancelledError):
+     await stream.started_writing.wait()
self.assertTrue(os.path.isfile(path))
async def test_file_save_stop_before_finished_streaming_only_wait_for_start(self):
@@ -414,6 +423,6 @@ class RangeRequestsLRUCache(CommandTestCase):
# running with cache size 0 gets through without errors without
# this since the server doesn't stop immediately
- await asyncio.sleep(1, loop=self.loop)
+ await asyncio.sleep(1)
await self._request_stream()
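Note: the contextlib.suppress(asyncio.CancelledError) blocks added earlier in this file let the test continue if the awaited event is cancelled while the file manager shuts down. The general shape of that idiom, in a self-contained form (the Event/task names below are illustrative, not the test's actual objects):

import asyncio
import contextlib


async def main():
    started = asyncio.Event()
    waiter = asyncio.create_task(started.wait())
    waiter.cancel()  # e.g. something shuts down while we are still waiting
    with contextlib.suppress(asyncio.CancelledError):
        await waiter  # would raise CancelledError; suppressed instead
    print("continued past the cancelled wait")


asyncio.run(main())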

View file

@@ -1508,27 +1508,27 @@ class ResolveClaimTakeovers(BaseResolveTestCase):
COIN = int(1E8)
self.assertEqual(self.conductor.spv_node.writer.height, 207)
- self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+ self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put(
(208, bytes.fromhex(claim_id1)), (0, 10 * COIN)
)
await self.generate(1)
self.assertEqual(self.conductor.spv_node.writer.height, 208)
self.assertEqual(1.7090807854206793, await get_trending_score(claim_id1))
- self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+ self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put(
(209, bytes.fromhex(claim_id1)), (10 * COIN, 100 * COIN)
)
await self.generate(1)
self.assertEqual(self.conductor.spv_node.writer.height, 209)
self.assertEqual(2.2437974397778886, await get_trending_score(claim_id1))
- self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+ self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put(
(309, bytes.fromhex(claim_id1)), (100 * COIN, 1000000 * COIN)
)
await self.generate(100)
self.assertEqual(self.conductor.spv_node.writer.height, 309)
self.assertEqual(5.157053472135866, await get_trending_score(claim_id1))
- self.conductor.spv_node.writer.db.prefix_db.trending_notification.stage_put(
+ self.conductor.spv_node.writer.db.prefix_db.trending_notification.stash_put(
(409, bytes.fromhex(claim_id1)), (1000000 * COIN, 1 * COIN)
)
@@ -1550,18 +1550,10 @@ class ResolveAfterReorg(BaseResolveTestCase):
blocks = self.ledger.headers.height - start
self.blockchain.block_expected = start - 1
- prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
+ self.conductor.spv_node.server.synchronized.clear()
# go back to start
await self.blockchain.invalidate_block((await self.ledger.headers.hash(start)).decode())
# go to previous + 1
- await self.blockchain.generate(blocks + 2)
+ await self.generate(blocks + 2)
- await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
+ await self.conductor.spv_node.server.synchronized.wait()
- # await asyncio.wait_for(self.on_header(self.blockchain.block_expected), 30.0)
async def assertBlockHash(self, height):
reader_db = self.conductor.spv_node.server.db
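Note: the reorg helper now clears the server's synchronized event before invalidating blocks and waits on it afterwards, instead of pre-registering a header future. Clearing before kicking off the work avoids missing a notification that fires early; a generic sketch of that ordering (the Event and do_reorg names are illustrative):

import asyncio


async def main():
    synchronized = asyncio.Event()

    async def do_reorg():
        await asyncio.sleep(0)   # stand-in for invalidate/generate work
        synchronized.set()       # the server signals it has caught up

    # Clear before starting the work, wait after: if the signal fires early,
    # the event stays set and the wait() below still returns promptly.
    synchronized.clear()
    asyncio.create_task(do_reorg())
    await synchronized.wait()


asyncio.run(main())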

View file

@@ -2,7 +2,7 @@ import asyncio
import unittest
from lbry.testcase import CommandTestCase
+ from lbry.wallet import Transaction
class TransactionCommandsTestCase(CommandTestCase):
@@ -29,17 +29,42 @@ class TransactionCommandsTestCase(CommandTestCase):
# someone's tx
change_address = await self.blockchain.get_raw_change_address()
sendtxid = await self.blockchain.send_to_address(change_address, 10)
- await asyncio.sleep(0.2)
- tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
- self.assertEqual(tx.id, sendtxid)
- self.assertEqual(tx.height, -1)
+ # After a few tries, Hub should have the transaction (in mempool).
+ for i in range(5):
+     tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
+     # Retry if Hub is not aware of the transaction.
+     if isinstance(tx, dict):
+         # Fields: 'success', 'code', 'message'
+         self.assertFalse(tx['success'], tx)
+         self.assertEqual(tx['code'], 404, tx)
+         self.assertEqual(tx['message'], "transaction not found", tx)
+         await asyncio.sleep(0.1)
+         continue
+     break
+ # verify transaction show (in mempool)
+ self.assertTrue(isinstance(tx, Transaction), str(tx))
+ # Fields: 'txid', 'raw', 'height', 'position', 'is_verified', and more.
+ self.assertEqual(tx.id, sendtxid, vars(tx))
+ self.assertEqual(tx.height, -1, vars(tx))
+ self.assertEqual(tx.is_verified, False, vars(tx))
+ # transaction is confirmed and leaves mempool
await self.generate(1)
+ # verify transaction show
tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
- self.assertEqual(tx.height, self.ledger.headers.height)
+ self.assertTrue(isinstance(tx, Transaction), str(tx))
+ self.assertEqual(tx.id, sendtxid, vars(tx))
+ self.assertEqual(tx.height, self.ledger.headers.height, vars(tx))
+ self.assertEqual(tx.is_verified, True, vars(tx))
# inexistent
result = await self.daemon.jsonrpc_transaction_show('0'*64)
- self.assertFalse(result['success'])
+ self.assertTrue(isinstance(result, dict), result)
+ # Fields: 'success', 'code', 'message'
+ self.assertFalse(result['success'], result)
+ self.assertEqual(result['code'], 404, result)
+ self.assertEqual(result['message'], "transaction not found", result)
async def test_utxo_release(self):
await self.send_to_address_and_wait(

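Note: the rewritten transaction_show test polls a few times instead of sleeping a fixed 0.2s, since the Hub may not have indexed the mempool transaction yet. The same retry-until-condition shape, reduced to a standalone helper (the attempt count and delay here are arbitrary illustrative choices):

import asyncio
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")


async def poll_until(fetch: Callable[[], Awaitable[T]],
                     ready: Callable[[T], bool],
                     attempts: int = 5,
                     delay: float = 0.1) -> T:
    # Call fetch() up to `attempts` times, sleeping between tries, and return
    # the first result that satisfies `ready`; otherwise return the last result.
    result = await fetch()
    for _ in range(attempts - 1):
        if ready(result):
            break
        await asyncio.sleep(delay)
        result = await fetch()
    return result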
View file

@@ -36,7 +36,7 @@ class TestBlob(AsyncioTestCase):
writer.write(self.blob_bytes)
await blob.verified.wait()
self.assertTrue(blob.get_is_verified())
- await asyncio.sleep(0, loop=self.loop) # wait for the db save task
+ await asyncio.sleep(0) # wait for the db save task
return blob
async def _test_close_writers_on_finished(self, blob_class=AbstractBlob, blob_directory=None):
@@ -48,7 +48,7 @@ class TestBlob(AsyncioTestCase):
with self.assertRaises(InvalidDataError):
writers[1].write(self.blob_bytes * 2)
await writers[1].finished
- await asyncio.sleep(0, loop=self.loop)
+ await asyncio.sleep(0)
self.assertEqual(4, len(blob.writers))
# write the blob
@@ -208,7 +208,7 @@ class TestBlob(AsyncioTestCase):
async def read_blob_buffer():
with reader as read_handle:
self.assertEqual(1, len(blob.readers))
- await asyncio.sleep(2, loop=self.loop)
+ await asyncio.sleep(2)
self.assertEqual(0, len(blob.readers))
return read_handle.read()
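Note: much of the churn in these unit tests is mechanical: the explicit loop= argument to asyncio.sleep(), asyncio.gather(), asyncio.Queue() and asyncio.wait_for() was deprecated and later removed, so the calls now rely on the running loop. A small before/after sketch (Python 3.10+ rejects the old form):

import asyncio


async def demo():
    # Old style (removed in Python 3.10): await asyncio.sleep(0.1, loop=loop)
    # New style: the coroutine picks up the running loop automatically.
    await asyncio.sleep(0.1)
    queue = asyncio.Queue()                                    # no loop= argument
    await asyncio.gather(asyncio.sleep(0), asyncio.sleep(0))   # no loop= argument
    return queue


asyncio.run(demo())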

View file

@@ -183,7 +183,7 @@ class TestBlobExchange(BlobExchangeTestBase):
writer.write(mock_blob_bytes)
return self.loop.create_task(_inner())
- await asyncio.gather(write_task(writer1), write_task(writer2), loop=self.loop)
+ await asyncio.gather(write_task(writer1), write_task(writer2))
self.assertDictEqual({1: mock_blob_bytes, 2: mock_blob_bytes}, results)
self.assertEqual(1, write_called_count)
@@ -239,7 +239,8 @@ class TestBlobExchange(BlobExchangeTestBase):
async def test_server_chunked_request(self):
blob_hash = "7f5ab2def99f0ddd008da71db3a3772135f4002b19b7605840ed1034c8955431bd7079549e65e6b2a3b9c17c773073ed"
server_protocol = BlobServerProtocol(self.loop, self.server_blob_manager, self.server.lbrycrd_address)
- transport = asyncio.Transport(extra={'peername': ('ip', 90)})
+ transport = mock.Mock(spec=asyncio.Transport)
+ transport.get_extra_info = lambda k: {'peername': ('ip', 90)}[k]
received_data = BytesIO()
transport.is_closing = lambda: received_data.closed
transport.write = received_data.write
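Note: the test now builds a mock.Mock(spec=asyncio.Transport) instead of instantiating asyncio.Transport directly, stubbing only the methods the server touches. A minimal, hedged version of that setup (the peer address and the standalone helper are illustrative; the BlobServerProtocol wiring is omitted):

import asyncio
from io import BytesIO
from unittest import mock


def make_mock_transport() -> mock.Mock:
    # A Transport-shaped mock: spec=asyncio.Transport makes attribute typos
    # fail fast, and the few methods the code under test calls are stubbed.
    received = BytesIO()
    transport = mock.Mock(spec=asyncio.Transport)
    transport.get_extra_info = lambda k: {'peername': ('127.0.0.1', 90)}[k]
    transport.is_closing = lambda: received.closed
    transport.write = received.write
    return transport


transport = make_mock_transport()
transport.write(b"hello")
print(transport.get_extra_info('peername'))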
@@ -269,7 +270,7 @@ class TestBlobExchange(BlobExchangeTestBase):
client_blob.delete()
# wait for less than the idle timeout
- await asyncio.sleep(0.5, loop=self.loop)
+ await asyncio.sleep(0.5)
# download the blob again
downloaded, protocol2 = await request_blob(self.loop, client_blob, self.server_from_client.address,
@@ -283,10 +284,10 @@ class TestBlobExchange(BlobExchangeTestBase):
client_blob.delete()
# check that the connection times out from the server side
- await asyncio.sleep(0.9, loop=self.loop)
+ await asyncio.sleep(0.9)
self.assertFalse(protocol.transport.is_closing())
self.assertIsNotNone(protocol.transport._sock)
- await asyncio.sleep(0.1, loop=self.loop)
+ await asyncio.sleep(0.1)
self.assertIsNone(protocol.transport)
def test_max_request_size(self):
@@ -322,7 +323,7 @@ class TestBlobExchange(BlobExchangeTestBase):
server_blob = self.server_blob_manager.get_blob(blob_hash)
async def sendfile(writer):
- await asyncio.sleep(2, loop=self.loop)
+ await asyncio.sleep(2)
return 0
server_blob.sendfile = sendfile
@@ -346,7 +347,7 @@ class TestBlobExchange(BlobExchangeTestBase):
def _mock_accumulate_peers(q1, q2=None):
async def _task():
pass
- q2 = q2 or asyncio.Queue(loop=self.loop)
+ q2 = q2 or asyncio.Queue()
return q2, self.loop.create_task(_task())
mock_node.accumulate_peers = _mock_accumulate_peers

View file

@@ -72,14 +72,14 @@ class CacheConcurrentDecoratorTests(AsyncioTestCase):
@utils.cache_concurrent
async def foo(self, arg1, arg2=None, delay=1):
self.called.append((arg1, arg2, delay))
- await asyncio.sleep(delay, loop=self.loop)
+ await asyncio.sleep(delay)
self.counter += 1
self.finished.append((arg1, arg2, delay))
return object()
async def test_gather_duplicates(self):
result = await asyncio.gather(
- self.loop.create_task(self.foo(1)), self.loop.create_task(self.foo(1)), loop=self.loop
+ self.loop.create_task(self.foo(1)), self.loop.create_task(self.foo(1))
)
self.assertEqual(1, len(self.called))
self.assertEqual(1, len(self.finished))
@@ -93,7 +93,7 @@ class CacheConcurrentDecoratorTests(AsyncioTestCase):
with self.assertRaises(asyncio.CancelledError):
await asyncio.gather(
- t1, self.loop.create_task(self.foo(1)), loop=self.loop
+ t1, self.loop.create_task(self.foo(1))
)
self.assertEqual(1, len(self.called))
self.assertEqual(0, len(self.finished))

View file

@@ -128,7 +128,7 @@ class TestBlobAnnouncer(AsyncioTestCase):
await self.chain_peer(constants.generate_id(current + 4), '1.2.3.13')
last = await self.chain_peer(constants.generate_id(current + 5), '1.2.3.14')
- search_q, peer_q = asyncio.Queue(loop=self.loop), asyncio.Queue(loop=self.loop)
+ search_q, peer_q = asyncio.Queue(), asyncio.Queue()
search_q.put_nowait(blob1)
_, task = last.accumulate_peers(search_q, peer_q)

View file

@@ -2,7 +2,6 @@ import unittest
from unittest import mock
import json
- from lbry.conf import Config
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.extras.daemon.componentmanager import ComponentManager
from lbry.extras.daemon.components import DATABASE_COMPONENT, DHT_COMPONENT, WALLET_COMPONENT
@@ -11,6 +10,7 @@ from lbry.extras.daemon.components import UPNP_COMPONENT, BLOB_COMPONENT
from lbry.extras.daemon.components import PEER_PROTOCOL_SERVER_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
from lbry.extras.daemon.daemon import Daemon as LBRYDaemon
from lbry.wallet import WalletManager, Wallet
+ from lbry.conf import Config
from tests import test_utils
# from tests.mocks import mock_conf_settings, FakeNetwork, FakeFileManager

View file

@@ -8,6 +8,8 @@ from lbry.blob_exchange.serialization import BlobResponse
from lbry.blob_exchange.server import BlobServerProtocol
from lbry.dht.node import Node
from lbry.dht.peer import make_kademlia_peer
+ from lbry.extras.daemon.storage import StoredContentClaim
+ from lbry.schema import Claim
from lbry.stream.managed_stream import ManagedStream
from lbry.stream.descriptor import StreamDescriptor
from tests.unit.blob_exchange.test_transfer_blob import BlobExchangeTestBase
@@ -23,7 +25,10 @@ class TestManagedStream(BlobExchangeTestBase):
with open(file_path, 'wb') as f:
f.write(self.stream_bytes)
descriptor = await StreamDescriptor.create_stream(self.loop, self.server_blob_manager.blob_dir, file_path)
- self.sd_hash = descriptor.calculate_sd_hash()
+ descriptor.suggested_file_name = file_name
+ descriptor.stream_hash = descriptor.get_stream_hash()
+ self.sd_hash = descriptor.sd_hash = descriptor.calculate_sd_hash()
+ await descriptor.make_sd_blob()
return descriptor
async def setup_stream(self, blob_count: int = 10):
@@ -47,6 +52,21 @@ class TestManagedStream(BlobExchangeTestBase):
self.assertEqual(self.stream.full_path, os.path.join(self.client_dir, 'tt_f'))
self.assertTrue(os.path.isfile(os.path.join(self.client_dir, 'tt_f')))
+ async def test_empty_name_fallback(self):
+     descriptor = await self.create_stream(file_name=" ")
+     descriptor.suggested_file_name = " "
+     claim = Claim()
+     claim.stream.source.name = "cool.mp4"
+     self.stream = ManagedStream(
+         self.loop, self.client_config, self.client_blob_manager, self.sd_hash, self.client_dir,
+         claim=StoredContentClaim(serialized=claim.to_bytes().hex())
+     )
+     await self._test_transfer_stream(10, skip_setup=True)
+     self.assertTrue(self.stream.completed)
+     self.assertEqual(self.stream.suggested_file_name, "cool.mp4")
+     self.assertEqual(self.stream.stream_name, "cool.mp4")
+     self.assertEqual(self.stream.mime_type, "video/mp4")
async def test_status_file_completed(self):
await self._test_transfer_stream(10)
self.assertTrue(self.stream.output_file_exists)
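Note: test_empty_name_fallback above checks that a blank file name in the stream descriptor falls back to the name stored in the claim's source, which also drives the MIME type. A rough sketch of that kind of fallback (the helper below is hypothetical and uses Python's mimetypes rather than lbry's actual lookup):

import mimetypes


def effective_file_name(descriptor_name: str, claim_source_name: str) -> str:
    # Prefer the descriptor's suggested name; fall back to the claim's source
    # name when the descriptor value is empty or whitespace-only.
    name = (descriptor_name or "").strip()
    return name if name else claim_source_name


name = effective_file_name(" ", "cool.mp4")
mime, _ = mimetypes.guess_type(name)
print(name, mime)  # cool.mp4 video/mp4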
@@ -89,9 +109,9 @@ class TestManagedStream(BlobExchangeTestBase):
await self._test_transfer_stream(10, stop_when_done=False)
self.assertEqual(self.stream.status, "finished")
self.assertTrue(self.stream._running.is_set())
- await asyncio.sleep(0.5, loop=self.loop)
+ await asyncio.sleep(0.5)
self.assertTrue(self.stream._running.is_set())
- await asyncio.sleep(2, loop=self.loop)
+ await asyncio.sleep(2)
self.assertEqual(self.stream.status, "finished")
self.assertFalse(self.stream._running.is_set())

View file

@@ -86,13 +86,13 @@ class TestReflector(AsyncioTestCase):
self.assertListEqual(sent, [])
async def test_reflect_stream(self):
- return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=50), 3, loop=self.loop)
+ return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=50), 3)
async def test_reflect_stream_but_reflector_changes_its_mind(self):
- return await asyncio.wait_for(self._test_reflect_stream(partial_needs=True), 3, loop=self.loop)
+ return await asyncio.wait_for(self._test_reflect_stream(partial_needs=True), 3)
async def test_reflect_stream_small_response_chunks(self):
- return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=30), 3, loop=self.loop)
+ return await asyncio.wait_for(self._test_reflect_stream(response_chunk_size=30), 3)
async def test_announces(self):
to_announce = await self.storage.get_blobs_to_announce()

View file

@@ -174,7 +174,7 @@ class TestStreamManager(BlobExchangeTestBase):
await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
else:
await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
- await asyncio.sleep(0, loop=self.loop)
+ await asyncio.sleep(0)
self.assertTrue(checked_analytics_event)
async def test_time_to_first_bytes(self):
@@ -317,7 +317,7 @@ class TestStreamManager(BlobExchangeTestBase):
stream.downloader.node = self.stream_manager.node
await stream.save_file()
await stream.finished_writing.wait()
- await asyncio.sleep(0, loop=self.loop)
+ await asyncio.sleep(0)
self.assertTrue(stream.finished)
self.assertFalse(stream.running)
self.assertTrue(os.path.isfile(os.path.join(self.client_dir, "test_file")))
@@ -355,7 +355,7 @@ class TestStreamManager(BlobExchangeTestBase):
self.stream_manager.analytics_manager._post = check_post
await self._test_download_error_on_start(expected_error, timeout)
- await asyncio.sleep(0, loop=self.loop)
+ await asyncio.sleep(0)
self.assertListEqual([expected_error.__name__], received)
async def test_insufficient_funds(self):
@@ -424,7 +424,7 @@ class TestStreamManager(BlobExchangeTestBase):
self.assertIsNone(stream.full_path)
self.assertEqual(0, stream.written_bytes)
- self.stream_manager.stop()
+ await self.stream_manager.stop()
await self.stream_manager.start()
self.assertEqual(1, len(self.stream_manager.streams))
stream = list(self.stream_manager.streams.values())[0]
@@ -448,8 +448,8 @@ class TestStreamManager(BlobExchangeTestBase):
self.assertDictEqual(self.stream_manager.streams, {})
stream = await self.file_manager.download_from_uri(self.uri, self.exchange_rate_manager)
await stream.finished_writing.wait()
- await asyncio.sleep(0, loop=self.loop)
+ await asyncio.sleep(0)
- self.stream_manager.stop()
+ await self.stream_manager.stop()
self.client_blob_manager.stop()
# partial removal, only sd blob is missing.
# in this case, we recover the sd blob while the other blobs are kept untouched as 'finished'

View file

@@ -470,7 +470,7 @@ class TestUpgrade(AsyncioTestCase):
class TestSQLiteRace(AsyncioTestCase):
- max_misuse_attempts = 80000
+ max_misuse_attempts = 120000
def setup_db(self):
self.db = sqlite3.connect(":memory:", isolation_level=None)

View file

@@ -5,7 +5,6 @@ deps =
extras =
test
hub
- torrent
changedir = {toxinidir}/tests
setenv =
HOME=/tmp