Compare commits

...

352 commits

Author SHA1 Message Date
Jonathan Moody
eb5da9511e Revert "TEMP: Try python 3.8."
This reverts commit 8def4d5177.
2023-04-03 13:34:36 -04:00
Jonathan Moody
8722ef840e Bump python_requires >= 3.8.
Code to handle CancelledError (as subclass of Exception) was removed.
2023-04-03 13:34:36 -04:00
Jonathan Moody
6e75a1a89b TEMP: Try python 3.8. 2023-04-03 13:34:36 -04:00
Jonathan Moody
ef3189de1d Work on some DeprecationWarnings: The explicit passing of coroutine objects to asyncio.wait() is deprecated since Python 3.8. 2023-04-03 13:34:36 -04:00
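For context on that warning, a minimal sketch (not taken from this changeset) of the migration it refers to: since Python 3.8, asyncio.wait() should be given tasks rather than bare coroutines.

    import asyncio

    async def fetch(n):
        await asyncio.sleep(0.1)
        return n

    async def main():
        # deprecated since 3.8:  await asyncio.wait([fetch(1), fetch(2)])
        tasks = [asyncio.create_task(fetch(n)) for n in (1, 2)]
        done, _pending = await asyncio.wait(tasks)
        print(sorted(t.result() for t in done))

    asyncio.run(main())
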
Jonathan Moody
c2d2080034 Try to suppress asyncio.CancelledError in a different way in test_streaming.py. 2023-04-03 13:34:36 -04:00
Jonathan Moody
d0b5a0a8fd TEMP: Add workflow_dispatch. 2023-04-03 13:34:36 -04:00
Jonathan Moody
1d0e17be21 Another place generalized to Exception or asyncio.CancelledError. 2023-04-03 13:34:36 -04:00
Jonathan Moody
4ef03bb1f4 Try separate file_manager.stop() and start() calls to better
control order of events in test.
While file_manager is stopped, we get no response to file_list().
2023-04-03 13:34:36 -04:00
Jonathan Moody
4bd4bcdc27 Try ubuntu-20.04 to resolve missing libffi.so.7 issue. 2023-04-03 13:34:36 -04:00
Jonathan Moody
e5ca967fa2 Make FileManager.stop() async because SourceManager.stop() is now async. 2023-04-03 13:34:36 -04:00
Jonathan Moody
eed7d02e8b Tweak aiohttp version to be compatible with hub repository. 2023-04-03 13:34:36 -04:00
Jonathan Moody
02aecad52b CancelledError derives from BaseException in Python >= 3.8. The significant functional
change here is in upload_to_reflector(). Unit tests in TestReflector were failing.
Deal with lint related to CancelledError cleanup.
2023-04-03 13:34:36 -04:00
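As a hedged illustration of that Python 3.8 behaviour change (this is not the upload_to_reflector() code itself): once CancelledError derives from BaseException, a bare "except Exception" no longer swallows cancellation, so it has to be handled or re-raised explicitly.

    import asyncio

    async def run_step(coro):
        try:
            return await coro
        except asyncio.CancelledError:
            raise                     # cancellation is a BaseException on 3.8+, let it propagate
        except Exception as err:      # ordinary failures are still caught here
            print("step failed:", err)
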
Jonathan Moody
585962d930 Make stop(), stop_tasks() consistently async routines, and have stop_tasks()
wait for file_output_task completion. This fixes a problem with
test_download_stop_resume_delete.
2023-04-03 13:34:36 -04:00
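A rough sketch of that shutdown pattern, with illustrative names only (ManagedDownload and file_output_task here are placeholders, not the SDK's actual classes): stop_tasks() cancels the output task and then waits for it, so a later start() cannot race with a writer that is still unwinding.

    import asyncio
    import typing

    class ManagedDownload:
        def __init__(self):
            self.file_output_task: typing.Optional[asyncio.Task] = None

        async def stop_tasks(self):
            if self.file_output_task and not self.file_output_task.done():
                self.file_output_task.cancel()
                # wait for the cancelled task to actually finish before returning
                await asyncio.gather(self.file_output_task, return_exceptions=True)
            self.file_output_task = None

        async def stop(self):
            await self.stop_tasks()
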
Jonathan Moody
ea4fba39a6 Fix Transport, DatagramTransport mockup issues. 2023-04-03 13:34:36 -04:00
Jonathan Moody
7a86406746 Fix and enable lint no-self-use & try-except-raise. 2023-04-03 13:34:36 -04:00
Jonathan Moody
c8a3eb97a4 Bump pylint version. Old pylint did not find standard library stuff on 3.9.12. 2023-04-03 13:34:36 -04:00
Lex Berezhny
20213628d7 upgrade cryptography 2023-04-03 13:34:36 -04:00
Lex Berezhny
2d1649f972 pylint disable shuffle() arg check 2023-04-03 13:34:36 -04:00
Lex Berezhny
5cb04b86a0 shuffle() needs custom random, removed loop from Event()/Queue() 2023-04-03 13:34:36 -04:00
Lex Berezhny
93ab6b3be3 passing loop to asyncio functions is deprecated 2023-04-03 13:34:36 -04:00
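A minimal before/after sketch of the deprecation these commits clean up (Pipeline is just an example name): the loop argument to asyncio primitives was deprecated in Python 3.8 and removed in 3.10, because the objects bind to the running loop when first used.

    import asyncio

    class Pipeline:
        def __init__(self):
            # old style (deprecated in 3.8, removed in 3.10):
            #     self.done = asyncio.Event(loop=asyncio.get_event_loop())
            self.done = asyncio.Event()   # binds to the running loop when first awaited
            self.queue = asyncio.Queue()
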
Lex Berezhny
b9762c3e64 update plyvel 2023-04-03 13:34:36 -04:00
Lex Berezhny
82592d00ef try building 3.9 2023-04-03 13:34:36 -04:00
Jonathan Moody
c118174c1a Try shell: bash to simplify. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d284acd8b8 Remove "debug pip cache". 2023-02-02 14:16:07 -05:00
Jonathan Moody
235c98372d Fix syntax. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d2f5073ef4 Single "set pip cache dir" task with conditional inside. 2023-02-02 14:16:07 -05:00
Jonathan Moody
84e5e43117 Bump upload-artifact version too. 2023-02-02 14:16:07 -05:00
Jonathan Moody
7bd025ae54 Upgrade change-string-case. Use startsWith() to test runner.os.
Bump change-string-case-action version again.
2023-02-02 14:16:07 -05:00
Jonathan Moody
8f28ce65b0 Switch to environment vars in $GITHUB_ENV. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d36e305129 Functions save-state, set-output deprecated. Use new mechanism. 2023-02-02 14:16:07 -05:00
Jonathan Moody
2609dee8fb Bump checkout, setup-python, cache action versions. 2023-02-02 14:16:07 -05:00
Lex Berezhny
a2da86d4b5 v0.113.0 2023-01-23 10:43:02 -05:00
Alex Grin
aa16c7fee5 Update conf.py 2023-01-23 10:30:25 -05:00
Alex Grin
3266f72b82 add s1.lbry.network 2023-01-23 10:30:25 -05:00
Jack Robison
77cd2a3f8a add more non lbry.com hubs/bootstrap dht nodes 2023-01-23 10:30:25 -05:00
Alex Grin
308e586e9a add grin's domain to bootstrap hubs list 2023-01-23 10:30:25 -05:00
84beddfd77 Added tracker and dht from pigg.es
Added tracker and dht from pigg.es
2023-01-22 19:09:17 -05:00
Victor Shyba
6258651650
Merge pull request #3716 from lbryio/dht_exceptions
handle remote exceptions on routing table ping
2022-12-13 17:18:47 -03:00
Victor Shyba
cc5f0b6630 handle remote exception on routing table ping 2022-12-13 16:56:58 -03:00
Jonathan Moody
f64d507d39 TEMP: Pin workflows to ubuntu-20.04 to work around missing ripemd160 issue. 2022-12-12 21:47:41 -05:00
Jonathan Moody
001819d5c2 Bump Hub to include fix for supports with wrong names. 2022-11-20 20:34:30 -05:00
Jonathan Moody
8b4c046d28 Try pyinstaller==4.6 to fix MacOS build failure. 2022-11-20 20:34:30 -05:00
Jonathan Moody
2c20ad6c43 Add another zlib.error mapped to InvalidPasswordError. 2022-11-20 20:34:30 -05:00
Jonathan Moody
9e610cc54c Update test for Hub rename of method stage_put() -> stash_put(). 2022-11-20 20:34:30 -05:00
Jonathan Moody
b9d25c6d01 Bump hub to latest, getting fix for TX negative caching issue and others. 2022-11-20 20:34:30 -05:00
Jonathan Moody
419b5b45f2 Allow a few initial "transaction not found" responses from Hub. 2022-11-20 20:34:30 -05:00
Jonathan Moody
516c2dd5d0 Bump hub to fix subscribe race + EADDRINUSE issue. 2022-11-20 20:34:30 -05:00
Jonathan Moody
b99102f9c9 Bump max_misuse_attempts by 50% to 120000. 2022-11-20 20:34:30 -05:00
Lex Berezhny
8c6c7b655c v0.112.0 2022-10-30 21:56:17 -04:00
Lex Berezhny
48c6873fc4 channel_sign command has customizable salt 2022-10-30 21:53:53 -04:00
Victor Shyba
15dc52bd9a
Merge pull request #3695 from lbryio/3690
Fix claim fields fallback raising errors before download is saved on database
2022-10-28 11:16:52 -03:00
Victor Shyba
52d555078f initialize stored claim field for fallback earlier 2022-10-19 15:13:47 -03:00
Victor Shyba
cc976bd010 test for early fallback of suggested_file_name 2022-10-19 15:13:47 -03:00
Lex Berezhny
9cc6992011 torrents needs loop 2022-10-18 17:23:56 -04:00
Lex Berezhny
a1b87460c5 passing loop to asyncio functions is deprecated 2022-10-18 17:23:56 -04:00
jessopb
007e1115c4 v0.111.0 2022-10-18 11:18:26 -04:00
jessopb
20ae51b949
Merge pull request #3692 from lbryio/fix-import/export-backwards-compat
fix backwards compatibility in wallet import/export
2022-10-18 11:16:34 -04:00
zeppi
24e9c7b435 fix backwards compatibility in wallet import/export 2022-10-18 10:53:51 -04:00
zeppi
b452e76e1d reverting version to 0.110.0 2022-10-18 10:27:44 -04:00
Lex Berezhny
341834c30d v0.111.0 2022-10-18 00:37:44 -04:00
Victor Shyba
12bac730bd tests: check mime type as well 2022-10-18 00:31:10 -04:00
Victor Shyba
1027337833 fallback for stream name and tests 2022-10-18 00:31:10 -04:00
Victor Shyba
97fef21f75 fallback for suggested file name and tests 2022-10-18 00:31:10 -04:00
Lex Berezhny
9dafd5f69b added claim_list filtering by reposted_claim_id and fix for claim_id of reposted claim in JSON output 2022-10-18 00:28:13 -04:00
Victor Shyba
fd4f0b2049 bump first-start checkpoint 2022-10-18 00:26:10 -04:00
Lex Berezhny
734f0651a4 minor refactor 2022-10-18 00:25:41 -04:00
zeppi
94deaf55df lint 2022-10-18 00:25:41 -04:00
zeppi
d957d46f96 lint 2022-10-18 00:25:41 -04:00
zeppi
0217aede3d update docs 2022-10-18 00:25:41 -04:00
zeppi
e4e1600f51 Enable unencrypted wallet import and export 2022-10-18 00:25:41 -04:00
Jonathan Moody
d0aad8ccaf Add zlib.error string just observed for the first time. 2022-09-29 22:18:54 -04:00
Jonathan Moody
ab50cfa5c1 Add test steps to repeatedly sync_apply() using a bad password. 2022-09-29 22:18:54 -04:00
Jonathan Moody
5a26aea398 Feedback: Reuse IntegrationTestcase.generate() in generate_and_wait(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
bd1cebdb4c Bump hub to include TaskGroup fix. 2022-09-29 22:18:54 -04:00
Jonathan Moody
ec433f069f Substitute InvalidPasswordError for zlib.error. 2022-09-29 22:18:54 -04:00
Jonathan Moody
cd6d3fec9c Wait for initial sync in test_wallet_syncing_status(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
8c474a69de Hub error message changed to include blocked/filtered. 2022-09-29 22:18:54 -04:00
Jonathan Moody
8903056648 Bump hub version to latest. 2022-09-29 22:18:54 -04:00
Jonathan Moody
749a92d0e5 Wait on block height too in generate_and_wait(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
a7d7efecc7 Logging level back to INFO. 2022-09-20 10:04:23 -04:00
Jonathan Moody
c88f0797a3 Log f.exception() if present instead of "Stopped". 2022-09-20 10:04:23 -04:00
Jonathan Moody
137ebd503d Test insufficient funds behavior. 2022-09-20 10:04:23 -04:00
Jonathan Moody
c3f5dd780e Revise exception handling. 2022-09-20 10:04:23 -04:00
Jonathan Moody
20b1865879 Don't use retriable_call(). Add handling for InsufficientFundsError. 2022-09-20 10:04:23 -04:00
Jonathan Moody
231b982422 Wait on usage payment TX to be processed. 2022-09-20 10:04:23 -04:00
Jonathan Moody
fd69401791 Catch and log exceptions coming from the pay() task.
Change test to reproduce failure.
2022-09-20 10:04:23 -04:00
Jonathan Moody
718d046833 Logging for test_single_server_payment debug. 2022-09-20 10:04:23 -04:00
Victor Shyba
e10f57d1ed
Merge pull request #3642 from lbryio/libtorrent
use official libtorrent, fix tests, make it a normal dependency
2022-09-09 19:58:38 -03:00
Victor Shyba
8a033d58df fix torrent component 2022-09-09 12:21:59 -03:00
Victor Shyba
c07c369a28 add libtorrent pyinstaller hook 2022-09-09 12:21:59 -03:00
Victor Shyba
5be990fc55 do not ignore libtorrent import error 2022-09-09 12:21:59 -03:00
Victor Shyba
8f26010c04 make libtorrent a normal dependency 2022-09-09 12:21:59 -03:00
Victor Shyba
3021962e3d tests: add peer directly instead of relying on torrent dht 2022-09-09 12:21:59 -03:00
Victor Shyba
84d89ce5af torrent: disable upnp, natpmp. 2022-09-09 12:21:59 -03:00
Victor Shyba
0961cad716 remove dead code 2022-09-09 12:21:59 -03:00
Jonathan Moody
5c543cb374 Wait for hub to update with all 100 new blocks
before proceeding with initial_headers_sync().
2022-09-08 22:40:09 -04:00
Jonathan Moody
f78d7896a5 Revert "Add more failure message details for debugging."
This reverts commit 5e00a79751.
2022-09-08 18:13:26 -04:00
Jonathan Moody
78a28de2aa Align style of generate() with generate_and_wait(). 2022-09-08 18:13:26 -04:00
Jonathan Moody
45a255e7a2 Reuse generate() logic to wait on hub
instead of half-baked reorg() logic.
2022-09-08 18:13:26 -04:00
Jonathan Moody
d2738c2e72 Add more failure message details for debugging. 2022-09-08 18:13:26 -04:00
Jonathan Moody
a7c7ab7f7b Correct the terminal height we wait for in generate(). 2022-09-08 18:13:26 -04:00
Jonathan Moody
988f288715 Lint fix for _es_height checks. 2022-09-08 18:13:26 -04:00
Jonathan Moody
38e9b5b432 Wait for _es_height in addition to db_height. 2022-09-08 18:13:26 -04:00
Victor Shyba
f7455600cc
Merge pull request #3625 from lbryio/dht_crawler
Add script to collect DHT metrics
2022-09-07 12:56:41 -03:00
Victor Shyba
c7c2d6fe5a collect connections reachability 2022-09-07 12:03:11 -03:00
Victor Shyba
c6c0228970 fix crawler startup query 2022-09-07 12:03:11 -03:00
Victor Shyba
8d9d2c76ae routing table sizes as histogram 2022-09-07 12:03:11 -03:00
Victor Shyba
0b059a5445 use a histogram for latency, remove labels 2022-09-07 12:03:11 -03:00
Victor Shyba
ab67f417ee dht_crawler: wait and retry during port switch 2022-09-07 12:03:11 -03:00
Victor Shyba
0e7a1aee0a dht_crawler: clean in memory set for expired peers 2022-09-07 12:03:11 -03:00
Victor Shyba
d0497cf6b5 dht_crawler: skip saving connections for now 2022-09-07 12:03:11 -03:00
Victor Shyba
c38573d5de dht_crawler: gather both loops, avoid task exceptions being hidden 2022-09-07 12:03:11 -03:00
Victor Shyba
f077e56cec dht_crawler:only count latency during findNode 2022-09-07 12:03:11 -03:00
Victor Shyba
5e58c2f224 fix hosting metrics, improve logging 2022-09-07 12:03:11 -03:00
Victor Shyba
cc64789e96 dht_crawler: fix logging for missing ports 2022-09-07 12:03:11 -03:00
Victor Shyba
b5c390ca04 docker: add volume declaration 2022-09-07 12:03:11 -03:00
Victor Shyba
da2ffb000e skip peers with bad ports without raising 2022-09-07 12:03:11 -03:00
Victor Shyba
df77392fe0 dht crawler:improve logging, metrics, make startup concurrent 2022-09-07 12:03:11 -03:00
Victor Shyba
9aa9ecdc0a add arg for db path 2022-09-07 12:03:11 -03:00
Victor Shyba
43b45a939b format logging 2022-09-07 12:03:11 -03:00
Victor Shyba
e2922a434f add script to generate probe dataset 2022-09-07 12:03:11 -03:00
Victor Shyba
0d6125de0b add sd_hash prober 2022-09-07 12:03:11 -03:00
Victor Shyba
13af7800c2 refactor script, remove dep 2022-09-07 12:03:11 -03:00
Victor Shyba
47a5d37d7c change default metric port, add sqlalchemy to dockerfile 2022-09-07 12:03:11 -03:00
Victor Shyba
4a3a7e318d update pip and setuptools on dht dockerfile 2022-09-07 12:03:11 -03:00
Victor Shyba
85ff487af5 dht_crawler: randomize port when idle 2022-09-07 12:03:11 -03:00
Victor Shyba
62eb9d5c75 dht_crawler: only count non zero connections 2022-09-07 12:03:11 -03:00
Victor Shyba
cfe5c8de8a dht_crawler: serve prometheus metrics at 7070 2022-09-07 12:03:11 -03:00
Victor Shyba
0497698c5b dht_crawler: skip ping if known node_id 2022-09-07 12:03:11 -03:00
Victor Shyba
508bdb8e94 dht_crawler: keep working set in memory, flush to db on intervals 2022-09-07 12:03:11 -03:00
Victor Shyba
cd42f0d726 dht_crawler: fix node id store 2022-09-07 12:03:11 -03:00
Victor Shyba
2706b66a92 dht_crawler: dont re-bootstrap. try known reachable even when they expire 2022-09-07 12:03:11 -03:00
Victor Shyba
29c2d5715d dht_crawler: fix last_seen update 2022-09-07 12:03:11 -03:00
Victor Shyba
965389b759 dht_crawler: process older first, avoid discarding 2022-09-07 12:03:11 -03:00
Victor Shyba
174439f517 dht_crawler: cleanup, try not to reset key 2022-09-07 12:03:11 -03:00
Victor Shyba
baf422fc03 dht_crawler: extract refresh_limit, bump to 1h 2022-09-07 12:03:11 -03:00
Victor Shyba
61f7fbe230 dht_crawler: avoid reads 2022-09-07 12:03:11 -03:00
Victor Shyba
c6c27925b7 dht_crawler: flush/commit only when finished 2022-09-07 12:03:11 -03:00
Victor Shyba
be4c62cf32 check membership instead of one update per peer 2022-09-07 12:03:11 -03:00
Victor Shyba
443a1c32fa dht_crawler: save a set of connections to avoid dupes, enable initial crawl 2022-09-07 12:03:11 -03:00
Victor Shyba
90c2a58470 dht_crawler: dont gather empty, fix crash 2022-09-07 12:03:11 -03:00
Victor Shyba
adc79ec404 dht_crawler: only warn for missing key if it replied 2022-09-07 12:03:11 -03:00
Victor Shyba
137d8ca4ac dht_crawler: enable WAL 2022-09-07 12:03:11 -03:00
Victor Shyba
abf4d888af dht_crawler: warn if we cannot get node id 2022-09-07 12:03:11 -03:00
Victor Shyba
6c350e57dd dht_crawler: query recently checked as stats 2022-09-07 12:03:11 -03:00
Victor Shyba
fb7a93096e only count checked unreachable 2022-09-07 12:03:11 -03:00
Victor Shyba
7ea88e7b31 dht_crawler: store data 2022-09-07 12:03:11 -03:00
Victor Shyba
2361e34541 dht crawler, initial version 2022-09-07 12:03:11 -03:00
Victor Shyba
be06378437 add method for getting the node_id from a known peer on peer manager 2022-09-07 12:03:11 -03:00
Victor Shyba
a334a93757
Merge pull request #3631 from lbryio/bootstrap_node
Add all peers when running as a bootstrap node
2022-08-29 11:07:29 -03:00
Victor Shyba
e3ee3892b2 better test name 2022-08-22 18:45:18 -03:00
Victor Shyba
d61accea1a simplify bucket refresh loop 2022-08-11 21:14:56 -03:00
Victor Shyba
e887453aa5 remove unused last_accessed 2022-08-11 20:39:51 -03:00
Victor Shyba
c3e4f0b988 add 'is_bootstrap_node' conf 2022-08-11 20:38:42 -03:00
Victor Shyba
318728aebd add bootstrap flag to routing table 2022-08-11 20:38:42 -03:00
Victor Shyba
d8c1aaebc2 routing table: mark private methods 2022-08-11 20:38:42 -03:00
Victor Shyba
d7b65c15d2 return none instead of raising 2022-08-11 20:38:42 -03:00
Victor Shyba
972db80246 move add peer logic to routing table 2022-08-11 20:38:42 -03:00
Victor Shyba
0d343ecb2f simplify iterative find constructor 2022-08-11 20:38:42 -03:00
Lex Berezhny
01cd95fe46 v0.110.0 2022-08-11 10:58:16 -04:00
Lex Berezhny
6dc57fc02c revert version 2022-08-11 10:20:58 -04:00
Lex Berezhny
10df0c1fba disable Hotbit and UPBit exchange rate feeds 2022-08-11 10:19:54 -04:00
Lex Berezhny
ec751e5add v0.110.0 2022-08-10 13:52:46 -04:00
Lex Berezhny
3e3974f813 lint 2022-08-08 14:55:44 -04:00
Lex Berezhny
ec82486e15 removed go hub dependency 2022-08-08 14:55:44 -04:00
Lex Berezhny
e16f6b07b8 revert release 2022-08-08 13:02:12 -04:00
Lex Berezhny
9a842c273b v0.110.0 2022-08-08 08:46:32 -04:00
Jonathan Moody
40f7d3ee4b Stabilize test_streaming.py by scanning the data_dir, not the parent of data_dir 2022-08-01 17:37:06 -04:00
Lex Berezhny
1dc2f0458b fix lint 2022-08-01 10:04:24 -04:00
Lex Berezhny
3924b28cc3 raise not implemented error when importing unencrypted wallet 2022-08-01 10:04:24 -04:00
Lex Berezhny
020487b6a0 account merge bug fix from upstream 2022-08-01 10:04:24 -04:00
zeppi
14037c9b2f help string edits 2022-08-01 10:04:24 -04:00
zeppi
0cb37a5c4b linting 2022-08-01 10:04:24 -04:00
zeppi
fa5f3e7e55 change api for data first, password optional, return (str) 2022-08-01 10:04:24 -04:00
zeppi
30aa0724ec newline end of test file 2022-08-01 10:04:24 -04:00
zeppi
059890e4e5 wallet import export feature 2022-08-01 10:04:24 -04:00
Jonathan Moody
9654d4f003 Obtain "amount" from new_txo.amount when calling save_supports(). 2022-08-01 09:10:49 -04:00
Jonathan Moody
956b52a2c1 Refactor _old_get_temp_claim_info(), eliminating "bid" arg. Obtain the value from txo.amount. 2022-08-01 09:10:49 -04:00
Lex Berezhny
2e975c8b61 lint 2022-07-26 22:18:29 -04:00
Lex Berezhny
656e299100 migrate key addresses on changed accounts after sync apply 2022-07-26 22:18:29 -04:00
Jack Robison
352e45b6b7 update pinned hub version 2022-07-25 10:12:46 -04:00
Jack Robison
a9a1076362 improve test_es_sync_utility 2022-07-25 10:12:46 -04:00
Jack Robison
6d370b0a12 dont skip test_setting_stream_fields 2022-07-25 10:12:46 -04:00
Jack Robison
c9fac27b66 test resolving different streams for a channel using short urls 2022-07-25 10:12:46 -04:00
Jack Robison
59bc0b9682 update censored error 2022-07-25 10:12:46 -04:00
Lex Berezhny
ba60aeeebc migrate certificates after importing new account 2022-07-18 10:36:21 -04:00
Jonathan Moody
dc427ecf6c Correct collection_update, account_fund docstrings. Regenerate api.json using generate_json_api.py. 2022-07-07 21:33:43 -04:00
Victor Shyba
87b4404767
Merge pull request #3624 from moodyjon/test_fix_exch_rate1
Fixes for intermittent test failures: test_exchange_rate_manager(), test_basic_claim_search()
2022-07-01 17:29:44 -03:00
Jonathan Moody
ba9ac489c3 Relax range in test_exchange_rate_manager.py. (again) 2022-06-19 19:17:48 -04:00
Jonathan Moody
7049629ad7 Relax range in test_exchange_rate_manager.py. 2022-06-19 19:17:40 -04:00
Jonathan Moody
3ae4aeea47 Search for longer prefix of sd_hash to give better chance of unique results. 2022-06-19 19:14:54 -04:00
Lex Berezhny
8becf1f69f v0.109.0 2022-06-08 12:40:35 -04:00
Victor Shyba
582f79ba1c do not consider pending blobs on disk space query 2022-06-08 12:25:38 -04:00
Lex Berezhny
3c28d869f4
Merge pull request #3620 from lbryio/repost_title_tags
reposts can have title, description and tags
2022-06-08 12:24:28 -04:00
Lex Berezhny
fe61b90610 reposts can have title, description and tags 2022-06-08 10:35:22 -04:00
Lex Berezhny
c04fbb2908
Merge pull request #3614 from lbryio/grins-tracker
add tracker.lbry.grin.io
2022-06-06 13:13:01 -04:00
Alex Grintsvayg
571e71b28e
add tracker.lbry.grin.io 2022-06-06 11:29:09 -04:00
Lex Berezhny
39fcfcccfb
Merge pull request #3608 from lbryio/fix_ci
upgraded SDK to use the new LBRY hub project
2022-06-06 09:01:57 -04:00
Jack Robison
2313d30996
fix reconnect test 2022-05-27 11:59:18 -04:00
Jack Robison
ac7e94c6ed
pylint 2022-05-27 09:59:11 -04:00
Jack Robison
a391fe9fc7
scribe -> hub 2022-05-27 09:58:13 -04:00
Jack Robison
ea8adc5367
update scribe env and fix tests 2022-05-27 09:58:13 -04:00
Victor Shyba
0ea8ba72dd
Env->ServerEnv from scribe changes 2022-05-26 14:28:33 -04:00
Victor Shyba
7a8d5da0e8
Merge pull request #3613 from lbryio/fix_ci_lbcd_urls
tests: fix ci lbcd/lbcwallet urls
2022-05-26 10:21:02 -03:00
Victor Shyba
da30f003e8 update lbcwallet url 2022-05-25 12:17:57 -03:00
Victor Shyba
6257948ad7 update lbcd url 2022-05-25 12:17:28 -03:00
Victor Shyba
a7f606d62c change pip upgrade due windows error 2022-05-23 16:28:36 -03:00
Victor Shyba
1d95eb1549
Merge pull request #3599 from moodyjon/async-for-pr3504
Tighten up IterativeFinder async close behavior (DHT iterator continues after consumer breaks out of it)
2022-05-23 11:12:40 -03:00
Jonathan Moody
e5e9873f79 Simplify by eliminating AsyncGenerator base and generator function. Remove any new places enforcing max_results. 2022-05-20 17:23:39 -04:00
Jonathan Moody
530f9c72ea Fix lint error lbry/utils.py 2022-05-20 17:23:39 -04:00
Jonathan Moody
fad84c771c Support official contextlib.aclosing() when it's available. 2022-05-20 17:23:39 -04:00
Jonathan Moody
fe07aac79c Define and use lbry.utils.aclosing() in lieu of official contextlib.aclosing(). 2022-05-20 17:23:39 -04:00
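A small usage sketch of that helper (finder here stands in for any async generator, such as the iterative DHT search): aclosing() guarantees the generator's cleanup runs even when the consumer breaks out early; contextlib.aclosing() only exists on Python 3.10+, which is why an in-tree equivalent was added.

    from contextlib import aclosing  # Python 3.10+; lbry.utils.aclosing plays the same role on older versions

    async def first_batch(finder):
        async with aclosing(finder) as gen:
            async for peers in gen:
                return peers         # returning early still triggers gen.aclose()
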
Jonathan Moody
91a6eae831 Fix lint issue in iterative_find.py. 2022-05-20 17:23:39 -04:00
Jonathan Moody
5852fcd287 Don't wait on running_tasks after cancel(). Sometimes a CancelledError exception is received, which is unhelpful, and complicates shutting down the generator. 2022-05-20 17:23:39 -04:00
Jonathan Moody
4767bb9dee Wrap "async for" over IterativeXXXFinder in try/finally ensuring aclose(). 2022-05-20 17:23:39 -04:00
Jonathan Moody
82d7f81f41 Correct call to _aclose() in response to TransportNotConnected. 2022-05-20 17:23:39 -04:00
Jonathan Moody
b036961954 Tighten up IterativeFinder logic to respect max_records better, and wait after task cancel().
Also make IterativeFinder a proper AsyncGenerator. This gives it an officially recognized aclose() method and could help with clean finalization.
2022-05-20 17:23:39 -04:00
Victor Shyba
5c708e1c6f
Merge pull request #3611 from lbryio/fix_hub_url
tests: fix hub url
2022-05-20 18:19:39 -03:00
Victor Shyba
9436600267 tests: bump exchange rate manager test 2022-05-20 17:25:02 -03:00
Victor Shyba
4ab29c4d5f tests: fix hub url 2022-05-20 16:50:09 -03:00
Alex Grin
6944c4a7c4
Update LICENSE 2022-05-17 12:16:00 -04:00
Victor Shyba
2735484fae
Merge pull request #3576 from lbryio/trackers
Add support for announcing and querying LBRY streams over BEP15 (BitTorrent Trackers)
2022-05-13 17:56:20 -03:00
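For background, a hedged sketch of the BEP15 wire format this tracker support builds on (not the SDK's implementation): every exchange starts with a UDP connect request made of a fixed protocol magic, an action code of 0, and a random transaction id.

    import os
    import struct

    def bep15_connect_request():
        # 16-byte connect packet: 8-byte magic, 4-byte action (0 = connect), 4-byte transaction id
        transaction_id = int.from_bytes(os.urandom(4), "big")
        packet = struct.pack(">QII", 0x41727101980, 0, transaction_id)
        return packet, transaction_id
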
Victor Shyba
03b0d5e250 tracker client: extract default timeout and concurrency. Bump concurrency to 100 2022-05-11 21:13:30 -03:00
Victor Shyba
629812337b changes from review 2022-05-11 21:13:30 -03:00
Victor Shyba
e54cc8850c return KademliaPeers directly into the queue instead of exposing Announcement abstraction 2022-05-11 21:13:30 -03:00
Victor Shyba
7cba51ca7d update tests, query with port 0, filter bad ports earlier, make unit tests more reliable 2022-05-11 21:13:30 -03:00
Victor Shyba
3dc145fe68 make peer list query trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
7d560df9fd use same arg name as overridden datagram_received (linting) 2022-05-11 21:13:30 -03:00
Victor Shyba
b3f894e480 add integration test for tracker discovery 2022-05-11 21:13:30 -03:00
Victor Shyba
235cc5dc05 results are indexed by ip, setdefault after resolve 2022-05-11 21:13:30 -03:00
Victor Shyba
c276053301 move server implementation to tracker module 2022-05-11 21:13:30 -03:00
Victor Shyba
2e85e29ef1 peer id PREFIX is a constant 2022-05-11 21:13:30 -03:00
Victor Shyba
1169a02c8b make client server updatable from conf 2022-05-11 21:13:30 -03:00
Victor Shyba
a7cea4082e tracker:log DNS errors as warning instead of trace 2022-05-11 21:13:30 -03:00
Victor Shyba
7e6ea97499 make peer id according to BEP20 2022-05-11 21:13:30 -03:00
Victor Shyba
3c46cc4fdd expire connection id quicker as some trackers have it set low 2022-05-11 21:13:30 -03:00
Victor Shyba
6e5c7a1927 use cache_concurrent to avoid requesting the same connection_id multiple times 2022-05-11 21:13:30 -03:00
Victor Shyba
4e09b35012 remove unused import and dead code 2022-05-11 21:13:30 -03:00
Victor Shyba
16a2023bbd stop tasks before removing transport 2022-05-11 21:13:30 -03:00
Victor Shyba
99fc7178c1 better way to batch announce + handle different intervals for different trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
d4aca89a48 handle multiple results from multiple trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
2918d8c7b4 tracker component is running only if the task is alive 2022-05-11 21:13:30 -03:00
Victor Shyba
407c570f8b tests: lower timeout, add test with bad and good mixed 2022-05-11 21:13:30 -03:00
Victor Shyba
e299a9c159 tests: multiple trackers, simple case 2022-05-11 21:13:30 -03:00
Victor Shyba
cc4a578578 tests: add support for multiple trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
0e4f1eae5b reduce timeout to 10, fix lints 2022-05-11 21:13:30 -03:00
Victor Shyba
eccf0e6234 fix reusing result interval from failed expired attempt 2022-05-11 21:13:30 -03:00
Victor Shyba
a3da041412 fix exceptions on shutdown, stop using cancel_tasks 2022-05-11 21:13:30 -03:00
Victor Shyba
2f1617eee4 less verbose on timeouts, dont count timeouts, fix stop 2022-05-11 21:13:30 -03:00
Victor Shyba
05124d41ae only log when really announcing, stop counting cached ones 2022-05-11 21:13:30 -03:00
Victor Shyba
42fd1c962e stop tracker tasks on shutdown 2022-05-11 21:13:30 -03:00
Victor Shyba
47e432b4bb make it less verbose, only log after all events are fired 2022-05-11 21:13:30 -03:00
Victor Shyba
61c99abcf1 avoid re-adding the same hash when tracker is busy with too many files 2022-05-11 21:13:30 -03:00
Victor Shyba
28fdd62945 move concurrency control to lower layer 2022-05-11 21:13:30 -03:00
Victor Shyba
3855db6c66 pause announcer for 1 minute each round 2022-05-11 21:13:30 -03:00
Victor Shyba
30acde0afc at most 10 announces concurrently 2022-05-11 21:13:30 -03:00
Victor Shyba
2d9c5742c7 cache results, save interval on tracker 2022-05-11 21:13:30 -03:00
Victor Shyba
43e50f7f04 fix subscribe_hash 2022-05-11 21:13:30 -03:00
Victor Shyba
888e9918a6 improve timeout handling 2022-05-11 21:13:30 -03:00
Victor Shyba
9e9a64d989 evented system for tracker announcements 2022-05-11 21:13:30 -03:00
Victor Shyba
7acaecaed2 managed_stream: remove unused imports 2022-05-11 21:13:30 -03:00
Victor Shyba
24eb189b7f skip component on test cli 2022-05-11 21:13:30 -03:00
Victor Shyba
2344aca146 fix component property 2022-05-11 21:13:30 -03:00
Victor Shyba
758f9deafe fix unit tests 2022-05-11 21:13:30 -03:00
Victor Shyba
7b425eb2ac add tracker announcer component 2022-05-11 21:13:30 -03:00
Victor Shyba
30e8728f7f use tracker on download 2022-05-11 21:13:30 -03:00
Victor Shyba
3989eef84b return whole announcement so the caller knows the interval 2022-05-11 21:13:30 -03:00
Victor Shyba
dc6f8c4fc4 add arg to announce stopped, removing the announcement 2022-05-11 21:13:30 -03:00
Victor Shyba
2df8a1d99d make a helper function to announce 2022-05-11 21:13:30 -03:00
Victor Shyba
4ea858fdd3 add new conf: tracker_servers 2022-05-11 21:13:30 -03:00
Victor Shyba
006391dd26 move udp server to test file, add link to BEP15 2022-05-11 21:13:29 -03:00
Victor Shyba
4a0bf8a702 add torrent udp tracker client, server and tests 2022-05-11 21:13:29 -03:00
Victor Shyba
d0e715feb9
Merge pull request #3609 from lbryio/pin_scribe
CI: pin scribe, fix exchange rate manager test
2022-05-11 21:13:00 -03:00
Victor Shyba
fd73412f12 test_exchange_rate_manager: bump value 2022-05-11 20:28:06 -03:00
Victor Shyba
3819552861 try usedevelop=true 2022-05-11 20:14:55 -03:00
Victor Shyba
ca6fd5b7b9 fix scribe pinning 2022-05-11 20:14:44 -03:00
Lex Berezhny
b8867cd18c release.py script changed to use gh auth login for authentication 2022-04-10 23:28:16 -04:00
Lex Berezhny
8209eafc6b v0.108.0 2022-04-10 23:25:15 -04:00
Lex Berezhny
858e72a555
Merge pull request #3595 from lbryio/default_feer_per_name_char
pin scribe to specific version
2022-04-08 13:49:11 -04:00
Lex Berezhny
d3880fffa0 pin scribe to specific version 2022-04-08 13:48:30 -04:00
Lex Berezhny
0a51898722
Merge pull request #3593 from lbryio/default_feer_per_name_char
set the default per character fee for claims to zero
2022-04-08 13:46:54 -04:00
Lex Berezhny
63cef81015 fix scribe server version test 2022-04-08 13:22:51 -04:00
Lex Berezhny
9279865078 add sleep to transaction show test per jack suggestion 2022-04-08 12:59:25 -04:00
Lex Berezhny
fba7fc7aba fix scribe server version test 2022-04-08 12:53:19 -04:00
Lex Berezhny
a3d9d5bce7 fix transaction unit test 2022-04-08 11:05:45 -04:00
Lex Berezhny
23ecbc8ebe set the default per character fee for claims to zero 2022-04-08 10:58:02 -04:00
Lex Berezhny
42b2dbd92e
Merge pull request #3572 from orblivion/json-schema
Add wallet json-schema, validate in one test.
2022-04-08 10:56:58 -04:00
Lex Berezhny
37eb55375a only install jsonschema during testing 2022-04-08 10:56:18 -04:00
Lex Berezhny
94bf357817 cleanup paths 2022-04-08 10:56:18 -04:00
Daniel Krol
eca69391ef Add wallet json-schema, validate in one test. 2022-04-08 10:56:18 -04:00
Lex Berezhny
d0c5b32a90
Merge pull request #3575 from lbryio/spend_time_locked
added `account_deposit` command which is able to deposit time locked transaction into wallet
2022-04-08 10:52:08 -04:00
Lex Berezhny
84ef52cf4d fix redeem scripthash test 2022-04-08 10:11:11 -04:00
Lex Berezhny
8fb14bf713 remove command not available in lbcd 2022-04-08 09:59:22 -04:00
Lex Berezhny
16eb50a291 working jsonrpc_account_deposit 2022-04-08 09:57:15 -04:00
Lex Berezhny
dd503fbb82 set locktime from script 2022-04-08 09:57:15 -04:00
Lex Berezhny
ae79314869 wip 2022-04-08 09:57:15 -04:00
Lex Berezhny
0cbc514a8e account_deposit command added which accepts time locked TXs 2022-04-08 09:57:15 -04:00
Lex Berezhny
5777f3e15c wip 2022-04-08 09:57:15 -04:00
Lex Berezhny
8cdcd770c0
Merge pull request #3590 from lbryio/fix-address_list-pagination
fix pagination with `address_list` command
2022-04-06 09:52:41 -04:00
Lex Berezhny
2d20458bc2 re-use existing constraints cleanup function 2022-04-06 09:09:39 -04:00
zeppi
2bd2088248 bugfix 2022-04-06 09:09:39 -04:00
zeppi
5818270803 fix address_list pagination 2022-04-06 09:09:39 -04:00
Victor Shyba
79a5f0e375 lint 2022-04-05 00:35:48 -03:00
Victor Shyba
c830784f65
Merge pull request #3586 from AlessandroSpallina/master
fix #3530 added error log when tcp port is already in use
2022-04-05 00:04:59 -03:00
Victor Shyba
3fc538104d v0.107.2 2022-03-31 17:19:58 -03:00
AlessandroSpallina
96490fdb15
Merge branch 'master' into master 2022-03-29 13:50:57 +02:00
Victor Shyba
5a0c225c6f v0.107.0 2022-03-28 15:56:06 -03:00
Lex Berezhny
c3e524cb8b
Merge pull request #3588 from lbryio/scribe
move `lbry.wallet.server` to new project called `scribe`, switch from using `lbrycrd` to `lbcd` in integration tests
2022-03-28 00:14:54 -04:00
Jack Robison
9faf6e46ca move lbry.wallet.server to new project called scribe
switch from using lbrycrd to lbcd
2022-03-27 23:33:26 -04:00
Victor Shyba
e89acac235
Merge pull request #3585 from lbryio/fix_blob_db_queries
Fixes bugs on disk space management and stream recovery
2022-03-24 21:01:14 -03:00
Victor Shyba
200761ff13 make added_on a required parameter on BlobInfo, fix callers 2022-03-24 19:51:48 -03:00
Victor Shyba
cb78e95e3d add missing space on query, typo 2022-03-23 13:40:01 -03:00
AlessandroSpallina
f01cf98d62 fix #3530 added error log when tcp port is already in use 2022-03-22 17:17:41 +01:00
Victor Shyba
c9c2495611 if a blob file exists but is pending on db, fix on startup 2022-03-21 21:58:36 -03:00
Victor Shyba
aac72fa512 fix bug where recovery doesnt update blob status 2022-03-21 21:33:33 -03:00
Victor Shyba
c5e2f19dde fix bug where added_on is always 0 for downloads 2022-03-21 04:38:51 -03:00
Victor Shyba
34bd9e5cb4 exclude sd blobs from calculation and make them be picked last on removal 2022-03-21 04:26:27 -03:00
Lex Berezhny
ad489ed606
Merge pull request #3581 from lbryio/deterministic_channel_keys_post_unlock
eagerly load deterministic channel keys immediately after wallet is unlocked
2022-03-14 12:36:04 -04:00
Lex Berezhny
bb541901d9 fix tests 2022-03-13 21:30:38 -04:00
Lex Berezhny
ca4ba19a5e fixes #3577 2022-03-13 20:42:34 -04:00
Victor Shyba
f05943ff79 implement announcer as a consumer task on gather 2022-03-02 13:00:34 -03:00
Victor Shyba
7ded8a1333 make active an explicit ordered dict 2022-03-02 13:00:34 -03:00
Victor Shyba
c2478d4add remove unused search rounds 2022-03-02 13:00:34 -03:00
Victor Shyba
f69747bc89 timeout is now supported on dht tests 2022-03-02 13:00:34 -03:00
Victor Shyba
441cc950aa fix and enable test_blob_announcer 2022-03-02 13:00:34 -03:00
Victor Shyba
a76a0ac8c4 simplify dht mock and restore clock after accelerating 2022-03-02 13:00:34 -03:00
Victor Shyba
8b1009161a better representation of kademliapeer on debug logs 2022-03-02 13:00:34 -03:00
Victor Shyba
868a620e91 add a way to wait announcements to finish so tests are reliable 2022-03-02 13:00:34 -03:00
Victor Shyba
a0e34b0bc8 make timeout handler immune to asyncio time tricks 2022-03-02 13:00:34 -03:00
Victor Shyba
612dbcb2f3 allow running some extra probes for k replacements 2022-03-02 13:00:34 -03:00
Victor Shyba
b3614d965d remove all references to bottoming out 2022-03-02 13:00:34 -03:00
Victor Shyba
5d7137255e no stop condition, let it exhaust 2022-03-02 13:00:34 -03:00
Victor Shyba
6ff867ef55 bottoming out is now warning and no results for peer search 2022-03-02 13:00:34 -03:00
Victor Shyba
c14915df29 don't probe peers too far from the top closest 2022-03-02 13:00:34 -03:00
Victor Shyba
7d4966e2ae use a dict for the active queue 2022-03-02 13:00:34 -03:00
Victor Shyba
3876e0317d log bottom out of peer search in debug, show short key id for find value 2022-03-02 13:00:34 -03:00
Victor Shyba
0b2b10f759 bump bottom out limit of peer search so people can use 100 concurrent announcers 2022-03-02 13:00:34 -03:00
Victor Shyba
9a79b33664 wait until k peers are ready. do not double add peers 2022-03-02 13:00:34 -03:00
Victor Shyba
af1a6edd15 only return good (contacted) peers 2022-03-02 13:00:34 -03:00
Victor Shyba
b78929f4d5 reset closest peer on failure 2022-03-02 13:00:34 -03:00
Victor Shyba
fb6e342043 add peers from shortlist regardless, but check from other nodes 2022-03-02 13:00:34 -03:00
Victor Shyba
0faa2d35da bump split index to 2 2022-03-02 13:00:34 -03:00
Victor Shyba
511e57c231 fix distance sorting and improve logging 2022-03-02 13:00:34 -03:00
Victor Shyba
d762d675c4 closest peer is only ready when it was contacted and isn't known to be bad 2022-03-02 13:00:34 -03:00
Victor Shyba
3fdadee87c dont probe and ignore bad peers 2022-03-02 13:00:34 -03:00
Victor Shyba
1aa4d9d585 simplify, generalize to any size and fix tests 2022-02-28 13:06:51 -03:00
Victor Shyba
8019f4bdb3 stop after finding what to download 2022-02-28 13:06:51 -03:00
Victor Shyba
ca65c1ebc5 replace duplicated code 2022-02-28 13:06:51 -03:00
Victor Shyba
f0e47aae86 add get_colliding_prefix_bits, docs and tests 2022-02-28 13:06:51 -03:00
Victor Shyba
dc7cd545ba extract method and avoid using hash builtin name 2022-02-28 13:06:51 -03:00
Victor Shyba
76bd59d82e extract min_prefix_colliding_bits to a contanst 2022-02-28 13:06:51 -03:00
Victor Shyba
461687ffb4 check that the stored blob is at least 1 prefix byte close to peer id 2022-02-28 13:06:51 -03:00
146 changed files with 4273 additions and 14946 deletions


@@ -1,24 +1,24 @@
 name: ci
-on: ["push", "pull_request"]
+on: ["push", "pull_request", "workflow_dispatch"]
 jobs:
   lint:
     name: lint
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.9'
       - name: extract pip cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
           restore-keys: ${{ runner.os }}-pip-
       - run: pip install --user --upgrade pip wheel
-      - run: pip install -e .[torrent,lint]
+      - run: pip install -e .[lint]
       - run: make lint
   tests-unit:
@@ -26,31 +26,31 @@ jobs:
     strategy:
       matrix:
         os:
-          - ubuntu-latest
+          - ubuntu-20.04
           - macos-latest
           - windows-latest
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.9'
       - name: set pip cache dir
-        id: pip-cache
-        run: echo "::set-output name=dir::$(pip cache dir)"
+        shell: bash
+        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
       - name: extract pip cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
-          path: ${{ steps.pip-cache.outputs.dir }}
+          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
       - id: os-name
-        uses: ASzc/change-string-case-action@v1
+        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
-      - run: pip install --user --upgrade pip wheel
+      - run: python -m pip install --user --upgrade pip wheel
       - if: startsWith(runner.os, 'linux')
-        run: pip install -e .[torrent,test]
+        run: pip install -e .[test]
       - if: startsWith(runner.os, 'linux')
        env:
          HOME: /tmp
@@ -72,7 +72,7 @@ jobs:
   tests-integration:
     name: "tests / integration"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     strategy:
       matrix:
         test:
@@ -81,8 +81,6 @@ jobs:
           - claims
           - takeovers
           - transactions
-          - claims_legacy_search
-          - takeovers_legacy_search
           - other
     steps:
       - name: Configure sysctl limits
@@ -95,16 +93,16 @@ jobs:
         uses: elastic/elastic-github-actions/elasticsearch@master
        with:
          stack-version: 7.12.1
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
        with:
-          python-version: '3.7'
+          python-version: '3.9'
       - if: matrix.test == 'other'
        run: |
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends ffmpeg
       - name: extract pip cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
        with:
          path: ./.tox
          key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
@@ -125,7 +123,7 @@ jobs:
   coverage:
     needs: ["tests-unit", "tests-integration"]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       - name: finalize coverage report submission
        env:
@@ -140,29 +138,29 @@ jobs:
     strategy:
       matrix:
         os:
-          - ubuntu-18.04
+          - ubuntu-20.04
           - macos-latest
           - windows-latest
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v1
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
        with:
-          python-version: '3.7'
+          python-version: '3.9'
       - id: os-name
-        uses: ASzc/change-string-case-action@v1
+        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
       - name: set pip cache dir
-        id: pip-cache
-        run: echo "::set-output name=dir::$(pip cache dir)"
+        shell: bash
+        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
       - name: extract pip cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
+          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
-      - run: pip install pyinstaller==4.4
+      - run: pip install pyinstaller==4.6
       - run: pip install -e .
       - if: startsWith(github.ref, 'refs/tags/v')
        run: python docker/set_build.py
@@ -177,7 +175,7 @@ jobs:
          pip install pywin32==301
          pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet.exe --version
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
        with:
          name: lbrynet-${{ steps.os-name.outputs.lowercase }}
          path: dist/
@@ -186,7 +184,7 @@ jobs:
   release:
     name: "release"
     if: startsWith(github.ref, 'refs/tags/v')
     needs: ["build"]
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v1
       - uses: actions/download-artifact@v2


@@ -7,7 +7,7 @@ on:
 jobs:
   release:
     name: "slack notification"
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       - uses: LoveToKnow/slackify-markdown-action@v1.0.0
         id: markdown


@@ -1,6 +1,6 @@
 The MIT License (MIT)
-Copyright (c) 2015-2020 LBRY Inc
+Copyright (c) 2015-2022 LBRY Inc
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,


@@ -2,6 +2,7 @@ FROM debian:10-slim
 ARG user=lbry
 ARG projects_dir=/home/$user
+ARG db_dir=/database
 ARG DOCKER_TAG
 ARG DOCKER_COMMIT=docker
@@ -27,12 +28,16 @@ RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
 COPY . $projects_dir
 RUN chown -R $user:$user $projects_dir
+RUN mkdir -p $db_dir
+RUN chown -R $user:$user $db_dir
 USER $user
 WORKDIR $projects_dir
+RUN python3 -m pip install -U setuptools pip
 RUN make install
 RUN python3 docker/set_build.py
 RUN rm ~/.cache -rf
+VOLUME $db_dir
 ENTRYPOINT ["python3", "scripts/dht_node.py"]

File diff suppressed because one or more lines are too long


@@ -1,2 +1,2 @@
-__version__ = "0.106.0"
+__version__ = "0.113.0"
 version = tuple(map(int, __version__.split('.')))  # pylint: disable=invalid-name


@@ -87,8 +87,8 @@ class AbstractBlob:
         self.blob_completed_callback = blob_completed_callback
         self.blob_directory = blob_directory
         self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
-        self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
-        self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
+        self.verified: asyncio.Event = asyncio.Event()
+        self.writing: asyncio.Event = asyncio.Event()
         self.readers: typing.List[typing.BinaryIO] = []
         self.added_on = added_on or time.time()
         self.is_mine = is_mine
@@ -201,7 +201,7 @@ class AbstractBlob:
         writer = blob.get_blob_writer()
         writer.write(blob_bytes)
         await blob.verified.wait()
-        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash, added_on, is_mine)
+        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)
     def save_verified_blob(self, verified_bytes: bytes):
         if self.verified.is_set():
@@ -222,7 +222,7 @@ class AbstractBlob:
                         peer_port: typing.Optional[int] = None) -> HashBlobWriter:
         if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
             raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
-        fut = asyncio.Future(loop=self.loop)
+        fut = asyncio.Future()
         writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
         self.writers[(peer_address, peer_port)] = writer


@@ -12,8 +12,8 @@ class BlobInfo:
     ]
     def __init__(
-            self, blob_num: int, length: int, iv: str,
-            blob_hash: typing.Optional[str] = None, added_on=0, is_mine=False):
+            self, blob_num: int, length: int, iv: str, added_on,
+            blob_hash: typing.Optional[str] = None, is_mine=False):
         self.blob_hash = blob_hash
         self.blob_num = blob_num
         self.length = length


@@ -83,6 +83,8 @@ class BlobManager:
         to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
         if to_add:
             self.completed_blob_hashes.update(to_add)
+        # check blobs that aren't set as finished but were seen on disk
+        await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
         if self.config.track_bandwidth:
             self.connection_manager.start()
         return True
@@ -113,9 +115,18 @@ class BlobManager:
                 (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
             )
-    def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
-        """Returns of the blobhashes_to_check, which are valid"""
-        return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
+    async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
+        """Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
+        to_add = []
+        for blob_hash in blob_hashes:
+            if not self.is_blob_verified(blob_hash):
+                continue
+            blob = self.get_blob(blob_hash)
+            to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
+            if len(to_add) > 500:
+                await self.storage.add_blobs(*to_add, finished=True)
+                to_add.clear()
+        return await self.storage.add_blobs(*to_add, finished=True)
     def delete_blob(self, blob_hash: str):
         if not is_valid_blobhash(blob_hash):


@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.buf = b''
         # this is here to handle the race when the downloader is closed right as response_fut gets a result
-        self.closed = asyncio.Event(loop=self.loop)
+        self.closed = asyncio.Event()
     def data_received(self, data: bytes):
         if self.connection_manager:
@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
             self.transport.write(msg)
         if self.connection_manager:
             self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
-        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
+        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
         availability_response = response.get_availability_response()
         price_response = response.get_price_response()
         blob_response = response.get_blob_response()
@@ -151,7 +151,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
                  f" timeout in {self.peer_timeout}"
             log.debug(msg)
             msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
-            await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
+            await asyncio.wait_for(self.writer.finished, self.peer_timeout)
             # wait for the io to finish
             await self.blob.verified.wait()
             log.info("%s at %fMB/s", msg,
@@ -187,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         try:
             self._blob_bytes_received = 0
             self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
-            self._response_fut = asyncio.Future(loop=self.loop)
+            self._response_fut = asyncio.Future()
             return await self._download_blob()
         except OSError:
             # i'm not sure how to fix this race condition - jack
@@ -244,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
     try:
         if not connected_protocol:
             await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
-                                   peer_connect_timeout, loop=loop)
+                                   peer_connect_timeout)
             connected_protocol = protocol
         if blob is None or blob.get_is_verified() or not blob.is_writeable():
             # blob is None happens when we are just opening a connection


@@ -30,7 +30,7 @@ class BlobDownloader:
         self.failures: typing.Dict['KademliaPeer', int] = {}
         self.connection_failures: typing.Set['KademliaPeer'] = set()
         self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
-        self.is_running = asyncio.Event(loop=self.loop)
+        self.is_running = asyncio.Event()
     def should_race_continue(self, blob: 'AbstractBlob'):
         max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@@ -64,8 +64,8 @@ class BlobDownloader:
             self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1
     async def new_peer_or_finished(self):
-        active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
-        await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
+        active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
+        await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')
     def cleanup_active(self):
         if not self.active_connections and not self.connections:
@@ -126,7 +126,7 @@ class BlobDownloader:
 async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
                         blob_hash: str) -> 'AbstractBlob':
-    search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
+    search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
     search_queue.put_nowait(blob_hash)
     peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
     fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)


@ -1,6 +1,7 @@
import asyncio import asyncio
import binascii import binascii
import logging import logging
import socket
import typing import typing
from json.decoder import JSONDecodeError from json.decoder import JSONDecodeError
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
@ -24,19 +25,19 @@ class BlobServerProtocol(asyncio.Protocol):
self.idle_timeout = idle_timeout self.idle_timeout = idle_timeout
self.transfer_timeout = transfer_timeout self.transfer_timeout = transfer_timeout
self.server_task: typing.Optional[asyncio.Task] = None self.server_task: typing.Optional[asyncio.Task] = None
self.started_listening = asyncio.Event(loop=self.loop) self.started_listening = asyncio.Event()
self.buf = b'' self.buf = b''
self.transport: typing.Optional[asyncio.Transport] = None self.transport: typing.Optional[asyncio.Transport] = None
self.lbrycrd_address = lbrycrd_address self.lbrycrd_address = lbrycrd_address
self.peer_address_and_port: typing.Optional[str] = None self.peer_address_and_port: typing.Optional[str] = None
self.started_transfer = asyncio.Event(loop=self.loop) self.started_transfer = asyncio.Event()
self.transfer_finished = asyncio.Event(loop=self.loop) self.transfer_finished = asyncio.Event()
self.close_on_idle_task: typing.Optional[asyncio.Task] = None self.close_on_idle_task: typing.Optional[asyncio.Task] = None
async def close_on_idle(self): async def close_on_idle(self):
while self.transport: while self.transport:
try: try:
await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop) await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
except asyncio.TimeoutError: except asyncio.TimeoutError:
log.debug("closing idle connection from %s", self.peer_address_and_port) log.debug("closing idle connection from %s", self.peer_address_and_port)
return self.close() return self.close()
@ -100,7 +101,7 @@ class BlobServerProtocol(asyncio.Protocol):
log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port) log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
self.started_transfer.set() self.started_transfer.set()
try: try:
sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop) sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
if sent and sent > 0: if sent and sent > 0:
self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent) self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port) log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
@@ -137,7 +138,7 @@ class BlobServerProtocol(asyncio.Protocol):
             try:
                 request = BlobRequest.deserialize(self.buf + data)
                 self.buf = remainder
-            except JSONDecodeError:
+            except (UnicodeDecodeError, JSONDecodeError):
                 log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
                           len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
                 self.close()
@@ -156,7 +157,7 @@ class BlobServer:
         self.loop = loop
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.lbrycrd_address = lbrycrd_address
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
@@ -167,6 +168,13 @@ class BlobServer:
             raise Exception("already running")

         async def _start_server():
+            # checking if the port is in use
+            # thx https://stackoverflow.com/a/52872579
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                if s.connect_ex(('localhost', port)) == 0:
+                    # the port is already in use!
+                    log.error("Failed to bind TCP %s:%d", interface, port)
+
             server = await self.loop.create_server(
                 lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
                                                    self.idle_timeout, self.transfer_timeout),
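The new pre-bind check only logs when something is already listening on the port. As a standalone helper the same `connect_ex` probe looks roughly like the sketch below (hypothetical `port_in_use` name; it is only a best-effort check, since another process can still take the port between the probe and `create_server`):

```python
import socket

def port_in_use(host: str, port: int) -> bool:
    """Return True if something already accepts TCP connections on host:port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1.0)
        # connect_ex() returns 0 when the connection succeeds, i.e. the port is taken
        return s.connect_ex((host, port)) == 0

if port_in_use("localhost", 5567):
    print("TCP 5567 is already in use, pick another port")
```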

View file

@@ -622,7 +622,11 @@ class Config(CLIConfig):
         "Routing table bucket index below which we always split the bucket if given a new key to add to it and "
         "the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
         "will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
-        "use.", 1
+        "use.", 2
     )
+    is_bootstrap_node = Toggle(
+        "When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
+        "add as many peers as possible and better help first-runs.", False
+    )

     # protocol timeouts
@@ -681,6 +685,14 @@ class Config(CLIConfig):
         ('cdn.reflector.lbry.com', 5567)
     ])

+    tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
+        ('tracker.lbry.com', 9252),
+        ('tracker.lbry.grin.io', 9252),
+        ('tracker.lbry.pigg.es', 9252),
+        ('tracker.lizard.technology', 9252),
+        ('s1.lbry.network', 9252),
+    ])
+
     lbryum_servers = Servers("SPV wallet servers", [
         ('spv11.lbry.com', 50001),
         ('spv12.lbry.com', 50001),
@@ -691,14 +703,20 @@ class Config(CLIConfig):
         ('spv17.lbry.com', 50001),
         ('spv18.lbry.com', 50001),
         ('spv19.lbry.com', 50001),
+        ('hub.lbry.grin.io', 50001),
+        ('hub.lizard.technology', 50001),
+        ('s1.lbry.network', 50001),
     ])
     known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
         ('dht.lbry.grin.io', 4444),  # Grin
         ('dht.lbry.madiator.com', 4444),  # Madiator
+        ('dht.lbry.pigg.es', 4444),  # Pigges
         ('lbrynet1.lbry.com', 4444),  # US EAST
         ('lbrynet2.lbry.com', 4444),  # US WEST
         ('lbrynet3.lbry.com', 4444),  # EU
-        ('lbrynet4.lbry.com', 4444)  # ASIA
+        ('lbrynet4.lbry.com', 4444),  # ASIA
+        ('dht.lizard.technology', 4444),  # Jack
+        ('s2.lbry.network', 4444),
     ])

     # blockchain

View file

@@ -67,7 +67,7 @@ class ConnectionManager:
             while True:
                 last = time.perf_counter()
-                await asyncio.sleep(0.1, loop=self.loop)
+                await asyncio.sleep(0.1)
                 self._status['incoming_bps'].clear()
                 self._status['outgoing_bps'].clear()
                 now = time.perf_counter()

View file

@@ -27,27 +27,28 @@ class BlobAnnouncer:
         self.storage = storage
         self.announce_task: asyncio.Task = None
         self.announce_queue: typing.List[str] = []
+        self._done = asyncio.Event()
+        self.announced = set()

-    async def _submit_announcement(self, blob_hash):
-        try:
-            peers = len(await self.node.announce_blob(blob_hash))
-            self.announcements_sent_metric.labels(peers=peers, error=False).inc()
-            if peers > 4:
-                return blob_hash
-            else:
-                log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
-        except Exception as err:
-            self.announcements_sent_metric.labels(peers=0, error=True).inc()
-            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                raise err
-            log.warning("error announcing %s: %s", blob_hash[:8], str(err))
+    async def _run_consumer(self):
+        while self.announce_queue:
+            try:
+                blob_hash = self.announce_queue.pop()
+                peers = len(await self.node.announce_blob(blob_hash))
+                self.announcements_sent_metric.labels(peers=peers, error=False).inc()
+                if peers > 4:
+                    self.announced.add(blob_hash)
+                else:
+                    log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
+            except Exception as err:
+                self.announcements_sent_metric.labels(peers=0, error=True).inc()
+                log.warning("error announcing %s: %s", blob_hash[:8], str(err))

     async def _announce(self, batch_size: typing.Optional[int] = 10):
         while batch_size:
             if not self.node.joined.is_set():
                 await self.node.joined.wait()
-            await asyncio.sleep(60, loop=self.loop)
+            await asyncio.sleep(60)
             if not self.node.protocol.routing_table.get_peers():
                 log.warning("No peers in DHT, announce round skipped")
                 continue
@@ -56,14 +57,14 @@ class BlobAnnouncer:
             log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
             while len(self.announce_queue) > 0:
                 log.info("%i blobs to announce", len(self.announce_queue))
-                announced = await asyncio.gather(*[
-                    self._submit_announcement(
-                        self.announce_queue.pop()) for _ in range(batch_size) if self.announce_queue
-                ], loop=self.loop)
-                announced = list(filter(None, announced))
+                await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
+                announced = list(filter(None, self.announced))
                 if announced:
                     await self.storage.update_last_announced_blobs(announced)
                     log.info("announced %i blobs", len(announced))
+            self.announced.clear()
+            self._done.set()
+            self._done.clear()

     def start(self, batch_size: typing.Optional[int] = 10):
         assert not self.announce_task or self.announce_task.done(), "already running"
@@ -72,3 +73,6 @@ class BlobAnnouncer:
     def stop(self):
         if self.announce_task and not self.announce_task.done():
             self.announce_task.cancel()
+
+    def wait(self):
+        return self._done.wait()
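The announcer now drains one shared queue with a fixed-size pool of consumer coroutines instead of building a fresh task per blob. A self-contained sketch of that fan-out pattern, with a hypothetical `announce()` standing in for `node.announce_blob()`:

```python
import asyncio
import random

async def announce(blob_hash: str) -> int:
    # hypothetical stand-in for node.announce_blob(); returns a peer count
    await asyncio.sleep(random.random() / 10)
    return random.randint(0, 8)

async def run_consumer(queue: list, announced: set):
    # each consumer keeps popping until the shared queue is drained
    while queue:
        blob_hash = queue.pop()
        if await announce(blob_hash) > 4:
            announced.add(blob_hash)

async def main():
    queue = [f"blob{i}" for i in range(25)]
    announced = set()
    # launch a fixed-size pool of consumers over one shared queue
    await asyncio.gather(*[run_consumer(queue, announced) for _ in range(10)])
    print(f"announced {len(announced)} of 25")

asyncio.run(main())
```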

View file

@@ -20,7 +20,6 @@
 MAYBE_PING_DELAY = 300  # 5 minutes
 CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
 RPC_ID_LENGTH = 20
 PROTOCOL_VERSION = 1
-BOTTOM_OUT_LIMIT = 3
 MSG_SIZE_LIMIT = 1400

View file

@@ -5,7 +5,7 @@ import socket

 from prometheus_client import Gauge

-from lbry.utils import resolve_host
+from lbry.utils import aclosing, resolve_host
 from lbry.dht import constants
 from lbry.dht.peer import make_kademlia_peer
 from lbry.dht.protocol.distance import Distance
@@ -30,14 +30,14 @@ class Node:
     )

     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
                  internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX,
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
                  storage: typing.Optional['SQLiteStorage'] = None):
         self.loop = loop
         self.internal_udp_port = internal_udp_port
         self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
-                                         split_buckets_under_index)
+                                         split_buckets_under_index, is_bootstrap_node)
         self.listening_port: asyncio.DatagramTransport = None
-        self.joined = asyncio.Event(loop=self.loop)
+        self.joined = asyncio.Event()
         self._join_task: asyncio.Task = None
         self._refresh_task: asyncio.Task = None
         self._storage = storage
@@ -70,13 +70,6 @@ class Node:
             # get ids falling in the midpoint of each bucket that hasn't been recently updated
             node_ids = self.protocol.routing_table.get_refresh_list(0, True)
-            # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
-            # populate/split the buckets further
-            buckets_with_contacts = self.protocol.routing_table.buckets_with_contacts()
-            if buckets_with_contacts <= 3:
-                for i in range(buckets_with_contacts):
-                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))
-                    node_ids.append(self.protocol.routing_table.random_id_in_bucket_range(i))

             if self.protocol.routing_table.get_peers():
                 # if we have node ids to look up, perform the iterative search until we have k results
@@ -86,7 +79,7 @@ class Node:
             else:
                 if force_once:
                     break
-                fut = asyncio.Future(loop=self.loop)
+                fut = asyncio.Future()
                 self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
                 await fut
                 continue
@@ -100,7 +93,7 @@ class Node:
             if force_once:
                 break

-            fut = asyncio.Future(loop=self.loop)
+            fut = asyncio.Future()
             self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
             await fut
@@ -115,7 +108,7 @@ class Node:
             for peer in peers:
                 log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
             stored_to_tup = await asyncio.gather(
-                *(self.protocol.store_to_peer(hash_value, peer) for peer in peers), loop=self.loop
+                *(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
             )
             stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
             if stored_to:
@@ -189,39 +182,36 @@ class Node:
                         for address, udp_port in known_node_urls or []
                     ]))
                 except socket.gaierror:
-                    await asyncio.sleep(30, loop=self.loop)
+                    await asyncio.sleep(30)
                     continue

             self.protocol.peer_manager.reset()
             self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
             await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)

-            await asyncio.sleep(1, loop=self.loop)
+            await asyncio.sleep(1)

     def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
         self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))

     def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
-                                  bottom_out_limit: int = constants.BOTTOM_OUT_LIMIT,
                                   max_results: int = constants.K) -> IterativeNodeFinder:
-
-        return IterativeNodeFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
-                                   key, bottom_out_limit, max_results, None, shortlist)
+        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
+        return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)

     def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
-                                   bottom_out_limit: int = 40,
                                    max_results: int = -1) -> IterativeValueFinder:
-
-        return IterativeValueFinder(self.loop, self.protocol.peer_manager, self.protocol.routing_table, self.protocol,
-                                    key, bottom_out_limit, max_results, None, shortlist)
+        shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
+        return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)

     async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
-                          bottom_out_limit=20, shortlist: typing.Optional[typing.List['KademliaPeer']] = None
+                          shortlist: typing.Optional[typing.List['KademliaPeer']] = None
                           ) -> typing.List['KademliaPeer']:
         peers = []
-        async for iteration_peers in self.get_iterative_node_finder(
-                node_id, shortlist=shortlist, bottom_out_limit=bottom_out_limit, max_results=max_results):
-            peers.extend(iteration_peers)
+        async with aclosing(self.get_iterative_node_finder(
+                node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
+            async for iteration_peers in node_finder:
+                peers.extend(iteration_peers)
         distance = Distance(node_id)
         peers.sort(key=lambda peer: distance(peer.node_id))
         return peers[:count]
@@ -247,41 +237,41 @@ class Node:
         # prioritize peers who reply to a dht ping first
         # this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers

-        async for results in self.get_iterative_value_finder(bytes.fromhex(blob_hash)):
+        async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
+            async for results in value_finder:
                to_put = []
                for peer in results:
                    if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
                        continue
                    is_good = self.protocol.peer_manager.peer_is_good(peer)
                    if is_good:
                        # the peer has replied recently over UDP, it can probably be reached on the TCP port
                        to_put.append(peer)
                    elif is_good is None:
                        if not peer.udp_port:
                            # TODO: use the same port for TCP and UDP
                            # the udp port must be guessed
                            # default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
                            # including on a network with several nodes, then assume the udp port is proportionately
                            # based on a starting port of 4444
                            udp_port_to_try = peer.tcp_port
                            if 3400 > peer.tcp_port > 3332:
                                udp_port_to_try = (peer.tcp_port - 3333) + 4444
                            self.loop.create_task(put_into_result_queue_after_pong(
                                make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
                            ))
                        else:
                            self.loop.create_task(put_into_result_queue_after_pong(peer))
                    else:
                        # the peer is known to be bad/unreachable, skip trying to connect to it over TCP
                        log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
                if to_put:
                    result_queue.put_nowait(to_put)

     def accumulate_peers(self, search_queue: asyncio.Queue,
                          peer_queue: typing.Optional[asyncio.Queue] = None
                          ) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
-        queue = peer_queue or asyncio.Queue(loop=self.loop)
+        queue = peer_queue or asyncio.Queue()
         return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
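`peer_search()` and the blob-peer loop above now wrap the iterative finders in `aclosing(...)` (imported from `lbry.utils`) so the finder's `aclose()` always runs, even when the consumer leaves the `async for` early. A minimal sketch of the same pattern using the stdlib `contextlib.aclosing`, available on Python 3.10+:

```python
import asyncio
from contextlib import aclosing  # the repository imports an equivalent helper from lbry.utils

async def numbers():
    try:
        for i in range(10):
            yield i
            await asyncio.sleep(0)
    finally:
        # cleanup runs even if the consumer stops early
        print("generator closed")

async def main():
    async with aclosing(numbers()) as gen:
        async for n in gen:
            if n == 3:
                break  # aclosing() guarantees aclose() is awaited here

asyncio.run(main())
```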

View file

@@ -100,6 +100,9 @@ class PeerManager:
         self._node_id_reverse_mapping[node_id] = (address, udp_port)
         self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())

+    def get_node_id_for_endpoint(self, address, port):
+        return self._node_id_mapping.get((address, port))
+
     def prune(self):  # TODO: periodically call this
         now = self._loop.time()
         to_pop = []
@@ -150,9 +153,10 @@ class PeerManager:
     def peer_is_good(self, peer: 'KademliaPeer'):
         return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)

-    def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
-        node_id, address, tcp_port = decode_compact_address(compact_address)
-        return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
+
+def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
+    node_id, address, tcp_port = decode_compact_address(compact_address)
+    return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)

 @dataclass(unsafe_hash=True)
@@ -190,3 +194,6 @@ class KademliaPeer:
     def compact_ip(self):
         return make_compact_ip(self.address)
+
+    def __str__(self):
+        return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"

View file

@@ -1,17 +1,17 @@
 import asyncio
 from itertools import chain
-from collections import defaultdict
+from collections import defaultdict, OrderedDict
+from collections.abc import AsyncIterator
 import typing
 import logging
 from typing import TYPE_CHECKING

 from lbry.dht import constants
 from lbry.dht.error import RemoteException, TransportNotConnected
 from lbry.dht.protocol.distance import Distance
-from lbry.dht.peer import make_kademlia_peer
+from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
 from lbry.dht.serialization.datagram import PAGE_KEY

 if TYPE_CHECKING:
-    from lbry.dht.protocol.routing_table import TreeRoutingTable
     from lbry.dht.protocol.protocol import KademliaProtocol
     from lbry.dht.peer import PeerManager, KademliaPeer
@@ -26,6 +26,15 @@ class FindResponse:
     def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
         raise NotImplementedError()

+    def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
+        for contact_triple in self.get_close_triples():
+            node_id, address, udp_port = contact_triple
+            try:
+                yield make_kademlia_peer(node_id, address, udp_port)
+            except ValueError:
+                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
+                            peer_info.udp_port, address, udp_port)
+
 class FindNodeResponse(FindResponse):
     def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
@@ -56,57 +65,33 @@ class FindValueResponse(FindResponse):
         return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]

-def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
-                  shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
-    """
-    If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
-    :param routing_table: a TreeRoutingTable
-    :param key: a 48 byte hash
-    :param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
-                      peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
-    """
-    if len(key) != constants.HASH_LENGTH:
-        raise ValueError("invalid key length: %i" % len(key))
-    return shortlist or routing_table.find_close_peers(key)
-
-class IterativeFinder:
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+class IterativeFinder(AsyncIterator):
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
         if len(key) != constants.HASH_LENGTH:
             raise ValueError("invalid key length: %i" % len(key))
         self.loop = loop
-        self.peer_manager = peer_manager
-        self.routing_table = routing_table
+        self.peer_manager = protocol.peer_manager
         self.protocol = protocol
         self.key = key
-        self.bottom_out_limit = bottom_out_limit
-        self.max_results = max_results
-        self.exclude = exclude or []
+        self.max_results = max(constants.K, max_results)

-        self.active: typing.Set['KademliaPeer'] = set()
+        self.active: typing.Dict['KademliaPeer', int] = OrderedDict()  # peer: distance, sorted
         self.contacted: typing.Set['KademliaPeer'] = set()
         self.distance = Distance(key)

-        self.closest_peer: typing.Optional['KademliaPeer'] = None
-        self.prev_closest_peer: typing.Optional['KademliaPeer'] = None
-
-        self.iteration_queue = asyncio.Queue(loop=self.loop)
+        self.iteration_queue = asyncio.Queue()

-        self.running_probes: typing.Set[asyncio.Task] = set()
+        self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
         self.iteration_count = 0
-        self.bottom_out_count = 0
         self.running = False
         self.tasks: typing.List[asyncio.Task] = []
-        self.delayed_calls: typing.List[asyncio.Handle] = []
-        for peer in get_shortlist(routing_table, key, shortlist):
+        for peer in shortlist:
             if peer.node_id:
-                self._add_active(peer)
+                self._add_active(peer, force=True)
             else:
                 # seed nodes
                 self._schedule_probe(peer)
@@ -138,66 +123,79 @@ class IterativeFinder:
         """
         return []

-    def _is_closer(self, peer: 'KademliaPeer') -> bool:
-        return not self.closest_peer or self.distance.is_closer(peer.node_id, self.closest_peer.node_id)
-
-    def _add_active(self, peer):
+    def _add_active(self, peer, force=False):
+        if not force and self.peer_manager.peer_is_good(peer) is False:
+            return
+        if peer in self.contacted:
+            return
         if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
-            self.active.add(peer)
-            if self._is_closer(peer):
-                self.prev_closest_peer = self.closest_peer
-                self.closest_peer = peer
+            self.active[peer] = self.distance(peer.node_id)
+            self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))

     async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
         self._add_active(peer)
-        for contact_triple in response.get_close_triples():
-            node_id, address, udp_port = contact_triple
-            try:
-                self._add_active(make_kademlia_peer(node_id, address, udp_port))
-            except ValueError:
-                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer.address,
-                            peer.udp_port, address, udp_port)
+        for new_peer in response.get_close_kademlia_peers(peer):
+            self._add_active(new_peer)
         self.check_result_ready(response)
+        self._log_state(reason="check result")
+
+    def _reset_closest(self, peer):
+        if peer in self.active:
+            del self.active[peer]

     async def _send_probe(self, peer: 'KademliaPeer'):
         try:
             response = await self.send_probe(peer)
         except asyncio.TimeoutError:
-            self.active.discard(peer)
+            self._reset_closest(peer)
             return
+        except asyncio.CancelledError:
+            log.debug("%s[%x] cancelled probe",
+                      type(self).__name__, id(self))
+            raise
         except ValueError as err:
             log.warning(str(err))
-            self.active.discard(peer)
+            self._reset_closest(peer)
             return
         except TransportNotConnected:
-            return self.aclose()
+            await self._aclose(reason="not connected")
+            return
         except RemoteException:
+            self._reset_closest(peer)
             return
         return await self._handle_probe_result(peer, response)

-    async def _search_round(self):
+    def _search_round(self):
         """
         Send up to constants.alpha (5) probes to closest active peers
         """
         added = 0
-        to_probe = list(self.active - self.contacted)
-        to_probe.sort(key=lambda peer: self.distance(self.key))
-        for peer in to_probe:
-            if added >= constants.ALPHA:
+        for index, peer in enumerate(self.active.keys()):
+            if index == 0:
+                log.debug("%s[%x] closest to probe: %s",
+                          type(self).__name__, id(self),
+                          peer.node_id.hex()[:8])
+            if peer in self.contacted:
+                continue
+            if len(self.running_probes) >= constants.ALPHA:
+                break
+            if index > (constants.K + len(self.running_probes)):
                 break
             origin_address = (peer.address, peer.udp_port)
-            if origin_address in self.exclude:
-                continue
             if peer.node_id == self.protocol.node_id:
                 continue
             if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
                 continue
             self._schedule_probe(peer)
             added += 1
-            log.debug("running %d probes for key %s", len(self.running_probes), self.key.hex()[:8])
+            log.debug("%s[%x] running %d probes for key %s",
+                      type(self).__name__, id(self),
+                      len(self.running_probes), self.key.hex()[:8])
         if not added and not self.running_probes:
-            log.debug("search for %s exhausted", self.key.hex()[:8])
+            log.debug("%s[%x] search for %s exhausted",
+                      type(self).__name__, id(self),
+                      self.key.hex()[:8])
            self.search_exhausted()
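`active` is now an OrderedDict mapping each peer to its XOR distance from the search key and re-sorted on every insert, so `_search_round()` always probes the closest known candidates first. A toy illustration of that bookkeeping (short ids instead of 48-byte node ids):

```python
from collections import OrderedDict

def xor_distance(a: bytes, b: bytes) -> int:
    return int.from_bytes(a, "big") ^ int.from_bytes(b, "big")

target = bytes.fromhex("aa" * 4)
active = OrderedDict()  # peer id -> distance to target, kept sorted ascending

for peer_id in (bytes.fromhex(h * 4) for h in ("ff", "ab", "01")):
    active[peer_id] = xor_distance(peer_id, target)
    # re-sort after every insert so iteration always starts at the closest peer
    active = OrderedDict(sorted(active.items(), key=lambda item: item[1]))

print([p.hex() for p in active])  # closest first: ['abababab', 'ffffffff', '01010101']
```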
     def _schedule_probe(self, peer: 'KademliaPeer'):
@@ -206,33 +204,24 @@ class IterativeFinder:
         t = self.loop.create_task(self._send_probe(peer))

         def callback(_):
-            self.running_probes.difference_update({
-                probe for probe in self.running_probes if probe.done() or probe == t
-            })
-            if not self.running_probes:
-                self.tasks.append(self.loop.create_task(self._search_task(0.0)))
+            self.running_probes.pop(peer, None)
+            if self.running:
+                self._search_round()

         t.add_done_callback(callback)
-        self.running_probes.add(t)
+        self.running_probes[peer] = t

-    async def _search_task(self, delay: typing.Optional[float] = constants.ITERATIVE_LOOKUP_DELAY):
-        try:
-            if self.running:
-                await self._search_round()
-            if self.running:
-                self.delayed_calls.append(self.loop.call_later(delay, self._search))
-        except (asyncio.CancelledError, StopAsyncIteration, TransportNotConnected):
-            if self.running:
-                self.loop.call_soon(self.aclose)
-
-    def _search(self):
-        self.tasks.append(self.loop.create_task(self._search_task()))
+    def _log_state(self, reason="?"):
+        log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
+                  type(self).__name__, id(self), self.key.hex()[:8],
+                  reason, len(self.active), len(self.contacted),
+                  self.iteration_count, self.iteration_queue.qsize())

     def __aiter__(self):
         if self.running:
             raise Exception("already running")
         self.running = True
-        self._search()
+        self.loop.call_soon(self._search_round)
         return self

     async def __anext__(self) -> typing.List['KademliaPeer']:
@@ -245,28 +234,37 @@ class IterativeFinder:
                 raise StopAsyncIteration
             self.iteration_count += 1
             return result
-        except (asyncio.CancelledError, StopAsyncIteration):
-            self.loop.call_soon(self.aclose)
+        except asyncio.CancelledError:
+            await self._aclose(reason="cancelled")
+            raise
+        except StopAsyncIteration:
+            await self._aclose(reason="no more results")
             raise

-    def aclose(self):
+    async def _aclose(self, reason="?"):
+        log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
+                  type(self).__name__, id(self), self.key.hex()[:8],
+                  reason, len(self.active), len(self.contacted),
+                  self.iteration_count, self.iteration_queue.qsize())
         self.running = False
         self.iteration_queue.put_nowait(None)
-        for task in chain(self.tasks, self.running_probes, self.delayed_calls):
+        for task in chain(self.tasks, self.running_probes.values()):
             task.cancel()
         self.tasks.clear()
         self.running_probes.clear()
-        self.delayed_calls.clear()
+
+    async def aclose(self):
+        if self.running:
+            await self._aclose(reason="aclose")
+        log.debug("%s[%x] [%s] async close completed",
+                  type(self).__name__, id(self), self.key.hex()[:8])
 class IterativeNodeFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
-                         shortlist)
+        super().__init__(loop, protocol, key, max_results, shortlist)
         self.yielded_peers: typing.Set['KademliaPeer'] = set()

     async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
@@ -276,14 +274,14 @@ class IterativeNodeFinder(IterativeFinder):
         return FindNodeResponse(self.key, response)

     def search_exhausted(self):
-        self.put_result(self.active, finish=True)
+        self.put_result(self.active.keys(), finish=True)

     def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
         not_yet_yielded = [
             peer for peer in from_iter
             if peer not in self.yielded_peers
             and peer.node_id != self.protocol.node_id
-            and self.peer_manager.peer_is_good(peer) is not False
+            and self.peer_manager.peer_is_good(peer) is True  # return only peers who answered
         ]
         not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
         to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
@@ -298,27 +296,15 @@ class IterativeNodeFinder(IterativeFinder):
         if found:
             log.debug("found")
-            return self.put_result(self.active, finish=True)
-        if self.prev_closest_peer and self.closest_peer and not self._is_closer(self.prev_closest_peer):
-            # log.info("improving, %i %i %i %i %i", len(self.shortlist), len(self.active), len(self.contacted),
-            #          self.bottom_out_count, self.iteration_count)
-            self.bottom_out_count = 0
-        elif self.prev_closest_peer and self.closest_peer:
-            self.bottom_out_count += 1
-            log.info("bottom out %i %i %i", len(self.active), len(self.contacted), self.bottom_out_count)
-        if self.bottom_out_count >= self.bottom_out_limit or self.iteration_count >= self.bottom_out_limit:
-            log.info("limit hit")
-            self.put_result(self.active, True)
+            return self.put_result(self.active.keys(), finish=True)
 class IterativeValueFinder(IterativeFinder):
-    def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager',
-                 routing_table: 'TreeRoutingTable', protocol: 'KademliaProtocol', key: bytes,
-                 bottom_out_limit: typing.Optional[int] = 2, max_results: typing.Optional[int] = constants.K,
-                 exclude: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
+    def __init__(self, loop: asyncio.AbstractEventLoop,
+                 protocol: 'KademliaProtocol', key: bytes,
+                 max_results: typing.Optional[int] = constants.K,
                  shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
-        super().__init__(loop, peer_manager, routing_table, protocol, key, bottom_out_limit, max_results, exclude,
-                         shortlist)
+        super().__init__(loop, protocol, key, max_results, shortlist)
         self.blob_peers: typing.Set['KademliaPeer'] = set()
         # this tracks the index of the most recent page we requested from each peer
         self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
@@ -337,7 +323,7 @@ class IterativeValueFinder(IterativeFinder):
             decoded_peers = set()
             for compact_addr in parsed.found_compact_addresses:
                 try:
-                    decoded_peers.add(self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr))
+                    decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
                 except ValueError:
                     log.warning("misbehaving peer %s:%i returned invalid peer for blob",
                                 peer.address, peer.udp_port)
@@ -359,26 +345,15 @@ class IterativeValueFinder(IterativeFinder):
     def check_result_ready(self, response: FindValueResponse):
         if response.found:
-            blob_peers = [self.peer_manager.decode_tcp_peer_from_compact_address(compact_addr)
+            blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
                           for compact_addr in response.found_compact_addresses]
             to_yield = []
-            self.bottom_out_count = 0
             for blob_peer in blob_peers:
                 if blob_peer not in self.blob_peers:
                     self.blob_peers.add(blob_peer)
                     to_yield.append(blob_peer)
             if to_yield:
-                # log.info("found %i new peers for blob", len(to_yield))
                 self.iteration_queue.put_nowait(to_yield)
-                # if self.max_results and len(self.blob_peers) >= self.max_results:
-                #     log.info("enough blob peers found")
-                #     if not self.finished.is_set():
-                #         self.finished.set()
-        elif self.prev_closest_peer and self.closest_peer:
-            self.bottom_out_count += 1
-            if self.bottom_out_count >= self.bottom_out_limit:
-                log.info("blob peer search bottomed out")
-                self.iteration_queue.put_nowait(None)

     def get_initial_result(self) -> typing.List['KademliaPeer']:
         if self.protocol.data_store.has_peers_for_blob(self.key):

View file

@@ -218,6 +218,10 @@ class PingQueue:
     def running(self):
         return self._running

+    @property
+    def busy(self):
+        return self._running and (any(self._running_pings) or any(self._pending_contacts))
+
     def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
         delay = delay if delay is not None else self._default_delay
         now = self._loop.time()
@@ -229,7 +233,7 @@ class PingQueue:
         async def ping_task():
             try:
                 if self._protocol.peer_manager.peer_is_good(peer):
-                    if peer not in self._protocol.routing_table.get_peers():
+                    if not self._protocol.routing_table.get_peer(peer.node_id):
                         self._protocol.add_peer(peer)
                     return
                 await self._protocol.get_rpc_peer(peer).ping()
@@ -249,7 +253,7 @@ class PingQueue:
                     del self._pending_contacts[peer]
                     self.maybe_ping(peer)
                 break
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     def start(self):
         assert not self._running
@@ -294,7 +298,7 @@ class KademliaProtocol(DatagramProtocol):

     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
                  udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False):
         self.peer_manager = peer_manager
         self.loop = loop
         self.node_id = node_id
@@ -309,15 +313,16 @@ class KademliaProtocol(DatagramProtocol):
         self.transport: DatagramTransport = None
         self.old_token_secret = constants.generate_id()
         self.token_secret = constants.generate_id()
-        self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
+        self.routing_table = TreeRoutingTable(
+            self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
         self.data_store = DictDataStore(self.loop, self.peer_manager)
         self.ping_queue = PingQueue(self.loop, self)
         self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
         self.rpc_timeout = rpc_timeout
-        self._split_lock = asyncio.Lock(loop=self.loop)
+        self._split_lock = asyncio.Lock()
         self._to_remove: typing.Set['KademliaPeer'] = set()
         self._to_add: typing.Set['KademliaPeer'] = set()
-        self._wakeup_routing_task = asyncio.Event(loop=self.loop)
+        self._wakeup_routing_task = asyncio.Event()
         self.maintaing_routing_task: typing.Optional[asyncio.Task] = None

     @functools.lru_cache(128)
@@ -356,72 +361,10 @@ class KademliaProtocol(DatagramProtocol):
         return args, {}

     async def _add_peer(self, peer: 'KademliaPeer'):
-        if not peer.node_id:
-            log.warning("Tried adding a peer with no node id!")
-            return False
-        for my_peer in self.routing_table.get_peers():
-            if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
-                self.routing_table.remove_peer(my_peer)
-                self.routing_table.join_buckets()
-        bucket_index = self.routing_table.kbucket_index(peer.node_id)
-        if self.routing_table.buckets[bucket_index].add_peer(peer):
-            return True
-        # The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
-        if self.routing_table.should_split(bucket_index, peer.node_id):
-            self.routing_table.split_bucket(bucket_index)
-            # Retry the insertion attempt
-            result = await self._add_peer(peer)
-            self.routing_table.join_buckets()
-            return result
-        else:
-            # We can't split the k-bucket
-            #
-            # The 13 page kademlia paper specifies that the least recently contacted node in the bucket
-            # shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
-            # the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
-            #
-            # A reasonable extension to this is BEP 0005, which extends the above:
-            #
-            #     Not all nodes that we learn about are equal. Some are "good" and some are not.
-            #     Many nodes using the DHT are able to send queries and receive responses,
-            #     but are not able to respond to queries from other nodes. It is important that
-            #     each node's routing table must contain only known good nodes. A good node is
-            #     a node has responded to one of our queries within the last 15 minutes. A node
-            #     is also good if it has ever responded to one of our queries and has sent us a
-            #     query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
-            #     questionable. Nodes become bad when they fail to respond to multiple queries
-            #     in a row. Nodes that we know are good are given priority over nodes with unknown status.
-            #
-            # When there are bad or questionable nodes in the bucket, the least recent is selected for
-            # potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
-            # contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
-            # is ignored if the pinged node replies.
-            not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
-            not_recently_replied = []
-            for my_peer in not_good_contacts:
-                last_replied = self.peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
-                if not last_replied or last_replied + 60 < self.loop.time():
-                    not_recently_replied.append(my_peer)
-            if not_recently_replied:
-                to_replace = not_recently_replied[0]
-            else:
-                to_replace = self.routing_table.buckets[bucket_index].peers[0]
-                last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
-                if last_replied and last_replied + 60 > self.loop.time():
-                    return False
-            log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
-            try:
-                to_replace_rpc = self.get_rpc_peer(to_replace)
-                await to_replace_rpc.ping()
-                return False
-            except asyncio.TimeoutError:
-                log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
-                          to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
-                if to_replace in self.routing_table.buckets[bucket_index]:
-                    self.routing_table.buckets[bucket_index].remove_peer(to_replace)
-                return await self._add_peer(peer)
+        async def probe(some_peer: 'KademliaPeer'):
+            rpc_peer = self.get_rpc_peer(some_peer)
+            await rpc_peer.ping()
+        return await self.routing_table.add_peer(peer, probe)
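The eviction policy itself now lives in `TreeRoutingTable.add_peer()`, which receives the `probe` coroutine and can ping the least-recently-seen contact before deciding whether to evict it. A toy sketch of that inversion of control (simplified bucket, hypothetical names):

```python
import asyncio
from typing import Awaitable, Callable, List

class Bucket:
    """Toy bucket that asks the caller-supplied probe before evicting anyone."""
    def __init__(self, capacity: int = 2):
        self.capacity = capacity
        self.peers: List[str] = []

    async def add_peer(self, peer: str, probe: Callable[[str], Awaitable[None]]) -> bool:
        if peer in self.peers or len(self.peers) < self.capacity:
            if peer not in self.peers:
                self.peers.append(peer)
            return True
        oldest = self.peers[0]
        try:
            await probe(oldest)        # oldest peer still answers: keep it, drop the newcomer
            return False
        except asyncio.TimeoutError:   # oldest peer is dead: replace it
            self.peers[0] = peer
            return True

async def probe(peer: str):
    # hypothetical ping; pretend peers named "dead-*" never answer
    if peer.startswith("dead-"):
        raise asyncio.TimeoutError()

async def main():
    bucket = Bucket()
    for p in ("dead-a", "b", "c"):
        print(p, await bucket.add_peer(p, probe))  # "c" replaces the dead contact

asyncio.run(main())
```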
     def add_peer(self, peer: 'KademliaPeer'):
         if peer.node_id == self.node_id:
@@ -439,11 +382,10 @@ class KademliaProtocol(DatagramProtocol):
                 async with self._split_lock:
                     peer = self._to_remove.pop()
                     self.routing_table.remove_peer(peer)
-                    self.routing_table.join_buckets()
             while self._to_add:
                 async with self._split_lock:
                     await self._add_peer(self._to_add.pop())
-            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
+            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
             self._wakeup_routing_task.clear()

     def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
@@ -482,9 +424,8 @@ class KademliaProtocol(DatagramProtocol):
             # This is an RPC method request
             self.received_request_metric.labels(method=request_datagram.method).inc()
             self.peer_manager.report_last_requested(address[0], address[1])
-            try:
-                peer = self.routing_table.get_peer(request_datagram.node_id)
-            except IndexError:
+            peer = self.routing_table.get_peer(request_datagram.node_id)
+            if not peer:
                 try:
                     peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
                 except ValueError as err:

View file

@@ -6,7 +6,9 @@ import itertools

 from prometheus_client import Gauge

+from lbry import utils
 from lbry.dht import constants
+from lbry.dht.error import RemoteException
 from lbry.dht.protocol.distance import Distance

 if typing.TYPE_CHECKING:
     from lbry.dht.peer import KademliaPeer, PeerManager
@@ -27,7 +29,8 @@ class KBucket:
         namespace="dht_node", labelnames=("amount",)
     )

-    def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
+    def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
+                 node_id: bytes, capacity: int = constants.K):
         """
         @param range_min: The lower boundary for the range in the n-bit ID
                           space covered by this k-bucket
@@ -35,12 +38,12 @@ class KBucket:
                           covered by this k-bucket
         """
         self._peer_manager = peer_manager
-        self.last_accessed = 0
         self.range_min = range_min
         self.range_max = range_max
         self.peers: typing.List['KademliaPeer'] = []
         self._node_id = node_id
         self._distance_to_self = Distance(node_id)
+        self.capacity = capacity

     def add_peer(self, peer: 'KademliaPeer') -> bool:
         """ Add contact to _contact list in the right order. This will move the
@@ -67,22 +70,19 @@ class KBucket:
                 self.peers.remove(local_peer)
             self.peers.append(peer)
             return True
-        if len(self.peers) < constants.K:
+        if len(self.peers) < self.capacity:
             self.peers.append(peer)
             self.peer_in_routing_table_metric.labels("global").inc()
-            if peer.node_id[0] == self._node_id[0]:
-                bits_colliding = 8 - (peer.node_id[1] ^ self._node_id[1]).bit_length()
-                self.peer_with_x_bit_colliding_metric.labels(amount=(bits_colliding + 8)).inc()
+            bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
+            self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
             return True
         else:
             return False
-            # raise BucketFull("No space in bucket to insert contact")

     def get_peer(self, node_id: bytes) -> 'KademliaPeer':
         for peer in self.peers:
             if peer.node_id == node_id:
                 return peer
-        raise IndexError(node_id)
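`get_colliding_prefix_bits()` counts how many leading bits two ids share, so the metric now covers the whole id instead of only the first two bytes. The helper is imported from `lbry.utils`; a rough sketch of how such a function can be written (assumed behaviour, not copied from the repository):

```python
def get_colliding_prefix_bits(first: bytes, second: bytes) -> int:
    """Number of leading bits shared by two equal-length ids (assumed behaviour)."""
    xored = int.from_bytes(first, "big") ^ int.from_bytes(second, "big")
    return 8 * len(first) - xored.bit_length()

a = bytes.fromhex("ff00")
b = bytes.fromhex("f000")
print(get_colliding_prefix_bits(a, b))  # 4: both ids start with the bits 1111
```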
     def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
         """ Returns a list containing up to the first count number of contacts
@@ -140,9 +140,8 @@ class KBucket:
     def remove_peer(self, peer: 'KademliaPeer') -> None:
         self.peers.remove(peer)
         self.peer_in_routing_table_metric.labels("global").dec()
-        if peer.node_id[0] == self._node_id[0]:
-            bits_colliding = 8 - (peer.node_id[1] ^ self._node_id[1]).bit_length()
-            self.peer_with_x_bit_colliding_metric.labels(amount=(bits_colliding + 8)).dec()
+        bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
+        self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()

     def key_in_range(self, key: bytes) -> bool:
         """ Tests whether the specified key (i.e. node ID) is in the range
@@ -180,6 +179,13 @@ class TreeRoutingTable:
     version of the Kademlia paper, in section 2.4. It does, however, use the
     ping RPC-based k-bucket eviction algorithm described in section 2.2 of
     that paper.
+
+    BOOTSTRAP MODE: if set to True, we always add all peers. This is so a
+    bootstrap node does not get a bias towards its own node id and replies are
+    the best it can provide (joining peer knows its neighbors immediately).
+    Over time, this will need to be optimized so we use the disk as holding
+    everything in memory won't be feasible anymore.
+    See: https://github.com/bittorrent/bootstrap-dht
     """
     bucket_in_routing_table_metric = Gauge(
         "buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
@@ -187,21 +193,22 @@ class TreeRoutingTable:
     )

     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
-                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX):
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
         self._loop = loop
         self._peer_manager = peer_manager
         self._parent_node_id = parent_node_id
         self._split_buckets_under_index = split_buckets_under_index
         self.buckets: typing.List[KBucket] = [
             KBucket(
-                self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id
+                self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
+                capacity=1 << 32 if is_bootstrap_node else constants.K
             )
         ]

     def get_peers(self) -> typing.List['KademliaPeer']:
         return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))

-    def should_split(self, bucket_index: int, to_add: bytes) -> bool:
+    def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
         # https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
         if bucket_index < self._split_buckets_under_index:
             return True
@@ -226,39 +233,32 @@
         return []

     def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
-        """
-        @raise IndexError: No contact with the specified contact ID is known
-        by this node
-        """
-        return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
+        return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
     def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
-        bucket_index = start_index
         refresh_ids = []
-        now = int(self._loop.time())
-        for bucket in self.buckets[start_index:]:
-            if force or now - bucket.last_accessed >= constants.REFRESH_INTERVAL:
-                to_search = self.midpoint_id_in_bucket_range(bucket_index)
-                refresh_ids.append(to_search)
-            bucket_index += 1
+        for offset, _ in enumerate(self.buckets[start_index:]):
+            refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
+        # if we have 3 or fewer populated buckets get two random ids in the range of each to try and
+        # populate/split the buckets further
+        buckets_with_contacts = self.buckets_with_contacts()
+        if buckets_with_contacts <= 3:
+            for i in range(buckets_with_contacts):
+                refresh_ids.append(self._random_id_in_bucket_range(i))
+                refresh_ids.append(self._random_id_in_bucket_range(i))
         return refresh_ids
     def remove_peer(self, peer: 'KademliaPeer') -> None:
         if not peer.node_id:
             return
-        bucket_index = self.kbucket_index(peer.node_id)
+        bucket_index = self._kbucket_index(peer.node_id)
         try:
             self.buckets[bucket_index].remove_peer(peer)
+            self._join_buckets()
         except ValueError:
             return
-    def touch_kbucket(self, key: bytes) -> None:
-        self.touch_kbucket_by_index(self.kbucket_index(key))
-
-    def touch_kbucket_by_index(self, bucket_index: int):
-        self.buckets[bucket_index].last_accessed = int(self._loop.time())
-
-    def kbucket_index(self, key: bytes) -> int:
+    def _kbucket_index(self, key: bytes) -> int:
         i = 0
         for bucket in self.buckets:
             if bucket.key_in_range(key):
@@ -267,19 +267,19 @@
             i += 1
         return i
-    def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
+    def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
         random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
         return Distance(
             self._parent_node_id
         )(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')

-    def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
+    def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
         half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
         return Distance(self._parent_node_id)(
             int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
         ).to_bytes(constants.HASH_LENGTH, 'big')
-    def split_bucket(self, old_bucket_index: int) -> None:
+    def _split_bucket(self, old_bucket_index: int) -> None:
         """ Splits the specified k-bucket into two new buckets which together
         cover the same range in the key/ID space
@@ -304,7 +304,7 @@
             old_bucket.remove_peer(contact)
         self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
-    def join_buckets(self):
+    def _join_buckets(self):
         if len(self.buckets) == 1:
             return
         to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
@@ -327,14 +327,7 @@
         self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
         self.buckets.remove(bucket)
         self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
-        return self.join_buckets()
+        return self._join_buckets()
-    def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
-        for bucket in self.buckets:
-            for contact in bucket.get_peers(sort_distance_to=False):
-                if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
-                    return True
-        return False

     def buckets_with_contacts(self) -> int:
         count = 0
@@ -342,3 +335,70 @@
             if len(bucket) > 0:
                 count += 1
         return count
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.remove_peer(my_peer)
self._join_buckets()
bucket_index = self._kbucket_index(peer.node_id)
if self.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self._should_split(bucket_index, peer.node_id):
self._split_bucket(bucket_index)
# Retry the insertion attempt
result = await self.add_peer(peer, probe)
self._join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self._loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.buckets[bucket_index].peers[0]
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self._loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
await probe(to_replace)
return False
except (asyncio.TimeoutError, RemoteException):
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.buckets[bucket_index]:
self.buckets[bucket_index].remove_peer(to_replace)
return await self.add_peer(peer, probe)
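A sketch of how a caller might drive the new coroutine, assuming a Kademlia protocol object whose ping RPC can serve as the probe callable (the exact probe name and signature are assumptions, not shown in this diff):

    async def on_peer_discovered(routing_table, protocol, peer):
        # probe must be an awaitable taking the candidate eviction target; it should raise
        # asyncio.TimeoutError / RemoteException when that contact is dead.
        added = await routing_table.add_peer(peer, probe=protocol.ping)
        if not added:
            pass  # bucket was full and the existing contact answered the ping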

View file

@@ -1,5 +1,5 @@
-from lbry.conf import Config
 from lbry.extras.cli import execute_command
+from lbry.conf import Config

 def daemon_rpc(conf: Config, method: str, **kwargs):

View file

@@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
     def running(self):
         return self._running

-    async def get_status(self):
+    async def get_status(self):  # pylint: disable=no-self-use
         return

     async def start(self):

View file

@@ -42,7 +42,7 @@ class ComponentManager:
         self.analytics_manager = analytics_manager
         self.component_classes = {}
         self.components = set()
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()
         self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())

         for component_name, component_class in self.default_component_classes.items():
@@ -118,7 +118,7 @@ class ComponentManager:
             component._setup() for component in stage if not component.running
         ]
         if needing_start:
-            await asyncio.wait(needing_start)
+            await asyncio.wait(map(asyncio.create_task, needing_start))
         self.started.set()

     async def stop(self):
@@ -131,7 +131,7 @@ class ComponentManager:
             component._stop() for component in stage if component.running
         ]
         if needing_stop:
-            await asyncio.wait(needing_stop)
+            await asyncio.wait(map(asyncio.create_task, needing_stop))

     def all_components_running(self, *component_names):
         """

View file

@@ -27,10 +27,8 @@ from lbry.extras.daemon.storage import SQLiteStorage
 from lbry.torrent.torrent_manager import TorrentManager
 from lbry.wallet import WalletManager
 from lbry.wallet.usage_payment import WalletServerPayer
-try:
-    from lbry.torrent.session import TorrentSession
-except ImportError:
-    TorrentSession = None
+from lbry.torrent.tracker import TrackerClient
+from lbry.torrent.session import TorrentSession

 log = logging.getLogger(__name__)

@@ -48,6 +46,7 @@ BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
 PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
 UPNP_COMPONENT = "upnp"
 EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
+TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
 LIBTORRENT_COMPONENT = "libtorrent_component"

@@ -294,6 +293,7 @@ class DHTComponent(Component):
             peer_port=self.external_peer_port,
             rpc_timeout=self.conf.node_rpc_timeout,
             split_buckets_under_index=self.conf.split_buckets_under_index,
+            is_bootstrap_node=self.conf.is_bootstrap_node,
             storage=storage
         )
         self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
@@ -357,10 +357,6 @@ class FileManagerComponent(Component):
         wallet = self.component_manager.get_component(WALLET_COMPONENT)
         node = self.component_manager.get_component(DHT_COMPONENT) \
             if self.component_manager.has_component(DHT_COMPONENT) else None
-        try:
-            torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT) if TorrentSession else None
-        except NameError:
-            torrent = None
         log.info('Starting the file manager')
         loop = asyncio.get_event_loop()
         self.file_manager = FileManager(
@@ -369,7 +365,8 @@
         self.file_manager.source_managers['stream'] = StreamManager(
             loop, self.conf, blob_manager, wallet, storage, node,
         )
-        if TorrentSession and LIBTORRENT_COMPONENT not in self.conf.components_to_skip:
+        if self.component_manager.has_component(LIBTORRENT_COMPONENT):
+            torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
             self.file_manager.source_managers['torrent'] = TorrentManager(
                 loop, self.conf, torrent, storage, self.component_manager.analytics_manager
             )
@@ -377,10 +374,11 @@
         log.info('Done setting up file manager')

     async def stop(self):
-        self.file_manager.stop()
+        await self.file_manager.stop()
 class BackgroundDownloaderComponent(Component):
+    MIN_PREFIX_COLLIDING_BITS = 8
     component_name = BACKGROUND_DOWNLOADER_COMPONENT
     depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
@@ -412,12 +410,18 @@
         while True:
             self.space_available = await self.space_manager.get_free_space_mb(True)
             if not self.is_busy and self.space_available > 10:
-                blob_hash = next((key.hex() for key in self.dht_node.stored_blob_hashes if
-                                  key.hex() not in self.blob_manager.completed_blob_hashes), None)
-                if blob_hash:
-                    self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash))
+                self._download_next_close_blob_hash()
             await asyncio.sleep(self.download_loop_delay_seconds)
def _download_next_close_blob_hash(self):
node_id = self.dht_node.protocol.node_id
for blob_hash in self.dht_node.stored_blob_hashes:
if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
continue
if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
return
     async def start(self):
         self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
         if not self.component_manager.has_component(DHT_COMPONENT):
@@ -491,9 +495,8 @@ class TorrentComponent(Component):
         }

     async def start(self):
-        if TorrentSession:
-            self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
-            await self.torrent_session.bind()  # TODO: specify host/port
+        self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
+        await self.torrent_session.bind()  # TODO: specify host/port

     async def stop(self):
         if self.torrent_session:
@@ -548,7 +551,7 @@ class UPnPComponent(Component):
         while True:
             if now:
                 await self._maintain_redirects()
-            await asyncio.sleep(360, loop=self.component_manager.loop)
+            await asyncio.sleep(360)

     async def _maintain_redirects(self):
         # setup the gateway if necessary
@@ -557,8 +560,6 @@
             self.upnp = await UPnP.discover(loop=self.component_manager.loop)
             log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
         except Exception as err:
-            if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                raise
             log.warning("upnp discovery failed: %s", err)
             self.upnp = None
@@ -670,7 +671,7 @@
         log.info("Removing upnp redirects: %s", self.upnp_redirects)
         await asyncio.wait([
             self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
-        ], loop=self.component_manager.loop)
+        ])
         if self._maintain_redirects_task and not self._maintain_redirects_task.done():
             self._maintain_redirects_task.cancel()
@@ -701,3 +702,49 @@ class ExchangeRateManagerComponent(Component):
     async def stop(self):
         self.exchange_rate_manager.stop()
class TrackerAnnouncerComponent(Component):
component_name = TRACKER_ANNOUNCER_COMPONENT
depends_on = [FILE_MANAGER_COMPONENT]
def __init__(self, component_manager):
super().__init__(component_manager)
self.file_manager = None
self.announce_task = None
self.tracker_client: typing.Optional[TrackerClient] = None
@property
def component(self):
return self.tracker_client
@property
def running(self):
return self._running and self.announce_task and not self.announce_task.done()
async def announce_forever(self):
while True:
sleep_seconds = 60.0
announce_sd_hashes = []
for file in self.file_manager.get_filtered():
if not file.downloader:
continue
announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
await self.tracker_client.announce_many(*announce_sd_hashes)
await asyncio.sleep(sleep_seconds)
async def start(self):
node = self.component_manager.get_component(DHT_COMPONENT) \
if self.component_manager.has_component(DHT_COMPONENT) else None
node_id = node.protocol.node_id if node else None
self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
await self.tracker_client.start()
self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
self.announce_task = asyncio.create_task(self.announce_forever())
async def stop(self):
self.file_manager = None
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()
self.announce_task = None
self.tracker_client.stop()

View file

@@ -9,7 +9,7 @@ import inspect
 import typing
 import random
 import tracemalloc
-from decimal import Decimal
+import itertools
 from urllib.parse import urlencode, quote
 from typing import Callable, Optional, List
 from binascii import hexlify, unhexlify
@@ -28,6 +28,7 @@ from lbry.wallet import (
 from lbry.wallet.dewies import dewies_to_lbc, lbc_to_dewies, dict_values_to_lbc
 from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPE_NAMES
 from lbry.wallet.bip32 import PrivateKey
+from lbry.crypto.base58 import Base58
 from lbry import utils
 from lbry.conf import Config, Setting, NOT_SET
@@ -43,7 +44,7 @@ from lbry.error import (
 from lbry.extras import system_info
 from lbry.extras.daemon import analytics
 from lbry.extras.daemon.components import WALLET_COMPONENT, DATABASE_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT
-from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT
+from lbry.extras.daemon.components import FILE_MANAGER_COMPONENT, DISK_SPACE_COMPONENT, TRACKER_ANNOUNCER_COMPONENT
 from lbry.extras.daemon.components import EXCHANGE_RATE_MANAGER_COMPONENT, UPNP_COMPONENT
 from lbry.extras.daemon.componentmanager import RequiredCondition
 from lbry.extras.daemon.componentmanager import ComponentManager
@@ -52,9 +53,8 @@ from lbry.extras.daemon.undecorated import undecorated
 from lbry.extras.daemon.security import ensure_request_allowed
 from lbry.file_analysis import VideoFileAnalyzer
 from lbry.schema.claim import Claim
-from lbry.schema.url import URL, normalize_name
-from lbry.wallet.server.db.elasticsearch.constants import RANGE_FIELDS, REPLACEMENTS
-MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}
+from lbry.schema.url import URL

 if typing.TYPE_CHECKING:
     from lbry.blob.blob_manager import BlobManager
@@ -67,6 +67,29 @@ if typing.TYPE_CHECKING:

 log = logging.getLogger(__name__)
RANGE_FIELDS = {
'height', 'creation_height', 'activation_height', 'expiration_height',
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
'tx_position', 'repost_count', 'limit_claims_per_channel',
'amount', 'effective_amount', 'support_amount',
'trending_score', 'censor_type', 'tx_num'
}
MY_RANGE_FIELDS = RANGE_FIELDS - {"limit_claims_per_channel"}
REPLACEMENTS = {
'claim_name': 'normalized_name',
'name': 'normalized_name',
'txid': 'tx_id',
'nout': 'tx_nout',
'trending_group': 'trending_score',
'trending_mixed': 'trending_score',
'trending_global': 'trending_score',
'trending_local': 'trending_score',
'reposted': 'repost_count',
'stream_types': 'stream_type',
'media_types': 'media_type',
'valid_channel_signature': 'is_signature_valid'
}
 def is_transactional_function(name):
     for action in ('create', 'update', 'abandon', 'send', 'fund'):
@@ -172,61 +195,6 @@ def paginate_list(items: List, page: Optional[int], page_size: Optional[int]):
     }
def fix_kwargs_for_hub(**kwargs):
repeated_fields = {"media_type", "stream_type", "claim_type"}
value_fields = {"tx_nout", "has_source", "is_signature_valid"}
opcodes = {'=': 0, '<=': 1, '>=': 2, '<': 3, '>': 4}
for key, value in list(kwargs.items()):
if value in (None, [], False):
kwargs.pop(key)
continue
if key in REPLACEMENTS:
kwargs[REPLACEMENTS[key]] = kwargs.pop(key)
key = REPLACEMENTS[key]
if key == "normalized_name":
kwargs[key] = normalize_name(value)
if key == "limit_claims_per_channel":
value = kwargs.pop("limit_claims_per_channel") or 0
if value > 0:
kwargs["limit_claims_per_channel"] = value
elif key == "invalid_channel_signature":
kwargs["is_signature_valid"] = {"value": not kwargs.pop("invalid_channel_signature")}
elif key == "has_no_source":
kwargs["has_source"] = {"value": not kwargs.pop("has_no_source")}
elif key in value_fields:
kwargs[key] = {"value": value} if not isinstance(value, dict) else value
elif key in repeated_fields and isinstance(value, str):
kwargs[key] = [value]
elif key in ("claim_id", "channel_id"):
kwargs[key] = {"invert": False, "value": [kwargs[key]]}
elif key in ("claim_ids", "channel_ids"):
kwargs[key[:-1]] = {"invert": False, "value": kwargs.pop(key)}
elif key == "not_channel_ids":
kwargs["channel_id"] = {"invert": True, "value": kwargs.pop("not_channel_ids")}
elif key in MY_RANGE_FIELDS:
constraints = []
for val in value if isinstance(value, list) else [value]:
operator = '='
if isinstance(val, str) and val[0] in opcodes:
operator_length = 2 if val[:2] in opcodes else 1
operator, val = val[:operator_length], val[operator_length:]
val = [int(val if key != 'fee_amount' else Decimal(val)*1000)]
constraints.append({"op": opcodes[operator], "value": val})
kwargs[key] = constraints
elif key == 'order_by': # TODO: remove this after removing support for old trending args from the api
value = value if isinstance(value, list) else [value]
new_value = []
for new_v in value:
migrated = new_v if new_v not in (
'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
) else 'trending_score'
if migrated not in new_value:
new_value.append(migrated)
kwargs[key] = new_value
return kwargs
 DHT_HAS_CONTACTS = "dht_has_contacts"

@@ -646,7 +614,8 @@ class Daemon(metaclass=JSONRPCServerType):
             content_type='application/json'
         )

-    async def handle_metrics_get_request(self, request: web.Request):
+    @staticmethod
+    async def handle_metrics_get_request(request: web.Request):
         try:
             return web.Response(
                 text=prom_generate_latest().decode(),
@@ -1359,6 +1328,65 @@ class Daemon(metaclass=JSONRPCServerType):
         c.wallets += [wallet_id]
         return wallet
@requires("wallet")
async def jsonrpc_wallet_export(self, password=None, wallet_id=None):
"""
Exports encrypted wallet data if password is supplied; otherwise plain JSON.
Wallet must be unlocked to perform this operation.
Usage:
wallet_export [--password=<password>] [--wallet_id=<wallet_id>]
Options:
--password=<password> : (str) password to encrypt outgoing data
--wallet_id=<wallet_id> : (str) wallet being exported
Returns:
(str) data: base64-encoded encrypted wallet, or cleartext JSON
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
if password is None:
return wallet.to_json()
return wallet.pack(password).decode()
@requires("wallet")
async def jsonrpc_wallet_import(self, data, password=None, wallet_id=None, blocking=False):
"""
Import wallet data and merge accounts and preferences. Data is expected to be JSON if
password is not supplied.
Wallet must be unlocked to perform this operation.
Usage:
wallet_import (<data> | --data=<data>) [<password> | --password=<password>]
[--wallet_id=<wallet_id>] [--blocking]
Options:
--data=<data> : (str) incoming wallet data
--password=<password> : (str) password to decrypt incoming data
--wallet_id=<wallet_id> : (str) wallet being merged into
--blocking : (bool) wait until any new accounts have merged
Returns:
(str) base64-encoded encrypted wallet, or cleartext JSON
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
for new_account in itertools.chain(added_accounts, merged_accounts):
await new_account.maybe_migrate_certificates()
if added_accounts and self.ledger.network.is_connected:
if blocking:
await asyncio.wait([
a.ledger.subscribe_account(a) for a in added_accounts
])
else:
for new_account in added_accounts:
asyncio.create_task(self.ledger.subscribe_account(new_account))
wallet.save()
return await self.jsonrpc_wallet_export(password=password, wallet_id=wallet_id)
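A possible round trip through the two new wallet calls over the daemon's JSON-RPC HTTP interface; the localhost:5279 endpoint and the {"result": ...} response envelope are assumptions about a default daemon setup, not part of this diff:

    import requests

    def rpc(method, **params):
        # hypothetical helper: POST a JSON-RPC request to a locally running daemon
        return requests.post("http://localhost:5279", json={"method": method, "params": params}).json()["result"]

    exported = rpc("wallet_export", password="hunter2")                     # base64 blob when a password is given
    rpc("wallet_import", data=exported, password="hunter2", blocking=True)  # merge into the default wallet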
@requires("wallet") @requires("wallet")
async def jsonrpc_wallet_add(self, wallet_id): async def jsonrpc_wallet_add(self, wallet_id):
""" """
@ -1827,7 +1855,7 @@ class Daemon(metaclass=JSONRPCServerType):
Options: Options:
--to_account=<to_account> : (str) send to this account --to_account=<to_account> : (str) send to this account
--from_account=<from_account> : (str) spend from this account --from_account=<from_account> : (str) spend from this account
--amount=<amount> : (str) the amount to transfer lbc --amount=<amount> : (decimal) the amount to transfer lbc
--everything : (bool) transfer everything (excluding claims), default: false. --everything : (bool) transfer everything (excluding claims), default: false.
--outputs=<outputs> : (int) split payment across many outputs, default: 1. --outputs=<outputs> : (int) split payment across many outputs, default: 1.
--wallet_id=<wallet_id> : (str) limit operation to specific wallet. --wallet_id=<wallet_id> : (str) limit operation to specific wallet.
@ -1850,6 +1878,48 @@ class Daemon(metaclass=JSONRPCServerType):
outputs=outputs, broadcast=broadcast outputs=outputs, broadcast=broadcast
) )
@requires("wallet")
async def jsonrpc_account_deposit(
self, txid, nout, redeem_script, private_key,
to_account=None, wallet_id=None, preview=False, blocking=False
):
"""
Spend a time locked transaction into your account.
Usage:
account_deposit <txid> <nout> <redeem_script> <private_key>
[<to_account> | --to_account=<to_account>]
[--wallet_id=<wallet_id>] [--preview] [--blocking]
Options:
--txid=<txid> : (str) id of the transaction
--nout=<nout> : (int) output number in the transaction
--redeem_script=<redeem_script> : (str) redeem script for output
--private_key=<private_key> : (str) private key to sign transaction
--to_account=<to_account> : (str) deposit to this account
--wallet_id=<wallet_id> : (str) limit operation to specific wallet.
--preview : (bool) do not broadcast the transaction
--blocking : (bool) wait until tx has synced
Returns: {Transaction}
"""
wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
account = wallet.get_account_or_default(to_account)
other_tx = await self.wallet_manager.get_transaction(txid)
tx = await Transaction.spend_time_lock(
other_tx.outputs[nout], unhexlify(redeem_script), account
)
pk = PrivateKey.from_bytes(
account.ledger, Base58.decode_check(private_key)[1:-1]
)
await tx.sign([account], {pk.address: pk})
if not preview:
await self.broadcast_or_release(tx, blocking)
self.component_manager.loop.create_task(self.analytics_manager.send_credits_sent())
else:
await self.ledger.release_tx(tx)
return tx
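Assuming the same kind of JSON-RPC helper as above, a time-locked output could then be swept into an account roughly like this (every argument value below is a placeholder):

    rpc(
        "account_deposit",
        txid="<txid of the time locked transaction>", nout=0,
        redeem_script="<hex redeem script>", private_key="<base58 private key>",
        preview=True,  # inspect the signed transaction before broadcasting it
    )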
     @requires(WALLET_COMPONENT)
     def jsonrpc_account_send(self, amount, addresses, account_id=None, wallet_id=None, preview=False, blocking=False):
         """
@@ -1921,7 +1991,9 @@ class Daemon(metaclass=JSONRPCServerType):
         wallet = self.wallet_manager.get_wallet_or_default(wallet_id)
         wallet_changed = False
         if data is not None:
-            added_accounts = wallet.merge(self.wallet_manager, password, data)
+            added_accounts, merged_accounts = wallet.merge(self.wallet_manager, password, data)
+            for new_account in itertools.chain(added_accounts, merged_accounts):
+                await new_account.maybe_migrate_certificates()
             if added_accounts and self.ledger.network.is_connected:
                 if blocking:
                     await asyncio.wait([
@@ -2338,6 +2410,7 @@ class Daemon(metaclass=JSONRPCServerType):
         Usage:
             claim_list [--claim_type=<claim_type>...] [--claim_id=<claim_id>...] [--name=<name>...] [--is_spent]
+                       [--reposted_claim_id=<reposted_claim_id>...]
                        [--channel_id=<channel_id>...] [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                        [--has_source | --has_no_source] [--page=<page>] [--page_size=<page_size>]
                        [--resolve] [--order_by=<order_by>] [--no_totals] [--include_received_tips]
@@ -2348,6 +2421,7 @@ class Daemon(metaclass=JSONRPCServerType):
             --channel_id=<channel_id> : (str or list) streams in this channel
             --name=<name> : (str or list) claim name
             --is_spent : (bool) shows previous claim updates and abandons
+            --reposted_claim_id=<reposted_claim_id> : (str or list) reposted claim id
             --account_id=<account_id> : (str) id of the account to query
             --wallet_id=<wallet_id> : (str) restrict results to specific wallet
             --has_source : (bool) list claims containing a source field
@@ -2545,42 +2619,27 @@
         Returns: {Paginated[Output]}
         """
-        if self.ledger.config.get('use_go_hub'):
-            host = self.ledger.network.client.server[0]
-            port = "50051"
-            kwargs['new_sdk_server'] = f"{host}:{port}"
-            if kwargs.get("channel"):
-                channel = kwargs.pop("channel")
-                channel_obj = (await self.jsonrpc_resolve(channel))[channel]
-                if isinstance(channel_obj, dict):
-                    # This happens when the channel doesn't exist
-                    kwargs["channel_id"] = ""
-                else:
-                    kwargs["channel_id"] = channel_obj.claim_id
-            kwargs = fix_kwargs_for_hub(**kwargs)
-        else:
-            # Don't do this if using the hub server, it screws everything up
-            if "claim_ids" in kwargs and not kwargs["claim_ids"]:
-                kwargs.pop("claim_ids")
-            if {'claim_id', 'claim_ids'}.issubset(kwargs):
-                raise ConflictingInputValueError('claim_id', 'claim_ids')
-            if kwargs.pop('valid_channel_signature', False):
-                kwargs['signature_valid'] = 1
-            if kwargs.pop('invalid_channel_signature', False):
-                kwargs['signature_valid'] = 0
-            if 'has_no_source' in kwargs:
-                kwargs['has_source'] = not kwargs.pop('has_no_source')
-            if 'order_by' in kwargs:  # TODO: remove this after removing support for old trending args from the api
-                value = kwargs.pop('order_by')
-                value = value if isinstance(value, list) else [value]
-                new_value = []
-                for new_v in value:
-                    migrated = new_v if new_v not in (
-                        'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
-                    ) else 'trending_score'
-                    if migrated not in new_value:
-                        new_value.append(migrated)
-                kwargs['order_by'] = new_value
+        if "claim_ids" in kwargs and not kwargs["claim_ids"]:
+            kwargs.pop("claim_ids")
+        if {'claim_id', 'claim_ids'}.issubset(kwargs):
+            raise ConflictingInputValueError('claim_id', 'claim_ids')
+        if kwargs.pop('valid_channel_signature', False):
+            kwargs['signature_valid'] = 1
+        if kwargs.pop('invalid_channel_signature', False):
+            kwargs['signature_valid'] = 0
+        if 'has_no_source' in kwargs:
+            kwargs['has_source'] = not kwargs.pop('has_no_source')
+        if 'order_by' in kwargs:  # TODO: remove this after removing support for old trending args from the api
+            value = kwargs.pop('order_by')
+            value = value if isinstance(value, list) else [value]
+            new_value = []
+            for new_v in value:
+                migrated = new_v if new_v not in (
+                    'trending_mixed', 'trending_local', 'trending_global', 'trending_group'
+                ) else 'trending_score'
+                if migrated not in new_value:
+                    new_value.append(migrated)
+            kwargs['order_by'] = new_value
         page_num, page_size = abs(kwargs.pop('page', 1)), min(abs(kwargs.pop('page_size', DEFAULT_PAGE_SIZE)), 50)
         wallet = self.wallet_manager.get_wallet_or_default(kwargs.pop('wallet_id', None))
         kwargs.update({'offset': page_size * (page_num - 1), 'limit': page_size})
@@ -2716,7 +2775,7 @@
             wallet.save()
             await self.broadcast_or_release(tx, blocking)
             self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
-                tx, txo, claim_address, claim, name, dewies_to_lbc(amount)
+                tx, txo, claim_address, claim, name
             )]))
             self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
         else:
@@ -2875,7 +2934,7 @@
             wallet.save()
             await self.broadcast_or_release(tx, blocking)
             self.component_manager.loop.create_task(self.storage.save_claims([self._old_get_temp_claim_info(
-                tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
+                tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name
             )]))
             self.component_manager.loop.create_task(self.analytics_manager.send_new_channel())
         else:
@@ -2885,19 +2944,21 @@
     @requires(WALLET_COMPONENT)
     async def jsonrpc_channel_sign(
-            self, channel_name=None, channel_id=None, hexdata=None, channel_account_id=None, wallet_id=None):
+            self, channel_name=None, channel_id=None, hexdata=None, salt=None,
+            channel_account_id=None, wallet_id=None):
         """
         Signs data using the specified channel signing key.

         Usage:
-            channel_sign [<channel_name> | --channel_name=<channel_name>]
-                         [<channel_id> | --channel_id=<channel_id>] [<hexdata> | --hexdata=<hexdata>]
+            channel_sign [<channel_name> | --channel_name=<channel_name>] [<channel_id> | --channel_id=<channel_id>]
+                         [<hexdata> | --hexdata=<hexdata>] [<salt> | --salt=<salt>]
                          [--channel_account_id=<channel_account_id>...] [--wallet_id=<wallet_id>]

         Options:
             --channel_name=<channel_name> : (str) name of channel used to sign (or use channel id)
             --channel_id=<channel_id> : (str) claim id of channel used to sign (or use channel name)
             --hexdata=<hexdata> : (str) data to sign, encoded as hexadecimal
+            --salt=<salt> : (str) salt to use for signing, default is to use timestamp
             --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
                                                        for channel certificates, defaults to all accounts.
             --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
@@ -2914,11 +2975,13 @@
         signing_channel = await self.get_channel_or_error(
             wallet, channel_account_id, channel_id, channel_name, for_signing=True
         )
-        timestamp = str(int(time.time()))
-        signature = signing_channel.sign_data(unhexlify(str(hexdata)), timestamp)
+        if salt is None:
+            salt = str(int(time.time()))
+        signature = signing_channel.sign_data(unhexlify(str(hexdata)), salt)
         return {
             'signature': signature,
-            'signing_ts': timestamp
+            'signing_ts': salt,  # DEPRECATED
+            'salt': salt,
         }
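With the new salt parameter a caller can pin the salt instead of letting the daemon fall back to the current timestamp, making signatures reproducible. For example, via the same hypothetical JSON-RPC helper and placeholder values:

    result = rpc("channel_sign", channel_name="@mychannel", hexdata="deadbeef", salt="12345")
    # result carries 'signature', the new 'salt', and 'signing_ts' kept only for backwards compatibility
    assert result["salt"] == "12345"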
     @requires(WALLET_COMPONENT)
@@ -3236,15 +3299,17 @@
         )

     @requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, DATABASE_COMPONENT)
-    async def jsonrpc_stream_repost(self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
-                                    channel_name=None, channel_account_id=None, account_id=None, wallet_id=None,
-                                    claim_address=None, funding_account_ids=None, preview=False, blocking=False):
+    async def jsonrpc_stream_repost(
+            self, name, bid, claim_id, allow_duplicate_name=False, channel_id=None,
+            channel_name=None, channel_account_id=None, account_id=None, wallet_id=None,
+            claim_address=None, funding_account_ids=None, preview=False, blocking=False, **kwargs):
         """
         Creates a claim that references an existing stream by its claim id.

         Usage:
             stream_repost (<name> | --name=<name>) (<bid> | --bid=<bid>) (<claim_id> | --claim_id=<claim_id>)
                           [--allow_duplicate_name=<allow_duplicate_name>]
+                          [--title=<title>] [--description=<description>] [--tags=<tags>...]
                           [--channel_id=<channel_id> | --channel_name=<channel_name>]
                           [--channel_account_id=<channel_account_id>...]
                           [--account_id=<account_id>] [--wallet_id=<wallet_id>]
@@ -3257,6 +3322,9 @@
             --claim_id=<claim_id> : (str) id of the claim being reposted
             --allow_duplicate_name=<allow_duplicate_name> : (bool) create new claim even if one already exists with
                                                             given name. default: false.
+            --title=<title> : (str) title of the repost
+            --description=<description> : (str) description of the repost
+            --tags=<tags> : (list) add repost tags
             --channel_id=<channel_id> : (str) claim id of the publisher channel
             --channel_name=<channel_name> : (str) name of the publisher channel
             --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
@@ -3291,6 +3359,7 @@
             raise Exception('Invalid claim id. It is expected to be a 40 characters long hexadecimal string.')

         claim = Claim()
+        claim.repost.update(**kwargs)
         claim.repost.reference.claim_id = claim_id
         tx = await Transaction.claim_create(
             name, claim, amount, claim_address, funding_accounts, funding_accounts[0], channel
@@ -3468,7 +3537,7 @@
             async def save_claims():
                 await self.storage.save_claims([self._old_get_temp_claim_info(
-                    tx, new_txo, claim_address, claim, name, dewies_to_lbc(amount)
+                    tx, new_txo, claim_address, claim, name
                 )])
                 if file_path is not None:
                     await self.storage.save_content_claim(file_stream.stream_hash, new_txo.id)
@@ -3671,6 +3740,8 @@
         if old_txo.claim.is_stream:
             claim.stream.update(file_path=file_path, **kwargs)
+        elif old_txo.claim.is_repost:
+            claim.repost.update(**kwargs)

         if clear_channel:
             claim.clear_signature()
@@ -3703,7 +3774,7 @@
             async def save_claims():
                 await self.storage.save_claims([self._old_get_temp_claim_info(
-                    tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name, dewies_to_lbc(amount)
+                    tx, new_txo, claim_address, new_txo.claim, new_txo.claim_name
                 )])
                 if stream_hash:
                     await self.storage.save_content_claim(stream_hash, new_txo.id)
@@ -3965,6 +4036,8 @@
                 [--languages=<languages>...] [--clear_languages]
                 [--locations=<locations>...] [--clear_locations]
                 [--thumbnail_url=<thumbnail_url>] [--cover_url=<cover_url>]
+                [--channel_id=<channel_id> | --channel_name=<channel_name>]
+                [--channel_account_id=<channel_account_id>...]
                 [--account_id=<account_id>] [--wallet_id=<wallet_id>]
                 [--claim_address=<claim_address>]
                 [--funding_account_ids=<funding_account_ids>...]
@@ -4020,6 +4093,10 @@
             --clear_locations : (bool) clear existing locations (prior to adding new ones)
             --thumbnail_url=<thumbnail_url>: (str) thumbnail url
+            --channel_id=<channel_id> : (str) claim id of the publisher channel
+            --channel_name=<channel_name> : (str) name of the publisher channel
+            --channel_account_id=<channel_account_id>: (str) one or more account ids for accounts to look in
+                                                       for channel certificates, defaults to all accounts.
             --account_id=<account_id> : (str) account in which to look for collection (default: all)
             --wallet_id=<wallet_id> : (str) restrict operation to specific wallet
             --funding_account_ids=<funding_account_ids>: (list) ids of accounts to fund this transaction
@@ -4267,7 +4344,7 @@
                 'nout': tx.position,
                 'address': claim_address,
                 'claim_id': claim_id,
-                'amount': dewies_to_lbc(amount)
+                'amount': dewies_to_lbc(new_txo.amount)
             }]})
             self.component_manager.loop.create_task(self.analytics_manager.send_claim_action('new_support'))
         else:
@@ -4884,21 +4961,16 @@
         DHT / Blob Exchange peer commands.
         """

-    @requires(DHT_COMPONENT)
-    async def jsonrpc_peer_list(self, blob_hash, search_bottom_out_limit=None, page=None, page_size=None):
+    async def jsonrpc_peer_list(self, blob_hash, page=None, page_size=None):
         """
         Get peers for blob hash

         Usage:
             peer_list (<blob_hash> | --blob_hash=<blob_hash>)
-                      [<search_bottom_out_limit> | --search_bottom_out_limit=<search_bottom_out_limit>]
                       [--page=<page>] [--page_size=<page_size>]

         Options:
             --blob_hash=<blob_hash> : (str) find available peers for this blob hash
-            --search_bottom_out_limit=<search_bottom_out_limit> : (int) the number of search probes in a row
-                                                                  that don't find any new peers
-                                                                  before giving up and returning
             --page=<page> : (int) page to return during paginating
             --page_size=<page_size> : (int) number of items on page during pagination

@@ -4910,28 +4982,29 @@
         if not is_valid_blobhash(blob_hash):
             # TODO: use error from lbry.error
             raise Exception("invalid blob hash")
-        if search_bottom_out_limit is not None:
-            search_bottom_out_limit = int(search_bottom_out_limit)
-            if search_bottom_out_limit <= 0:
-                # TODO: use error from lbry.error
-                raise Exception("invalid bottom out limit")
-        else:
-            search_bottom_out_limit = 4
-        peers = []
         peer_q = asyncio.Queue(loop=self.component_manager.loop)
-        await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
+        if self.component_manager.has_component(TRACKER_ANNOUNCER_COMPONENT):
+            tracker = self.component_manager.get_component(TRACKER_ANNOUNCER_COMPONENT)
+            tracker_peers = await tracker.get_kademlia_peer_list(bytes.fromhex(blob_hash))
+            log.info("Found %d peers for %s from trackers.", len(tracker_peers), blob_hash[:8])
+            peer_q.put_nowait(tracker_peers)
+        elif not self.component_manager.has_component(DHT_COMPONENT):
+            raise Exception("Peer list needs, at least, either a DHT component or a Tracker component for discovery.")
+        peers = []
+        if self.component_manager.has_component(DHT_COMPONENT):
+            await self.dht_node._peers_for_value_producer(blob_hash, peer_q)
         while not peer_q.empty():
             peers.extend(peer_q.get_nowait())
-        results = [
-            {
-                "node_id": hexlify(peer.node_id).decode(),
+        results = {
+            (peer.address, peer.tcp_port): {
+                "node_id": hexlify(peer.node_id).decode() if peer.node_id else None,
                 "address": peer.address,
                 "udp_port": peer.udp_port,
                 "tcp_port": peer.tcp_port,
             }
             for peer in peers
-        ]
-        return paginate_list(results, page, page_size)
+        }
+        return paginate_list(list(results.values()), page, page_size)
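After this change peer_list no longer requires the DHT component, so a client can ask for peers as long as either discovery mechanism is running; a minimal call via the hypothetical helper above, assuming paginate_list's usual {"items": [...]} envelope and a placeholder hash:

    peers = rpc("peer_list", blob_hash="<sd or blob hash>", page=1, page_size=50)
    for peer in peers["items"]:
        print(peer["address"], peer["tcp_port"], peer["node_id"])  # node_id may be None for tracker-only peers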
     @requires(DATABASE_COMPONENT)
     async def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
@@ -5401,11 +5474,11 @@
         return results

     @staticmethod
-    def _old_get_temp_claim_info(tx, txo, address, claim_dict, name, bid):
+    def _old_get_temp_claim_info(tx, txo, address, claim_dict, name):
         return {
             "claim_id": txo.claim_id,
             "name": name,
-            "amount": bid,
+            "amount": dewies_to_lbc(txo.amount),
             "address": address,
             "txid": tx.id,
             "nout": txo.position,

View file

@@ -80,8 +80,6 @@ class MarketFeed:
             self.rate = ExchangeRate(self.market, rate, int(time.time()))
             self.last_check = time.time()
             return self.rate
-        except asyncio.CancelledError:
-            raise
         except asyncio.TimeoutError:
             log.warning("Timed out fetching exchange rate from %s.", self.name)
         except json.JSONDecodeError as e:
@@ -196,9 +194,9 @@ FEEDS: Iterable[Type[MarketFeed]] = (
     BittrexUSDFeed,
     CoinExBTCFeed,
     CoinExUSDFeed,
-    HotbitBTCFeed,
-    HotbitUSDFeed,
-    UPbitBTCFeed,
+    # HotbitBTCFeed,
+    # HotbitUSDFeed,
+    # UPbitBTCFeed,
 )

View file

@@ -328,8 +328,8 @@ class JSONResponseEncoder(JSONEncoder):
             result.update({
                 'streaming_url': managed_stream.stream_url,
                 'stream_hash': managed_stream.stream_hash,
-                'stream_name': managed_stream.descriptor.stream_name,
-                'suggested_file_name': managed_stream.descriptor.suggested_file_name,
+                'stream_name': managed_stream.stream_name,
+                'suggested_file_name': managed_stream.suggested_file_name,
                 'sd_hash': managed_stream.descriptor.sd_hash,
                 'mime_type': managed_stream.mime_type,
                 'key': managed_stream.descriptor.key,

View file

@@ -20,7 +20,7 @@ def do_migration(conf):
                 "left outer join blob b ON b.blob_hash=s.blob_hash order by s.position").fetchall()
         blobs_by_stream = {}
         for stream_hash, position, iv, blob_hash, blob_length in blobs:
-            blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, blob_hash))
+            blobs_by_stream.setdefault(stream_hash, []).append(BlobInfo(position, blob_length or 0, iv, 0, blob_hash))
         for stream_name, stream_key, suggested_filename, sd_hash, stream_hash in streams:
             sd = StreamDescriptor(None, blob_dir, stream_name, stream_key, suggested_filename,

View file

@ -449,7 +449,8 @@ class SQLiteStorage(SQLiteMixin):
return await self.db.execute_fetchall( return await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on " "select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob left join stream_blob using (blob_hash) " "from blob left join stream_blob using (blob_hash) "
"where stream_blob.stream_hash is null and blob.is_mine=? order by blob.added_on asc", "where stream_blob.stream_hash is null and blob.is_mine=? and blob.status='finished'"
"order by blob.blob_length desc, blob.added_on asc",
(is_mine,) (is_mine,)
) )
@ -462,7 +463,8 @@ class SQLiteStorage(SQLiteMixin):
content_blobs = await self.db.execute_fetchall( content_blobs = await self.db.execute_fetchall(
"select blob.blob_hash, blob.blob_length, blob.added_on " "select blob.blob_hash, blob.blob_length, blob.added_on "
"from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)" "from blob join stream_blob using (blob_hash) cross join stream using (stream_hash)"
"cross join file using (stream_hash) where blob.is_mine=? order by blob.added_on asc, blob.blob_length asc", "cross join file using (stream_hash)"
"where blob.is_mine=? and blob.status='finished' order by blob.added_on asc, blob.blob_length asc",
(is_mine,) (is_mine,)
) )
return content_blobs + sd_blobs return content_blobs + sd_blobs
@ -480,6 +482,7 @@ class SQLiteStorage(SQLiteMixin):
is_mine=1 is_mine=1
then blob_length else 0 end), 0) as private_storage then blob_length else 0 end), 0) as private_storage
from blob left join stream_blob using (blob_hash) from blob left join stream_blob using (blob_hash)
where blob_hash not in (select sd_hash from stream) and blob.status="finished"
""") """)
return { return {
'network_storage': network_size, 'network_storage': network_size,
@ -531,7 +534,8 @@ class SQLiteStorage(SQLiteMixin):
def _get_blobs_for_stream(transaction): def _get_blobs_for_stream(transaction):
crypt_blob_infos = [] crypt_blob_infos = []
stream_blobs = transaction.execute( stream_blobs = transaction.execute(
"select blob_hash, position, iv from stream_blob where stream_hash=? " "select s.blob_hash, s.position, s.iv, b.added_on "
"from stream_blob s left outer join blob b on b.blob_hash=s.blob_hash where stream_hash=? "
"order by position asc", (stream_hash, ) "order by position asc", (stream_hash, )
).fetchall() ).fetchall()
if only_completed: if only_completed:
@@ -551,9 +555,10 @@ class SQLiteStorage(SQLiteMixin):
for blob_hash, length in lengths: for blob_hash, length in lengths:
blob_length_dict[blob_hash] = length blob_length_dict[blob_hash] = length
for blob_hash, position, iv in stream_blobs: current_time = time.time()
for blob_hash, position, iv, added_on in stream_blobs:
blob_length = blob_length_dict.get(blob_hash, 0) blob_length = blob_length_dict.get(blob_hash, 0)
crypt_blob_infos.append(BlobInfo(position, blob_length, iv, blob_hash)) crypt_blob_infos.append(BlobInfo(position, blob_length, iv, added_on or current_time, blob_hash))
if not blob_hash: if not blob_hash:
break break
return crypt_blob_infos return crypt_blob_infos
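
The reworked query above now also pulls each blob's added_on timestamp from the blob table, and rows without a matching blob record fall back to the current time when the BlobInfo is built. A standalone sketch of that fallback against an in-memory sqlite3 database (table and column names follow the hunk; the data and the surrounding code are made up):

import sqlite3
import time

db = sqlite3.connect(":memory:")
db.executescript("""
    create table blob (blob_hash text primary key, blob_length integer, added_on integer);
    create table stream_blob (stream_hash text, blob_hash text, position integer, iv text);
    insert into blob values ('aa', 100, 1600000000);
    insert into stream_blob values ('s1', 'aa', 0, 'iv0');
    insert into stream_blob values ('s1', 'bb', 1, 'iv1');  -- no matching blob row yet
""")

stream_blobs = db.execute(
    "select s.blob_hash, s.position, s.iv, b.added_on "
    "from stream_blob s left outer join blob b on b.blob_hash=s.blob_hash where stream_hash=? "
    "order by position asc", ("s1",)
).fetchall()

current_time = time.time()
for blob_hash, position, iv, added_on in stream_blobs:
    # a missing blob row yields added_on = NULL, so fall back to "now", as the hunk does
    print(blob_hash, position, iv, added_on or current_time)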
@@ -788,7 +793,7 @@ class SQLiteStorage(SQLiteMixin):
await self.db.run(_save_claims) await self.db.run(_save_claims)
if update_file_callbacks: if update_file_callbacks:
await asyncio.wait(update_file_callbacks) await asyncio.wait(map(asyncio.create_task, update_file_callbacks))
if claim_id_to_supports: if claim_id_to_supports:
await self.save_supports(claim_id_to_supports) await self.save_supports(claim_id_to_supports)
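
The save_claims hunk above stops passing bare coroutines to asyncio.wait() and wraps each pending callback in a task first, which is what asyncio.wait() expects on current Python releases. A minimal, self-contained sketch of the same pattern (the callback is a made-up stand-in):

import asyncio

async def update_file(name: str) -> None:
    # stand-in for one of the update_file_callbacks gathered while saving claims
    await asyncio.sleep(0)
    print(f"updated {name}")

async def main() -> None:
    update_file_callbacks = [update_file(f"claim-{i}") for i in range(3)]
    # wrap every coroutine in a Task before waiting on the whole batch
    await asyncio.wait(map(asyncio.create_task, update_file_callbacks))

asyncio.run(main())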


@@ -13,11 +13,12 @@ from lbry.schema.url import URL
from lbry.wallet.dewies import dewies_to_lbc from lbry.wallet.dewies import dewies_to_lbc
from lbry.file.source_manager import SourceManager from lbry.file.source_manager import SourceManager
from lbry.file.source import ManagedDownloadSource from lbry.file.source import ManagedDownloadSource
from lbry.extras.daemon.storage import StoredContentClaim
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.conf import Config from lbry.conf import Config
from lbry.extras.daemon.analytics import AnalyticsManager from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.extras.daemon.storage import SQLiteStorage from lbry.extras.daemon.storage import SQLiteStorage
from lbry.wallet import WalletManager, Output from lbry.wallet import WalletManager
from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@@ -49,10 +50,10 @@ class FileManager:
await manager.started.wait() await manager.started.wait()
self.started.set() self.started.set()
def stop(self): async def stop(self):
for manager in self.source_managers.values(): for manager in self.source_managers.values():
# fixme: pop or not? # fixme: pop or not?
manager.stop() await manager.stop()
self.started.clear() self.started.clear()
@cache_concurrent @cache_concurrent
@@ -98,8 +99,6 @@ class FileManager:
except asyncio.TimeoutError: except asyncio.TimeoutError:
raise ResolveTimeoutError(uri) raise ResolveTimeoutError(uri)
except Exception as err: except Exception as err:
if isinstance(err, asyncio.CancelledError):
raise
log.exception("Unexpected error resolving stream:") log.exception("Unexpected error resolving stream:")
raise ResolveError(f"Unexpected error resolving stream: {str(err)}") raise ResolveError(f"Unexpected error resolving stream: {str(err)}")
if 'error' in resolved_result: if 'error' in resolved_result:
@@ -194,21 +193,24 @@ class FileManager:
#################### ####################
# make downloader and wait for start # make downloader and wait for start
#################### ####################
# temporary with fields we know so downloader can start. Missing fields are populated later.
stored_claim = StoredContentClaim(outpoint=outpoint, claim_id=txo.claim_id, name=txo.claim_name,
amount=txo.amount, height=txo.tx_ref.height,
serialized=claim.to_bytes().hex())
if not claim.stream.source.bt_infohash: if not claim.stream.source.bt_infohash:
# fixme: this shouldnt be here # fixme: this shouldnt be here
stream = ManagedStream( stream = ManagedStream(
self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash, self.loop, self.config, source_manager.blob_manager, claim.stream.source.sd_hash,
download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment, download_directory, file_name, ManagedStream.STATUS_RUNNING, content_fee=payment,
analytics_manager=self.analytics_manager analytics_manager=self.analytics_manager, claim=stored_claim
) )
stream.downloader.node = source_manager.node stream.downloader.node = source_manager.node
else: else:
stream = TorrentSource( stream = TorrentSource(
self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash, self.loop, self.config, self.storage, identifier=claim.stream.source.bt_infohash,
file_name=file_name, download_directory=download_directory or self.config.download_dir, file_name=file_name, download_directory=download_directory or self.config.download_dir,
status=ManagedStream.STATUS_RUNNING, status=ManagedStream.STATUS_RUNNING, claim=stored_claim, analytics_manager=self.analytics_manager,
analytics_manager=self.analytics_manager,
torrent_session=source_manager.torrent_session torrent_session=source_manager.torrent_session
) )
log.info("starting download for %s", uri) log.info("starting download for %s", uri)
@@ -240,13 +242,12 @@ class FileManager:
claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier) claim_info = await self.storage.get_content_claim_for_torrent(stream.identifier)
stream.set_claim(claim_info, claim) stream.set_claim(claim_info, claim)
if save_file: if save_file:
await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download), await asyncio.wait_for(stream.save_file(), timeout - (self.loop.time() - before_download))
loop=self.loop)
return stream return stream
except asyncio.TimeoutError: except asyncio.TimeoutError:
error = DownloadDataTimeoutError(stream.sd_hash) error = DownloadDataTimeoutError(stream.sd_hash)
raise error raise error
except Exception as err: # forgive data timeout, don't delete stream except (Exception, asyncio.CancelledError) as err: # forgive data timeout, don't delete stream
expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError, expected = (DownloadSDTimeoutError, DownloadDataTimeoutError, InsufficientFundsError,
KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError) KeyFeeAboveMaxAllowedError, ResolveError, InvalidStreamURLError)
if isinstance(err, expected): if isinstance(err, expected):
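
The widened except (Exception, asyncio.CancelledError) above matters because on modern Python a cancellation is raised as a BaseException, so a bare except Exception never sees it; listing it explicitly lets this handler decide whether to forgive the error or delete the stream. A small illustrative sketch of that behaviour:

import asyncio

async def download() -> None:
    try:
        await asyncio.sleep(60)   # pretend to stream data
    except Exception:
        print("plain Exception handler: not reached on cancellation")
        raise
    except asyncio.CancelledError:
        print("cancellation seen here: tidy up, then re-raise")
        raise

async def main() -> None:
    task = asyncio.create_task(download())
    await asyncio.sleep(0)        # let the download start
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print("caller observed the cancellation")

asyncio.run(main())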


@@ -45,11 +45,12 @@ class ManagedDownloadSource:
self.purchase_receipt = None self.purchase_receipt = None
self._added_on = added_on self._added_on = added_on
self.analytics_manager = analytics_manager self.analytics_manager = analytics_manager
self.downloader = None
self.saving = asyncio.Event(loop=self.loop) self.saving = asyncio.Event()
self.finished_writing = asyncio.Event(loop=self.loop) self.finished_writing = asyncio.Event()
self.started_writing = asyncio.Event(loop=self.loop) self.started_writing = asyncio.Event()
self.finished_write_attempt = asyncio.Event(loop=self.loop) self.finished_write_attempt = asyncio.Event()
# @classmethod # @classmethod
# async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str, # async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', file_path: str,
@@ -66,7 +67,7 @@ class ManagedDownloadSource:
async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None): async def save_file(self, file_name: Optional[str] = None, download_directory: Optional[str] = None):
raise NotImplementedError() raise NotImplementedError()
def stop_tasks(self): async def stop_tasks(self):
raise NotImplementedError() raise NotImplementedError()
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'): def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
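
The event attributes above drop their loop=self.loop argument: asyncio.Event() now binds to whichever loop is running when the event is first awaited, and recent Python releases removed the keyword outright (the loop= dropped from asyncio.wait_for() in the file_manager hunk earlier is the same cleanup). A minimal sketch with an illustrative class:

import asyncio

class DownloadSource:
    """Illustrative stand-in for a source using loop-free asyncio primitives."""

    def __init__(self) -> None:
        # no loop= argument; the events attach to the running loop on first use
        self.saving = asyncio.Event()
        self.finished_writing = asyncio.Event()

    async def stop_tasks(self) -> None:
        # a coroutine, so callers can await the cleanup before moving on
        self.saving.clear()
        self.finished_writing.set()

async def main() -> None:
    source = DownloadSource()
    await asyncio.wait_for(source.stop_tasks(), timeout=1)  # no loop= keyword here either
    print("finished:", source.finished_writing.is_set())

asyncio.run(main())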


@@ -54,16 +54,16 @@ class SourceManager:
self.storage = storage self.storage = storage
self.analytics_manager = analytics_manager self.analytics_manager = analytics_manager
self._sources: typing.Dict[str, ManagedDownloadSource] = {} self._sources: typing.Dict[str, ManagedDownloadSource] = {}
self.started = asyncio.Event(loop=self.loop) self.started = asyncio.Event()
def add(self, source: ManagedDownloadSource): def add(self, source: ManagedDownloadSource):
self._sources[source.identifier] = source self._sources[source.identifier] = source
def remove(self, source: ManagedDownloadSource): async def remove(self, source: ManagedDownloadSource):
if source.identifier not in self._sources: if source.identifier not in self._sources:
return return
self._sources.pop(source.identifier) self._sources.pop(source.identifier)
source.stop_tasks() await source.stop_tasks()
async def initialize_from_database(self): async def initialize_from_database(self):
raise NotImplementedError() raise NotImplementedError()
@@ -72,10 +72,10 @@ class SourceManager:
await self.initialize_from_database() await self.initialize_from_database()
self.started.set() self.started.set()
def stop(self): async def stop(self):
while self._sources: while self._sources:
_, source = self._sources.popitem() _, source = self._sources.popitem()
source.stop_tasks() await source.stop_tasks()
self.started.clear() self.started.clear()
async def create(self, file_path: str, key: Optional[bytes] = None, async def create(self, file_path: str, key: Optional[bytes] = None,
@@ -83,7 +83,7 @@ class SourceManager:
raise NotImplementedError() raise NotImplementedError()
async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False): async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):
self.remove(source) await self.remove(source)
if delete_file and source.output_file_exists: if delete_file and source.output_file_exists:
os.remove(source.full_path) os.remove(source.full_path)
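
Because stop_tasks() is now a coroutine, everything that reaches it (remove(), stop(), and delete() above, plus FileManager.stop() earlier) has to become async and await it, otherwise the per-source cleanup would be scheduled but never completed before the manager clears its started flag. A condensed sketch of that call chain with illustrative names:

import asyncio
from typing import Dict

class Source:
    async def stop_tasks(self) -> None:
        await asyncio.sleep(0)  # pretend to cancel and await background work

class Manager:
    def __init__(self) -> None:
        self._sources: Dict[str, Source] = {"a": Source(), "b": Source()}
        self.started = asyncio.Event()

    async def remove(self, key: str) -> None:
        source = self._sources.pop(key, None)
        if source is not None:
            await source.stop_tasks()

    async def stop(self) -> None:
        while self._sources:
            _, source = self._sources.popitem()
            await source.stop_tasks()
        self.started.clear()

async def main() -> None:
    manager = Manager()
    manager.started.set()
    await manager.remove("a")
    await manager.stop()
    print("stopped:", not manager.started.is_set())

asyncio.run(main())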


@@ -2,4 +2,5 @@ build:
rm types/v2/* -rf rm types/v2/* -rf
touch types/v2/__init__.py touch types/v2/__init__.py
cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto cd types/v2/ && protoc --python_out=. -I ../../../../../types/v2/proto/ ../../../../../types/v2/proto/*.proto
cd types/v2/ && cp ../../../../../types/jsonschema/* ./
sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py sed -e 's/^import\ \(.*\)_pb2\ /from . import\ \1_pb2\ /g' -i types/v2/*.py


@@ -398,6 +398,12 @@ class Repost(BaseClaim):
claim_type = Claim.REPOST claim_type = Claim.REPOST
def to_dict(self):
claim = super().to_dict()
if claim.pop('claim_hash', None):
claim['claim_id'] = self.reference.claim_id
return claim
@property @property
def reference(self) -> ClaimReference: def reference(self) -> ClaimReference:
return ClaimReference(self.message) return ClaimReference(self.message)
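
The new Repost.to_dict() override replaces the raw claim_hash that the base serializer emits with the reference's readable claim_id. A simplified sketch of the same idea, with a plain-dict base class standing in for the protobuf-backed one (all names here are illustrative):

class BaseClaim:
    def __init__(self, claim_hash: bytes, claim_id: str) -> None:
        self.claim_hash = claim_hash
        self.claim_id = claim_id

    def to_dict(self) -> dict:
        # the generic serializer only knows about the raw hash
        return {"claim_hash": self.claim_hash}

class Repost(BaseClaim):
    def to_dict(self) -> dict:
        claim = super().to_dict()
        # swap the binary hash for the claim_id, mirroring the hunk above
        if claim.pop("claim_hash", None):
            claim["claim_id"] = self.claim_id
        return claim

print(Repost(claim_hash=b"\xab" * 20, claim_id="ab" * 20).to_dict())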


@@ -1,13 +1,11 @@
import base64 import base64
from typing import List, TYPE_CHECKING, Union, Optional from typing import List, Union, Optional, NamedTuple
from binascii import hexlify from binascii import hexlify
from itertools import chain from itertools import chain
from lbry.error import ResolveCensoredError from lbry.error import ResolveCensoredError
from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage from lbry.schema.types.v2.result_pb2 import Outputs as OutputsMessage
from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage from lbry.schema.types.v2.result_pb2 import Error as ErrorMessage
if TYPE_CHECKING:
from lbry.wallet.server.leveldb import ResolveResult
INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID) INVALID = ErrorMessage.Code.Name(ErrorMessage.INVALID)
NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND) NOT_FOUND = ErrorMessage.Code.Name(ErrorMessage.NOT_FOUND)
@@ -24,6 +22,31 @@ def set_reference(reference, claim_hash, rows):
return return
class ResolveResult(NamedTuple):
name: str
normalized_name: str
claim_hash: bytes
tx_num: int
position: int
tx_hash: bytes
height: int
amount: int
short_url: str
is_controlling: bool
canonical_url: str
creation_height: int
activation_height: int
expiration_height: int
effective_amount: int
support_amount: int
reposted: int
last_takeover_height: Optional[int]
claims_in_channel: Optional[int]
channel_hash: Optional[bytes]
reposted_claim_hash: Optional[bytes]
signature_valid: Optional[bool]
class Censor: class Censor:
NOT_CENSORED = 0 NOT_CENSORED = 0
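
With the hunk above, ResolveResult becomes a plain typing.NamedTuple defined in result.py itself rather than a type imported from lbry.wallet.server.leveldb under TYPE_CHECKING, so the schema module carries its own record type. A cut-down sketch of how such a record behaves (far fewer fields than the real definition, purely illustrative):

from typing import NamedTuple, Optional

class MiniResolveResult(NamedTuple):
    # trimmed stand-in for the full ResolveResult defined above
    name: str
    claim_hash: bytes
    height: int
    channel_hash: Optional[bytes] = None  # optional fields may simply be None

result = MiniResolveResult(name="example", claim_hash=b"\x01" * 20, height=1000)
print(result.name, result.height, result._asdict()["channel_hash"])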
@@ -154,19 +177,6 @@ class Outputs:
outputs.blocked, outputs.blocked_total outputs.blocked, outputs.blocked_total
) )
@classmethod
def from_grpc(cls, outputs: OutputsMessage) -> 'Outputs':
txs = set()
for txo_message in chain(outputs.txos, outputs.extra_txos):
if txo_message.WhichOneof('meta') == 'error':
continue
txs.add((hexlify(txo_message.tx_hash[::-1]).decode(), txo_message.height))
return cls(
outputs.txos, outputs.extra_txos, txs,
outputs.offset, outputs.total,
outputs.blocked, outputs.blocked_total
)
@classmethod @classmethod
def to_base64(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked=None) -> str: def to_base64(cls, txo_rows, extra_txo_rows, offset=0, total=None, blocked=None) -> str:
return base64.b64encode(cls.to_bytes(txo_rows, extra_txo_rows, offset, total, blocked)).decode() return base64.b64encode(cls.to_bytes(txo_rows, extra_txo_rows, offset, total, blocked)).decode()


@@ -1,960 +0,0 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hub.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import result_pb2 as result__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hub.proto',
package='pb',
syntax='proto3',
serialized_options=b'Z$github.com/lbryio/hub/protobuf/go/pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\thub.proto\x12\x02pb\x1a\x0cresult.proto\"\x0e\n\x0c\x45mptyMessage\".\n\rServerMessage\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\"N\n\x0cHelloMessage\x12\x0c\n\x04port\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\"\n\x07servers\x18\x03 \x03(\x0b\x32\x11.pb.ServerMessage\"0\n\x0fInvertibleField\x12\x0e\n\x06invert\x18\x01 \x01(\x08\x12\r\n\x05value\x18\x02 \x03(\t\"\x1c\n\x0bStringValue\x12\r\n\x05value\x18\x01 \x01(\t\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"\x1c\n\x0bUInt32Value\x12\r\n\x05value\x18\x01 \x01(\r\"j\n\nRangeField\x12\x1d\n\x02op\x18\x01 \x01(\x0e\x32\x11.pb.RangeField.Op\x12\r\n\x05value\x18\x02 \x03(\x05\".\n\x02Op\x12\x06\n\x02\x45Q\x10\x00\x12\x07\n\x03LTE\x10\x01\x12\x07\n\x03GTE\x10\x02\x12\x06\n\x02LT\x10\x03\x12\x06\n\x02GT\x10\x04\"\x8e\x0c\n\rSearchRequest\x12%\n\x08\x63laim_id\x18\x01 \x01(\x0b\x32\x13.pb.InvertibleField\x12\'\n\nchannel_id\x18\x02 \x01(\x0b\x32\x13.pb.InvertibleField\x12\x0c\n\x04text\x18\x03 \x01(\t\x12\r\n\x05limit\x18\x04 \x01(\x05\x12\x10\n\x08order_by\x18\x05 \x03(\t\x12\x0e\n\x06offset\x18\x06 \x01(\r\x12\x16\n\x0eis_controlling\x18\x07 \x01(\x08\x12\x1d\n\x15last_take_over_height\x18\x08 \x01(\t\x12\x12\n\nclaim_name\x18\t \x01(\t\x12\x17\n\x0fnormalized_name\x18\n \x01(\t\x12#\n\x0btx_position\x18\x0b \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06\x61mount\x18\x0c \x03(\x0b\x32\x0e.pb.RangeField\x12!\n\ttimestamp\x18\r \x03(\x0b\x32\x0e.pb.RangeField\x12*\n\x12\x63reation_timestamp\x18\x0e \x03(\x0b\x32\x0e.pb.RangeField\x12\x1e\n\x06height\x18\x0f \x03(\x0b\x32\x0e.pb.RangeField\x12\'\n\x0f\x63reation_height\x18\x10 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x61\x63tivation_height\x18\x11 \x03(\x0b\x32\x0e.pb.RangeField\x12)\n\x11\x65xpiration_height\x18\x12 \x03(\x0b\x32\x0e.pb.RangeField\x12$\n\x0crelease_time\x18\x13 \x03(\x0b\x32\x0e.pb.RangeField\x12\x11\n\tshort_url\x18\x14 \x01(\t\x12\x15\n\rcanonical_url\x18\x15 \x01(\t\x12\r\n\x05title\x18\x16 \x01(\t\x12\x0e\n\x06\x61uthor\x18\x17 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x18 \x01(\t\x12\x12\n\nclaim_type\x18\x19 \x03(\t\x12$\n\x0crepost_count\x18\x1a \x03(\x0b\x32\x0e.pb.RangeField\x12\x13\n\x0bstream_type\x18\x1b \x03(\t\x12\x12\n\nmedia_type\x18\x1c \x03(\t\x12\"\n\nfee_amount\x18\x1d \x03(\x0b\x32\x0e.pb.RangeField\x12\x14\n\x0c\x66\x65\x65_currency\x18\x1e \x01(\t\x12 \n\x08\x64uration\x18\x1f \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11reposted_claim_id\x18 \x01(\t\x12#\n\x0b\x63\x65nsor_type\x18! \x03(\x0b\x32\x0e.pb.RangeField\x12\x19\n\x11\x63laims_in_channel\x18\" \x01(\t\x12)\n\x12is_signature_valid\x18$ \x01(\x0b\x32\r.pb.BoolValue\x12(\n\x10\x65\x66\x66\x65\x63tive_amount\x18% \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0esupport_amount\x18& \x03(\x0b\x32\x0e.pb.RangeField\x12&\n\x0etrending_score\x18\' \x03(\x0b\x32\x0e.pb.RangeField\x12\r\n\x05tx_id\x18+ \x01(\t\x12 \n\x07tx_nout\x18, \x01(\x0b\x32\x0f.pb.UInt32Value\x12\x11\n\tsignature\x18- \x01(\t\x12\x18\n\x10signature_digest\x18. 
\x01(\t\x12\x18\n\x10public_key_bytes\x18/ \x01(\t\x12\x15\n\rpublic_key_id\x18\x30 \x01(\t\x12\x10\n\x08\x61ny_tags\x18\x31 \x03(\t\x12\x10\n\x08\x61ll_tags\x18\x32 \x03(\t\x12\x10\n\x08not_tags\x18\x33 \x03(\t\x12\x1d\n\x15has_channel_signature\x18\x34 \x01(\x08\x12!\n\nhas_source\x18\x35 \x01(\x0b\x32\r.pb.BoolValue\x12 \n\x18limit_claims_per_channel\x18\x36 \x01(\x05\x12\x15\n\rany_languages\x18\x37 \x03(\t\x12\x15\n\rall_languages\x18\x38 \x03(\t\x12\x19\n\x11remove_duplicates\x18\x39 \x01(\x08\x12\x11\n\tno_totals\x18: \x01(\x08\x12\x0f\n\x07sd_hash\x18; \x01(\t2\x88\x03\n\x03Hub\x12*\n\x06Search\x12\x11.pb.SearchRequest\x1a\x0b.pb.Outputs\"\x00\x12+\n\x04Ping\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12-\n\x05Hello\x12\x10.pb.HelloMessage\x1a\x10.pb.HelloMessage\"\x00\x12/\n\x07\x41\x64\x64Peer\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12\x35\n\rPeerSubscribe\x12\x11.pb.ServerMessage\x1a\x0f.pb.StringValue\"\x00\x12.\n\x07Version\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12/\n\x08\x46\x65\x61tures\x12\x10.pb.EmptyMessage\x1a\x0f.pb.StringValue\"\x00\x12\x30\n\tBroadcast\x12\x10.pb.EmptyMessage\x1a\x0f.pb.UInt32Value\"\x00\x42&Z$github.com/lbryio/hub/protobuf/go/pbb\x06proto3'
,
dependencies=[result__pb2.DESCRIPTOR,])
_RANGEFIELD_OP = _descriptor.EnumDescriptor(
name='Op',
full_name='pb.RangeField.Op',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='EQ', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LTE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GTE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LT', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GT', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=373,
serialized_end=419,
)
_sym_db.RegisterEnumDescriptor(_RANGEFIELD_OP)
_EMPTYMESSAGE = _descriptor.Descriptor(
name='EmptyMessage',
full_name='pb.EmptyMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=45,
)
_SERVERMESSAGE = _descriptor.Descriptor(
name='ServerMessage',
full_name='pb.ServerMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='pb.ServerMessage.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='port', full_name='pb.ServerMessage.port', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=47,
serialized_end=93,
)
_HELLOMESSAGE = _descriptor.Descriptor(
name='HelloMessage',
full_name='pb.HelloMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='port', full_name='pb.HelloMessage.port', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host', full_name='pb.HelloMessage.host', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='servers', full_name='pb.HelloMessage.servers', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=173,
)
_INVERTIBLEFIELD = _descriptor.Descriptor(
name='InvertibleField',
full_name='pb.InvertibleField',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='invert', full_name='pb.InvertibleField.invert', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='pb.InvertibleField.value', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=175,
serialized_end=223,
)
_STRINGVALUE = _descriptor.Descriptor(
name='StringValue',
full_name='pb.StringValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='pb.StringValue.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=225,
serialized_end=253,
)
_BOOLVALUE = _descriptor.Descriptor(
name='BoolValue',
full_name='pb.BoolValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='pb.BoolValue.value', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=255,
serialized_end=281,
)
_UINT32VALUE = _descriptor.Descriptor(
name='UInt32Value',
full_name='pb.UInt32Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='pb.UInt32Value.value', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=283,
serialized_end=311,
)
_RANGEFIELD = _descriptor.Descriptor(
name='RangeField',
full_name='pb.RangeField',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='op', full_name='pb.RangeField.op', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='pb.RangeField.value', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_RANGEFIELD_OP,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=313,
serialized_end=419,
)
_SEARCHREQUEST = _descriptor.Descriptor(
name='SearchRequest',
full_name='pb.SearchRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='claim_id', full_name='pb.SearchRequest.claim_id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='channel_id', full_name='pb.SearchRequest.channel_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='text', full_name='pb.SearchRequest.text', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='pb.SearchRequest.limit', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='order_by', full_name='pb.SearchRequest.order_by', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='offset', full_name='pb.SearchRequest.offset', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_controlling', full_name='pb.SearchRequest.is_controlling', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='last_take_over_height', full_name='pb.SearchRequest.last_take_over_height', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claim_name', full_name='pb.SearchRequest.claim_name', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='normalized_name', full_name='pb.SearchRequest.normalized_name', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_position', full_name='pb.SearchRequest.tx_position', index=10,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='amount', full_name='pb.SearchRequest.amount', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='pb.SearchRequest.timestamp', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_timestamp', full_name='pb.SearchRequest.creation_timestamp', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='height', full_name='pb.SearchRequest.height', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='creation_height', full_name='pb.SearchRequest.creation_height', index=15,
number=16, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='activation_height', full_name='pb.SearchRequest.activation_height', index=16,
number=17, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expiration_height', full_name='pb.SearchRequest.expiration_height', index=17,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='release_time', full_name='pb.SearchRequest.release_time', index=18,
number=19, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='short_url', full_name='pb.SearchRequest.short_url', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='canonical_url', full_name='pb.SearchRequest.canonical_url', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='title', full_name='pb.SearchRequest.title', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='author', full_name='pb.SearchRequest.author', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='pb.SearchRequest.description', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claim_type', full_name='pb.SearchRequest.claim_type', index=24,
number=25, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repost_count', full_name='pb.SearchRequest.repost_count', index=25,
number=26, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stream_type', full_name='pb.SearchRequest.stream_type', index=26,
number=27, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='media_type', full_name='pb.SearchRequest.media_type', index=27,
number=28, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fee_amount', full_name='pb.SearchRequest.fee_amount', index=28,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fee_currency', full_name='pb.SearchRequest.fee_currency', index=29,
number=30, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='pb.SearchRequest.duration', index=30,
number=31, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='reposted_claim_id', full_name='pb.SearchRequest.reposted_claim_id', index=31,
number=32, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='censor_type', full_name='pb.SearchRequest.censor_type', index=32,
number=33, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='claims_in_channel', full_name='pb.SearchRequest.claims_in_channel', index=33,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_signature_valid', full_name='pb.SearchRequest.is_signature_valid', index=34,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='effective_amount', full_name='pb.SearchRequest.effective_amount', index=35,
number=37, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='support_amount', full_name='pb.SearchRequest.support_amount', index=36,
number=38, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trending_score', full_name='pb.SearchRequest.trending_score', index=37,
number=39, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_id', full_name='pb.SearchRequest.tx_id', index=38,
number=43, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tx_nout', full_name='pb.SearchRequest.tx_nout', index=39,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='pb.SearchRequest.signature', index=40,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature_digest', full_name='pb.SearchRequest.signature_digest', index=41,
number=46, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key_bytes', full_name='pb.SearchRequest.public_key_bytes', index=42,
number=47, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key_id', full_name='pb.SearchRequest.public_key_id', index=43,
number=48, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='any_tags', full_name='pb.SearchRequest.any_tags', index=44,
number=49, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_tags', full_name='pb.SearchRequest.all_tags', index=45,
number=50, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='not_tags', full_name='pb.SearchRequest.not_tags', index=46,
number=51, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_channel_signature', full_name='pb.SearchRequest.has_channel_signature', index=47,
number=52, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='has_source', full_name='pb.SearchRequest.has_source', index=48,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit_claims_per_channel', full_name='pb.SearchRequest.limit_claims_per_channel', index=49,
number=54, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='any_languages', full_name='pb.SearchRequest.any_languages', index=50,
number=55, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_languages', full_name='pb.SearchRequest.all_languages', index=51,
number=56, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='remove_duplicates', full_name='pb.SearchRequest.remove_duplicates', index=52,
number=57, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='no_totals', full_name='pb.SearchRequest.no_totals', index=53,
number=58, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sd_hash', full_name='pb.SearchRequest.sd_hash', index=54,
number=59, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=422,
serialized_end=1972,
)
_HELLOMESSAGE.fields_by_name['servers'].message_type = _SERVERMESSAGE
_RANGEFIELD.fields_by_name['op'].enum_type = _RANGEFIELD_OP
_RANGEFIELD_OP.containing_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['claim_id'].message_type = _INVERTIBLEFIELD
_SEARCHREQUEST.fields_by_name['channel_id'].message_type = _INVERTIBLEFIELD
_SEARCHREQUEST.fields_by_name['tx_position'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['timestamp'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['creation_timestamp'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['creation_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['activation_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['expiration_height'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['release_time'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['repost_count'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['fee_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['duration'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['censor_type'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['is_signature_valid'].message_type = _BOOLVALUE
_SEARCHREQUEST.fields_by_name['effective_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['support_amount'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['trending_score'].message_type = _RANGEFIELD
_SEARCHREQUEST.fields_by_name['tx_nout'].message_type = _UINT32VALUE
_SEARCHREQUEST.fields_by_name['has_source'].message_type = _BOOLVALUE
DESCRIPTOR.message_types_by_name['EmptyMessage'] = _EMPTYMESSAGE
DESCRIPTOR.message_types_by_name['ServerMessage'] = _SERVERMESSAGE
DESCRIPTOR.message_types_by_name['HelloMessage'] = _HELLOMESSAGE
DESCRIPTOR.message_types_by_name['InvertibleField'] = _INVERTIBLEFIELD
DESCRIPTOR.message_types_by_name['StringValue'] = _STRINGVALUE
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
DESCRIPTOR.message_types_by_name['UInt32Value'] = _UINT32VALUE
DESCRIPTOR.message_types_by_name['RangeField'] = _RANGEFIELD
DESCRIPTOR.message_types_by_name['SearchRequest'] = _SEARCHREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EmptyMessage = _reflection.GeneratedProtocolMessageType('EmptyMessage', (_message.Message,), {
'DESCRIPTOR' : _EMPTYMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.EmptyMessage)
})
_sym_db.RegisterMessage(EmptyMessage)
ServerMessage = _reflection.GeneratedProtocolMessageType('ServerMessage', (_message.Message,), {
'DESCRIPTOR' : _SERVERMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.ServerMessage)
})
_sym_db.RegisterMessage(ServerMessage)
HelloMessage = _reflection.GeneratedProtocolMessageType('HelloMessage', (_message.Message,), {
'DESCRIPTOR' : _HELLOMESSAGE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.HelloMessage)
})
_sym_db.RegisterMessage(HelloMessage)
InvertibleField = _reflection.GeneratedProtocolMessageType('InvertibleField', (_message.Message,), {
'DESCRIPTOR' : _INVERTIBLEFIELD,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.InvertibleField)
})
_sym_db.RegisterMessage(InvertibleField)
StringValue = _reflection.GeneratedProtocolMessageType('StringValue', (_message.Message,), {
'DESCRIPTOR' : _STRINGVALUE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.StringValue)
})
_sym_db.RegisterMessage(StringValue)
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), {
'DESCRIPTOR' : _BOOLVALUE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.BoolValue)
})
_sym_db.RegisterMessage(BoolValue)
UInt32Value = _reflection.GeneratedProtocolMessageType('UInt32Value', (_message.Message,), {
'DESCRIPTOR' : _UINT32VALUE,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.UInt32Value)
})
_sym_db.RegisterMessage(UInt32Value)
RangeField = _reflection.GeneratedProtocolMessageType('RangeField', (_message.Message,), {
'DESCRIPTOR' : _RANGEFIELD,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.RangeField)
})
_sym_db.RegisterMessage(RangeField)
SearchRequest = _reflection.GeneratedProtocolMessageType('SearchRequest', (_message.Message,), {
'DESCRIPTOR' : _SEARCHREQUEST,
'__module__' : 'hub_pb2'
# @@protoc_insertion_point(class_scope:pb.SearchRequest)
})
_sym_db.RegisterMessage(SearchRequest)
DESCRIPTOR._options = None
_HUB = _descriptor.ServiceDescriptor(
name='Hub',
full_name='pb.Hub',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1975,
serialized_end=2367,
methods=[
_descriptor.MethodDescriptor(
name='Search',
full_name='pb.Hub.Search',
index=0,
containing_service=None,
input_type=_SEARCHREQUEST,
output_type=result__pb2._OUTPUTS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Ping',
full_name='pb.Hub.Ping',
index=1,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Hello',
full_name='pb.Hub.Hello',
index=2,
containing_service=None,
input_type=_HELLOMESSAGE,
output_type=_HELLOMESSAGE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='AddPeer',
full_name='pb.Hub.AddPeer',
index=3,
containing_service=None,
input_type=_SERVERMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='PeerSubscribe',
full_name='pb.Hub.PeerSubscribe',
index=4,
containing_service=None,
input_type=_SERVERMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Version',
full_name='pb.Hub.Version',
index=5,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Features',
full_name='pb.Hub.Features',
index=6,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_STRINGVALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Broadcast',
full_name='pb.Hub.Broadcast',
index=7,
containing_service=None,
input_type=_EMPTYMESSAGE,
output_type=_UINT32VALUE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_HUB)
DESCRIPTOR.services_by_name['Hub'] = _HUB
# @@protoc_insertion_point(module_scope)


@@ -1,298 +0,0 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import hub_pb2 as hub__pb2
from . import result_pb2 as result__pb2
class HubStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Search = channel.unary_unary(
'/pb.Hub/Search',
request_serializer=hub__pb2.SearchRequest.SerializeToString,
response_deserializer=result__pb2.Outputs.FromString,
)
self.Ping = channel.unary_unary(
'/pb.Hub/Ping',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Hello = channel.unary_unary(
'/pb.Hub/Hello',
request_serializer=hub__pb2.HelloMessage.SerializeToString,
response_deserializer=hub__pb2.HelloMessage.FromString,
)
self.AddPeer = channel.unary_unary(
'/pb.Hub/AddPeer',
request_serializer=hub__pb2.ServerMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.PeerSubscribe = channel.unary_unary(
'/pb.Hub/PeerSubscribe',
request_serializer=hub__pb2.ServerMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Version = channel.unary_unary(
'/pb.Hub/Version',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Features = channel.unary_unary(
'/pb.Hub/Features',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.StringValue.FromString,
)
self.Broadcast = channel.unary_unary(
'/pb.Hub/Broadcast',
request_serializer=hub__pb2.EmptyMessage.SerializeToString,
response_deserializer=hub__pb2.UInt32Value.FromString,
)
class HubServicer(object):
"""Missing associated documentation comment in .proto file."""
def Search(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Ping(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Hello(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddPeer(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PeerSubscribe(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Version(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Features(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Broadcast(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_HubServicer_to_server(servicer, server):
rpc_method_handlers = {
'Search': grpc.unary_unary_rpc_method_handler(
servicer.Search,
request_deserializer=hub__pb2.SearchRequest.FromString,
response_serializer=result__pb2.Outputs.SerializeToString,
),
'Ping': grpc.unary_unary_rpc_method_handler(
servicer.Ping,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Hello': grpc.unary_unary_rpc_method_handler(
servicer.Hello,
request_deserializer=hub__pb2.HelloMessage.FromString,
response_serializer=hub__pb2.HelloMessage.SerializeToString,
),
'AddPeer': grpc.unary_unary_rpc_method_handler(
servicer.AddPeer,
request_deserializer=hub__pb2.ServerMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'PeerSubscribe': grpc.unary_unary_rpc_method_handler(
servicer.PeerSubscribe,
request_deserializer=hub__pb2.ServerMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Version': grpc.unary_unary_rpc_method_handler(
servicer.Version,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Features': grpc.unary_unary_rpc_method_handler(
servicer.Features,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.StringValue.SerializeToString,
),
'Broadcast': grpc.unary_unary_rpc_method_handler(
servicer.Broadcast,
request_deserializer=hub__pb2.EmptyMessage.FromString,
response_serializer=hub__pb2.UInt32Value.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'pb.Hub', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Hub(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Search(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Search',
hub__pb2.SearchRequest.SerializeToString,
result__pb2.Outputs.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Ping(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Ping',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Hello(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Hello',
hub__pb2.HelloMessage.SerializeToString,
hub__pb2.HelloMessage.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddPeer(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/AddPeer',
hub__pb2.ServerMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PeerSubscribe(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/PeerSubscribe',
hub__pb2.ServerMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Version(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Version',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Features(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Features',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.StringValue.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Broadcast(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/pb.Hub/Broadcast',
hub__pb2.EmptyMessage.SerializeToString,
hub__pb2.UInt32Value.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
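
For context on what is being removed here: a caller would have reached the hub through the generated stub roughly as in the sketch below. This is a minimal illustration only; the host/port and the SearchRequest field name are assumptions, not taken from this diff.

# Hypothetical usage of the removed stub (host, port and the 'text' field are assumptions).
import grpc
from . import hub_pb2 as hub__pb2
from . import hub_pb2_grpc

def search_hub(text, host='localhost', port=50051):
    with grpc.insecure_channel(f'{host}:{port}') as channel:
        stub = hub_pb2_grpc.HubStub(channel)
        return stub.Search(hub__pb2.SearchRequest(text=text))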

View file

@ -1,4 +0,0 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

View file

@ -0,0 +1,139 @@
{
"title": "Wallet",
"description": "An LBC wallet",
"type": "object",
"required": ["name", "version", "accounts", "preferences"],
"additionalProperties": false,
"properties": {
"name": {
"description": "Human readable name for this wallet",
"type": "string"
},
"version": {
"description": "Wallet spec version",
"type": "integer",
"$comment": "Should this be a string? We may need some sort of decimal type if we want exact decimal versions."
},
"accounts": {
"description": "Accounts associated with this wallet",
"type": "array",
"items": {
"type": "object",
"required": ["address_generator", "certificates", "encrypted", "ledger", "modified_on", "name", "private_key", "public_key", "seed"],
"additionalProperties": false,
"properties": {
"address_generator": {
"description": "Higher level manager of either singular or deterministically generated addresses",
"type": "object",
"oneOf": [
{
"required": ["name", "change", "receiving"],
"additionalProperties": false,
"properties": {
"name": {
"description": "type of address generator: a deterministic chain of addresses",
"enum": ["deterministic-chain"],
"type": "string"
},
"change": {
"$ref": "#/$defs/address_manager",
"description": "Manager for deterministically generated change address (not used for single address)"
},
"receiving": {
"$ref": "#/$defs/address_manager",
"description": "Manager for deterministically generated receiving address (not used for single address)"
}
}
}, {
"required": ["name"],
"additionalProperties": false,
"properties": {
"name": {
"description": "type of address generator: a single address",
"enum": ["single-address"],
"type": "string"
}
}
}
]
},
"certificates": {
"type": "object",
"description": "Channel keys. Mapping from public key address to pem-formatted private key.",
"additionalProperties": {"type": "string"}
},
"encrypted": {
"type": "boolean",
"description": "Whether private key and seed are encrypted with a password"
},
"ledger": {
"description": "Which network to use",
"type": "string",
"examples": [
"lbc_mainnet",
"lbc_testnet"
]
},
"modified_on": {
"description": "last modified time in Unix Time",
"type": "integer"
},
"name": {
"description": "Name for account, possibly human readable",
"type": "string"
},
"private_key": {
"description": "Private key for address if `address_generator` is a single address. Root of chain of private keys for addresses if `address_generator` is a deterministic chain of addresses. Encrypted if `encrypted` is true.",
"type": "string"
},
"public_key": {
"description": "Public key for address if `address_generator` is a single address. Root of chain of public keys for addresses if `address_generator` is a deterministic chain of addresses.",
"type": "string"
},
"seed": {
"description": "Human readable representation of `private_key`. encrypted if `encrypted` is set to `true`",
"type": "string"
}
}
}
},
"preferences": {
"description": "Timestamped application-level preferences. Values can be objects or of a primitive type.",
"$comment": "enable-sync is seen in example wallet. encrypt-on-disk is seen in example wallet. they both have a boolean `value` field. Do we want them explicitly defined here? local and shared seem to have at least a similar structure (type, value [yes, again], version), value being the free-form part. Should we define those here? Or can there be any key under preferences, and `value` be literally be anything in any form?",
"type": "object",
"additionalProperties": {
"type": "object",
"required": ["ts", "value"],
"additionalProperties": false,
"properties": {
"ts": {
"type": "number",
"description": "When the item was set, in Unix time format.",
"$comment": "Do we want a string (decimal)?"
},
"value": {
"$comment": "Sometimes this has been an object, sometimes just a boolean. I don't want to prescribe anything."
}
}
}
}
},
"$defs": {
"address_manager": {
"description": "Manager for deterministically generated addresses",
"type": "object",
"required": ["gap", "maximum_uses_per_address"],
"additionalProperties": false,
"properties": {
"gap": {
"description": "Maximum allowed consecutive generated addresses with no transactions",
"type": "integer"
},
"maximum_uses_per_address": {
"description": "Maximum number of uses for each generated address",
"type": "integer"
}
}
}
}
}
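
A minimal document satisfying this schema might look like the sketch below, expressed as a Python dict so it can be checked with the third-party jsonschema package if available; all values are made up for illustration.

minimal_wallet = {
    "name": "Wallet",
    "version": 1,
    "preferences": {},
    "accounts": [{
        "name": "Account #1",
        "ledger": "lbc_mainnet",
        "modified_on": 1600000000,
        "encrypted": False,
        "seed": "<12 or 24 seed words>",
        "private_key": "<root private key>",
        "public_key": "<root public key>",
        "certificates": {},
        "address_generator": {"name": "single-address"},
    }],
}

# Optional check against the schema above, loaded into a dict named wallet_schema:
# from jsonschema import validate
# validate(minimal_wallet, wallet_schema)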

View file

@@ -23,6 +23,7 @@ class BackgroundDownloader:
        except ValueError:
            return
        except asyncio.CancelledError:
+           log.debug("Cancelled background downloader")
            raise
        except Exception:
            log.error("Unexpected download error on background downloader")

View file

@@ -194,12 +194,13 @@ class StreamDescriptor:
            raise InvalidStreamDescriptorError("Stream terminator blob should not have a hash")
        if any(i != blob_info['blob_num'] for i, blob_info in enumerate(decoded['blobs'])):
            raise InvalidStreamDescriptorError("Stream contains out of order or skipped blobs")
+       added_on = time.time()
        descriptor = cls(
            loop, blob_dir,
            binascii.unhexlify(decoded['stream_name']).decode(),
            decoded['key'],
            binascii.unhexlify(decoded['suggested_file_name']).decode(),
-           [BlobInfo(info['blob_num'], info['length'], info['iv'], info.get('blob_hash'))
+           [BlobInfo(info['blob_num'], info['length'], info['iv'], added_on, info.get('blob_hash'))
             for info in decoded['blobs']],
            decoded['stream_hash'],
            blob.blob_hash
@@ -266,7 +267,7 @@ class StreamDescriptor:
            blobs.append(blob_info)
        blobs.append(
            # add the stream terminator
-           BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), None, added_on, True)
+           BlobInfo(len(blobs), 0, binascii.hexlify(next(iv_generator)).decode(), added_on, None, True)
        )
        file_name = os.path.basename(file_path)
        suggested_file_name = sanitize_file_name(file_name)

View file

@@ -8,6 +8,8 @@ from lbry.error import DownloadSDTimeoutError
 from lbry.utils import lru_cache_concurrent
 from lbry.stream.descriptor import StreamDescriptor
 from lbry.blob_exchange.downloader import BlobDownloader
+from lbry.torrent.tracker import enqueue_tracker_search
 if typing.TYPE_CHECKING:
     from lbry.conf import Config
     from lbry.dht.node import Node
@@ -25,8 +27,8 @@ class StreamDownloader:
        self.config = config
        self.blob_manager = blob_manager
        self.sd_hash = sd_hash
-       self.search_queue = asyncio.Queue(loop=loop)  # blob hashes to feed into the iterative finder
-       self.peer_queue = asyncio.Queue(loop=loop)  # new peers to try
+       self.search_queue = asyncio.Queue()  # blob hashes to feed into the iterative finder
+       self.peer_queue = asyncio.Queue()  # new peers to try
        self.blob_downloader = BlobDownloader(self.loop, self.config, self.blob_manager, self.peer_queue)
        self.descriptor: typing.Optional[StreamDescriptor] = descriptor
        self.node: typing.Optional['Node'] = None
@@ -70,7 +72,7 @@ class StreamDownloader:
        now = self.loop.time()
        sd_blob = await asyncio.wait_for(
            self.blob_downloader.download_blob(self.sd_hash, connection_id),
-           self.config.blob_download_timeout, loop=self.loop
+           self.config.blob_download_timeout
        )
        log.info("downloaded sd blob %s", self.sd_hash)
        self.time_to_descriptor = self.loop.time() - now
@@ -91,6 +93,7 @@ class StreamDownloader:
            self.accumulate_task.cancel()
        _, self.accumulate_task = self.node.accumulate_peers(self.search_queue, self.peer_queue)
        await self.add_fixed_peers()
+       enqueue_tracker_search(bytes.fromhex(self.sd_hash), self.peer_queue)
        # start searching for peers for the sd hash
        self.search_queue.put_nowait(self.sd_hash)
        log.info("searching for peers for stream %s", self.sd_hash)
@@ -108,7 +111,7 @@ class StreamDownloader:
            raise ValueError(f"blob {blob_info.blob_hash} is not part of stream with sd hash {self.sd_hash}")
        blob = await asyncio.wait_for(
            self.blob_downloader.download_blob(blob_info.blob_hash, blob_info.length, connection_id),
-           self.config.blob_download_timeout * 10, loop=self.loop
+           self.config.blob_download_timeout * 10
        )
        return blob
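
The recurring edit in this and the following hunks is dropping the explicit loop= argument: passing a loop to asyncio.wait_for(), asyncio.sleep(), asyncio.gather(), Queue() and Event() was deprecated in Python 3.8 and removed in 3.10, and the running loop is now picked up implicitly. A standalone sketch of the before/after pattern (not code from this repository):

import asyncio

async def fetch_with_timeout(coro, timeout):
    # Old style (rejected on Python 3.10+):
    #   await asyncio.wait_for(coro, timeout, loop=asyncio.get_event_loop())
    # Current style: the running event loop is used automatically.
    return await asyncio.wait_for(coro, timeout)

async def main():
    print(await fetch_with_timeout(asyncio.sleep(0.1, result="done"), timeout=1))

asyncio.run(main())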

View file

@ -16,10 +16,8 @@ from lbry.file.source import ManagedDownloadSource
if typing.TYPE_CHECKING: if typing.TYPE_CHECKING:
from lbry.conf import Config from lbry.conf import Config
from lbry.schema.claim import Claim
from lbry.blob.blob_manager import BlobManager from lbry.blob.blob_manager import BlobManager
from lbry.blob.blob_info import BlobInfo from lbry.blob.blob_info import BlobInfo
from lbry.dht.node import Node
from lbry.extras.daemon.analytics import AnalyticsManager from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.wallet.transaction import Transaction from lbry.wallet.transaction import Transaction
@ -62,9 +60,9 @@ class ManagedStream(ManagedDownloadSource):
self.file_output_task: typing.Optional[asyncio.Task] = None self.file_output_task: typing.Optional[asyncio.Task] = None
self.delayed_stop_task: typing.Optional[asyncio.Task] = None self.delayed_stop_task: typing.Optional[asyncio.Task] = None
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = [] self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
self.fully_reflected = asyncio.Event(loop=self.loop) self.fully_reflected = asyncio.Event()
self.streaming = asyncio.Event(loop=self.loop) self.streaming = asyncio.Event()
self._running = asyncio.Event(loop=self.loop) self._running = asyncio.Event()
@property @property
def sd_hash(self) -> str: def sd_hash(self) -> str:
@ -84,7 +82,19 @@ class ManagedStream(ManagedDownloadSource):
@property @property
def file_name(self) -> Optional[str]: def file_name(self) -> Optional[str]:
return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None) return self._file_name or self.suggested_file_name
@property
def suggested_file_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.suggested_file_name) or '').strip()
return sanitize_file_name(first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name))
@property
def stream_name(self) -> Optional[str]:
first_option = ((self.descriptor and self.descriptor.stream_name) or '').strip()
return first_option or (self.stream_claim_info and self.stream_claim_info.claim and
self.stream_claim_info.claim.stream.source.name)
@property @property
def written_bytes(self) -> int: def written_bytes(self) -> int:
@ -118,7 +128,7 @@ class ManagedStream(ManagedDownloadSource):
@property @property
def mime_type(self): def mime_type(self):
return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0] return guess_media_type(os.path.basename(self.suggested_file_name))[0]
@property @property
def download_path(self): def download_path(self):
@ -151,7 +161,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("start downloader for stream (sd hash: %s)", self.sd_hash) log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
self._running.set() self._running.set()
try: try:
await asyncio.wait_for(self.downloader.start(), timeout, loop=self.loop) await asyncio.wait_for(self.downloader.start(), timeout)
except asyncio.TimeoutError: except asyncio.TimeoutError:
self._running.clear() self._running.clear()
raise DownloadSDTimeoutError(self.sd_hash) raise DownloadSDTimeoutError(self.sd_hash)
@ -164,7 +174,7 @@ class ManagedStream(ManagedDownloadSource):
if not self._file_name: if not self._file_name:
self._file_name = await get_next_available_file_name( self._file_name = await get_next_available_file_name(
self.loop, self.download_directory, self.loop, self.download_directory,
self._file_name or sanitize_file_name(self.descriptor.suggested_file_name) self._file_name or sanitize_file_name(self.suggested_file_name)
) )
file_name, download_dir = self._file_name, self.download_directory file_name, download_dir = self._file_name, self.download_directory
else: else:
@ -181,7 +191,7 @@ class ManagedStream(ManagedDownloadSource):
Stop any running save/stream tasks as well as the downloader and update the status in the database Stop any running save/stream tasks as well as the downloader and update the status in the database
""" """
self.stop_tasks() await self.stop_tasks()
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING: if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED) await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
@ -269,7 +279,7 @@ class ManagedStream(ManagedDownloadSource):
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id, log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
self.sd_hash[:6], self.full_path) self.sd_hash[:6], self.full_path)
await self.blob_manager.storage.set_saved_file(self.stream_hash) await self.blob_manager.storage.set_saved_file(self.stream_hash)
except Exception as err: except (Exception, asyncio.CancelledError) as err:
if os.path.isfile(output_path): if os.path.isfile(output_path):
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash) log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
os.remove(output_path) os.remove(output_path)
@ -296,14 +306,14 @@ class ManagedStream(ManagedDownloadSource):
self.download_directory = download_directory or self.download_directory or self.config.download_dir self.download_directory = download_directory or self.download_directory or self.config.download_dir
if not self.download_directory: if not self.download_directory:
raise ValueError("no directory to download to") raise ValueError("no directory to download to")
if not (file_name or self._file_name or self.descriptor.suggested_file_name): if not (file_name or self._file_name or self.suggested_file_name):
raise ValueError("no file name to download to") raise ValueError("no file name to download to")
if not os.path.isdir(self.download_directory): if not os.path.isdir(self.download_directory):
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory) log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
os.mkdir(self.download_directory) os.mkdir(self.download_directory)
self._file_name = await get_next_available_file_name( self._file_name = await get_next_available_file_name(
self.loop, self.download_directory, self.loop, self.download_directory,
file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name) file_name or self._file_name or sanitize_file_name(self.suggested_file_name)
) )
await self.blob_manager.storage.change_file_download_dir_and_file_name( await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, self.download_directory, self.file_name self.stream_hash, self.download_directory, self.file_name
@ -311,15 +321,16 @@ class ManagedStream(ManagedDownloadSource):
await self.update_status(ManagedStream.STATUS_RUNNING) await self.update_status(ManagedStream.STATUS_RUNNING)
self.file_output_task = self.loop.create_task(self._save_file(self.full_path)) self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
try: try:
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop) await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout)
except asyncio.TimeoutError: except asyncio.TimeoutError:
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id) log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
self.stop_tasks() await self.stop_tasks()
await self.update_status(ManagedStream.STATUS_STOPPED) await self.update_status(ManagedStream.STATUS_STOPPED)
def stop_tasks(self): async def stop_tasks(self):
if self.file_output_task and not self.file_output_task.done(): if self.file_output_task and not self.file_output_task.done():
self.file_output_task.cancel() self.file_output_task.cancel()
await asyncio.gather(self.file_output_task, return_exceptions=True)
self.file_output_task = None self.file_output_task = None
while self.streaming_responses: while self.streaming_responses:
req, response = self.streaming_responses.pop() req, response = self.streaming_responses.pop()
@ -356,7 +367,7 @@ class ManagedStream(ManagedDownloadSource):
return sent return sent
except ConnectionError: except ConnectionError:
return sent return sent
except (OSError, Exception) as err: except (OSError, Exception, asyncio.CancelledError) as err:
if isinstance(err, asyncio.CancelledError): if isinstance(err, asyncio.CancelledError):
log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id) log.warning("stopped uploading %s#%s to reflector", self.claim_name, self.claim_id)
elif isinstance(err, OSError): elif isinstance(err, OSError):
@ -391,7 +402,7 @@ class ManagedStream(ManagedDownloadSource):
self.sd_hash[:6]) self.sd_hash[:6])
await self.stop() await self.stop()
return return
await asyncio.sleep(1, loop=self.loop) await asyncio.sleep(1)
def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]: def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
if '=' in get_range: if '=' in get_range:
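
The new asynchronous stop_tasks() cancels the file output task and then awaits it with return_exceptions=True, so the coroutine has fully unwound before the method returns. Reduced to its core, the pattern is (a sketch, not the repository's exact helper):

import asyncio

async def cancel_and_wait(task):
    # Cancel a running task and wait for the cancellation to finish propagating;
    # return_exceptions=True swallows the resulting CancelledError.
    if task is not None and not task.done():
        task.cancel()
        await asyncio.gather(task, return_exceptions=True)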

View file

@@ -21,7 +21,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
        self.loop = asyncio.get_event_loop()
        self.blob_manager = blob_manager
        self.server_task: asyncio.Task = None
-       self.started_listening = asyncio.Event(loop=self.loop)
+       self.started_listening = asyncio.Event()
        self.buf = b''
        self.transport: asyncio.StreamWriter = None
        self.writer: typing.Optional['HashBlobWriter'] = None
@@ -29,9 +29,9 @@ class ReflectorServerProtocol(asyncio.Protocol):
        self.descriptor: typing.Optional['StreamDescriptor'] = None
        self.sd_blob: typing.Optional['BlobFile'] = None
        self.received = []
-       self.incoming = incoming_event or asyncio.Event(loop=self.loop)
-       self.not_incoming = not_incoming_event or asyncio.Event(loop=self.loop)
-       self.stop_event = stop_event or asyncio.Event(loop=self.loop)
+       self.incoming = incoming_event or asyncio.Event()
+       self.not_incoming = not_incoming_event or asyncio.Event()
+       self.stop_event = stop_event or asyncio.Event()
        self.chunk_size = response_chunk_size
        self.wait_for_stop_task: typing.Optional[asyncio.Task] = None
        self.partial_event = partial_event
@@ -94,7 +94,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
            self.incoming.set()
            self.send_response({"send_sd_blob": True})
            try:
-               await asyncio.wait_for(self.sd_blob.verified.wait(), 30, loop=self.loop)
+               await asyncio.wait_for(self.sd_blob.verified.wait(), 30)
                self.descriptor = await StreamDescriptor.from_stream_descriptor_blob(
                    self.loop, self.blob_manager.blob_dir, self.sd_blob
                )
@@ -140,7 +140,7 @@ class ReflectorServerProtocol(asyncio.Protocol):
            self.incoming.set()
            self.send_response({"send_blob": True})
            try:
-               await asyncio.wait_for(blob.verified.wait(), 30, loop=self.loop)
+               await asyncio.wait_for(blob.verified.wait(), 30)
                self.send_response({"received_blob": True})
            except asyncio.TimeoutError:
                self.send_response({"received_blob": False})
@@ -162,10 +162,10 @@ class ReflectorServer:
        self.loop = asyncio.get_event_loop()
        self.blob_manager = blob_manager
        self.server_task: typing.Optional[asyncio.Task] = None
-       self.started_listening = asyncio.Event(loop=self.loop)
-       self.stopped_listening = asyncio.Event(loop=self.loop)
-       self.incoming_event = incoming_event or asyncio.Event(loop=self.loop)
-       self.not_incoming_event = not_incoming_event or asyncio.Event(loop=self.loop)
+       self.started_listening = asyncio.Event()
+       self.stopped_listening = asyncio.Event()
+       self.incoming_event = incoming_event or asyncio.Event()
+       self.not_incoming_event = not_incoming_event or asyncio.Event()
        self.response_chunk_size = response_chunk_size
        self.stop_event = stop_event
        self.partial_needs = partial_needs  # for testing cases where it doesn't know what it wants

View file

@ -54,7 +54,7 @@ class StreamManager(SourceManager):
self.re_reflect_task: Optional[asyncio.Task] = None self.re_reflect_task: Optional[asyncio.Task] = None
self.update_stream_finished_futs: typing.List[asyncio.Future] = [] self.update_stream_finished_futs: typing.List[asyncio.Future] = []
self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {} self.running_reflector_uploads: typing.Dict[str, asyncio.Task] = {}
self.started = asyncio.Event(loop=self.loop) self.started = asyncio.Event()
@property @property
def streams(self): def streams(self):
@ -70,6 +70,7 @@ class StreamManager(SourceManager):
async def recover_streams(self, file_infos: typing.List[typing.Dict]): async def recover_streams(self, file_infos: typing.List[typing.Dict]):
to_restore = [] to_restore = []
to_check = []
async def recover_stream(sd_hash: str, stream_hash: str, stream_name: str, async def recover_stream(sd_hash: str, stream_hash: str, stream_name: str,
suggested_file_name: str, key: str, suggested_file_name: str, key: str,
@ -82,6 +83,7 @@ class StreamManager(SourceManager):
if not descriptor: if not descriptor:
return return
to_restore.append((descriptor, sd_blob, content_fee)) to_restore.append((descriptor, sd_blob, content_fee))
to_check.extend([sd_blob.blob_hash] + [blob.blob_hash for blob in descriptor.blobs[:-1]])
await asyncio.gather(*[ await asyncio.gather(*[
recover_stream( recover_stream(
@ -93,6 +95,8 @@ class StreamManager(SourceManager):
if to_restore: if to_restore:
await self.storage.recover_streams(to_restore, self.config.download_dir) await self.storage.recover_streams(to_restore, self.config.download_dir)
if to_check:
await self.blob_manager.ensure_completed_blobs_status(to_check)
# if self.blob_manager._save_blobs: # if self.blob_manager._save_blobs:
# log.info("Recovered %i/%i attempted streams", len(to_restore), len(file_infos)) # log.info("Recovered %i/%i attempted streams", len(to_restore), len(file_infos))
@ -146,7 +150,7 @@ class StreamManager(SourceManager):
file_info['added_on'], file_info['fully_reflected'] file_info['added_on'], file_info['fully_reflected']
))) )))
if add_stream_tasks: if add_stream_tasks:
await asyncio.gather(*add_stream_tasks, loop=self.loop) await asyncio.gather(*add_stream_tasks)
log.info("Started stream manager with %i files", len(self._sources)) log.info("Started stream manager with %i files", len(self._sources))
if not self.node: if not self.node:
log.info("no DHT node given, resuming downloads trusting that we can contact reflector") log.info("no DHT node given, resuming downloads trusting that we can contact reflector")
@ -155,14 +159,11 @@ class StreamManager(SourceManager):
self.resume_saving_task = asyncio.ensure_future(asyncio.gather( self.resume_saving_task = asyncio.ensure_future(asyncio.gather(
*(self._sources[sd_hash].save_file(file_name, download_directory) *(self._sources[sd_hash].save_file(file_name, download_directory)
for (file_name, download_directory, sd_hash) in to_resume_saving), for (file_name, download_directory, sd_hash) in to_resume_saving),
loop=self.loop
)) ))
async def reflect_streams(self): async def reflect_streams(self):
try: try:
return await self._reflect_streams() return await self._reflect_streams()
except asyncio.CancelledError:
raise
except Exception: except Exception:
log.exception("reflector task encountered an unexpected error!") log.exception("reflector task encountered an unexpected error!")
@ -182,21 +183,21 @@ class StreamManager(SourceManager):
batch.append(self.reflect_stream(stream)) batch.append(self.reflect_stream(stream))
if len(batch) >= self.config.concurrent_reflector_uploads: if len(batch) >= self.config.concurrent_reflector_uploads:
log.debug("waiting for batch of %s reflecting streams", len(batch)) log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch, loop=self.loop) await asyncio.gather(*batch)
log.debug("done processing %s streams", len(batch)) log.debug("done processing %s streams", len(batch))
batch = [] batch = []
if batch: if batch:
log.debug("waiting for batch of %s reflecting streams", len(batch)) log.debug("waiting for batch of %s reflecting streams", len(batch))
await asyncio.gather(*batch, loop=self.loop) await asyncio.gather(*batch)
log.debug("done processing %s streams", len(batch)) log.debug("done processing %s streams", len(batch))
await asyncio.sleep(300, loop=self.loop) await asyncio.sleep(300)
async def start(self): async def start(self):
await super().start() await super().start()
self.re_reflect_task = self.loop.create_task(self.reflect_streams()) self.re_reflect_task = self.loop.create_task(self.reflect_streams())
def stop(self): async def stop(self):
super().stop() await super().stop()
if self.resume_saving_task and not self.resume_saving_task.done(): if self.resume_saving_task and not self.resume_saving_task.done():
self.resume_saving_task.cancel() self.resume_saving_task.cancel()
if self.re_reflect_task and not self.re_reflect_task.done(): if self.re_reflect_task and not self.re_reflect_task.done():
@ -223,7 +224,8 @@ class StreamManager(SourceManager):
) )
return task return task
async def _retriable_reflect_stream(self, stream, host, port): @staticmethod
async def _retriable_reflect_stream(stream, host, port):
sent = await stream.upload_to_reflector(host, port) sent = await stream.upload_to_reflector(host, port)
while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0: while not stream.is_fully_reflected and stream.reflector_progress > 0 and len(sent) > 0:
stream.reflector_progress = 0 stream.reflector_progress = 0
@ -258,7 +260,7 @@ class StreamManager(SourceManager):
return return
if source.identifier in self.running_reflector_uploads: if source.identifier in self.running_reflector_uploads:
self.running_reflector_uploads[source.identifier].cancel() self.running_reflector_uploads[source.identifier].cancel()
source.stop_tasks() await source.stop_tasks()
if source.identifier in self.streams: if source.identifier in self.streams:
del self.streams[source.identifier] del self.streams[source.identifier]
blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]] blob_hashes = [source.identifier] + [b.blob_hash for b in source.descriptor.blobs[:-1]]
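
The _reflect_streams() loop shown above uploads in batches bounded by concurrent_reflector_uploads instead of gathering everything at once. The batching idea in isolation (illustrative names, not the repository's API):

import asyncio

async def gather_in_batches(coros, batch_size):
    # Await work in groups of at most batch_size concurrently running awaitables.
    batch = []
    for coro in coros:
        batch.append(coro)
        if len(batch) >= batch_size:
            await asyncio.gather(*batch)
            batch = []
    if batch:
        await asyncio.gather(*batch)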

View file

@ -19,7 +19,7 @@ from lbry.conf import Config
from lbry.wallet.util import satoshis_to_coins from lbry.wallet.util import satoshis_to_coins
from lbry.wallet.dewies import lbc_to_dewies from lbry.wallet.dewies import lbc_to_dewies
from lbry.wallet.orchstr8 import Conductor from lbry.wallet.orchstr8 import Conductor
from lbry.wallet.orchstr8.node import BlockchainNode, WalletNode, HubNode from lbry.wallet.orchstr8.node import LBCWalletNode, WalletNode
from lbry.schema.claim import Claim from lbry.schema.claim import Claim
from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty from lbry.extras.daemon.daemon import Daemon, jsonrpc_dumps_pretty
@ -204,7 +204,13 @@ class AsyncioTestCase(unittest.TestCase):
def add_timeout(self): def add_timeout(self):
if self.TIMEOUT: if self.TIMEOUT:
self.loop.call_later(self.TIMEOUT, self.cancel) self.loop.call_later(self.TIMEOUT, self.check_timeout, time())
def check_timeout(self, started):
if time() - started >= self.TIMEOUT:
self.cancel()
else:
self.loop.call_later(self.TIMEOUT, self.check_timeout, started)
class AdvanceTimeTestCase(AsyncioTestCase): class AdvanceTimeTestCase(AsyncioTestCase):
@ -230,8 +236,7 @@ class IntegrationTestCase(AsyncioTestCase):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.conductor: Optional[Conductor] = None self.conductor: Optional[Conductor] = None
self.blockchain: Optional[BlockchainNode] = None self.blockchain: Optional[LBCWalletNode] = None
self.hub: Optional[HubNode] = None
self.wallet_node: Optional[WalletNode] = None self.wallet_node: Optional[WalletNode] = None
self.manager: Optional[WalletManager] = None self.manager: Optional[WalletManager] = None
self.ledger: Optional[Ledger] = None self.ledger: Optional[Ledger] = None
@ -240,16 +245,15 @@ class IntegrationTestCase(AsyncioTestCase):
async def asyncSetUp(self): async def asyncSetUp(self):
self.conductor = Conductor(seed=self.SEED) self.conductor = Conductor(seed=self.SEED)
await self.conductor.start_blockchain() await self.conductor.start_lbcd()
self.addCleanup(self.conductor.stop_blockchain) self.addCleanup(self.conductor.stop_lbcd)
await self.conductor.start_lbcwallet()
self.addCleanup(self.conductor.stop_lbcwallet)
await self.conductor.start_spv() await self.conductor.start_spv()
self.addCleanup(self.conductor.stop_spv) self.addCleanup(self.conductor.stop_spv)
await self.conductor.start_wallet() await self.conductor.start_wallet()
self.addCleanup(self.conductor.stop_wallet) self.addCleanup(self.conductor.stop_wallet)
await self.conductor.start_hub() self.blockchain = self.conductor.lbcwallet_node
self.addCleanup(self.conductor.stop_hub)
self.blockchain = self.conductor.blockchain_node
self.hub = self.conductor.hub_node
self.wallet_node = self.conductor.wallet_node self.wallet_node = self.conductor.wallet_node
self.manager = self.wallet_node.manager self.manager = self.wallet_node.manager
self.ledger = self.wallet_node.ledger self.ledger = self.wallet_node.ledger
@ -263,6 +267,13 @@ class IntegrationTestCase(AsyncioTestCase):
def broadcast(self, tx): def broadcast(self, tx):
return self.ledger.broadcast(tx) return self.ledger.broadcast(tx)
async def broadcast_and_confirm(self, tx, ledger=None):
ledger = ledger or self.ledger
notifications = asyncio.create_task(ledger.wait(tx))
await ledger.broadcast(tx)
await notifications
await self.generate_and_wait(1, [tx.id], ledger)
async def on_header(self, height): async def on_header(self, height):
if self.ledger.headers.height < height: if self.ledger.headers.height < height:
await self.ledger.on_header.where( await self.ledger.on_header.where(
@ -270,11 +281,29 @@ class IntegrationTestCase(AsyncioTestCase):
) )
return True return True
def on_transaction_id(self, txid, ledger=None): async def send_to_address_and_wait(self, address, amount, blocks_to_generate=0, ledger=None):
return (ledger or self.ledger).on_transaction.where( tx_watch = []
lambda e: e.tx.id == txid txid = None
done = False
watcher = (ledger or self.ledger).on_transaction.where(
lambda e: e.tx.id == txid or done or tx_watch.append(e.tx.id)
) )
txid = await self.blockchain.send_to_address(address, amount)
done = txid in tx_watch
await watcher
await self.generate_and_wait(blocks_to_generate, [txid], ledger)
return txid
async def generate_and_wait(self, blocks_to_generate, txids, ledger=None):
if blocks_to_generate > 0:
watcher = (ledger or self.ledger).on_transaction.where(
lambda e: ((e.tx.id in txids and txids.remove(e.tx.id)), len(txids) <= 0)[-1] # multi-statement lambda
)
await self.generate(blocks_to_generate)
await watcher
def on_address_update(self, address): def on_address_update(self, address):
return self.ledger.on_transaction.where( return self.ledger.on_transaction.where(
lambda e: e.address == address lambda e: e.address == address
@ -285,6 +314,22 @@ class IntegrationTestCase(AsyncioTestCase):
lambda e: e.tx.id == tx.id and e.address == address lambda e: e.tx.id == tx.id and e.address == address
) )
async def generate(self, blocks):
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
self.conductor.spv_node.server.synchronized.clear()
await self.blockchain.generate(blocks)
height = self.blockchain.block_expected
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
while True:
await self.conductor.spv_node.server.synchronized.wait()
self.conductor.spv_node.server.synchronized.clear()
if self.conductor.spv_node.server.db.db_height < height:
continue
if self.conductor.spv_node.server._es_height < height:
continue
break
class FakeExchangeRateManager(ExchangeRateManager): class FakeExchangeRateManager(ExchangeRateManager):
@ -345,20 +390,19 @@ class CommandTestCase(IntegrationTestCase):
self.skip_libtorrent = True self.skip_libtorrent = True
async def asyncSetUp(self): async def asyncSetUp(self):
await super().asyncSetUp()
logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY) logging.getLogger('lbry.blob_exchange').setLevel(self.VERBOSITY)
logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY) logging.getLogger('lbry.daemon').setLevel(self.VERBOSITY)
logging.getLogger('lbry.stream').setLevel(self.VERBOSITY) logging.getLogger('lbry.stream').setLevel(self.VERBOSITY)
logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY) logging.getLogger('lbry.wallet').setLevel(self.VERBOSITY)
await super().asyncSetUp()
self.daemon = await self.add_daemon(self.wallet_node) self.daemon = await self.add_daemon(self.wallet_node)
await self.account.ensure_address_gap() await self.account.ensure_address_gap()
address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0] address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
sendtxid = await self.blockchain.send_to_address(address, 10) await self.send_to_address_and_wait(address, 10, 6)
await self.confirm_tx(sendtxid)
await self.generate(5)
server_tmp_dir = tempfile.mkdtemp() server_tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, server_tmp_dir) self.addCleanup(shutil.rmtree, server_tmp_dir)
@ -455,9 +499,14 @@ class CommandTestCase(IntegrationTestCase):
async def confirm_tx(self, txid, ledger=None): async def confirm_tx(self, txid, ledger=None):
""" Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """ """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
await self.on_transaction_id(txid, ledger) # await (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
await self.generate(1) on_tx = (ledger or self.ledger).on_transaction.where(lambda e: e.tx.id == txid)
await self.on_transaction_id(txid, ledger) await asyncio.wait([self.generate(1), on_tx], timeout=5)
# # actually, if it's in the mempool or in the block we're fine
# await self.generate_and_wait(1, [txid], ledger=ledger)
# return txid
return txid return txid
async def on_transaction_dict(self, tx): async def on_transaction_dict(self, tx):
@ -472,12 +521,6 @@ class CommandTestCase(IntegrationTestCase):
addresses.add(txo['address']) addresses.add(txo['address'])
return list(addresses) return list(addresses)
async def generate(self, blocks):
""" Ask lbrycrd to generate some blocks and wait until ledger has them. """
prepare = self.ledger.on_header.where(self.blockchain.is_expected_block)
await self.blockchain.generate(blocks)
await prepare # no guarantee that it didn't happen already, so start waiting from before calling generate
async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True): async def blockchain_claim_name(self, name: str, value: str, amount: str, confirm=True):
txid = await self.blockchain._cli_cmnd('claimname', name, value, amount) txid = await self.blockchain._cli_cmnd('claimname', name, value, amount)
if confirm: if confirm:
@ -508,7 +551,7 @@ class CommandTestCase(IntegrationTestCase):
return self.sout(tx) return self.sout(tx)
return tx return tx
async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None): async def create_nondeterministic_channel(self, name, price, pubkey_bytes, daemon=None, blocking=False):
account = (daemon or self.daemon).wallet_manager.default_account account = (daemon or self.daemon).wallet_manager.default_account
claim_address = await account.receiving.get_or_create_usable_address() claim_address = await account.receiving.get_or_create_usable_address()
claim = Claim() claim = Claim()
@ -518,7 +561,7 @@ class CommandTestCase(IntegrationTestCase):
claim_address, [self.account], self.account claim_address, [self.account], self.account
) )
await tx.sign([self.account]) await tx.sign([self.account])
await (daemon or self.daemon).broadcast_or_release(tx, False) await (daemon or self.daemon).broadcast_or_release(tx, blocking)
return self.sout(tx) return self.sout(tx)
def create_upload_file(self, data, prefix=None, suffix=None): def create_upload_file(self, data, prefix=None, suffix=None):
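
The "multi-statement lambda" used in generate_and_wait() above works by building a tuple: the first element exists only for its side effect (removing an observed txid from the list) and the trailing [-1] index returns the last element, the boolean the event filter actually cares about. In isolation (illustrative values):

txids = ["txid-a", "txid-b"]

# list.remove() returns None, so the first tuple element is only evaluated for
# its side effect; the predicate's result is the final tuple element.
seen_all = lambda tx_id: ((tx_id in txids and txids.remove(tx_id)), len(txids) <= 0)[-1]

assert seen_all("txid-a") is False   # one txid still outstanding
assert seen_all("txid-b") is True    # all txids have been observed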

View file

@ -10,47 +10,13 @@ from typing import Optional
import libtorrent import libtorrent
NOTIFICATION_MASKS = [
"error",
"peer",
"port_mapping",
"storage",
"tracker",
"debug",
"status",
"progress",
"ip_block",
"dht",
"stats",
"session_log",
"torrent_log",
"peer_log",
"incoming_request",
"dht_log",
"dht_operation",
"port_mapping_log",
"picker_log",
"file_progress",
"piece_progress",
"upload",
"block_progress"
]
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted? DEFAULT_FLAGS = ( # fixme: somehow the logic here is inverted?
libtorrent.add_torrent_params_flags_t.flag_auto_managed libtorrent.add_torrent_params_flags_t.flag_auto_managed
| libtorrent.add_torrent_params_flags_t.flag_update_subscribe | libtorrent.add_torrent_params_flags_t.flag_update_subscribe
) )
def get_notification_type(notification) -> str:
for i, notification_type in enumerate(NOTIFICATION_MASKS):
if (1 << i) & notification:
return notification_type
raise ValueError("unrecognized notification type")
class TorrentHandle: class TorrentHandle:
def __init__(self, loop, executor, handle): def __init__(self, loop, executor, handle):
self._loop = loop self._loop = loop
@ -121,7 +87,7 @@ class TorrentHandle:
self._show_status() self._show_status()
if self.finished.is_set(): if self.finished.is_set():
break break
await asyncio.sleep(0.1, loop=self._loop) await asyncio.sleep(0.1)
async def pause(self): async def pause(self):
await self._loop.run_in_executor( await self._loop.run_in_executor(
@ -156,10 +122,8 @@ class TorrentSession:
async def bind(self, interface: str = '0.0.0.0', port: int = 10889): async def bind(self, interface: str = '0.0.0.0', port: int = 10889):
settings = { settings = {
'listen_interfaces': f"{interface}:{port}", 'listen_interfaces': f"{interface}:{port}",
'enable_outgoing_utp': True, 'enable_natpmp': False,
'enable_incoming_utp': True, 'enable_upnp': False
'enable_outgoing_tcp': False,
'enable_incoming_tcp': False
} }
self._session = await self._loop.run_in_executor( self._session = await self._loop.run_in_executor(
self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member self._executor, libtorrent.session, settings # pylint: disable=c-extension-no-member
@ -186,7 +150,7 @@ class TorrentSession:
await self._loop.run_in_executor( await self._loop.run_in_executor(
self._executor, self._pop_alerts self._executor, self._pop_alerts
) )
await asyncio.sleep(1, loop=self._loop) await asyncio.sleep(1)
async def pause(self): async def pause(self):
await self._loop.run_in_executor( await self._loop.run_in_executor(
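
The bind() change above replaces the uTP/TCP toggles with simply disabling NAT-PMP and UPnP in the libtorrent settings. Creating a session with those settings, taken out of the executor plumbing, looks roughly like this (interface and port are placeholders):

import libtorrent

def make_session(interface="0.0.0.0", port=10889):
    settings = {
        "listen_interfaces": f"{interface}:{port}",
        "enable_natpmp": False,
        "enable_upnp": False,
    }
    return libtorrent.session(settings)  # pylint: disable=c-extension-no-member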

View file

@@ -36,7 +36,7 @@ class Torrent:
    def __init__(self, loop, handle):
        self._loop = loop
        self._handle = handle
-       self.finished = asyncio.Event(loop=loop)
+       self.finished = asyncio.Event()
    def _threaded_update_status(self):
        status = self._handle.status()
@@ -58,7 +58,7 @@ class Torrent:
                log.info("finished downloading torrent!")
                await self.pause()
                break
-           await asyncio.sleep(1, loop=self._loop)
+           await asyncio.sleep(1)
    async def pause(self):
        log.info("pause torrent")

View file

@@ -74,7 +74,7 @@ class TorrentSource(ManagedDownloadSource):
    def bt_infohash(self):
        return self.identifier
-   def stop_tasks(self):
+   async def stop_tasks(self):
        pass
    @property
@@ -118,8 +118,8 @@ class TorrentManager(SourceManager):
    async def start(self):
        await super().start()
-   def stop(self):
-       super().stop()
+   async def stop(self):
+       await super().stop()
        log.info("finished stopping the torrent manager")
    async def delete(self, source: ManagedDownloadSource, delete_file: Optional[bool] = False):

lbry/torrent/tracker.py (new file, 285 lines)
View file

@ -0,0 +1,285 @@
import random
import socket
import string
import struct
import asyncio
import logging
import time
import ipaddress
from collections import namedtuple
from functools import reduce
from typing import Optional
from lbry.dht.node import get_kademlia_peers_from_hosts
from lbry.utils import resolve_host, async_timed_cache, cache_concurrent
from lbry.wallet.stream import StreamController
from lbry import version
log = logging.getLogger(__name__)
CONNECTION_EXPIRES_AFTER_SECONDS = 50
PREFIX = 'LB' # todo: PR BEP20 to add ourselves
DEFAULT_TIMEOUT_SECONDS = 10.0
DEFAULT_CONCURRENCY_LIMIT = 100
# see: http://bittorrent.org/beps/bep_0015.html and http://xbtt.sourceforge.net/udp_tracker_protocol.html
ConnectRequest = namedtuple("ConnectRequest", ["connection_id", "action", "transaction_id"])
ConnectResponse = namedtuple("ConnectResponse", ["action", "transaction_id", "connection_id"])
AnnounceRequest = namedtuple("AnnounceRequest",
["connection_id", "action", "transaction_id", "info_hash", "peer_id", "downloaded", "left",
"uploaded", "event", "ip_addr", "key", "num_want", "port"])
AnnounceResponse = namedtuple("AnnounceResponse",
["action", "transaction_id", "interval", "leechers", "seeders", "peers"])
CompactIPv4Peer = namedtuple("CompactPeer", ["address", "port"])
ScrapeRequest = namedtuple("ScrapeRequest", ["connection_id", "action", "transaction_id", "infohashes"])
ScrapeResponse = namedtuple("ScrapeResponse", ["action", "transaction_id", "items"])
ScrapeResponseItem = namedtuple("ScrapeResponseItem", ["seeders", "completed", "leechers"])
ErrorResponse = namedtuple("ErrorResponse", ["action", "transaction_id", "message"])
structs = {
ConnectRequest: struct.Struct(">QII"),
ConnectResponse: struct.Struct(">IIQ"),
AnnounceRequest: struct.Struct(">QII20s20sQQQIIIiH"),
AnnounceResponse: struct.Struct(">IIIII"),
CompactIPv4Peer: struct.Struct(">IH"),
ScrapeRequest: struct.Struct(">QII"),
ScrapeResponse: struct.Struct(">II"),
ScrapeResponseItem: struct.Struct(">III"),
ErrorResponse: struct.Struct(">II")
}
def decode(cls, data, offset=0):
decoder = structs[cls]
if cls is AnnounceResponse:
return AnnounceResponse(*decoder.unpack_from(data, offset),
peers=[decode(CompactIPv4Peer, data, index) for index in range(20, len(data), 6)])
elif cls is ScrapeResponse:
return ScrapeResponse(*decoder.unpack_from(data, offset),
items=[decode(ScrapeResponseItem, data, index) for index in range(8, len(data), 12)])
elif cls is ErrorResponse:
return ErrorResponse(*decoder.unpack_from(data, offset), data[decoder.size:])
return cls(*decoder.unpack_from(data, offset))
def encode(obj):
if isinstance(obj, ScrapeRequest):
return structs[ScrapeRequest].pack(*obj[:-1]) + b''.join(obj.infohashes)
elif isinstance(obj, ErrorResponse):
return structs[ErrorResponse].pack(*obj[:-1]) + obj.message
elif isinstance(obj, AnnounceResponse):
return structs[AnnounceResponse].pack(*obj[:-1]) + b''.join([encode(peer) for peer in obj.peers])
return structs[type(obj)].pack(*obj)
def make_peer_id(random_part: Optional[str] = None) -> bytes:
# see https://wiki.theory.org/BitTorrentSpecification#peer_id and https://www.bittorrent.org/beps/bep_0020.html
# not to confuse with node id; peer id identifies uniquely the software, version and instance
random_part = random_part or ''.join(random.choice(string.ascii_letters) for _ in range(20))
return f"{PREFIX}-{'-'.join(map(str, version))}-{random_part}"[:20].encode()
class UDPTrackerClientProtocol(asyncio.DatagramProtocol):
def __init__(self, timeout: float = DEFAULT_TIMEOUT_SECONDS):
self.transport = None
self.data_queue = {}
self.timeout = timeout
self.semaphore = asyncio.Semaphore(DEFAULT_CONCURRENCY_LIMIT)
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
async def request(self, obj, tracker_ip, tracker_port):
self.data_queue[obj.transaction_id] = asyncio.get_running_loop().create_future()
try:
async with self.semaphore:
self.transport.sendto(encode(obj), (tracker_ip, tracker_port))
return await asyncio.wait_for(self.data_queue[obj.transaction_id], self.timeout)
finally:
self.data_queue.pop(obj.transaction_id, None)
async def connect(self, tracker_ip, tracker_port):
transaction_id = random.getrandbits(32)
return decode(ConnectResponse,
await self.request(ConnectRequest(0x41727101980, 0, transaction_id), tracker_ip, tracker_port))
@cache_concurrent
@async_timed_cache(CONNECTION_EXPIRES_AFTER_SECONDS)
async def ensure_connection_id(self, peer_id, tracker_ip, tracker_port):
# peer_id is just to ensure cache coherency
return (await self.connect(tracker_ip, tracker_port)).connection_id
async def announce(self, info_hash, peer_id, port, tracker_ip, tracker_port, stopped=False):
connection_id = await self.ensure_connection_id(peer_id, tracker_ip, tracker_port)
# this should make the key deterministic but unique per info hash + peer id
key = int.from_bytes(info_hash[:4], "big") ^ int.from_bytes(peer_id[:4], "big") ^ port
transaction_id = random.getrandbits(32)
req = AnnounceRequest(
connection_id, 1, transaction_id, info_hash, peer_id, 0, 0, 0, 3 if stopped else 1, 0, key, -1, port)
return decode(AnnounceResponse, await self.request(req, tracker_ip, tracker_port))
async def scrape(self, infohashes, tracker_ip, tracker_port, connection_id=None):
connection_id = await self.ensure_connection_id(None, tracker_ip, tracker_port)
transaction_id = random.getrandbits(32)
reply = await self.request(
ScrapeRequest(connection_id, 2, transaction_id, infohashes), tracker_ip, tracker_port)
return decode(ScrapeResponse, reply), connection_id
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 8:
return
transaction_id = int.from_bytes(data[4:8], byteorder="big", signed=False)
if transaction_id in self.data_queue:
if not self.data_queue[transaction_id].done():
if data[3] == 3:
return self.data_queue[transaction_id].set_exception(Exception(decode(ErrorResponse, data).message))
return self.data_queue[transaction_id].set_result(data)
log.debug("unexpected packet (can be a response for a previously timed out request): %s", data.hex())
def connection_lost(self, exc: Exception = None) -> None:
self.transport = None
class TrackerClient:
event_controller = StreamController()
def __init__(self, node_id, announce_port, get_servers, timeout=10.0):
self.client = UDPTrackerClientProtocol(timeout=timeout)
self.transport = None
self.peer_id = make_peer_id(node_id.hex() if node_id else None)
self.announce_port = announce_port
self._get_servers = get_servers
self.results = {} # we can't probe the server before the interval, so we keep the result here until it expires
self.tasks = {}
async def start(self):
self.transport, _ = await asyncio.get_running_loop().create_datagram_endpoint(
lambda: self.client, local_addr=("0.0.0.0", 0))
self.event_controller.stream.listen(
lambda request: self.on_hash(request[1], request[2]) if request[0] == 'search' else None)
def stop(self):
while self.tasks:
self.tasks.popitem()[1].cancel()
if self.transport is not None:
self.transport.close()
self.client = None
self.transport = None
self.event_controller.close()
def on_hash(self, info_hash, on_announcement=None):
if info_hash not in self.tasks:
task = asyncio.create_task(self.get_peer_list(info_hash, on_announcement=on_announcement))
task.add_done_callback(lambda *_: self.tasks.pop(info_hash, None))
self.tasks[info_hash] = task
async def announce_many(self, *info_hashes, stopped=False):
await asyncio.gather(
*[self._announce_many(server, info_hashes, stopped=stopped) for server in self._get_servers()],
return_exceptions=True)
async def _announce_many(self, server, info_hashes, stopped=False):
tracker_ip = await resolve_host(*server, 'udp')
still_good_info_hashes = {
info_hash for (info_hash, (next_announcement, _)) in self.results.get(tracker_ip, {}).items()
if time.time() < next_announcement
}
results = await asyncio.gather(
*[self._probe_server(info_hash, tracker_ip, server[1], stopped=stopped)
for info_hash in info_hashes if info_hash not in still_good_info_hashes],
return_exceptions=True)
if results:
errors = sum([1 for result in results if result is None or isinstance(result, Exception)])
log.info("Tracker: finished announcing %d files to %s:%d, %d errors", len(results), *server, errors)
async def get_peer_list(self, info_hash, stopped=False, on_announcement=None, no_port=False):
found = []
probes = [self._probe_server(info_hash, *server, stopped, no_port) for server in self._get_servers()]
for done in asyncio.as_completed(probes):
result = await done
if result is not None:
await asyncio.gather(*filter(asyncio.iscoroutine, [on_announcement(result)] if on_announcement else []))
found.append(result)
return found
async def get_kademlia_peer_list(self, info_hash):
responses = await self.get_peer_list(info_hash, no_port=True)
return await announcement_to_kademlia_peers(*responses)
async def _probe_server(self, info_hash, tracker_host, tracker_port, stopped=False, no_port=False):
result = None
try:
tracker_host = await resolve_host(tracker_host, tracker_port, 'udp')
except socket.error:
log.warning("DNS failure while resolving tracker host: %s, skipping.", tracker_host)
return
self.results.setdefault(tracker_host, {})
if info_hash in self.results[tracker_host]:
next_announcement, result = self.results[tracker_host][info_hash]
if time.time() < next_announcement:
return result
try:
result = await self.client.announce(
info_hash, self.peer_id, 0 if no_port else self.announce_port, tracker_host, tracker_port, stopped)
self.results[tracker_host][info_hash] = (time.time() + result.interval, result)
except asyncio.TimeoutError: # todo: this is UDP, timeout is common, we need a better metric for failures
self.results[tracker_host][info_hash] = (time.time() + 60.0, result)
log.debug("Tracker timed out: %s:%d", tracker_host, tracker_port)
return None
log.debug("Announced: %s found %d peers for %s", tracker_host, len(result.peers), info_hash.hex()[:8])
return result
def enqueue_tracker_search(info_hash: bytes, peer_q: asyncio.Queue):
async def on_announcement(announcement: AnnounceResponse):
peers = await announcement_to_kademlia_peers(announcement)
log.info("Found %d peers from tracker for %s", len(peers), info_hash.hex()[:8])
peer_q.put_nowait(peers)
TrackerClient.event_controller.add(('search', info_hash, on_announcement))
def announcement_to_kademlia_peers(*announcements: AnnounceResponse):
peers = [
(str(ipaddress.ip_address(peer.address)), peer.port)
for announcement in announcements for peer in announcement.peers if peer.port > 1024 # no privileged or 0
]
return get_kademlia_peers_from_hosts(peers)
class UDPTrackerServerProtocol(asyncio.DatagramProtocol): # for testing. Not suitable for production
def __init__(self):
self.transport = None
self.known_conns = set()
self.peers = {}
def connection_made(self, transport: asyncio.DatagramTransport) -> None:
self.transport = transport
def add_peer(self, info_hash, ip_address: str, port: int):
self.peers.setdefault(info_hash, [])
self.peers[info_hash].append(encode_peer(ip_address, port))
def datagram_received(self, data: bytes, addr: (str, int)) -> None:
if len(data) < 16:
return
action = int.from_bytes(data[8:12], "big", signed=False)
if action == 0:
req = decode(ConnectRequest, data)
connection_id = random.getrandbits(32)
self.known_conns.add(connection_id)
return self.transport.sendto(encode(ConnectResponse(0, req.transaction_id, connection_id)), addr)
elif action == 1:
req = decode(AnnounceRequest, data)
if req.connection_id not in self.known_conns:
resp = encode(ErrorResponse(3, req.transaction_id, b'Connection ID missmatch.\x00'))
else:
compact_address = encode_peer(addr[0], req.port)
if req.event != 3:
self.add_peer(req.info_hash, addr[0], req.port)
elif compact_address in self.peers.get(req.info_hash, []):
self.peers[req.info_hash].remove(compact_address)
peers = [decode(CompactIPv4Peer, peer) for peer in self.peers[req.info_hash]]
resp = encode(AnnounceResponse(1, req.transaction_id, 1700, 0, len(peers), peers))
return self.transport.sendto(resp, addr)
def encode_peer(ip_address: str, port: int):
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), ip_address.split('.'), bytearray())
return compact_ip + port.to_bytes(2, "big", signed=False)
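A small round-trip sketch (illustration only, not part of the change set) of the compact peer layout that encode_peer() produces: four raw IPv4 address bytes followed by a two-byte big-endian port, the same 6-byte records the announce response is sliced into.

import ipaddress
import struct

def compact_peer(ip: str, port: int) -> bytes:
    # equivalent packing to encode_peer() above
    return ipaddress.IPv4Address(ip).packed + port.to_bytes(2, "big")

packed = compact_peer("127.0.0.1", 6881)
assert packed == bytes([127, 0, 0, 1]) + struct.pack(">H", 6881)
address = str(ipaddress.IPv4Address(packed[:4]))
port = int.from_bytes(packed[4:], "big")
assert (address, port) == ("127.0.0.1", 6881)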


@@ -130,21 +130,16 @@ def get_sd_hash(stream_info):
 def json_dumps_pretty(obj, **kwargs):
     return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '), **kwargs)
-def cancel_task(task: typing.Optional[asyncio.Task]):
-    if task and not task.done():
-        task.cancel()
-def cancel_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
-    for task in tasks:
-        cancel_task(task)
-def drain_tasks(tasks: typing.List[typing.Optional[asyncio.Task]]):
-    while tasks:
-        cancel_task(tasks.pop())
+try:
+    # the standard contextlib.aclosing() is available in 3.10+
+    from contextlib import aclosing  # pylint: disable=unused-import
+except ImportError:
+    @contextlib.asynccontextmanager
+    async def aclosing(thing):
+        try:
+            yield thing
+        finally:
+            await thing.aclose()
 def async_timed_cache(duration: int):
     def wrapper(func):
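A small usage sketch (illustration only, not part of the diff): the aclosing() fallback introduced above guarantees an async generator is closed even when iteration stops early, matching contextlib.aclosing() on Python 3.10+.

import asyncio
import contextlib

@contextlib.asynccontextmanager
async def aclosing(thing):  # same shape as the fallback in the hunk above
    try:
        yield thing
    finally:
        await thing.aclose()

async def numbers():
    for i in range(10):
        yield i

async def main():
    async with aclosing(numbers()) as gen:
        async for n in gen:
            if n == 3:  # stop early; aclosing() still awaits gen.aclose()
                break

asyncio.run(main())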
@@ -405,7 +400,7 @@ async def fallback_get_external_ip():  # used if spv servers can't be used for i
 async def _get_external_ip(default_servers) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
     # used if upnp is disabled or non-functioning
-    from lbry.wallet.server.udp import SPVStatusClientProtocol  # pylint: disable=C0415
+    from lbry.wallet.udp import SPVStatusClientProtocol  # pylint: disable=C0415
     hostname_to_ip = {}
     ip_to_hostnames = collections.defaultdict(list)
@@ -455,8 +450,8 @@ def is_running_from_bundle():
 class LockWithMetrics(asyncio.Lock):
-    def __init__(self, acquire_metric, held_time_metric, loop=None):
-        super().__init__(loop=loop)
+    def __init__(self, acquire_metric, held_time_metric):
+        super().__init__()
         self._acquire_metric = acquire_metric
         self._lock_held_time_metric = held_time_metric
         self._lock_acquired_time = None
@@ -474,3 +469,18 @@ class LockWithMetrics(asyncio.Lock):
             return super().release()
         finally:
             self._lock_held_time_metric.observe(time.perf_counter() - self._lock_acquired_time)
+
+
+def get_colliding_prefix_bits(first_value: bytes, second_value: bytes):
+    """
+    Calculates the amount of colliding prefix bits between <first_value> and <second_value>.
+    This is given by the amount of bits that are the same until the first different one (via XOR),
+    starting from the most significant bit to the least significant bit.
+    :param first_value: first value to compare, bigger than size.
+    :param second_value: second value to compare, bigger than size.
+    :return: amount of prefix colliding bits.
+    """
+    assert len(first_value) == len(second_value), "length should be the same"
+    size = len(first_value) * 8
+    first_value, second_value = int.from_bytes(first_value, "big"), int.from_bytes(second_value, "big")
+    return size - (first_value ^ second_value).bit_length()
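A worked example (illustration only, not part of the diff): 0xff and 0xfe differ only in their least significant bit, so the seven most significant bits collide.

first, second = bytes([0xff]), bytes([0xfe])
size = len(first) * 8                                               # 8 bits in total
xor = int.from_bytes(first, "big") ^ int.from_bytes(second, "big")  # 0b00000001
assert size - xor.bit_length() == 7                                 # get_colliding_prefix_bits(first, second) == 7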


@@ -1,17 +1,23 @@
-__node_daemon__ = 'lbrycrdd'
-__node_cli__ = 'lbrycrd-cli'
-__node_bin__ = ''
-__node_url__ = (
-    'https://github.com/lbryio/lbrycrd/releases/download/v0.17.4.6/lbrycrd-linux-1746.zip'
+__lbcd__ = 'lbcd'
+__lbcctl__ = 'lbcctl'
+__lbcwallet__ = 'lbcwallet'
+__lbcd_url__ = (
+    'https://github.com/lbryio/lbcd/releases/download/' +
+    'v0.22.100-rc.0/lbcd_0.22.100-rc.0_TARGET_PLATFORM.tar.gz'
+)
+__lbcwallet_url__ = (
+    'https://github.com/lbryio/lbcwallet/releases/download/' +
+    'v0.13.100-alpha.0/lbcwallet_0.13.100-alpha.0_TARGET_PLATFORM.tar.gz'
 )
 __spvserver__ = 'lbry.wallet.server.coin.LBCRegTest'
-from .wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
-from .manager import WalletManager
-from .network import Network
-from .ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
-from .account import Account, AddressManager, SingleKey, HierarchicalDeterministic, DeterministicChannelKeyManager
-from .transaction import Transaction, Output, Input
-from .script import OutputScript, InputScript
-from .database import SQLiteMixin, Database
-from .header import Headers
+from lbry.wallet.wallet import Wallet, WalletStorage, TimestampedPreferences, ENCRYPT_ON_DISK
+from lbry.wallet.manager import WalletManager
+from lbry.wallet.network import Network
+from lbry.wallet.ledger import Ledger, RegTestLedger, TestNetLedger, BlockHeightEvent
+from lbry.wallet.account import Account, AddressManager, SingleKey, HierarchicalDeterministic, \
+    DeterministicChannelKeyManager
+from lbry.wallet.transaction import Transaction, Output, Input
+from lbry.wallet.script import OutputScript, InputScript
+from lbry.wallet.database import SQLiteMixin, Database
+from lbry.wallet.header import Headers


@@ -215,6 +215,10 @@ class PrivateKey(_KeyBase):
         private_key = cPrivateKey.from_int(key_int)
         return cls(ledger, private_key, bytes((0,)*32), 0, 0)
+    @classmethod
+    def from_bytes(cls, ledger, key_bytes) -> 'PrivateKey':
+        return cls(ledger, cPrivateKey(key_bytes), bytes((0,)*32), 0, 0)
     @cachedproperty
     def private_key_bytes(self):
         """ Return the serialized private key (no leading zero byte). """


@@ -1064,4 +1064,182 @@ HASHES = {
1062000: 'c44d02a890aa66979b10d1cfa597c877f498841b4e12dd9a7bdf8d4a5fccab80',
1063000: '1c093734f5f241b36c1b9971e2759983f88f4033405a2588b4ebfd6998ac7465',
1064000: '9e354a83b71bbb9704053bfeea038a9c3d5daad080c6406c698b047c634706a6',
1065000: '563188accc4a6e311bd5046516a92a233f11f891b2304d37f151c5a6002b6958',
1066000: '333f1b4e996fac87e32dec667533715b31f1736b4342806a81d568b5c5238456',
1067000: 'df59a0b7319d5269bdf55043d91ec62bbb30829bb7054da623717a394b6ed678',
1068000: '06d8b674a205393edaf20c1d837baadc9caf0b0a675645246263cc163302241d',
1069000: 'ac065c48fad1383039d39e23c8367bad7cf9a37e07a5294cd7b04af5827b9961',
1070000: '90cd8b50f94208bc459081356474a961f6b764a1217f8fd291f5e4828081b730',
1071000: '3c0aa207ba9eea45458ab4fa26d6a027862592adb9bcce30915816e777dc6cfc',
1072000: '3d556c08f2300b67b704d3cbf46e22866e3ac164472b5930e2ada23b08475a0f',
1073000: 'a39b5c54c24efe3066aa203358b96baea405cd59aac6b0b48930e77799b4dd7d',
1074000: 'e8c8273d5a50a60e8744716c9f31496fb29eca87b4d68643f4ecd7ec4e400e23',
1075000: 'b8043ae41a1d0d7d4310c85764fcba1424733df347ffc2e8cbda1fe6ccbb5153',
1076000: '58468db1f91805e767d334824d6bffe54e0f900d1fb2a89b105086a493053b3d',
1077000: '04a78749b58465efa3a56d1735cd082c1f0f796e26486c7136950dbaf6effaa4',
1078000: 'e1dd6b58c75b01a67d4a4594dc7b4b2ee9e7d7fa7b25fd6246ce0e86eff33c75',
1079000: 'd239af017a6bb664485b14ad15e0eb703775e43018a045a8612b3697794460da',
1080000: '29ae5503f8c1249fefeb63fd967a71a70588ee0db1c97497e16366163a684341',
1081000: '05103ab27469e0859cbcd3daf42faa2bae798f522534697c7f2b34f7a050ee0f',
1082000: '4553d2cb7e90b6db11d242e287fe96822e6cd60e6388b94bf9006411f202ba03',
1083000: '97995acd178b2a142d571d5ae1c2a3deaf93a909fd91fb9c541d57f73e32dc99',
1084000: '9e3f23376af14d76ab24cd54e321dec019af73ad61067d959ff90043acc5ffcc',
1085000: '81c056b14f13cee0d6d6c8079fdd5a1a84c3a5c76cc9448612e8ef6d3531300e',
1086000: '8a0004f6809bdd075915a804e43991dfe8f22e05679d2fdaf8e373f101bac5c2',
1087000: '27c45a4c9ad24e038f2ebe40835a1c49ac7221d7185082866ee354351ba87c7a',
1088000: 'fd27e21747117b00b4ada1cba161ac49edb57cca540f86ac5ba885050f08f824',
1089000: 'bff867335767103bc3ed15ede5b9fde88016f8ede15dc5bf3e81ea40dcfc61ae',
1090000: '608f75016d1db08888dd59640f63e838c19bdfa833c0cc177ad3d2b818b0db5b',
1091000: '90750b452bd4dedaab6b57fecbfe88f71ce3d5437fad7f9ec0fdd270445c7526',
1092000: '98287b39f9f1233017dc5d932e5c77f0521ca84587eb3f39f0e7b6c297c749af',
1093000: '68a5846ed05c9bb142197849106838765f90f15c10b2cc938eef49b95eaa9d33',
1094000: '5660a1aac2fc763a417fc656c8887fc8186bf613ae1ccbb1a664fb43ce1fa1d6',
1095000: '62bad3db418b3f4cad3596881b645b72479c71deb0d39c7a4c8bd1577dc225fd',
1096000: 'e0e4b2b183591f10dd5614c289412f2fb5e320b7d3278f7c028f42f591872666',
1097000: 'a233a233fc2aa5dab9e75106d91388343ef969458ea974f1409a2ab5fc441911',
1098000: '16dfa5fa6cbd1188e562697b5f00ac206960d0851ed84adf37ae975fd5ffdd6a',
1099000: 'b8a870b7dc6d3263730c00f59d52aa6cce35dc59aa8fba715034cc2d14927260',
1100000: 'a3cd7749743da22a3846dcc2edbf1df21b938e829419389e3bc09284797c5b43',
1101000: '75b14c2a95e2a095949729b7c0b624bd725a2de98404a8e3247b60c977d0198e',
1102000: '4d3af64d37064dd5f57e25d61f248a1e21c1b1cadd7bb1404e35c9fbe06f1fd4',
1103000: 'd73c92bfed358dfcd7659228974ab75ea2fc86f2301ee47133adad8075203872',
1104000: '30cd82354f37bc0b412123867c7e1835206022a7501853bf8c0d3df02f291645',
1105000: '1d2ef984f26693dce77460cd2694e5da46e675077e91a1cea26051733b01a7ef',
1106000: '51c076c304222fe3ca308ba6968c46fef448f85be13a095cecb75b90e7954698',
1107000: '99e2221339e16acc34c9816f2ef7b866c2dd753aa3cbe484ae831959a23ece68',
1108000: '0f1227c250296bfe88eb7eb41703f99f633cfe02870816111e0cadfe778ddb19',
1109000: 'b35447f1ad76f95bc4f5886e4028d33acb3ad7b5000dd15516d3f11ce4baa990',
1110000: 'ac7baff996062bfaaaddd7d496b17e3ec1c8d34b2143095645ff22fb3888ae00',
1111000: '430bbbdcca36b2d69b6a2dd8b07c583a060a467e5f9acbc6de62462e1f7c7036',
1112000: 'e5274dea029dc44baff55c05b0555f91b74d29ffd40e3a8c4e2c5b57f9d40bef',
1113000: 'cf43863249fa42cfe108220dd40169dac702b0dd9cf5cb699cf2fc96feda8371',
1114000: 'fa1c0e551784d21c451564124d2d730e616724f3e535de3c186bcdeb47e80a8f',
1115000: '49fe6ecee35a397b83b5a704e950ad028cfb4b7e7a524021e789f4acc0fd6ffe',
1116000: '74ecded36751aa8b7901b31f0d16d75d111fc3c40b567f649c04f74ed028aa5c',
1117000: 'd9ca760a22190bdf545766b47d963c738a4edcc27f4d15ca801b35751577cfa7',
1118000: 'c28d42f871682800ac4e867608227cfb6bc4c00b618e83a8556f201a1c28813c',
1119000: 'c5fafc4e1785b0b9e84bb052e392154a5ba1aefe612998017e90772bcd554e08',
1120000: 'aa054d428bc9ccee0761da92163817163413065fe1e67ef79a056c5233ea3476',
1121000: '0df295bb944218503bd1bf66d2ece0c50fd22dae3391b80673a7ad1e4e5c3934',
1122000: 'a13abb350a26673b3933b1de307a60a6845ca594d502599548c6253e21a6d8e8',
1123000: 'a4bc6a3abf9ed1f4b14338ff0f03f83456312bc91a93fa89ae6db493050115e1',
1124000: '65869938df99adf0dda76200291ce09a54c9bcc787e4bb62cd72c367db58f4f0',
1125000: 'ea5e918233b14c3c73d488a906e3741c61bdcafe0393bd0404168fe80c950a46',
1126000: 'ce88cd35104fcec51bcee77302e03162dc694802536f5b668786b2245e61bca5',
1127000: 'ea19c0c8d205be4be87d02c5301c9ed331e7d75e25b93d1c2137c248882af515',
1128000: '006f32d63c2a3adcf4fbad0b0629c97f1beab6446a9c27fbde9472f2d066219e',
1129000: '218e5392e1ecf471c3bbc3d79c24dee30ac8db315dbeb61317318efb3f221163',
1130000: '30b9da0bd8364e9cd5551b2529341a01a3b7257a238d15b2560e2c99fdb324e8',
1131000: '8a7f382cfa023d2eba6639443e67206f8883b57d23ce7e1339234b8bb3098a82',
1132000: 'bf9af68a6fe2112d8fe311dfd52334ae2e7b0bac6675c9ebfddb1f386c212668',
1133000: '1a30951e2be633502a47c255a93ddbb9ed231d6bb4c55a807c0e910b437766b3',
1134000: 'a9bcaf3300b7915e701a8e396eb13f0c7287576323420be7aab3c3ba48020f76',
1135000: '337eed9ed072b5ad862af2d3d651f1b49fa852abc590b7e1c2dc381b496f438a',
1136000: '208761dbc29ec58302d722a05e937a3cf9e78bfb6495be395dd7b54f02e169dc',
1137000: '4e5b67ff3324b64e268049fdc3d82982b847ee359d409ade6368864c38a111e5',
1138000: '55d1d0833021a664e85eec8cc90a0985e67cc80d28841aaa8c2231ec28087ebb',
1139000: 'e750ada1ec9fa0f2f2461ed68958c7d116a699a82ec12911da5563139f8df19e',
1140000: '9cf81407b6ccc8046f0233f97484166945758f7392bb54841c912fcb34cf205c',
1141000: 'fccf32b2fae03e3b6b562483776625f9843cd68734c55659e2069cde7e383170',
1142000: 'c3608c215dd6569da6c1871c4d72a09ab1caa9663647f2a9454b5693d5d72a65',
1143000: 'bd39cb8c4e529d15bbea6baeec66afe52ca18afe32bd812f28fbb0676647cdff',
1144000: '6e42d02538565ce7e2d9bf31a304f1fd0ac122d35d17a030160575815901b0b1',
1145000: 'b9722e1de2904ce1219140fffb1f4f9f5a041f885faa634404238d103c738b4c',
1146000: 'd4de4271459966cee774f538a243d7db0689b213b296463d42e45c93194d7861',
1147000: '51fadf109f22bb85574d0fbcbd0b20992983e89aee3d415a7b1c37c44775d9a9',
1148000: '137e1fe8da31680d21a42e7421eb608a883a497314e4404625ce44b0edadde6a',
1149000: 'cb87867eb04203ce15e0763a2f4389376cea75e0a2877f55e2911c575bef07a8',
1150000: '977528ca7953a2c9c19fefaa3aab7ebdec3ac324d74a07d83764ba25d9be0689',
1151000: 'a09c51c832600ded63a19201df008075273ea248fd406886e93a2cbaa3bba46b',
1152000: '0e5367cfa0f00dd932a5bcc00dcc807fa6825161806bed588e16a57947b4b32d',
1153000: '55a9de3dcde2efb56a3c5fea7d22b98c1e180db9a4d4f4f6be7aae1f1cbd7608',
1154000: 'abc58cf71c4691ebfaef920252730cf69abbe9de88b424c03051b9b03e85d45a',
1155000: '4f074ce73c8a096620b8a32498362eb66a072eae95d561f2d53557cd513ae785',
1156000: '540a838a0f0a8834466b17dd456d35b8acae2ec8419f8bd9a704d9ea439062ac',
1157000: 'd5310ac671abdb658ea028db86c23fc729af965f91d67a37218c1412cf32a1f5',
1158000: '162d906a07e6c35e7c3ebf7069a200521605a97920f5b589d31b19bfd7766ee2',
1159000: '600bd8f5e1e62219e220f4dcb650db5812e79956f95ae8a50e83126932685ee0',
1160000: '91319398d1a805fac8582c8485e6d84e7490d6cfa6e44e2c630665b6bce0e6b8',
1161000: 'f7ad3cff6ee76e1e3df4abe70c600e4af66e1df55bf7b03aee12251d4455a1d4',
1162000: '85b9fbba669c2a4d3f85cdb5123f9538c05bd66172b7236d756703f99258454d',
1163000: '966085d767d1e5e2e8baf8eda8c11472ec5351181c418b503585284009aaea79',
1164000: '1c94e1b531215c019b12caf407296d8868481f49524b7180c7161b0363c1f789',
1165000: '803b6bf93735aeae2cf607824e2adf0d754b58da2516c2da1e485c697e472143',
1166000: '872561a82f7991633d0927d25cb659d096bbe556fe6dac7a0b6a679820733069',
1167000: '6bd7cdd605a3179b54c8af88d1638bf8133fab12cbf0a78d37cf21eddf4395a1',
1168000: '79946f5758c1817239cc642d27298bd710983551a8236e49832c6d818b097337',
1169000: 'b0994c60728e74de4aa361f37fa85e5296ce3188ae4e0b66d7b34fe86a239c9c',
1170000: 'a54188a5a64e0cf8da2406d16a0ac3983b087fc7d6231b6f8abf92cf11dc78cd',
1171000: 'ec2924d98e470cc6359821e6468df2c15d60301861d443188730342581230ef2',
1172000: 'b4ac11116aa73ce19428009a80e583e19dc9bcd380f7f7ce272a92921d5868d2',
1173000: '501d3551f762999dd5a799f3c5658fff2a7f3aff0511488272cd7693fefb8f9d',
1174000: '4660074ea48a78ae453cb14b694b2844cc0fb63ed9352ed20d11158bbb5c1f28',
1175000: '0727f6b1d9f8fe5677a9ffa0d475f53f5a419ef90b80896c22c2c95de22175de',
1176000: '150633d6a35496c24a93c9e19817e90f649c56b7e2558f99e97325bfd5df8b17',
1177000: '0849e19f22571b62dba8ff02f6b5a064a7ac36e7ed491321b3663567e8e17294',
1178000: '770dd463e7bad80f689f12934e4ae06e24378d1545dcf211fd143beaef49464e',
1179000: '059d383dcc60a49b658b674d92fc35cab07b06329c58d73818b6387cb0c06534',
1180000: 'e547cb3c636243ca9ae4cfb92c30a0f583eda84e329a5c1e5f64a26fc6fc791e',
1181000: '4521a4396ab02f73d45d7a3393ea1c602d255778d52c12079c88bfbad32aab43',
1182000: '051cfe993e4b0b34233403a9e8c397dd50e8b78a30fb07e9c260604ee9e624a9',
1183000: '44a69c99bb8b85e84ae279f2d8e5400d51cb3d5f0bcd178db49d55548cd66191',
1184000: '2a1d23c9bb3c71a533e0c9d25b03bfa7e9db8e014645f3e7fbede6d99fff0191',
1185000: 'bb90d6c6d77819163a9e909ee621d874707cdb21c91b1d9e861b204cf37d0ffa',
1186000: '4a92051b738ea0e28c64c64f1eb6f0405bc7c3427bef91ff20f4c43cf084d750',
1187000: 'f782ac330ca20fb5d8a094ee0f0f8c086a76e3f03ecc6a2c42f8fd07e52e0f41',
1188000: '94cb7b653dd3d838c186420158cf0e73db73ec28deaf67d9a2ca902caba4141a',
1189000: 'c8128e59b9ec948de890184578a113478ea63f7d57cb75c2c8d5c001a5a724c0',
1190000: '4da643bd35e5b98932ae21515a6bffb9c72f2cd8d514cd2d7eac1922af785c3f',
1191000: '0f922d86658ac3f53c5f9db360c68ab3f3253a925f23e1323820e3384214719a',
1192000: '4c3ab631cf5ba0c236f7c64af6f790fc24448319de6f75dbd28df4e2648d0b7d',
1193000: 'eda118d1fac3470a1f8f01f5c78108c8ecdcd6420be30f6d20f1d1831e7b6975',
1194000: '5723fff88abd9bb5088476fa5f4221a61c6f8a718703a92f13248ad350abeea2',
1195000: '1715846f82d011919e3446c6ce675a65fb80338bd791d4e735702c4767d9adc4',
1196000: 'b497667996aee2db61e88f442e728be15ab0b2b64cfd43198691fcf6cdafacc8',
1197000: '309a6170d837b8cb334fb888a64ed4e47e6592747e93c8e9d1bf7d608cfef87d',
1198000: '3ea918ef64a67dec20051519e6aefaeb7aca2d8583baca9ad5c5bd07073e513a',
1199000: '4ec7b7361b0243e5b2996a16e3b27acd662126b95fe542a487c7030e47ea3667',
1200000: 'b829c742686fcd642d0f9443336d7e2c4eab81667c90ce553df1350ed10b4233',
1201000: '44c022887f1e126fd281b1cae26b2017fa6415a64b105762c87643204ce165a5',
1202000: 'b11cc739eb28a14f4e47be125aa7e62d6d6f90c8f8014ee70044ed506d53d938',
1203000: '997a7c5fd7a98b39c9ca0790519924d73c3567656b605c97a6fdb7b406c3c64d',
1204000: '7d25d872e17195ee277243f7a5a39aa64d8750cec62e4777146acf61a8e76b04',
1205000: 'ce8486ae745a4645bee081ef3291d9505174bed05b0668d963b2998b7643dbb0',
1206000: '46a0bcea3c411c600dffe3e06e3d1dfbf5879a7ec4dcf3848e794cefcbf2bc0b',
1207000: '37e6297bf6e4e2bdd40401d4d7f95e3e3bdafd4a7f76b9c52865cefc6b82b20b',
1208000: 'd09e3982a9827b8cf56a5a2f4031dc6b082926c1fd57b63beaaa6cfd534eb902',
1209000: '54ae9010a9f146c83464e7ee60b30d9dbee36418561abc4e8d61bce9baa2d21d',
1210000: '5dcfd33f8e5ac21c9ba8553758b8cd8afae7961cad428530b5109c2db2ebf39f',
1211000: '91c952348bb2c3dfac0d6531a3dac770ea6dab571af257530e9c55493c96bdd9',
1212000: 'e62cc3fe044a7f5de4c04a8aed5619548f9d5c6fad9f989d3382cb96de1d780d',
1213000: '66b46ffdca8acf1dd04528dadb28b6ac4ce38807c1b84abd685d4ddb3dc59a34',
1214000: '2ce4091756ad23746bab4906f46545953cadaf61deae0d78e8a10d4eb51866b1',
1215000: '83ce3ca087799cdc4b4c5e7cfeb4a127708724a7ca76aa5f7f4ec1ed48b5fca6',
1216000: '7d07b739b7991fbd74926281bf51bba9d5721afab39598720f9ff5f7410a6721',
1217000: '76adf49491670d0e8379058eacf0228f330f3c18955dfea1ebe43bc11ee065f3',
1218000: '77f422e7301a81692dec69e5c6d35fa988a00a4d820ad0ebb1d595add36558cc',
1219000: '8ba9d944f8c468c81799294aeea8dc05ed1bb90bb26552fcd190bd88fedcddf2',
1220000: '00330367c255e0fe51b374597995c53353bc5700ad7d603cbd4197141933fe9c',
1221000: '3ba8b316b7964f31fdf628ed869a6fd023680cca6611257a31efe22e4d17e578',
1222000: '016e58d3fb6a29a3f9281789359460e776e9feb2f0db500482b6e231e1272aef',
1223000: 'fdfe767c29a3de7acd913b627d1e5fa887a1af9974f6a8a6474db822468c785c',
1224000: '92239f6207bff3689c554e92b24fe2e7be4a2203104ad8ef08b2c6bedd9aeccf',
1225000: '9a2f2dd9527b533d3d743efc55236e73e15192171bc8d0cd910918d1ab00aef7',
1226000: 'eb8269c75b8c5f66e6ea88ad70883dddcf8a75a45198ca7a46eb0ec606a791bb',
1227000: '5c82e624390cd57942dc9d64344eaa3d8991e0437e01802473053245b706290c',
1228000: '51e9a7d727f07fc01be7c03e3dd854eb666697f05bf89259baac628520d4402c',
1229000: 'c4bfdb651c9abdeda717fb9c8a4c8a6c9c0f78c13d3e6cae3f24f504d734c643',
1230000: '9f1ce781d16f2334567cbfb22fff42c14d2b9290cc2883746f435a1fb127021d',
1231000: '5c996634b377412ae0a3d8f541f3cc4a354aab72c198aa23a5cfc2678cbabf09',
1232000: '86702316a2d1730fbae01a08f36fffe5bf6d3ebb7d76b35a1617713766698b46',
1233000: 'fb16b63916c0287cb9b01d0c5aad626ced1b73c49a374c9009703aa90fd27a82',
1234000: '7c6f7904602ccd86bfb05cb8d6b5547c989c57cb2e214e93f1220fa4fe29bcb0',
1235000: '898b0f20811f52aa5a6bd0c35eff86fca3fbe3b066e423644fa77b2e269d9513',
1236000: '39128910ef624b6a8bbd390a311b5587c0991cda834eed996d814fe410cac352',
1237000: 'a0709afeedb64af4168ce8cf3dbda667a248df8e91da96acb2333686a2b89325',
1238000: 'e00075e7ba8c18cc277bfc5115ae6ff6b9678e6e99efd6e45f549ef8a3981a3d',
1239000: '3fba891600738f2d37e279209d52bbe6dc7ce005eeed62048247c96f370e7cd5',
1240000: 'def9bf1bec9325db90bb070f532972cfdd74e814c2b5e74a4d5a7c09a963a5f1',
1241000: '6a5d187e32bc189ac786959e1fe846031b97ae1ce202c22e1bdb1d2a963005fd',
1242000: 'a74d7c0b104eaf76c53a3a31ce51b75bbd8e05b5e84c31f593f505a13d83634c',
}


@@ -141,7 +141,7 @@ class CoinSelector:
               _) -> List[OutputEffectiveAmountEstimator]:
         """ Accumulate UTXOs at random until there is enough to cover the target. """
         target = self.target + self.cost_of_change
-        self.random.shuffle(txos, self.random.random)
+        self.random.shuffle(txos, random=self.random.random)  # pylint: disable=deprecated-argument
         selection = []
         amount = 0
         for coin in txos:
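A brief sketch (illustrative assumption, not repo code): CoinSelector keeps its own random generator, which can be seeded so UTXO shuffling stays reproducible in tests. The random= keyword used above is deprecated since Python 3.9 and removed in 3.11, which is why this hunk only adds a pylint suppression rather than new behaviour.

import random

rng = random.Random(1)                 # seeded, so the order below is reproducible
txos = ["txo-a", "txo-b", "txo-c", "txo-d"]
rng.shuffle(txos)                      # same permutation on every run with this seed
print(txos)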


@@ -1211,6 +1211,7 @@ class Database(SQLiteMixin):
         return addresses
     async def get_address_count(self, cols=None, read_only=False, **constraints):
+        self._clean_txo_constraints_for_aggregation(constraints)
         count = await self.select_addresses('COUNT(*) as total', read_only=read_only, **constraints)
         return count[0]['total'] or 0


@@ -16,18 +16,18 @@ from lbry.crypto.hash import hash160, double_sha256, sha256
 from lbry.crypto.base58 import Base58
 from lbry.utils import LRUCacheWithMetrics
-from .tasks import TaskGroup
-from .database import Database
-from .stream import StreamController
-from .dewies import dewies_to_lbc
-from .account import Account, AddressManager, SingleKey
-from .network import Network
-from .transaction import Transaction, Output
-from .header import Headers, UnvalidatedHeaders
-from .checkpoints import HASHES
-from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
-from .bip32 import PublicKey, PrivateKey
-from .coinselection import CoinSelector
+from lbry.wallet.tasks import TaskGroup
+from lbry.wallet.database import Database
+from lbry.wallet.stream import StreamController
+from lbry.wallet.dewies import dewies_to_lbc
+from lbry.wallet.account import Account, AddressManager, SingleKey
+from lbry.wallet.network import Network
+from lbry.wallet.transaction import Transaction, Output
+from lbry.wallet.header import Headers, UnvalidatedHeaders
+from lbry.wallet.checkpoints import HASHES
+from lbry.wallet.constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
+from lbry.wallet.bip32 import PublicKey, PrivateKey
+from lbry.wallet.coinselection import CoinSelector
 log = logging.getLogger(__name__)
@@ -106,7 +106,7 @@ class Ledger(metaclass=LedgerRegistry):
     target_timespan = 150
     default_fee_per_byte = 50
-    default_fee_per_name_char = 200000
+    default_fee_per_name_char = 0
     checkpoints = HASHES
@@ -329,10 +329,10 @@ class Ledger(metaclass=LedgerRegistry):
     async def start(self):
         if not os.path.exists(self.path):
             os.mkdir(self.path)
-        await asyncio.wait([
+        await asyncio.wait(map(asyncio.create_task, [
             self.db.open(),
             self.headers.open()
-        ])
+        ]))
         fully_synced = self.on_ready.first
         asyncio.create_task(self.network.start())
         await self.network.on_connected.first
@@ -365,6 +365,10 @@ class Ledger(metaclass=LedgerRegistry):
         await self.db.close()
         await self.headers.close()
+    async def tasks_are_done(self):
+        await self._update_tasks.done.wait()
+        await self._other_tasks.done.wait()
     @property
     def local_height_including_downloaded_height(self):
         return max(self.headers.height, self._download_height)
@@ -462,9 +466,9 @@ class Ledger(metaclass=LedgerRegistry):
     async def subscribe_accounts(self):
         if self.network.is_connected and self.accounts:
             log.info("Subscribe to %i accounts", len(self.accounts))
-            await asyncio.wait([
+            await asyncio.wait(map(asyncio.create_task, [
                 self.subscribe_account(a) for a in self.accounts
-            ])
+            ]))
     async def subscribe_account(self, account: Account):
         for address_manager in account.address_managers.values():
@@ -718,6 +722,15 @@ class Ledger(metaclass=LedgerRegistry):
                 return account.address_managers[details['chain']]
         return None
+    async def broadcast_or_release(self, tx, blocking=False):
+        try:
+            await self.broadcast(tx)
+        except:
+            await self.release_tx(tx)
+            raise
+        if blocking:
+            await self.wait(tx, timeout=None)
     def broadcast(self, tx):
         # broadcast can't be a retriable call yet
         return self.network.broadcast(hexlify(tx.raw).decode())
@@ -739,7 +752,7 @@ class Ledger(metaclass=LedgerRegistry):
         while timeout and (int(time.perf_counter()) - start) <= timeout:
             if await self._wait_round(tx, height, addresses):
                 return
-        raise asyncio.TimeoutError('Timed out waiting for transaction.')
+        raise asyncio.TimeoutError(f'Timed out waiting for transaction. {tx.id}')
     async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
         records = await self.db.get_addresses(address__in=addresses)
@@ -776,13 +789,9 @@ class Ledger(metaclass=LedgerRegistry):
             include_is_my_output=False,
             include_sent_supports=False,
             include_sent_tips=False,
-            include_received_tips=False,
-            hub_server=False) -> Tuple[List[Output], dict, int, int]:
+            include_received_tips=False) -> Tuple[List[Output], dict, int, int]:
         encoded_outputs = await query
-        if hub_server:
-            outputs = Outputs.from_grpc(encoded_outputs)
-        else:
-            outputs = Outputs.from_base64(encoded_outputs or b'')  # TODO: why is the server returning None?
+        outputs = Outputs.from_base64(encoded_outputs or '')  # TODO: why is the server returning None?
         txs: List[Transaction] = []
         if len(outputs.txs) > 0:
             async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
@@ -858,13 +867,10 @@ class Ledger(metaclass=LedgerRegistry):
                     txo.received_tips = tips
         return txos, blocked, outputs.offset, outputs.total
-    async def resolve(self, accounts, urls, new_sdk_server=None, **kwargs):
+    async def resolve(self, accounts, urls, **kwargs):
         txos = []
         urls_copy = list(urls)
-        if new_sdk_server:
-            resolve = partial(self.network.new_resolve, new_sdk_server)
-        else:
-            resolve = partial(self.network.retriable_call, self.network.resolve)
+        resolve = partial(self.network.retriable_call, self.network.resolve)
         while urls_copy:
             batch, urls_copy = urls_copy[:100], urls_copy[100:]
             txos.extend(
@@ -889,17 +895,14 @@ class Ledger(metaclass=LedgerRegistry):
             return await self.network.sum_supports(new_sdk_server, **kwargs)
     async def claim_search(
-            self, accounts, include_purchase_receipt=False, include_is_my_output=False,
-            new_sdk_server=None, **kwargs) -> Tuple[List[Output], dict, int, int]:
-        if new_sdk_server:
-            claim_search = partial(self.network.new_claim_search, new_sdk_server)
-        else:
-            claim_search = self.network.claim_search
+            self, accounts,
+            include_purchase_receipt=False,
+            include_is_my_output=False,
+            **kwargs) -> Tuple[List[Output], dict, int, int]:
         return await self._inflate_outputs(
-            claim_search(**kwargs), accounts,
+            self.network.claim_search(**kwargs), accounts,
             include_purchase_receipt=include_purchase_receipt,
-            include_is_my_output=include_is_my_output,
-            hub_server=new_sdk_server is not None
+            include_is_my_output=include_is_my_output
         )
     # async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
@@ -935,9 +938,7 @@ class Ledger(metaclass=LedgerRegistry):
                     "%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
                     account.id, balance, total_receiving, account.receiving.gap, total_change,
                     account.change.gap, channel_count, len(account.channel_keys), claim_count)
-            except Exception as err:
-                if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                    raise
+            except Exception:
                 log.exception(
                     'Failed to display wallet state, please file issue '
                     'for this bug along with the traceback you see below:')
@@ -960,9 +961,7 @@ class Ledger(metaclass=LedgerRegistry):
             claim_ids = [p.purchased_claim_id for p in purchases]
             try:
                 resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
-            except Exception as err:
-                if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                    raise
+            except Exception:
                 log.exception("Resolve failed while looking up purchased claim ids:")
                 resolved = []
             lookup = {claim.claim_id: claim for claim in resolved}
@@ -1042,9 +1041,7 @@ class Ledger(metaclass=LedgerRegistry):
             claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
             try:
                 resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
-            except Exception as err:
-                if isinstance(err, asyncio.CancelledError):  # TODO: remove when updated to 3.8
-                    raise
+            except Exception:
                 log.exception("Resolve failed while looking up collection claim ids:")
                 return []
             claims = []
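A minimal sketch (illustration only, not code from the repo) of why the isinstance(err, CancelledError) guards above could be dropped: from Python 3.8 onward asyncio.CancelledError derives from BaseException, so a bare "except Exception" no longer swallows task cancellation and the explicit re-raise becomes unnecessary.

import asyncio

async def cancellable():
    try:
        await asyncio.sleep(10)
    except Exception:              # does NOT catch CancelledError on 3.8+
        return "swallowed"

async def main():
    task = asyncio.create_task(cancellable())
    await asyncio.sleep(0)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print("cancellation propagated")

asyncio.run(main())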


@@ -3,7 +3,6 @@ import json
 import typing
 import logging
 import asyncio
-from distutils.util import strtobool
 from binascii import unhexlify
 from decimal import Decimal
@@ -12,13 +11,13 @@ from typing import List, Type, MutableSequence, MutableMapping, Optional
 from lbry.error import KeyFeeAboveMaxAllowedError, WalletNotLoadedError
 from lbry.conf import Config, NOT_SET
-from .dewies import dewies_to_lbc
-from .account import Account
-from .ledger import Ledger, LedgerRegistry
-from .transaction import Transaction, Output
-from .database import Database
-from .wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
-from .rpc.jsonrpc import CodeMessageError
+from lbry.wallet.dewies import dewies_to_lbc
+from lbry.wallet.account import Account
+from lbry.wallet.ledger import Ledger, LedgerRegistry
+from lbry.wallet.transaction import Transaction, Output
+from lbry.wallet.database import Database
+from lbry.wallet.wallet import Wallet, WalletStorage, ENCRYPT_ON_DISK
+from lbry.wallet.rpc.jsonrpc import CodeMessageError
 if typing.TYPE_CHECKING:
     from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
@@ -183,7 +182,6 @@ class WalletManager:
         }[config.blockchain_name]
         ledger_config = {
-            'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
             'auto_connect': True,
             'explicit_servers': [],
             'hub_timeout': config.hub_timeout,
@@ -238,7 +236,6 @@ class WalletManager:
     async def reset(self):
         self.ledger.config = {
-            'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
            'auto_connect': True,
            'explicit_servers': [],
            'default_servers': Config.lbryum_servers.default,
@@ -320,10 +317,4 @@ class WalletManager:
         )
     async def broadcast_or_release(self, tx, blocking=False):
-        try:
-            await self.ledger.broadcast(tx)
-        except:
-            await self.ledger.release_tx(tx)
-            raise
-        if blocking:
-            await self.ledger.wait(tx, timeout=None)
+        await self.ledger.broadcast_or_release(tx, blocking=blocking)


@@ -7,16 +7,13 @@ from time import perf_counter
 from collections import defaultdict
 from typing import Dict, Optional, Tuple
 import aiohttp
-import grpc
-from lbry.schema.types.v2 import hub_pb2_grpc
-from lbry.schema.types.v2.hub_pb2 import SearchRequest
 from lbry import __version__
 from lbry.utils import resolve_host
 from lbry.error import IncompatibleWalletServerError
 from lbry.wallet.rpc import RPCSession as BaseClientSession, Connector, RPCError, ProtocolError
 from lbry.wallet.stream import StreamController
-from lbry.wallet.server.udp import SPVStatusClientProtocol, SPVPong
+from lbry.wallet.udp import SPVStatusClientProtocol, SPVPong
 from lbry.conf import KnownHubsList
 log = logging.getLogger(__name__)
@@ -120,9 +117,9 @@ class ClientSession(BaseClientSession):
                 )
             else:
                 await asyncio.sleep(max(0, max_idle - (now - self.last_send)))
-        except Exception as err:
+        except (Exception, asyncio.CancelledError) as err:
             if isinstance(err, asyncio.CancelledError):
-                log.warning("closing connection to %s:%i", *self.server)
+                log.info("closing connection to %s:%i", *self.server)
             else:
                 log.exception("lost connection to spv")
         finally:
@@ -140,7 +137,7 @@ class ClientSession(BaseClientSession):
             controller.add(request.args)
     def connection_lost(self, exc):
-        log.warning("Connection lost: %s:%d", *self.server)
+        log.debug("Connection lost: %s:%d", *self.server)
         super().connection_lost(exc)
         self.response_time = None
         self.connection_latency = None
@@ -217,7 +214,7 @@ class Network:
         def loop_task_done_callback(f):
             try:
                 f.result()
-            except Exception:
+            except (Exception, asyncio.CancelledError):
                 if self.running:
                     log.exception("wallet server connection loop crashed")
@@ -303,7 +300,7 @@ class Network:
                 concurrency=self.config.get('concurrent_hub_requests', 30))
             try:
                 await client.create_connection()
-                log.warning("Connected to spv server %s:%i", host, port)
+                log.info("Connected to spv server %s:%i", host, port)
                 await client.ensure_server_version()
                 return client
             except (asyncio.TimeoutError, ConnectionError, OSError, IncompatibleWalletServerError, RPCError):
@@ -315,7 +312,8 @@ class Network:
         sleep_delay = 30
         while self.running:
             await asyncio.wait(
-                [asyncio.sleep(30), self._urgent_need_reconnect.wait()], return_when=asyncio.FIRST_COMPLETED
+                map(asyncio.create_task, [asyncio.sleep(30), self._urgent_need_reconnect.wait()]),
+                return_when=asyncio.FIRST_COMPLETED
             )
             if self._urgent_need_reconnect.is_set():
                 sleep_delay = 30
@@ -341,14 +339,13 @@ class Network:
             try:
                 if not self._urgent_need_reconnect.is_set():
                     await asyncio.wait(
-                        [self._keepalive_task, self._urgent_need_reconnect.wait()],
+                        [self._keepalive_task, asyncio.create_task(self._urgent_need_reconnect.wait())],
                         return_when=asyncio.FIRST_COMPLETED
                     )
                 else:
                     await self._keepalive_task
                 if self._urgent_need_reconnect.is_set():
                     log.warning("urgent reconnect needed")
-                    self._urgent_need_reconnect.clear()
                 if self._keepalive_task and not self._keepalive_task.done():
                     self._keepalive_task.cancel()
             except asyncio.CancelledError:
@@ -357,7 +354,7 @@ class Network:
                 self._keepalive_task = None
                 self.client = None
                 self.server_features = None
-                log.warning("connection lost to %s", server_str)
+                log.info("connection lost to %s", server_str)
         log.info("network loop finished")
     async def stop(self):
@@ -394,7 +391,6 @@ class Network:
                 log.warning("Wallet server call timed out, retrying.")
             except ConnectionError:
                 log.warning("connection error")
         raise asyncio.CancelledError()  # if we got here, we are shutting down
     def _update_remote_height(self, header_args):
@@ -477,21 +473,6 @@ class Network:
     def claim_search(self, session_override=None, **kwargs):
         return self.rpc('blockchain.claimtrie.search', kwargs, False, session_override)
-    async def new_resolve(self, server, urls):
-        message = {"method": "resolve", "params": {"urls": urls, "protobuf": True}}
-        async with self.aiohttp_session.post(server, json=message) as r:
-            result = await r.json()
-            return result['result']
-    async def new_claim_search(self, server, **kwargs):
-        async with grpc.aio.insecure_channel(server) as channel:
-            stub = hub_pb2_grpc.HubStub(channel)
-            try:
-                response = await stub.Search(SearchRequest(**kwargs))
-            except grpc.aio.AioRpcError as error:
-                raise RPCError(error.code(), error.details())
-            return response
     async def sum_supports(self, server, **kwargs):
         message = {"method": "support_sum", "params": kwargs}
         async with self.aiohttp_session.post(server, json=message) as r:
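A minimal sketch (illustration only, not code from the repo) of the asyncio.wait() change made throughout these hunks: passing bare coroutines to asyncio.wait() has been deprecated since Python 3.8 and is rejected in 3.11, so each coroutine is wrapped in a task first while the waiting behaviour stays the same.

import asyncio

async def probe(n):
    await asyncio.sleep(n / 100)
    return n

async def main():
    done, pending = await asyncio.wait(
        map(asyncio.create_task, [probe(1), probe(2)]),   # tasks, not bare coroutines
        return_when=asyncio.FIRST_COMPLETED,
    )
    print(len(done), "completed,", len(pending), "still pending")
    for task in pending:
        task.cancel()

asyncio.run(main())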


@@ -1,5 +1,2 @@
-__hub_url__ = (
-    "https://github.com/lbryio/hub/releases/download/v0.2022.01.21.1/hub"
-)
-from .node import Conductor
-from .service import ConductorService
+from lbry.wallet.orchstr8.node import Conductor
+from lbry.wallet.orchstr8.service import ConductorService


@@ -5,7 +5,9 @@ import aiohttp
 from lbry import wallet
 from lbry.wallet.orchstr8.node import (
-    Conductor, get_blockchain_node_from_ledger
+    Conductor,
+    get_lbcd_node_from_ledger,
+    get_lbcwallet_node_from_ledger
 )
 from lbry.wallet.orchstr8.service import ConductorService
@@ -16,10 +18,11 @@ def get_argument_parser():
     )
     subparsers = parser.add_subparsers(dest='command', help='sub-command help')
-    subparsers.add_parser("download", help="Download blockchain node binary.")
+    subparsers.add_parser("download", help="Download lbcd and lbcwallet node binaries.")
     start = subparsers.add_parser("start", help="Start orchstr8 service.")
-    start.add_argument("--blockchain", help="Hostname to start blockchain node.")
+    start.add_argument("--lbcd", help="Hostname to start lbcd node.")
+    start.add_argument("--lbcwallet", help="Hostname to start lbcwallet node.")
     start.add_argument("--spv", help="Hostname to start SPV server.")
     start.add_argument("--wallet", help="Hostname to start wallet daemon.")
@@ -47,7 +50,8 @@ def main():
     if command == 'download':
         logging.getLogger('blockchain').setLevel(logging.INFO)
-        get_blockchain_node_from_ledger(wallet).ensure()
+        get_lbcd_node_from_ledger(wallet).ensure()
+        get_lbcwallet_node_from_ledger(wallet).ensure()
     elif command == 'generate':
         loop.run_until_complete(run_remote_command(
@@ -57,9 +61,12 @@ def main():
     elif command == 'start':
         conductor = Conductor()
-        if getattr(args, 'blockchain', False):
-            conductor.blockchain_node.hostname = args.blockchain
-            loop.run_until_complete(conductor.start_blockchain())
+        if getattr(args, 'lbcd', False):
+            conductor.lbcd_node.hostname = args.lbcd
+            loop.run_until_complete(conductor.start_lbcd())
+        if getattr(args, 'lbcwallet', False):
+            conductor.lbcwallet_node.hostname = args.lbcwallet
+            loop.run_until_complete(conductor.start_lbcwallet())
         if getattr(args, 'spv', False):
            conductor.spv_node.hostname = args.spv
            loop.run_until_complete(conductor.start_spv())


@@ -1,3 +1,4 @@
+# pylint: disable=import-error
 import os
 import json
 import shutil
@@ -7,35 +8,44 @@ import tarfile
 import logging
 import tempfile
 import subprocess
-import importlib
-from distutils.util import strtobool
+import platform
 from binascii import hexlify
 from typing import Type, Optional
 import urllib.request
 from uuid import uuid4
 import lbry
-from lbry.wallet.server.server import Server
-from lbry.wallet.server.env import Env
 from lbry.wallet import Wallet, Ledger, RegTestLedger, WalletManager, Account, BlockHeightEvent
 from lbry.conf import KnownHubsList, Config
-from lbry.wallet.orchstr8 import __hub_url__
 log = logging.getLogger(__name__)
-def get_spvserver_from_ledger(ledger_module):
-    spvserver_path, regtest_class_name = ledger_module.__spvserver__.rsplit('.', 1)
-    spvserver_module = importlib.import_module(spvserver_path)
-    return getattr(spvserver_module, regtest_class_name)
+try:
+    from hub.herald.env import ServerEnv
+    from hub.scribe.env import BlockchainEnv
+    from hub.elastic_sync.env import ElasticEnv
+    from hub.herald.service import HubServerService
+    from hub.elastic_sync.service import ElasticSyncService
+    from hub.scribe.service import BlockchainProcessorService
+except ImportError:
+    pass
-def get_blockchain_node_from_ledger(ledger_module):
-    return BlockchainNode(
-        ledger_module.__node_url__,
-        os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
-        os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__)
+def get_lbcd_node_from_ledger(ledger_module):
+    return LBCDNode(
+        ledger_module.__lbcd_url__,
+        ledger_module.__lbcd__,
+        ledger_module.__lbcctl__
+    )
+def get_lbcwallet_node_from_ledger(ledger_module):
+    return LBCWalletNode(
+        ledger_module.__lbcwallet_url__,
+        ledger_module.__lbcwallet__,
+        ledger_module.__lbcctl__
     )
@@ -43,53 +53,37 @@ class Conductor:
     def __init__(self, seed=None):
         self.manager_module = WalletManager
-        self.spv_module = get_spvserver_from_ledger(lbry.wallet)
-        self.blockchain_node = get_blockchain_node_from_ledger(lbry.wallet)
-        self.spv_node = SPVNode(self.spv_module)
+        self.lbcd_node = get_lbcd_node_from_ledger(lbry.wallet)
+        self.lbcwallet_node = get_lbcwallet_node_from_ledger(lbry.wallet)
+        self.spv_node = SPVNode()
         self.wallet_node = WalletNode(
             self.manager_module, RegTestLedger, default_seed=seed
         )
-        self.hub_node = HubNode(__hub_url__, "hub", self.spv_node)
-        self.blockchain_started = False
+        self.lbcd_started = False
+        self.lbcwallet_started = False
         self.spv_started = False
         self.wallet_started = False
-        self.hub_started = False
         self.log = log.getChild('conductor')
-    async def start_blockchain(self):
-        if not self.blockchain_started:
-            asyncio.create_task(self.blockchain_node.start())
-            await self.blockchain_node.running.wait()
-            await self.blockchain_node.generate(200)
-            self.blockchain_started = True
+    async def start_lbcd(self):
+        if not self.lbcd_started:
+            await self.lbcd_node.start()
+            self.lbcd_started = True
-    async def stop_blockchain(self):
-        if self.blockchain_started:
-            await self.blockchain_node.stop(cleanup=True)
-            self.blockchain_started = False
+    async def stop_lbcd(self, cleanup=True):
+        if self.lbcd_started:
+            await self.lbcd_node.stop(cleanup)
+            self.lbcd_started = False
-    async def start_hub(self):
-        if not self.hub_started:
-            asyncio.create_task(self.hub_node.start())
-            await self.blockchain_node.running.wait()
-            self.hub_started = True
-    async def stop_hub(self):
-        if self.hub_started:
-            await self.hub_node.stop(cleanup=True)
-            self.hub_started = False
     async def start_spv(self):
         if not self.spv_started:
-            await self.spv_node.start(self.blockchain_node)
+            await self.spv_node.start(self.lbcwallet_node)
             self.spv_started = True
-    async def stop_spv(self):
+    async def stop_spv(self, cleanup=True):
         if self.spv_started:
-            await self.spv_node.stop(cleanup=True)
+            await self.spv_node.stop(cleanup)
             self.spv_started = False
     async def start_wallet(self):
@@ -97,13 +91,30 @@ class Conductor:
             await self.wallet_node.start(self.spv_node)
             self.wallet_started = True
-    async def stop_wallet(self):
+    async def stop_wallet(self, cleanup=True):
         if self.wallet_started:
-            await self.wallet_node.stop(cleanup=True)
+            await self.wallet_node.stop(cleanup)
             self.wallet_started = False
+    async def start_lbcwallet(self, clean=True):
+        if not self.lbcwallet_started:
+            await self.lbcwallet_node.start()
+            if clean:
+                mining_addr = await self.lbcwallet_node.get_new_address()
+                self.lbcwallet_node.mining_addr = mining_addr
+                await self.lbcwallet_node.generate(200)
+            # unlock the wallet for the next 1 hour
+            await self.lbcwallet_node.wallet_passphrase("password", 3600)
+            self.lbcwallet_started = True
+    async def stop_lbcwallet(self, cleanup=True):
+        if self.lbcwallet_started:
+            await self.lbcwallet_node.stop(cleanup)
+            self.lbcwallet_started = False
     async def start(self):
-        await self.start_blockchain()
+        await self.start_lbcd()
+        await self.start_lbcwallet()
         await self.start_spv()
         await self.start_wallet()
@@ -111,7 +122,8 @@ class Conductor:
         all_the_stops = [
             self.stop_wallet,
             self.stop_spv,
-            self.stop_blockchain
+            self.stop_lbcwallet,
+            self.stop_lbcd
         ]
         for stop in all_the_stops:
             try:
@@ -119,6 +131,12 @@ class Conductor:
             except Exception as e:
                 log.exception('Exception raised while stopping services:', exc_info=e)
+    async def clear_mempool(self):
+        await self.stop_lbcwallet(cleanup=False)
+        await self.stop_lbcd(cleanup=False)
+        await self.start_lbcd()
+        await self.start_lbcwallet(clean=False)
 class WalletNode:
@@ -139,14 +157,14 @@ class WalletNode:
     async def start(self, spv_node: 'SPVNode', seed=None, connect=True, config=None):
         wallets_dir = os.path.join(self.data_path, 'wallets')
-        os.mkdir(wallets_dir)
         wallet_file_name = os.path.join(wallets_dir, 'my_wallet.json')
-        with open(wallet_file_name, 'w') as wallet_file:
-            wallet_file.write('{"version": 1, "accounts": []}\n')
+        if not os.path.isdir(wallets_dir):
+            os.mkdir(wallets_dir)
+            with open(wallet_file_name, 'w') as wallet_file:
+                wallet_file.write('{"version": 1, "accounts": []}\n')
         self.manager = self.manager_class.from_config({
             'ledgers': {
                 self.ledger_class.get_id(): {
-                    'use_go_hub': not strtobool(os.environ.get('ENABLE_LEGACY_SEARCH') or 'yes'),
                     'api_port': self.port,
                     'explicit_servers': [(spv_node.hostname, spv_node.port)],
                     'default_servers': Config.lbryum_servers.default,
@@ -154,6 +172,7 @@ class WalletNode:
                     'known_hubs': config.known_hubs if config else KnownHubsList(),
                     'hub_timeout': 30,
                     'concurrent_hub_requests': 32,
+                    'fee_per_name_char': 200000
                 }
             },
             'wallets': [wallet_file_name]
@@ -184,55 +203,83 @@ class WalletNode:

 class SPVNode:

-    def __init__(self, coin_class, node_number=1):
-        self.coin_class = coin_class
+    def __init__(self, node_number=1):
+        self.node_number = node_number
         self.controller = None
         self.data_path = None
-        self.server = None
+        self.server: Optional[HubServerService] = None
+        self.writer: Optional[BlockchainProcessorService] = None
+        self.es_writer: Optional[ElasticSyncService] = None
         self.hostname = 'localhost'
         self.port = 50001 + node_number  # avoid conflict with default daemon
         self.udp_port = self.port
+        self.elastic_notifier_port = 19080 + node_number
+        self.elastic_services = f'localhost:9200/localhost:{self.elastic_notifier_port}'
         self.session_timeout = 600
-        self.rpc_port = '0'  # disabled by default
-        self.stopped = False
+        self.stopped = True
         self.index_name = uuid4().hex

-    async def start(self, blockchain_node: 'BlockchainNode', extraconf=None):
-        self.data_path = tempfile.mkdtemp()
-        conf = {
-            'DESCRIPTION': '',
-            'PAYMENT_ADDRESS': '',
-            'DAILY_FEE': '0',
-            'DB_DIRECTORY': self.data_path,
-            'DAEMON_URL': blockchain_node.rpc_url,
-            'REORG_LIMIT': '100',
-            'HOST': self.hostname,
-            'TCP_PORT': str(self.port),
-            'UDP_PORT': str(self.udp_port),
-            'SESSION_TIMEOUT': str(self.session_timeout),
-            'MAX_QUERY_WORKERS': '0',
-            'INDIVIDUAL_TAG_INDEXES': '',
-            'RPC_PORT': self.rpc_port,
-            'ES_INDEX_PREFIX': self.index_name,
-            'ES_MODE': 'writer',
-        }
-        if extraconf:
-            conf.update(extraconf)
-        # TODO: don't use os.environ
-        os.environ.update(conf)
-        self.server = Server(Env(self.coin_class))
-        self.server.bp.mempool.refresh_secs = self.server.bp.prefetcher.polling_delay = 0.5
-        await self.server.start()
+    async def start(self, lbcwallet_node: 'LBCWalletNode', extraconf=None):
+        if not self.stopped:
+            log.warning("spv node is already running")
+            return
+        self.stopped = False
+        try:
+            self.data_path = tempfile.mkdtemp()
+            conf = {
+                'description': '',
+                'payment_address': '',
+                'daily_fee': '0',
+                'db_dir': self.data_path,
+                'daemon_url': lbcwallet_node.rpc_url,
+                'reorg_limit': 100,
+                'host': self.hostname,
+                'tcp_port': self.port,
+                'udp_port': self.udp_port,
+                'elastic_services': self.elastic_services,
+                'session_timeout': self.session_timeout,
+                'max_query_workers': 0,
+                'es_index_prefix': self.index_name,
+                'chain': 'regtest',
+                'index_address_status': False
+            }
+            if extraconf:
+                conf.update(extraconf)
+            self.writer = BlockchainProcessorService(
+                BlockchainEnv(db_dir=self.data_path, daemon_url=lbcwallet_node.rpc_url,
+                              reorg_limit=100, max_query_workers=0, chain='regtest', index_address_status=False)
+            )
+            self.server = HubServerService(ServerEnv(**conf))
+            self.es_writer = ElasticSyncService(
+                ElasticEnv(
+                    db_dir=self.data_path, reorg_limit=100, max_query_workers=0, chain='regtest',
+                    elastic_notifier_port=self.elastic_notifier_port,
+                    es_index_prefix=self.index_name,
+                    filtering_channel_ids=(extraconf or {}).get('filtering_channel_ids'),
+                    blocking_channel_ids=(extraconf or {}).get('blocking_channel_ids')
+                )
+            )
+            await self.writer.start()
+            await self.es_writer.start()
+            await self.server.start()
+        except Exception as e:
+            self.stopped = True
+            log.exception("failed to start spv node")
+            raise e

     async def stop(self, cleanup=True):
         if self.stopped:
+            log.warning("spv node is already stopped")
             return
         try:
-            await self.server.db.search_index.delete_index()
-            await self.server.db.search_index.stop()
             await self.server.stop()
+            await self.es_writer.delete_index()
+            await self.es_writer.stop()
+            await self.writer.stop()
             self.stopped = True
+        except Exception as e:
+            log.exception("failed to stop spv node")
+            raise e
         finally:
             cleanup and self.cleanup()

@@ -240,18 +287,19 @@ class SPVNode:
         shutil.rmtree(self.data_path, ignore_errors=True)
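For orientation, a rough sketch of the lifecycle the new SPVNode implies: the block processor, the ElasticSearch sync service and the hub server are started in that order and torn down in reverse. Names mirror the diff; the wallet node argument is assumed to be an already started LBCWalletNode:

async def spv_round_trip(lbcwallet_node):
    spv = SPVNode(node_number=2)        # ports are derived from node_number to avoid clashes
    await spv.start(lbcwallet_node)     # starts writer, then es_writer, then the hub server
    try:
        print(spv.hostname, spv.port, spv.elastic_services)
    finally:
        await spv.stop(cleanup=True)    # stops the server, deletes the ES index, stops the writer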
-class BlockchainProcess(asyncio.SubprocessProtocol):
+class LBCDProcess(asyncio.SubprocessProtocol):

     IGNORE_OUTPUT = [
         b'keypool keep',
         b'keypool reserve',
         b'keypool return',
-        b'Block submitted',
     ]

     def __init__(self):
         self.ready = asyncio.Event()
         self.stopped = asyncio.Event()
-        self.log = log.getChild('blockchain')
+        self.log = log.getChild('lbcd')

     def pipe_data_received(self, fd, data):
         if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
@@ -262,7 +310,7 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
             if b'Error:' in data:
                 self.ready.set()
                 raise SystemError(data.decode())
-            if b'Done loading' in data:
+            if b'RPCS: RPC server listening on' in data:
                 self.ready.set()

     def process_exited(self):
@@ -270,39 +318,57 @@ class BlockchainProcess(asyncio.SubprocessProtocol):
         self.ready.set()
-class BlockchainNode:
+class WalletProcess(asyncio.SubprocessProtocol):

-    P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
-    BECH32_ADDRESS = "bech32"
+    IGNORE_OUTPUT = [
+    ]
+
+    def __init__(self):
+        self.ready = asyncio.Event()
+        self.stopped = asyncio.Event()
+        self.log = log.getChild('lbcwallet')
+        self.transport: Optional[asyncio.transports.SubprocessTransport] = None
+
+    def pipe_data_received(self, fd, data):
+        if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT):
+            if b'Error:' in data:
+                self.log.error(data.decode())
+            else:
+                self.log.info(data.decode())
+        if b'Error:' in data:
+            self.ready.set()
+            raise SystemError(data.decode())
+        if b'WLLT: Finished rescan' in data:
+            self.ready.set()
+
+    def process_exited(self):
+        self.stopped.set()
+        self.ready.set()
+
+
+class LBCDNode:

     def __init__(self, url, daemon, cli):
         self.latest_release_url = url
         self.project_dir = os.path.dirname(os.path.dirname(__file__))
         self.bin_dir = os.path.join(self.project_dir, 'bin')
         self.daemon_bin = os.path.join(self.bin_dir, daemon)
         self.cli_bin = os.path.join(self.bin_dir, cli)
-        self.log = log.getChild('blockchain')
-        self.data_path = None
+        self.log = log.getChild('lbcd')
+        self.data_path = tempfile.mkdtemp()
         self.protocol = None
         self.transport = None
-        self.block_expected = 0
         self.hostname = 'localhost'
-        self.peerport = 9246 + 2  # avoid conflict with default peer port
-        self.rpcport = 9245 + 2  # avoid conflict with default rpc port
+        self.peerport = 29246
+        self.rpcport = 29245
         self.rpcuser = 'rpcuser'
         self.rpcpassword = 'rpcpassword'
-        self.stopped = False
-        self.restart_ready = asyncio.Event()
-        self.restart_ready.set()
+        self.stopped = True
         self.running = asyncio.Event()

     @property
     def rpc_url(self):
         return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.rpcport}/'

-    def is_expected_block(self, e: BlockHeightEvent):
-        return self.block_expected == e.height
-
     @property
     def exists(self):
         return (
@@ -311,6 +377,12 @@ class BlockchainNode:
         )

     def download(self):
+        uname = platform.uname()
+        target_os = str.lower(uname.system)
+        target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
+        target_platform = target_os + '_' + target_arch
+        self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
         downloaded_file = os.path.join(
             self.bin_dir,
             self.latest_release_url[self.latest_release_url.rfind('/')+1:]
@@ -344,72 +416,206 @@ class BlockchainNode:
         return self.exists or self.download()

     async def start(self):
-        assert self.ensure()
-        self.data_path = tempfile.mkdtemp()
-        loop = asyncio.get_event_loop()
-        asyncio.get_child_watcher().attach_loop(loop)
-        command = [
-            self.daemon_bin,
-            f'-datadir={self.data_path}', '-printtoconsole', '-regtest', '-server', '-txindex',
-            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}',
-            f'-port={self.peerport}'
-        ]
-        self.log.info(' '.join(command))
-        while not self.stopped:
-            if self.running.is_set():
-                await asyncio.sleep(1)
-                continue
-            await self.restart_ready.wait()
-            try:
-                self.transport, self.protocol = await loop.subprocess_exec(
-                    BlockchainProcess, *command
-                )
-                await self.protocol.ready.wait()
-                assert not self.protocol.stopped.is_set()
-                self.running.set()
-            except asyncio.CancelledError:
-                self.running.clear()
-                raise
-            except Exception as e:
-                self.running.clear()
-                log.exception('failed to start lbrycrdd', exc_info=e)
+        if not self.stopped:
+            return
+        self.stopped = False
+        try:
+            assert self.ensure()
+            loop = asyncio.get_event_loop()
+            asyncio.get_child_watcher().attach_loop(loop)
+            command = [
+                self.daemon_bin,
+                '--notls',
+                f'--datadir={self.data_path}',
+                '--regtest', f'--listen=127.0.0.1:{self.peerport}', f'--rpclisten=127.0.0.1:{self.rpcport}',
+                '--txindex', f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}'
+            ]
+            self.log.info(' '.join(command))
+            self.transport, self.protocol = await loop.subprocess_exec(
+                LBCDProcess, *command
+            )
+            await self.protocol.ready.wait()
+            assert not self.protocol.stopped.is_set()
+            self.running.set()
+        except asyncio.CancelledError:
+            self.running.clear()
+            self.stopped = True
+            raise
+        except Exception as e:
+            self.running.clear()
+            self.stopped = True
+            log.exception('failed to start lbcd', exc_info=e)
+            raise

     async def stop(self, cleanup=True):
if self.stopped:
return
try:
if self.transport:
self.transport.terminate()
await self.protocol.stopped.wait()
self.transport.close()
except Exception as e:
log.exception('failed to stop lbcd', exc_info=e)
raise
finally:
self.log.info("Done shutting down " + self.daemon_bin)
self.stopped = True
if cleanup:
self.cleanup()
self.running.clear()
def cleanup(self):
assert self.stopped
shutil.rmtree(self.data_path, ignore_errors=True)
class LBCWalletNode:
P2SH_SEGWIT_ADDRESS = "p2sh-segwit"
BECH32_ADDRESS = "bech32"
def __init__(self, url, lbcwallet, cli):
self.latest_release_url = url
self.project_dir = os.path.dirname(os.path.dirname(__file__))
self.bin_dir = os.path.join(self.project_dir, 'bin')
self.lbcwallet_bin = os.path.join(self.bin_dir, lbcwallet)
self.cli_bin = os.path.join(self.bin_dir, cli)
self.log = log.getChild('lbcwallet')
self.protocol = None
self.transport = None
self.hostname = 'localhost'
self.lbcd_rpcport = 29245
self.lbcwallet_rpcport = 29244
self.rpcuser = 'rpcuser'
self.rpcpassword = 'rpcpassword'
self.data_path = tempfile.mkdtemp()
self.stopped = True self.stopped = True
self.running = asyncio.Event()
self.block_expected = 0
self.mining_addr = ''
@property
def rpc_url(self):
# FIXME: somehow the hub/sdk doesn't learn the blocks through the Wallet RPC port, why?
# return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcwallet_rpcport}/'
return f'http://{self.rpcuser}:{self.rpcpassword}@{self.hostname}:{self.lbcd_rpcport}/'
def is_expected_block(self, e: BlockHeightEvent):
return self.block_expected == e.height
@property
def exists(self):
return (
os.path.exists(self.lbcwallet_bin)
)
def download(self):
uname = platform.uname()
target_os = str.lower(uname.system)
target_arch = str.replace(uname.machine, 'x86_64', 'amd64')
target_platform = target_os + '_' + target_arch
self.latest_release_url = str.replace(self.latest_release_url, 'TARGET_PLATFORM', target_platform)
downloaded_file = os.path.join(
self.bin_dir,
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
)
if not os.path.exists(self.bin_dir):
os.mkdir(self.bin_dir)
if not os.path.exists(downloaded_file):
self.log.info('Downloading: %s', self.latest_release_url)
with urllib.request.urlopen(self.latest_release_url) as response:
with open(downloaded_file, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
self.log.info('Extracting: %s', downloaded_file)
if downloaded_file.endswith('.zip'):
with zipfile.ZipFile(downloaded_file) as dotzip:
dotzip.extractall(self.bin_dir)
# zipfile bug https://bugs.python.org/issue15795
os.chmod(self.lbcwallet_bin, 0o755)
elif downloaded_file.endswith('.tar.gz'):
with tarfile.open(downloaded_file) as tar:
tar.extractall(self.bin_dir)
return self.exists
def ensure(self):
return self.exists or self.download()
async def start(self):
assert self.ensure()
loop = asyncio.get_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
command = [
self.lbcwallet_bin,
'--noservertls', '--noclienttls',
'--regtest',
f'--rpcconnect=127.0.0.1:{self.lbcd_rpcport}', f'--rpclisten=127.0.0.1:{self.lbcwallet_rpcport}',
'--createtemp', f'--appdata={self.data_path}',
f'--username={self.rpcuser}', f'--password={self.rpcpassword}'
]
self.log.info(' '.join(command))
try:
self.transport, self.protocol = await loop.subprocess_exec(
WalletProcess, *command
)
self.protocol.transport = self.transport
await self.protocol.ready.wait()
assert not self.protocol.stopped.is_set()
self.running.set()
self.stopped = False
except asyncio.CancelledError:
self.running.clear()
raise
except Exception as e:
self.running.clear()
log.exception('failed to start lbcwallet', exc_info=e)
def cleanup(self):
assert self.stopped
shutil.rmtree(self.data_path, ignore_errors=True)
+    async def stop(self, cleanup=True):
+        if self.stopped:
+            return
         try:
             self.transport.terminate()
             await self.protocol.stopped.wait()
             self.transport.close()
+        except Exception as e:
+            log.exception('failed to stop lbcwallet', exc_info=e)
+            raise
         finally:
+            self.log.info("Done shutting down " + self.lbcwallet_bin)
+            self.stopped = True
             if cleanup:
                 self.cleanup()
+            self.running.clear()

-    async def clear_mempool(self):
-        self.restart_ready.clear()
-        self.transport.terminate()
-        await self.protocol.stopped.wait()
-        self.transport.close()
-        self.running.clear()
-        os.remove(os.path.join(self.data_path, 'regtest', 'mempool.dat'))
-        self.restart_ready.set()
-        await self.running.wait()
-
-    def cleanup(self):
-        shutil.rmtree(self.data_path, ignore_errors=True)
     async def _cli_cmnd(self, *args):
         cmnd_args = [
-            self.cli_bin, f'-datadir={self.data_path}', '-regtest',
-            f'-rpcuser={self.rpcuser}', f'-rpcpassword={self.rpcpassword}', f'-rpcport={self.rpcport}'
+            self.cli_bin,
+            f'--rpcuser={self.rpcuser}', f'--rpcpass={self.rpcpassword}', '--notls', '--regtest', '--wallet'
         ] + list(args)
         self.log.info(' '.join(cmnd_args))
         loop = asyncio.get_event_loop()
         asyncio.get_child_watcher().attach_loop(loop)
         process = await asyncio.create_subprocess_exec(
-            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+            *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
         )
-        out, _ = await process.communicate()
+        out, err = await process.communicate()
         result = out.decode().strip()
+        err = err.decode().strip()
+        if len(result) <= 0 and err.startswith('-'):
+            raise Exception(err)
+        if err and 'creating a default config file' not in err:
+            log.warning(err)
         self.log.info(result)
         if result.startswith('error code'):
             raise Exception(result)

@@ -417,7 +623,14 @@ class BlockchainNode:
     def generate(self, blocks):
         self.block_expected += blocks
-        return self._cli_cmnd('generate', str(blocks))
+        return self._cli_cmnd('generatetoaddress', str(blocks), self.mining_addr)
+
+    def generate_to_address(self, blocks, addr):
+        self.block_expected += blocks
+        return self._cli_cmnd('generatetoaddress', str(blocks), addr)
+
+    def wallet_passphrase(self, passphrase, timeout):
+        return self._cli_cmnd('walletpassphrase', passphrase, str(timeout))

     def invalidate_block(self, blockhash):
         return self._cli_cmnd('invalidateblock', blockhash)

@@ -434,7 +647,7 @@ class BlockchainNode:
     def get_raw_change_address(self):
         return self._cli_cmnd('getrawchangeaddress')

-    def get_new_address(self, address_type):
+    def get_new_address(self, address_type='legacy'):
         return self._cli_cmnd('getnewaddress', "", address_type)

     async def get_balance(self):
@@ -450,140 +663,13 @@ class BlockchainNode:
         return self._cli_cmnd('createrawtransaction', json.dumps(inputs), json.dumps(outputs))

     async def sign_raw_transaction_with_wallet(self, tx):
-        return json.loads(await self._cli_cmnd('signrawtransactionwithwallet', tx))['hex'].encode()
+        # the "withwallet" portion should only come into play if we are doing segwit.
+        # and "withwallet" doesn't exist on lbcd yet.
+        result = await self._cli_cmnd('signrawtransaction', tx)
+        return json.loads(result)['hex'].encode()

     def decode_raw_transaction(self, tx):
         return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode())

     def get_raw_transaction(self, txid):
         return self._cli_cmnd('getrawtransaction', txid, '1')
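A short, illustrative use of the lbcwallet CLI wrappers defined above (the passphrase and block count are example values only):

async def fund_and_mine(lbcwallet_node, blocks=10):
    await lbcwallet_node.wallet_passphrase("password", 3600)    # unlock the wallet for an hour
    address = await lbcwallet_node.get_new_address()            # 'legacy' address by default
    await lbcwallet_node.generate_to_address(blocks, address)   # mine directly to that address
    return address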
class HubProcess(asyncio.SubprocessProtocol):
def __init__(self):
self.ready = asyncio.Event()
self.stopped = asyncio.Event()
self.log = log.getChild('hub')
def pipe_data_received(self, fd, data):
if self.log:
self.log.info(data.decode())
if b'error' in data.lower():
self.ready.set()
raise SystemError(data.decode())
if b'listening on' in data:
self.ready.set()
str_lines = str(data.decode()).split("\n")
for line in str_lines:
if 'releaseTime' in line:
print(line)
def process_exited(self):
self.stopped.set()
self.ready.set()
class HubNode:
def __init__(self, url, daemon, spv_node):
self.spv_node = spv_node
self.debug = False
self.latest_release_url = url
self.project_dir = os.path.dirname(os.path.dirname(__file__))
self.bin_dir = os.path.join(self.project_dir, 'bin')
self.daemon_bin = os.path.join(self.bin_dir, daemon)
self.cli_bin = os.path.join(self.bin_dir, daemon)
self.log = log.getChild('hub')
self.transport = None
self.protocol = None
self.hostname = 'localhost'
self.rpcport = 50051 # avoid conflict with default rpc port
self.stopped = False
self.restart_ready = asyncio.Event()
self.restart_ready.set()
self.running = asyncio.Event()
@property
def exists(self):
return (
os.path.exists(self.cli_bin) and
os.path.exists(self.daemon_bin)
)
def download(self):
downloaded_file = os.path.join(
self.bin_dir,
self.latest_release_url[self.latest_release_url.rfind('/')+1:]
)
if not os.path.exists(self.bin_dir):
os.mkdir(self.bin_dir)
if not os.path.exists(downloaded_file):
self.log.info('Downloading: %s', self.latest_release_url)
with urllib.request.urlopen(self.latest_release_url) as response:
with open(downloaded_file, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
self.log.info('Extracting: %s', downloaded_file)
if downloaded_file.endswith('.zip'):
with zipfile.ZipFile(downloaded_file) as dotzip:
dotzip.extractall(self.bin_dir)
# zipfile bug https://bugs.python.org/issue15795
os.chmod(self.cli_bin, 0o755)
os.chmod(self.daemon_bin, 0o755)
elif downloaded_file.endswith('.tar.gz'):
with tarfile.open(downloaded_file) as tar:
tar.extractall(self.bin_dir)
os.chmod(self.daemon_bin, 0o755)
return self.exists
def ensure(self):
return self.exists or self.download()
async def start(self):
assert self.ensure()
loop = asyncio.get_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
command = [
self.daemon_bin, 'serve', '--esindex', self.spv_node.index_name + 'claims', '--debug'
]
self.log.info(' '.join(command))
while not self.stopped:
if self.running.is_set():
await asyncio.sleep(1)
continue
await self.restart_ready.wait()
try:
if not self.debug:
self.transport, self.protocol = await loop.subprocess_exec(
HubProcess, *command
)
await self.protocol.ready.wait()
assert not self.protocol.stopped.is_set()
self.running.set()
except asyncio.CancelledError:
self.running.clear()
raise
except Exception as e:
self.running.clear()
log.exception('failed to start hub', exc_info=e)
async def stop(self, cleanup=True):
self.stopped = True
try:
if not self.debug:
self.transport.terminate()
await self.protocol.stopped.wait()
self.transport.close()
finally:
if cleanup:
self.cleanup()
def cleanup(self):
pass


@@ -61,8 +61,10 @@ class ConductorService:
         #set_logging(
         #    self.stack.ledger_module, logging.DEBUG, WebSocketLogHandler(self.send_message)
         #)
-        self.stack.blockchain_started or await self.stack.start_blockchain()
-        self.send_message({'type': 'service', 'name': 'blockchain', 'port': self.stack.blockchain_node.port})
+        self.stack.lbcd_started or await self.stack.start_lbcd()
+        self.send_message({'type': 'service', 'name': 'lbcd', 'port': self.stack.lbcd_node.port})
+        self.stack.lbcwallet_started or await self.stack.start_lbcwallet()
+        self.send_message({'type': 'service', 'name': 'lbcwallet', 'port': self.stack.lbcwallet_node.port})
         self.stack.spv_started or await self.stack.start_spv()
         self.send_message({'type': 'service', 'name': 'spv', 'port': self.stack.spv_node.port})
         self.stack.wallet_started or await self.stack.start_wallet()
@@ -74,7 +76,7 @@ class ConductorService:
     async def generate(self, request):
         data = await request.post()
         blocks = data.get('blocks', 1)
-        await self.stack.blockchain_node.generate(int(blocks))
+        await self.stack.lbcwallet_node.generate(int(blocks))
         return json_response({'blocks': blocks})

     async def transfer(self, request):
@@ -85,11 +87,14 @@ class ConductorService:
         if not address:
             raise ValueError("No address was provided.")
         amount = data.get('amount', 1)
-        txid = await self.stack.blockchain_node.send_to_address(address, amount)
         if self.stack.wallet_started:
-            await self.stack.wallet_node.ledger.on_transaction.where(
-                lambda e: e.tx.id == txid and e.address == address
+            watcher = self.stack.wallet_node.ledger.on_transaction.where(
+                lambda e: e.address == address  # and e.tx.id == txid -- might stall; see send_to_address_and_wait
             )
+            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
+            await watcher
+        else:
+            txid = await self.stack.lbcwallet_node.send_to_address(address, amount)
         return json_response({
             'address': address,
             'amount': amount,
@@ -98,7 +103,7 @@ class ConductorService:
     async def balance(self, _):
         return json_response({
-            'balance': await self.stack.blockchain_node.get_balance()
+            'balance': await self.stack.lbcwallet_node.get_balance()
         })

     async def log(self, request):
@@ -129,7 +134,7 @@ class ConductorService:
             'type': 'status',
             'height': self.stack.wallet_node.ledger.headers.height,
             'balance': satoshis_to_coins(await self.stack.wallet_node.account.get_balance()),
-            'miner': await self.stack.blockchain_node.get_balance()
+            'miner': await self.stack.lbcwallet_node.get_balance()
         })

     def send_message(self, msg):


@@ -395,8 +395,8 @@ class RPCSession(SessionBase):
         namespace=NAMESPACE, labelnames=("version",)
     )

-    def __init__(self, *, framer=None, loop=None, connection=None):
-        super().__init__(framer=framer, loop=loop)
+    def __init__(self, *, framer=None, connection=None):
+        super().__init__(framer=framer)
         self.connection = connection or self.default_connection()
         self.client_version = 'unknown'


@@ -17,6 +17,7 @@ OP_HASH160 = 0xa9
 OP_EQUALVERIFY = 0x88
 OP_CHECKSIG = 0xac
 OP_CHECKMULTISIG = 0xae
+OP_CHECKLOCKTIMEVERIFY = 0xb1
 OP_EQUAL = 0x87
 OP_PUSHDATA1 = 0x4c
 OP_PUSHDATA2 = 0x4d
@@ -276,7 +277,7 @@ class Template:
         elif isinstance(opcode, PUSH_INTEGER):
             data = values[opcode.name]
             source.write_many(push_data(
-                data.to_bytes((data.bit_length() + 7) // 8, byteorder='little')
+                data.to_bytes((data.bit_length() + 8) // 8, byteorder='little', signed=True)
             ))
         elif isinstance(opcode, PUSH_SUBSCRIPT):
             data = values[opcode.name]
@@ -357,19 +358,27 @@ class InputScript(Script):
     REDEEM_PUBKEY_HASH = Template('pubkey_hash', (
         PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey')
     ))
-    REDEEM_SCRIPT = Template('script', (
+    MULTI_SIG_SCRIPT = Template('multi_sig', (
         SMALL_INTEGER('signatures_count'), PUSH_MANY('pubkeys'), SMALL_INTEGER('pubkeys_count'),
         OP_CHECKMULTISIG
     ))
-    REDEEM_SCRIPT_HASH = Template('script_hash', (
-        OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', REDEEM_SCRIPT)
+    REDEEM_SCRIPT_HASH_MULTI_SIG = Template('script_hash+multi_sig', (
+        OP_0, PUSH_MANY('signatures'), PUSH_SUBSCRIPT('script', MULTI_SIG_SCRIPT)
+    ))
+    TIME_LOCK_SCRIPT = Template('timelock', (
+        PUSH_INTEGER('height'), OP_CHECKLOCKTIMEVERIFY, OP_DROP,
+        # rest is identical to OutputScript.PAY_PUBKEY_HASH:
+        OP_DUP, OP_HASH160, PUSH_SINGLE('pubkey_hash'), OP_EQUALVERIFY, OP_CHECKSIG
+    ))
+    REDEEM_SCRIPT_HASH_TIME_LOCK = Template('script_hash+timelock', (
+        PUSH_SINGLE('signature'), PUSH_SINGLE('pubkey'), PUSH_SUBSCRIPT('script', TIME_LOCK_SCRIPT)
     ))

     templates = [
         REDEEM_PUBKEY,
         REDEEM_PUBKEY_HASH,
-        REDEEM_SCRIPT_HASH,
-        REDEEM_SCRIPT
+        REDEEM_SCRIPT_HASH_TIME_LOCK,
+        REDEEM_SCRIPT_HASH_MULTI_SIG,
     ]

     @classmethod
@@ -380,20 +389,38 @@ class InputScript(Script):
         })

     @classmethod
-    def redeem_script_hash(cls, signatures, pubkeys):
-        return cls(template=cls.REDEEM_SCRIPT_HASH, values={
+    def redeem_multi_sig_script_hash(cls, signatures, pubkeys):
+        return cls(template=cls.REDEEM_SCRIPT_HASH_MULTI_SIG, values={
             'signatures': signatures,
-            'script': cls.redeem_script(signatures, pubkeys)
+            'script': cls(template=cls.MULTI_SIG_SCRIPT, values={
+                'signatures_count': len(signatures),
+                'pubkeys': pubkeys,
+                'pubkeys_count': len(pubkeys)
+            })
         })

     @classmethod
-    def redeem_script(cls, signatures, pubkeys):
-        return cls(template=cls.REDEEM_SCRIPT, values={
-            'signatures_count': len(signatures),
-            'pubkeys': pubkeys,
-            'pubkeys_count': len(pubkeys)
-        })
+    def redeem_time_lock_script_hash(cls, signature, pubkey, height=None, pubkey_hash=None, script_source=None):
+        if height and pubkey_hash:
+            script = cls(template=cls.TIME_LOCK_SCRIPT, values={
+                'height': height,
+                'pubkey_hash': pubkey_hash
+            })
+        elif script_source:
+            script = cls(source=script_source, template=cls.TIME_LOCK_SCRIPT)
+            script.parse(script.template)
+        else:
+            raise ValueError("script_source or both height and pubkey_hash are required.")
+        return cls(template=cls.REDEEM_SCRIPT_HASH_TIME_LOCK, values={
+            'signature': signature,
+            'pubkey': pubkey,
+            'script': script
+        })
+
+    @property
+    def is_script_hash(self):
+        return self.template.name.startswith('script_hash+')
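An illustrative use of the new time-lock templates above; the byte strings are placeholders standing in for a real signature and public key:

height = 600_000
pubkey_hash = bytes(20)   # hash160 of the key allowed to spend after the height
signature = bytes(71)     # placeholder DER signature
pubkey = bytes(33)        # placeholder compressed public key

redeem = InputScript.redeem_time_lock_script_hash(
    signature, pubkey, height=height, pubkey_hash=pubkey_hash
)
assert redeem.is_script_hash   # the template name starts with 'script_hash+'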
 class OutputScript(Script):

@@ -460,21 +487,6 @@ class OutputScript(Script):
         UPDATE_CLAIM_OPCODES + PAY_SCRIPT_HASH.opcodes
     ))
SELL_SCRIPT = Template('sell_script', (
OP_VERIFY, OP_DROP, OP_DROP, OP_DROP, PUSH_INTEGER('price'), OP_PRICECHECK
))
SELL_CLAIM = Template('sell_claim+pay_script_hash', (
OP_SELL_CLAIM, PUSH_SINGLE('claim_id'), PUSH_SUBSCRIPT('sell_script', SELL_SCRIPT),
PUSH_SUBSCRIPT('receive_script', InputScript.REDEEM_SCRIPT), OP_2DROP, OP_2DROP
) + PAY_SCRIPT_HASH.opcodes)
BUY_CLAIM = Template('buy_claim+pay_script_hash', (
OP_BUY_CLAIM, PUSH_SINGLE('sell_id'),
PUSH_SINGLE('claim_id'), PUSH_SINGLE('claim_version'),
PUSH_SINGLE('owner_pubkey_hash'), PUSH_SINGLE('negotiation_signature'),
OP_2DROP, OP_2DROP, OP_2DROP,
) + PAY_SCRIPT_HASH.opcodes)
     templates = [
         PAY_PUBKEY_FULL,
         PAY_PUBKEY_HASH,
@@ -489,8 +501,6 @@ class OutputScript(Script):
         SUPPORT_CLAIM_DATA_SCRIPT,
         UPDATE_CLAIM_PUBKEY,
         UPDATE_CLAIM_SCRIPT,
-        SELL_CLAIM, SELL_SCRIPT,
-        BUY_CLAIM,
     ]

     @classmethod
@@ -550,30 +560,6 @@ class OutputScript(Script):
             'pubkey_hash': pubkey_hash
         })
@classmethod
def sell_script(cls, price):
return cls(template=cls.SELL_SCRIPT, values={
'price': price,
})
@classmethod
def sell_claim(cls, claim_id, price, signatures, pubkeys):
return cls(template=cls.SELL_CLAIM, values={
'claim_id': claim_id,
'sell_script': OutputScript.sell_script(price),
'receive_script': InputScript.redeem_script(signatures, pubkeys)
})
@classmethod
def buy_claim(cls, sell_id, claim_id, claim_version, owner_pubkey_hash, negotiation_signature):
return cls(template=cls.BUY_CLAIM, values={
'sell_id': sell_id,
'claim_id': claim_id,
'claim_version': claim_version,
'owner_pubkey_hash': owner_pubkey_hash,
'negotiation_signature': negotiation_signature,
})
     @property
     def is_pay_pubkey_hash(self):
         return self.template.name.endswith('pay_pubkey_hash')
@@ -602,17 +588,6 @@ class OutputScript(Script):
     def is_support_claim_data(self):
         return self.template.name.startswith('support_claim+data+')

-    @property
-    def is_sell_claim(self):
-        return self.template.name.startswith('sell_claim+')
-
-    @property
-    def is_buy_claim(self):
-        return self.template.name.startswith('buy_claim+')
-
     @property
     def is_claim_involved(self):
-        return any((
-            self.is_claim_name, self.is_support_claim, self.is_update_claim,
-            self.is_sell_claim, self.is_buy_claim
-        ))
+        return any((self.is_claim_name, self.is_support_claim, self.is_update_claim))

File diff suppressed because it is too large.


@@ -1,34 +0,0 @@
import logging
import traceback
import argparse
from lbry.wallet.server.env import Env
from lbry.wallet.server.server import Server
def get_argument_parser():
parser = argparse.ArgumentParser(
prog="lbry-hub"
)
Env.contribute_to_arg_parser(parser)
return parser
def main():
parser = get_argument_parser()
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
logging.info('lbry.server starting')
logging.getLogger('aiohttp').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
try:
server = Server(Env.from_arg_parser(args))
server.run()
except Exception:
traceback.print_exc()
logging.critical('lbry.server terminated abnormally')
else:
logging.info('lbry.server terminated normally')
if __name__ == "__main__":
main()


@@ -1,386 +0,0 @@
import re
import struct
from typing import List
from hashlib import sha256
from decimal import Decimal
from collections import namedtuple
import lbry.wallet.server.tx as lib_tx
from lbry.wallet.script import OutputScript, OP_CLAIM_NAME, OP_UPDATE_CLAIM, OP_SUPPORT_CLAIM
from lbry.wallet.server.tx import DeserializerSegWit
from lbry.wallet.server.util import cachedproperty, subclasses
from lbry.wallet.server.hash import Base58, hash160, double_sha256, hash_to_hex_str, HASHX_LEN
from lbry.wallet.server.daemon import Daemon, LBCDaemon
from lbry.wallet.server.script import ScriptPubKey, OpCodes
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.session import LBRYElectrumX, LBRYSessionManager
from lbry.wallet.server.block_processor import BlockProcessor
Block = namedtuple("Block", "raw header transactions")
OP_RETURN = OpCodes.OP_RETURN
class CoinError(Exception):
"""Exception raised for coin-related errors."""
class Coin:
"""Base class of coin hierarchy."""
REORG_LIMIT = 200
# Not sure if these are coin-specific
RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?')
VALUE_PER_COIN = 100000000
CHUNK_SIZE = 2016
BASIC_HEADER_SIZE = 80
STATIC_BLOCK_HEADERS = True
SESSIONCLS = LBRYElectrumX
DESERIALIZER = lib_tx.Deserializer
DAEMON = Daemon
BLOCK_PROCESSOR = BlockProcessor
SESSION_MANAGER = LBRYSessionManager
DB = LevelDB
HEADER_VALUES = [
'version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce'
]
HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from
MEMPOOL_HISTOGRAM_REFRESH_SECS = 500
XPUB_VERBYTES = bytes('????', 'utf-8')
XPRV_VERBYTES = bytes('????', 'utf-8')
ENCODE_CHECK = Base58.encode_check
DECODE_CHECK = Base58.decode_check
# Peer discovery
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
PEERS: List[str] = []
@classmethod
def lookup_coin_class(cls, name, net):
"""Return a coin class given name and network.
Raise an exception if unrecognised."""
req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
for coin in subclasses(Coin):
if (coin.NAME.lower() == name.lower() and
coin.NET.lower() == net.lower()):
coin_req_attrs = req_attrs.copy()
missing = [attr for attr in coin_req_attrs
if not hasattr(coin, attr)]
if missing:
raise CoinError(f'coin {name} missing {missing} attributes')
return coin
raise CoinError(f'unknown coin {name} and network {net} combination')
@classmethod
def sanitize_url(cls, url):
# Remove surrounding ws and trailing /s
url = url.strip().rstrip('/')
match = cls.RPC_URL_REGEX.match(url)
if not match:
raise CoinError(f'invalid daemon URL: "{url}"')
if match.groups()[1] is None:
url += f':{cls.RPC_PORT:d}'
if not url.startswith('http://') and not url.startswith('https://'):
url = 'http://' + url
return url + '/'
@classmethod
def genesis_block(cls, block):
"""Check the Genesis block is the right one for this coin.
Return the block less its unspendable coinbase.
"""
header = cls.block_header(block, 0)
header_hex_hash = hash_to_hex_str(cls.header_hash(header))
if header_hex_hash != cls.GENESIS_HASH:
raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')
return header + bytes(1)
@classmethod
def hashX_from_script(cls, script):
"""Returns a hashX from a script, or None if the script is provably
unspendable so the output can be dropped.
"""
if script and script[0] == OP_RETURN:
return None
return sha256(script).digest()[:HASHX_LEN]
@staticmethod
def lookup_xverbytes(verbytes):
"""Return a (is_xpub, coin_class) pair given xpub/xprv verbytes."""
# Order means BTC testnet will override NMC testnet
for coin in subclasses(Coin):
if verbytes == coin.XPUB_VERBYTES:
return True, coin
if verbytes == coin.XPRV_VERBYTES:
return False, coin
raise CoinError('version bytes unrecognised')
@classmethod
def address_to_hashX(cls, address):
"""Return a hashX given a coin address."""
return cls.hashX_from_script(cls.pay_to_address_script(address))
@classmethod
def P2PKH_address_from_hash160(cls, hash160):
"""Return a P2PKH address given a public key."""
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160)
@classmethod
def P2PKH_address_from_pubkey(cls, pubkey):
"""Return a coin address given a public key."""
return cls.P2PKH_address_from_hash160(hash160(pubkey))
@classmethod
def P2SH_address_from_hash160(cls, hash160):
"""Return a coin address given a hash160."""
assert len(hash160) == 20
return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160)
@classmethod
def hash160_to_P2PKH_script(cls, hash160):
return ScriptPubKey.P2PKH_script(hash160)
@classmethod
def hash160_to_P2PKH_hashX(cls, hash160):
return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160))
@classmethod
def pay_to_address_script(cls, address):
"""Return a pubkey script that pays to a pubkey hash.
Pass the address (either P2PKH or P2SH) in base58 form.
"""
raw = cls.DECODE_CHECK(address)
# Require version byte(s) plus hash160.
verbyte = -1
verlen = len(raw) - 20
if verlen > 0:
verbyte, hash160 = raw[:verlen], raw[verlen:]
if verbyte == cls.P2PKH_VERBYTE:
return cls.hash160_to_P2PKH_script(hash160)
if verbyte in cls.P2SH_VERBYTES:
return ScriptPubKey.P2SH_script(hash160)
raise CoinError(f'invalid address: {address}')
@classmethod
def privkey_WIF(cls, privkey_bytes, compressed):
"""Return the private key encoded in Wallet Import Format."""
payload = bytearray(cls.WIF_BYTE) + privkey_bytes
if compressed:
payload.append(0x01)
return cls.ENCODE_CHECK(payload)
@classmethod
def header_hash(cls, header):
"""Given a header return hash"""
return double_sha256(header)
@classmethod
def header_prevhash(cls, header):
"""Given a header return previous hash"""
return header[4:36]
@classmethod
def static_header_offset(cls, height):
"""Given a header height return its offset in the headers file.
If header sizes change at some point, this is the only code
that needs updating."""
assert cls.STATIC_BLOCK_HEADERS
return height * cls.BASIC_HEADER_SIZE
@classmethod
def static_header_len(cls, height):
"""Given a header height return its length."""
return (cls.static_header_offset(height + 1)
- cls.static_header_offset(height))
@classmethod
def block_header(cls, block, height):
"""Returns the block header given a block and its height."""
return block[:cls.static_header_len(height)]
@classmethod
def block(cls, raw_block, height):
"""Return a Block namedtuple given a raw block and its height."""
header = cls.block_header(raw_block, height)
txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block()
return Block(raw_block, header, txs)
@classmethod
def transaction(cls, raw_tx: bytes):
"""Return a Block namedtuple given a raw block and its height."""
return cls.DESERIALIZER(raw_tx).read_tx()
@classmethod
def decimal_value(cls, value):
"""Return the number of standard coin units as a Decimal given a
quantity of smallest units.
For example 1 BTC is returned for 100 million satoshis.
"""
return Decimal(value) / cls.VALUE_PER_COIN
@classmethod
def electrum_header(cls, header, height):
h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header)))
# Add the height that is not present in the header itself
h['block_height'] = height
# Convert bytes to str
h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash'])
h['merkle_root'] = hash_to_hex_str(h['merkle_root'])
return h
class LBC(Coin):
DAEMON = LBCDaemon
SESSIONCLS = LBRYElectrumX
SESSION_MANAGER = LBRYSessionManager
DESERIALIZER = DeserializerSegWit
DB = LevelDB
NAME = "LBRY"
SHORTNAME = "LBC"
NET = "mainnet"
BASIC_HEADER_SIZE = 112
CHUNK_SIZE = 96
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("55")
P2SH_VERBYTES = bytes.fromhex("7A")
WIF_BYTE = bytes.fromhex("1C")
GENESIS_HASH = ('9c89283ba0f3227f6c03b70216b9f665'
'f0118d5e0fa729cedf4fb34d6a34f463')
TX_COUNT = 2716936
TX_COUNT_HEIGHT = 329554
TX_PER_BLOCK = 1
RPC_PORT = 9245
REORG_LIMIT = 200
nOriginalClaimExpirationTime = 262974
nExtendedClaimExpirationTime = 2102400
nExtendedClaimExpirationForkHeight = 400155
nNormalizedNameForkHeight = 539940 # targeting 21 March 2019
nMinTakeoverWorkaroundHeight = 496850
nMaxTakeoverWorkaroundHeight = 658300 # targeting 30 Oct 2019
nWitnessForkHeight = 680770 # targeting 11 Dec 2019
nAllClaimsInMerkleForkHeight = 658310 # targeting 30 Oct 2019
proportionalDelayFactor = 32
maxTakeoverDelay = 4032
PEERS = [
]
@classmethod
def genesis_block(cls, block):
'''Check the Genesis block is the right one for this coin.
Return the block less its unspendable coinbase.
'''
header = cls.block_header(block, 0)
header_hex_hash = hash_to_hex_str(cls.header_hash(header))
if header_hex_hash != cls.GENESIS_HASH:
raise CoinError(f'genesis block has hash {header_hex_hash} expected {cls.GENESIS_HASH}')
return block
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits, nonce = struct.unpack('<III', header[100:112])
return {
'version': version,
'prev_block_hash': hash_to_hex_str(header[4:36]),
'merkle_root': hash_to_hex_str(header[36:68]),
'claim_trie_root': hash_to_hex_str(header[68:100]),
'timestamp': timestamp,
'bits': bits,
'nonce': nonce,
'block_height': height,
}
@cachedproperty
def address_handlers(self):
return ScriptPubKey.PayToHandlers(
address=self.P2PKH_address_from_hash160,
script_hash=self.P2SH_address_from_hash160,
pubkey=self.P2PKH_address_from_pubkey,
unspendable=lambda: None,
strange=self.claim_address_handler,
)
@classmethod
def address_from_script(cls, script):
'''Given a pk_script, return the address it pays to, or None.'''
return ScriptPubKey.pay_to(cls.address_handlers, script)
@classmethod
def claim_address_handler(cls, script):
'''Parse a claim script, returns the address
'''
output = OutputScript(script)
if output.is_pay_pubkey_hash:
return cls.P2PKH_address_from_hash160(output.values['pubkey_hash'])
if output.is_pay_script_hash:
return cls.P2SH_address_from_hash160(output.values['script_hash'])
if output.is_pay_pubkey:
return cls.P2PKH_address_from_pubkey(output.values['pubkey'])
if output.is_return_data:
return None
return None
@classmethod
def hashX_from_script(cls, script):
'''
Overrides electrumx hashX from script by extracting addresses from claim scripts.
'''
if script and script[0] == OpCodes.OP_RETURN or not script:
return None
if script[0] in [
OP_CLAIM_NAME,
OP_UPDATE_CLAIM,
OP_SUPPORT_CLAIM,
]:
return cls.address_to_hashX(cls.claim_address_handler(script))
else:
return sha256(script).digest()[:HASHX_LEN]
@classmethod
def get_expiration_height(cls, last_updated_height: int, extended: bool = False) -> int:
if extended:
return last_updated_height + cls.nExtendedClaimExpirationTime
if last_updated_height < cls.nExtendedClaimExpirationForkHeight:
return last_updated_height + cls.nOriginalClaimExpirationTime
return last_updated_height + cls.nExtendedClaimExpirationTime
@classmethod
def get_delay_for_name(cls, blocks_of_continuous_ownership: int) -> int:
return min(blocks_of_continuous_ownership // cls.proportionalDelayFactor, cls.maxTakeoverDelay)
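For context, a worked example of the takeover-delay formula defined just above, restated as a standalone illustration using the mainnet constants from this class:

proportionalDelayFactor = 32
maxTakeoverDelay = 4032

def get_delay_for_name(blocks_of_continuous_ownership: int) -> int:
    return min(blocks_of_continuous_ownership // proportionalDelayFactor, maxTakeoverDelay)

assert get_delay_for_name(320) == 10            # 320 // 32 blocks of delay
assert get_delay_for_name(1_000_000) == 4032    # capped at maxTakeoverDelay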
class LBCRegTest(LBC):
NET = "regtest"
GENESIS_HASH = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
XPUB_VERBYTES = bytes.fromhex('043587cf')
XPRV_VERBYTES = bytes.fromhex('04358394')
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTES = bytes.fromhex("c4")
nOriginalClaimExpirationTime = 500
nExtendedClaimExpirationTime = 600
nExtendedClaimExpirationForkHeight = 800
nNormalizedNameForkHeight = 250
nMinTakeoverWorkaroundHeight = -1
nMaxTakeoverWorkaroundHeight = -1
nWitnessForkHeight = 150
nAllClaimsInMerkleForkHeight = 350
class LBCTestNet(LBCRegTest):
NET = "testnet"
GENESIS_HASH = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'


@@ -1,375 +0,0 @@
import asyncio
import itertools
import json
import time
from functools import wraps
import aiohttp
from prometheus_client import Gauge, Histogram
from lbry.utils import LRUCacheWithMetrics
from lbry.wallet.rpc.jsonrpc import RPCError
from lbry.wallet.server.util import hex_to_bytes, class_logger
from lbry.wallet.rpc import JSONRPC
class DaemonError(Exception):
"""Raised when the daemon returns an error in its results."""
class WarmingUpError(Exception):
"""Internal - when the daemon is warming up."""
class WorkQueueFullError(Exception):
"""Internal - when the daemon's work queue is full."""
NAMESPACE = "wallet_server"
class Daemon:
"""Handles connections to a daemon at the given URL."""
WARMING_UP = -28
id_counter = itertools.count()
lbrycrd_request_time_metric = Histogram(
"lbrycrd_request", "lbrycrd requests count", namespace=NAMESPACE, labelnames=("method",)
)
lbrycrd_pending_count_metric = Gauge(
"lbrycrd_pending_count", "Number of lbrycrd rpcs that are in flight", namespace=NAMESPACE,
labelnames=("method",)
)
def __init__(self, coin, url, max_workqueue=10, init_retry=0.25,
max_retry=4.0):
self.coin = coin
self.logger = class_logger(__name__, self.__class__.__name__)
self.set_url(url)
# Limit concurrent RPC calls to this number.
# See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
self.init_retry = init_retry
self.max_retry = max_retry
self._height = None
self.available_rpcs = {}
self.connector = aiohttp.TCPConnector()
self._block_hash_cache = LRUCacheWithMetrics(100000)
self._block_cache = LRUCacheWithMetrics(2 ** 13, metric_name='block', namespace=NAMESPACE)
async def close(self):
if self.connector:
await self.connector.close()
self.connector = None
def set_url(self, url):
"""Set the URLS to the given list, and switch to the first one."""
urls = url.split(',')
urls = [self.coin.sanitize_url(url) for url in urls]
for n, url in enumerate(urls):
status = '' if n else ' (current)'
logged_url = self.logged_url(url)
self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
self.url_index = 0
self.urls = urls
def current_url(self):
"""Returns the current daemon URL."""
return self.urls[self.url_index]
def logged_url(self, url=None):
"""The host and port part, for logging."""
url = url or self.current_url()
return url[url.rindex('@') + 1:]
def failover(self):
"""Call to fail-over to the next daemon URL.
Returns False if there is only one, otherwise True.
"""
if len(self.urls) > 1:
self.url_index = (self.url_index + 1) % len(self.urls)
self.logger.info(f'failing over to {self.logged_url()}')
return True
return False
def client_session(self):
"""An aiohttp client session."""
return aiohttp.ClientSession(connector=self.connector, connector_owner=False)
async def _send_data(self, data):
if not self.connector:
raise asyncio.CancelledError('Tried to send request during shutdown.')
async with self.workqueue_semaphore:
async with self.client_session() as session:
async with session.post(self.current_url(), data=data) as resp:
kind = resp.headers.get('Content-Type', None)
if kind == 'application/json':
return await resp.json()
# bitcoind's HTTP protocol "handling" is a bad joke
text = await resp.text()
if 'Work queue depth exceeded' in text:
raise WorkQueueFullError
text = text.strip() or resp.reason
self.logger.error(text)
raise DaemonError(text)
async def _send(self, payload, processor):
"""Send a payload to be converted to JSON.
Handles temporary connection issues. Daemon response errors
are raised through DaemonError.
"""
def log_error(error):
nonlocal last_error_log, retry
now = time.time()
if now - last_error_log > 60:
last_error_log = now
self.logger.error(f'{error} Retrying occasionally...')
if retry == self.max_retry and self.failover():
retry = 0
on_good_message = None
last_error_log = 0
data = json.dumps(payload)
retry = self.init_retry
methods = tuple(
[payload['method']] if isinstance(payload, dict) else [request['method'] for request in payload]
)
while True:
try:
for method in methods:
self.lbrycrd_pending_count_metric.labels(method=method).inc()
result = await self._send_data(data)
result = processor(result)
if on_good_message:
self.logger.info(on_good_message)
return result
except asyncio.TimeoutError:
log_error('timeout error.')
except aiohttp.ServerDisconnectedError:
log_error('disconnected.')
on_good_message = 'connection restored'
except aiohttp.ClientConnectionError:
log_error('connection problem - is your daemon running?')
on_good_message = 'connection restored'
except aiohttp.ClientError as e:
log_error(f'daemon error: {e}')
on_good_message = 'running normally'
except WarmingUpError:
log_error('starting up checking blocks.')
on_good_message = 'running normally'
except WorkQueueFullError:
log_error('work queue full.')
on_good_message = 'running normally'
finally:
for method in methods:
self.lbrycrd_pending_count_metric.labels(method=method).dec()
await asyncio.sleep(retry)
retry = max(min(self.max_retry, retry * 2), self.init_retry)
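The retry loop above doubles its sleep after each failure, bounded by max_retry; a small standalone sketch of that schedule with the default init_retry=0.25 and max_retry=4.0:

init_retry, max_retry = 0.25, 4.0
retry, delays = init_retry, []
for _ in range(6):
    delays.append(retry)
    retry = max(min(max_retry, retry * 2), init_retry)
print(delays)   # [0.25, 0.5, 1.0, 2.0, 4.0, 4.0]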
async def _send_single(self, method, params=None):
"""Send a single request to the daemon."""
start = time.perf_counter()
def processor(result):
err = result['error']
if not err:
return result['result']
if err.get('code') == self.WARMING_UP:
raise WarmingUpError
raise DaemonError(err)
payload = {'method': method, 'id': next(self.id_counter)}
if params:
payload['params'] = params
result = await self._send(payload, processor)
self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
return result
async def _send_vector(self, method, params_iterable, replace_errs=False):
"""Send several requests of the same method.
The result will be an array of the same length as params_iterable.
If replace_errs is true, any item with an error is returned as None,
otherwise an exception is raised."""
start = time.perf_counter()
def processor(result):
errs = [item['error'] for item in result if item['error']]
if any(err.get('code') == self.WARMING_UP for err in errs):
raise WarmingUpError
if not errs or replace_errs:
return [item['result'] for item in result]
raise DaemonError(errs)
payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
for p in params_iterable]
result = []
if payload:
result = await self._send(payload, processor)
self.lbrycrd_request_time_metric.labels(method=method).observe(time.perf_counter() - start)
return result
async def _is_rpc_available(self, method):
"""Return whether given RPC method is available in the daemon.
Results are cached and the daemon will generally not be queried with
the same method more than once."""
available = self.available_rpcs.get(method)
if available is None:
available = True
try:
await self._send_single(method)
except DaemonError as e:
err = e.args[0]
error_code = err.get("code")
available = error_code != JSONRPC.METHOD_NOT_FOUND
self.available_rpcs[method] = available
return available
async def block_hex_hashes(self, first, count):
"""Return the hex hashes of count block starting at height first."""
if first + count < (self.cached_height() or 0) - 200:
return await self._cached_block_hex_hashes(first, count)
params_iterable = ((h, ) for h in range(first, first + count))
return await self._send_vector('getblockhash', params_iterable)
async def _cached_block_hex_hashes(self, first, count):
"""Return the hex hashes of count block starting at height first."""
cached = self._block_hash_cache.get((first, count))
if cached:
return cached
params_iterable = ((h, ) for h in range(first, first + count))
self._block_hash_cache[(first, count)] = await self._send_vector('getblockhash', params_iterable)
return self._block_hash_cache[(first, count)]
async def deserialised_block(self, hex_hash):
"""Return the deserialised block with the given hex hash."""
if hex_hash not in self._block_cache:
block = await self._send_single('getblock', (hex_hash, True))
self._block_cache[hex_hash] = block
return block
return self._block_cache[hex_hash]
async def raw_blocks(self, hex_hashes):
"""Return the raw binary blocks with the given hex hashes."""
params_iterable = ((h, False) for h in hex_hashes)
blocks = await self._send_vector('getblock', params_iterable)
# Convert hex string to bytes
return [hex_to_bytes(block) for block in blocks]
async def mempool_hashes(self):
"""Update our record of the daemon's mempool hashes."""
return await self._send_single('getrawmempool')
async def estimatefee(self, block_count):
"""Return the fee estimate for the block count. Units are whole
currency units per KB, e.g. 0.00000995, or -1 if no estimate
is available.
"""
args = (block_count, )
if await self._is_rpc_available('estimatesmartfee'):
estimate = await self._send_single('estimatesmartfee', args)
return estimate.get('feerate', -1)
return await self._send_single('estimatefee', args)
async def getnetworkinfo(self):
"""Return the result of the 'getnetworkinfo' RPC call."""
return await self._send_single('getnetworkinfo')
async def relayfee(self):
"""The minimum fee a low-priority tx must pay in order to be accepted
to the daemon's memory pool."""
network_info = await self.getnetworkinfo()
return network_info['relayfee']
async def getrawtransaction(self, hex_hash, verbose=False):
"""Return the serialized raw transaction with the given hash."""
# Cast to int because some coin daemons are old and require it
return await self._send_single('getrawtransaction',
(hex_hash, int(verbose)))
async def getrawtransactions(self, hex_hashes, replace_errs=True):
"""Return the serialized raw transactions with the given hashes.
Replaces errors with None by default."""
params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
txs = await self._send_vector('getrawtransaction', params_iterable,
replace_errs=replace_errs)
# Convert hex strings to bytes
return [hex_to_bytes(tx) if tx else None for tx in txs]
async def broadcast_transaction(self, raw_tx):
"""Broadcast a transaction to the network."""
return await self._send_single('sendrawtransaction', (raw_tx, ))
async def height(self):
"""Query the daemon for its current height."""
self._height = await self._send_single('getblockcount')
return self._height
def cached_height(self):
"""Return the cached daemon height.
If the daemon has not been queried yet this returns None."""
return self._height
def handles_errors(decorated_function):
@wraps(decorated_function)
async def wrapper(*args, **kwargs):
try:
return await decorated_function(*args, **kwargs)
except DaemonError as daemon_error:
raise RPCError(1, daemon_error.args[0])
return wrapper
class LBCDaemon(Daemon):
@handles_errors
async def getrawtransaction(self, hex_hash, verbose=False):
return await super().getrawtransaction(hex_hash=hex_hash, verbose=verbose)
@handles_errors
async def getclaimbyid(self, claim_id):
'''Given a claim id, retrieves claim information.'''
return await self._send_single('getclaimbyid', (claim_id,))
@handles_errors
async def getclaimsbyids(self, claim_ids):
'''Given a list of claim ids, batches calls to retrieve claim information.'''
return await self._send_vector('getclaimbyid', ((claim_id,) for claim_id in claim_ids))
@handles_errors
async def getclaimsforname(self, name):
'''Given a name, retrieves all claims matching that name.'''
return await self._send_single('getclaimsforname', (name,))
@handles_errors
async def getclaimsfortx(self, txid):
'''Given a txid, returns the claims it makes.'''
return await self._send_single('getclaimsfortx', (txid,)) or []
@handles_errors
async def getnameproof(self, name, block_hash=None):
'''Given a name and optional block_hash, returns a name proof and winner, if any.'''
return await self._send_single('getnameproof', (name, block_hash,) if block_hash else (name,))
@handles_errors
async def getvalueforname(self, name):
'''Given a name, returns the winning claim value.'''
return await self._send_single('getvalueforname', (name,))
@handles_errors
async def getnamesintrie(self):
'''Returns the names in the claim trie.'''
return await self._send_single('getnamesintrie')
@handles_errors
async def claimname(self, name, hexvalue, amount):
'''Claim a name, used for functional tests only.'''
return await self._send_single('claimname', (name, hexvalue, float(amount)))
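# Hypothetical usage sketch (not in the original file): because of handles_errors, callers of
# LBCDaemon only ever see RPCError, never the transport-level DaemonError.
async def _example_fetch_claim(daemon: LBCDaemon, claim_id: str):
    try:
        return await daemon.getclaimbyid(claim_id)
    except RPCError:
        # handles_errors re-raised the underlying DaemonError as RPCError(1, message)
        return None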

@@ -1,42 +0,0 @@
import enum
@enum.unique
class DB_PREFIXES(enum.Enum):
claim_to_support = b'K'
support_to_claim = b'L'
claim_to_txo = b'E'
txo_to_claim = b'G'
claim_to_channel = b'I'
channel_to_claim = b'J'
claim_short_id_prefix = b'F'
effective_amount = b'D'
claim_expiration = b'O'
claim_takeover = b'P'
pending_activation = b'Q'
activated_claim_and_support = b'R'
active_amount = b'S'
repost = b'V'
reposted_claim = b'W'
undo = b'M'
claim_diff = b'Y'
tx = b'B'
block_hash = b'C'
header = b'H'
tx_num = b'N'
tx_count = b'T'
tx_hash = b'X'
utxo = b'u'
hashx_utxo = b'h'
hashx_history = b'x'
db_state = b's'
channel_count = b'Z'
support_amount = b'a'
block_txs = b'b'
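# Illustration (not in the original file): a concrete key is the one-byte prefix followed by a
# struct-packed payload, e.g. the undo-record key for block height 1000 used by PrefixDB later:
#   DB_PREFIXES.undo.value + struct.Struct('>Q').pack(1000)   ->  b'M' + 8-byte big-endian height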

@@ -1,447 +0,0 @@
import typing
CLAIM_TYPES = {
'stream': 1,
'channel': 2,
'repost': 3,
'collection': 4,
}
STREAM_TYPES = {
'video': 1,
'audio': 2,
'image': 3,
'document': 4,
'binary': 5,
'model': 6,
}
# 9/21/2020
MOST_USED_TAGS = {
"gaming",
"people & blogs",
"entertainment",
"music",
"pop culture",
"education",
"technology",
"blockchain",
"news",
"funny",
"science & technology",
"learning",
"gameplay",
"news & politics",
"comedy",
"bitcoin",
"beliefs",
"nature",
"art",
"economics",
"film & animation",
"lets play",
"games",
"sports",
"howto & style",
"game",
"cryptocurrency",
"playstation 4",
"automotive",
"crypto",
"mature",
"sony interactive entertainment",
"walkthrough",
"tutorial",
"video game",
"weapons",
"playthrough",
"pc",
"anime",
"how to",
"btc",
"fun",
"ethereum",
"food",
"travel & events",
"minecraft",
"science",
"autos & vehicles",
"play",
"politics",
"commentary",
"twitch",
"ps4live",
"love",
"ps4",
"nonprofits & activism",
"ps4share",
"fortnite",
"xbox",
"porn",
"video games",
"trump",
"español",
"money",
"music video",
"nintendo",
"movie",
"coronavirus",
"donald trump",
"steam",
"trailer",
"android",
"podcast",
"xbox one",
"survival",
"audio",
"linux",
"travel",
"funny moments",
"litecoin",
"animation",
"gamer",
"lets",
"playstation",
"bitcoin news",
"history",
"xxx",
"fox news",
"dance",
"god",
"adventure",
"liberal",
"2020",
"horror",
"government",
"freedom",
"reaction",
"meme",
"photography",
"truth",
"health",
"lbry",
"family",
"online",
"eth",
"crypto news",
"diy",
"trading",
"gold",
"memes",
"world",
"space",
"lol",
"covid-19",
"rpg",
"humor",
"democrat",
"film",
"call of duty",
"tech",
"religion",
"conspiracy",
"rap",
"cnn",
"hangoutsonair",
"unboxing",
"fiction",
"conservative",
"cars",
"hoa",
"epic",
"programming",
"progressive",
"cryptocurrency news",
"classical",
"jesus",
"movies",
"book",
"ps3",
"republican",
"fitness",
"books",
"multiplayer",
"animals",
"pokemon",
"bitcoin price",
"facebook",
"sharefactory",
"criptomonedas",
"cod",
"bible",
"business",
"stream",
"comics",
"how",
"fail",
"nsfw",
"new music",
"satire",
"pets & animals",
"computer",
"classical music",
"indie",
"musica",
"msnbc",
"fps",
"mod",
"sport",
"sony",
"ripple",
"auto",
"rock",
"marvel",
"complete",
"mining",
"political",
"mobile",
"pubg",
"hip hop",
"flat earth",
"xbox 360",
"reviews",
"vlogging",
"latest news",
"hack",
"tarot",
"iphone",
"media",
"cute",
"christian",
"free speech",
"trap",
"war",
"remix",
"ios",
"xrp",
"spirituality",
"song",
"league of legends",
"cat"
}
MATURE_TAGS = [
'nsfw', 'porn', 'xxx', 'mature', 'adult', 'sex'
]
def normalize_tag(tag):
return tag.replace(" ", "_").replace("&", "and").replace("-", "_")
COMMON_TAGS = {
tag: normalize_tag(tag) for tag in list(MOST_USED_TAGS)
}
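# For illustration (not in the original file), the normalization rule above maps, e.g.:
#   normalize_tag("science & technology") == "science_and_technology"
#   normalize_tag("covid-19") == "covid_19"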
INDEXED_LANGUAGES = [
'none',
'en',
'aa',
'ab',
'ae',
'af',
'ak',
'am',
'an',
'ar',
'as',
'av',
'ay',
'az',
'ba',
'be',
'bg',
'bh',
'bi',
'bm',
'bn',
'bo',
'br',
'bs',
'ca',
'ce',
'ch',
'co',
'cr',
'cs',
'cu',
'cv',
'cy',
'da',
'de',
'dv',
'dz',
'ee',
'el',
'eo',
'es',
'et',
'eu',
'fa',
'ff',
'fi',
'fj',
'fo',
'fr',
'fy',
'ga',
'gd',
'gl',
'gn',
'gu',
'gv',
'ha',
'he',
'hi',
'ho',
'hr',
'ht',
'hu',
'hy',
'hz',
'ia',
'id',
'ie',
'ig',
'ii',
'ik',
'io',
'is',
'it',
'iu',
'ja',
'jv',
'ka',
'kg',
'ki',
'kj',
'kk',
'kl',
'km',
'kn',
'ko',
'kr',
'ks',
'ku',
'kv',
'kw',
'ky',
'la',
'lb',
'lg',
'li',
'ln',
'lo',
'lt',
'lu',
'lv',
'mg',
'mh',
'mi',
'mk',
'ml',
'mn',
'mr',
'ms',
'mt',
'my',
'na',
'nb',
'nd',
'ne',
'ng',
'nl',
'nn',
'no',
'nr',
'nv',
'ny',
'oc',
'oj',
'om',
'or',
'os',
'pa',
'pi',
'pl',
'ps',
'pt',
'qu',
'rm',
'rn',
'ro',
'ru',
'rw',
'sa',
'sc',
'sd',
'se',
'sg',
'si',
'sk',
'sl',
'sm',
'sn',
'so',
'sq',
'sr',
'ss',
'st',
'su',
'sv',
'sw',
'ta',
'te',
'tg',
'th',
'ti',
'tk',
'tl',
'tn',
'to',
'tr',
'ts',
'tt',
'tw',
'ty',
'ug',
'uk',
'ur',
'uz',
've',
'vi',
'vo',
'wa',
'wo',
'xh',
'yi',
'yo',
'za',
'zh',
'zu'
]
class ResolveResult(typing.NamedTuple):
name: str
normalized_name: str
claim_hash: bytes
tx_num: int
position: int
tx_hash: bytes
height: int
amount: int
short_url: str
is_controlling: bool
canonical_url: str
creation_height: int
activation_height: int
expiration_height: int
effective_amount: int
support_amount: int
reposted: int
last_takeover_height: typing.Optional[int]
claims_in_channel: typing.Optional[int]
channel_hash: typing.Optional[bytes]
reposted_claim_hash: typing.Optional[bytes]
signature_valid: typing.Optional[bool]

@@ -1,119 +0,0 @@
import struct
from typing import Optional
from lbry.wallet.server.db import DB_PREFIXES
from lbry.wallet.server.db.revertable import RevertableOpStack, RevertablePut, RevertableDelete
class KeyValueStorage:
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
raise NotImplementedError()
def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
include_key=True, include_value=True, fill_cache=True):
raise NotImplementedError()
def write_batch(self, transaction: bool = False):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
@property
def closed(self) -> bool:
raise NotImplementedError()
class PrefixDB:
UNDO_KEY_STRUCT = struct.Struct(b'>Q')
def __init__(self, db: KeyValueStorage, max_undo_depth: int = 200, unsafe_prefixes=None):
self._db = db
self._op_stack = RevertableOpStack(db.get, unsafe_prefixes=unsafe_prefixes)
self._max_undo_depth = max_undo_depth
def unsafe_commit(self):
"""
Write staged changes to the database without keeping undo information
Changes written cannot be undone
"""
try:
with self._db.write_batch(transaction=True) as batch:
batch_put = batch.put
batch_delete = batch.delete
for staged_change in self._op_stack:
if staged_change.is_put:
batch_put(staged_change.key, staged_change.value)
else:
batch_delete(staged_change.key)
finally:
self._op_stack.clear()
def commit(self, height: int):
"""
Write changes for a block height to the database and keep undo information so that the changes can be reverted
"""
undo_ops = self._op_stack.get_undo_ops()
delete_undos = []
if height > self._max_undo_depth:
delete_undos.extend(self._db.iterator(
start=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(0),
stop=DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height - self._max_undo_depth),
include_value=False
))
try:
with self._db.write_batch(transaction=True) as batch:
batch_put = batch.put
batch_delete = batch.delete
for staged_change in self._op_stack:
if staged_change.is_put:
batch_put(staged_change.key, staged_change.value)
else:
batch_delete(staged_change.key)
for undo_to_delete in delete_undos:
batch_delete(undo_to_delete)
batch_put(DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height), undo_ops)
finally:
self._op_stack.clear()
def rollback(self, height: int):
"""
Revert changes for a block height
"""
undo_key = DB_PREFIXES.undo.value + self.UNDO_KEY_STRUCT.pack(height)
self._op_stack.apply_packed_undo_ops(self._db.get(undo_key))
try:
with self._db.write_batch(transaction=True) as batch:
batch_put = batch.put
batch_delete = batch.delete
for staged_change in self._op_stack:
if staged_change.is_put:
batch_put(staged_change.key, staged_change.value)
else:
batch_delete(staged_change.key)
batch_delete(undo_key)
finally:
self._op_stack.clear()
def get(self, key: bytes, fill_cache: bool = True) -> Optional[bytes]:
return self._db.get(key, fill_cache=fill_cache)
def iterator(self, reverse=False, start=None, stop=None, include_start=True, include_stop=False, prefix=None,
include_key=True, include_value=True, fill_cache=True):
return self._db.iterator(
reverse=reverse, start=start, stop=stop, include_start=include_start, include_stop=include_stop,
prefix=prefix, include_key=include_key, include_value=include_value, fill_cache=fill_cache
)
def close(self):
if not self._db.closed:
self._db.close()
@property
def closed(self):
return self._db.closed
def stage_raw_put(self, key: bytes, value: bytes):
self._op_stack.append_op(RevertablePut(key, value))
def stage_raw_delete(self, key: bytes, value: bytes):
self._op_stack.append_op(RevertableDelete(key, value))
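# A rough usage sketch (not in the original file); `storage` stands for any concrete
# KeyValueStorage implementation and the key/value bytes are placeholders.
def _example_commit_and_rollback(storage: KeyValueStorage):
    prefix_db = PrefixDB(storage)
    prefix_db.stage_raw_put(DB_PREFIXES.claim_to_txo.value + b'claim-key', b'v1')  # staged RevertablePut
    prefix_db.commit(height=1000)   # flushes the batch and stores packed undo ops under the undo key for 1000
    # ... later, if block 1000 is reorged away, the stored undo ops delete the put again:
    prefix_db.rollback(height=1000)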

@@ -1 +0,0 @@
from .search import SearchIndex

@@ -1,100 +0,0 @@
INDEX_DEFAULT_SETTINGS = {
"settings":
{"analysis":
{"analyzer": {
"default": {"tokenizer": "whitespace", "filter": ["lowercase", "porter_stem"]}}},
"index":
{"refresh_interval": -1,
"number_of_shards": 1,
"number_of_replicas": 0,
"sort": {
"field": ["trending_score", "release_time"],
"order": ["desc", "desc"]
}}
},
"mappings": {
"properties": {
"claim_id": {
"fields": {
"keyword": {
"ignore_above": 256,
"type": "keyword"
}
},
"type": "text",
"index_prefixes": {
"min_chars": 1,
"max_chars": 10
}
},
"sd_hash": {
"fields": {
"keyword": {
"ignore_above": 96,
"type": "keyword"
}
},
"type": "text",
"index_prefixes": {
"min_chars": 1,
"max_chars": 4
}
},
"height": {"type": "integer"},
"claim_type": {"type": "byte"},
"censor_type": {"type": "byte"},
"trending_score": {"type": "double"},
"release_time": {"type": "long"}
}
}
}
FIELDS = {
'_id',
'claim_id', 'claim_type', 'claim_name', 'normalized_name',
'tx_id', 'tx_nout', 'tx_position',
'short_url', 'canonical_url',
'is_controlling', 'last_take_over_height',
'public_key_bytes', 'public_key_id', 'claims_in_channel',
'channel_id', 'signature', 'signature_digest', 'is_signature_valid',
'amount', 'effective_amount', 'support_amount',
'fee_amount', 'fee_currency',
'height', 'creation_height', 'activation_height', 'expiration_height',
'stream_type', 'media_type', 'censor_type',
'title', 'author', 'description',
'timestamp', 'creation_timestamp',
'duration', 'release_time',
'tags', 'languages', 'has_source', 'reposted_claim_type',
'reposted_claim_id', 'repost_count', 'sd_hash',
'trending_score', 'tx_num'
}
TEXT_FIELDS = {'author', 'canonical_url', 'channel_id', 'description', 'claim_id', 'censoring_channel_id',
'media_type', 'normalized_name', 'public_key_bytes', 'public_key_id', 'short_url', 'signature',
'claim_name', 'signature_digest', 'title', 'tx_id', 'fee_currency', 'reposted_claim_id',
'tags', 'sd_hash'}
RANGE_FIELDS = {
'height', 'creation_height', 'activation_height', 'expiration_height',
'timestamp', 'creation_timestamp', 'duration', 'release_time', 'fee_amount',
'tx_position', 'repost_count', 'limit_claims_per_channel',
'amount', 'effective_amount', 'support_amount',
'trending_score', 'censor_type', 'tx_num'
}
ALL_FIELDS = RANGE_FIELDS | TEXT_FIELDS | FIELDS
REPLACEMENTS = {
'claim_name': 'normalized_name',
'name': 'normalized_name',
'txid': 'tx_id',
'nout': 'tx_nout',
'trending_group': 'trending_score',
'trending_mixed': 'trending_score',
'trending_global': 'trending_score',
'trending_local': 'trending_score',
'reposted': 'repost_count',
'stream_types': 'stream_type',
'media_types': 'media_type',
'valid_channel_signature': 'is_signature_valid'
}
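# Illustration (not in the original file): REPLACEMENTS lets the search code accept legacy
# argument names, e.g.
#   REPLACEMENTS['reposted']        -> 'repost_count'
#   REPLACEMENTS['trending_mixed']  -> 'trending_score'
# Names with no entry (e.g. 'height') pass through unchanged via REPLACEMENTS.get(key, key).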

@@ -1,726 +0,0 @@
import time
import asyncio
import struct
from binascii import unhexlify
from collections import Counter, deque
from decimal import Decimal
from operator import itemgetter
from typing import Optional, List, Iterable, Union
from elasticsearch import AsyncElasticsearch, NotFoundError, ConnectionError
from elasticsearch.helpers import async_streaming_bulk
from lbry.error import ResolveCensoredError, TooManyClaimSearchParametersError
from lbry.schema.result import Outputs, Censor
from lbry.schema.tags import clean_tags
from lbry.schema.url import URL, normalize_name
from lbry.utils import LRUCache
from lbry.wallet.server.db.common import CLAIM_TYPES, STREAM_TYPES
from lbry.wallet.server.db.elasticsearch.constants import INDEX_DEFAULT_SETTINGS, REPLACEMENTS, FIELDS, TEXT_FIELDS, \
RANGE_FIELDS, ALL_FIELDS
from lbry.wallet.server.util import class_logger
from lbry.wallet.server.db.common import ResolveResult
class ChannelResolution(str):
@classmethod
def lookup_error(cls, url):
return LookupError(f'Could not find channel in "{url}".')
class StreamResolution(str):
@classmethod
def lookup_error(cls, url):
return LookupError(f'Could not find claim at "{url}".')
class IndexVersionMismatch(Exception):
def __init__(self, got_version, expected_version):
self.got_version = got_version
self.expected_version = expected_version
class SearchIndex:
VERSION = 1
def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200):
self.search_timeout = search_timeout
self.sync_timeout = 600 # won't hit that 99% of the time, but can hit on a fresh import
self.search_client: Optional[AsyncElasticsearch] = None
self.sync_client: Optional[AsyncElasticsearch] = None
self.index = index_prefix + 'claims'
self.logger = class_logger(__name__, self.__class__.__name__)
self.claim_cache = LRUCache(2 ** 15)
self.search_cache = LRUCache(2 ** 17)
self._elastic_host = elastic_host
self._elastic_port = elastic_port
async def get_index_version(self) -> int:
try:
template = await self.sync_client.indices.get_template(self.index)
return template[self.index]['version']
except NotFoundError:
return 0
async def set_index_version(self, version):
await self.sync_client.indices.put_template(
self.index, body={'version': version, 'index_patterns': ['ignored']}, ignore=400
)
async def start(self) -> bool:
if self.sync_client:
return False
hosts = [{'host': self._elastic_host, 'port': self._elastic_port}]
self.sync_client = AsyncElasticsearch(hosts, timeout=self.sync_timeout)
self.search_client = AsyncElasticsearch(hosts, timeout=self.search_timeout)
while True:
try:
await self.sync_client.cluster.health(wait_for_status='yellow')
break
except ConnectionError:
self.logger.warning("Failed to connect to Elasticsearch. Waiting for it!")
await asyncio.sleep(1)
res = await self.sync_client.indices.create(self.index, INDEX_DEFAULT_SETTINGS, ignore=400)
acked = res.get('acknowledged', False)
if acked:
await self.set_index_version(self.VERSION)
return acked
index_version = await self.get_index_version()
if index_version != self.VERSION:
self.logger.error("es search index has an incompatible version: %s vs %s", index_version, self.VERSION)
raise IndexVersionMismatch(index_version, self.VERSION)
await self.sync_client.indices.refresh(self.index)
return acked
def stop(self):
clients = [self.sync_client, self.search_client]
self.sync_client, self.search_client = None, None
return asyncio.ensure_future(asyncio.gather(*(client.close() for client in clients)))
def delete_index(self):
return self.sync_client.indices.delete(self.index, ignore_unavailable=True)
async def _consume_claim_producer(self, claim_producer):
count = 0
async for op, doc in claim_producer:
if op == 'delete':
yield {
'_index': self.index,
'_op_type': 'delete',
'_id': doc
}
else:
yield {
'doc': {key: value for key, value in doc.items() if key in ALL_FIELDS},
'_id': doc['claim_id'],
'_index': self.index,
'_op_type': 'update',
'doc_as_upsert': True
}
count += 1
if count % 100 == 0:
self.logger.info("Indexing in progress, %d claims.", count)
if count:
self.logger.info("Indexing done for %d claims.", count)
else:
self.logger.debug("Indexing done for %d claims.", count)
async def claim_consumer(self, claim_producer):
touched = set()
async for ok, item in async_streaming_bulk(self.sync_client, self._consume_claim_producer(claim_producer),
raise_on_error=False):
if not ok:
self.logger.warning("indexing failed for an item: %s", item)
else:
item = item.popitem()[1]
touched.add(item['_id'])
await self.sync_client.indices.refresh(self.index)
self.logger.debug("Indexing done.")
def update_filter_query(self, censor_type, blockdict, channels=False):
blockdict = {blocked.hex(): blocker.hex() for blocked, blocker in blockdict.items()}
if channels:
update = expand_query(channel_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
else:
update = expand_query(claim_id__in=list(blockdict.keys()), censor_type=f"<{censor_type}")
key = 'channel_id' if channels else 'claim_id'
update['script'] = {
"source": f"ctx._source.censor_type={censor_type}; "
f"ctx._source.censoring_channel_id=params[ctx._source.{key}];",
"lang": "painless",
"params": blockdict
}
return update
async def update_trending_score(self, params):
update_trending_score_script = """
double softenLBC(double lbc) { return (Math.pow(lbc, 1.0 / 3.0)); }
double logsumexp(double x, double y)
{
double top;
if(x > y)
top = x;
else
top = y;
double result = top + Math.log(Math.exp(x-top) + Math.exp(y-top));
return(result);
}
double logdiffexp(double big, double small)
{
return big + Math.log(1.0 - Math.exp(small - big));
}
double squash(double x)
{
if(x < 0.0)
return -Math.log(1.0 - x);
else
return Math.log(x + 1.0);
}
double unsquash(double x)
{
if(x < 0.0)
return 1.0 - Math.exp(-x);
else
return Math.exp(x) - 1.0;
}
double log_to_squash(double x)
{
return logsumexp(x, 0.0);
}
double squash_to_log(double x)
{
//assert x > 0.0;
return logdiffexp(x, 0.0);
}
double squashed_add(double x, double y)
{
// squash(unsquash(x) + unsquash(y)) but avoiding overflow.
// Cases where the signs are the same
if (x < 0.0 && y < 0.0)
return -logsumexp(-x, logdiffexp(-y, 0.0));
if (x >= 0.0 && y >= 0.0)
return logsumexp(x, logdiffexp(y, 0.0));
// Where the signs differ
if (x >= 0.0 && y < 0.0)
if (Math.abs(x) >= Math.abs(y))
return logsumexp(0.0, logdiffexp(x, -y));
else
return -logsumexp(0.0, logdiffexp(-y, x));
if (x < 0.0 && y >= 0.0)
{
// Addition is commutative, hooray for new math
return squashed_add(y, x);
}
return 0.0;
}
double squashed_multiply(double x, double y)
{
// squash(unsquash(x)*unsquash(y)) but avoiding overflow.
int sign;
if(x*y >= 0.0)
sign = 1;
else
sign = -1;
return sign*logsumexp(squash_to_log(Math.abs(x))
+ squash_to_log(Math.abs(y)), 0.0);
}
// Squashed inflated units
double inflateUnits(int height) {
double timescale = 576.0; // Half life of 400 = e-folding time of a day
// by coincidence, so may as well go with it
return log_to_squash(height / timescale);
}
double spikePower(double newAmount) {
if (newAmount < 50.0) {
return(0.5);
} else if (newAmount < 85.0) {
return(newAmount / 100.0);
} else {
return(0.85);
}
}
double spikeMass(double oldAmount, double newAmount) {
double softenedChange = softenLBC(Math.abs(newAmount - oldAmount));
double changeInSoftened = Math.abs(softenLBC(newAmount) - softenLBC(oldAmount));
double power = spikePower(newAmount);
if (oldAmount > newAmount) {
return -1.0 * Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
} else {
return Math.pow(changeInSoftened, power) * Math.pow(softenedChange, 1.0 - power);
}
}
for (i in params.src.changes) {
double units = inflateUnits(i.height);
if (ctx._source.trending_score == null) {
ctx._source.trending_score = 0.0;
}
double bigSpike = squashed_multiply(units, squash(spikeMass(i.prev_amount, i.new_amount)));
ctx._source.trending_score = squashed_add(ctx._source.trending_score, bigSpike);
}
"""
start = time.perf_counter()
def producer():
for claim_id, claim_updates in params.items():
yield {
'_id': claim_id,
'_index': self.index,
'_op_type': 'update',
'script': {
'lang': 'painless',
'source': update_trending_score_script,
'params': {'src': {
'changes': [
{
'height': p.height,
'prev_amount': p.prev_amount / 1E8,
'new_amount': p.new_amount / 1E8,
} for p in claim_updates
]
}}
},
}
if not params:
return
async for ok, item in async_streaming_bulk(self.sync_client, producer(), raise_on_error=False):
if not ok:
self.logger.warning("updating trending failed for an item: %s", item)
await self.sync_client.indices.refresh(self.index)
self.logger.info("updated trending scores in %ims", int((time.perf_counter() - start) * 1000))
async def apply_filters(self, blocked_streams, blocked_channels, filtered_streams, filtered_channels):
if filtered_streams:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_streams), slices=4)
await self.sync_client.indices.refresh(self.index)
if filtered_channels:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels), slices=4)
await self.sync_client.indices.refresh(self.index)
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.SEARCH, filtered_channels, True), slices=4)
await self.sync_client.indices.refresh(self.index)
if blocked_streams:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_streams), slices=4)
await self.sync_client.indices.refresh(self.index)
if blocked_channels:
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels), slices=4)
await self.sync_client.indices.refresh(self.index)
await self.sync_client.update_by_query(
self.index, body=self.update_filter_query(Censor.RESOLVE, blocked_channels, True), slices=4)
await self.sync_client.indices.refresh(self.index)
self.clear_caches()
def clear_caches(self):
self.search_cache.clear()
self.claim_cache.clear()
async def cached_search(self, kwargs):
total_referenced = []
cache_item = ResultCacheItem.from_cache(str(kwargs), self.search_cache)
if cache_item.result is not None:
return cache_item.result
async with cache_item.lock:
if cache_item.result:
return cache_item.result
censor = Censor(Censor.SEARCH)
if kwargs.get('no_totals'):
response, offset, total = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
else:
response, offset, total = await self.search(**kwargs)
censor.apply(response)
total_referenced.extend(response)
if censor.censored:
response, _, _ = await self.search(**kwargs, censor_type=Censor.NOT_CENSORED)
total_referenced.extend(response)
response = [
ResolveResult(
name=r['claim_name'],
normalized_name=r['normalized_name'],
claim_hash=r['claim_hash'],
tx_num=r['tx_num'],
position=r['tx_nout'],
tx_hash=r['tx_hash'],
height=r['height'],
amount=r['amount'],
short_url=r['short_url'],
is_controlling=r['is_controlling'],
canonical_url=r['canonical_url'],
creation_height=r['creation_height'],
activation_height=r['activation_height'],
expiration_height=r['expiration_height'],
effective_amount=r['effective_amount'],
support_amount=r['support_amount'],
last_takeover_height=r['last_take_over_height'],
claims_in_channel=r['claims_in_channel'],
channel_hash=r['channel_hash'],
reposted_claim_hash=r['reposted_claim_hash'],
reposted=r['reposted'],
signature_valid=r['signature_valid']
) for r in response
]
extra = [
ResolveResult(
name=r['claim_name'],
normalized_name=r['normalized_name'],
claim_hash=r['claim_hash'],
tx_num=r['tx_num'],
position=r['tx_nout'],
tx_hash=r['tx_hash'],
height=r['height'],
amount=r['amount'],
short_url=r['short_url'],
is_controlling=r['is_controlling'],
canonical_url=r['canonical_url'],
creation_height=r['creation_height'],
activation_height=r['activation_height'],
expiration_height=r['expiration_height'],
effective_amount=r['effective_amount'],
support_amount=r['support_amount'],
last_takeover_height=r['last_take_over_height'],
claims_in_channel=r['claims_in_channel'],
channel_hash=r['channel_hash'],
reposted_claim_hash=r['reposted_claim_hash'],
reposted=r['reposted'],
signature_valid=r['signature_valid']
) for r in await self._get_referenced_rows(total_referenced)
]
result = Outputs.to_base64(
response, extra, offset, total, censor
)
cache_item.result = result
return result
async def get_many(self, *claim_ids):
await self.populate_claim_cache(*claim_ids)
return filter(None, map(self.claim_cache.get, claim_ids))
async def populate_claim_cache(self, *claim_ids):
missing = [claim_id for claim_id in claim_ids if self.claim_cache.get(claim_id) is None]
if missing:
results = await self.search_client.mget(
index=self.index, body={"ids": missing}
)
for result in expand_result(filter(lambda doc: doc['found'], results["docs"])):
self.claim_cache.set(result['claim_id'], result)
async def search(self, **kwargs):
try:
return await self.search_ahead(**kwargs)
except NotFoundError:
return [], 0, 0
# return expand_result(result['hits']), 0, result.get('total', {}).get('value', 0)
async def search_ahead(self, **kwargs):
# 'limit_claims_per_channel' case. Fetch 1000 results, reorder, slice, inflate and return
per_channel_per_page = kwargs.pop('limit_claims_per_channel', 0) or 0
remove_duplicates = kwargs.pop('remove_duplicates', False)
page_size = kwargs.pop('limit', 10)
offset = kwargs.pop('offset', 0)
kwargs['limit'] = 1000
cache_item = ResultCacheItem.from_cache(f"ahead{per_channel_per_page}{kwargs}", self.search_cache)
if cache_item.result is not None:
reordered_hits = cache_item.result
else:
async with cache_item.lock:
if cache_item.result:
reordered_hits = cache_item.result
else:
query = expand_query(**kwargs)
search_hits = deque((await self.search_client.search(
query, index=self.index, track_total_hits=False,
_source_includes=['_id', 'channel_id', 'reposted_claim_id', 'creation_height']
))['hits']['hits'])
if remove_duplicates:
search_hits = self.__remove_duplicates(search_hits)
if per_channel_per_page > 0:
reordered_hits = self.__search_ahead(search_hits, page_size, per_channel_per_page)
else:
reordered_hits = [(hit['_id'], hit['_source']['channel_id']) for hit in search_hits]
cache_item.result = reordered_hits
result = list(await self.get_many(*(claim_id for claim_id, _ in reordered_hits[offset:(offset + page_size)])))
return result, 0, len(reordered_hits)
def __remove_duplicates(self, search_hits: deque) -> deque:
known_ids = {} # claim_id -> (creation_height, hit_id), where hit_id is either reposted claim id or original
dropped = set()
for hit in search_hits:
hit_height, hit_id = hit['_source']['creation_height'], hit['_source']['reposted_claim_id'] or hit['_id']
if hit_id not in known_ids:
known_ids[hit_id] = (hit_height, hit['_id'])
else:
previous_height, previous_id = known_ids[hit_id]
if hit_height < previous_height:
known_ids[hit_id] = (hit_height, hit['_id'])
dropped.add(previous_id)
else:
dropped.add(hit['_id'])
return deque(hit for hit in search_hits if hit['_id'] not in dropped)
def __search_ahead(self, search_hits: list, page_size: int, per_channel_per_page: int):
reordered_hits = []
channel_counters = Counter()
next_page_hits_maybe_check_later = deque()
while search_hits or next_page_hits_maybe_check_later:
if reordered_hits and len(reordered_hits) % page_size == 0:
channel_counters.clear()
elif not reordered_hits:
pass
else:
break # means last page was incomplete and we are left with bad replacements
for _ in range(len(next_page_hits_maybe_check_later)):
claim_id, channel_id = next_page_hits_maybe_check_later.popleft()
if per_channel_per_page > 0 and channel_counters[channel_id] < per_channel_per_page:
reordered_hits.append((claim_id, channel_id))
channel_counters[channel_id] += 1
else:
next_page_hits_maybe_check_later.append((claim_id, channel_id))
while search_hits:
hit = search_hits.popleft()
hit_id, hit_channel_id = hit['_id'], hit['_source']['channel_id']
if hit_channel_id is None or per_channel_per_page <= 0:
reordered_hits.append((hit_id, hit_channel_id))
elif channel_counters[hit_channel_id] < per_channel_per_page:
reordered_hits.append((hit_id, hit_channel_id))
channel_counters[hit_channel_id] += 1
if len(reordered_hits) % page_size == 0:
break
else:
next_page_hits_maybe_check_later.append((hit_id, hit_channel_id))
return reordered_hits
async def _get_referenced_rows(self, txo_rows: List[dict]):
txo_rows = [row for row in txo_rows if isinstance(row, dict)]
referenced_ids = set(filter(None, map(itemgetter('reposted_claim_id'), txo_rows)))
referenced_ids |= set(filter(None, (row['channel_id'] for row in txo_rows)))
referenced_ids |= set(filter(None, (row['censoring_channel_id'] for row in txo_rows)))
referenced_txos = []
if referenced_ids:
referenced_txos.extend(await self.get_many(*referenced_ids))
referenced_ids = set(filter(None, (row['channel_id'] for row in referenced_txos)))
if referenced_ids:
referenced_txos.extend(await self.get_many(*referenced_ids))
return referenced_txos
def expand_query(**kwargs):
if "amount_order" in kwargs:
kwargs["limit"] = 1
kwargs["order_by"] = "effective_amount"
kwargs["offset"] = int(kwargs["amount_order"]) - 1
if 'name' in kwargs:
kwargs['name'] = normalize_name(kwargs.pop('name'))
if kwargs.get('is_controlling') is False:
kwargs.pop('is_controlling')
query = {'must': [], 'must_not': []}
collapse = None
if 'fee_currency' in kwargs and kwargs['fee_currency'] is not None:
kwargs['fee_currency'] = kwargs['fee_currency'].upper()
for key, value in kwargs.items():
key = key.replace('claim.', '')
many = key.endswith('__in') or isinstance(value, list)
if many and len(value) > 2048:
raise TooManyClaimSearchParametersError(key, 2048)
if many:
key = key.replace('__in', '')
value = list(filter(None, value))
if value is None or isinstance(value, list) and len(value) == 0:
continue
key = REPLACEMENTS.get(key, key)
if key in FIELDS:
partial_id = False
if key == 'claim_type':
if isinstance(value, str):
value = CLAIM_TYPES[value]
else:
value = [CLAIM_TYPES[claim_type] for claim_type in value]
elif key == 'stream_type':
value = [STREAM_TYPES[value]] if isinstance(value, str) else list(map(STREAM_TYPES.get, value))
if key == '_id':
if isinstance(value, Iterable):
value = [item[::-1].hex() for item in value]
else:
value = value[::-1].hex()
if not many and key in ('_id', 'claim_id', 'sd_hash') and len(value) < 20:
partial_id = True
if key in ('signature_valid', 'has_source'):
continue # handled later
if key in TEXT_FIELDS:
key += '.keyword'
ops = {'<=': 'lte', '>=': 'gte', '<': 'lt', '>': 'gt'}
if partial_id:
query['must'].append({"prefix": {key: value}})
elif key in RANGE_FIELDS and isinstance(value, str) and value[0] in ops:
operator_length = 2 if value[:2] in ops else 1
operator, value = value[:operator_length], value[operator_length:]
if key == 'fee_amount':
value = str(Decimal(value)*1000)
query['must'].append({"range": {key: {ops[operator]: value}}})
elif key in RANGE_FIELDS and isinstance(value, list) and all(v[0] in ops for v in value):
range_constraints = []
for v in value:
operator_length = 2 if v[:2] in ops else 1
operator, stripped_op_v = v[:operator_length], v[operator_length:]
if key == 'fee_amount':
stripped_op_v = str(Decimal(stripped_op_v)*1000)
range_constraints.append((operator, stripped_op_v))
query['must'].append({"range": {key: {ops[operator]: v for operator, v in range_constraints}}})
elif many:
query['must'].append({"terms": {key: value}})
else:
if key == 'fee_amount':
value = str(Decimal(value)*1000)
query['must'].append({"term": {key: {"value": value}}})
elif key == 'not_channel_ids':
for channel_id in value:
query['must_not'].append({"term": {'channel_id.keyword': channel_id}})
query['must_not'].append({"term": {'_id': channel_id}})
elif key == 'channel_ids':
query['must'].append({"terms": {'channel_id.keyword': value}})
elif key == 'claim_ids':
query['must'].append({"terms": {'claim_id.keyword': value}})
elif key == 'media_types':
query['must'].append({"terms": {'media_type.keyword': value}})
elif key == 'any_languages':
query['must'].append({"terms": {'languages': clean_tags(value)}})
elif key == 'all_languages':
query['must'].extend([{"term": {'languages': tag}} for tag in value])
elif key == 'any_tags':
query['must'].append({"terms": {'tags.keyword': clean_tags(value)}})
elif key == 'all_tags':
query['must'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
elif key == 'not_tags':
query['must_not'].extend([{"term": {'tags.keyword': tag}} for tag in clean_tags(value)])
elif key == 'not_claim_id':
query['must_not'].extend([{"term": {'claim_id.keyword': cid}} for cid in value])
elif key == 'limit_claims_per_channel':
collapse = ('channel_id.keyword', value)
if kwargs.get('has_channel_signature'):
query['must'].append({"exists": {"field": "signature"}})
if 'signature_valid' in kwargs:
query['must'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
elif 'signature_valid' in kwargs:
query.setdefault('should', [])
query["minimum_should_match"] = 1
query['should'].append({"bool": {"must_not": {"exists": {"field": "signature"}}}})
query['should'].append({"term": {"is_signature_valid": bool(kwargs["signature_valid"])}})
if 'has_source' in kwargs:
query.setdefault('should', [])
query["minimum_should_match"] = 1
is_stream_or_repost = {"terms": {"claim_type": [CLAIM_TYPES['stream'], CLAIM_TYPES['repost']]}}
query['should'].append(
{"bool": {"must": [{"match": {"has_source": kwargs['has_source']}}, is_stream_or_repost]}})
query['should'].append({"bool": {"must_not": [is_stream_or_repost]}})
query['should'].append({"bool": {"must": [{"term": {"reposted_claim_type": CLAIM_TYPES['channel']}}]}})
if kwargs.get('text'):
query['must'].append(
{"simple_query_string":
{"query": kwargs["text"], "fields": [
"claim_name^4", "channel_name^8", "title^1", "description^.5", "author^1", "tags^.5"
]}})
query = {
"_source": {"excludes": ["description", "title"]},
'query': {'bool': query},
"sort": [],
}
if "limit" in kwargs:
query["size"] = kwargs["limit"]
if 'offset' in kwargs:
query["from"] = kwargs["offset"]
if 'order_by' in kwargs:
if isinstance(kwargs["order_by"], str):
kwargs["order_by"] = [kwargs["order_by"]]
for value in kwargs['order_by']:
if 'trending_group' in value:
# fixme: trending_mixed is 0 for all records on variable decay, making sort slow.
continue
is_asc = value.startswith('^')
value = value[1:] if is_asc else value
value = REPLACEMENTS.get(value, value)
if value in TEXT_FIELDS:
value += '.keyword'
query['sort'].append({value: "asc" if is_asc else "desc"})
if collapse:
query["collapse"] = {
"field": collapse[0],
"inner_hits": {
"name": collapse[0],
"size": collapse[1],
"sort": query["sort"]
}
}
return query
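# Hypothetical example (values are placeholders) of the request body this function builds:
#   expand_query(claim_type='stream',            # mapped through CLAIM_TYPES to 1
#                height='>500000',               # range string -> {"range": {"height": {"gt": "500000"}}}
#                order_by=['^creation_height'],  # leading '^' requests ascending sort
#                limit=20, offset=40)
# returns a dict of the form
#   {'_source': {...}, 'query': {'bool': {...}}, 'sort': [{'creation_height': 'asc'}], 'size': 20, 'from': 40}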
def expand_result(results):
inner_hits = []
expanded = []
for result in results:
if result.get("inner_hits"):
for _, inner_hit in result["inner_hits"].items():
inner_hits.extend(inner_hit["hits"]["hits"])
continue
result = result['_source']
result['claim_hash'] = unhexlify(result['claim_id'])[::-1]
if result['reposted_claim_id']:
result['reposted_claim_hash'] = unhexlify(result['reposted_claim_id'])[::-1]
else:
result['reposted_claim_hash'] = None
result['channel_hash'] = unhexlify(result['channel_id'])[::-1] if result['channel_id'] else None
result['txo_hash'] = unhexlify(result['tx_id'])[::-1] + struct.pack('<I', result['tx_nout'])
result['tx_hash'] = unhexlify(result['tx_id'])[::-1]
result['reposted'] = result.pop('repost_count')
result['signature_valid'] = result.pop('is_signature_valid')
# result['normalized'] = result.pop('normalized_name')
# if result['censoring_channel_hash']:
# result['censoring_channel_hash'] = unhexlify(result['censoring_channel_hash'])[::-1]
expanded.append(result)
if inner_hits:
return expand_result(inner_hits)
return expanded
class ResultCacheItem:
__slots__ = '_result', 'lock', 'has_result'
def __init__(self):
self.has_result = asyncio.Event()
self.lock = asyncio.Lock()
self._result = None
@property
def result(self) -> str:
return self._result
@result.setter
def result(self, result: str):
self._result = result
if result is not None:
self.has_result.set()
@classmethod
def from_cache(cls, cache_key, cache):
cache_item = cache.get(cache_key)
if cache_item is None:
cache_item = cache[cache_key] = ResultCacheItem()
return cache_item
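# Minimal sketch (not in the original file) of the dogpile-prevention pattern that cached_search()
# and search_ahead() use above; `cache` and `compute` are hypothetical placeholders.
async def _cached(cache, key, compute):
    item = ResultCacheItem.from_cache(key, cache)
    if item.result is not None:        # fast path: already computed
        return item.result
    async with item.lock:              # only one coroutine computes per key
        if item.result:                # re-check: another waiter may have filled it
            return item.result
        item.result = await compute()  # the setter also fires item.has_result
        return item.result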

@@ -1,138 +0,0 @@
import os
import argparse
import asyncio
import logging
from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk
from lbry.wallet.server.env import Env
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.db.elasticsearch.search import SearchIndex, IndexVersionMismatch
from lbry.wallet.server.db.elasticsearch.constants import ALL_FIELDS
async def get_recent_claims(env, index_name='claims', db=None):
log = logging.getLogger()
need_open = db is None
db = db or LevelDB(env)
try:
if need_open:
db.open_db()
if db.es_sync_height == db.db_height or db.db_height <= 0:
return
if need_open:
await db.initialize_caches()
log.info(f"catching up ES ({db.es_sync_height}) to leveldb height: {db.db_height}")
cnt = 0
touched_claims = set()
deleted_claims = set()
for height in range(db.es_sync_height, db.db_height + 1):
touched_or_deleted = db.prefix_db.touched_or_deleted.get(height)
touched_claims.update(touched_or_deleted.touched_claims)
deleted_claims.update(touched_or_deleted.deleted_claims)
touched_claims.difference_update(deleted_claims)
for deleted in deleted_claims:
yield {
'_index': index_name,
'_op_type': 'delete',
'_id': deleted.hex()
}
for touched in touched_claims:
claim = db.claim_producer(touched)
if claim:
yield {
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
'_id': claim['claim_id'],
'_index': index_name,
'_op_type': 'update',
'doc_as_upsert': True
}
cnt += 1
else:
logging.warning("could not sync claim %s", touched.hex())
if cnt % 10000 == 0:
logging.info("%i claims sent to ES", cnt)
db.es_sync_height = db.db_height
db.write_db_state()
db.prefix_db.unsafe_commit()
db.assert_db_state()
logging.info("finished sending %i claims to ES, deleted %i", cnt, len(deleted_claims))
finally:
if need_open:
db.close()
async def get_all_claims(env, index_name='claims', db=None):
need_open = db is None
db = db or LevelDB(env)
if need_open:
db.open_db()
await db.initialize_caches()
logging.info("Fetching claims to send ES from leveldb")
try:
cnt = 0
async for claim in db.all_claims_producer():
yield {
'doc': {key: value for key, value in claim.items() if key in ALL_FIELDS},
'_id': claim['claim_id'],
'_index': index_name,
'_op_type': 'update',
'doc_as_upsert': True
}
cnt += 1
if cnt % 10000 == 0:
logging.info("sent %i claims to ES", cnt)
finally:
if need_open:
db.close()
async def make_es_index_and_run_sync(env: Env, clients=32, force=False, db=None, index_name='claims'):
index = SearchIndex(env.es_index_prefix, elastic_host=env.elastic_host, elastic_port=env.elastic_port)
logging.info("ES sync host: %s:%i", env.elastic_host, env.elastic_port)
try:
created = await index.start()
except IndexVersionMismatch as err:
logging.info(
"dropping ES search index (version %s) for upgrade to version %s", err.got_version, err.expected_version
)
await index.delete_index()
await index.stop()
created = await index.start()
finally:
index.stop()
es = AsyncElasticsearch([{'host': env.elastic_host, 'port': env.elastic_port}])
if force or created:
claim_generator = get_all_claims(env, index_name=index_name, db=db)
else:
claim_generator = get_recent_claims(env, index_name=index_name, db=db)
try:
async for ok, item in async_streaming_bulk(es, claim_generator, request_timeout=600, raise_on_error=False):
if not ok:
logging.warning("indexing failed for an item: %s", item)
await es.indices.refresh(index=index_name)
finally:
await es.close()
def run_elastic_sync():
logging.basicConfig(level=logging.INFO)
logging.getLogger('aiohttp').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
logging.info('lbry.server starting')
parser = argparse.ArgumentParser(prog="lbry-hub-elastic-sync")
parser.add_argument("-c", "--clients", type=int, default=32)
parser.add_argument("-f", "--force", default=False, action='store_true')
Env.contribute_to_arg_parser(parser)
args = parser.parse_args()
env = Env.from_arg_parser(args)
if not os.path.exists(os.path.join(args.db_dir, 'lbry-leveldb')):
logging.info("DB path doesnt exist, nothing to sync to ES")
return
asyncio.run(make_es_index_and_run_sync(env, clients=args.clients, force=args.force))
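# Illustrative invocation of this entry point (paths are placeholders; flag names come from the
# parser above and Env.contribute_to_arg_parser), assuming the usual console-script name:
#   lbry-hub-elastic-sync --db_dir /var/lib/lbry-hub --elastic_host localhost --elastic_port 9200 --force
# Without --force, only heights above es_sync_height are replayed via get_recent_claims().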

File diff suppressed because it is too large
@@ -1,175 +0,0 @@
import struct
import logging
from string import printable
from collections import defaultdict
from typing import Tuple, Iterable, Callable, Optional
from lbry.wallet.server.db import DB_PREFIXES
_OP_STRUCT = struct.Struct('>BLL')
log = logging.getLogger()
class RevertableOp:
__slots__ = [
'key',
'value',
]
is_put = 0
def __init__(self, key: bytes, value: bytes):
self.key = key
self.value = value
@property
def is_delete(self) -> bool:
return not self.is_put
def invert(self) -> 'RevertableOp':
raise NotImplementedError()
def pack(self) -> bytes:
"""
Serialize to bytes
"""
return struct.pack(
f'>BLL{len(self.key)}s{len(self.value)}s', int(self.is_put), len(self.key), len(self.value), self.key,
self.value
)
@classmethod
def unpack(cls, packed: bytes) -> Tuple['RevertableOp', bytes]:
"""
Deserialize from bytes
:param packed: bytes containing at least one packed revertable op
:return: tuple of the deserialized op (a put or a delete) and the remaining serialized bytes
"""
is_put, key_len, val_len = _OP_STRUCT.unpack(packed[:9])
key = packed[9:9 + key_len]
value = packed[9 + key_len:9 + key_len + val_len]
if is_put == 1:
return RevertablePut(key, value), packed[9 + key_len + val_len:]
return RevertableDelete(key, value), packed[9 + key_len + val_len:]
def __eq__(self, other: 'RevertableOp') -> bool:
return (self.is_put, self.key, self.value) == (other.is_put, other.key, other.value)
def __repr__(self) -> str:
return str(self)
def __str__(self) -> str:
from lbry.wallet.server.db.prefixes import auto_decode_item
k, v = auto_decode_item(self.key, self.value)
key = ''.join(c if c in printable else '.' for c in str(k))
val = ''.join(c if c in printable else '.' for c in str(v))
return f"{'PUT' if self.is_put else 'DELETE'} {DB_PREFIXES(self.key[:1]).name}: {key} | {val}"
class RevertableDelete(RevertableOp):
def invert(self):
return RevertablePut(self.key, self.value)
class RevertablePut(RevertableOp):
is_put = True
def invert(self):
return RevertableDelete(self.key, self.value)
class OpStackIntegrity(Exception):
pass
class RevertableOpStack:
def __init__(self, get_fn: Callable[[bytes], Optional[bytes]], unsafe_prefixes=None):
"""
This represents a sequence of revertable puts and deletes to a key-value database that checks for integrity
violations when applying the puts and deletes. The integrity checks assure that keys that do not exist
are not deleted, and that when keys are deleted the current value is correctly known so that the delete
may be undone. When putting values, the integrity checks assure that existing values are not overwritten
without first being deleted. Updates are performed by applying a delete op for the old value and a put op
for the new value.
:param get_fn: getter function from an object implementing `KeyValueStorage`
:param unsafe_prefixes: optional set of prefixes to ignore integrity errors for, violations are still logged
"""
self._get = get_fn
self._items = defaultdict(list)
self._unsafe_prefixes = unsafe_prefixes or set()
def append_op(self, op: RevertableOp):
"""
Apply a put or delete op, checking that it introduces no integrity errors
"""
inverted = op.invert()
if self._items[op.key] and inverted == self._items[op.key][-1]:
self._items[op.key].pop() # if the new op is the inverse of the last op, we can safely null both
return
elif self._items[op.key] and self._items[op.key][-1] == op: # duplicate of last op
return # raise an error?
stored_val = self._get(op.key)
has_stored_val = stored_val is not None
delete_stored_op = None if not has_stored_val else RevertableDelete(op.key, stored_val)
will_delete_existing_stored = False if delete_stored_op is None else (delete_stored_op in self._items[op.key])
try:
if op.is_put and has_stored_val and not will_delete_existing_stored:
raise OpStackIntegrity(
f"db op tries to add on top of existing key without deleting first: {op}"
)
elif op.is_delete and has_stored_val and stored_val != op.value and not will_delete_existing_stored:
# there is a value and we're not deleting it in this op
# check that a delete for the stored value is in the stack
raise OpStackIntegrity(f"db op tries to delete with incorrect existing value {op}")
elif op.is_delete and not has_stored_val:
raise OpStackIntegrity(f"db op tries to delete nonexistent key: {op}")
elif op.is_delete and stored_val != op.value:
raise OpStackIntegrity(f"db op tries to delete with incorrect value: {op}")
except OpStackIntegrity as err:
if op.key[:1] in self._unsafe_prefixes:
log.debug(f"skipping over integrity error: {err}")
else:
raise err
self._items[op.key].append(op)
def extend_ops(self, ops: Iterable[RevertableOp]):
"""
Apply a sequence of put or delete ops, checking that they introduce no integrity errors
"""
for op in ops:
self.append_op(op)
def clear(self):
self._items.clear()
def __len__(self):
return sum(map(len, self._items.values()))
def __iter__(self):
for key, ops in self._items.items():
for op in ops:
yield op
def __reversed__(self):
for key, ops in self._items.items():
for op in reversed(ops):
yield op
def get_undo_ops(self) -> bytes:
"""
Get the serialized bytes to undo all of the changes made by the pending ops
"""
return b''.join(op.invert().pack() for op in reversed(self))
def apply_packed_undo_ops(self, packed: bytes):
"""
Unpack and apply a sequence of undo ops from serialized undo bytes
"""
while packed:
op, packed = RevertableOp.unpack(packed)
self.append_op(op)
def get_last_op_for_key(self, key: bytes) -> Optional[RevertableOp]:
if key in self._items and self._items[key]:
return self._items[key][-1]
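# Illustration only (not in the original file): a round trip through the op machinery above.
# The plain dict stands in for what is already persisted in the database.
def _example_round_trip():
    backing = {}
    stack = RevertableOpStack(backing.get)
    stack.append_op(RevertablePut(b'Ekey', b'value'))  # passes integrity checks: key not yet stored
    undo = stack.get_undo_ops()                        # packed inverse op: RevertableDelete(b'Ekey', b'value')
    op, remaining = RevertableOp.unpack(undo)
    assert isinstance(op, RevertableDelete) and remaining == b''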

@@ -1,384 +0,0 @@
# Copyright (c) 2016, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
import math
import re
import resource
from os import environ
from collections import namedtuple
from ipaddress import ip_address
from lbry.wallet.server.util import class_logger
from lbry.wallet.server.coin import Coin, LBC, LBCTestNet, LBCRegTest
import lbry.wallet.server.util as lib_util
NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
class Env:
# Peer discovery
PD_OFF, PD_SELF, PD_ON = range(3)
class Error(Exception):
pass
def __init__(self, coin=None, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None,
elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None, websocket_port=None,
chain=None, es_index_prefix=None, es_mode=None, cache_MB=None, reorg_limit=None, tcp_port=None,
udp_port=None, ssl_port=None, ssl_certfile=None, ssl_keyfile=None, rpc_port=None,
prometheus_port=None, max_subscriptions=None, banner_file=None, anon_logs=None, log_sessions=None,
allow_lan_udp=None, cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None,
payment_address=None, donation_address=None, max_send=None, max_receive=None, max_sessions=None,
session_timeout=None, drop_client=None, description=None, daily_fee=None,
database_query_timeout=None, db_max_open_files=512):
self.logger = class_logger(__name__, self.__class__.__name__)
self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL')
self.db_max_open_files = db_max_open_files
self.host = host if host is not None else self.default('HOST', 'localhost')
self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost')
self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
self.loop_policy = self.set_event_loop_policy(
loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None)
)
self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
self.max_query_workers = max_query_workers if max_query_workers is not None else self.integer('MAX_QUERY_WORKERS', 4)
self.websocket_host = websocket_host if websocket_host is not None else self.default('WEBSOCKET_HOST', self.host)
self.websocket_port = websocket_port if websocket_port is not None else self.integer('WEBSOCKET_PORT', None)
if coin is not None:
assert issubclass(coin, Coin)
self.coin = coin
else:
chain = chain if chain is not None else self.default('NET', 'mainnet').strip().lower()
if chain == 'mainnet':
self.coin = LBC
elif chain == 'testnet':
self.coin = LBCTestNet
else:
self.coin = LBCRegTest
self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
self.es_mode = es_mode if es_mode is not None else self.default('ES_MODE', 'writer')
self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
# Server stuff
self.tcp_port = tcp_port if tcp_port is not None else self.integer('TCP_PORT', None)
self.udp_port = udp_port if udp_port is not None else self.integer('UDP_PORT', self.tcp_port)
self.ssl_port = ssl_port if ssl_port is not None else self.integer('SSL_PORT', None)
if self.ssl_port:
self.ssl_certfile = ssl_certfile if ssl_certfile is not None else self.required('SSL_CERTFILE')
self.ssl_keyfile = ssl_keyfile if ssl_keyfile is not None else self.required('SSL_KEYFILE')
self.rpc_port = rpc_port if rpc_port is not None else self.integer('RPC_PORT', 8000)
self.prometheus_port = prometheus_port if prometheus_port is not None else self.integer('PROMETHEUS_PORT', 0)
self.max_subscriptions = max_subscriptions if max_subscriptions is not None else self.integer('MAX_SUBSCRIPTIONS', 10000)
self.banner_file = banner_file if banner_file is not None else self.default('BANNER_FILE', None)
# self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
self.anon_logs = anon_logs if anon_logs is not None else self.boolean('ANON_LOGS', False)
self.log_sessions = log_sessions if log_sessions is not None else self.integer('LOG_SESSIONS', 3600)
self.allow_lan_udp = allow_lan_udp if allow_lan_udp is not None else self.boolean('ALLOW_LAN_UDP', False)
self.cache_all_tx_hashes = cache_all_tx_hashes if cache_all_tx_hashes is not None else self.boolean('CACHE_ALL_TX_HASHES', False)
self.cache_all_claim_txos = cache_all_claim_txos if cache_all_claim_txos is not None else self.boolean('CACHE_ALL_CLAIM_TXOS', False)
self.country = country if country is not None else self.default('COUNTRY', 'US')
# Peer discovery
self.peer_discovery = self.peer_discovery_enum()
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.peer_hubs = self.extract_peer_hubs()
# self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
# self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# The electrum client takes the empty string as unspecified
self.payment_address = payment_address if payment_address is not None else self.default('PAYMENT_ADDRESS', '')
self.donation_address = donation_address if donation_address is not None else self.default('DONATION_ADDRESS', '')
# Server limits to help prevent DoS
self.max_send = max_send if max_send is not None else self.integer('MAX_SEND', 1000000)
self.max_receive = max_receive if max_receive is not None else self.integer('MAX_RECEIVE', 1000000)
# self.max_subs = self.integer('MAX_SUBS', 250000)
self.max_sessions = max_sessions if max_sessions is not None else self.sane_max_sessions()
# self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
self.session_timeout = session_timeout if session_timeout is not None else self.integer('SESSION_TIMEOUT', 600)
self.drop_client = drop_client if drop_client is not None else self.custom("DROP_CLIENT", None, re.compile)
self.description = description if description is not None else self.default('DESCRIPTION', '')
self.daily_fee = daily_fee if daily_fee is not None else self.string_amount('DAILY_FEE', '0')
# Identities
clearnet_identity = self.clearnet_identity()
tor_identity = self.tor_identity(clearnet_identity)
self.identities = [identity
for identity in (clearnet_identity, tor_identity)
if identity is not None]
self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
(float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)
@classmethod
def default(cls, envvar, default):
return environ.get(envvar, default)
@classmethod
def boolean(cls, envvar, default):
default = 'Yes' if default else ''
return bool(cls.default(envvar, default).strip())
@classmethod
def required(cls, envvar):
value = environ.get(envvar)
if value is None:
raise cls.Error(f'required envvar {envvar} not set')
return value
@classmethod
def string_amount(cls, envvar, default):
value = environ.get(envvar, default)
amount_pattern = re.compile(r"[0-9]{0,10}(\.[0-9]{1,8})?")
if len(value) > 0 and not amount_pattern.fullmatch(value):
raise cls.Error(f'{value} is not a valid amount for {envvar}')
return value
@classmethod
def integer(cls, envvar, default):
value = environ.get(envvar)
if value is None:
return default
try:
return int(value)
except Exception:
raise cls.Error(f'cannot convert envvar {envvar} value {value} to an integer')
@classmethod
def custom(cls, envvar, default, parse):
value = environ.get(envvar)
if value is None:
return default
try:
return parse(value)
except Exception as e:
raise cls.Error(f'cannot parse envvar {envvar} value {value}') from e
@classmethod
def obsolete(cls, envvars):
bad = [envvar for envvar in envvars if environ.get(envvar)]
if bad:
raise cls.Error(f'remove obsolete environment variables {bad}')
@classmethod
def set_event_loop_policy(cls, policy_name: str = None):
if not policy_name or policy_name == 'default':
import asyncio
return asyncio.get_event_loop_policy()
elif policy_name == 'uvloop':
import uvloop
import asyncio
loop_policy = uvloop.EventLoopPolicy()
asyncio.set_event_loop_policy(loop_policy)
return loop_policy
raise cls.Error(f'unknown event loop policy "{policy_name}"')
def cs_host(self, *, for_rpc):
"""Returns the 'host' argument to pass to asyncio's create_server
call. The result can be a single host name string, a list of
host name strings, or an empty string to bind to all interfaces.
If rpc is True the host to use for the RPC server is returned.
Otherwise the host to use for SSL/TCP servers is returned.
"""
host = self.rpc_host if for_rpc else self.host
result = [part.strip() for part in host.split(',')]
if len(result) == 1:
result = result[0]
# An empty result indicates all interfaces, which we do not
# permit for an RPC server.
if for_rpc and not result:
result = 'localhost'
if result == 'localhost':
# 'localhost' resolves to ::1 (ipv6) on many systems, which fails on default setup of
# docker, using 127.0.0.1 instead forces ipv4
result = '127.0.0.1'
return result
def sane_max_sessions(self):
"""Return the maximum number of sessions to permit. Normally this
is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
downwards if running with a small open file rlimit."""
env_value = self.integer('MAX_SESSIONS', 1000)
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.logger.warning(f'lowered maximum sessions from {env_value:,d} to {value:,d} '
f'because your open file limit is {nofile_limit:,d}')
return value
def clearnet_identity(self):
host = self.default('REPORT_HOST', None)
if host is None:
return None
try:
ip = ip_address(host)
except ValueError:
bad = (not lib_util.is_valid_hostname(host)
or host.lower() == 'localhost')
else:
bad = (ip.is_multicast or ip.is_unspecified
or (ip.is_private and self.peer_announce))
if bad:
raise self.Error(f'"{host}" is not a valid REPORT_HOST')
tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
f'both resolve to {tcp_port}')
return NetIdentity(
host,
tcp_port,
ssl_port,
''
)
def tor_identity(self, clearnet):
host = self.default('REPORT_HOST_TOR', None)
if host is None:
return None
if not host.endswith('.onion'):
raise self.Error(f'tor host "{host}" must end with ".onion"')
def port(port_kind):
"""Returns the clearnet identity port, if any and not zero,
otherwise the listening port."""
result = 0
if clearnet:
result = getattr(clearnet, port_kind)
return result or getattr(self, port_kind)
tcp_port = self.integer('REPORT_TCP_PORT_TOR',
port('tcp_port')) or None
ssl_port = self.integer('REPORT_SSL_PORT_TOR',
port('ssl_port')) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
f'both resolve to {tcp_port}')
return NetIdentity(
host,
tcp_port,
ssl_port,
'_tor',
)
def hosts_dict(self):
return {identity.host: {'tcp_port': identity.tcp_port,
'ssl_port': identity.ssl_port}
for identity in self.identities}
def peer_discovery_enum(self):
pd = self.default('PEER_DISCOVERY', 'on').strip().lower()
if pd in ('off', ''):
return self.PD_OFF
elif pd == 'self':
return self.PD_SELF
else:
return self.PD_ON
def extract_peer_hubs(self):
return [hub.strip() for hub in self.default('PEER_HUBS', '').split(',') if hub.strip()]
@classmethod
def contribute_to_arg_parser(cls, parser):
parser.add_argument('--db_dir', type=str, help='path of the directory containing lbry-leveldb',
default=cls.default('DB_DIRECTORY', None))
parser.add_argument('--daemon_url',
help='URL for rpc from lbrycrd, <rpcuser>:<rpcpassword>@<lbrycrd rpc ip><lbrycrd rpc port>',
default=cls.default('DAEMON_URL', None))
parser.add_argument('--db_max_open_files', type=int, default=512,
help='number of files leveldb can have open at a time')
parser.add_argument('--host', type=str, default=cls.default('HOST', 'localhost'),
help='Interface for hub server to listen on')
parser.add_argument('--tcp_port', type=int, default=cls.integer('TCP_PORT', 50001),
help='TCP port to listen on for hub server')
parser.add_argument('--udp_port', type=int, default=cls.integer('UDP_PORT', 50001),
help='UDP port to listen on for hub server')
parser.add_argument('--rpc_host', default=cls.default('RPC_HOST', 'localhost'), type=str,
help='Listening interface for admin rpc')
parser.add_argument('--rpc_port', default=cls.integer('RPC_PORT', 8000), type=int,
help='Listening port for admin rpc')
parser.add_argument('--websocket_host', default=cls.default('WEBSOCKET_HOST', 'localhost'), type=str,
help='Listening interface for websocket')
parser.add_argument('--websocket_port', default=cls.integer('WEBSOCKET_PORT', None), type=int,
help='Listening port for websocket')
parser.add_argument('--ssl_port', default=cls.integer('SSL_PORT', None), type=int,
help='SSL port to listen on for hub server')
parser.add_argument('--ssl_certfile', default=cls.default('SSL_CERTFILE', None), type=str,
help='Path to SSL cert file')
parser.add_argument('--ssl_keyfile', default=cls.default('SSL_KEYFILE', None), type=str,
help='Path to SSL key file')
parser.add_argument('--reorg_limit', default=cls.integer('REORG_LIMIT', 200), type=int, help='Max reorg depth')
parser.add_argument('--elastic_host', default=cls.default('ELASTIC_HOST', 'localhost'), type=str,
help='elasticsearch host')
parser.add_argument('--elastic_port', default=cls.integer('ELASTIC_PORT', 9200), type=int,
help='elasticsearch port')
parser.add_argument('--es_mode', default=cls.default('ES_MODE', 'writer'), type=str,
choices=['reader', 'writer'])
parser.add_argument('--es_index_prefix', default=cls.default('ES_INDEX_PREFIX', ''), type=str)
parser.add_argument('--loop_policy', default=cls.default('EVENT_LOOP_POLICY', 'default'), type=str,
choices=['default', 'uvloop'])
parser.add_argument('--max_query_workers', type=int, default=cls.integer('MAX_QUERY_WORKERS', 4),
help='number of threads used by the request handler to read the database')
parser.add_argument('--cache_MB', type=int, default=cls.integer('CACHE_MB', 1024),
help='size of the leveldb lru cache, in megabytes')
parser.add_argument('--cache_all_tx_hashes', type=bool,
help='Load all tx hashes into memory. This will make address subscriptions and sync, '
'resolve, transaction fetching, and block sync all faster at the expense of higher '
'memory usage')
parser.add_argument('--cache_all_claim_txos', type=bool,
help='Load all claim txos into memory. This will make address subscriptions and sync, '
'resolve, transaction fetching, and block sync all faster at the expense of higher '
'memory usage')
parser.add_argument('--prometheus_port', type=int, default=cls.integer('PROMETHEUS_PORT', 0),
help='port for hub prometheus metrics to listen on, disabled by default')
parser.add_argument('--max_subscriptions', type=int, default=cls.integer('MAX_SUBSCRIPTIONS', 10000),
help='max subscriptions per connection')
parser.add_argument('--banner_file', type=str, default=cls.default('BANNER_FILE', None),
help='path to file containing banner text')
parser.add_argument('--anon_logs', type=bool, default=cls.boolean('ANON_LOGS', False),
help="don't log ip addresses")
parser.add_argument('--allow_lan_udp', type=bool, default=cls.boolean('ALLOW_LAN_UDP', False),
help='reply to hub UDP ping messages from LAN ip addresses')
parser.add_argument('--country', type=str, default=cls.default('COUNTRY', 'US'), help='')
parser.add_argument('--max_send', type=int, default=cls.default('MAX_SEND', 1000000), help='')
parser.add_argument('--max_receive', type=int, default=cls.default('MAX_RECEIVE', 1000000), help='')
parser.add_argument('--max_sessions', type=int, default=cls.default('MAX_SESSIONS', 1000), help='')
parser.add_argument('--session_timeout', type=int, default=cls.default('SESSION_TIMEOUT', 600), help='')
parser.add_argument('--drop_client', type=str, default=cls.default('DROP_CLIENT', None), help='')
parser.add_argument('--description', type=str, default=cls.default('DESCRIPTION', ''), help='')
parser.add_argument('--daily_fee', type=float, default=cls.default('DAILY_FEE', 0.0), help='')
parser.add_argument('--payment_address', type=str, default=cls.default('PAYMENT_ADDRESS', ''), help='')
parser.add_argument('--donation_address', type=str, default=cls.default('DONATION_ADDRESS', ''), help='')
parser.add_argument('--chain', type=str, default=cls.default('NET', 'mainnet'),
help="Which chain to use, default is mainnet")
parser.add_argument('--query_timeout_ms', type=int, default=cls.integer('QUERY_TIMEOUT_MS', 10000),
help="elasticsearch query timeout")
@classmethod
def from_arg_parser(cls, args):
return cls(
db_dir=args.db_dir, daemon_url=args.daemon_url, db_max_open_files=args.db_max_open_files,
host=args.host, rpc_host=args.rpc_host, elastic_host=args.elastic_host, elastic_port=args.elastic_port,
loop_policy=args.loop_policy, max_query_workers=args.max_query_workers, websocket_host=args.websocket_host,
websocket_port=args.websocket_port, chain=args.chain, es_index_prefix=args.es_index_prefix,
es_mode=args.es_mode, cache_MB=args.cache_MB, reorg_limit=args.reorg_limit, tcp_port=args.tcp_port,
udp_port=args.udp_port, ssl_port=args.ssl_port, ssl_certfile=args.ssl_certfile,
ssl_keyfile=args.ssl_keyfile, rpc_port=args.rpc_port, prometheus_port=args.prometheus_port,
max_subscriptions=args.max_subscriptions, banner_file=args.banner_file, anon_logs=args.anon_logs,
log_sessions=None, allow_lan_udp=args.allow_lan_udp,
cache_all_tx_hashes=args.cache_all_tx_hashes, cache_all_claim_txos=args.cache_all_claim_txos,
country=args.country, payment_address=args.payment_address, donation_address=args.donation_address,
max_send=args.max_send, max_receive=args.max_receive, max_sessions=args.max_sessions,
session_timeout=args.session_timeout, drop_client=args.drop_client, description=args.description,
daily_fee=args.daily_fee, database_query_timeout=(args.query_timeout_ms / 1000)
)
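A minimal sketch of how contribute_to_arg_parser() and from_arg_parser() are typically wired together, assuming the surrounding environment class is named Env:

import argparse

def env_from_cli(argv=None):
    parser = argparse.ArgumentParser(prog='hub-server')
    Env.contribute_to_arg_parser(parser)   # registers --db_dir, --daemon_url, --tcp_port, ...
    args = parser.parse_args(argv)
    return Env.from_arg_parser(args)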


@ -1,160 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Cryptograph hash functions and related classes."""
import hashlib
import hmac
from lbry.wallet.server.util import bytes_to_int, int_to_bytes, hex_to_bytes
_sha256 = hashlib.sha256
_sha512 = hashlib.sha512
_new_hash = hashlib.new
_new_hmac = hmac.new
HASHX_LEN = 11
CLAIM_HASH_LEN = 20
def sha256(x):
"""Simple wrapper of hashlib sha256."""
return _sha256(x).digest()
def ripemd160(x):
"""Simple wrapper of hashlib ripemd160."""
h = _new_hash('ripemd160')
h.update(x)
return h.digest()
def double_sha256(x):
"""SHA-256 of SHA-256, as used extensively in bitcoin."""
return sha256(sha256(x))
def hmac_sha512(key, msg):
"""Use SHA-512 to provide an HMAC."""
return _new_hmac(key, msg, _sha512).digest()
def hash160(x):
"""RIPEMD-160 of SHA-256.
Used to make bitcoin addresses from pubkeys."""
return ripemd160(sha256(x))
def hash_to_hex_str(x: bytes) -> str:
"""Convert a big-endian binary hash to displayed hex string.
Display form of a binary hash is reversed and converted to hex.
"""
return x[::-1].hex()
def hex_str_to_hash(x: str) -> bytes:
"""Convert a displayed hex string to a binary hash."""
return hex_to_bytes(x)[::-1]
class Base58Error(Exception):
"""Exception used for Base58 errors."""
class Base58:
"""Class providing base 58 functionality."""
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(chars) == 58
cmap = {c: n for n, c in enumerate(chars)}
@staticmethod
def char_value(c):
val = Base58.cmap.get(c)
if val is None:
raise Base58Error(f'invalid base 58 character "{c}"')
return val
@staticmethod
def decode(txt):
"""Decodes txt into a big-endian bytearray."""
if not isinstance(txt, str):
raise TypeError('a string is required')
if not txt:
raise Base58Error('string cannot be empty')
value = 0
for c in txt:
value = value * 58 + Base58.char_value(c)
result = int_to_bytes(value)
# Prepend leading zero bytes if necessary
count = 0
for c in txt:
if c != '1':
break
count += 1
if count:
result = bytes(count) + result
return result
@staticmethod
def encode(be_bytes):
"""Converts a big-endian bytearray into a base58 string."""
value = bytes_to_int(be_bytes)
txt = ''
while value:
value, mod = divmod(value, 58)
txt += Base58.chars[mod]
for byte in be_bytes:
if byte != 0:
break
txt += '1'
return txt[::-1]
@staticmethod
def decode_check(txt, *, hash_fn=double_sha256):
"""Decodes a Base58Check-encoded string to a payload. The version
prefixes it."""
be_bytes = Base58.decode(txt)
result, check = be_bytes[:-4], be_bytes[-4:]
if check != hash_fn(result)[:4]:
raise Base58Error(f'invalid base 58 checksum for {txt}')
return result
@staticmethod
def encode_check(payload, *, hash_fn=double_sha256):
"""Encodes a payload bytearray (which includes the version byte(s))
into a Base58Check string."""
be_bytes = payload + hash_fn(payload)[:4]
return Base58.encode(be_bytes)
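An illustrative round trip with the helpers above; in practice the 20-byte payload would be hash160(pubkey), giving a legacy P2PKH address:

payload = bytes([0x00]) + bytes(20)        # version byte + dummy 20-byte hash160
address = Base58.encode_check(payload)
assert Base58.decode_check(address) == payload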

File diff suppressed because it is too large


@ -1,361 +0,0 @@
# Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
"""Mempool handling."""
import asyncio
import itertools
import time
import attr
import typing
from typing import Set, Optional, Callable, Awaitable
from collections import defaultdict
from prometheus_client import Histogram
from lbry.wallet.server.hash import hash_to_hex_str, hex_str_to_hash
from lbry.wallet.server.util import class_logger, chunks
from lbry.wallet.server.leveldb import UTXO
if typing.TYPE_CHECKING:
from lbry.wallet.server.session import LBRYSessionManager
@attr.s(slots=True)
class MemPoolTx:
prevouts = attr.ib()
# A pair is a (hashX, value) tuple
in_pairs = attr.ib()
out_pairs = attr.ib()
fee = attr.ib()
size = attr.ib()
raw_tx = attr.ib()
@attr.s(slots=True)
class MemPoolTxSummary:
hash = attr.ib()
fee = attr.ib()
has_unconfirmed_inputs = attr.ib()
NAMESPACE = "wallet_server"
HISTOGRAM_BUCKETS = (
.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, 15.0, 20.0, 30.0, 60.0, float('inf')
)
mempool_process_time_metric = Histogram(
"processed_mempool", "Time to process mempool and notify touched addresses",
namespace=NAMESPACE, buckets=HISTOGRAM_BUCKETS
)
class MemPool:
def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0):
self.coin = coin
self._daemon = daemon
self._db = db
self._touched_mp = {}
self._touched_bp = {}
self._highest_block = -1
self.logger = class_logger(__name__, self.__class__.__name__)
self.txs = {}
self.hashXs = defaultdict(set) # None can be a key
self.cached_compact_histogram = []
self.refresh_secs = refresh_secs
self.log_status_secs = log_status_secs
# Prevents mempool refreshes during fee histogram calculation
self.lock = state_lock
self.wakeup = asyncio.Event()
self.mempool_process_time_metric = mempool_process_time_metric
self.notified_mempool_txs = set()
self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None
async def _logging(self, synchronized_event):
"""Print regular logs of mempool stats."""
self.logger.info('beginning processing of daemon mempool. '
'This can take some time...')
start = time.perf_counter()
await synchronized_event.wait()
elapsed = time.perf_counter() - start
self.logger.info(f'synced in {elapsed:.2f}s')
while True:
self.logger.info(f'{len(self.txs):,d} txs '
f'touching {len(self.hashXs):,d} addresses')
await asyncio.sleep(self.log_status_secs)
await synchronized_event.wait()
def _accept_transactions(self, tx_map, utxo_map, touched):
"""Accept transactions in tx_map to the mempool if all their inputs
can be found in the existing mempool or a utxo_map from the
DB.
Returns an (unprocessed tx_map, unspent utxo_map) pair.
"""
hashXs = self.hashXs
txs = self.txs
deferred = {}
unspent = set(utxo_map)
# Try to find all prevouts so we can accept the TX
for hash, tx in tx_map.items():
in_pairs = []
try:
for prevout in tx.prevouts:
utxo = utxo_map.get(prevout)
if not utxo:
prev_hash, prev_index = prevout
# Raises KeyError if prev_hash is not in txs
utxo = txs[prev_hash].out_pairs[prev_index]
in_pairs.append(utxo)
except KeyError:
deferred[hash] = tx
continue
# Spend the prevouts
unspent.difference_update(tx.prevouts)
# Save the in_pairs, compute the fee and accept the TX
tx.in_pairs = tuple(in_pairs)
# Avoid negative fees if dealing with generation-like transactions
# because some in_pairs would be missing
tx.fee = max(0, (sum(v for _, v in tx.in_pairs) -
sum(v for _, v in tx.out_pairs)))
txs[hash] = tx
for hashX, value in itertools.chain(tx.in_pairs, tx.out_pairs):
touched.add(hashX)
hashXs[hashX].add(hash)
return deferred, {prevout: utxo_map[prevout] for prevout in unspent}
async def _mempool_loop(self, synchronized_event):
try:
return await self._refresh_hashes(synchronized_event)
except asyncio.CancelledError:
raise
except Exception as e:
self.logger.exception("MEMPOOL DIED")
raise e
async def _refresh_hashes(self, synchronized_event):
"""Refresh our view of the daemon's mempool."""
while True:
start = time.perf_counter()
height = self._daemon.cached_height()
hex_hashes = await self._daemon.mempool_hashes()
if height != await self._daemon.height():
continue
hashes = {hex_str_to_hash(hh) for hh in hex_hashes}
async with self.lock:
new_hashes = hashes.difference(self.notified_mempool_txs)
touched = await self._process_mempool(hashes)
self.notified_mempool_txs.update(new_hashes)
new_touched = {
touched_hashx for touched_hashx, txs in self.hashXs.items() if txs.intersection(new_hashes)
}
synchronized_event.set()
synchronized_event.clear()
await self.on_mempool(touched, new_touched, height)
duration = time.perf_counter() - start
self.mempool_process_time_metric.observe(duration)
try:
# we wait up to `refresh_secs` but go early if a broadcast happens (which triggers wakeup event)
await asyncio.wait_for(self.wakeup.wait(), timeout=self.refresh_secs)
except asyncio.TimeoutError:
pass
finally:
self.wakeup.clear()
async def _process_mempool(self, all_hashes):
# Re-sync with the new set of hashes
txs = self.txs
hashXs = self.hashXs # hashX: [tx_hash, ...]
touched = set()
# First handle txs that have disappeared
for tx_hash in set(txs).difference(all_hashes):
tx = txs.pop(tx_hash)
tx_hashXs = {hashX for hashX, value in tx.in_pairs}
tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
for hashX in tx_hashXs:
hashXs[hashX].remove(tx_hash)
if not hashXs[hashX]:
del hashXs[hashX]
touched.update(tx_hashXs)
# Process new transactions
new_hashes = list(all_hashes.difference(txs))
if new_hashes:
fetches = []
for hashes in chunks(new_hashes, 200):
fetches.append(self._fetch_and_accept(hashes, all_hashes, touched))
tx_map = {}
utxo_map = {}
for fetch in asyncio.as_completed(fetches):
deferred, unspent = await fetch
tx_map.update(deferred)
utxo_map.update(unspent)
prior_count = 0
# FIXME: this is not particularly efficient
while tx_map and len(tx_map) != prior_count:
prior_count = len(tx_map)
tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map, touched)
if tx_map:
self.logger.info(f'{len(tx_map)} txs dropped')
return touched
async def _fetch_and_accept(self, hashes, all_hashes, touched):
"""Fetch a list of mempool transactions."""
raw_txs = await self._daemon.getrawtransactions((hash_to_hex_str(hash) for hash in hashes))
to_hashX = self.coin.hashX_from_script
deserializer = self.coin.DESERIALIZER
tx_map = {}
for hash, raw_tx in zip(hashes, raw_txs):
# The daemon may have evicted the tx from its
# mempool or it may have gotten in a block
if not raw_tx:
continue
tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
# Convert the inputs and outputs into (hashX, value) pairs
# Drop generation-like inputs from MemPoolTx.prevouts
txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
for txin in tx.inputs
if not txin.is_generation())
txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
for txout in tx.outputs)
tx_map[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
0, tx_size, raw_tx)
# Determine all prevouts not in the mempool, and fetch the
# UTXO information from the database. Failed prevout lookups
# return None - concurrent database updates happen - which is
# relied upon by _accept_transactions. Ignore prevouts that are
# generation-like.
prevouts = tuple(prevout for tx in tx_map.values()
for prevout in tx.prevouts
if prevout[0] not in all_hashes)
utxos = await self._db.lookup_utxos(prevouts)
utxo_map = dict(zip(prevouts, utxos))
return self._accept_transactions(tx_map, utxo_map, touched)
#
# External interface
#
async def keep_synchronized(self, synchronized_event):
"""Keep the mempool synchronized with the daemon."""
await asyncio.wait([
self._mempool_loop(synchronized_event),
# self._refresh_histogram(synchronized_event),
self._logging(synchronized_event)
])
async def balance_delta(self, hashX):
"""Return the unconfirmed amount in the mempool for hashX.
Can be positive or negative.
"""
value = 0
if hashX in self.hashXs:
for hash in self.hashXs[hashX]:
tx = self.txs[hash]
value -= sum(v for h168, v in tx.in_pairs if h168 == hashX)
value += sum(v for h168, v in tx.out_pairs if h168 == hashX)
return value
def compact_fee_histogram(self):
"""Return a compact fee histogram of the current mempool."""
return self.cached_compact_histogram
async def potential_spends(self, hashX):
"""Return a set of (prev_hash, prev_idx) pairs from mempool
transactions that touch hashX.
None, some or all of these may be spends of the hashX, but all
actual spends of it (in the DB or mempool) will be included.
"""
result = set()
for tx_hash in self.hashXs.get(hashX, ()):
tx = self.txs[tx_hash]
result.update(tx.prevouts)
return result
def transaction_summaries(self, hashX):
"""Return a list of MemPoolTxSummary objects for the hashX."""
result = []
for tx_hash in self.hashXs.get(hashX, ()):
tx = self.txs[tx_hash]
has_ui = any(hash in self.txs for hash, idx in tx.prevouts)
result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui))
return result
async def unordered_UTXOs(self, hashX):
"""Return an unordered list of UTXO named tuples from mempool
transactions that pay to hashX.
This does not consider if any other mempool transactions spend
the outputs.
"""
utxos = []
for tx_hash in self.hashXs.get(hashX, ()):
tx = self.txs.get(tx_hash)
for pos, (hX, value) in enumerate(tx.out_pairs):
if hX == hashX:
utxos.append(UTXO(-1, pos, tx_hash, 0, value))
return utxos
def get_mempool_height(self, tx_hash):
# Height Progression
# -2: not broadcast
# -1: in mempool but has unconfirmed inputs
# 0: in mempool and all inputs confirmed
# +num: confirmed in a specific block (height)
if tx_hash not in self.txs:
return -2
tx = self.txs[tx_hash]
unspent_inputs = sum(1 if hash in self.txs else 0 for hash, idx in tx.prevouts)
if unspent_inputs:
return -1
return 0
async def _maybe_notify(self, new_touched):
tmp, tbp = self._touched_mp, self._touched_bp
common = set(tmp).intersection(tbp)
if common:
height = max(common)
elif tmp and max(tmp) == self._highest_block:
height = self._highest_block
else:
# Either we are processing a block and waiting for it to
# come in, or we have not yet had a mempool update for the
# new block height
return
touched = tmp.pop(height)
for old in [h for h in tmp if h <= height]:
del tmp[old]
for old in [h for h in tbp if h <= height]:
touched.update(tbp.pop(old))
# print("notify", height, len(touched), len(new_touched))
await self.notify_sessions(height, touched, new_touched)
async def start(self, height, session_manager: 'LBRYSessionManager'):
self._highest_block = height
self.notify_sessions = session_manager._notify_sessions
await self.notify_sessions(height, set(), set())
async def on_mempool(self, touched, new_touched, height):
self._touched_mp[height] = touched
await self._maybe_notify(new_touched)
async def on_block(self, touched, height):
self._touched_bp[height] = touched
self._highest_block = height
await self._maybe_notify(set())
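A worked example of the fee rule applied in _accept_transactions(): the fee is the non-negative difference between total input and output values.

in_pairs = [(b'hashX1', 150_000), (b'hashX2', 50_000)]   # (hashX, value) pairs
out_pairs = [(b'hashX3', 180_000)]
fee = max(0, sum(v for _, v in in_pairs) - sum(v for _, v in out_pairs))
assert fee == 20_000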


@ -1,258 +0,0 @@
# Copyright (c) 2018, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
"""Merkle trees, branches, proofs and roots."""
from asyncio import Event
from math import ceil, log
from lbry.wallet.server.hash import double_sha256
class Merkle:
"""Perform merkle tree calculations on binary hashes using a given hash
function.
If the hash count is not even, the final hash is repeated when
calculating the next merkle layer up the tree.
"""
def __init__(self, hash_func=double_sha256):
self.hash_func = hash_func
@staticmethod
def tree_depth(hash_count):
return Merkle.branch_length(hash_count) + 1
@staticmethod
def branch_length(hash_count):
"""Return the length of a merkle branch given the number of hashes."""
if not isinstance(hash_count, int):
raise TypeError('hash_count must be an integer')
if hash_count < 1:
raise ValueError('hash_count must be at least 1')
return ceil(log(hash_count, 2))
@staticmethod
def branch_and_root(hashes, index, length=None, hash_func=double_sha256):
"""Return a (merkle branch, merkle_root) pair given hashes, and the
index of one of those hashes.
"""
hashes = list(hashes)
if not isinstance(index, int):
raise TypeError('index must be an integer')
# This also asserts hashes is not empty
if not 0 <= index < len(hashes):
raise ValueError(f"index '{index}/{len(hashes)}' out of range")
natural_length = Merkle.branch_length(len(hashes))
if length is None:
length = natural_length
else:
if not isinstance(length, int):
raise TypeError('length must be an integer')
if length < natural_length:
raise ValueError('length out of range')
branch = []
for _ in range(length):
if len(hashes) & 1:
hashes.append(hashes[-1])
branch.append(hashes[index ^ 1])
index >>= 1
hashes = [hash_func(hashes[n] + hashes[n + 1])
for n in range(0, len(hashes), 2)]
return branch, hashes[0]
@staticmethod
def root(hashes, length=None):
"""Return the merkle root of a non-empty iterable of binary hashes."""
branch, root = Merkle.branch_and_root(hashes, 0, length)
return root
# @staticmethod
# def root_from_proof(hash, branch, index, hash_func=double_sha256):
# """Return the merkle root given a hash, a merkle branch to it, and
# its index in the hashes array.
#
# branch is an iterable sorted deepest to shallowest. If the
# returned root is the expected value then the merkle proof is
# verified.
#
# The caller should have confirmed the length of the branch with
# branch_length(). Unfortunately this is not easily done for
# bitcoin transactions as the number of transactions in a block
# is unknown to an SPV client.
# """
# for elt in branch:
# if index & 1:
# hash = hash_func(elt + hash)
# else:
# hash = hash_func(hash + elt)
# index >>= 1
# if index:
# raise ValueError('index out of range for branch')
# return hash
@staticmethod
def level(hashes, depth_higher):
"""Return a level of the merkle tree of hashes the given depth
higher than the bottom row of the original tree."""
size = 1 << depth_higher
root = Merkle.root
return [root(hashes[n: n + size], depth_higher)
for n in range(0, len(hashes), size)]
@staticmethod
def branch_and_root_from_level(level, leaf_hashes, index,
depth_higher):
"""Return a (merkle branch, merkle_root) pair when a merkle-tree has a
level cached.
To maximally reduce the amount of data hashed in computing a
merkle branch, cache a tree of depth N at level N // 2.
level is a list of hashes in the middle of the tree (returned
by level())
leaf_hashes are the leaves needed to calculate a partial branch
up to level.
depth_higher is how much higher level is than the leaves of the tree
index is the index in the full list of hashes of the hash whose
merkle branch we want.
"""
if not isinstance(level, list):
raise TypeError("level must be a list")
if not isinstance(leaf_hashes, list):
raise TypeError("leaf_hashes must be a list")
leaf_index = (index >> depth_higher) << depth_higher
leaf_branch, leaf_root = Merkle.branch_and_root(
leaf_hashes, index - leaf_index, depth_higher)
index >>= depth_higher
level_branch, root = Merkle.branch_and_root(level, index)
# Check last so that we know index is in-range
if leaf_root != level[index]:
raise ValueError('leaf hashes inconsistent with level')
return leaf_branch + level_branch, root
class MerkleCache:
"""A cache to calculate merkle branches efficiently."""
def __init__(self, merkle, source_func):
"""Initialise a cache hashes taken from source_func:
async def source_func(index, count):
...
"""
self.merkle = merkle
self.source_func = source_func
self.length = 0
self.depth_higher = 0
self.initialized = Event()
def _segment_length(self):
return 1 << self.depth_higher
def _leaf_start(self, index):
"""Given a level's depth higher and a hash index, return the leaf
index and leaf hash count needed to calculate a merkle branch.
"""
depth_higher = self.depth_higher
return (index >> depth_higher) << depth_higher
def _level(self, hashes):
return self.merkle.level(hashes, self.depth_higher)
async def _extend_to(self, length):
"""Extend the length of the cache if necessary."""
if length <= self.length:
return
# Start from the beginning of any final partial segment.
# Retain the value of depth_higher; in practice this is fine
start = self._leaf_start(self.length)
hashes = await self.source_func(start, length - start)
self.level[start >> self.depth_higher:] = self._level(hashes)
self.length = length
async def _level_for(self, length):
"""Return a (level_length, final_hash) pair for a truncation
of the hashes to the given length."""
if length == self.length:
return self.level
level = self.level[:length >> self.depth_higher]
leaf_start = self._leaf_start(length)
count = min(self._segment_length(), length - leaf_start)
hashes = await self.source_func(leaf_start, count)
level += self._level(hashes)
return level
async def initialize(self, length):
"""Call to initialize the cache to a source of given length."""
self.length = length
self.depth_higher = self.merkle.tree_depth(length) // 2
self.level = self._level(await self.source_func(0, length))
self.initialized.set()
def truncate(self, length):
"""Truncate the cache so it covers no more than length underlying
hashes."""
if not isinstance(length, int):
raise TypeError('length must be an integer')
if length <= 0:
raise ValueError('length must be positive')
if length >= self.length:
return
length = self._leaf_start(length)
self.length = length
self.level[length >> self.depth_higher:] = []
async def branch_and_root(self, length, index):
"""Return a merkle branch and root. Length is the number of
hashes used to calculate the merkle root, index is the position
of the hash to calculate the branch of.
index must be less than length, which must be at least 1."""
if not isinstance(length, int):
raise TypeError('length must be an integer')
if not isinstance(index, int):
raise TypeError('index must be an integer')
if length <= 0:
raise ValueError('length must be positive')
if index >= length:
raise ValueError('index must be less than length')
await self.initialized.wait()
await self._extend_to(length)
leaf_start = self._leaf_start(index)
count = min(self._segment_length(), length - leaf_start)
leaf_hashes = await self.source_func(leaf_start, count)
if length < self._segment_length():
return self.merkle.branch_and_root(leaf_hashes, index)
level = await self._level_for(length)
return self.merkle.branch_and_root_from_level(
level, leaf_hashes, index, self.depth_higher)
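A short sketch of verifying a branch produced by Merkle.branch_and_root(), following the logic of the commented-out root_from_proof() above and assuming double_sha256 from the hash module is in scope:

def verify_branch(leaf_hash, branch, index, expected_root, hash_func=double_sha256):
    h = leaf_hash
    for sibling in branch:
        # hash left/right depending on the bit of the index at this level
        h = hash_func(sibling + h) if index & 1 else hash_func(h + sibling)
        index >>= 1
    return h == expected_root

hashes = [double_sha256(bytes([i])) for i in range(5)]
branch, root = Merkle.branch_and_root(hashes, 3)
assert verify_branch(hashes[3], branch, 3, root)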


@ -1,135 +0,0 @@
import time
import math
from typing import Tuple
def calculate_elapsed(start) -> int:
return int((time.perf_counter() - start) * 1000)
def calculate_avg_percentiles(data) -> Tuple[int, int, int, int, int, int, int, int]:
if not data:
return 0, 0, 0, 0, 0, 0, 0, 0
data.sort()
size = len(data)
return (
int(sum(data) / size),
data[0],
data[math.ceil(size * .05) - 1],
data[math.ceil(size * .25) - 1],
data[math.ceil(size * .50) - 1],
data[math.ceil(size * .75) - 1],
data[math.ceil(size * .95) - 1],
data[-1]
)
def remove_select_list(sql) -> str:
return sql[sql.index('FROM'):]
class APICallMetrics:
def __init__(self, name):
self.name = name
# total requests received
self.receive_count = 0
self.cache_response_count = 0
# millisecond timings for query based responses
self.query_response_times = []
self.query_intrp_times = []
self.query_error_times = []
self.query_python_times = []
self.query_wait_times = []
self.query_sql_times = [] # aggregate total of multiple SQL calls made per request
self.individual_sql_times = [] # every SQL query run on server
# actual queries
self.errored_queries = set()
self.interrupted_queries = set()
def to_json(self):
return {
# total requests received
"receive_count": self.receive_count,
# sum of these is total responses made
"cache_response_count": self.cache_response_count,
"query_response_count": len(self.query_response_times),
"intrp_response_count": len(self.query_intrp_times),
"error_response_count": len(self.query_error_times),
# millisecond timings for non-cache responses
"response": calculate_avg_percentiles(self.query_response_times),
"interrupt": calculate_avg_percentiles(self.query_intrp_times),
"error": calculate_avg_percentiles(self.query_error_times),
# response, interrupt and error each also report the python, wait and sql stats:
"python": calculate_avg_percentiles(self.query_python_times),
"wait": calculate_avg_percentiles(self.query_wait_times),
"sql": calculate_avg_percentiles(self.query_sql_times),
# extended timings for individual sql executions
"individual_sql": calculate_avg_percentiles(self.individual_sql_times),
"individual_sql_count": len(self.individual_sql_times),
# actual queries
"errored_queries": list(self.errored_queries),
"interrupted_queries": list(self.interrupted_queries),
}
def start(self):
self.receive_count += 1
def cache_response(self):
self.cache_response_count += 1
def _add_query_timings(self, request_total_time, metrics):
if metrics and 'execute_query' in metrics:
sub_process_total = metrics[self.name][0]['total']
individual_query_times = [f['total'] for f in metrics['execute_query']]
aggregated_query_time = sum(individual_query_times)
self.individual_sql_times.extend(individual_query_times)
self.query_sql_times.append(aggregated_query_time)
self.query_python_times.append(sub_process_total - aggregated_query_time)
self.query_wait_times.append(request_total_time - sub_process_total)
@staticmethod
def _add_queries(query_set, metrics):
if metrics and 'execute_query' in metrics:
for execute_query in metrics['execute_query']:
if 'sql' in execute_query:
query_set.add(remove_select_list(execute_query['sql']))
def query_response(self, start, metrics):
self.query_response_times.append(calculate_elapsed(start))
self._add_query_timings(self.query_response_times[-1], metrics)
def query_interrupt(self, start, metrics):
self.query_intrp_times.append(calculate_elapsed(start))
self._add_queries(self.interrupted_queries, metrics)
self._add_query_timings(self.query_intrp_times[-1], metrics)
def query_error(self, start, metrics):
self.query_error_times.append(calculate_elapsed(start))
self._add_queries(self.errored_queries, metrics)
self._add_query_timings(self.query_error_times[-1], metrics)
class ServerLoadData:
def __init__(self):
self._apis = {}
def for_api(self, name) -> APICallMetrics:
if name not in self._apis:
self._apis[name] = APICallMetrics(name)
return self._apis[name]
def to_json_and_reset(self, status):
try:
return {
'api': {name: api.to_json() for name, api in self._apis.items()},
'status': status
}
finally:
self._apis = {}
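For illustration, the percentile summary returned by calculate_avg_percentiles() is (average, min, 5th, 25th, 50th, 75th, 95th, max), here over millisecond timings:

timings = [12, 7, 30, 9, 120, 15, 8, 22]
avg, lo, p5, p25, p50, p75, p95, hi = calculate_avg_percentiles(list(timings))
assert (lo, hi, avg) == (7, 120, 27)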


@ -1,289 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
"""Script-related classes and functions."""
from collections import namedtuple
from lbry.wallet.server.util import unpack_le_uint16_from, unpack_le_uint32_from, \
pack_le_uint16, pack_le_uint32
class EnumError(Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = {}
reverseLookup = {}
i = 0
uniqueNames = set()
uniqueValues = set()
for x in enumList:
if isinstance(x, tuple):
x, i = x
if not isinstance(x, str):
raise EnumError(f"enum name {x} not a string")
if not isinstance(i, int):
raise EnumError(f"enum value {i} not an integer")
if x in uniqueNames:
raise EnumError(f"enum name {x} not unique")
if i in uniqueValues:
raise EnumError(f"enum value {i} not unique")
uniqueNames.add(x)
uniqueValues.add(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
result = self.lookup.get(attr)
if result is None:
raise AttributeError(f'enumeration has no member {attr}')
return result
def whatis(self, value):
return self.reverseLookup[value]
class ScriptError(Exception):
"""Exception used for script errors."""
OpCodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1", 76),
"OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE",
"OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7", "OP_8",
"OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF",
"OP_ELSE", "OP_ENDIF", "OP_VERIFY", "OP_RETURN",
"OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP",
"OP_2OVER", "OP_2ROT", "OP_2SWAP", "OP_IFDUP", "OP_DEPTH", "OP_DROP",
"OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK",
"OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE",
"OP_INVERT", "OP_AND", "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY",
"OP_RESERVED1", "OP_RESERVED2",
"OP_1ADD", "OP_1SUB", "OP_2MUL", "OP_2DIV", "OP_NEGATE", "OP_ABS",
"OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV", "OP_MOD",
"OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR", "OP_NUMEQUAL",
"OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN", "OP_GREATERTHAN",
"OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN",
"OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160", "OP_HASH256",
"OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
"OP_NOP1",
"OP_CHECKLOCKTIMEVERIFY", "OP_CHECKSEQUENCEVERIFY"
])
# Paranoia to make it hard to create bad scripts
assert OpCodes.OP_DUP == 0x76
assert OpCodes.OP_HASH160 == 0xa9
assert OpCodes.OP_EQUAL == 0x87
assert OpCodes.OP_EQUALVERIFY == 0x88
assert OpCodes.OP_CHECKSIG == 0xac
assert OpCodes.OP_CHECKMULTISIG == 0xae
def _match_ops(ops, pattern):
if len(ops) != len(pattern):
return False
for op, pop in zip(ops, pattern):
if pop != op:
# -1 means 'data push', whose op is an (op, data) tuple
if pop == -1 and isinstance(op, tuple):
continue
return False
return True
class ScriptPubKey:
"""A class for handling a tx output script that gives conditions
necessary for spending.
"""
TO_ADDRESS_OPS = [OpCodes.OP_DUP, OpCodes.OP_HASH160, -1,
OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]
TO_P2SH_OPS = [OpCodes.OP_HASH160, -1, OpCodes.OP_EQUAL]
TO_PUBKEY_OPS = [-1, OpCodes.OP_CHECKSIG]
PayToHandlers = namedtuple('PayToHandlers', 'address script_hash pubkey '
'unspendable strange')
@classmethod
def pay_to(cls, handlers, script):
"""Parse a script, invoke the appropriate handler and
return the result.
One of the following handlers is invoked:
handlers.address(hash160)
handlers.script_hash(hash160)
handlers.pubkey(pubkey)
handlers.unspendable()
handlers.strange(script)
"""
try:
ops = Script.get_ops(script)
except ScriptError:
return handlers.unspendable()
match = _match_ops
if match(ops, cls.TO_ADDRESS_OPS):
return handlers.address(ops[2][-1])
if match(ops, cls.TO_P2SH_OPS):
return handlers.script_hash(ops[1][-1])
if match(ops, cls.TO_PUBKEY_OPS):
return handlers.pubkey(ops[0][-1])
if ops and ops[0] == OpCodes.OP_RETURN:
return handlers.unspendable()
return handlers.strange(script)
@classmethod
def P2SH_script(cls, hash160):
return (bytes([OpCodes.OP_HASH160])
+ Script.push_data(hash160)
+ bytes([OpCodes.OP_EQUAL]))
@classmethod
def P2PKH_script(cls, hash160):
return (bytes([OpCodes.OP_DUP, OpCodes.OP_HASH160])
+ Script.push_data(hash160)
+ bytes([OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]))
@classmethod
def validate_pubkey(cls, pubkey, req_compressed=False):
if isinstance(pubkey, (bytes, bytearray)):
if len(pubkey) == 33 and pubkey[0] in (2, 3):
return # Compressed
if len(pubkey) == 65 and pubkey[0] == 4:
if not req_compressed:
return
raise PubKeyError('uncompressed pubkeys are invalid')
raise PubKeyError(f'invalid pubkey {pubkey}')
@classmethod
def pubkey_script(cls, pubkey):
cls.validate_pubkey(pubkey)
return Script.push_data(pubkey) + bytes([OpCodes.OP_CHECKSIG])
@classmethod
def multisig_script(cls, m, pubkeys):
"""Returns the script for a pay-to-multisig transaction."""
n = len(pubkeys)
if not 1 <= m <= n <= 15:
raise ScriptError(f'{m:d} of {n:d} multisig script not possible')
for pubkey in pubkeys:
cls.validate_pubkey(pubkey, req_compressed=True)
# See https://bitcoin.org/en/developer-guide
# 2 of 3 is: OP_2 pubkey1 pubkey2 pubkey3 OP_3 OP_CHECKMULTISIG
return (bytes([OpCodes.OP_1 + m - 1])
+ b''.join(Script.push_data(pubkey) for pubkey in pubkeys)
+ bytes([OpCodes.OP_1 + n - 1, OpCodes.OP_CHECKMULTISIG]))
class Script:
@classmethod
def get_ops(cls, script):
ops = []
# The unpacks or script[n] below throw on truncated scripts
try:
n = 0
while n < len(script):
op = script[n]
n += 1
if op <= OpCodes.OP_PUSHDATA4:
# Raw bytes follow
if op < OpCodes.OP_PUSHDATA1:
dlen = op
elif op == OpCodes.OP_PUSHDATA1:
dlen = script[n]
n += 1
elif op == OpCodes.OP_PUSHDATA2:
dlen, = unpack_le_uint16_from(script[n: n + 2])
n += 2
else:
dlen, = unpack_le_uint32_from(script[n: n + 4])
n += 4
if n + dlen > len(script):
raise IndexError
op = (op, script[n:n + dlen])
n += dlen
ops.append(op)
except Exception:
# Truncated script; e.g. tx_hash
# ebc9fa1196a59e192352d76c0f6e73167046b9d37b8302b6bb6968dfd279b767
raise ScriptError('truncated script')
return ops
@classmethod
def push_data(cls, data):
"""Returns the opcodes to push the data on the stack."""
assert isinstance(data, (bytes, bytearray))
n = len(data)
if n < OpCodes.OP_PUSHDATA1:
return bytes([n]) + data
if n < 256:
return bytes([OpCodes.OP_PUSHDATA1, n]) + data
if n < 65536:
return bytes([OpCodes.OP_PUSHDATA2]) + pack_le_uint16(n) + data
return bytes([OpCodes.OP_PUSHDATA4]) + pack_le_uint32(n) + data
@classmethod
def opcode_name(cls, opcode):
if OpCodes.OP_0 < opcode < OpCodes.OP_PUSHDATA1:
return f'OP_{opcode:d}'
try:
return OpCodes.whatis(opcode)
except KeyError:
return f'OP_UNKNOWN:{opcode:d}'
@classmethod
def dump(cls, script):
# get_ops() returns a flat list whose items are either a bare opcode
# or an (opcode, data) tuple for data pushes
for op in cls.get_ops(script):
opcode, data = op if isinstance(op, tuple) else (op, None)
name = cls.opcode_name(opcode)
if data is None:
print(name)
else:
print(f'{name} {data.hex()} ({len(data):d} bytes)')
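A quick illustration using the classes above: build a P2PKH output script and parse it back through ScriptPubKey.pay_to():

h160 = bytes(20)                      # dummy hash160
script = ScriptPubKey.P2PKH_script(h160)
handlers = ScriptPubKey.PayToHandlers(
    address=lambda h: ('address', h),
    script_hash=lambda h: ('p2sh', h),
    pubkey=lambda pk: ('pubkey', pk),
    unspendable=lambda: ('unspendable',),
    strange=lambda s: ('strange', s),
)
assert ScriptPubKey.pay_to(handlers, script) == ('address', h160)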


@ -1,91 +0,0 @@
import signal
import logging
import asyncio
from concurrent.futures.thread import ThreadPoolExecutor
import typing
import lbry
from lbry.wallet.server.mempool import MemPool
from lbry.wallet.server.block_processor import BlockProcessor
from lbry.wallet.server.leveldb import LevelDB
from lbry.wallet.server.session import LBRYSessionManager
from lbry.prometheus import PrometheusServer
class Server:
def __init__(self, env):
self.env = env
self.log = logging.getLogger(__name__).getChild(self.__class__.__name__)
self.shutdown_event = asyncio.Event()
self.cancellable_tasks = []
self.daemon = daemon = env.coin.DAEMON(env.coin, env.daemon_url)
self.db = db = LevelDB(env)
self.bp = bp = BlockProcessor(env, db, daemon, self.shutdown_event)
self.prometheus_server: typing.Optional[PrometheusServer] = None
self.session_mgr = LBRYSessionManager(
env, db, bp, daemon, self.shutdown_event
)
self._indexer_task = None
async def start(self):
env = self.env
min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
self.log.info(f'software version: {lbry.__version__}')
self.log.info(f'supported protocol versions: {min_str}-{max_str}')
self.log.info(f'event loop policy: {env.loop_policy}')
self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks')
await self.daemon.height()
def _start_cancellable(run, *args):
_flag = asyncio.Event()
self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag)))
return _flag.wait()
await self.start_prometheus()
if self.env.udp_port:
await self.bp.status_server.start(
0, bytes.fromhex(self.bp.coin.GENESIS_HASH)[::-1], self.env.country,
self.env.host, self.env.udp_port, self.env.allow_lan_udp
)
await _start_cancellable(self.bp.fetch_and_process_blocks)
await self.db.populate_header_merkle_cache()
await _start_cancellable(self.bp.mempool.keep_synchronized)
await _start_cancellable(self.session_mgr.serve, self.bp.mempool)
async def stop(self):
for task in reversed(self.cancellable_tasks):
task.cancel()
await asyncio.wait(self.cancellable_tasks)
if self.prometheus_server:
await self.prometheus_server.stop()
self.prometheus_server = None
self.shutdown_event.set()
await self.daemon.close()
def run(self):
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(self.env.max_query_workers, thread_name_prefix='hub-worker')
loop.set_default_executor(executor)
def __exit():
raise SystemExit()
try:
loop.add_signal_handler(signal.SIGINT, __exit)
loop.add_signal_handler(signal.SIGTERM, __exit)
loop.run_until_complete(self.start())
loop.run_until_complete(self.shutdown_event.wait())
except (SystemExit, KeyboardInterrupt):
pass
finally:
loop.run_until_complete(self.stop())
executor.shutdown(True)
async def start_prometheus(self):
if not self.prometheus_server and self.env.prometheus_port:
self.prometheus_server = PrometheusServer()
await self.prometheus_server.start("0.0.0.0", self.env.prometheus_port)
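A standalone sketch of the readiness pattern behind _start_cancellable(), not the actual Server API: each long-running coroutine receives an asyncio.Event and sets it once initialised, so start-up can await readiness without awaiting the task itself.

import asyncio

async def worker(started: asyncio.Event):
    started.set()               # signal readiness once initialisation is done
    await asyncio.sleep(3600)   # then keep serving until cancelled

async def main():
    tasks = []
    def start_cancellable(run, *args):
        flag = asyncio.Event()
        tasks.append(asyncio.ensure_future(run(*args, flag)))
        return flag.wait()
    await start_cancellable(worker)     # resumes once worker() sets the flag
    for task in reversed(tasks):
        task.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)

asyncio.run(main())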

File diff suppressed because it is too large


@ -1,626 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
"""Transaction-related classes and functions."""
import typing
from collections import namedtuple
from lbry.wallet.server.hash import sha256, double_sha256, hash_to_hex_str
from lbry.wallet.server.script import OpCodes
from lbry.wallet.server.util import (
unpack_le_int32_from, unpack_le_int64_from, unpack_le_uint16_from,
unpack_le_uint32_from, unpack_le_uint64_from, pack_le_int32, pack_varint,
pack_le_uint32, pack_le_int64, pack_varbytes,
)
ZERO = bytes(32)
MINUS_1 = 4294967295
class Tx(typing.NamedTuple):
version: int
inputs: typing.List['TxInput']
outputs: typing.List['TxOutput']
locktime: int
raw: bytes
class TxInput(typing.NamedTuple):
prev_hash: bytes
prev_idx: int
script: bytes
sequence: int
"""Class representing a transaction input."""
def __str__(self):
script = self.script.hex()
prev_hash = hash_to_hex_str(self.prev_hash)
return (f"Input({prev_hash}, {self.prev_idx:d}, script={script}, sequence={self.sequence:d})")
def is_generation(self):
"""Test if an input is generation/coinbase like"""
return self.prev_idx == MINUS_1 and self.prev_hash == ZERO
def serialize(self):
return b''.join((
self.prev_hash,
pack_le_uint32(self.prev_idx),
pack_varbytes(self.script),
pack_le_uint32(self.sequence),
))
class TxOutput(typing.NamedTuple):
value: int
pk_script: bytes
def serialize(self):
return b''.join((
pack_le_int64(self.value),
pack_varbytes(self.pk_script),
))
class Deserializer:
"""Deserializes blocks into transactions.
External entry points are read_tx(), read_tx_and_hash(),
read_tx_and_vsize() and read_block().
This code is performance sensitive as it is executed 100s of
millions of times during sync.
"""
TX_HASH_FN = staticmethod(double_sha256)
def __init__(self, binary, start=0):
assert isinstance(binary, bytes)
self.binary = binary
self.binary_length = len(binary)
self.cursor = start
self.flags = 0
def read_tx(self):
"""Return a deserialized transaction."""
start = self.cursor
return Tx(
self._read_le_int32(), # version
self._read_inputs(), # inputs
self._read_outputs(), # outputs
self._read_le_uint32(), # locktime
self.binary[start:self.cursor],
)
def read_tx_and_hash(self):
"""Return a (deserialized TX, tx_hash) pair.
The hash needs to be reversed for human display; for efficiency
we process it in the natural serialized order.
"""
start = self.cursor
return self.read_tx(), self.TX_HASH_FN(self.binary[start:self.cursor])
def read_tx_and_vsize(self):
"""Return a (deserialized TX, vsize) pair."""
return self.read_tx(), self.binary_length
def read_tx_block(self):
"""Returns a list of (deserialized_tx, tx_hash) pairs."""
read = self.read_tx_and_hash
# Some coins have excess data beyond the end of the transactions
return [read() for _ in range(self._read_varint())]
def _read_inputs(self):
read_input = self._read_input
return [read_input() for i in range(self._read_varint())]
def _read_input(self):
return TxInput(
self._read_nbytes(32), # prev_hash
self._read_le_uint32(), # prev_idx
self._read_varbytes(), # script
self._read_le_uint32() # sequence
)
def _read_outputs(self):
read_output = self._read_output
return [read_output() for i in range(self._read_varint())]
def _read_output(self):
return TxOutput(
self._read_le_int64(), # value
self._read_varbytes(), # pk_script
)
def _read_byte(self):
cursor = self.cursor
self.cursor += 1
return self.binary[cursor]
def _read_nbytes(self, n):
cursor = self.cursor
self.cursor = end = cursor + n
assert self.binary_length >= end
return self.binary[cursor:end]
def _read_varbytes(self):
return self._read_nbytes(self._read_varint())
def _read_varint(self):
n = self.binary[self.cursor]
self.cursor += 1
if n < 253:
return n
if n == 253:
return self._read_le_uint16()
if n == 254:
return self._read_le_uint32()
return self._read_le_uint64()
def _read_le_int32(self):
result, = unpack_le_int32_from(self.binary, self.cursor)
self.cursor += 4
return result
def _read_le_int64(self):
result, = unpack_le_int64_from(self.binary, self.cursor)
self.cursor += 8
return result
def _read_le_uint16(self):
result, = unpack_le_uint16_from(self.binary, self.cursor)
self.cursor += 2
return result
def _read_le_uint32(self):
result, = unpack_le_uint32_from(self.binary, self.cursor)
self.cursor += 4
return result
def _read_le_uint64(self):
result, = unpack_le_uint64_from(self.binary, self.cursor)
self.cursor += 8
return result
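A worked example of the varint encoding handled by _read_varint(): values below 253 occupy a single byte, while prefixes 0xfd, 0xfe and 0xff introduce 2-, 4- and 8-byte little-endian integers.

d = Deserializer(bytes([0xfd, 0xe8, 0x03]))   # 0xfd prefix followed by 1000 as a LE uint16
assert d._read_varint() == 1000
assert Deserializer(bytes([0x10]))._read_varint() == 16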
class TxSegWit(namedtuple("Tx", "version marker flag inputs outputs "
"witness locktime raw")):
"""Class representing a SegWit transaction."""
class DeserializerSegWit(Deserializer):
# https://bitcoincore.org/en/segwit_wallet_dev/#transaction-serialization
def _read_witness(self, fields):
read_witness_field = self._read_witness_field
return [read_witness_field() for i in range(fields)]
def _read_witness_field(self):
read_varbytes = self._read_varbytes
return [read_varbytes() for i in range(self._read_varint())]
def _read_tx_parts(self):
"""Return a (deserialized TX, tx_hash, vsize) tuple."""
start = self.cursor
marker = self.binary[self.cursor + 4]
if marker:
tx = super().read_tx()
tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor])
return tx, tx_hash, self.binary_length
# Ugh, this is nasty.
version = self._read_le_int32()
orig_ser = self.binary[start:self.cursor]
marker = self._read_byte()
flag = self._read_byte()
start = self.cursor
inputs = self._read_inputs()
outputs = self._read_outputs()
orig_ser += self.binary[start:self.cursor]
base_size = self.cursor - start
witness = self._read_witness(len(inputs))
start = self.cursor
locktime = self._read_le_uint32()
orig_ser += self.binary[start:self.cursor]
vsize = (3 * base_size + self.binary_length) // 4
return TxSegWit(version, marker, flag, inputs, outputs, witness,
locktime, orig_ser), self.TX_HASH_FN(orig_ser), vsize
def read_tx(self):
return self._read_tx_parts()[0]
def read_tx_and_hash(self):
tx, tx_hash, vsize = self._read_tx_parts()
return tx, tx_hash
def read_tx_and_vsize(self):
tx, tx_hash, vsize = self._read_tx_parts()
return tx, vsize
class DeserializerAuxPow(Deserializer):
VERSION_AUXPOW = (1 << 8)
def read_header(self, height, static_header_size):
"""Return the AuxPow block header bytes"""
start = self.cursor
version = self._read_le_uint32()
if version & self.VERSION_AUXPOW:
# We are going to calculate the block size then read it as bytes
self.cursor = start
self.cursor += static_header_size # Block normal header
self.read_tx() # AuxPow transaction
self.cursor += 32 # Parent block hash
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Merkle branch
self.cursor += 4 # Index
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Chain merkle branch
self.cursor += 4 # Chain index
self.cursor += 80 # Parent block header
header_end = self.cursor
else:
header_end = static_header_size
self.cursor = start
return self._read_nbytes(header_end)
class DeserializerAuxPowSegWit(DeserializerSegWit, DeserializerAuxPow):
pass
class DeserializerEquihash(Deserializer):
def read_header(self, height, static_header_size):
"""Return the block header bytes"""
start = self.cursor
# We are going to calculate the block size then read it as bytes
self.cursor += static_header_size
solution_size = self._read_varint()
self.cursor += solution_size
header_end = self.cursor
self.cursor = start
return self._read_nbytes(header_end)
class DeserializerEquihashSegWit(DeserializerSegWit, DeserializerEquihash):
pass
class TxJoinSplit(namedtuple("Tx", "version inputs outputs locktime")):
"""Class representing a JoinSplit transaction."""
class DeserializerZcash(DeserializerEquihash):
def read_tx(self):
header = self._read_le_uint32()
overwintered = ((header >> 31) == 1)
if overwintered:
version = header & 0x7fffffff
self.cursor += 4 # versionGroupId
else:
version = header
is_overwinter_v3 = version == 3
is_sapling_v4 = version == 4
base_tx = TxJoinSplit(
version,
self._read_inputs(), # inputs
self._read_outputs(), # outputs
self._read_le_uint32() # locktime
)
if is_overwinter_v3 or is_sapling_v4:
self.cursor += 4 # expiryHeight
has_shielded = False
if is_sapling_v4:
self.cursor += 8 # valueBalance
shielded_spend_size = self._read_varint()
self.cursor += shielded_spend_size * 384 # vShieldedSpend
shielded_output_size = self._read_varint()
self.cursor += shielded_output_size * 948 # vShieldedOutput
has_shielded = shielded_spend_size > 0 or shielded_output_size > 0
if base_tx.version >= 2:
joinsplit_size = self._read_varint()
if joinsplit_size > 0:
joinsplit_desc_len = 1506 + (192 if is_sapling_v4 else 296)
# JSDescription
self.cursor += joinsplit_size * joinsplit_desc_len
self.cursor += 32 # joinSplitPubKey
self.cursor += 64 # joinSplitSig
if is_sapling_v4 and has_shielded:
self.cursor += 64 # bindingSig
return base_tx
class TxTime(namedtuple("Tx", "version time inputs outputs locktime")):
"""Class representing transaction that has a time field."""
class DeserializerTxTime(Deserializer):
def read_tx(self):
return TxTime(
self._read_le_int32(), # version
self._read_le_uint32(), # time
self._read_inputs(), # inputs
self._read_outputs(), # outputs
self._read_le_uint32(), # locktime
)
class DeserializerReddcoin(Deserializer):
def read_tx(self):
version = self._read_le_int32()
inputs = self._read_inputs()
outputs = self._read_outputs()
locktime = self._read_le_uint32()
if version > 1:
time = self._read_le_uint32()
else:
time = 0
return TxTime(version, time, inputs, outputs, locktime)
class DeserializerTxTimeAuxPow(DeserializerTxTime):
VERSION_AUXPOW = (1 << 8)
def is_merged_block(self):
start = self.cursor
self.cursor = 0
version = self._read_le_uint32()
self.cursor = start
if version & self.VERSION_AUXPOW:
return True
return False
def read_header(self, height, static_header_size):
"""Return the AuxPow block header bytes"""
start = self.cursor
version = self._read_le_uint32()
if version & self.VERSION_AUXPOW:
# We are going to calculate the block size then read it as bytes
self.cursor = start
self.cursor += static_header_size # Block normal header
self.read_tx() # AuxPow transaction
self.cursor += 32 # Parent block hash
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Merkle branch
self.cursor += 4 # Index
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Chain merkle branch
self.cursor += 4 # Chain index
self.cursor += 80 # Parent block header
header_end = self.cursor
else:
header_end = static_header_size
self.cursor = start
return self._read_nbytes(header_end)
class DeserializerBitcoinAtom(DeserializerSegWit):
FORK_BLOCK_HEIGHT = 505888
def read_header(self, height, static_header_size):
"""Return the block header bytes"""
header_len = static_header_size
if height >= self.FORK_BLOCK_HEIGHT:
header_len += 4 # flags
return self._read_nbytes(header_len)
class DeserializerGroestlcoin(DeserializerSegWit):
TX_HASH_FN = staticmethod(sha256)
class TxInputTokenPay(TxInput):
"""Class representing a TokenPay transaction input."""
OP_ANON_MARKER = 0xb9
# 2byte marker (cpubkey + sigc + sigr)
MIN_ANON_IN_SIZE = 2 + (33 + 32 + 32)
def _is_anon_input(self):
return (len(self.script) >= self.MIN_ANON_IN_SIZE and
self.script[0] == OpCodes.OP_RETURN and
self.script[1] == self.OP_ANON_MARKER)
def is_generation(self):
# Transactions coming in from stealth addresses are seen by
# the blockchain as newly minted coins. The reverse, where coins
# are sent TO a stealth address, are seen by the blockchain as
# a coin burn.
if self._is_anon_input():
return True
return super().is_generation()
class TxInputTokenPayStealth(
namedtuple("TxInput", "keyimage ringsize script sequence")):
"""Class representing a TokenPay stealth transaction input."""
def __str__(self):
script = self.script.hex()
keyimage = bytes(self.keyimage).hex()
return (f"Input({keyimage}, {self.ringsize[1]:d}, script={script}, sequence={self.sequence:d})")
def is_generation(self):
return True
def serialize(self):
return b''.join((
self.keyimage,
self.ringsize,
pack_varbytes(self.script),
pack_le_uint32(self.sequence),
))
class DeserializerTokenPay(DeserializerTxTime):
def _read_input(self):
txin = TxInputTokenPay(
self._read_nbytes(32), # prev_hash
self._read_le_uint32(), # prev_idx
self._read_varbytes(), # script
self._read_le_uint32(), # sequence
)
if txin._is_anon_input():
# Not sure if this is actually needed, and seems
# extra work for no immediate benefit, but it at
# least correctly represents a stealth input
raw = txin.serialize()
deserializer = Deserializer(raw)
txin = TxInputTokenPayStealth(
deserializer._read_nbytes(33), # keyimage
deserializer._read_nbytes(3), # ringsize
deserializer._read_varbytes(), # script
deserializer._read_le_uint32() # sequence
)
return txin
# Decred
class TxInputDcr(namedtuple("TxInput", "prev_hash prev_idx tree sequence")):
"""Class representing a Decred transaction input."""
def __str__(self):
prev_hash = hash_to_hex_str(self.prev_hash)
return (f"Input({prev_hash}, {self.prev_idx:d}, tree={self.tree}, sequence={self.sequence:d})")
def is_generation(self):
"""Test if an input is generation/coinbase like"""
return self.prev_idx == MINUS_1 and self.prev_hash == ZERO
class TxOutputDcr(namedtuple("TxOutput", "value version pk_script")):
"""Class representing a Decred transaction output."""
pass
class TxDcr(namedtuple("Tx", "version inputs outputs locktime expiry "
"witness")):
"""Class representing a Decred transaction."""
class DeserializerDecred(Deserializer):
@staticmethod
def blake256(data):
from blake256.blake256 import blake_hash
return blake_hash(data)
@staticmethod
def blake256d(data):
from blake256.blake256 import blake_hash
return blake_hash(blake_hash(data))
def read_tx(self):
return self._read_tx_parts(produce_hash=False)[0]
def read_tx_and_hash(self):
tx, tx_hash, vsize = self._read_tx_parts()
return tx, tx_hash
def read_tx_and_vsize(self):
tx, tx_hash, vsize = self._read_tx_parts(produce_hash=False)
return tx, vsize
def read_tx_block(self):
"""Returns a list of (deserialized_tx, tx_hash) pairs."""
read = self.read_tx_and_hash
txs = [read() for _ in range(self._read_varint())]
stxs = [read() for _ in range(self._read_varint())]
return txs + stxs
def read_tx_tree(self):
"""Returns a list of deserialized_tx without tx hashes."""
read_tx = self.read_tx
return [read_tx() for _ in range(self._read_varint())]
def _read_input(self):
return TxInputDcr(
self._read_nbytes(32), # prev_hash
self._read_le_uint32(), # prev_idx
self._read_byte(), # tree
self._read_le_uint32(), # sequence
)
def _read_output(self):
return TxOutputDcr(
self._read_le_int64(), # value
self._read_le_uint16(), # version
self._read_varbytes(), # pk_script
)
def _read_witness(self, fields):
read_witness_field = self._read_witness_field
assert fields == self._read_varint()
return [read_witness_field() for _ in range(fields)]
def _read_witness_field(self):
value_in = self._read_le_int64()
block_height = self._read_le_uint32()
block_index = self._read_le_uint32()
script = self._read_varbytes()
return value_in, block_height, block_index, script
def _read_tx_parts(self, produce_hash=True):
start = self.cursor
version = self._read_le_int32()
inputs = self._read_inputs()
outputs = self._read_outputs()
locktime = self._read_le_uint32()
expiry = self._read_le_uint32()
end_prefix = self.cursor
witness = self._read_witness(len(inputs))
if produce_hash:
# TxSerializeNoWitness << 16 == 0x10000
no_witness_header = pack_le_uint32(0x10000 | (version & 0xffff))
prefix_tx = no_witness_header + self.binary[start+4:end_prefix]
tx_hash = self.blake256(prefix_tx)
else:
tx_hash = None
return TxDcr(
version,
inputs,
outputs,
locktime,
expiry,
witness
), tx_hash, self.cursor - start
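# Editor's illustration (not part of this file): Decred keeps the serialization type in the
# upper 16 bits of the version word, so the "no witness" prefix header hashed above is, for a
# version-1 transaction:
import struct
assert struct.pack('<I', 0x10000 | (1 & 0xffff)) == b'\x01\x00\x01\x00'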


@ -1,361 +0,0 @@
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
"""Miscellaneous utility classes and functions."""
import array
import inspect
from ipaddress import ip_address
import logging
import re
import sys
from collections import Container, Mapping
from struct import pack, Struct
# Logging utilities
class ConnectionLogger(logging.LoggerAdapter):
"""Prepends a connection identifier to a logging message."""
def process(self, msg, kwargs):
conn_id = self.extra.get('conn_id', 'unknown')
return f'[{conn_id}] {msg}', kwargs
class CompactFormatter(logging.Formatter):
"""Strips the module from the logger name to leave the class only."""
def format(self, record):
record.name = record.name.rpartition('.')[-1]
return super().format(record)
def make_logger(name, *, handler, level):
"""Return the root ElectrumX logger."""
logger = logging.getLogger(name)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.propagate = False
return logger
def class_logger(path, classname):
"""Return a hierarchical logger for a class."""
return logging.getLogger(path).getChild(classname)
# Method decorator. To be used for calculations that will always
# deliver the same result. The method cannot take any arguments
# and should be accessed as an attribute.
class cachedproperty:
def __init__(self, f):
self.f = f
def __get__(self, obj, type):
obj = obj or type
value = self.f(obj)
setattr(obj, self.f.__name__, value)
return value
def formatted_time(t, sep=' '):
"""Return a number of seconds as a string in days, hours, mins and
maybe secs."""
t = int(t)
fmts = (('{:d}d', 86400), ('{:02d}h', 3600), ('{:02d}m', 60))
parts = []
for fmt, n in fmts:
val = t // n
if parts or val:
parts.append(fmt.format(val))
t %= n
if len(parts) < 3:
parts.append(f'{t:02d}s')
return sep.join(parts)
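# Editor's illustration (not part of this file), assuming the formatted_time() defined above:
assert formatted_time(90061) == '1d 01h 01m'       # seconds are dropped once three parts exist
assert formatted_time(59) == '59s'
assert formatted_time(3661, sep=':') == '01h:01m:01s'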
def deep_getsizeof(obj):
"""Find the memory footprint of a Python object.
Based on code from code.tutsplus.com: http://goo.gl/fZ0DXK
This is a recursive function that drills down a Python object graph
like a dictionary holding nested dictionaries with lists of lists
and tuples and sets.
The sys.getsizeof function does a shallow size of only. It counts each
object inside a container as pointer only regardless of how big it
really is.
"""
ids = set()
def size(o):
if id(o) in ids:
return 0
r = sys.getsizeof(o)
ids.add(id(o))
if isinstance(o, (str, bytes, bytearray, array.array)):
return r
if isinstance(o, Mapping):
return r + sum(size(k) + size(v) for k, v in o.items())
if isinstance(o, Container):
return r + sum(size(x) for x in o)
return r
return size(obj)
def subclasses(base_class, strict=True):
"""Return a list of subclasses of base_class in its module."""
def select(obj):
return (inspect.isclass(obj) and issubclass(obj, base_class) and
(not strict or obj != base_class))
pairs = inspect.getmembers(sys.modules[base_class.__module__], select)
return [pair[1] for pair in pairs]
def chunks(items, size):
"""Break up items, an iterable, into chunks of length size."""
for i in range(0, len(items), size):
yield items[i: i + size]
def resolve_limit(limit):
if limit is None:
return -1
assert isinstance(limit, int) and limit >= 0
return limit
def bytes_to_int(be_bytes):
"""Interprets a big-endian sequence of bytes as an integer"""
return int.from_bytes(be_bytes, 'big')
def int_to_bytes(value):
"""Converts an integer to a big-endian sequence of bytes"""
return value.to_bytes((value.bit_length() + 7) // 8, 'big')
def increment_byte_string(bs):
"""Return the lexicographically next byte string of the same length.
Return None if there is none (when the input is all 0xff bytes)."""
for n in range(1, len(bs) + 1):
if bs[-n] != 0xff:
return bs[:-n] + bytes([bs[-n] + 1]) + bytes(n - 1)
return None
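# Editor's illustration (not part of this file), assuming the increment_byte_string() defined above:
assert increment_byte_string(b'\x00\xff') == b'\x01\x00'
assert increment_byte_string(b'\xff\xff') is None   # all-0xff has no successor of the same length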
class LogicalFile:
"""A logical binary file split across several separate files on disk."""
def __init__(self, prefix, digits, file_size):
digit_fmt = f'{{:0{digits:d}d}}'
self.filename_fmt = prefix + digit_fmt
self.file_size = file_size
def read(self, start, size=-1):
"""Read up to size bytes from the virtual file, starting at offset
start, and return them.
If size is -1 all bytes are read."""
parts = []
while size != 0:
try:
with self.open_file(start, False) as f:
part = f.read(size)
if not part:
break
except FileNotFoundError:
break
parts.append(part)
start += len(part)
if size > 0:
size -= len(part)
return b''.join(parts)
def write(self, start, b):
"""Write the bytes-like object, b, to the underlying virtual file."""
while b:
size = min(len(b), self.file_size - (start % self.file_size))
with self.open_file(start, True) as f:
f.write(b if size == len(b) else b[:size])
b = b[size:]
start += size
def open_file(self, start, create):
"""Open the virtual file and seek to start. Return a file handle.
Raise FileNotFoundError if the file does not exist and create
is False.
"""
file_num, offset = divmod(start, self.file_size)
filename = self.filename_fmt.format(file_num)
f = open_file(filename, create)
f.seek(offset)
return f
def open_file(filename, create=False):
"""Open the file name. Return its handle."""
try:
return open(filename, 'rb+')
except FileNotFoundError:
if create:
return open(filename, 'wb+')
raise
def open_truncate(filename):
"""Open the file name. Return its handle."""
return open(filename, 'wb+')
def address_string(address):
"""Return an address as a correctly formatted string."""
fmt = '{}:{:d}'
host, port = address
try:
host = ip_address(host)
except ValueError:
pass
else:
if host.version == 6:
fmt = '[{}]:{:d}'
return fmt.format(host, port)
# See http://stackoverflow.com/questions/2532053/validate-a-hostname-string
# Note underscores are valid in domain names, but strictly invalid in host
# names. We ignore that distinction.
SEGMENT_REGEX = re.compile("(?!-)[A-Z_\\d-]{1,63}(?<!-)$", re.IGNORECASE)
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
# strip exactly one dot from the right, if present
if hostname and hostname[-1] == ".":
hostname = hostname[:-1]
return all(SEGMENT_REGEX.match(x) for x in hostname.split("."))
def protocol_tuple(s):
"""Converts a protocol version number, such as "1.0" to a tuple (1, 0).
If the version number is bad, (0, ) indicating version 0 is returned."""
try:
return tuple(int(part) for part in s.split('.'))
except Exception:
return (0, )
def version_string(ptuple):
"""Convert a version tuple such as (1, 2) to "1.2".
There is always at least one dot, so (1, ) becomes "1.0"."""
while len(ptuple) < 2:
ptuple += (0, )
return '.'.join(str(p) for p in ptuple)
def protocol_version(client_req, min_tuple, max_tuple):
"""Given a client's protocol version string, return a pair of
protocol tuples:
(negotiated version, client min request)
If the request is unsupported, the negotiated protocol tuple is
None.
"""
if client_req is None:
client_min = client_max = min_tuple
else:
if isinstance(client_req, list) and len(client_req) == 2:
client_min, client_max = client_req
else:
client_min = client_max = client_req
client_min = protocol_tuple(client_min)
client_max = protocol_tuple(client_max)
result = min(client_max, max_tuple)
if result < max(client_min, min_tuple) or result == (0, ):
result = None
return result, client_min
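# Editor's illustration (not part of this file), assuming protocol_tuple() and protocol_version() above:
# a client advertising ["1.1", "1.4"] against a server supporting 1.1-1.5 negotiates 1.4,
# while a request below the server minimum yields no negotiated version.
assert protocol_version(["1.1", "1.4"], (1, 1), (1, 5)) == ((1, 4), (1, 1))
assert protocol_version("0.9", (1, 1), (1, 5)) == (None, (0, 9))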
struct_le_i = Struct('<i')
struct_le_q = Struct('<q')
struct_le_H = Struct('<H')
struct_le_I = Struct('<I')
struct_le_Q = Struct('<Q')
struct_be_H = Struct('>H')
struct_be_I = Struct('>I')
structB = Struct('B')
unpack_le_int32_from = struct_le_i.unpack_from
unpack_le_int64_from = struct_le_q.unpack_from
unpack_le_uint16_from = struct_le_H.unpack_from
unpack_le_uint32_from = struct_le_I.unpack_from
unpack_le_uint64_from = struct_le_Q.unpack_from
unpack_be_uint16_from = struct_be_H.unpack_from
unpack_be_uint32_from = struct_be_I.unpack_from
unpack_be_uint64 = lambda x: int.from_bytes(x, byteorder='big')
pack_le_int32 = struct_le_i.pack
pack_le_int64 = struct_le_q.pack
pack_le_uint16 = struct_le_H.pack
pack_le_uint32 = struct_le_I.pack
pack_be_uint64 = lambda x: x.to_bytes(8, byteorder='big')
pack_be_uint16 = lambda x: x.to_bytes(2, byteorder='big')
pack_be_uint32 = struct_be_I.pack
pack_byte = structB.pack
hex_to_bytes = bytes.fromhex
def pack_varint(n):
if n < 253:
return pack_byte(n)
if n < 65536:
return pack_byte(253) + pack_le_uint16(n)
if n < 4294967296:
return pack_byte(254) + pack_le_uint32(n)
return pack_byte(255) + pack_le_uint64(n)
def pack_varbytes(data):
return pack_varint(len(data)) + data
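# Editor's illustration (not part of this file), assuming the pack_varint()/pack_varbytes() defined above:
assert pack_varint(100) == b'\x64'            # values < 253 fit in a single byte
assert pack_varint(300) == b'\xfd\x2c\x01'    # 0xfd marker followed by a little-endian uint16
assert pack_varbytes(b'abc') == b'\x03abc'    # compact-size length prefix, then the payload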


@ -1,3 +0,0 @@
# need this to avoid circular import
PROTOCOL_MIN = (0, 54, 0)
PROTOCOL_MAX = (0, 199, 0)


@ -1,55 +0,0 @@
import asyncio
from weakref import WeakSet
from aiohttp.web import Application, AppRunner, WebSocketResponse, TCPSite
from aiohttp.http_websocket import WSMsgType, WSCloseCode
class AdminWebSocket:
def __init__(self, manager):
self.manager = manager
self.app = Application()
self.app['websockets'] = WeakSet()
self.app.router.add_get('/', self.on_connect)
self.app.on_shutdown.append(self.on_shutdown)
self.runner = AppRunner(self.app)
async def on_status(self, _):
if not self.app['websockets']:
return
self.send_message({
'type': 'status',
'height': self.manager.daemon.cached_height(),
})
def send_message(self, msg):
for web_socket in self.app['websockets']:
asyncio.create_task(web_socket.send_json(msg))
async def start(self):
await self.runner.setup()
await TCPSite(self.runner, self.manager.env.websocket_host, self.manager.env.websocket_port).start()
async def stop(self):
await self.runner.cleanup()
async def on_connect(self, request):
web_socket = WebSocketResponse()
await web_socket.prepare(request)
self.app['websockets'].add(web_socket)
try:
async for msg in web_socket:
if msg.type == WSMsgType.TEXT:
await self.on_status(None)
elif msg.type == WSMsgType.ERROR:
print('web socket connection closed with exception %s' %
web_socket.exception())
finally:
self.app['websockets'].discard(web_socket)
return web_socket
@staticmethod
async def on_shutdown(app):
for web_socket in set(app['websockets']):
await web_socket.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown')


@ -145,6 +145,14 @@ class Input(InputOutput):
         script = InputScript.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
         return cls(txo.ref, script)
+    @classmethod
+    def spend_time_lock(cls, txo: 'Output', script_source: bytes) -> 'Input':
+        """ Create an input to spend time lock script."""
+        script = InputScript.redeem_time_lock_script_hash(
+            cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY, script_source=script_source
+        )
+        return cls(txo.ref, script)
     @property
     def amount(self) -> int:
         """ Amount this input adds to the transaction. """
@ -710,8 +718,11 @@ class Transaction:
             stream.write_compact_size(len(self._inputs))
             for i, txin in enumerate(self._inputs):
                 if signing_input == i:
-                    assert txin.txo_ref.txo is not None
-                    txin.serialize_to(stream, txin.txo_ref.txo.script.source)
+                    if txin.script.is_script_hash:
+                        txin.serialize_to(stream, txin.script.values['script'].source)
+                    else:
+                        assert txin.txo_ref.txo is not None
+                        txin.serialize_to(stream, txin.txo_ref.txo.script.source)
                 else:
                     txin.serialize_to(stream, b'')
             self._serialize_outputs(stream)
@ -854,16 +865,19 @@ class Transaction:
     def signature_hash_type(hash_type):
         return hash_type
-    async def sign(self, funding_accounts: Iterable['Account']):
+    async def sign(self, funding_accounts: Iterable['Account'], extra_keys: dict = None):
         self._reset()
         ledger, wallet = self.ensure_all_have_same_ledger_and_wallet(funding_accounts)
         for i, txi in enumerate(self._inputs):
             assert txi.script is not None
             assert txi.txo_ref.txo is not None
             txo_script = txi.txo_ref.txo.script
-            if txo_script.is_pay_pubkey_hash:
-                address = ledger.hash160_to_address(txo_script.values['pubkey_hash'])
-                private_key = await ledger.get_private_key_for_address(wallet, address)
+            if txo_script.is_pay_pubkey_hash or txo_script.is_pay_script_hash:
+                if 'pubkey_hash' in txo_script.values:
+                    address = ledger.hash160_to_address(txo_script.values.get('pubkey_hash', ''))
+                    private_key = await ledger.get_private_key_for_address(wallet, address)
+                else:
+                    private_key = next(iter(extra_keys.values()))
                 assert private_key is not None, 'Cannot find private key for signing output.'
                 tx = self._serialize_for_signature(i)
                 txi.script.values['signature'] = \
@ -937,6 +951,15 @@ class Transaction:
         data = Output.add_purchase_data(Purchase(claim_id))
         return cls.create([], [payment, data], funding_accounts, change_account)
+    @classmethod
+    async def spend_time_lock(cls, time_locked_txo: Output, script: bytes, account: 'Account'):
+        txi = Input.spend_time_lock(time_locked_txo, script)
+        txi.sequence = 0xFFFFFFFE
+        tx = await cls.create([txi], [], [account], account, sign=False)
+        tx.locktime = txi.script.values['script'].values['height']
+        tx._reset()
+        return tx
     @property
     def my_inputs(self):
         for txi in self.inputs:


@ -23,7 +23,7 @@ class SPVPing(NamedTuple):
     pad_bytes: bytes
     def encode(self):
-        return struct.pack(b'!lB64s', *self)
+        return struct.pack(b'!lB64s', *self)  # pylint: disable=not-an-iterable
     @staticmethod
     def make() -> bytes:
@ -49,7 +49,7 @@ class SPVPong(NamedTuple):
     country: int
     def encode(self):
-        return struct.pack(PONG_ENCODING, *self)
+        return struct.pack(PONG_ENCODING, *self)  # pylint: disable=not-an-iterable
     @staticmethod
     def encode_address(address: str):
@ -110,6 +110,7 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):
         self._min_delay = 1 / throttle_reqs_per_sec
         self._allow_localhost = allow_localhost
         self._allow_lan = allow_lan
+        self.closed = asyncio.Event()
     def update_cached_response(self):
         self._left_cache, self._right_cache = SPVPong.make_sans_source_address(
@ -160,13 +161,16 @@ class SPVServerStatusProtocol(asyncio.DatagramProtocol):
     def connection_made(self, transport) -> None:
         self.transport = transport
+        self.closed.clear()
     def connection_lost(self, exc: Optional[Exception]) -> None:
         self.transport = None
+        self.closed.set()
-    def close(self):
+    async def close(self):
         if self.transport:
             self.transport.close()
+            await self.closed.wait()
 class StatusServer:
@ -184,9 +188,9 @@ class StatusServer:
         await loop.create_datagram_endpoint(lambda: self._protocol, (interface, port))
         log.info("started udp status server on %s:%i", interface, port)
-    def stop(self):
+    async def stop(self):
         if self.is_running:
-            self._protocol.close()
+            await self._protocol.close()
         self._protocol = None
     @property


@ -2,6 +2,7 @@ import asyncio
 import logging
 from lbry.error import (
+    InsufficientFundsError,
     ServerPaymentFeeAboveMaxAllowedError,
     ServerPaymentInvalidAddressError,
     ServerPaymentWalletLockedError
@ -24,41 +25,66 @@ class WalletServerPayer:
         self.max_fee = max_fee
         self._on_payment_controller = StreamController()
         self.on_payment = self._on_payment_controller.stream
-        self.on_payment.listen(None, on_error=lambda e: logging.warning(e.args[0]))
+        self.on_payment.listen(None, on_error=lambda e: log.warning(e.args[0]))
     async def pay(self):
+        while self.running:
+            try:
+                await self._pay()
+            except (asyncio.TimeoutError, ConnectionError):
+                if not self.running:
+                    break
+                delay = max(self.payment_period / 24, 10)
+                log.warning("Payement failed. Will retry after %g seconds.", delay)
+                asyncio.sleep(delay)
+            except BaseException as e:
+                if not isinstance(e, asyncio.CancelledError):
+                    log.exception("Unexpected exception. Payment task exiting early.")
+                self.running = False
+                raise
+    async def _pay(self):
         while self.running:
             await asyncio.sleep(self.payment_period)
-            features = await self.ledger.network.retriable_call(self.ledger.network.get_server_features)
+            features = await self.ledger.network.get_server_features()
+            log.debug("pay loop: received server features: %s", str(features))
             address = features['payment_address']
             amount = str(features['daily_fee'])
             if not address or not amount:
+                log.debug("pay loop: no address or no amount")
                 continue
             if not self.ledger.is_pubkey_address(address):
+                log.info("pay loop: address not pubkey")
                 self._on_payment_controller.add_error(ServerPaymentInvalidAddressError(address))
                 continue
             if self.wallet.is_locked:
+                log.info("pay loop: wallet is locked")
                 self._on_payment_controller.add_error(ServerPaymentWalletLockedError())
                 continue
             amount = lbc_to_dewies(features['daily_fee'])  # check that this is in lbc and not dewies
             limit = lbc_to_dewies(self.max_fee)
             if amount > limit:
+                log.info("pay loop: amount (%d) > limit (%d)", amount, limit)
                 self._on_payment_controller.add_error(
                     ServerPaymentFeeAboveMaxAllowedError(features['daily_fee'], self.max_fee)
                 )
                 continue
-            tx = await Transaction.create(
-                [],
-                [Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))],
-                self.wallet.get_accounts_or_all(None),
-                self.wallet.get_account_or_default(None)
-            )
+            try:
+                tx = await Transaction.create(
+                    [],
+                    [Output.pay_pubkey_hash(amount, self.ledger.address_to_hash160(address))],
+                    self.wallet.get_accounts_or_all(None),
+                    self.wallet.get_account_or_default(None)
+                )
+            except InsufficientFundsError:
+                self._on_payment_controller.add_error(InsufficientFundsError())
+                continue
-            await self.ledger.broadcast(tx)
+            await self.ledger.broadcast_or_release(tx, blocking=True)
             if self.analytics_manager:
                 await self.analytics_manager.send_credits_sent()
             self._on_payment_controller.add(tx)
@ -70,7 +96,18 @@ class WalletServerPayer:
         self.wallet = wallet
         self.running = True
         self.task = asyncio.ensure_future(self.pay())
-        self.task.add_done_callback(lambda _: log.info("Stopping wallet server payments."))
+        self.task.add_done_callback(self._done_callback)
+    def _done_callback(self, f):
+        if f.cancelled():
+            reason = "Cancelled"
+        elif f.exception():
+            reason = f'Exception: {f.exception()}'
+        elif not self.running:
+            reason = "Stopped"
+        else:
+            reason = ""
+        log.info("Stopping wallet server payments. %s", reason)
     async def stop(self):
         if self.running:


@ -10,6 +10,7 @@ from collections import UserDict
 from hashlib import sha256
 from operator import attrgetter
 from lbry.crypto.crypt import better_aes_encrypt, better_aes_decrypt
+from lbry.error import InvalidPasswordError
 from .account import Account
 if typing.TYPE_CHECKING:
@ -138,6 +139,10 @@ class Wallet:
             'accounts': [a.to_dict(encrypt_password) for a in self.accounts]
         }
+    def to_json(self):
+        assert not self.is_locked, "Cannot serialize a wallet with locked/encrypted accounts."
+        return json.dumps(self.to_dict())
     def save(self):
         if self.preferences.get(ENCRYPT_ON_DISK, False):
             if self.encryption_password is not None:
@ -164,21 +169,32 @@ class Wallet:
     def pack(self, password):
         assert not self.is_locked, "Cannot pack a wallet with locked/encrypted accounts."
-        new_data = json.dumps(self.to_dict())
-        new_data_compressed = zlib.compress(new_data.encode())
+        new_data_compressed = zlib.compress(self.to_json().encode())
         return better_aes_encrypt(password, new_data_compressed)
     @classmethod
     def unpack(cls, password, encrypted):
         decrypted = better_aes_decrypt(password, encrypted)
-        decompressed = zlib.decompress(decrypted)
+        try:
+            decompressed = zlib.decompress(decrypted)
+        except zlib.error as e:
+            if "incorrect header check" in e.args[0].lower():
+                raise InvalidPasswordError()
+            if "unknown compression method" in e.args[0].lower():
+                raise InvalidPasswordError()
+            if "invalid window size" in e.args[0].lower():
+                raise InvalidPasswordError()
+            raise
         return json.loads(decompressed)
     def merge(self, manager: 'WalletManager',
-              password: str, data: str) -> List['Account']:
+              password: str, data: str) -> (List['Account'], List['Account']):
         assert not self.is_locked, "Cannot sync apply on a locked wallet."
-        added_accounts = []
-        decrypted_data = self.unpack(password, data)
+        added_accounts, merged_accounts = [], []
+        if password is None:
+            decrypted_data = json.loads(data)
+        else:
+            decrypted_data = self.unpack(password, data)
         self.preferences.merge(decrypted_data.get('preferences', {}))
         for account_dict in decrypted_data['accounts']:
             ledger = manager.get_or_create_ledger(account_dict['ledger'])
@ -191,10 +207,11 @@ class Wallet:
                     break
             if local_match is not None:
                 local_match.merge(account_dict)
+                merged_accounts.append(local_match)
             else:
                 new_account = Account.from_dict(ledger, self, account_dict)
                 added_accounts.append(new_account)
-        return added_accounts
+        return added_accounts, merged_accounts
     @property
     def is_locked(self) -> bool:
@ -203,11 +220,12 @@ class Wallet:
                 return True
         return False
-    def unlock(self, password):
+    async def unlock(self, password):
         for account in self.accounts:
             if account.encrypted:
                 if not account.decrypt(password):
                     return False
+                await account.deterministic_channel_keys.ensure_cache_primed()
         self.encryption_password = password
         return True


@ -40,22 +40,17 @@ def checkrecord(record, expected_winner, expected_claim):
 async def checkcontrolling(daemon: Daemon, db: SQLDB):
-    records, claim_ids, names, futs = [], [], [], []
+    records, names, futs = [], [], []
     for record in db.get_claims('claimtrie.claim_hash as is_controlling, claim.*', is_controlling=True):
         records.append(record)
         claim_id = hex_reverted(record['claim_hash'])
-        claim_ids.append((claim_id,))
-        names.append((record['normalized'],))
+        names.append((record['normalized'], (claim_id,), "", True))  # last parameter is IncludeValues
         if len(names) > 50000:
-            futs.append(daemon._send_vector('getvalueforname', names[:]))
-            futs.append(daemon._send_vector('getclaimbyid', claim_ids[:]))
+            futs.append(daemon._send_vector('getclaimsfornamebyid', names))
             names.clear()
-            claim_ids.clear()
     if names:
-        futs.append(daemon._send_vector('getvalueforname', names[:]))
-        futs.append(daemon._send_vector('getclaimbyid', claim_ids[:]))
+        futs.append(daemon._send_vector('getclaimsfornamebyid', names))
         names.clear()
-        claim_ids.clear()
     while futs:
         winners, claims = futs.pop(0), futs.pop(0)

scripts/dht_crawler.py (new file, 520 lines)

@ -0,0 +1,520 @@
import sys
import datetime
import logging
import asyncio
import os.path
import random
import time
import typing
from dataclasses import dataclass, astuple, replace
from aiohttp import web
from prometheus_client import Gauge, generate_latest as prom_generate_latest, Counter, Histogram
import lbry.dht.error
from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import make_kademlia_peer, PeerManager, decode_tcp_peer_from_compact_address
from lbry.dht.protocol.distance import Distance
from lbry.dht.protocol.iterative_find import FindValueResponse, FindNodeResponse, FindResponse
from lbry.extras.daemon.storage import SQLiteMixin
from lbry.conf import Config
from lbry.utils import resolve_host
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)
class SDHashSamples:
def __init__(self, samples_file_path):
with open(samples_file_path, "rb") as sample_file:
self._samples = sample_file.read()
assert len(self._samples) % 48 == 0
self.size = len(self._samples) // 48
def read_samples(self, count=1):
for _ in range(count):
offset = 48 * random.randrange(0, self.size)
yield self._samples[offset:offset + 48]
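# Editor's illustration (not part of this file): the samples file is just a concatenation of
# 48-byte sd_hashes, so a throwaway "test.sample" (the name test() below looks for) can be
# generated like this:
import os
with open("test.sample", "wb") as sample_file:
    for _ in range(3):
        sample_file.write(os.urandom(48))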
class PeerStorage(SQLiteMixin):
CREATE_TABLES_QUERY = """
PRAGMA JOURNAL_MODE=WAL;
CREATE TABLE IF NOT EXISTS peer (
peer_id INTEGER NOT NULL,
node_id VARCHAR(96),
address VARCHAR,
udp_port INTEGER,
tcp_port INTEGER,
first_online DATETIME,
errors INTEGER,
last_churn INTEGER,
added_on DATETIME NOT NULL,
last_check DATETIME,
last_seen DATETIME,
latency INTEGER,
PRIMARY KEY (peer_id)
);
CREATE TABLE IF NOT EXISTS connection (
from_peer_id INTEGER NOT NULL,
to_peer_id INTEGER NOT NULL,
PRIMARY KEY (from_peer_id, to_peer_id),
FOREIGN KEY(from_peer_id) REFERENCES peer (peer_id),
FOREIGN KEY(to_peer_id) REFERENCES peer (peer_id)
);
"""
async def open(self):
await super().open()
self.db.writer_connection.row_factory = dict_row_factory
async def all_peers(self):
return [
DHTPeer(**peer) for peer in await self.db.execute_fetchall(
"select * from peer where latency > 0 or last_seen > datetime('now', '-1 hour')")
]
async def save_peers(self, *peers):
log.info("Saving graph nodes (peers) to DB")
await self.db.executemany(
"INSERT OR REPLACE INTO peer("
"node_id, address, udp_port, tcp_port, first_online, errors, last_churn,"
"added_on, last_check, last_seen, latency, peer_id) VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",
[astuple(peer) for peer in peers]
)
log.info("Finished saving graph nodes (peers) to DB")
async def save_connections(self, connections_map):
log.info("Saving graph edges (connections) to DB")
await self.db.executemany(
"DELETE FROM connection WHERE from_peer_id = ?", [(key,) for key in connections_map])
for from_peer_id in connections_map:
await self.db.executemany(
"INSERT INTO connection(from_peer_id, to_peer_id) VALUES(?,?)",
[(from_peer_id, to_peer_id) for to_peer_id in connections_map[from_peer_id]])
log.info("Finished saving graph edges (connections) to DB")
@dataclass(frozen=True)
class DHTPeer:
node_id: str
address: str
udp_port: int
tcp_port: int = None
first_online: datetime.datetime = None
errors: int = None
last_churn: int = None
added_on: datetime.datetime = None
last_check: datetime.datetime = None
last_seen: datetime.datetime = None
latency: int = None
peer_id: int = None
@classmethod
def from_kad_peer(cls, peer, peer_id):
node_id = peer.node_id.hex() if peer.node_id else None
return DHTPeer(
node_id=node_id, address=peer.address, udp_port=peer.udp_port, tcp_port=peer.tcp_port,
peer_id=peer_id, added_on=datetime.datetime.utcnow())
def to_kad_peer(self):
node_id = bytes.fromhex(self.node_id) if self.node_id else None
return make_kademlia_peer(node_id, self.address, self.udp_port, self.tcp_port)
def new_node(address="0.0.0.0", udp_port=0, node_id=None):
node_id = node_id or generate_id()
loop = asyncio.get_event_loop()
return Node(loop, PeerManager(loop), node_id, udp_port, udp_port, 3333, address)
class Crawler:
unique_total_hosts_metric = Gauge(
"unique_total_hosts", "Number of unique hosts seen in the last interval", namespace="dht_crawler_node",
)
reachable_hosts_metric = Gauge(
"reachable_hosts", "Number of hosts that replied in the last interval", namespace="dht_crawler_node",
)
total_historic_hosts_metric = Gauge(
"history_total_hosts", "Number of hosts seen since first run.", namespace="dht_crawler_node",
)
pending_check_hosts_metric = Gauge(
"pending_hosts", "Number of hosts on queue to be checked.", namespace="dht_crawler_node",
)
hosts_with_errors_metric = Gauge(
"error_hosts", "Number of hosts that raised errors during contact.", namespace="dht_crawler_node",
)
ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS = tuple(map(float, range(100))) + (
500., 1000., 2000., float('inf')
)
connections_found_metric = Histogram(
"connections_found", "Number of hosts returned by the last successful contact.", namespace="dht_crawler_node",
buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
known_connections_found_metric = Histogram(
"known_connections_found", "Number of already known hosts returned by last contact.",
namespace="dht_crawler_node", buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
reachable_connections_found_metric = Histogram(
"reachable_connections_found", "Number of reachable known hosts returned by last contact.",
namespace="dht_crawler_node", buckets=ROUTING_TABLE_SIZE_HISTOGRAM_BUCKETS
)
LATENCY_HISTOGRAM_BUCKETS = (
0., 5., 10., 15., 30., 60., 120., 180., 240., 300., 600., 1200., 1800., 4000., 6000., float('inf')
)
host_latency_metric = Histogram(
"host_latency", "Time spent on the last request, in milliseconds.", namespace="dht_crawler_node",
buckets=LATENCY_HISTOGRAM_BUCKETS
)
probed_streams_metric = Counter(
"probed_streams", "Amount of streams probed.", namespace="dht_crawler_node",
)
announced_streams_metric = Counter(
"announced_streams", "Amount of streams where announcements were found.", namespace="dht_crawler_node",
)
working_streams_metric = Counter(
"working_streams", "Amount of streams with reachable hosts.", namespace="dht_crawler_node",
)
def __init__(self, db_path: str, sd_hash_samples: SDHashSamples):
self.node = new_node()
self.db = PeerStorage(db_path)
self.sd_hashes = sd_hash_samples
self._memory_peers = {}
self._reachable_by_node_id = {}
self._connections = {}
async def open(self):
await self.db.open()
self._memory_peers = {
(peer.address, peer.udp_port): peer for peer in await self.db.all_peers()
}
self.refresh_reachable_set()
def refresh_reachable_set(self):
self._reachable_by_node_id = {
bytes.fromhex(peer.node_id): peer for peer in self._memory_peers.values() if (peer.latency or 0) > 0
}
async def probe_files(self):
if not self.sd_hashes:
return
while True:
for sd_hash in self.sd_hashes.read_samples(10_000):
self.refresh_reachable_set()
distance = Distance(sd_hash)
node_ids = list(self._reachable_by_node_id.keys())
node_ids.sort(key=lambda node_id: distance(node_id))
k_closest = [self._reachable_by_node_id[node_id] for node_id in node_ids[:8]]
found = False
working = False
for response in asyncio.as_completed(
[self.request_peers(peer.address, peer.udp_port, peer.node_id, sd_hash) for peer in k_closest]):
response = await response
if response and response.found:
found = True
blob_peers = []
for compact_addr in response.found_compact_addresses:
try:
blob_peers.append(decode_tcp_peer_from_compact_address(compact_addr))
except ValueError as e:
log.error("Error decoding compact peers: %s", e)
for blob_peer in blob_peers:
response = await self.request_peers(blob_peer.address, blob_peer.tcp_port, blob_peer.node_id, sd_hash)
if response:
working = True
log.info("Found responsive peer for %s: %s:%d(%d)",
sd_hash.hex()[:8], blob_peer.address,
blob_peer.udp_port or -1, blob_peer.tcp_port or -1)
else:
log.info("Found dead peer for %s: %s:%d(%d)",
sd_hash.hex()[:8], blob_peer.address,
blob_peer.udp_port or -1, blob_peer.tcp_port or -1)
self.probed_streams_metric.inc()
if found:
self.announced_streams_metric.inc()
if working:
self.working_streams_metric.inc()
log.info("Done querying stream %s for peers. Found: %s, working: %s", sd_hash.hex()[:8], found, working)
await asyncio.sleep(.5)
@property
def refresh_limit(self):
return datetime.datetime.utcnow() - datetime.timedelta(hours=1)
@property
def all_peers(self):
return [
peer for peer in self._memory_peers.values()
if (peer.last_seen and peer.last_seen > self.refresh_limit) or (peer.latency or 0) > 0
]
@property
def active_peers_count(self):
return len(self.all_peers)
@property
def checked_peers_count(self):
return len([peer for peer in self.all_peers if peer.last_check and peer.last_check > self.refresh_limit])
@property
def unreachable_peers_count(self):
return len([peer for peer in self.all_peers
if peer.last_check and peer.last_check > self.refresh_limit and not peer.latency])
@property
def peers_with_errors_count(self):
return len([peer for peer in self.all_peers if (peer.errors or 0) > 0])
def get_peers_needing_check(self):
to_check = [peer for peer in self.all_peers if peer.last_check is None or peer.last_check < self.refresh_limit]
return to_check
def remove_expired_peers(self):
for key, peer in list(self._memory_peers.items()):
if (peer.latency or 0) < 1 and peer.last_seen < self.refresh_limit:
del self._memory_peers[key]
def add_peers(self, *peers):
for peer in peers:
db_peer = self.get_from_peer(peer)
if db_peer and db_peer.node_id is None and peer.node_id is not None:
db_peer = replace(db_peer, node_id=peer.node_id.hex())
elif not db_peer:
db_peer = DHTPeer.from_kad_peer(peer, len(self._memory_peers) + 1)
db_peer = replace(db_peer, last_seen=datetime.datetime.utcnow())
self._memory_peers[(peer.address, peer.udp_port)] = db_peer
async def flush_to_db(self):
await self.db.save_peers(*self._memory_peers.values())
connections_to_save = self._connections
self._connections = {}
# await self.db.save_connections(connections_to_save) heavy call
self.remove_expired_peers()
def get_from_peer(self, peer):
return self._memory_peers.get((peer.address, peer.udp_port), None)
def set_latency(self, peer, latency=None):
if latency:
self.host_latency_metric.observe(latency / 1_000_000.0)
db_peer = self.get_from_peer(peer)
if not db_peer:
return
db_peer = replace(db_peer, latency=latency)
if not db_peer.node_id and peer.node_id:
db_peer = replace(db_peer, node_id=peer.node_id.hex())
if db_peer.first_online and latency is None:
db_peer = replace(db_peer, last_churn=(datetime.datetime.utcnow() - db_peer.first_online).seconds)
elif latency is not None and db_peer.first_online is None:
db_peer = replace(db_peer, first_online=datetime.datetime.utcnow())
db_peer = replace(db_peer, last_check=datetime.datetime.utcnow())
self._memory_peers[(db_peer.address, db_peer.udp_port)] = db_peer
def inc_errors(self, peer):
db_peer = self.get_from_peer(peer)
self._memory_peers[(peer.address, peer.node_id)] = replace(db_peer, errors=(db_peer.errors or 0) + 1)
def associate_peers(self, peer, other_peers):
self._connections[self.get_from_peer(peer).peer_id] = [
self.get_from_peer(other_peer).peer_id for other_peer in other_peers]
async def request_peers(self, host, port, node_id, key=None) -> typing.Optional[FindResponse]:
key = key or node_id
peer = make_kademlia_peer(key, await resolve_host(host, port, 'udp'), port)
for attempt in range(3):
try:
req_start = time.perf_counter_ns()
if key == node_id:
response = await self.node.protocol.get_rpc_peer(peer).find_node(key)
response = FindNodeResponse(key, response)
latency = time.perf_counter_ns() - req_start
self.set_latency(peer, latency)
else:
response = await self.node.protocol.get_rpc_peer(peer).find_value(key)
response = FindValueResponse(key, response)
await asyncio.sleep(0.05)
return response
except asyncio.TimeoutError:
if key == node_id:
self.set_latency(peer, None)
continue
except lbry.dht.error.TransportNotConnected:
log.info("Transport unavailable, waiting 1s to retry")
await asyncio.sleep(1)
except lbry.dht.error.RemoteException as e:
log.info('Peer errored: %s:%d attempt #%d - %s',
host, port, (attempt + 1), str(e))
if key == node_id:
self.inc_errors(peer)
self.set_latency(peer, None)
continue
async def crawl_routing_table(self, host, port, node_id=None):
start = time.time()
log.debug("querying %s:%d", host, port)
address = await resolve_host(host, port, 'udp')
key = node_id or self.node.protocol.peer_manager.get_node_id_for_endpoint(address, port)
peer = make_kademlia_peer(key, address, port)
self.add_peers(peer)
if not key:
latency = None
for _ in range(3):
try:
ping_start = time.perf_counter_ns()
await self.node.protocol.get_rpc_peer(peer).ping()
await asyncio.sleep(0.05)
key = key or self.node.protocol.peer_manager.get_node_id_for_endpoint(address, port)
peer = make_kademlia_peer(key, address, port)
latency = time.perf_counter_ns() - ping_start
break
except asyncio.TimeoutError:
pass
except lbry.dht.error.RemoteException:
self.inc_errors(peer)
pass
self.set_latency(peer, latency if peer.node_id else None)
if not latency or not peer.node_id:
if latency and not peer.node_id:
log.warning("No node id from %s:%d", host, port)
return set()
distance = Distance(key)
max_distance = int.from_bytes(bytes([0xff] * 48), 'big')
peers = set()
factor = 2048
for i in range(1000):
response = await self.request_peers(address, port, key)
new_peers = list(response.get_close_kademlia_peers(peer)) if response else None
if not new_peers:
break
new_peers.sort(key=lambda peer: distance(peer.node_id))
peers.update(new_peers)
far_key = new_peers[-1].node_id
if distance(far_key) <= distance(key):
current_distance = distance(key)
next_jump = current_distance + int(max_distance // factor) # jump closer
factor /= 2
if factor > 8 and next_jump < max_distance:
key = int.from_bytes(peer.node_id, 'big') ^ next_jump
if key.bit_length() > 384:
break
key = key.to_bytes(48, 'big')
else:
break
else:
key = far_key
factor = 2048
if peers:
log.info("Done querying %s:%d in %.2f seconds: %d peers found over %d requests.",
host, port, (time.time() - start), len(peers), i)
if peers:
self.connections_found_metric.observe(len(peers))
known_peers = 0
reachable_connections = 0
for peer in peers:
known_peer = self.get_from_peer(peer)
known_peers += 1 if known_peer else 0
reachable_connections += 1 if known_peer and (known_peer.latency or 0) > 0 else 0
self.known_connections_found_metric.observe(known_peers)
self.reachable_connections_found_metric.observe(reachable_connections)
self.add_peers(*peers)
self.associate_peers(peer, peers)
return peers
async def process(self):
to_process = {}
def submit(_peer):
f = asyncio.ensure_future(
self.crawl_routing_table(_peer.address, _peer.udp_port, bytes.fromhex(_peer.node_id)))
to_process[_peer.peer_id] = f
f.add_done_callback(lambda _: to_process.pop(_peer.peer_id))
to_check = self.get_peers_needing_check()
last_flush = datetime.datetime.utcnow()
while True:
for peer in to_check[:200]:
if peer.peer_id not in to_process:
submit(peer)
await asyncio.sleep(.05)
await asyncio.sleep(0)
self.unique_total_hosts_metric.set(self.checked_peers_count)
self.reachable_hosts_metric.set(self.checked_peers_count - self.unreachable_peers_count)
self.total_historic_hosts_metric.set(len(self._memory_peers))
self.pending_check_hosts_metric.set(len(to_check))
self.hosts_with_errors_metric.set(self.peers_with_errors_count)
log.info("%d known, %d contacted recently, %d unreachable, %d error, %d processing, %d on queue",
self.active_peers_count, self.checked_peers_count, self.unreachable_peers_count,
self.peers_with_errors_count, len(to_process), len(to_check))
if to_process:
await asyncio.wait(to_process.values(), return_when=asyncio.FIRST_COMPLETED)
to_check = self.get_peers_needing_check()
if (datetime.datetime.utcnow() - last_flush).seconds > 60:
log.info("flushing to db")
await self.flush_to_db()
last_flush = datetime.datetime.utcnow()
while not to_check and not to_process:
port = self.node.listening_port.get_extra_info('socket').getsockname()[1]
self.node.stop()
await self.node.start_listening()
log.info("Idle, sleeping a minute. Port changed to %d", port)
await asyncio.sleep(60.0)
to_check = self.get_peers_needing_check()
class SimpleMetrics:
def __init__(self, port):
self.prometheus_port = port
async def handle_metrics_get_request(self, _):
try:
return web.Response(
text=prom_generate_latest().decode(),
content_type='text/plain; version=0.0.4'
)
except Exception:
log.exception('could not generate prometheus data')
raise
async def start(self):
prom_app = web.Application()
prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
metrics_runner = web.AppRunner(prom_app)
await metrics_runner.setup()
prom_site = web.TCPSite(metrics_runner, "0.0.0.0", self.prometheus_port)
await prom_site.start()
def dict_row_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
if col[0] in ('added_on', 'first_online', 'last_seen', 'last_check'):
d[col[0]] = datetime.datetime.fromisoformat(row[idx]) if row[idx] else None
else:
d[col[0]] = row[idx]
return d
async def test():
db_path = "/tmp/peers.db" if len(sys.argv) == 1 else sys.argv[-1]
asyncio.get_event_loop().set_debug(True)
metrics = SimpleMetrics('8080')
await metrics.start()
conf = Config()
hosting_samples = SDHashSamples("test.sample") if os.path.isfile("test.sample") else None
crawler = Crawler(db_path, hosting_samples)
await crawler.open()
await crawler.flush_to_db()
await crawler.node.start_listening()
if crawler.active_peers_count < 100:
probes = []
for (host, port) in conf.known_dht_nodes:
probes.append(asyncio.create_task(crawler.crawl_routing_table(host, port)))
await asyncio.gather(*probes)
await crawler.flush_to_db()
await asyncio.gather(crawler.process(), crawler.probe_files())
if __name__ == '__main__':
asyncio.run(test())


@ -83,7 +83,7 @@ async def main(host: str, port: int, db_file_path: str, bootstrap_node: Optional
     await storage.open()
     node = Node(
         loop, PeerManager(loop), node_id, port, port, 3333, None,
-        storage=storage
+        storage=storage, is_bootstrap_node=True
     )
     if prometheus_port > 0:
         metrics = SimpleMetrics(prometheus_port, node if export else None)


@ -1,46 +0,0 @@
import asyncio
from pprint import pprint
from elasticsearch import AsyncElasticsearch
from elasticsearch._async.helpers import async_scan, async_bulk
DB = {}
INDEX = 'claims'
async def generate_support_amounts(client: AsyncElasticsearch):
async for doc in async_scan(client):
DB[doc['_id']] = doc['_source']['support_amount']
if len(DB) > 10:
break
pprint(DB)
def generate_support_to_trending():
for claim_id, amount in DB.items():
yield {'doc': {"trending_mixed": amount}, '_id': claim_id, '_index': INDEX, '_op_type': 'update'}
async def write_trending(client: AsyncElasticsearch):
await async_bulk(client, generate_support_to_trending())
def get_client(host='localhost', port=9201):
hosts = [{'host': host, 'port': port}]
return AsyncElasticsearch(hosts, timeout=port)
async def run():
client = get_client()
await generate_support_amounts(client)
await write_trending(client)
for claim_id, value in DB.items():
if value > 0:
break
doc = await client.get(INDEX, claim_id)
pprint(doc)
pprint(DB[claim_id])
await client.close()
asyncio.get_event_loop().run_until_complete(run())

Some files were not shown because too many files have changed in this diff.