Compare commits

...

2498 commits

Author SHA1 Message Date
Jonathan Moody
eb5da9511e Revert "TEMP: Try python 3.8."
This reverts commit 8def4d5177.
2023-04-03 13:34:36 -04:00
Jonathan Moody
8722ef840e Bump python_requires >= 3.8.
Code to handle CancelledError (as subclass of Exception) was removed.
2023-04-03 13:34:36 -04:00
Jonathan Moody
6e75a1a89b TEMP: Try python 3.8. 2023-04-03 13:34:36 -04:00
Jonathan Moody
ef3189de1d Work on some DeprecationWarnings: The explicit passing of coroutine objects to asyncio.wait() is deprecated since Python 3.8. 2023-04-03 13:34:36 -04:00
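A minimal sketch of the deprecation this commit works around, using only the standard library: since Python 3.8, handing bare coroutines to asyncio.wait() warns (and fails on 3.11+); they must be wrapped in tasks first.
```
import asyncio

async def fetch(n):
    await asyncio.sleep(0.01)
    return n

async def main():
    # Deprecated since 3.8 (an error on 3.11+):
    #     done, _ = await asyncio.wait([fetch(1), fetch(2)])
    # Fix: wrap each coroutine in a Task first.
    tasks = [asyncio.create_task(fetch(n)) for n in (1, 2)]
    done, _ = await asyncio.wait(tasks)
    print(sorted(t.result() for t in done))

asyncio.run(main())
```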
Jonathan Moody
c2d2080034 Try to suppress asyncio.CancelledError in a different way in test_streaming.py. 2023-04-03 13:34:36 -04:00
Jonathan Moody
d0b5a0a8fd TEMP: Add workflow_dispatch. 2023-04-03 13:34:36 -04:00
Jonathan Moody
1d0e17be21 Another place generalized to Exception or asyncio.CancelledError. 2023-04-03 13:34:36 -04:00
Jonathan Moody
4ef03bb1f4 Try separate file_manager.stop() and start() calls to better
control order of events in test.
While file_manager is stopped, we get no response to file_list().
2023-04-03 13:34:36 -04:00
Jonathan Moody
4bd4bcdc27 Try ubuntu-20.04 to resolve missing libffi.so.7 issue. 2023-04-03 13:34:36 -04:00
Jonathan Moody
e5ca967fa2 Make FileManager.stop() async because SourceManager.stop() is now async. 2023-04-03 13:34:36 -04:00
Jonathan Moody
eed7d02e8b Tweak aiohttp version to be compatible with hub repository. 2023-04-03 13:34:36 -04:00
Jonathan Moody
02aecad52b CancelledError derives from BaseException in Python >= 3.8. The significant functional
change here is in upload_to_reflector(). Unit tests in TestReflector were failing.
Deal with lint related to CancelledError cleanup.
2023-04-03 13:34:36 -04:00
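A hedged sketch of the Python 3.8 change behind this commit and its neighbors: asyncio.CancelledError now derives from BaseException, so a bare `except Exception` no longer swallows cancellation, and cleanup paths have to catch it by name (the function name below merely echoes upload_to_reflector and is illustrative).
```
import asyncio

async def upload():
    try:
        await asyncio.sleep(3600)
    except asyncio.CancelledError:
        raise  # on 3.8+ cancellation must be handled explicitly
    except Exception:
        raise  # pre-3.8, cancellation also landed in this branch

async def main():
    task = asyncio.create_task(upload())
    await asyncio.sleep(0)
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print("cancelled cleanly")

asyncio.run(main())
```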
Jonathan Moody
585962d930 Make stop(), stop_tasks() consistently async routines, and have stop_tasks()
wait for file_output_task completion. This fixes a problem with
test_download_stop_resume_delete.
2023-04-03 13:34:36 -04:00
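A minimal sketch of the shape this commit describes, with a simplified stand-in for the real stream class in lbry.stream: stop_tasks() is async and waits for the cancelled file_output_task to actually finish, so a delete issued right after stop() cannot race the file writer.
```
import asyncio
from typing import Optional

class ManagedStreamSketch:
    def __init__(self):
        self.file_output_task: Optional[asyncio.Task] = None

    async def stop_tasks(self):
        if self.file_output_task is not None and not self.file_output_task.done():
            self.file_output_task.cancel()
            # The fix: wait for the task to unwind before returning.
            await asyncio.gather(self.file_output_task, return_exceptions=True)
        self.file_output_task = None

    async def stop(self):
        await self.stop_tasks()
```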
Jonathan Moody
ea4fba39a6 Fix Transport, DatagramTransport mockup issues. 2023-04-03 13:34:36 -04:00
Jonathan Moody
7a86406746 Fix and enable lint no-self-use & try-except-raise. 2023-04-03 13:34:36 -04:00
Jonathan Moody
c8a3eb97a4 Bump pylint version. Old pylint did not find standard library stuff on 3.9.12. 2023-04-03 13:34:36 -04:00
Lex Berezhny
20213628d7 upgrade cryptography 2023-04-03 13:34:36 -04:00
Lex Berezhny
2d1649f972 pylint disable shuffle() arg check 2023-04-03 13:34:36 -04:00
Lex Berezhny
5cb04b86a0 shuffle() needs custom random, removed loop from Event()/Queue() 2023-04-03 13:34:36 -04:00
Lex Berezhny
93ab6b3be3 passing loop to asyncio functions is deprecated 2023-04-03 13:34:36 -04:00
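A sketch of the two deprecations the two commits above address, standard library only: synchronization primitives no longer accept a loop argument (removed in Python 3.10), and random.shuffle() lost its random parameter (removed in 3.11), so a seeded shuffle goes through a Random instance instead.
```
import asyncio
import random

async def main():
    # Before (deprecated, removed in 3.10):
    #     asyncio.Event(loop=loop); asyncio.Queue(loop=loop)
    # Primitives now bind to the running loop automatically.
    event = asyncio.Event()
    queue = asyncio.Queue()
    event.set()
    await queue.put("ready")

    # Before (deprecated, removed in 3.11): random.shuffle(peers, random=func)
    rng = random.Random(42)
    peers = ["a", "b", "c", "d"]
    rng.shuffle(peers)
    print(peers)

asyncio.run(main())
```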
Lex Berezhny
b9762c3e64 update plyvel 2023-04-03 13:34:36 -04:00
Lex Berezhny
82592d00ef try building 3.9 2023-04-03 13:34:36 -04:00
Jonathan Moody
c118174c1a Try shell: bash to simplify. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d284acd8b8 Remove "debug pip cache". 2023-02-02 14:16:07 -05:00
Jonathan Moody
235c98372d Fix syntax. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d2f5073ef4 Single "set pip cache dir" task with conditional inside. 2023-02-02 14:16:07 -05:00
Jonathan Moody
84e5e43117 Bump upload-artifact version too. 2023-02-02 14:16:07 -05:00
Jonathan Moody
7bd025ae54 Upgrade change-string-case. Use startsWith() to test runner.os.
Bump change-string-case-action version again.
2023-02-02 14:16:07 -05:00
Jonathan Moody
8f28ce65b0 Switch to environment vars in $GITHUB_ENV. 2023-02-02 14:16:07 -05:00
Jonathan Moody
d36e305129 Functions save-state, set-output deprecated. Use new mechanism. 2023-02-02 14:16:07 -05:00
Jonathan Moody
2609dee8fb Bump checkout, setup-python, cache action versions. 2023-02-02 14:16:07 -05:00
Lex Berezhny
a2da86d4b5 v0.113.0 2023-01-23 10:43:02 -05:00
Alex Grin
aa16c7fee5 Update conf.py 2023-01-23 10:30:25 -05:00
Alex Grin
3266f72b82 add s1.lbry.network 2023-01-23 10:30:25 -05:00
Jack Robison
77cd2a3f8a add more non lbry.com hubs/bootstrap dht nodes 2023-01-23 10:30:25 -05:00
Alex Grin
308e586e9a add grin's domain to bootstrap hubs list 2023-01-23 10:30:25 -05:00
84beddfd77 Added tracker and dht from pigg.es
2023-01-22 19:09:17 -05:00
Victor Shyba
6258651650
Merge pull request #3716 from lbryio/dht_exceptions
handle remote exceptions on routing table ping
2022-12-13 17:18:47 -03:00
Victor Shyba
cc5f0b6630 handle remote exception on routing table ping 2022-12-13 16:56:58 -03:00
Jonathan Moody
f64d507d39 TEMP: Pin workflows to ubuntu-20.04 to work around missing ripemd160 issue. 2022-12-12 21:47:41 -05:00
Jonathan Moody
001819d5c2 Bump Hub to include fix for supports with wrong names. 2022-11-20 20:34:30 -05:00
Jonathan Moody
8b4c046d28 Try pyinstaller==4.6 to fix MacOS build failure. 2022-11-20 20:34:30 -05:00
Jonathan Moody
2c20ad6c43 Add another zlib.error mapped to InvalidPasswordError. 2022-11-20 20:34:30 -05:00
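A hedged sketch of the mapping this commit extends: a wrong wallet password surfaces as zlib.error with a small set of known messages, which get translated into the typed InvalidPasswordError. The error strings and class below are illustrative stand-ins, not the exact ones in lbry.error.
```
import zlib

class InvalidPasswordError(Exception):
    """Stand-in for the real error type."""

BAD_DECRYPT_MESSAGES = (
    "invalid distance too far back",  # illustrative examples only
    "incorrect header check",
)

def decompress_wallet(data: bytes) -> bytes:
    try:
        return zlib.decompress(data)
    except zlib.error as err:
        if any(msg in str(err) for msg in BAD_DECRYPT_MESSAGES):
            # Wrong password -> garbage ciphertext; report it as such.
            raise InvalidPasswordError() from err
        raise
```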
Jonathan Moody
9e610cc54c Update test for Hub rename of method stage_put() -> stash_put(). 2022-11-20 20:34:30 -05:00
Jonathan Moody
b9d25c6d01 Bump hub to latest, getting fix for TX negative caching issue and others. 2022-11-20 20:34:30 -05:00
Jonathan Moody
419b5b45f2 Allow a few initial "transaction not found" responses from Hub. 2022-11-20 20:34:30 -05:00
Jonathan Moody
516c2dd5d0 Bump hub to fix subscribe race + EADDRINUSE issue. 2022-11-20 20:34:30 -05:00
Jonathan Moody
b99102f9c9 Bump max_misuse_attempts by 50% to 120000. 2022-11-20 20:34:30 -05:00
Lex Berezhny
8c6c7b655c v0.112.0 2022-10-30 21:56:17 -04:00
Lex Berezhny
48c6873fc4 channel_sign command has customizable salt 2022-10-30 21:53:53 -04:00
Victor Shyba
15dc52bd9a
Merge pull request #3695 from lbryio/3690
Fix claim fields fallback raising errors before download is saved on database
2022-10-28 11:16:52 -03:00
Victor Shyba
52d555078f initialize stored claim field for fallback earlier 2022-10-19 15:13:47 -03:00
Victor Shyba
cc976bd010 test for early fallback of suggested_file_name 2022-10-19 15:13:47 -03:00
Lex Berezhny
9cc6992011 torrents needs loop 2022-10-18 17:23:56 -04:00
Lex Berezhny
a1b87460c5 passing loop to asyncio functions is deprecated 2022-10-18 17:23:56 -04:00
jessopb
007e1115c4 v0.111.0 2022-10-18 11:18:26 -04:00
jessopb
20ae51b949
Merge pull request #3692 from lbryio/fix-import/export-backwards-compat
fix backwards compatibility in wallet import/export
2022-10-18 11:16:34 -04:00
zeppi
24e9c7b435 fix backwards compatibility in wallet import/export 2022-10-18 10:53:51 -04:00
zeppi
b452e76e1d reverting version to 0.110.0 2022-10-18 10:27:44 -04:00
Lex Berezhny
341834c30d v0.111.0 2022-10-18 00:37:44 -04:00
Victor Shyba
12bac730bd tests: check mime type as well 2022-10-18 00:31:10 -04:00
Victor Shyba
1027337833 fallback for stream name and tests 2022-10-18 00:31:10 -04:00
Victor Shyba
97fef21f75 fallback for suggested file name and tests 2022-10-18 00:31:10 -04:00
Lex Berezhny
9dafd5f69b added claim_list filtering by reposted_claim_id and fix for claim_id of reposted claim in JSON output 2022-10-18 00:28:13 -04:00
Victor Shyba
fd4f0b2049 bump first-start checkpoint 2022-10-18 00:26:10 -04:00
Lex Berezhny
734f0651a4 minor refactor 2022-10-18 00:25:41 -04:00
zeppi
94deaf55df lint 2022-10-18 00:25:41 -04:00
zeppi
d957d46f96 lint 2022-10-18 00:25:41 -04:00
zeppi
0217aede3d update docs 2022-10-18 00:25:41 -04:00
zeppi
e4e1600f51 Enable unencrypted wallet import and export 2022-10-18 00:25:41 -04:00
Jonathan Moody
d0aad8ccaf Add zlib.error string just observed for the first time. 2022-09-29 22:18:54 -04:00
Jonathan Moody
ab50cfa5c1 Add test steps to repeatedly sync_apply() using a bad password. 2022-09-29 22:18:54 -04:00
Jonathan Moody
5a26aea398 Feedback: Reuse IntegrationTestcase.generate() in generate_and_wait(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
bd1cebdb4c Bump hub to include TaskGroup fix. 2022-09-29 22:18:54 -04:00
Jonathan Moody
ec433f069f Substitute InvalidPasswordError for zlib.error. 2022-09-29 22:18:54 -04:00
Jonathan Moody
cd6d3fec9c Wait for initial sync in test_wallet_syncing_status(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
8c474a69de Hub error message changed to include blocked/filtered. 2022-09-29 22:18:54 -04:00
Jonathan Moody
8903056648 Bump hub version to latest. 2022-09-29 22:18:54 -04:00
Jonathan Moody
749a92d0e5 Wait on block height too in generate_and_wait(). 2022-09-29 22:18:54 -04:00
Jonathan Moody
a7d7efecc7 Logging level back to INFO. 2022-09-20 10:04:23 -04:00
Jonathan Moody
c88f0797a3 Log f.exception() if present instead of "Stopped". 2022-09-20 10:04:23 -04:00
Jonathan Moody
137ebd503d Test insufficient funds behavior. 2022-09-20 10:04:23 -04:00
Jonathan Moody
c3f5dd780e Revise exception handling. 2022-09-20 10:04:23 -04:00
Jonathan Moody
20b1865879 Don't use retriable_call(). Add handling for InsufficientFundsError. 2022-09-20 10:04:23 -04:00
Jonathan Moody
231b982422 Wait on usage payment TX to be processed. 2022-09-20 10:04:23 -04:00
Jonathan Moody
fd69401791 Catch and log exceptions coming from the pay() task.
Change test to reproduce failure.
2022-09-20 10:04:23 -04:00
Jonathan Moody
718d046833 Logging for test_single_server_payment debug. 2022-09-20 10:04:23 -04:00
Victor Shyba
e10f57d1ed
Merge pull request #3642 from lbryio/libtorrent
use official libtorrent, fix tests, make it a normal dependency
2022-09-09 19:58:38 -03:00
Victor Shyba
8a033d58df fix torrent component 2022-09-09 12:21:59 -03:00
Victor Shyba
c07c369a28 add libtorrent pyinstaller hook 2022-09-09 12:21:59 -03:00
Victor Shyba
5be990fc55 do not ignore libtorrent import error 2022-09-09 12:21:59 -03:00
Victor Shyba
8f26010c04 make libtorrent a normal dependency 2022-09-09 12:21:59 -03:00
Victor Shyba
3021962e3d tests: add peer directly instead of relying on torrent dht 2022-09-09 12:21:59 -03:00
Victor Shyba
84d89ce5af torrent: disable upnp, natpmp. 2022-09-09 12:21:59 -03:00
Victor Shyba
0961cad716 remove dead code 2022-09-09 12:21:59 -03:00
Jonathan Moody
5c543cb374 Wait for hub to update with all 100 new blocks
before proceeding with initial_headers_sync().
2022-09-08 22:40:09 -04:00
Jonathan Moody
f78d7896a5 Revert "Add more failure message details for debugging."
This reverts commit 5e00a79751.
2022-09-08 18:13:26 -04:00
Jonathan Moody
78a28de2aa Align style of generate() with generate_and_wait(). 2022-09-08 18:13:26 -04:00
Jonathan Moody
45a255e7a2 Reuse generate() logic to wait on hub
instead of half-baked reorg() logic.
2022-09-08 18:13:26 -04:00
Jonathan Moody
d2738c2e72 Add more failure message details for debugging. 2022-09-08 18:13:26 -04:00
Jonathan Moody
a7c7ab7f7b Correct the terminal height we wait for in generate(). 2022-09-08 18:13:26 -04:00
Jonathan Moody
988f288715 Lint fix for _es_height checks. 2022-09-08 18:13:26 -04:00
Jonathan Moody
38e9b5b432 Wait for _es_height in addition to db_height. 2022-09-08 18:13:26 -04:00
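Taken together, the commits above make the test harness wait on a joint condition; a hedged sketch, with attribute names taken from the messages and the surrounding harness assumed:
```
import asyncio

async def wait_for_hub_height(service, height: int, timeout: float = 30.0):
    # Both the db height and the ES index height must reach the target
    # before the test proceeds.
    async def poll():
        while service.db_height < height or service._es_height < height:
            await asyncio.sleep(0.1)
    await asyncio.wait_for(poll(), timeout)
```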
Victor Shyba
f7455600cc
Merge pull request #3625 from lbryio/dht_crawler
Add script to collect DHT metrics
2022-09-07 12:56:41 -03:00
Victor Shyba
c7c2d6fe5a collect connections reachability 2022-09-07 12:03:11 -03:00
Victor Shyba
c6c0228970 fix crawler startup query 2022-09-07 12:03:11 -03:00
Victor Shyba
8d9d2c76ae routing table sizes as histogram 2022-09-07 12:03:11 -03:00
Victor Shyba
0b059a5445 use a histogram for latency, remove labels 2022-09-07 12:03:11 -03:00
Victor Shyba
ab67f417ee dht_crawler: wait and retry during port switch 2022-09-07 12:03:11 -03:00
Victor Shyba
0e7a1aee0a dht_crawler: clean in memory set for expired peers 2022-09-07 12:03:11 -03:00
Victor Shyba
d0497cf6b5 dht_crawler: skip saving connections for now 2022-09-07 12:03:11 -03:00
Victor Shyba
c38573d5de dht_crawler: gather both loops, avoid task exceptions being hidden 2022-09-07 12:03:11 -03:00
Victor Shyba
f077e56cec dht_crawler:only count latency during findNode 2022-09-07 12:03:11 -03:00
Victor Shyba
5e58c2f224 fix hosting metrics, improve logging 2022-09-07 12:03:11 -03:00
Victor Shyba
cc64789e96 dht_crawler: fix logging for missing ports 2022-09-07 12:03:11 -03:00
Victor Shyba
b5c390ca04 docker: add volume declaration 2022-09-07 12:03:11 -03:00
Victor Shyba
da2ffb000e skip peers with bad ports without raising 2022-09-07 12:03:11 -03:00
Victor Shyba
df77392fe0 dht crawler:improve logging, metrics, make startup concurrent 2022-09-07 12:03:11 -03:00
Victor Shyba
9aa9ecdc0a add arg for db path 2022-09-07 12:03:11 -03:00
Victor Shyba
43b45a939b format logging 2022-09-07 12:03:11 -03:00
Victor Shyba
e2922a434f add script to generate probe dataset 2022-09-07 12:03:11 -03:00
Victor Shyba
0d6125de0b add sd_hash prober 2022-09-07 12:03:11 -03:00
Victor Shyba
13af7800c2 refactor script, remove dep 2022-09-07 12:03:11 -03:00
Victor Shyba
47a5d37d7c change default metric port, add sqlalchemy to dockerfile 2022-09-07 12:03:11 -03:00
Victor Shyba
4a3a7e318d update pip and setuptools on dht dockerfile 2022-09-07 12:03:11 -03:00
Victor Shyba
85ff487af5 dht_crawler: randomize port when idle 2022-09-07 12:03:11 -03:00
Victor Shyba
62eb9d5c75 dht_crawler: only count non zero connections 2022-09-07 12:03:11 -03:00
Victor Shyba
cfe5c8de8a dht_crawler: serve prometheus metrics at 7070 2022-09-07 12:03:11 -03:00
Victor Shyba
0497698c5b dht_crawler: skip ping if known node_id 2022-09-07 12:03:11 -03:00
Victor Shyba
508bdb8e94 dht_crawler: keep working set in memory, flush to db on intervals 2022-09-07 12:03:11 -03:00
Victor Shyba
cd42f0d726 dht_crawler: fix node id store 2022-09-07 12:03:11 -03:00
Victor Shyba
2706b66a92 dht_crawler: dont re-bootstrap. try known reachable even when they expire 2022-09-07 12:03:11 -03:00
Victor Shyba
29c2d5715d dht_crawler: fix last_seen update 2022-09-07 12:03:11 -03:00
Victor Shyba
965389b759 dht_crawler: process older first, avoid discarding 2022-09-07 12:03:11 -03:00
Victor Shyba
174439f517 dht_crawler: cleanup, try not to reset key 2022-09-07 12:03:11 -03:00
Victor Shyba
baf422fc03 dht_crawler: extract refresh_limit, bump to 1h 2022-09-07 12:03:11 -03:00
Victor Shyba
61f7fbe230 dht_crawler: avoid reads 2022-09-07 12:03:11 -03:00
Victor Shyba
c6c27925b7 dht_crawler: flush/commit only when finished 2022-09-07 12:03:11 -03:00
Victor Shyba
be4c62cf32 check membership instead of one update per peer 2022-09-07 12:03:11 -03:00
Victor Shyba
443a1c32fa dht_crawler: save a set of connections to avoid dupes, enable initial crawl 2022-09-07 12:03:11 -03:00
Victor Shyba
90c2a58470 dht_crawler: dont gather empty, fix crash 2022-09-07 12:03:11 -03:00
Victor Shyba
adc79ec404 dht_crawler: only warn for missing key if it replied 2022-09-07 12:03:11 -03:00
Victor Shyba
137d8ca4ac dht_crawler: enable WAL 2022-09-07 12:03:11 -03:00
Victor Shyba
abf4d888af dht_crawler: warn if we cannot get node id 2022-09-07 12:03:11 -03:00
Victor Shyba
6c350e57dd dht_crawler: query recently checked as stats 2022-09-07 12:03:11 -03:00
Victor Shyba
fb7a93096e only count checked unreachable 2022-09-07 12:03:11 -03:00
Victor Shyba
7ea88e7b31 dht_crawler: store data 2022-09-07 12:03:11 -03:00
Victor Shyba
2361e34541 dht crawler, initial version 2022-09-07 12:03:11 -03:00
Victor Shyba
be06378437 add method for getting the node_id from a known peer on peer manager 2022-09-07 12:03:11 -03:00
Victor Shyba
a334a93757
Merge pull request #3631 from lbryio/bootstrap_node
Add all peers when running as a bootstrap node
2022-08-29 11:07:29 -03:00
Victor Shyba
e3ee3892b2 better test name 2022-08-22 18:45:18 -03:00
Victor Shyba
d61accea1a simplify bucket refresh loop 2022-08-11 21:14:56 -03:00
Victor Shyba
e887453aa5 remove unused last_accessed 2022-08-11 20:39:51 -03:00
Victor Shyba
c3e4f0b988 add 'is_bootstrap_node' conf 2022-08-11 20:38:42 -03:00
Victor Shyba
318728aebd add bootstrap flag to routing table 2022-08-11 20:38:42 -03:00
Victor Shyba
d8c1aaebc2 routing table: mark private methods 2022-08-11 20:38:42 -03:00
Victor Shyba
d7b65c15d2 return none instead of raising 2022-08-11 20:38:42 -03:00
Victor Shyba
972db80246 move add peer logic to routing table 2022-08-11 20:38:42 -03:00
Victor Shyba
0d343ecb2f simplify iterative find constructor 2022-08-11 20:38:42 -03:00
Lex Berezhny
01cd95fe46 v0.110.0 2022-08-11 10:58:16 -04:00
Lex Berezhny
6dc57fc02c revert version 2022-08-11 10:20:58 -04:00
Lex Berezhny
10df0c1fba disable Hotbit and UPBit exchange rate feeds 2022-08-11 10:19:54 -04:00
Lex Berezhny
ec751e5add v0.110.0 2022-08-10 13:52:46 -04:00
Lex Berezhny
3e3974f813 lint 2022-08-08 14:55:44 -04:00
Lex Berezhny
ec82486e15 removed go hub dependency 2022-08-08 14:55:44 -04:00
Lex Berezhny
e16f6b07b8 revert release 2022-08-08 13:02:12 -04:00
Lex Berezhny
9a842c273b v0.110.0 2022-08-08 08:46:32 -04:00
Jonathan Moody
40f7d3ee4b Stabilize test_streaming.py by scanning the data_dir, not the parent of data_dir 2022-08-01 17:37:06 -04:00
Lex Berezhny
1dc2f0458b fix lint 2022-08-01 10:04:24 -04:00
Lex Berezhny
3924b28cc3 raise not implemented error when importing unencrypted wallet 2022-08-01 10:04:24 -04:00
Lex Berezhny
020487b6a0 account merge bug fix from upstream 2022-08-01 10:04:24 -04:00
zeppi
14037c9b2f help string edits 2022-08-01 10:04:24 -04:00
zeppi
0cb37a5c4b linting 2022-08-01 10:04:24 -04:00
zeppi
fa5f3e7e55 change api for data first, password optional, return (str) 2022-08-01 10:04:24 -04:00
zeppi
30aa0724ec newline end of test file 2022-08-01 10:04:24 -04:00
zeppi
059890e4e5 wallet import export feature 2022-08-01 10:04:24 -04:00
Jonathan Moody
9654d4f003 Obtain "amount" from new_txo.amount when calling save_supports(). 2022-08-01 09:10:49 -04:00
Jonathan Moody
956b52a2c1 Refactor _old_get_temp_claim_info(), eliminating "bid" arg. Obtain the value from txo.amount. 2022-08-01 09:10:49 -04:00
Lex Berezhny
2e975c8b61 lint 2022-07-26 22:18:29 -04:00
Lex Berezhny
656e299100 migrate key addresses on changed accounts after sync apply 2022-07-26 22:18:29 -04:00
Jack Robison
352e45b6b7 update pinned hub version 2022-07-25 10:12:46 -04:00
Jack Robison
a9a1076362 improve test_es_sync_utility 2022-07-25 10:12:46 -04:00
Jack Robison
6d370b0a12 dont skip test_setting_stream_fields 2022-07-25 10:12:46 -04:00
Jack Robison
c9fac27b66 test resolving different streams for a channel using short urls 2022-07-25 10:12:46 -04:00
Jack Robison
59bc0b9682 update censored error 2022-07-25 10:12:46 -04:00
Lex Berezhny
ba60aeeebc migrate certificates after importing new account 2022-07-18 10:36:21 -04:00
Jonathan Moody
dc427ecf6c Correct collection_update, account_fund docstrings. Regenerate api.json using generate_json_api.py. 2022-07-07 21:33:43 -04:00
Victor Shyba
87b4404767
Merge pull request #3624 from moodyjon/test_fix_exch_rate1
Fixes for intermittent test failures: test_exchange_rate_manager(), test_basic_claim_search()
2022-07-01 17:29:44 -03:00
Jonathan Moody
ba9ac489c3 Relax range in test_exchange_rate_manager.py. (again) 2022-06-19 19:17:48 -04:00
Jonathan Moody
7049629ad7 Relax range in test_exchange_rate_manager.py. 2022-06-19 19:17:40 -04:00
Jonathan Moody
3ae4aeea47 Search for longer prefix of sd_hash to give better chance of unique results. 2022-06-19 19:14:54 -04:00
Lex Berezhny
8becf1f69f v0.109.0 2022-06-08 12:40:35 -04:00
Victor Shyba
582f79ba1c do not consider pending blobs on disk space query 2022-06-08 12:25:38 -04:00
Lex Berezhny
3c28d869f4
Merge pull request #3620 from lbryio/repost_title_tags
reposts can have title, description and tags
2022-06-08 12:24:28 -04:00
Lex Berezhny
fe61b90610 reposts can have title, description and tags 2022-06-08 10:35:22 -04:00
Lex Berezhny
c04fbb2908
Merge pull request #3614 from lbryio/grins-tracker
add tracker.lbry.grin.io
2022-06-06 13:13:01 -04:00
Alex Grintsvayg
571e71b28e
add tracker.lbry.grin.io 2022-06-06 11:29:09 -04:00
Lex Berezhny
39fcfcccfb
Merge pull request #3608 from lbryio/fix_ci
upgraded SDK to use the new LBRY hub project
2022-06-06 09:01:57 -04:00
Jack Robison
2313d30996
fix reconnect test 2022-05-27 11:59:18 -04:00
Jack Robison
ac7e94c6ed
pylint 2022-05-27 09:59:11 -04:00
Jack Robison
a391fe9fc7
scribe -> hub 2022-05-27 09:58:13 -04:00
Jack Robison
ea8adc5367
update scribe env and fix tests 2022-05-27 09:58:13 -04:00
Victor Shyba
0ea8ba72dd
Env->ServerEnv from scribe changes 2022-05-26 14:28:33 -04:00
Victor Shyba
7a8d5da0e8
Merge pull request #3613 from lbryio/fix_ci_lbcd_urls
tests: fix ci lbcd/lbcwallet urls
2022-05-26 10:21:02 -03:00
Victor Shyba
da30f003e8 update lbcwallet url 2022-05-25 12:17:57 -03:00
Victor Shyba
6257948ad7 update lbcd url 2022-05-25 12:17:28 -03:00
Victor Shyba
a7f606d62c change pip upgrade due windows error 2022-05-23 16:28:36 -03:00
Victor Shyba
1d95eb1549
Merge pull request #3599 from moodyjon/async-for-pr3504
Tighten up IterativeFinder async close behavior (DHT iterator continues after consumer breaks out of it)
2022-05-23 11:12:40 -03:00
Jonathan Moody
e5e9873f79 Simplify by eliminating AsyncGenerator base and generator function. Remove any new places enforcing max_results. 2022-05-20 17:23:39 -04:00
Jonathan Moody
530f9c72ea Fix lint error lbry/utils.py 2022-05-20 17:23:39 -04:00
Jonathan Moody
fad84c771c Support official contextlib.aclosing() when it's available. 2022-05-20 17:23:39 -04:00
Jonathan Moody
fe07aac79c Define and use lbry.utils.aclosing() in lieu of official contextlib.aclosing(). 2022-05-20 17:23:39 -04:00
Jonathan Moody
91a6eae831 Fix lint issue in iterative_find.py. 2022-05-20 17:23:39 -04:00
Jonathan Moody
5852fcd287 Don't wait on running_tasks after cancel(). Sometimes a CancelledError exception is received, which is unhelpful, and complicates shutting down the generator. 2022-05-20 17:23:39 -04:00
Jonathan Moody
4767bb9dee Wrap "async for" over IterativeXXXFinder in try/finally ensuring aclose(). 2022-05-20 17:23:39 -04:00
Jonathan Moody
82d7f81f41 Correct call to _aclose() in response to TransportNotConnected. 2022-05-20 17:23:39 -04:00
Jonathan Moody
b036961954 Tighten up IterativeFinder logic to respect max_records better, and wait after task cancel().
Also make IterativeFinder a proper AsyncGenerator. This gives it an officially recognized aclose() method and could help with clean finalization.
2022-05-20 17:23:39 -04:00
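A hedged sketch of the finalization pattern this group of commits lands: prefer the official contextlib.aclosing() where it exists, fall back to a local helper otherwise, and wrap every `async for` over a finder so that breaking out of the loop still runs aclose().
```
import contextlib

try:
    from contextlib import aclosing  # official on Python >= 3.10
except ImportError:
    @contextlib.asynccontextmanager
    async def aclosing(agen):
        # Minimal stand-in; the body of lbry.utils.aclosing() may differ.
        try:
            yield agen
        finally:
            await agen.aclose()

async def first_good_peer(finder, is_good):
    # 'finder' is any async generator of peers (e.g. an iterative DHT find)
    # and 'is_good' a predicate; both parameters are illustrative.
    async with aclosing(finder) as gen:
        async for peer in gen:
            if is_good(peer):
                return peer  # the generator is finalized even on early exit
    return None
```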
Victor Shyba
5c708e1c6f
Merge pull request #3611 from lbryio/fix_hub_url
tests: fix hub url
2022-05-20 18:19:39 -03:00
Victor Shyba
9436600267 tests: bump exchange rate manager test 2022-05-20 17:25:02 -03:00
Victor Shyba
4ab29c4d5f tests: fix hub url 2022-05-20 16:50:09 -03:00
Alex Grin
6944c4a7c4
Update LICENSE 2022-05-17 12:16:00 -04:00
Victor Shyba
2735484fae
Merge pull request #3576 from lbryio/trackers
Add support for announcing and querying LBRY streams over BEP15 (BitTorrent Trackers)
2022-05-13 17:56:20 -03:00
Victor Shyba
03b0d5e250 tracker client: extract default timeout and concurrency. Bump concurrency to 100 2022-05-11 21:13:30 -03:00
Victor Shyba
629812337b changes from review 2022-05-11 21:13:30 -03:00
Victor Shyba
e54cc8850c return KademliaPeers directly into the queue instead of exposing Announcement abstraction 2022-05-11 21:13:30 -03:00
Victor Shyba
7cba51ca7d update tests, query with port 0, filter bad ports earlier, make unit tests more reliable 2022-05-11 21:13:30 -03:00
Victor Shyba
3dc145fe68 make peer list query trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
7d560df9fd use same arg name as overridden datagram_received (linting) 2022-05-11 21:13:30 -03:00
Victor Shyba
b3f894e480 add integration test for tracker discovery 2022-05-11 21:13:30 -03:00
Victor Shyba
235cc5dc05 results are indexed by ip, setdefault after resolve 2022-05-11 21:13:30 -03:00
Victor Shyba
c276053301 move server implementation to tracker module 2022-05-11 21:13:30 -03:00
Victor Shyba
2e85e29ef1 peer id PREFIX is a constant 2022-05-11 21:13:30 -03:00
Victor Shyba
1169a02c8b make client server updatable from conf 2022-05-11 21:13:30 -03:00
Victor Shyba
a7cea4082e tracker:log DNS errors as warning instead of trace 2022-05-11 21:13:30 -03:00
Victor Shyba
7e6ea97499 make peer id according to BEP20 2022-05-11 21:13:30 -03:00
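For reference, BEP20's Azureus-style convention that this commit adopts: a dash-delimited client prefix followed by random bytes, 20 bytes in total. The prefix value below is illustrative only; the real one is the PREFIX constant from commit 2e85e29ef1 above.
```
import os

PREFIX = b"-XX0001-"  # "-" + client id + version + "-"; value illustrative

def make_peer_id() -> bytes:
    return PREFIX + os.urandom(20 - len(PREFIX))
```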
Victor Shyba
3c46cc4fdd expire connection id quicker as some trackers have it set low 2022-05-11 21:13:30 -03:00
Victor Shyba
6e5c7a1927 use cache_concurrent to avoid requesting the same connection_id multiple times 2022-05-11 21:13:30 -03:00
Victor Shyba
4e09b35012 remove unused import and dead code 2022-05-11 21:13:30 -03:00
Victor Shyba
16a2023bbd stop tasks before removing transport 2022-05-11 21:13:30 -03:00
Victor Shyba
99fc7178c1 better way to batch announce + handle different intervals for different trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
d4aca89a48 handle multiple results from multiple trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
2918d8c7b4 tracker component is running only if the task is alive 2022-05-11 21:13:30 -03:00
Victor Shyba
407c570f8b tests: lower timeout, add test with bad and good mixed 2022-05-11 21:13:30 -03:00
Victor Shyba
e299a9c159 tests: multiple trackers, simple case 2022-05-11 21:13:30 -03:00
Victor Shyba
cc4a578578 tests: add support for multiple trackers 2022-05-11 21:13:30 -03:00
Victor Shyba
0e4f1eae5b reduce timeout to 10, fix lints 2022-05-11 21:13:30 -03:00
Victor Shyba
eccf0e6234 fix reusing result interval from failed expired attempt 2022-05-11 21:13:30 -03:00
Victor Shyba
a3da041412 fix exceptions on shutdown, stop using cancel_tasks 2022-05-11 21:13:30 -03:00
Victor Shyba
2f1617eee4 less verbose on timeouts, dont count timeouts, fix stop 2022-05-11 21:13:30 -03:00
Victor Shyba
05124d41ae only log when really announcing, stop counting cached ones 2022-05-11 21:13:30 -03:00
Victor Shyba
42fd1c962e stop tracker tasks on shutdown 2022-05-11 21:13:30 -03:00
Victor Shyba
47e432b4bb make it less verbose, only log after all events are fired 2022-05-11 21:13:30 -03:00
Victor Shyba
61c99abcf1 avoid re-adding the same hash when tracker is busy with too many files 2022-05-11 21:13:30 -03:00
Victor Shyba
28fdd62945 move concurrency control to lower layer 2022-05-11 21:13:30 -03:00
Victor Shyba
3855db6c66 pause announcer for 1 minute each round 2022-05-11 21:13:30 -03:00
Victor Shyba
30acde0afc at most 10 announces concurrently 2022-05-11 21:13:30 -03:00
Victor Shyba
2d9c5742c7 cache results, save interval on tracker 2022-05-11 21:13:30 -03:00
Victor Shyba
43e50f7f04 fix subscribe_hash 2022-05-11 21:13:30 -03:00
Victor Shyba
888e9918a6 improve timeout handling 2022-05-11 21:13:30 -03:00
Victor Shyba
9e9a64d989 evented system for tracker announcements 2022-05-11 21:13:30 -03:00
Victor Shyba
7acaecaed2 managed_stream: remove unused imports 2022-05-11 21:13:30 -03:00
Victor Shyba
24eb189b7f skip component on test cli 2022-05-11 21:13:30 -03:00
Victor Shyba
2344aca146 fix component property 2022-05-11 21:13:30 -03:00
Victor Shyba
758f9deafe fix unit tests 2022-05-11 21:13:30 -03:00
Victor Shyba
7b425eb2ac add tracker announcer component 2022-05-11 21:13:30 -03:00
Victor Shyba
30e8728f7f use tracker on download 2022-05-11 21:13:30 -03:00
Victor Shyba
3989eef84b return whole announcement so the caller knows the interval 2022-05-11 21:13:30 -03:00
Victor Shyba
dc6f8c4fc4 add arg to announce stopped, removing the announcement 2022-05-11 21:13:30 -03:00
Victor Shyba
2df8a1d99d make a helper function to announce 2022-05-11 21:13:30 -03:00
Victor Shyba
4ea858fdd3 add new conf: tracker_servers 2022-05-11 21:13:30 -03:00
Victor Shyba
006391dd26 move udp server to test file, add link to BEP15 2022-05-11 21:13:29 -03:00
Victor Shyba
4a0bf8a702 add torrent udp tracker client, server and tests 2022-05-11 21:13:29 -03:00
Victor Shyba
d0e715feb9
Merge pull request #3609 from lbryio/pin_scribe
CI: pin scribe, fix exchange rate manager test
2022-05-11 21:13:00 -03:00
Victor Shyba
fd73412f12 test_exchange_rate_manager: bump value 2022-05-11 20:28:06 -03:00
Victor Shyba
3819552861 try usedevelop=true 2022-05-11 20:14:55 -03:00
Victor Shyba
ca6fd5b7b9 fix scribe pinning 2022-05-11 20:14:44 -03:00
Lex Berezhny
b8867cd18c release.py script changed to use gh auth login for authentication 2022-04-10 23:28:16 -04:00
Lex Berezhny
8209eafc6b v0.108.0 2022-04-10 23:25:15 -04:00
Lex Berezhny
858e72a555
Merge pull request #3595 from lbryio/default_feer_per_name_char
pin scribe to specific version
2022-04-08 13:49:11 -04:00
Lex Berezhny
d3880fffa0 pin scribe to specific version 2022-04-08 13:48:30 -04:00
Lex Berezhny
0a51898722
Merge pull request #3593 from lbryio/default_feer_per_name_char
set the default per character fee for claims to zero
2022-04-08 13:46:54 -04:00
Lex Berezhny
63cef81015 fix scribe server version test 2022-04-08 13:22:51 -04:00
Lex Berezhny
9279865078 add sleep to transaction show test per jack suggestion 2022-04-08 12:59:25 -04:00
Lex Berezhny
fba7fc7aba fix scribe server version test 2022-04-08 12:53:19 -04:00
Lex Berezhny
a3d9d5bce7 fix transaction unit test 2022-04-08 11:05:45 -04:00
Lex Berezhny
23ecbc8ebe set the default per character fee for claims to zero 2022-04-08 10:58:02 -04:00
Lex Berezhny
42b2dbd92e
Merge pull request #3572 from orblivion/json-schema
Add wallet json-schema, validate in one test.
2022-04-08 10:56:58 -04:00
Lex Berezhny
37eb55375a only install jsonschema during testing 2022-04-08 10:56:18 -04:00
Lex Berezhny
94bf357817 cleanup paths 2022-04-08 10:56:18 -04:00
Daniel Krol
eca69391ef Add wallet json-schema, validate in one test. 2022-04-08 10:56:18 -04:00
Lex Berezhny
d0c5b32a90
Merge pull request #3575 from lbryio/spend_time_locked
added `account_deposit` command which is able to deposit time locked transaction into wallet
2022-04-08 10:52:08 -04:00
Lex Berezhny
84ef52cf4d fix redeem scripthash test 2022-04-08 10:11:11 -04:00
Lex Berezhny
8fb14bf713 remove command not available in lbcd 2022-04-08 09:59:22 -04:00
Lex Berezhny
16eb50a291 working jsonrpc_account_deposit 2022-04-08 09:57:15 -04:00
Lex Berezhny
dd503fbb82 set locktime from script 2022-04-08 09:57:15 -04:00
Lex Berezhny
ae79314869 wip 2022-04-08 09:57:15 -04:00
Lex Berezhny
0cbc514a8e account_deposit command added which accepts time locked TXs 2022-04-08 09:57:15 -04:00
Lex Berezhny
5777f3e15c wip 2022-04-08 09:57:15 -04:00
Lex Berezhny
8cdcd770c0
Merge pull request #3590 from lbryio/fix-address_list-pagination
fix pagination with `address_list` command
2022-04-06 09:52:41 -04:00
Lex Berezhny
2d20458bc2 re-use existing constraints cleanup function 2022-04-06 09:09:39 -04:00
zeppi
2bd2088248 bugfix 2022-04-06 09:09:39 -04:00
zeppi
5818270803 fix address_list pagination 2022-04-06 09:09:39 -04:00
Victor Shyba
79a5f0e375 lint 2022-04-05 00:35:48 -03:00
Victor Shyba
c830784f65
Merge pull request #3586 from AlessandroSpallina/master
fix #3530 added error log when tcp port is already in use
2022-04-05 00:04:59 -03:00
Victor Shyba
3fc538104d v0.107.2 2022-03-31 17:19:58 -03:00
AlessandroSpallina
96490fdb15
Merge branch 'master' into master 2022-03-29 13:50:57 +02:00
Victor Shyba
5a0c225c6f v0.107.0 2022-03-28 15:56:06 -03:00
Lex Berezhny
c3e524cb8b
Merge pull request #3588 from lbryio/scribe
move `lbry.wallet.server` to new project called `scribe`, switch from using `lbrycrd` to `lbcd` in integration tests
2022-03-28 00:14:54 -04:00
Jack Robison
9faf6e46ca move lbry.wallet.server to new project called scribe
switch from using lbrycrd to lbcd
2022-03-27 23:33:26 -04:00
Victor Shyba
e89acac235
Merge pull request #3585 from lbryio/fix_blob_db_queries
Fixes bugs on disk space management and stream recovery
2022-03-24 21:01:14 -03:00
Victor Shyba
200761ff13 make added_on a required parameter on BlobInfo, fix callers 2022-03-24 19:51:48 -03:00
Victor Shyba
cb78e95e3d add missing space on query, typo 2022-03-23 13:40:01 -03:00
AlessandroSpallina
f01cf98d62 fix #3530 added error log when tcp port is already in use 2022-03-22 17:17:41 +01:00
Victor Shyba
c9c2495611 if a blob file exists but is pending on db, fix on startup 2022-03-21 21:58:36 -03:00
Victor Shyba
aac72fa512 fix bug where recovery doesnt update blob status 2022-03-21 21:33:33 -03:00
Victor Shyba
c5e2f19dde fix bug where added_on is always 0 for downloads 2022-03-21 04:38:51 -03:00
Victor Shyba
34bd9e5cb4 exclude sd blobs from calculation and make them be picked last on removal 2022-03-21 04:26:27 -03:00
Lex Berezhny
ad489ed606
Merge pull request #3581 from lbryio/deterministic_channel_keys_post_unlock
eagerly load deterministic channel keys immediately after wallet is unlocked
2022-03-14 12:36:04 -04:00
Lex Berezhny
bb541901d9 fix tests 2022-03-13 21:30:38 -04:00
Lex Berezhny
ca4ba19a5e fixes #3577 2022-03-13 20:42:34 -04:00
Victor Shyba
f05943ff79 implement announcer as a consumer task on gather 2022-03-02 13:00:34 -03:00
Victor Shyba
7ded8a1333 make active an explicit ordered dict 2022-03-02 13:00:34 -03:00
Victor Shyba
c2478d4add remove unused search rounds 2022-03-02 13:00:34 -03:00
Victor Shyba
f69747bc89 timeout is now supported on dht tests 2022-03-02 13:00:34 -03:00
Victor Shyba
441cc950aa fix and enable test_blob_announcer 2022-03-02 13:00:34 -03:00
Victor Shyba
a76a0ac8c4 simplify dht mock and restore clock after accelerating 2022-03-02 13:00:34 -03:00
Victor Shyba
8b1009161a better representation of kademliapeer on debug logs 2022-03-02 13:00:34 -03:00
Victor Shyba
868a620e91 add a way to wait announcements to finish so tests are reliable 2022-03-02 13:00:34 -03:00
Victor Shyba
a0e34b0bc8 make timeout handler immune to asyncio time tricks 2022-03-02 13:00:34 -03:00
Victor Shyba
612dbcb2f3 allow running some extra probes for k replacements 2022-03-02 13:00:34 -03:00
Victor Shyba
b3614d965d remove all references to bottoming out 2022-03-02 13:00:34 -03:00
Victor Shyba
5d7137255e no stop condition, let it exhaust 2022-03-02 13:00:34 -03:00
Victor Shyba
6ff867ef55 bottoming out is now warning and no results for peer search 2022-03-02 13:00:34 -03:00
Victor Shyba
c14915df29 don't probe peers too far from the top closest 2022-03-02 13:00:34 -03:00
Victor Shyba
7d4966e2ae use a dict for the active queue 2022-03-02 13:00:34 -03:00
Victor Shyba
3876e0317d log bottom out of peer search in debug, show short key id for find value 2022-03-02 13:00:34 -03:00
Victor Shyba
0b2b10f759 bump bottom out limit of peer search so people can use 100 concurrent announcers 2022-03-02 13:00:34 -03:00
Victor Shyba
9a79b33664 wait until k peers are ready. do not double add peers 2022-03-02 13:00:34 -03:00
Victor Shyba
af1a6edd15 only return good (contacted) peers 2022-03-02 13:00:34 -03:00
Victor Shyba
b78929f4d5 reset closest peer on failure 2022-03-02 13:00:34 -03:00
Victor Shyba
fb6e342043 add peers from shortlist regardless, but check from other nodes 2022-03-02 13:00:34 -03:00
Victor Shyba
0faa2d35da bump split index to 2 2022-03-02 13:00:34 -03:00
Victor Shyba
511e57c231 fix distance sorting and improve logging 2022-03-02 13:00:34 -03:00
Victor Shyba
d762d675c4 closest peer is only ready when it was contacted and isn't known to be bad 2022-03-02 13:00:34 -03:00
Victor Shyba
3fdadee87c dont probe and ignore bad peers 2022-03-02 13:00:34 -03:00
Victor Shyba
1aa4d9d585 simplify, generalize to any size and fix tests 2022-02-28 13:06:51 -03:00
Victor Shyba
8019f4bdb3 stop after finding what to download 2022-02-28 13:06:51 -03:00
Victor Shyba
ca65c1ebc5 replace duplicated code 2022-02-28 13:06:51 -03:00
Victor Shyba
f0e47aae86 add get_colliding_prefix_bits, docs and tests 2022-02-28 13:06:51 -03:00
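A hedged sketch of what a helper with this name could compute (the real function in lbry.dht may differ in shape): the number of leading bits two equal-length ids share, via XOR and bit_length().
```
def get_colliding_prefix_bits(a: bytes, b: bytes) -> int:
    """Count the leading bits shared by two equal-length ids."""
    assert len(a) == len(b)
    xor = int.from_bytes(a, "big") ^ int.from_bytes(b, "big")
    return len(a) * 8 - xor.bit_length()

# Identical ids collide on every prefix bit:
# get_colliding_prefix_bits(node_id, node_id) == len(node_id) * 8
```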
Victor Shyba
dc7cd545ba extract method and avoid using hash builtin name 2022-02-28 13:06:51 -03:00
Victor Shyba
76bd59d82e extract min_prefix_colliding_bits to a contanst 2022-02-28 13:06:51 -03:00
Victor Shyba
461687ffb4 check that the stored blob is at least 1 prefix byte close to peer id 2022-02-28 13:06:51 -03:00
Victor Shyba
dd5b9ca81b add migrator to set head blobs should_announce=0 2022-02-20 22:33:57 -03:00
Victor Shyba
89ed04f8a7 fix test_announces 2022-02-20 22:33:57 -03:00
Victor Shyba
ec0d9f06c5 do not search for the head blob 2022-02-20 22:33:57 -03:00
Victor Shyba
03b59ac6fc dont set head blob to announce on save 2022-02-20 22:33:57 -03:00
Victor Shyba
43ac3336d7 break tie by length 2022-02-20 22:24:04 -03:00
Victor Shyba
d12c78db74 fix and test case for blob_clean after disabling network storage 2022-02-20 22:24:04 -03:00
Jack Robison
bfaf1b0957
Merge pull request #3564 from lbryio/fix_downloader_losing_peers
fix handling re-adding lost peers during download
2022-02-16 11:55:22 -05:00
Victor Shyba
bb60c385d5 put back all the peers, get rid of re_add 2022-02-08 21:41:52 -03:00
Alex Grin
c96d1d9c32
Merge pull request #3537 from lbryio/repost_update 2022-02-08 12:20:20 -05:00
Alex Grintsvayg
7c7a0d4bdf
let stream_update work on non-stream claims 2022-02-08 09:28:17 -05:00
Lex Berezhny
cc829a7bf4
Merge pull request #3558 from lbryio/jeffreypicard-patch-1
Update __init__.py
2022-02-04 12:36:01 -05:00
Jeffrey Picard
e0ea6383e2
Update __init__.py
Update go hub binary to fix es sync test.
2022-02-04 12:17:19 -05:00
Lex Berezhny
bcec5dc2ae
Merge pull request #3556 from lbryio/txo_dust_prevention
prevent creation of change which is below the dust threshold of 1000 dewies
2022-02-04 12:08:16 -05:00
Lex Berezhny
cba9c16a06 fix 2022-02-04 12:07:41 -05:00
Lex Berezhny
dd68fb077b prevent creation of change which is below the dust threshold of 1000 dewies 2022-02-04 12:07:41 -05:00
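A minimal sketch of the rule this commit adds, under the assumption (not stated in the message) that sub-threshold change is simply left to the fee: 1000 dewies (1 LBC = 10**8 dewies) is the floor below which a change output is not created.
```
DUST_THRESHOLD = 1000  # dewies

def change_output_amount(inputs_total: int, outputs_total: int, fee: int) -> int:
    change = inputs_total - outputs_total - fee
    if change < DUST_THRESHOLD:
        return 0  # too small to be worth an output; remainder goes to the fee
    return change
```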
Jack Robison
c2294e97db
Merge pull request #3552 from lbryio/bump_dht_cache
Increase DHT peer manager cache size to 16384
2022-02-04 11:59:19 -05:00
Victor Shyba
c0f512ace7 bump DHT peer manager cache to 16384 2022-02-02 16:54:42 -03:00
Lex Berezhny
3305eb67c6
Merge pull request #3548 from lbryio/announce_metrics
Add optional Prometheus metrics for DHT announcements
2022-02-02 11:06:48 -05:00
Victor Shyba
c9d637b4da add gauge for queue size 2022-02-02 11:56:42 -03:00
Victor Shyba
ae3e8fadf5 count announcements and how many peers we were able to announce to 2022-02-02 11:56:42 -03:00
Lex Berezhny
a1abd94387
Merge pull request #3542 from eug3nix/gh_3481_file_type_detection
file type detection now looks inside the file to determine the type, in addition to using the file extension
2022-01-31 10:29:47 -05:00
Eugene Dubinin
9b463a8cab adds tests for guess_media_type
removes unnecessary comments
2022-01-29 20:49:42 +02:00
Eugene Dubinin
babc54a240 adjusts code style 2022-01-29 15:25:17 +02:00
Eugene Dubinin
5836a93b21 fixes KeyError on missing synonyms 2022-01-29 15:25:17 +02:00
Eugene Dubinin
557348e345 detect media_type from the file contents 2022-01-29 15:25:17 +02:00
Lex Berezhny
9adfec6b00
Merge pull request #3549 from lbryio/wallet_lock_w_deterministic_channels
wallet locking/unlocking no longer breaks deterministic channel keys
2022-01-26 11:17:55 -05:00
Lex Berezhny
3a496902f8 wallet locking/unlocking no longer breaks deterministic channel keys 2022-01-24 09:45:08 -05:00
Lex Berezhny
b5ead91746
Merge pull request #3534 from lbryio/normalize_signatures
drop dependency on cryptography library in wallet module
2022-01-17 13:38:20 -05:00
Lex Berezhny
302461b446 updated based on code review 2022-01-17 11:08:28 -05:00
Lex Berezhny
ac201c718e drop dependency on cryptography library in wallet module 2022-01-17 10:43:59 -05:00
Jack Robison
f78e3825ca
Merge pull request #3500 from lbryio/fix_script
Add Prometheus metrics for DHT internals
2022-01-14 12:46:28 -05:00
Victor Shyba
0618053bd4 remove request_flight metric 2022-01-12 12:41:04 -03:00
Victor Shyba
8e6fa3490c disable CSV endpoints by default 2022-01-12 12:39:23 -03:00
Victor Shyba
8a1a1a4000 remove estimation endpoints as that is done over prometheus metrics now 2022-01-12 12:39:23 -03:00
Victor Shyba
fd9dcbf9a8 add granular metric for stored blob prefix, for network announcements calculation 2022-01-12 12:39:23 -03:00
Victor Shyba
beb8583436 change colliding bits metric to gauge 2022-01-12 12:39:23 -03:00
Victor Shyba
b44e2c0b38 count bit collisions between 8 and 16 2022-01-12 12:39:23 -03:00
Victor Shyba
06e94640b5 add counter for peers with colliding bytes 2022-01-12 12:39:23 -03:00
Victor Shyba
ff36bdc802 add requests in flight and error 2022-01-12 12:39:23 -03:00
Victor Shyba
46f576de46 add request received 2022-01-12 12:39:23 -03:00
Victor Shyba
7b09c34fce add request_sent and request_time metric on dht 2022-01-12 12:39:23 -03:00
Victor Shyba
a22f50aa84 add storing_peers and peer_manager_keys 2022-01-12 12:39:23 -03:00
Victor Shyba
2d9130b4e0 prometheus: move blobs_stored and peers to SDK. add buckets_in_routing_table 2022-01-12 12:39:23 -03:00
Victor Shyba
470ee72462 add passive estimation to prometheus 2022-01-12 12:39:23 -03:00
Victor Shyba
add147b409 fix missing async 2022-01-12 12:39:23 -03:00
Victor Shyba
371df6e6c2 keep same node id between runs 2022-01-12 12:39:23 -03:00
Victor Shyba
7ed5fe8f66 add semaphore on active estimation to avoid abuse 2022-01-12 12:39:23 -03:00
Victor Shyba
a6ca7a6f38 same api across different estimation methods 2022-01-12 12:39:23 -03:00
Victor Shyba
1c857b8dd8 be explicit about ignoring params 2022-01-12 12:39:23 -03:00
Victor Shyba
87ff3f95ff better endpoint names, small docs 2022-01-12 12:39:23 -03:00
Victor Shyba
5cb4c06d0c add prefix_neighbors_count to routing table debug api 2022-01-12 12:39:23 -03:00
Jack Robison
e7d9079389 improve script 2022-01-12 12:39:23 -03:00
Victor Shyba
9cdcff0e1e first attempt at crawling 2022-01-12 12:39:23 -03:00
Lex Berezhny
a4dce8cf9f
Merge pull request #3535 from vertbyqb/hexdata-string
convert hexdata argument to a string before signing in `channel_sign` command
2022-01-10 09:48:41 -05:00
Lex Berezhny
aaa11c02bf added integration test 2022-01-10 08:46:10 -05:00
vertbyqb
d2ebbf5db6 jsonrpc_channel_sign - Convert hexdata to a string before signing
Fixes #3533
2022-01-10 08:46:10 -05:00
Jack Robison
e6efc1ad4a
Merge pull request #3538 from lbryio/dht_memory
Unify and fix DHT memory caches for peer manager
2022-01-07 11:29:44 -05:00
Victor Shyba
a8523996a9 extract cache values, increase peer cache to 2048 2022-01-07 12:58:52 -03:00
Victor Shyba
f586de2bbe DHT bugfix: failures tracking should be bound to 2048 LRU cache size 2022-01-07 12:46:00 -03:00
Victor Shyba
7df02303b2 fix missing docopt argument 2022-01-05 17:10:31 -03:00
Victor Shyba
f89c75e642 bump hub version to latest supporting sd_hash search 2022-01-05 17:10:31 -03:00
Victor Shyba
d2c1961101 update hub protobuf including sd_hash field 2022-01-05 17:10:31 -03:00
Victor Shyba
2a4c5a48bf increase indexed sd_hash prefix to 4 chars 2022-01-05 17:10:31 -03:00
Victor Shyba
5f5f39a4aa enable and test prefix search for sd hash 2022-01-05 17:10:31 -03:00
Victor Shyba
df54cc04af sync and search sd_hash 2022-01-05 17:10:31 -03:00
Victor Shyba
0439616480 add test 2022-01-05 17:10:31 -03:00
Victor Shyba
19fa274227 add sd hash to API 2022-01-05 17:10:31 -03:00
Lex Berezhny
8076000c27
Merge pull request #3450 from lbryio/deterministic_channel_keys
deterministic channel keys (requires wallet server re-sync)
2021-12-23 15:38:15 -05:00
Lex Berezhny
c80b30f070 test another signed claim by ytsync 2021-12-22 18:29:46 -05:00
Lex Berezhny
486d5c48b0 takeover tests fix 2021-12-22 18:29:46 -05:00
Lex Berezhny
4822792ee2 create nondeterministic channel in test to replicate old test behavior 2021-12-22 18:29:46 -05:00
Lex Berezhny
569f1d42b1 fix tests 2021-12-22 18:29:46 -05:00
Lex Berezhny
23c10faff5 lint 2021-12-22 18:29:46 -05:00
Lex Berezhny
1eaa195363 reduced crypto dependency in wallet to coincurve 2021-12-22 18:29:46 -05:00
Lex Berezhny
fb57cfa5d8 moved imports for lint 2021-12-22 18:29:46 -05:00
Lex Berezhny
d33086c8f7 deleted extraneous test 2021-12-22 18:29:46 -05:00
Lex Berezhny
d815a6f02c use ecdsa for signing/verifying instead of coincurve due to compatibility issues 2021-12-22 18:29:46 -05:00
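A brief sketch with the third-party ecdsa package this commit switches to; the SECP256k1 curve is an assumption on my part, matching the curve LBRY channel keys use.
```
import ecdsa

sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
vk = sk.get_verifying_key()
signature = sk.sign(b"claim payload")
assert vk.verify(signature, b"claim payload")  # raises BadSignatureError on mismatch
```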
Lex Berezhny
8216f4a873 work in progress 2021-12-22 18:29:46 -05:00
Lex Berezhny
e4cc4521d9 channel key generation no longer arbitrarily bounded 2021-12-22 18:29:46 -05:00
Lex Berezhny
6bd9b3744d progress, channel keys generate deterministically now 2021-12-22 18:29:46 -05:00
Lex Berezhny
f741b00768 progress on deterministic channel keys 2021-12-22 18:29:46 -05:00
Lex Berezhny
5eb95d7dd4
Merge pull request #3529 from lbryio/change_default_coin_selection_strategy
changes default coin selection strategy from standard to prefer_confirmed
2021-12-22 11:28:22 -05:00
Lex Berezhny
e5268f43e7 changes default coin selection strategy from standard to prefer_confirmed 2021-12-21 10:22:09 -05:00
Victor Shyba
54d6fb9da4 do not limit DHT results by K, respect max_results 2021-12-09 14:34:55 -03:00
Victor Shyba
3d5c9cc1c2 clarify DHT debug logging on key and operation 2021-12-09 14:32:30 -03:00
Alex Grin
442326f1d8
Merge pull request #3499 from lbryio/multiple-release-time-constraints
@jeffreypicard assures me these timeouts are safe to ignore
2021-12-06 11:43:46 -05:00
Jeffrey Picard
d66f46e07b Switch RangeField back to ints 2021-12-03 18:12:38 -05:00
Jeffrey Picard
757b53443d Try forcing tox reset 2021-12-03 17:42:56 -05:00
Jeffrey Picard
3436965b33 Debugging 2021-12-03 17:22:52 -05:00
Jeffrey Picard
df71132957
Update es version in workflow 2021-12-03 13:03:00 -05:00
Jeffrey Picard
1b322dc404
Update protobufs, go hub shim, and claim test. 2021-12-03 13:03:00 -05:00
Jack Robison
58341f4ff1
remove unused ES fields 2021-12-03 13:03:00 -05:00
Jack Robison
0d3ca80008
support lists of constraints for all range fields 2021-12-03 13:03:00 -05:00
Lex Berezhny
63437712cd
Merge pull request #3490 from ghost/integration_test_setup_cleanup_timeouts
added timeout of async operations to integration test setup/teardown
2021-12-02 19:52:44 -05:00
Jack Robison
26d0e87f46 v0.106.0 2021-12-02 17:17:00 -05:00
Jack Robison
2cad4fa1ce update json docs 2021-12-02 14:51:52 -05:00
Jack Robison
7bb293e5d6 update claim_search doc
backward compatibility for `trending_mixed`, `trending_local`, `trending_global`, and `trending_group` args to `claim_search`
2021-12-02 14:51:52 -05:00
Lex Berezhny
e4777f9314
Merge branch 'master' into integration_test_setup_cleanup_timeouts 2021-12-01 22:08:18 -05:00
Jack Robison
3508f562a7
update json docs 2021-12-01 18:47:03 -05:00
Jack Robison
1aa66c6038
update header checkpoints 2021-12-01 18:46:24 -05:00
Victor Shyba
e7458edb72 test case for stream_type search on claims missing source + fix 2021-12-01 18:42:47 -05:00
Lex Berezhny
7f97013703
Merge pull request #3497 from lbryio/fee_per_name_env_var
fee per name env var
2021-12-01 11:26:00 -05:00
Lex Berezhny
9e43060d41 fee per name env var 2021-12-01 10:22:34 -05:00
FemtosecondLaser
d69486fb6e returned conditional check in add_timeout() as it was making test_node.py tests unhappy 2021-11-30 01:01:35 +00:00
FemtosecondLaser
d4ebfdbc3c removed conditional check in add_timeout() 2021-11-29 22:56:50 +00:00
FemtosecondLaser
e00c3db71a
Merge branch 'master' into integration_test_setup_cleanup_timeouts 2021-11-29 21:50:05 +00:00
Victor Shyba
11c3ea0b87 fix typo from arg name 2021-11-24 13:05:43 -03:00
Jack Robison
7531401623
keep touched_or_deleted records 2021-11-21 13:52:03 -05:00
FemtosecondLaser
e6c1dc251e changed addTimeout to add_timeout for lint compliance 2021-11-20 00:47:46 +00:00
FemtosecondLaser
dca7977051 added timeout of async operations to integration test setup/teardown 2021-11-20 00:22:25 +00:00
Victor Shyba
d19e07d661 add blob endpoint for listing announced blobs 2021-11-17 13:27:19 -03:00
Victor Shyba
751ff6e21f add /peers.csv to monitoring endpoint 2021-11-17 13:27:19 -03:00
Brendon J. Brewer
3f6fe995b8 Rename trending 2021-11-16 10:59:10 -05:00
Jack Robison
1e00fb369d fix missing es notification for support amount changing 2021-11-15 00:58:18 -05:00
Jack Robison
54b522383a improve tests 2021-11-15 00:58:18 -05:00
Jack Robison
90a7de3b5c improve resolve tests 2021-11-15 00:58:18 -05:00
Jack Robison
3fe1582432 fix duplicate trending notification to ES 2021-11-15 00:58:18 -05:00
Jack Robison
85eddd2100 fix effective amount for resolve/ES being off while claims/supports are unactivated 2021-11-15 00:58:18 -05:00
Jack Robison
f5f8775c59 fix test_colliding_short_id 2021-11-10 13:02:28 -03:00
Jack Robison
0ca98678f7 update default tcp/blob port to be the same as the default udp/dht port (4444) 2021-11-10 13:02:28 -03:00
Victor Shyba
a19060c08d log unexpected errors, rename task/loop 2021-11-09 14:27:06 -05:00
Victor Shyba
fa2ad88cc4 clear cache on test assertions 2021-11-09 14:27:06 -05:00
Victor Shyba
63cbcd0956 make sure the downloader always stops gracefully 2021-11-09 14:27:06 -05:00
Victor Shyba
d6d0ebf8f4 cache space stats from running components so status is instant 2021-11-09 14:27:06 -05:00
Victor Shyba
0d810d92ca add index for blob table so size summaries are faster 2021-11-09 14:27:06 -05:00
Victor Shyba
1ff914a6f4 download from stored announcements and dont reannounce 2021-11-09 14:27:06 -05:00
Victor Shyba
5959b1be72 improve disk space manager status, include more info and unify space queries 2021-11-09 14:27:06 -05:00
Victor Shyba
d12a214c05 normal_blobs->stream_blobs, proactive->background 2021-11-09 14:27:06 -05:00
Victor Shyba
3a83052f2e fix free space calculation, test it and give a margin of 10mb before starting so it doesnt insist when full 2021-11-09 14:27:06 -05:00
Victor Shyba
510b44ca92 move more logic out of the downloader component 2021-11-09 14:27:06 -05:00
Victor Shyba
15edb6756d extract background downloader to its own class 2021-11-09 14:27:06 -05:00
Victor Shyba
fbfd02b08b add analytics event for network disk space 2021-11-09 14:27:06 -05:00
Victor Shyba
b39c26fc86 announce orphan blobs manually, as that was done when save stream 2021-11-09 14:27:06 -05:00
Victor Shyba
95b2c8d175 cleanup background downloader blobs from conf 2021-11-09 14:27:06 -05:00
Victor Shyba
d52748b09f separated network seeding space metrics 2021-11-09 14:27:06 -05:00
Victor Shyba
34d18a3a9a don't save streams for network blobs and bypass disk space manager 2021-11-09 14:27:06 -05:00
Victor Shyba
3b27d6a9b5 add conf for network seeding space limit 2021-11-09 14:27:06 -05:00
Victor Shyba
703c391f99 schedule the download task instead 2021-11-09 14:27:06 -05:00
Victor Shyba
4f1dc29df1 fix unit tests from component dependency chain changes 2021-11-09 14:27:06 -05:00
Victor Shyba
13667df374 download from DHT 2021-11-09 14:27:06 -05:00
Victor Shyba
8800d6985f drop channel support, prepare to hook into DHT 2021-11-09 14:27:06 -05:00
Victor Shyba
364b8f2605 handle case where something that isn't a sd blob gets hit 2021-11-09 14:27:06 -05:00
Victor Shyba
67b9ea9deb no api yet 2021-11-09 14:27:06 -05:00
Victor Shyba
b78f2336a7 download only blobs 2021-11-09 14:27:06 -05:00
Victor Shyba
c7ba637c7d fix tests 2021-11-09 14:27:06 -05:00
Victor Shyba
23a5ce3df7 fix exception arguments 2021-11-09 14:27:06 -05:00
Victor Shyba
8f88e28e50 test add/remove/list subscriptions 2021-11-09 14:27:06 -05:00
Victor Shyba
9cf6139557 fix and test main api 2021-11-09 14:27:06 -05:00
Victor Shyba
d556065a8b download all blobs and check that on tests 2021-11-09 14:27:06 -05:00
Victor Shyba
951716f7dc create downloader component and initial tests 2021-11-09 14:27:06 -05:00
Victor Shyba
1ddc7ddda3 with the fix we no longer need to restart the stream 2021-11-08 10:50:47 -05:00
Victor Shyba
903ed9f3dc fix tests by checking there are actual blobs being deleted 2021-11-08 10:50:47 -05:00
Victor Shyba
c42b76dcb8 dont lose results on duplicates, just warn 2021-11-08 10:50:47 -05:00
Victor Shyba
a73582d9ae remove tried_for_this_blob so banned peers are retried for same blob 2021-11-08 10:50:47 -05:00
Cristian Vicas
42c4fc7557 Bug [#2070] where blob_get RPC timed out.
Both stream.downloader and blob_exchange.downloader paths are adding the fixed_peers list to the DHT node.
Tested jsonrpc_blob_get daemon call.
2021-11-08 10:49:48 -05:00
Jack Robison
ddbbb6f1dd
use mempool cache in transaction_get_batch 2021-10-27 20:19:08 -04:00
Lex Berezhny
ff21a92330
Merge pull request #3457 from FemtosecondLaser/feature/3270-check-default-download-dir-writable
Modified ensure_directory_exists() to check if the directory is writable by the process.
2021-10-27 11:00:13 -04:00
FemtosecondLaser
07f76f7ad1 Added an integration test covering the following scenario:
On start, if download dir is non-writable - daemon terminates with a helpful message.
2021-10-26 11:17:52 +01:00
Jack Robison
c90ccffd7b
Update docker-compose-wallet-server.yml 2021-10-25 14:20:39 -04:00
Jack Robison
a00d5f18af
add script to setup docker volumes from snapshots 2021-10-24 16:25:34 -04:00
Jack Robison
1e391d211b
fix attempting to update trending on abandoned claims 2021-10-23 18:39:04 -04:00
FemtosecondLaser
d87f9672fa Improved the readability of the tests. 2021-10-23 13:12:49 +01:00
FemtosecondLaser
2b5838aa01 Changed the tests to execute against a real file system instead of a fake one. 2021-10-23 02:52:58 +01:00
Jack Robison
e10486d6ec
update docs 2021-10-22 16:51:59 -04:00
Jack Robison
1a74d6604d skip loading tx/claim caches in the elastic sync script when not needed 2021-10-22 15:10:35 -04:00
Alex Grin
6d118536b6
Merge pull request #3460 from lbryio/dht_seed_script_metrics 2021-10-22 12:44:16 -04:00
Alex Grin
ca4d758db9
Merge branch 'master' into dht_seed_script_metrics 2021-10-22 11:54:19 -04:00
Victor Shyba
dc18c26aa4 add optional prometheus to dht_node script 2021-10-22 03:39:46 -03:00
Jack Robison
48505c2968 update trending with help from @eggplantbren 2021-10-21 00:17:12 -04:00
Jack Robison
a98ea1e66a update sync script to handle ES falling behind leveldb on shutdown 2021-10-20 23:41:11 -04:00
Jack Robison
3dec697816 logging 2021-10-20 23:41:11 -04:00
Jack Robison
88fd41e597 update docker 2021-10-20 23:41:11 -04:00
Jack Robison
b05d071a1c update Env to accept parameters from cli args 2021-10-20 23:41:11 -04:00
Jack Robison
a27d3b9689 set default CACHE_MB to 1024mb and the default QUERY_TIMEOUT_MS to 10s 2021-10-20 23:41:11 -04:00
Jack Robison
1facc0cd01 remove unused hub env settings 2021-10-20 23:41:11 -04:00
FemtosecondLaser
837f91d830 renamed the test class to be more specific about the sut 2021-10-21 00:31:02 +01:00
FemtosecondLaser
9c5f5aefb0 removed redundant tests
renamed a test to be more specific about the kind of the precondition
2021-10-21 00:27:31 +01:00
FemtosecondLaser
6b8d4a444b Modified ensure_directory_exists() to check if the directory is writable by the process. 2021-10-20 15:26:16 +01:00
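A sketch of the check this commit adds, assuming os.access() is the mechanism (the real helper in lbry may raise its own error types): the function now fails fast when the directory exists but is not writable by the process.
```
import os

def ensure_directory_exists(path: str):
    if not os.path.isdir(path):
        os.makedirs(path, exist_ok=True)
    if not os.access(path, os.W_OK):
        raise PermissionError(f"directory is not writable by the process: {path}")
```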
Jack Robison
6bef09a3b1 update lbry-hub-elastic-sync to support resyncing recent blocks 2021-10-19 15:53:20 -04:00
Jack Robison
e35319e5a2 add CACHE_ALL_CLAIM_TXOS hub setting 2021-10-19 15:53:20 -04:00
Jack Robison
0e548b3812 remove dead code 2021-10-19 15:53:20 -04:00
Jack Robison
bfac02ccab add CACHE_ALL_TX_HASHES setting to optionally use more memory to save i/o 2021-10-19 15:53:20 -04:00
Jack Robison
7ea1a2b361 sleeps 2021-10-19 15:53:20 -04:00
Jack Robison
99df418f1d improve resolve caching 2021-10-19 15:53:20 -04:00
Jack Robison
6416d8ce9c threadpools for block processor and es sync reader 2021-10-19 15:53:20 -04:00
Jack Robison
22b43a2b01 doc strings 2021-10-19 15:53:20 -04:00
Jack Robison
05e5d24c5e improve claims_producer performance 2021-10-19 15:53:20 -04:00
Jack Robison
eabcc30367 resolve lru cache 2021-10-19 15:53:20 -04:00
Jack Robison
f5e0ef5223 add block_txs index 2021-10-19 15:53:20 -04:00
Jack Robison
f46d9330b0 smaller caches 2021-10-19 15:53:20 -04:00
Jack Robison
b62a0b4607 Update daemon.py
docstring
2021-10-15 09:40:15 -04:00
Cristian Vicas
1f044321fb Updated documentation for RPC calls: status, blob_list. 2021-10-15 09:40:15 -04:00
Jack Robison
a841d49483
Merge branch 'belikor-fix-wrong-url' 2021-10-15 09:00:59 -04:00
belikor
9509acc490
file_manager: raise new InvalidStreamURLError if the URL is invalid
When using `lbrynet get URL`, if the URL is not a valid URL
the function `url.URL.parse` will raise a `ValueError` exception
which will produce a whole backtrace.

For example, this is the case if we provide a channel name
with a forward slash but without a stream name.
```
lbrynet get @Non-existing/
```

```
Traceback (most recent call last):
  File "/opt/git/lbry-sdk/lbry/file/file_manager.py", line 84, in download_from_uri
    if not URL.parse(uri).has_stream:
  File "/opt/git/lbry-sdk/lbry/schema/url.py", line 114, in parse
    raise ValueError('Invalid LBRY URL')
ValueError: Invalid LBRY URL
WARNING  lbry.extras.daemon.daemon:1110: Error downloading Non-existing/: Invalid LBRY URL
```

Now we raise a new `InvalidStreamURLError` which can be trapped in the upper functions
that use `url.URL.parse` such as `FileManager.download_from_uri`.
If we do this the traceback won't be shown.
```
WARNING  lbry.file.file_manager:252:
Failed to download Non-existing/: Invalid LBRY stream URL: '@Non-existing/'
WARNING  lbry.extras.daemon.daemon:1110:
Error downloading Non-existing/: Invalid LBRY stream URL: '@Non-existing/'
```

This handles the case when trying to download only "channel" parts
without the claim part.
```
lbrynet get @Non-existing
lbrynet get @Non-existing/
lbrynet get Non-existing/
```
2021-10-15 08:59:37 -04:00
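A minimal sketch of the pattern the commit message describes; lbry.schema.url.URL is taken from the traceback above, while the import path and constructor of InvalidStreamURLError are assumed.
```
from lbry.schema.url import URL
from lbry.error import InvalidStreamURLError  # import path assumed

def parse_stream_url(uri: str) -> URL:
    try:
        return URL.parse(uri)
    except ValueError as err:
        # Typed error: download_from_uri can trap it and log one warning
        # instead of printing the whole traceback.
        raise InvalidStreamURLError(uri) from err
```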
Jack Robison
02d356ef12
Merge pull request #3443 from lbryio/fix-resolve-reposted-channel
Fix including channels for reposted claims when resolving a repost
2021-10-08 16:51:40 -04:00
Jack Robison
d3516f299e
clear es attributes during initial sync 2021-10-08 16:34:48 -04:00
Jack Robison
79630767c2
fix setting references on txos in extra_txos 2021-10-08 16:34:15 -04:00
Jack Robison
084a76d075
fix reposted channel being missing from resolve result
-improve names of the resolve related methods in `LevelDB`
2021-10-07 15:09:13 -04:00
Jack Robison
bc6822e397
Merge pull request #3205 from lbryio/leveldb-resolve
drop sqlite in the hub and make resolve handle reorgs
2021-10-07 02:07:48 -04:00
Jack Robison
43432a9e48
fix compactify script 2021-10-07 00:37:55 -04:00
Jack Robison
d64a5bc12f
fix test 2021-10-06 23:53:17 -04:00
Jack Robison
b2922d18e2
move test_transaction_commands, test_internal_transaction_api, and test_transactions into their own runner
-move test_resolve_command to its own runner
2021-10-06 23:53:17 -04:00
Jack Robison
ccf03fc07b
only save undo info for blocks within reorg limit 2021-10-06 12:07:42 -04:00
Jack Robison
a7c45da10c
fix channel count 2021-10-06 00:02:16 -04:00
Jack Robison
e03f01e24a
try to fix test_sqlite_coin_chooser 2021-10-05 19:36:49 -04:00
Jack Robison
0939589557
move test_claim_commands and test_resolve_command into new directory 2021-10-05 17:51:43 -04:00
Jack Robison
8167af9b4a
sort touched or deleted claim hashes 2021-10-05 16:44:49 -04:00
Jack Robison
4cf76123e5
block processor db refactoring
-access db through HubDB class, don't use plyvel.DB directly
-add channel count and support amount prefixes
2021-10-05 16:44:49 -04:00
Jack Robison
01ee4b23e6
fix and add test for abandoning a controlling in the same block a new claim is made 2021-10-05 16:44:49 -04:00
Jack Robison
b198f79214
fix test_sqlite_coin_chooser 2021-10-05 16:44:49 -04:00
Jack Robison
09db868a28
fix ES index name so it stays the same within a test case 2021-10-05 16:44:49 -04:00
Jack Robison
33e8ef75ff
fix bug with early takeover by an update 2021-10-05 16:44:49 -04:00
Jack Robison
11dcb16b14
fix test 2021-10-05 16:44:49 -04:00
Jack Robison
86f21da28b
fix activating non existent claim 2021-10-05 16:44:49 -04:00
Jack Robison
89cd6a9aa4
add tests for takeovers from amount changes in updates before/on/after activation 2021-10-05 16:44:49 -04:00
Jack Robison
18e1256037
batch address history notifications 2021-10-05 16:44:49 -04:00
Jack Robison
02cf478d91
improve leveldb caching 2021-10-05 16:44:49 -04:00
Jack Robison
6ec70192fe
refactor reload_blocking_filtering_streams 2021-10-05 16:44:49 -04:00
Jack Robison
8c75098a9a
fix filtering error upon abandon 2021-10-05 16:44:49 -04:00
Jack Robison
72500f6948
faster read_claim_txos 2021-10-05 16:44:49 -04:00
Jack Robison
37ec9ab464
remove unused executor 2021-10-05 16:44:49 -04:00
Victor Shyba
82fe2a4c8d
fix blocking and filtering 2021-10-05 16:44:49 -04:00
Jack Robison
aa50e6ee66
fix test 2021-10-05 16:44:49 -04:00
Jack Robison
91a07cfaee
fix logging number of notified sessions 2021-10-05 16:44:49 -04:00
Jack Robison
709f5e9a65
fix update that initiates takeover not being delayed 2021-10-05 16:44:49 -04:00
Jack Robison
b2f9ef21cc
use hub binary from https://github.com/lbryio/hub/pull/13 2021-10-05 16:44:49 -04:00
Jack Robison
be6b72edcd
handle invalid release time 2021-10-05 16:44:49 -04:00
Jack Robison
ece2d1e78a
name and normalized -> claim_name and normalized_name
-update generated pb files
2021-10-05 16:44:49 -04:00
Jack Robison
1ee1a5f2a1
fix es sync.py 2021-10-05 16:44:49 -04:00
Jack Robison
a567326853
fix all_claims_producer 2021-10-05 16:44:49 -04:00
Jack Robison
6231861dd6
merge conflicts 2021-10-05 16:44:49 -04:00
Jack Robison
1ff7b77ee0
claim search fixes 2021-10-05 16:44:49 -04:00
Jack Robison
9365708bb2
fix release_time and creation_timestamp 2021-10-05 16:44:49 -04:00
Jack Robison
d23a0a8589
delete unused code 2021-10-05 16:44:49 -04:00
Jack Robison
701b39b043
test_spec_example 2021-10-05 16:44:49 -04:00
Jack Robison
58ad1f3876
non blocking claim producer 2021-10-05 16:44:49 -04:00
Jack Robison
2138e7ea33
fix tests 2021-10-05 16:44:49 -04:00
Jack Robison
32f8c9e59f
renormalization 2021-10-05 16:44:49 -04:00
Jack Robison
57028eab39
add trending integration test 2021-10-05 16:44:49 -04:00
Jack Robison
3a16edd8a6
fix trending overflow 2021-10-05 16:44:49 -04:00
Jack Robison
165f3bb270
refactor trending 2021-10-05 16:44:49 -04:00
Jack Robison
0ba75153f3
trending fixes 2021-10-05 16:44:49 -04:00
Jack Robison
db2789990f
make app backward compatible with trending_score
-update trending decay function to zero out low trending score values faster
2021-10-05 16:44:49 -04:00
Jack Robison
acaf299bcb
log time to update and decay trending in elasticsearch 2021-10-05 16:44:49 -04:00
Jack Robison
1940301824
skip integrity errors for trending spikes 2021-10-05 16:44:49 -04:00
Jack Robison
34576e880d
update trending in elasticsearch
-add TrendingPrefixSpike to leveldb
-expose `TRENDING_HALF_LIFE`, `TRENDING_WHALE_HALF_LIFE` and `TRENDING_WHALE_THRESHOLD` hub settings
2021-10-05 16:44:49 -04:00
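A hedged sketch of the half-life style decay the settings above suggest; the parameter semantics mirror the setting names, but the hub's actual formula may differ:
```python
def decayed_score(score: float, elapsed_blocks: float,
                  half_life: float, whale_half_life: float,
                  whale_threshold: float) -> float:
    """Exponential decay: a score halves every `half_life` blocks; scores at
    or above the whale threshold decay with the whale half-life instead."""
    applicable = whale_half_life if score >= whale_threshold else half_life
    return score * 0.5 ** (elapsed_blocks / applicable)
```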
Brendon J. Brewer
65c0668d40
constants 2021-10-05 16:44:49 -04:00
Brendon J. Brewer
53bd2bcbfe
Put trending score into ES 2021-10-05 16:44:49 -04:00
Brendon J. Brewer
388724fccb
Mark claims as touched 2021-10-05 16:44:49 -04:00
Jack Robison
231eabb013
fix non normalized canonical urls 2021-10-05 16:44:49 -04:00
Jack Robison
54903fc2ea
handle unicode error for unnormalized names 2021-10-05 16:44:49 -04:00
Jack Robison
3a1baf0700
prefix db 2021-10-05 16:44:49 -04:00
Brendon J. Brewer
0c0e36b6f8
trending 2021-10-05 16:44:49 -04:00
Jack Robison
234c03db09
fix claims not having non-normalized names 2021-10-05 16:44:49 -04:00
Jack Robison
59db5e7889
update test 2021-10-05 16:44:49 -04:00
Jack Robison
28aa7da349
merge conflicts 2021-10-05 16:44:49 -04:00
Jack Robison
c51e344b87
fix missing fields in reposts 2021-10-05 16:44:49 -04:00
Jack Robison
54461dfa75
fix merge conflicts and simplify extract_doc 2021-10-05 16:44:49 -04:00
Jack Robison
2d48e93f74
fix bulk es sync 2021-10-05 16:44:49 -04:00
Jack Robison
af22646322
fix tests 2021-10-05 16:44:49 -04:00
Jack Robison
722b42a93e
fix tests 2021-10-05 16:44:49 -04:00
Jack Robison
8f9e7f77a7
handle invalid claim update 2021-10-05 16:44:49 -04:00
Jack Robison
09bb1ba494
fix keeping claim_hash_to_txo and txo_to_claim in sync 2021-10-05 16:44:49 -04:00
Victor Shyba
d4137428ff
implement blocking and filtering 2021-10-05 16:44:49 -04:00
Jack Robison
b4d6c4f5b7
fix _get_pending_claim_name 2021-10-05 16:44:49 -04:00
Jack Robison
ffbe59ece5
fix applying expiration fork 2021-10-05 16:44:49 -04:00
Jack Robison
fab9c90ccb
update iterators to use pack_partial_key 2021-10-05 16:44:49 -04:00
Jack Robison
fb1a774bc4
delete lbry/wallet/server/storage.py
-expose leveldb lru cache size as `CACHE_MB` hub param
2021-10-05 16:44:49 -04:00
Jack Robison
98bc7d1e0e
remove dead code 2021-10-05 16:44:49 -04:00
Jack Robison
f7622f24b2
non blocking mempool loop 2021-10-05 16:44:49 -04:00
Jack Robison
f0a195a6d4
faster es sync 2021-10-05 16:44:49 -04:00
Jack Robison
180ba27d84
run advance_block in threadpool 2021-10-05 16:44:49 -04:00
Jack Robison
f944671f86
use claim_to_txo cache 2021-10-05 16:44:49 -04:00
Jack Robison
def2903f7d
faster _cached_get_active_amount for claims
-remove dead code
2021-10-05 16:44:49 -04:00
Jack Robison
0273a4e839
fix claim search by fee for claims without fees 2021-10-05 16:44:49 -04:00
Jack Robison
f8d2f02c5d
clear claim_to_txo cache before reading 2021-10-05 16:44:49 -04:00
Jack Robison
25147d8897
handle claims that don't exist in ES sync 2021-10-05 16:44:49 -04:00
Jack Robison
0fb6f05fba
in memory claim_to_txo and txo_to_claim dictionaries 2021-10-05 16:44:49 -04:00
Jack Robison
4e4e899356
fix spend_utxo 2021-10-05 16:44:49 -04:00
Jack Robison
5a01dbf269
split flush from advance_block 2021-10-05 16:44:49 -04:00
Jack Robison
30b923b283
rename extend_ops 2021-10-05 16:44:49 -04:00
Jack Robison
73ba381d20
faster spend_utxo 2021-10-05 16:44:49 -04:00
Jack Robison
1a5912877e
faster get_future_activated 2021-10-05 16:44:49 -04:00
Jack Robison
813e506b68
threadpool 2021-10-05 16:44:49 -04:00
Jack Robison
077ca987f7
cleanup 2021-10-05 16:44:49 -04:00
Jack Robison
c632a7a6a5
fix getting block hash during reorg 2021-10-05 16:44:49 -04:00
Jack Robison
e33e767510
fix test 2021-10-05 16:44:49 -04:00
Jack Robison
ac82617aa9
fix spends in address histories 2021-10-05 16:44:49 -04:00
Jack Robison
a35dfd1fd1
faster es sync 2021-10-05 16:44:49 -04:00
Jack Robison
c28aae9913
fix expiring channels 2021-10-05 16:44:49 -04:00
Jack Robison
c26a99e65c
fix abandoning signed claims in the same tx as their channel
-fix canonical/short url in es
2021-10-05 16:44:49 -04:00
Jack Robison
ca57dcfc2f
handle failure to generate a short id 2021-10-05 16:44:49 -04:00
Jack Robison
df5662dd69
fix resolve by short id 2021-10-05 16:44:49 -04:00
Jack Robison
8927a4889e
tests 2021-10-05 16:44:49 -04:00
Jack Robison
1ac7831f3c
move MemPool into BlockProcessor 2021-10-05 16:44:49 -04:00
Jack Robison
292d272a94
combine MemPool and Notifications classes 2021-10-05 16:44:49 -04:00
Jack Robison
a6ee8dc66e
fix touched hashXs notifications 2021-10-05 16:44:49 -04:00
Jack Robison
496f89f184
reorg claims in the search index 2021-10-05 16:44:49 -04:00
Jack Robison
7a56eff1ac
small fixes 2021-10-05 16:44:49 -04:00
Jack Robison
07e182aa16
rename 2021-10-05 16:44:49 -04:00
Jack Robison
7de06aa1e0
delete stale code 2021-10-05 16:44:49 -04:00
Jack Robison
3955b64405
simplify advance and reorg 2021-10-05 16:44:49 -04:00
Jack Robison
2bb55d681d
update limited_history 2021-10-05 16:44:49 -04:00
Jack Robison
f94e6ac527
update lookup_utxos 2021-10-05 16:44:49 -04:00
Jack Robison
b344f17b86
update RevertableOpStack 2021-10-05 16:44:49 -04:00
Jack Robison
677b8cb633
add remaining db prefixes 2021-10-05 16:44:49 -04:00
Jack Robison
6f3342e09e
update plyvel to 1.3.0
https://github.com/lbryio/lbry-sdk/pull/3205#issuecomment-877564489
2021-10-05 16:44:49 -04:00
Jack Robison
a1ddd762e0
cleanup 2021-10-05 16:44:49 -04:00
Jack Robison
68474e4057
skip es sync during initial hub sync, halt the hub upon finishing initial sync 2021-10-05 16:44:49 -04:00
Jack Robison
a84b9ee396
fix es sync 2021-10-05 16:44:49 -04:00
Jack Robison
b9c2ee745a
fix non localhost elasticsearch 2021-10-05 16:44:49 -04:00
Jack Robison
c91a47fcaa
improve channel invalidation test 2021-10-05 16:44:49 -04:00
Jack Robison
615e489d8d
fix stream_update --clear_channel flag 2021-10-05 16:44:49 -04:00
Jack Robison
c68f9f6f16
fix signed claim invalidation corner cases 2021-10-05 16:44:49 -04:00
Jack Robison
229cb85a6a
extra deletes
-the channel_to_claim/claim_to_channel entries already get deleted when the claim txo is spent
2021-10-05 16:44:49 -04:00
Jack Robison
e5c22fa665
fix has_no_source for reposts 2021-10-05 16:44:49 -04:00
Jack Robison
8bcfff05d7
update channel_to_claim and claim_to_channel at the same time 2021-10-05 16:44:49 -04:00
Jack Robison
6416ee8151
typing and fix error string 2021-10-05 16:44:49 -04:00
Jack Robison
f8eceb48e6
update staged txo_to_claim after invalidating channel sig
-fixes abandon of claim with invalidated signature and an update in same block
2021-10-05 16:44:49 -04:00
Jack Robison
310c483bfa
missing channel_to_claim delete 2021-10-05 16:44:49 -04:00
Jack Robison
a8f20361aa
fix RepostKey 2021-10-05 16:44:49 -04:00
Jack Robison
290be69d99
typing 2021-10-05 16:44:49 -04:00
Jack Robison
3b96bd7ea0
fix 2021-10-05 16:44:49 -04:00
Jack Robison
dc2f22f5fa
cleanup 2021-10-05 16:44:49 -04:00
Jack Robison
821be29f41
rename effective_amount prefix 2021-10-05 16:44:49 -04:00
Jack Robison
52ff1a12ff
fix undeleted claim_to_channel record 2021-10-05 16:44:49 -04:00
Jack Robison
814699ef11
cleanup 2021-10-05 16:44:49 -04:00
Jack Robison
0c30838b25
fix mismatch in claim_to_txo<->txo_to_claim 2021-10-05 16:44:49 -04:00
Jack Robison
cf66c2a1ee
rename things
-fix effective amount integrity error
2021-10-05 16:44:49 -04:00
Jack Robison
2ee419ffca
fix 2021-10-05 16:44:49 -04:00
Jack Robison
bfb9d696d7
pretty print 2021-10-05 16:44:49 -04:00
Jack Robison
bb2a34dd6b
fix duplicate activate 2021-10-05 16:44:49 -04:00
Jack Robison
ed652c0c56
fix updating resolve by effective amount after abandoning support 2021-10-05 16:44:49 -04:00
Jack Robison
1dc961d6eb
use RevertableOpStack in _get_takeover_ops 2021-10-05 16:44:49 -04:00
Jack Robison
d119fcfc98
remove debug prints 2021-10-05 16:44:49 -04:00
Jack Robison
4d3573724a
add RevertableOpStack to verify consistency of ops as they're staged 2021-10-05 16:44:49 -04:00
Jack Robison
8b37a66075
fix fee amount overflow in es 2021-10-05 16:44:49 -04:00
Jack Robison
ba4f32075a
faster claim producer
-make batches of claim txos from the iterator, and sort by tx hash before fetching to maximize cache and read-ahead hits
2021-10-05 16:44:49 -04:00
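A sketch of the batch-and-sort idea from the message above, assuming input items that expose a `tx_hash` attribute (a stand-in for the hub's actual claim txo type):
```python
from itertools import islice

def batched_claim_txos(claim_txos, batch_size=1000):
    """Yield lists of claim txos, each sorted by tx hash, so that the
    subsequent tx fetches walk the database in key order and benefit from
    the block cache and read-ahead."""
    iterator = iter(claim_txos)
    while True:
        batch = list(islice(iterator, batch_size))
        if not batch:
            return
        batch.sort(key=lambda txo: txo.tx_hash)  # tx_hash attribute assumed
        yield batch
```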
Jack Robison
218be22576
imports 2021-10-05 16:44:49 -04:00
Jack Robison
7688293716
close db in sync script 2021-10-05 16:44:49 -04:00
Jack Robison
458f8533c4
try default block size 2021-10-05 16:44:49 -04:00
Jack Robison
34502752fc
update elastic sync 2021-10-05 16:44:49 -04:00
Jack Robison
d6758fd823
invalidate channel signatures upon channel abandon 2021-10-05 16:44:49 -04:00
Jack Robison
65700e790e
_prepare_claim_for_sync generators 2021-10-05 16:44:49 -04:00
Jack Robison
7c34e4bb96
logging 2021-10-05 16:44:49 -04:00
Jack Robison
d0d6e3563b
use default sync=False during write_batch 2021-10-05 16:44:49 -04:00
Jack Robison
a2619f8c78
genesis_bytes attribute 2021-10-05 16:44:49 -04:00
Jack Robison
42d07fd2f0
fix 2021-10-05 16:44:49 -04:00
Jack Robison
8bea10960f
disable es (revert) 2021-10-05 16:44:49 -04:00
Jack Robison
9cbb19c304
_cached_get_active_amount 2021-10-05 16:44:49 -04:00
Jack Robison
1b94dfd712
fix removing unactivated support 2021-10-05 16:44:49 -04:00
Jack Robison
9f3604d739
debug 2021-10-05 16:44:49 -04:00
Jack Robison
4a1b2be269
leveldb tuning 2021-10-05 16:44:49 -04:00
Jack Robison
962dc1b55b
debug 2021-10-05 16:44:49 -04:00
Jack Robison
07c86502f6
refactor ClaimToTXO prefix 2021-10-05 16:44:49 -04:00
Jack Robison
adb188e5d0
filter abandoned claims from those considered for early activation 2021-10-05 16:44:49 -04:00
Jack Robison
ce031dc6b8
only do early takeover on a larger amount (fix case where they're equal) 2021-10-05 16:44:49 -04:00
Jack Robison
18b5f03247
filter supported claim hashes for claims that don't exist from early takeover/activations 2021-10-05 16:44:49 -04:00
Jack Robison
8a555ecf1c
remove extra open functions 2021-10-05 16:44:49 -04:00
Jack Robison
1b325b9acd
fix flush id 2021-10-05 16:44:49 -04:00
Jack Robison
1bdaddb319
fix clearing pending_support caches upon abandon 2021-10-05 16:44:49 -04:00
Jack Robison
7896e177ef
fix putting spent unactivated supports in removed_active_support 2021-10-05 16:44:49 -04:00
Jack Robison
ce8e659008
fix syncing claim to es where channel is in the same block 2021-10-05 16:44:49 -04:00
Jack Robison
27be5deeb2
ignore activation for headless supports 2021-10-05 16:44:49 -04:00
Jack Robison
515f270c3a
faster get_future_activated 2021-10-05 16:44:49 -04:00
Jack Robison
ffff3bd334
debugging 2021-10-05 16:44:49 -04:00
Jack Robison
f493f13b25
prints 2021-10-05 16:44:49 -04:00
Jack Robison
e605c14b13
flush count 2021-10-05 16:44:49 -04:00
Jack Robison
338488f16d
tests 2021-10-05 16:44:49 -04:00
Jack Robison
2abc67c3e8
reposts 2021-10-05 16:44:49 -04:00
Jack Robison
eb1ba143ec
fix updating the ES search index
-update search index to use ResolveResult tuples
2021-10-05 16:44:49 -04:00
Jack Robison
6f5bca0f67
bid ordered resolve, feed ES claim data from block processor 2021-10-05 16:44:49 -04:00
Jack Robison
407cd8dd4b
fix duplicate update op for early activating claim 2021-10-05 16:44:49 -04:00
Jack Robison
62a4f0fc04
fix early takeovers by not-yet activated claims 2021-10-05 16:44:49 -04:00
Jack Robison
77cde411f1
test_early_takeover_abandoned_controlling_support 2021-10-05 16:44:49 -04:00
Jack Robison
3eb9d23108
require previous_winning arg for get_takeover_name_ops 2021-10-05 16:44:49 -04:00
Jack Robison
410d4aeb21
fix takeover edge case
if a claim with a higher value than that of a claim taking over a name exists but isn't yet activated, activate it early and have it take over the name
2021-10-05 16:44:49 -04:00
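The rule described above, in sketch form (the `Claim` dataclass and the pending list are stand-ins for the block processor's real activation state):
```python
from dataclasses import dataclass

@dataclass
class Claim:
    name: str
    effective_amount: int
    active: bool = False

def pick_winner(takeover_candidate: Claim, pending: list) -> Claim:
    """If a pending (not-yet-activated) claim has a higher effective amount
    than the claim taking over the name, activate it early and it wins."""
    best_pending = max(pending, key=lambda c: c.effective_amount, default=None)
    if best_pending and best_pending.effective_amount > takeover_candidate.effective_amount:
        best_pending.active = True  # early activation
        return best_pending
    return takeover_candidate
```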
Jack Robison
0a28d216fd
comments 2021-10-05 16:44:49 -04:00
Jack Robison
b69faf6920
bid ordered resolve (WIP) 2021-10-05 16:44:49 -04:00
Jack Robison
efb92ea37a
fix udp ping test 2021-10-05 16:44:49 -04:00
Jack Robison
e77f9981df
DBError 2021-10-05 16:44:49 -04:00
Jack Robison
d27c2cc1e9
remove unused COIN file 2021-10-05 16:44:49 -04:00
Jack Robison
586b19675e
claim takeovers 2021-10-05 16:44:49 -04:00
Jack Robison
f2907536b4
move get_expiration_height and claimtrie constants to Coin class 2021-10-05 16:44:49 -04:00
Jack Robison
4aa4e35d1c
tests 2021-10-05 16:44:49 -04:00
Jack Robison
9a11ac06bf
claim activations and takeovers (WIP) 2021-10-05 16:44:49 -04:00
Jack Robison
aa3b18f848
advance_blocks -> advance_block 2021-10-05 16:44:49 -04:00
Jack Robison
103bdc151f
dead code 2021-10-05 16:44:49 -04:00
Jack Robison
6d4c1cd879
LBRYBlockProcessor -> BlockProcessor
- temporarily disable claim_search
2021-10-05 16:44:49 -04:00
Jack Robison
cacbe30871
rebase 2021-10-05 16:44:49 -04:00
Jack Robison
bfeeacb230
tests 2021-10-05 16:44:49 -04:00
Jack Robison
04bb7b4919
add wrapper for getnamesintrie
-used for verifying db state against lbrycrd
2021-10-05 16:44:49 -04:00
Jack Robison
b7df277a5c
db state struct
-remove dead code
2021-10-05 16:44:49 -04:00
Jack Robison
c681041b48
claim expiration 2021-10-05 16:44:49 -04:00
Jack Robison
923834c784
get_claim_by_claim_id 2021-10-05 16:44:49 -04:00
Jack Robison
588edf98be
claims db
-move all leveldb prefixes to DB_PREFIXES enum
-add serializable RevertableOp interface for key/value puts and deletes
-resolve urls from leveldb
2021-10-05 16:44:49 -04:00
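A hedged sketch of what a serializable revertable put/delete interface can look like; the hub's real classes carry packed key/value bytes and an undo stack, but the invert symmetry is the core idea:
```python
from dataclasses import dataclass

@dataclass(frozen=True)
class RevertableOp:
    is_put: bool
    key: bytes
    value: bytes

    def invert(self) -> "RevertableOp":
        # A put is undone by a delete of the same key/value, and vice versa,
        # which is what makes block reorgs replayable in reverse.
        return RevertableOp(not self.is_put, self.key, self.value)

put = RevertableOp(True, b"claim\x00abc", b"txo-data")
undo = put.invert()  # delete of the same key/value
```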
Jack Robison
28c603ad5f
transaction_num_mapping 2021-10-05 16:44:49 -04:00
Jack Robison
6988a47e02
disable sqlite in block processor 2021-10-05 16:44:49 -04:00
Jack Robison
2c8ceb1217
named tuples 2021-10-05 16:44:49 -04:00
Jack Robison
ccac4ffa24
consolidate flush_backup 2021-10-05 16:44:49 -04:00
Jack Robison
4258cef9bd
remove lbry.wallet.server.history 2021-10-05 16:44:49 -04:00
Jack Robison
62cc6dfe76
consolidate leveldb block advance/reorg
-move methods from History to LevelDB
2021-10-05 16:44:49 -04:00
Jack Robison
9f224a971b
atomic flush_dbs 2021-10-05 16:44:49 -04:00
Jack Robison
cf5dba9157
combine leveldb databases 2021-10-05 16:44:49 -04:00
Jack Robison
23035b9aa0
Merkle staticmethods 2021-10-05 16:44:49 -04:00
Lex Berezhny
84908ec8ec v0.105.0 2021-10-05 11:29:39 -04:00
Victor Shyba
dade49743b fix file reflect and add test 2021-10-04 19:26:05 -03:00
Lex Berezhny
f29bf35c2a
Merge pull request #3438 from lbryio/disk_space_metrics
metrics now report the disk space consumed by blobs and the disk usage limit, if any
2021-10-03 20:01:50 -04:00
Lex Berezhny
dfa6701c43 disk space metrics 2021-10-03 19:33:18 -04:00
Victor Shyba
763ca69a73 dht: use bytes hex/fromhex instead of binascii 2021-09-30 13:26:33 -03:00
Victor Shyba
6bf3b152bf add grin to dht known list 2021-09-30 13:26:33 -03:00
Victor Shyba
aa19f85996 add madiator to known dht nodes 2021-09-30 13:26:33 -03:00
Victor Shyba
156d89567e add option to set bootstrap_node 2021-09-30 13:26:33 -03:00
Victor Shyba
ecc71baf61 add dockerfile for dht node 2021-09-30 13:26:33 -03:00
Victor Shyba
90c743d963 configure where to save peers 2021-09-30 13:26:33 -03:00
Victor Shyba
b926293fa7 define arg types 2021-09-30 13:26:33 -03:00
Victor Shyba
71a19191f8 add dht seed node script 2021-09-30 13:26:33 -03:00
Victor Shyba
38a0f20a33 fix conflict with imported function 2021-09-30 13:24:17 -03:00
Victor Shyba
c35192108c errors for empty and missing file on publish 2021-09-30 13:24:17 -03:00
Victor Shyba
245b564f13 generalize stream empty to argument empty 2021-09-30 13:24:17 -03:00
Victor Shyba
0d8d1ea4f3 empty stream name error for user input 2021-09-30 13:24:17 -03:00
Victor Shyba
27a427a363 error for missing channel private key 2021-09-30 13:24:17 -03:00
Victor Shyba
2ff028a694 error for already purchased claims 2021-09-30 13:24:17 -03:00
Lex Berezhny
c211338218
Merge pull request #3434 from belikor/fix-documentation
fix typo in `file list` arguments list
2021-09-27 11:08:10 -04:00
belikor
8ac89af8bd api.json: correct the error in the generated documentation
From `"name": "blobs_in_stream<blobs_in_stream>"`
to `"name": "blobs_in_stream"`.
2021-09-23 21:01:17 -05:00
belikor
bbbaf59591 daemon: fix documentation in the file_list docstring
This is necessary to produce the `docs/api.json`
(through `scripts/generate_json_api.py`)
with correct information, and to allow other tools
to parse this file later on.
2021-09-23 21:00:31 -05:00
Lex Berezhny
169419896f v0.104.0 2021-09-22 18:39:01 -04:00
Lex Berezhny
0543dca502 re-enable coveralls 2021-09-22 18:15:13 -04:00
Lex Berezhny
cc6011d57a ubuntu 16.04 is deprecated on github actions, upgrading to 18.04 2021-09-22 18:14:15 -04:00
Lex Berezhny
fc4407ef7e revert release 2021-09-22 18:11:41 -04:00
Lex Berezhny
03735a125f v0.104.0 2021-09-22 14:02:52 -04:00
Lex Berezhny
5baeda9ff1
Merge pull request #3417 from lbryio/preserve_own_blobs
use database to track blob disk space use and preserve own blobs
2021-09-20 11:32:59 -04:00
Lex Berezhny
9b9794b5e0 default is_mine to true during migration 2021-09-20 09:23:42 -04:00
Lex Berezhny
0697d60a48 coveralls still down, will have to be merged with coveralls off 2021-09-20 09:01:35 -04:00
Lex Berezhny
cfe6c82a31 tests 2021-09-19 21:38:09 -04:00
Lex Berezhny
3e30228d95 lint 2021-09-15 10:49:03 -04:00
Lex Berezhny
7264b53e5f during disk clean your own sd blob is now kept and file status of deleted files is set to stopped 2021-09-15 10:37:08 -04:00
Lex Berezhny
60836d8523 db migration and other fixes 2021-09-15 09:10:06 -04:00
Lex Berezhny
ef89c2e47a use database to track blob disk space use and preserve own blobs 2021-09-15 09:10:06 -04:00
Lex Berezhny
2d9e3e1847 v0.103.0 2021-09-14 23:25:32 -04:00
Lex Berezhny
30136a9697 omit just node.py 2021-09-14 23:06:00 -04:00
Lex Berezhny
db7ccd66d3 coverage omit fix 2021-09-14 22:38:39 -04:00
Lex Berezhny
cfe6483102 omit coverage inside tox 2021-09-14 22:20:09 -04:00
Alex Grin
561566e723
Merge pull request #3421 from lbryio/vault_temp
avoid [''] on peers list
2021-09-13 16:14:43 -04:00
Victor Shyba
c2dcc4c898 avoid [''] on peers list 2021-09-13 15:57:21 -03:00
Lex Berezhny
d09bfdc4ff omit orchstr8 stuff since it doesn't always run the same way on every test run 2021-09-12 11:45:52 -04:00
Victor Shyba
358ef4536f add ConflictingInputValueError for claim_id+claim_ids 2021-09-10 18:57:20 -03:00
Victor Shyba
5061a35e66 remove ignored output from hub node 2021-09-10 18:57:20 -03:00
Victor Shyba
cd9a1e8c9e default to legacy search for this release 2021-09-10 18:57:20 -03:00
Victor Shyba
646902e75e only duplicate blockchain CI step 2021-09-10 18:57:20 -03:00
Victor Shyba
40d26cb868 fix error msg to match Go msg 2021-09-10 18:57:20 -03:00
Victor Shyba
b64aa51c0c fix stream_types being an integer 2021-09-10 18:57:20 -03:00
Victor Shyba
8206441834 run CI for old and new setups 2021-09-10 18:57:20 -03:00
Victor Shyba
d713783736 ignore default values 2021-09-10 18:57:20 -03:00
Victor Shyba
57dffaa2ce update hub to beta release 2021-09-10 18:57:20 -03:00
Victor Shyba
9e81dd2360 refactor arguments fixup 2021-09-10 18:57:20 -03:00
Victor Shyba
e2798969d7 claim_id is an invertible field, not a repeated field 2021-09-10 18:57:20 -03:00
Victor Shyba
1c31ec66f2 simplify operator handling 2021-09-10 18:57:20 -03:00
Victor Shyba
241f9fc7b0 not_claim_id/not_claim_ids is not a search parameter 2021-09-10 18:57:20 -03:00
Victor Shyba
270192486a translate grpc errors to RPCError 2021-09-10 18:57:20 -03:00
Victor Shyba
a799503c97 update fields from hub 2021-09-10 18:57:20 -03:00
Victor Shyba
9685928087 there is no first_search 2021-09-10 18:57:20 -03:00
Victor Shyba
0e4b2fad99 specify index name 2021-09-10 18:57:20 -03:00
Victor Shyba
3c4571a4e0 remove fallback 2021-09-10 18:57:20 -03:00
Jeffrey Picard
046147eb1d updates for fields 2021-09-10 18:57:20 -03:00
Jeffrey Picard
7834520e54 update code to be consistent with field renames 2021-09-10 18:57:20 -03:00
Jeffrey Picard
8e5b4d4b6f hardcode port 2021-09-10 18:57:20 -03:00
Jeffrey Picard
4544a074d9 Move the go hub settings from network to ledger config and hook reset
correctly.
2021-09-10 18:57:20 -03:00
Jeffrey Picard
9b78501392 Set default server to the network's default and use go hub by default 2021-09-10 18:57:20 -03:00
Jeffrey Picard
f59ddcc88d Forgot to remove duplicate tests 2021-09-10 18:57:20 -03:00
Jeffrey Picard
a4955a2b79 remove unneeded prints 2021-09-10 18:57:20 -03:00
Jeffrey Picard
92ae1a565b updates protobuf 2021-09-10 18:57:20 -03:00
Jeffrey Picard
15a56ca25e tons of small changes squashed together 2021-09-10 18:57:20 -03:00
Jeffrey Picard
9dcaa829ea update protobufs 2021-09-10 18:57:20 -03:00
Jeffrey Picard
9f65799a3d uncomment tests, add remove_duplicates param
Cleanup prints and commented out code

remove print

don't do list claims

cleanup
2021-09-10 18:57:20 -03:00
Jeffrey Picard
886587848b protobuf changes
more protobuf changes (fix imports)
2021-09-10 18:57:20 -03:00
Jeffrey Picard
a97fc6dba8 cleanup and reorganizing some stuff
Fixing tests

relabel failing tests properly

run all the tests for the hub

cleanup HubNode
2021-09-10 18:57:20 -03:00
Jeffrey Picard
c124e88d12 grpc client for python 2021-09-10 18:57:20 -03:00
Jeffrey Picard
17f3870296 Add tests for hub
Have the basic starting/stopping/querying. Still don't have the hub
jsonrpc stuff working right, and from the looks of it I need to clarify
some of the logic in the claim search function itself because it's not
returning the correct number of claims anyway.

get the integration working with grpcurl

Got tests working, still need to port the rest of them

ported all of the claim search tests

still a few failing due to not having inflation working, and there's something weird
with limit_claims_per_channel that needs to be fixed.
2021-09-10 18:57:20 -03:00
Lex Berezhny
4626d42d08
Merge pull request #3414 from cristi-zz/remove_comment_api
removed `comment` API endpoints
2021-09-09 13:07:12 -04:00
Cristian Vicas
e1e760055c Drop comment_* apis.
Refresh documentation.
2021-09-02 11:38:29 +03:00
Cristian Vicas
45bf6c3bf3 Drop comment_* apis.
Refactored dangling functions.
Added unit test.
2021-09-02 11:38:29 +03:00
Cristian Vicas
fef0cc764d Drop comment_* apis
Removed the comment API
Removed tests for the comment API
Removed the documentation section
Removed the comment server configuration
2021-09-02 08:51:00 +03:00
Lex Berezhny
72049afcf6
Merge pull request #3410 from belikor/fix-docstring
jsonrpc_support_sum: remove the + signs from the docstring
2021-09-01 10:05:17 -04:00
belikor
d26c06dbf3 jsonrpc_support_sum: remove the + signs from the docstring
These symbols came from 0a0ac3b7c9 and were probably added
accidentally to the beginning of the line by copying and pasting
some diffs.
2021-08-25 13:28:02 -05:00
Lex Berezhny
268decd655 update readme 2021-08-21 20:01:49 -04:00
Lex Berezhny
7ae246c839 always run on push, otherwise master branch does not get coverage 2021-08-21 17:10:53 -04:00
Lex Berezhny
c7c454e4fb
Merge pull request #3406 from lbryio/coveralls
submit code coverage reports to coveralls
2021-08-21 17:07:22 -04:00
Lex Berezhny
8e27297a81 coveralls flag name fix 2021-08-21 16:47:17 -04:00
Lex Berezhny
2cdec72985 coveralls fix 2021-08-21 16:22:01 -04:00
Lex Berezhny
0085ac534d coverage fix 2021-08-21 15:41:06 -04:00
Lex Berezhny
7828a79a96 coverage combine corrected 2021-08-21 15:31:50 -04:00
Lex Berezhny
5576c21e67 coverage for integration tests 2021-08-21 15:26:14 -04:00
Lex Berezhny
e49cfb1d2b another attempt 2021-08-21 14:44:59 -04:00
Lex Berezhny
1e541d0225 explicit coveralls service 2021-08-21 13:52:33 -04:00
Lex Berezhny
0974afd26d guess coveralls service 2021-08-21 12:06:45 -04:00
Lex Berezhny
8d93594771 coverage on win and mac 2021-08-21 10:51:37 -04:00
Lex Berezhny
1136ac70e8 fix makefile 2021-08-21 10:26:55 -04:00
Lex Berezhny
dc8d5a39ea fix spaces 2021-08-21 09:42:18 -04:00
Lex Berezhny
8329e649b0 try python coveralls package instead of github action 2021-08-21 09:41:16 -04:00
Lex Berezhny
66da8b164f try AndreMiras/coveralls-python-action action 2021-08-21 09:28:20 -04:00
Lex Berezhny
ea48577864 github workflow syntax fix 2021-08-21 09:15:20 -04:00
Lex Berezhny
597146b136 submit coverage to coveralls 2021-08-21 09:04:44 -04:00
Lex Berezhny
30dd0c1e11
Merge pull request #3405 from lbryio/upgrade_pylint
upgrade pylint and fix lint errors
2021-08-20 23:06:44 -04:00
Lex Berezhny
88772c4266 update setup.py 2021-08-20 22:42:12 -04:00
Lex Berezhny
dc1d9e1c84 upgrade pylint and fix lint errors 2021-08-20 22:36:35 -04:00
Lex Berezhny
69ea65835d
Merge pull request #3402 from lbryio/save_files_default_false
changed default setting `save_files` to be false
2021-08-19 10:57:51 -04:00
Lex Berezhny
d5bae3a8c6 manually set save_files=True in unit tests 2021-08-19 09:31:17 -04:00
Lex Berezhny
f14010bd5b explicitly set save_files = True in tests 2021-08-17 16:36:48 -04:00
Lex Berezhny
87094fc83f changed default setting save_files to be false 2021-08-17 15:47:18 -04:00
Lex Berezhny
7c179cfeab missing closing squiggly bracket 2021-08-17 14:48:13 -04:00
Lex Berezhny
7582c221d1 v0.102.0 2021-08-17 14:16:17 -04:00
Lex Berezhny
c109895848
Merge pull request #3399 from lbryio/better-error-logging
Less verbose error logs, only log tracebacks for errors not defined in `lbry.error`
2021-08-17 14:14:27 -04:00
Jack Robison
eccedada40
add TODOs for errors raised that aren't defined in lbry.error 2021-08-17 12:31:03 -04:00
Jack Robison
25d54accf8
return api errors from wallet_add and wallet_create 2021-08-17 12:30:17 -04:00
Jack Robison
d07685f0e9
only log tracebacks for api errors not defined in lbry.error 2021-08-17 11:30:58 -04:00
Jack Robison
2445c00c7e
raise WalletNotLoadedError in get_wallet_or_error instead of ValueError 2021-08-17 11:30:58 -04:00
Lex Berezhny
4c1d3ef514
Merge pull request #3398 from lbryio/clean_blobs_after_delay
clean blobs after a waiting interval instead of immediately on startup
2021-08-17 10:44:34 -04:00
Lex Berezhny
4614c7d4c2 clean blobs after a waiting interval instead of immediately on startup 2021-08-17 09:52:44 -04:00
Lex Berezhny
bbf1ef0dc3
Merge pull request #3378 from lbryio/disk_management
ability to limit disk space used for blobs via `blob_storage_limit` setting (oldest blobs are deleted when disk space limit is reached)
2021-08-16 17:41:05 -04:00
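A minimal sketch of the oldest-first cleanup policy described above, assuming filesystem mtimes rather than the SDK's database-backed accounting:
```python
import os

def clean_blobs(blob_dir: str, limit_bytes: int) -> int:
    """Delete oldest blobs until the total size fits under limit_bytes.
    Returns the number of files deleted."""
    entries = [(e.path, e.stat().st_mtime, e.stat().st_size)
               for e in os.scandir(blob_dir) if e.is_file()]
    total = sum(size for _, _, size in entries)
    deleted = 0
    # oldest first, by modification time
    for path, _, size in sorted(entries, key=lambda t: t[1]):
        if total <= limit_bytes:
            break
        os.remove(path)
        total -= size
        deleted += 1
    return deleted
```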
Lex Berezhny
3433c9e708 return number of files deleted 2021-08-16 17:03:40 -04:00
Lex Berezhny
2cd5d75a2e return true/false if clean was performed 2021-08-16 17:02:13 -04:00
Lex Berezhny
2535b8adef fix disk space unit test 2021-08-16 14:54:17 -04:00
Lex Berezhny
4edab7bb7f fix sorting by DirEntry error 2021-08-16 14:41:16 -04:00
Lex Berezhny
fd8658e317 test component unit test 2021-08-16 14:35:32 -04:00
Lex Berezhny
51d21d8c86 working disk cleanup 2021-08-16 14:15:12 -04:00
Lex Berezhny
b4c3307cdf fixed tests 2021-08-13 10:32:46 -04:00
Lex Berezhny
4e8d10cb44 disk space manager and status API 2021-08-13 10:32:46 -04:00
Lex Berezhny
e96875a425 workflow syntax 2021-08-13 10:32:46 -04:00
Lex Berezhny
5ab0035348 run tests on windows and mac 2021-08-13 10:32:46 -04:00
Lex Berezhny
4ddff96b1e
Merge pull request #3395 from lbryio/libtorrent_optional
make libtorrent optional and skip test which depends on it
2021-08-13 10:31:06 -04:00
Lex Berezhny
a08d84c1df make libtorrent optional and skip test which depends on it 2021-08-13 10:07:06 -04:00
Victor Shyba
21c71bfac1 update sync utility 2021-08-09 18:33:47 -03:00
Victor Shyba
6baaed3581 refactor query with new fields 2021-08-09 18:33:47 -03:00
Victor Shyba
152dbfd5d1 reflect fee_currency, fee_amount and duration on repost searches 2021-08-09 18:33:47 -03:00
Victor Shyba
a56d14086b reflect media_type on repost searches 2021-08-09 18:33:47 -03:00
Victor Shyba
aee87693f8 reflect stream_type on repost searches 2021-08-09 18:33:47 -03:00
Alex Grin
976b4affd9
Merge pull request #3383 from lbryio/dht_log 2021-08-09 17:10:01 -04:00
Victor Shyba
e222b6ad9c log that an invalid query happened 2021-08-09 15:07:44 -03:00
Victor Shyba
19b17374e8 throttle instead of disconnecting 2021-08-09 15:07:44 -03:00
Victor Shyba
43989122bb add error type and message to error readme and update code 2021-08-09 15:07:44 -03:00
Victor Shyba
72712d6047 raise and disconnect if too many parameters are used on search 2021-08-09 15:07:44 -03:00
Victor Shyba
0b52d2cc15 log invalid port as a warning instead of an exception 2021-08-03 15:29:52 -03:00
Lex Berezhny
8304102136 move <!channel> out of markdown converter 2021-07-27 11:54:08 -04:00
Lex Berezhny
3381aefcfa notify channel of slack message 2021-07-27 11:39:35 -04:00
Lex Berezhny
279a365cb1 v0.101.1 2021-07-27 11:12:06 -04:00
Lex Berezhny
2c9e00da56 revert version 2021-07-27 11:10:16 -04:00
Lex Berezhny
f7cae69704 switch to using a custom GITHUB_TOKEN for doing releases 2021-07-27 11:07:25 -04:00
Lex Berezhny
b7d58bcdbc v0.101.1 2021-07-26 17:01:25 -04:00
Lex Berezhny
13a856b843 revert version 2021-07-26 17:00:32 -04:00
Lex Berezhny
8da38985c3 debugging for release 2021-07-26 16:58:11 -04:00
Lex Berezhny
60cf6c6b97 v0.101.1 2021-07-26 16:02:28 -04:00
Lex Berezhny
35c2b34564
Merge pull request #3372 from lbryio/release_process_fixup
github release process fixes
2021-07-26 15:58:10 -04:00
Lex Berezhny
ef2e048efc fixes for release process 2021-07-26 15:57:45 -04:00
Lex Berezhny
6b3261aa33
Merge pull request #3373 from lbryio/fix_typo
fix typo in kwargs key
2021-07-26 15:29:29 -04:00
Victor Shyba
1849c02cb6 fix typo in kwargs key 2021-07-26 16:02:48 -03:00
Lex Berezhny
1ec74a89e2
Merge pull request #3367 from belikor/fix-search-claim-id
fix error when using `--claim_id` with `lbrynet claim search`
2021-07-23 10:08:57 -04:00
Victor Shyba
c591792de9 has_source is a special case 2021-07-22 16:25:55 -03:00
Victor Shyba
3108543ae5 3 missing fields 2021-07-22 16:25:55 -03:00
Victor Shyba
1eb221c743 translate reposted, signature_valid and normalized 2021-07-22 16:25:55 -03:00
Alex Grin
bebf6bc2e7 Update constants.py 2021-07-22 16:25:55 -03:00
Alex Grin
9e91cc2138 Update constants.py 2021-07-22 16:25:55 -03:00
Victor Shyba
c5b939cfb7 fix tests 2021-07-22 16:25:55 -03:00
Victor Shyba
5bd411ca27 filtering hash->id 2021-07-22 16:25:55 -03:00
Victor Shyba
a533cda6f0 ES: all _hash to _id 2021-07-22 16:25:55 -03:00
Lex Berezhny
fe4b07b8ae v0.101.0 2021-07-21 12:35:16 -04:00
Lex Berezhny
f9f2ccd904 revert version 2021-07-21 12:28:41 -04:00
Lex Berezhny
d9e87d7c32 publish after uploading release artifacts 2021-07-21 12:27:04 -04:00
Lex Berezhny
a0092c0770 remove docker steps from github action build 2021-07-21 12:14:49 -04:00
Lex Berezhny
3100131125 checkout code in release job 2021-07-21 11:33:41 -04:00
Lex Berezhny
988880cf83 update set_build script to work on github 2021-07-21 11:32:37 -04:00
Lex Berezhny
c3fb9672c4 re-enable skipping failing DHT unit test 2021-07-21 11:25:44 -04:00
Lex Berezhny
0a2d94e425 updated set_build to use GITHUB_ env vars 2021-07-21 09:19:58 -04:00
Lex Berezhny
8d9073cd31 v0.101.0 2021-07-20 22:52:44 -04:00
Lex Berezhny
d075961ffa removed .gitlab-ci.yml 2021-07-20 22:52:05 -04:00
Lex Berezhny
7a72409b61 fix dht node test 2021-07-20 22:43:57 -04:00
Lex Berezhny
34fc530fba cleanup github actions to be able to drop gitlab 2021-07-20 22:43:57 -04:00
Jack Robison
f257ff2f97
Merge pull request #3369 from lbryio/fix-hanging-tx-notification
fix stuck transaction notification due to race in mempool when advancing a block
2021-07-20 18:54:22 -04:00
Jack Robison
7ad5822c5b
fix test 2021-07-20 16:03:34 -04:00
Jack Robison
9a8f9f0a94
fix stuck notification due to mempool/notification race 2021-07-20 15:14:10 -04:00
belikor
6421cecafb daemon: fix --claim_id with lbrynet claim search
For some reason, when using `claim_search`
with `--claim_id`, the arguments dictionary will also
contain `claim_ids` with an empty list, even if we didn't specify it.
```
lbrynet claim search --claim_id=8945573bcfcb7f8276187dfbb93545eac4ebf71a
```

Using both `claim_id` and `claim_ids` will raise a `ValueError`
exception so the daemon won't return a valid result
even if the claim ID is in fact valid.

So if `claim_id` exists, we need to discard `claim_ids`
if it is empty, before proceeding with the rest of the code.

On the other hand, if `claim_ids` is used, and `claim_id` is absent,
there will be no problem as `claim_id` won't be added to the dictionary.
```
lbrynet claim search --claim_ids=8945573bcfcb7f8276187dfbb93545eac4ebf71a
```
2021-07-19 22:24:43 -05:00
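A minimal sketch of the described guard (the standalone `fix_kwargs` helper is a stand-in; the real check lives in the daemon's `claim_search` handling):
```python
def fix_kwargs(kwargs: dict) -> dict:
    # the argument parsing layer populates claim_ids with an empty list even
    # when only --claim_id was given; passing both raises ValueError further
    # down, so discard the empty claim_ids before searching.
    if kwargs.get("claim_id") and not kwargs.get("claim_ids"):
        kwargs.pop("claim_ids", None)
    return kwargs
```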
Alex Grin
be544d6d89
Merge pull request #3358 from belikor/improve-install-md 2021-07-19 14:20:44 -04:00
Alex Grin
3c89ecafdd
Merge branch 'master' into improve-install-md 2021-07-19 14:20:39 -04:00
Alex Grin
35ec4eec52
Update INSTALL.md 2021-07-19 14:20:15 -04:00
Alex Grin
e47f737a2f
Update INSTALL.md 2021-07-19 14:15:21 -04:00
Alex Grin
ac671a065b
Merge pull request #3356 from lbryio/propagate_external_ip_change 2021-07-19 14:12:26 -04:00
Alex Grin
74116cc550
Merge branch 'master' into propagate_external_ip_change 2021-07-19 14:12:19 -04:00
Alex Grin
406070a5c3
Merge pull request #3354 from belikor/note-download-blob-peer 2021-07-19 14:10:13 -04:00
Victor Shyba
0ccafd5b53 make get_or_create_usable_address respect the generator lock 2021-07-19 14:09:52 -04:00
Alex Grin
940f517aa3
Merge branch 'master' into note-download-blob-peer 2021-07-19 14:09:51 -04:00
Alex Grin
216e5f65ad
Merge pull request #3363 from lbryio/troubleshoot_p2p_script
add script with web endpoints that can troubleshoot p2p/dht
2021-07-19 14:04:28 -04:00
Victor Shyba
a74685d66d add script to troubleshoot p2p/dht 2021-07-19 15:01:37 -03:00
belikor
b7791d2845 exchange_rate_manager: raise exception if 'error' is in json_response
If the error is not handled, the running daemon will continuously
print the following error message:
```
Traceback (most recent call last):
  File "lbry/extras/daemon/exchange_rate_manager.py", line 77, in get_rate
  File "lbry/extras/daemon/exchange_rate_manager.py", line 189, in get_rate_from_response
KeyError: 0
```

This started happening when the UPBit exchange decided to delist
the LBC coin.

Normally `json_response` should be a dictionary, not a list,
so `json_response[0]` causes an error.

By checking for the `'error'` key, we can raise the proper exception.

Once this is done, the message will be a warning, not a traceback.
```
WARNING  lbry.extras.daemon.exchange_rate_manager:92:
Failed to get exchange rate from UPbit: result not found
```
2021-07-19 13:41:49 -04:00
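A hedged sketch of the guard; the exception class and the price field name are assumptions standing in for the actual code in `exchange_rate_manager.py`:
```python
class CurrencyConversionError(Exception):
    pass

def get_rate_from_response(json_response):
    # after UPBit delisted LBC the API returns a dict with an 'error' key;
    # indexing that dict as json_response[0] raised KeyError: 0 and dumped
    # a traceback in the daemon log on every poll.
    if 'error' in json_response:
        raise CurrencyConversionError('result not found')
    return float(json_response[0]['tradePrice'])  # price field name assumed
```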
Victor Shyba
d151a82d78 add libtool and automake to the dockerfiles so they can build coincurve 2021-07-15 17:11:19 -03:00
belikor
8ce61fbd52 INSTALL.md: break the big blocks of code, and remove the space
Remove the first space in the block of code as it is not necessary.

This
 ```
 $ python --version
 ```

Becomes this
```
$ python --version
```

Also break the big block of code into individual blocks.
2021-07-11 19:44:54 -05:00
belikor
90c24aade3 INSTALL.md: using Python 3.8 does not work, issue #2769
Because of issue #2769 at the moment the `lbrynet` daemon
will only work correctly with Python 3.7.

The `deadsnakes` personal package archive (PPA) provides
Python 3.7 for Ubuntu distributions that no longer have it
in their official repositories like 18.04 and 20.04.

If Python 3.8+ is used, the daemon will start but the RPC server
may not accept messages, returning the following:
```
Could not connect to daemon. Are you sure it's running?
```
2021-07-11 19:44:54 -05:00
belikor
6b3f787fee INSTALL.md: add more information on the virtual environments
Leave with `deactivate`.

Enter the environment again with
```
source lbry-venv/bin/activate
```

When developing, we can start the server interactively.
```
python lbry/extras/cli.py start
```

Parameters can be passed in the same way.
```
python lbry/extras/cli.py wallet balance
```

If a Python debugger (`pdb` or `ipdb`) is installed we can also start
it in this way, set up breakpoints, and step through the code.
```
ipdb lbry/extras/cli.py
```
2021-07-11 19:44:25 -05:00
belikor
4ebe4ce1b7 scripts: note to further investigate in download_blob_from_peer
Currently `lbrynet blob get <hash>` does not work to download
single blobs which are not already present in the system.
The function locks up and never returns.
It only works for blobs that are in the `blobfiles` directory
already.

This bug is reported in lbryio/lbry-sdk, issue #2070.

Maybe this script can be investigated, and certain parts
can be added to `lbry.extras.daemon.daemon.jsonrpc_blob_get`
in order to solve the previous issue, and finally download
single blobs from the network (peers or reflector servers).
2021-07-09 11:53:35 -05:00
belikor
8c79740ee8 script/test_claim_search: fix the import of ClientSession
This is nothing special, it just allows the module
to run without throwing an error on the import.

From
```
from lbry.wallet.client.basenetwork import ClientSession
```

To
```
from lbry.wallet.network import ClientSession
```
2021-07-09 10:52:41 -04:00
belikor
59d027ca02 script/find_max_server: fix the import of ClientSession
This is nothing special, it just allows the module
to run without throwing an error on the import.

From
```
from lbry.wallet.client.basenetwork import ClientSession
```

To
```
from lbry.wallet.network import ClientSession
```
2021-07-09 10:52:41 -04:00
Ofek Lev
37a7345a90 Upgrade coincurve dependency 2021-07-09 10:51:03 -04:00
Victor Shyba
c519d4651b loop.time is not usable on advance time, use wall time 2021-07-08 03:55:21 -03:00
Victor Shyba
9b3b609e40 re-enable test_losing_connection 2021-07-08 03:46:48 -03:00
Victor Shyba
6254f53716 propagate external ip changes from upnp component to dht node protocol 2021-07-08 03:46:05 -03:00
Jack Robison
f05dc46432
Merge pull request #3342 from lbryio/bug_flush_counter
[resync required] Avoid flush counter overflows on long running hubs by increasing it to 32 bits
2021-07-07 23:45:47 -04:00
Victor Shyba
3de0982a4a limit request error logging to 16k 2021-07-07 18:39:38 -03:00
Victor Shyba
c2184fb3bf run migration on history db open 2021-07-07 18:39:38 -03:00
Victor Shyba
919c09fcb0 add migration 2021-07-07 18:39:38 -03:00
Victor Shyba
1d9dbd40ec increase flush counter to 32 bits 2021-07-07 18:39:38 -03:00
belikor
0cd953a6f3 script/checktrie: fix the import to SQLDB
This is nothing special, it just allows the module
to run without throwing an error.

From
```
from lbry.wallet.server.db import SQLDB
```

To
```
from lbry.wallet.server.db.writer import SQLDB
```
2021-07-07 11:02:28 -03:00
Alex Grin
4db2b72351
Merge pull request #3347 from kodxana/master 2021-07-02 11:16:40 -04:00
kodxana
dd54fcbdbd
Create README.md 2021-07-01 18:21:20 +02:00
kodxana
3123cf7ac6
Added docker-compose 2021-07-01 18:17:36 +02:00
Victor Shyba
6b579dd4ce add dockerfiles for web sdk 2021-06-30 18:03:00 -03:00
Alex Grin
16dfaa3e27
Merge pull request #3343 from lbryio/example_es
add small example script showing how to read and update values to ES as we currently use it
2021-06-30 11:30:06 -04:00
Victor Shyba
d7842b9f84 small script showing how to read/update values to ES as we currently use it 2021-06-25 12:41:05 -03:00
Alex Grin
115034fccb
Merge pull request #3232 from lbryio/timeout 2021-06-25 11:05:25 -04:00
Victor Shyba
309e957a85 add concurrent_hub_requests conf 2021-06-24 21:21:19 -03:00
Victor Shyba
d7007e402e move request semaphore to session and apply to all requests 2021-06-24 21:02:41 -03:00
Victor Shyba
91323a21cf add hub_timeout and propagate it to network code 2021-06-24 21:02:41 -03:00
Lex Berezhny
fea893d76c v0.100.0 2021-06-22 13:33:03 -04:00
Lex Berezhny
761bc6ba4c revert release and fix test 2021-06-22 13:32:41 -04:00
Lex Berezhny
75172feb4e v0.100.0 2021-06-22 12:53:23 -04:00
Lex Berezhny
3285fb1608 revert release 2021-06-22 12:52:48 -04:00
Lex Berezhny
03a4c6910d v0.100.0 2021-06-22 12:51:36 -04:00
Lex Berezhny
485b958599 revert release 2021-06-22 12:50:11 -04:00
Lex Berezhny
da47ba2f67 v0.100.0 2021-06-22 11:11:02 -04:00
Lex Berezhny
c39195488a bug fix 2021-06-22 11:07:58 -04:00
Lex Berezhny
227fb0ae9b network integration test fix 2021-06-22 11:07:58 -04:00
Lex Berezhny
b12ff5b503 test fixes 2021-06-22 11:07:58 -04:00
Lex Berezhny
0946c72b88 lint 2021-06-22 11:07:58 -04:00
Lex Berezhny
7d49b046d4 added support to config for determining if value is set and implemented hub selection logic 2021-06-22 11:07:58 -04:00
Lex Berezhny
5f0426c840 country jurisdiction added to hub UDP protocol 2021-06-22 11:07:58 -04:00
Lex Berezhny
73e239cc5f client side hub discovery pub/sub and hub metadata stored, removed old peers implementation 2021-06-22 11:07:58 -04:00
Lex Berezhny
ad670f721a working client peer hub 2021-06-22 11:07:58 -04:00
Lex Berezhny
028a4a70cf wallet server federation, client portion 2021-06-22 11:07:58 -04:00
Lex Berezhny
77d7960347 increase lbc exchange rate threshold 2021-06-18 11:26:30 -04:00
Lex Berezhny
39821146bd increase lbc threshold in exchange rate integration tests even more 2021-06-17 10:23:33 -04:00
Lex Berezhny
7d505a41ac drop sqlite indexes from test 2021-06-15 18:22:42 -04:00
Lex Berezhny
e457b2f0d6 fix trending to use built-in sqlite instead of apsw 2021-06-15 18:22:42 -04:00
Lex Berezhny
c9cf7fd4d4 drop apsw in wallet.server.db.elasticsearch.sync 2021-06-15 18:22:42 -04:00
Lex Berezhny
b0371dd33d update test reader to use plain sqlite 2021-06-15 18:22:42 -04:00
Lex Berezhny
25e16c3565 dropping apsw 2021-06-15 18:22:42 -04:00
Lex Berezhny
7b39527863 update exchange rate threshold in integration tests due to significant drop in LBC price 2021-06-15 15:58:59 -04:00
Alex Grin
d861b08866
Merge pull request #3323 from lbryio/dht_leak 2021-06-07 16:15:59 -04:00
Victor Shyba
fb438dc108 remove the unregister call 2021-06-04 12:47:16 -03:00
Victor Shyba
4e6b4f179b add items() to LRUCache 2021-06-04 12:20:44 -03:00
Victor Shyba
00d038c8f3 add default parameter to pop on LRUCache 2021-06-04 12:15:47 -03:00
Victor Shyba
a9f6a68952 use LRU caches for DHT metrics 2021-06-04 11:54:37 -03:00
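A sketch of the two LRUCache additions mentioned in these commits, using an OrderedDict-backed cache as a stand-in for the SDK's own `LRUCache`:
```python
from collections import OrderedDict

class LRUCache:
    def __init__(self, capacity: int):
        self.capacity = capacity
        self._cache = OrderedDict()

    def set(self, key, value):
        self._cache[key] = value
        self._cache.move_to_end(key)
        if len(self._cache) > self.capacity:
            self._cache.popitem(last=False)  # evict least recently used

    def get(self, key, default=None):
        if key in self._cache:
            self._cache.move_to_end(key)
            return self._cache[key]
        return default

    def pop(self, key, default=None):
        # default parameter so callers don't need try/except on a miss
        return self._cache.pop(key, default)

    def items(self):
        # expose items() so metrics code can iterate over cache contents
        return self._cache.items()
```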
Alex Grin
b9142bbc5a
Merge pull request #3319 from lbryio/support_value_type
drop `value_type` for supports
2021-06-04 08:39:40 -04:00
Victor Shyba
6c812f663e drop value_type for support 2021-06-02 18:01:54 -03:00
Alex Grin
a93ec9783a
Update README.md 2021-06-02 14:10:19 -04:00
Lex Berezhny
2d184d77b6 v0.99.0 2021-06-02 12:07:37 -04:00
Victor Shyba
bce299ccc7 fix docopt typo 2021-06-02 12:05:36 -04:00
Victor Shyba
235cebd14a fix test value 2021-06-02 12:05:36 -04:00
Victor Shyba
a638aa9d53 add and test support for support_create anonymous --comment 2021-06-02 12:05:36 -04:00
Victor Shyba
67cce0ef7e test+implement --comment for support_create 2021-06-02 12:05:36 -04:00
Victor Shyba
82f4267bf6 add comment property/setter to the signable support class 2021-06-02 12:05:36 -04:00
Victor Shyba
45a9ca29c4 update generated support protobuf with field 2021-06-02 12:05:36 -04:00
Victor Shyba
7f4e813277 document schema update process 2021-06-02 12:05:36 -04:00
Lex Berezhny
3805ff4a0c fix purchase test 2021-06-02 11:34:21 -04:00
Lex Berezhny
464cfd475e properly format scripthash address on output 2021-06-02 11:34:21 -04:00
Lex Berezhny
fe469ae57f create appropriate script for scripthash address 2021-06-02 11:34:21 -04:00
Lex Berezhny
550ef9a1c4 allows script addresses (beginning with r) to be used 2021-06-02 11:34:21 -04:00
Alex Grin
935adfb51a
Merge pull request #3301 from lbryio/no_repeat_claim_id
add `--remove_duplicates` to the search api
2021-05-28 11:01:15 -04:00
Victor Shyba
3974df4a62 fix interaction between two modes 2021-05-27 20:14:12 -03:00
Victor Shyba
4870974161 update json api 2021-05-27 20:14:12 -03:00
Victor Shyba
8c4b0037f5 API: add --remove_duplicates to claim_search 2021-05-27 20:14:12 -03:00
Victor Shyba
2c6f763ef2 test picking oldest when originals don't match 2021-05-27 20:14:12 -03:00
Victor Shyba
ca28de02d8 test and implementation for remove_duplicates on post-search filtering 2021-05-27 20:14:12 -03:00
Victor Shyba
bfc15ea029 handle limit being 0 and skip reordering if 0/none 2021-05-27 20:14:12 -03:00
Victor Shyba
6e8b8a5920 always call search_ahead 2021-05-27 20:14:12 -03:00
Jack Robison
099f3b6a62
Merge pull request #3308 from lbryio/reflect_more
Don't set stream as reflected until reflector says it doesn't need any blob
2021-05-27 19:13:23 -04:00
Victor Shyba
142d182bc1 if progress was made, retry without a delay 2021-05-27 18:24:58 -03:00
Victor Shyba
1437871d88 fix reflector client: only set completed when server says so 2021-05-27 18:24:58 -03:00
Victor Shyba
352bf69409 improve test 2021-05-27 18:24:58 -03:00
Victor Shyba
9bdf3d23e1 test bug 3296, failing 2021-05-27 18:24:58 -03:00
Victor Shyba
be8ecfa707 sort keys so helper scripts can send blobs using send_request 2021-05-27 18:24:58 -03:00
Lex Berezhny
51da0d0259 v0.98.0 2021-05-26 09:23:19 -04:00
Alex Grin
f55b78a994
Merge pull request #3306 from lbryio/fix-collectionUpdateWithReplace 2021-05-18 15:25:53 -04:00
Alex Grin
e1a44c93f8
Merge branch 'master' into fix-collectionUpdateWithReplace 2021-05-18 15:25:40 -04:00
Alex Grin
07e7087a09
Merge pull request #3303 from keikari/patch-1
Minor fix suggestion for issue #3240
2021-05-18 15:23:56 -04:00
Alex Grin
2c79c7e2f6
Merge branch 'master' into patch-1 2021-05-18 15:23:08 -04:00
Victor Shyba
09f6637fe0 remove unused multiprocessing.Manager 2021-05-17 15:07:32 -03:00
Victor Shyba
3784db3308 test collections update with --replace 2021-05-15 03:27:33 -03:00
zeppi
2b950ff5dd fix bug in collection_update --replace 2021-05-15 03:27:33 -03:00
Alex Grin
09339c9cfb
Merge pull request #3305 from lbryio/fix_migrator_tool
fix hub Elasticsearch sync/migrations tool for when the db exists already
2021-05-14 11:06:07 -04:00
Victor Shyba
ccadd88af5 fix cache call 2021-05-13 22:40:21 -03:00
Victor Shyba
cc02a0efc2 fix es migration bug, expand test case 2021-05-13 19:00:53 -03:00
Victor Shyba
43a1385b79 test sync helper 2021-05-13 19:00:53 -03:00
Victor Shyba
5101464e3b add integration tests command to install.md 2021-05-13 19:00:36 -03:00
Victor Shyba
3d71478d38 update install.md with ES instructions 2021-05-13 19:00:36 -03:00
Victor Shyba
4989ed445e add ES to makefile 2021-05-13 19:00:36 -03:00
keikari
d9413039ec
Fix suggestion for issue #3240
L135: If `getattr()` returns `None`, use `""` instead to avoid the error in issue #3240
2021-05-12 18:30:38 +03:00
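In isolation, the guard looks roughly like this (hedged; `stream_name` is a stand-in for the actual attribute at the line the message references):
```python
def safe_stream_name(claim) -> str:
    # if getattr() returns None, substitute "" so later string operations
    # don't blow up (the fix described for issue #3240)
    return getattr(claim, "stream_name", None) or ""
```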
Jack Robison
eba0c9be34
fix typo 2021-05-07 16:51:19 -04:00
Jack Robison
48c9e9f3cc
Merge pull request #3294 from lbryio/versioned-es-search-index
add versioning to ES search index and automate resync on version bumps
2021-05-07 15:38:21 -04:00
Jack Robison
81ebde88db
resync ES search index on version bumps
-bump ES search index to version 1
2021-05-07 14:36:53 -04:00
Jack Robison
79ced9d0f8
Merge pull request #3262 from lbryio/channel_repost_has_source
Fix bug for `has_source=True` hiding channel reposts
2021-05-07 14:36:04 -04:00
Victor Shyba
a4058b84ce clean out unused sharding 2021-05-07 15:03:37 -03:00
Victor Shyba
7bf211a52b apply reposted_claim_type on es sync 2021-05-07 15:03:37 -03:00
Victor Shyba
d5f722792f fix and test has_source for channel reposts 2021-05-07 15:03:37 -03:00
Victor Shyba
0f02906c9b fix has_source for reposted channels 2021-05-07 15:03:37 -03:00
Victor Shyba
9582e228b1 assert instead of sleep 2021-05-07 15:02:31 -03:00
Victor Shyba
45f20431f9 update tests from the removed feed 2021-05-07 15:02:31 -03:00
Victor Shyba
7554e6d7f9 remove dead code 2021-05-07 15:02:31 -03:00
Victor Shyba
cb8f26f177 remove broken feed 2021-05-07 15:02:31 -03:00
Jack Robison
b5dfce7861
Revert "finished switch from using hash # in URLs to colon :"
This reverts commit 888aa558
2021-05-07 11:31:28 -04:00
Jack Robison
2ca5a65544
Revert "FindShortestID updated"
This reverts commit 8f04a50c
2021-05-07 11:30:31 -04:00
Jack Robison
17deb136db
Revert "StreamCommands"
This reverts commit 2a8ccb06
2021-05-07 11:29:45 -04:00
Jack Robison
8c9710c76c
Merge pull request #3293 from lbryio/fix-block-processor-crash-invalid-fee
fix invalid claim fees breaking the block processor
2021-05-07 10:03:40 -04:00
Jack Robison
32f7ecb261
fix invalid claim fees breaking the block processor 2021-05-06 11:18:58 -04:00
Victor Shyba
fb77fde710 for debug, it is always whole page 2021-05-04 22:22:07 -03:00
Victor Shyba
3c67bb90d7 don't fail when a single one goes on maintenance, and set completion event regardless of failures 2021-05-04 22:22:07 -03:00
Victor Shyba
dabb168853 dont log full exceptions on simple connection errors 2021-05-04 22:22:07 -03:00
Victor Shyba
45e5b3b219 dont log full pages 2021-05-04 22:22:07 -03:00
Alex Grin
a6b7469923
Update README.md 2021-05-03 18:04:29 -04:00
Jack Robison
cb5dab3033
Merge pull request #3285 from lbryio/restrict-udp-source
Restrict udp sources, add `ALLOW_LAN_UDP` hub setting
2021-04-29 10:19:32 -04:00
Jack Robison
21d0038ff2
add timestamps to hub log 2021-04-28 16:47:00 -04:00
Jack Robison
c094d8f2e8
add ALLOW_LAN_UDP hub setting 2021-04-28 16:47:00 -04:00
Jack Robison
c465d6a6c2
ignore udp packets with low source ports 2021-04-28 16:47:00 -04:00
Lex Berezhny
73d35bc985 v0.97.0 2021-04-28 16:23:46 -04:00
Lex Berezhny
2a8ccb065b StreamCommands 2021-04-28 16:21:01 -04:00
Lex Berezhny
8f04a50ce1 FindShortestID updated 2021-04-28 16:21:01 -04:00
Lex Berezhny
888aa5586b finished switch from using hash # in URLs to colon : 2021-04-28 16:21:01 -04:00
Lex Berezhny
99f56f5d22 v0.96.0 2021-04-28 15:26:58 -04:00
Jack Robison
ad6281090d
Merge pull request #3275 from lbryio/search_caching_issues
add caching to "search ahead" code and invalidate short_url cache on every block
2021-04-28 14:14:29 -04:00
Victor Shyba
f0d334d3e2 refactor from review 2021-04-28 13:28:38 -03:00
Victor Shyba
5f829b048f use separator to avoid cache key conflicts 2021-04-27 22:57:04 -03:00
Victor Shyba
1a961e66ff invalidate short_id cache on new block 2021-04-27 22:57:04 -03:00
Victor Shyba
fdb0e22656 cache search_ahead 2021-04-27 22:57:04 -03:00
Jack Robison
132ee1915f
Merge pull request #3283 from lbryio/fix_multiprocessing_db
fix multiprocessing support on client db
2021-04-27 16:46:28 -04:00
Victor Shyba
44bf4f3c8f fix if statement from always evaluating a string 2021-04-27 17:10:04 -03:00
Alex Grintsvayg
6237767d5a
Merge branch 'make-test'
* make-test:
  run tests using make
2021-04-23 15:26:26 -04:00
Alex Grintsvayg
dec9d96417
run tests using make 2021-04-23 15:25:40 -04:00
Lex Berezhny
b167c87267 v0.95.0 2021-04-23 14:55:38 -04:00
Lex Berezhny
2280fe8e8e default has_source to 1 2021-04-23 14:54:51 -04:00
Lex Berezhny
575d6dcd2d migration specifically for upgrading from client db v1.5 to v1.6 2021-04-23 14:54:51 -04:00
Lex Berezhny
f729490c6b pending claims ordered towards top in claim_list 2021-04-23 11:00:58 -04:00
Lex Berezhny
b32124cdd6 regenerate docs 2021-04-23 10:24:48 -04:00
Lex Berezhny
3d4321ee38 added --has_source/--has_no_source filters to claim_list 2021-04-23 10:24:48 -04:00
Alex Grintsvayg
85034b382e
Revert "run tests using make"
This reverts commit 77a51d1ad4.
2021-04-21 11:41:16 -04:00
Alex Grintsvayg
77a51d1ad4
run tests using make 2021-04-20 15:41:09 -04:00
Alex Grin
33e0cdc2d7
Update docker-compose-wallet-server.yml 2021-04-16 11:58:02 -04:00
Alex Grin
6519faa2fe
Update docker-compose-wallet-server.yml 2021-04-16 11:53:16 -04:00
Lex Berezhny
5e3a234cbe v0.94.1 2021-04-16 11:18:24 -04:00
Lex Berezhny
e54c31d2d5 fix bug in how reserved balance is calculated 2021-04-16 11:17:51 -04:00
Alex Grin
66c0537251
Create ossar-analysis.yml 2021-04-15 15:24:26 -04:00
Alex Grin
ac58516593
Create codeql-analysis.yml 2021-04-15 15:22:57 -04:00
Alex Grin
c3da6322b5
Create SECURITY.md 2021-04-15 15:21:17 -04:00
Lex Berezhny
3d241500cf v0.94.0 2021-04-14 19:55:35 -04:00
Lex Berezhny
ded8224f66 update docs 2021-04-14 19:52:50 -04:00
Lex Berezhny
f8814881a1 ability to set sd_hash, file_name and file_hash when updating a stream claim 2021-04-14 19:52:50 -04:00
Victor Shyba
cc2852cd48 new implementation for limit_claims_per_channel 2021-04-14 18:32:16 -04:00
Lex Berezhny
467637a9eb fix test 2021-04-14 11:24:58 -04:00
Lex Berezhny
3cfc292d84 lint 2021-04-14 11:24:58 -04:00
Lex Berezhny
6acf94a810 moved balance calculation to SQL 2021-04-14 11:24:58 -04:00
Jack Robison
31367fb4c4 show hostnames of spvs 2021-04-13 11:51:27 -04:00
Jack Robison
12d6074e3b fix typing 2021-04-13 11:51:27 -04:00
Lex Berezhny
ff30386051 lint 2021-04-06 21:22:27 -04:00
shubhendra
601f99ac16 Remove unnecessary generator
Signed-off-by: shubhendra <withshubh@gmail.com>
2021-04-06 21:22:27 -04:00
shubhendra
87fe5c6101 Refactor the comparison involving not
Signed-off-by: shubhendra <withshubh@gmail.com>
2021-04-06 21:22:27 -04:00
shubhendra
68399ca31c Iterate dictionary directly
Signed-off-by: shubhendra <withshubh@gmail.com>
2021-04-06 21:22:27 -04:00
shubhendra
2a6d7fd80f Remove methods with unnecessary super delegation.
Signed-off-by: shubhendra <withshubh@gmail.com>
2021-04-06 21:22:27 -04:00
shubhendra
4725f510d8 Remove unnecessary use of comprehension
Signed-off-by: shubhendra <withshubh@gmail.com>
2021-04-06 21:22:27 -04:00
shubhendra
be0ba22222 Remove unnecessary comprehension
Signed-off-by: shubhendra <withshubh@gmail.com>
2021-04-06 21:22:27 -04:00
Lex Berezhny
c8781392be added unit test for Access-Control HTTP headers 2021-04-06 17:12:05 -04:00
John Leith
b97164fcfb adding access control headers 2021-04-06 17:12:05 -04:00
Lex Berezhny
0dfb92281b v0.93.0 2021-03-30 20:59:47 -04:00
Victor Shyba
4fe80c40da also apply to test:json-api 2021-03-30 17:00:15 -04:00
Victor Shyba
f0fac5115a update tox to pass ELASTIC_HOST 2021-03-30 17:00:15 -04:00
Victor Shyba
46dd389d0d add elasticsearch service to gitlab 2021-03-30 17:00:15 -04:00
Jack Robison
1e28e21ab5
Merge pull request #3248 from lbryio/add-es-host-setting
add ELASTIC_HOST and ELASTIC_PORT settings to hub
2021-03-30 13:09:18 -04:00
Jack Robison
7832c62c5d
add ELASTIC_HOST and ELASTIC_PORT settings to hub 2021-03-30 12:48:13 -04:00
Lex Berezhny
d025ee9dbe revert release 2021-03-30 11:29:17 -04:00
Lex Berezhny
a9a9cb4319 v0.93.0 2021-03-30 10:15:31 -04:00
Victor Shyba
aa727cb9b1 show channels regardless of no_source 2021-03-30 09:47:08 -04:00
Victor Shyba
b8c9a99f20 fix no_source for reposts 2021-03-30 09:47:08 -04:00
Lex Berezhny
aff995b0d0 temporary fix for mempool sync failing during reorg 2021-03-29 16:11:03 -04:00
Jack Robison
2cc7e5dfdc
Merge pull request #3153 from lbryio/elasticsearch
hub: use Elasticsearch for `claim_search` and `resolve` calls
2021-03-24 16:44:14 -04:00
Victor Shyba
5235a150b1 add prog name to sync arg parser 2021-03-24 17:07:17 -03:00
Victor Shyba
c6372ea9de hub->lbry-hub 2021-03-24 17:03:57 -03:00
Victor Shyba
7df4cc44c4 fixes from review 2021-03-24 16:30:33 -03:00
Victor Shyba
d47cf40544 add reader.py for test_sqldb tests 2021-03-19 19:58:13 -03:00
Victor Shyba
7f5d88e95c remove dead/broken/unused API 2021-03-19 19:58:13 -03:00
Victor Shyba
d09663c066 remove flush call 2021-03-19 19:58:13 -03:00
Victor Shyba
ef97c9b69f torba-server -> hub 2021-03-19 19:58:13 -03:00
Victor Shyba
d855e6c8b1 move elasticsearch things into its own module 2021-03-19 19:58:13 -03:00
Victor Shyba
cd66f7eb43 if not no_totals, use default page size 2021-03-19 19:58:13 -03:00
Victor Shyba
6a35a7ba4c expand content filtering tests for no_totals 2021-03-19 19:58:13 -03:00
Victor Shyba
a3e146dc68 sort on index time 2021-03-19 19:58:13 -03:00
Victor Shyba
b81305a4a9 index and allow has_source 2021-03-19 19:58:13 -03:00
Victor Shyba
73884b34bc apply no_totals 2021-03-19 19:58:13 -03:00
Victor Shyba
6166a34db2 check cache item before locking 2021-03-19 19:58:13 -03:00
Victor Shyba
6fa7da4b1c fewer slices 2021-03-19 19:58:13 -03:00
Victor Shyba
c3e426c491 fix search by channel for invalid channel 2021-03-19 19:58:13 -03:00
Victor Shyba
21e023f0db fix search by channel 2021-03-19 19:58:13 -03:00
Victor Shyba
063be001b3 cache inner parsing 2021-03-19 19:58:13 -03:00
Victor Shyba
5dff02e8bc on resolve, get all claims at once 2021-03-19 19:58:13 -03:00
Victor Shyba
60a59407d8 cache the encoded output instead 2021-03-19 19:58:13 -03:00
Victor Shyba
20a5aecfca fix lib exception to asyncio TimeoutError 2021-03-19 19:58:13 -03:00
Victor Shyba
c2e7b5a67d restore some of the interrupt metrics 2021-03-19 19:58:13 -03:00
Victor Shyba
8f32303d07 apply search timeout 2021-03-19 19:58:13 -03:00
Victor Shyba
891b1e7782 track results up to 200 2021-03-19 19:58:13 -03:00
Victor Shyba
f26394fd3b report deletions on docs that don't exist, but don't raise 2021-03-19 19:58:13 -03:00
Victor Shyba
4d83d42b4c fix equality instead of mod 2021-03-19 19:58:13 -03:00
Victor Shyba
57f1108df2 fix query being json serializable 2021-03-19 19:58:13 -03:00
Victor Shyba
2641a9abe5 make better resolve cache 2021-03-19 19:58:13 -03:00
Victor Shyba
6b193ab350 make indexing cooperative 2021-03-19 19:58:13 -03:00
Victor Shyba
b1bb37511c use right key on cache 2021-03-19 19:58:13 -03:00
Victor Shyba
319187d6d6 log mempool task exceptions 2021-03-19 19:58:13 -03:00
Victor Shyba
02eb789f84 caching for resolve 2021-03-19 19:58:13 -03:00
Victor Shyba
5a9338a27f use a dict on set_reference 2021-03-19 19:58:13 -03:00
Victor Shyba
eb6924277f round time to 10 minutes and fetch referenced by id 2021-03-19 19:58:13 -03:00
Victor Shyba
325419404d update dockerfile 2021-03-19 19:58:13 -03:00
Victor Shyba
bd8f371fd5 bump referenced rows query limit up 2021-03-19 19:58:13 -03:00
Victor Shyba
1783ff2845 don't delete claims on reorg 2021-03-19 19:58:13 -03:00
Victor Shyba
d388527ffa log indexing errors 2021-03-19 19:58:13 -03:00
Victor Shyba
19494088bd generate from queue 2021-03-19 19:58:13 -03:00
Victor Shyba
920dad524a simplify sync and use asyncio Queue instead 2021-03-19 19:58:13 -03:00
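"simplify sync and use asyncio Queue instead" suggests a producer/consumer shape; a generic sketch (names hypothetical, not the actual sync script), with a bounded queue so the producer backs off when indexing falls behind:

    import asyncio

    async def produce(queue):
        for batch in (["claim-a"], ["claim-b", "claim-c"]):
            await queue.put(batch)
        await queue.put(None)  # sentinel: no more batches

    async def consume(queue):
        while (batch := await queue.get()) is not None:
            print("indexing", batch)  # stand-in for the ES bulk call

    async def main():
        queue = asyncio.Queue(maxsize=10)
        await asyncio.gather(produce(queue), consume(queue))

    asyncio.run(main())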
Victor Shyba
ec89bcac8e improve sync script for no-downtime maintenance 2021-03-19 19:58:13 -03:00
Victor Shyba
a916c1f4ad check if db file exists before sync 2021-03-19 19:58:13 -03:00
Victor Shyba
a9a0ac92d7 ignore unset flag 2021-03-19 19:58:13 -03:00
Victor Shyba
da8a8bd1ef filter+fts and tests for edge cases 2021-03-19 19:58:13 -03:00
Victor Shyba
d9c746891d pin python3.7 2021-03-19 19:58:13 -03:00
Victor Shyba
67817005b5 check ES synced without a process and wait for ES 2021-03-19 19:58:13 -03:00
Jack Robison
24d11de5a7 torba-elastic-sync 2021-03-19 19:58:13 -03:00
Victor Shyba
9251c87323 refresh after sync 2021-03-19 19:58:13 -03:00
Victor Shyba
e12fab90d1 docker compose update 2021-03-19 19:58:13 -03:00
Victor Shyba
0a194b5b01 claim_ids query 2021-03-19 19:58:13 -03:00
Victor Shyba
8d028adc53 be a writer by default 2021-03-19 19:58:13 -03:00
Victor Shyba
dfca15395e claim id is also a keyword 2021-03-19 19:58:13 -03:00
Victor Shyba
e21f2362fe apply reorg deletion as well 2021-03-19 19:58:13 -03:00
Victor Shyba
1ce328e8a9 cache signature inspection 2021-03-19 19:58:13 -03:00
Victor Shyba
038a5f999f cache encoded headers 2021-03-19 19:58:13 -03:00
Victor Shyba
5d3704c7ea reader mode 2021-03-19 19:58:13 -03:00
Victor Shyba
87037c06c9 remove reader code 2021-03-19 19:58:13 -03:00
Victor Shyba
dd412c0f50 delete sqlite fts 2021-03-19 19:58:13 -03:00
Victor Shyba
bf44befff6 backport fixes from server 2021-03-19 19:58:13 -03:00
Victor Shyba
e61874bb6f only repeat search if it has blocked items 2021-03-19 19:58:13 -03:00
Victor Shyba
1e5331768f fix some of the tests 2021-03-19 19:58:13 -03:00
Victor Shyba
ec9a3a4f7c do not page filtered 2021-03-19 19:58:13 -03:00
Victor Shyba
e439a3a8dc advanced resolve 2021-03-19 19:58:13 -03:00
Victor Shyba
19f70d7a11 create changelog trigger 2021-03-19 19:58:13 -03:00
Victor Shyba
afe7ed5b05 adjust size 2021-03-19 19:58:13 -03:00
Victor Shyba
d4bf004d74 use a thread pool to sync changes 2021-03-19 19:58:13 -03:00
Victor Shyba
e4d06a088b include the channel being filtered/blocked 2021-03-19 19:58:13 -03:00
Victor Shyba
0929088b12 missing refresh step 2021-03-19 19:58:13 -03:00
Victor Shyba
7b4838fc9b don't update more than 400 items at a time 2021-03-19 19:58:13 -03:00
Victor Shyba
0cf9533248 narrow update by query 2021-03-19 19:58:13 -03:00
Victor Shyba
84ff0b8a9f general timeout 2021-03-19 19:58:13 -03:00
Victor Shyba
d467dcfeaf increase sync queue 2021-03-19 19:58:13 -03:00
Victor Shyba
8e68ba4751 fix join, refresh before update 2021-03-19 19:58:13 -03:00
Victor Shyba
0f2a85ba9f simplify sync 2021-03-19 19:58:13 -03:00
Victor Shyba
7674a0a91e backport fixes from testing server 2021-03-19 19:58:13 -03:00
Victor Shyba
5bc1a66572 32 slices and add censor type to fields 2021-03-19 19:58:13 -03:00
Victor Shyba
9b56067213 raise request timeout for content filtering 2021-03-19 19:58:13 -03:00
Victor Shyba
9a9df2fc3c apply filtering only to what's unfiltered 2021-03-19 19:58:13 -03:00
Victor Shyba
9989d8d1d4 refresh after delete 2021-03-19 19:58:13 -03:00
Victor Shyba
f9471f297e apply filter and block from ES script lang 2021-03-19 19:58:13 -03:00
Victor Shyba
146b693e4a exclude title and description 2021-03-19 19:58:13 -03:00
Victor Shyba
7295b7e329 make sync parallel 2021-03-19 19:58:13 -03:00
Victor Shyba
e2441ea3e7 use prefix from ES docs 2021-03-19 19:58:13 -03:00
Victor Shyba
119e51912e fix partial id 2021-03-19 19:58:13 -03:00
Victor Shyba
dd950f5b0d tag can have empty space 2021-03-19 19:58:13 -03:00
Victor Shyba
78a9bad1e1 no indexer_task 2021-03-19 19:58:13 -03:00
Victor Shyba
0c6eaf5484 fix resolve partial id 2021-03-19 19:58:13 -03:00
Victor Shyba
1010068ddb disable refresh interval. start with 3 shards 2021-03-19 19:58:13 -03:00
Victor Shyba
82eec3d8d7 use multiple clients on sync script indexing 2021-03-19 19:58:13 -03:00
Victor Shyba
ee7b37d3f3 also normalize the name supplied by user 2021-03-19 19:58:13 -03:00
Victor Shyba
143d82d242 normalized, not normalized_name 2021-03-19 19:58:13 -03:00
Victor Shyba
8b91b38855 update winners in one go 2021-03-19 19:58:13 -03:00
Victor Shyba
1098f0d2a3 use normalized name instead 2021-03-19 19:58:13 -03:00
Victor Shyba
ab53cec022 fix is_controlling sync 2021-03-19 19:58:13 -03:00
Victor Shyba
6f5f8e5648 add elasticsearch dep 2021-03-19 19:58:13 -03:00
Victor Shyba
edfd707c22 run ES on github actions 2021-03-19 19:58:13 -03:00
Victor Shyba
1870f30af8 add sync script 2021-03-19 19:58:13 -03:00
Victor Shyba
90106f5f08 all test_claim_commands tests green 2021-03-19 19:58:13 -03:00
Victor Shyba
9924b7b438 reposts and tag inheritance 2021-03-19 19:58:13 -03:00
Victor Shyba
aa37faab0a use porter analyzer with weights on full text search 2021-03-19 19:58:13 -03:00
Victor Shyba
dc10f8ce72 ignore errors when deleting 2021-03-19 19:58:13 -03:00
Victor Shyba
996686c1da claim search and resolve translated to ES queries 2021-03-19 19:58:13 -03:00
Victor Shyba
488785d013 add indexer task 2021-03-19 19:58:13 -03:00
Victor Shyba
3abdc01230 index ES during sync 2021-03-19 19:58:13 -03:00
Victor Shyba
8da04a584f start waiting before generate 2021-03-19 18:01:29 -03:00
Victor Shyba
27cc61d45e limit test time to 2 minutes, then consider it a failure and log what was running 2021-03-19 18:01:29 -03:00
Lex Berezhny
7371c30064 v0.92.0 2021-03-15 13:07:30 -04:00
Lex Berezhny
140d163895 removed redundant comment 2021-03-14 10:11:42 -04:00
Victor Shyba
dc33bdc1dc update api json 2021-03-14 10:11:42 -04:00
Victor Shyba
74df4fab83 change column to has_source and document both flags 2021-03-14 10:11:42 -04:00
Victor Shyba
1e5cd3d7a1 typo, fix tests 2021-03-14 10:11:42 -04:00
Victor Shyba
a54e9b64aa add no_source claim_search filter 2021-03-14 10:11:42 -04:00
Victor Shyba
74660704e3 fix update 2021-03-14 10:11:42 -04:00
Victor Shyba
7439893a2a fix get for sourceless claims 2021-03-14 10:11:42 -04:00
Victor Shyba
e27e49e9dc call update only once 2021-03-14 10:11:42 -04:00
Victor Shyba
34ed729c59 there is no 'sd_hash' parameter for this API 2021-03-14 10:11:42 -04:00
Victor Shyba
adaeeca3fd let file_path be optional 2021-03-14 10:11:42 -04:00
Jack Robison
dac75563d3 add --no_file_path param to publish, stream_create, and stream_update 2021-03-14 10:11:42 -04:00
Alex Grintsvayg
cbc76adcaa only return unspent txos if is_spent flag is not used. fixes #2923 2021-03-13 06:44:20 -05:00
Lex Berezhny
69a9cb383d oops 2021-03-12 13:29:55 -05:00
Lex Berezhny
4343073c00 clients can connect to wallet server even when they are not reachable by UDP 2021-03-12 13:29:55 -05:00
Jack Robison
fe60d4be88
Merge pull request #3221 from lbryio/subscribe_hash_on_call
Improve performance of address subscriptions and transaction proofs
2021-03-10 15:58:50 -05:00
Victor Shyba
ae337807f5 get merkles outside thread cooperatively 2021-03-10 13:05:17 -03:00
Victor Shyba
9ae30ac08e during subscribe, hash address only when its time 2021-03-10 12:51:58 -03:00
Lex Berezhny
62fa85c0a4 fix test 2021-03-09 13:27:36 -05:00
Lex Berezhny
7bb873dad9 removed connection_status field from the status command, use wallet.connected instead to determine if SDK is connected 2021-03-09 13:27:36 -05:00
Lex Berezhny
5f6c1c14cb v0.91.0 2021-03-04 00:04:25 -05:00
Lex Berezhny
d43189ad33 regenerate docs 2021-03-04 00:03:16 -05:00
Lex Berezhny
fcad76fc51 lint 2021-03-04 00:03:16 -05:00
Lex Berezhny
97e6e1684e simplifying 2021-03-04 00:03:16 -05:00
zeppi
67a0d3e926 update docs 2021-03-04 00:03:16 -05:00
zeppi
183fb9f9ff provide --resolve tag for collection claim, separate from resolving its contents
bugfix and docs generation

review changes
2021-03-04 00:03:16 -05:00
Lex Berezhny
9815ddef1f fixes stalling client reconnect issue 2021-03-03 23:31:59 -05:00
Lex Berezhny
f6d0847453 v0.90.1 2021-03-01 11:57:40 -05:00
Lex Berezhny
b0b9f0d65f regenerate docs 2021-03-01 11:52:17 -05:00
Lex Berezhny
0cec80f676 fixes transaction signing bug when tx had no change outputs 2021-03-01 11:20:08 -05:00
Patrick Keane
48c64143e3 Add no_totals argument to claim search 2021-02-26 10:41:15 -05:00
Patrick Keane
a8712422bc Update daemon.py 2021-02-21 23:45:18 -05:00
Patrick Keane
97f65bd283 Minor spelling correction(s). 2021-02-21 23:45:18 -05:00
Lex Berezhny
fd3c1c50f1 v0.90.0 2021-02-16 11:13:14 -05:00
Lex Berezhny
b153e4bb9f added support to claim_search for filtering collections via --claim_type 2021-02-16 11:08:54 -05:00
Lex Berezhny
db9856a8db use median exchange rate when several exchange rates are available 2021-02-15 14:09:32 -05:00
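Taking the median across providers makes one stale or manipulated feed much less able to skew conversions than an average would. A sketch with made-up rates:

    from statistics import median

    # BTC per LBC, purely illustrative values
    rates = {"bittrex": 0.0021, "cryptonator": 0.0023, "upbit": 0.0098}
    rate = median(rates.values())  # 0.0023 -- the 0.0098 outlier is ignored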
Lex Berezhny
75ecea265d ensures content purchase fails appropriately when exchange rate is not available 2021-02-11 13:57:33 -05:00
Lex Berezhny
be8751cb73 integration test fixes 2021-02-10 22:14:13 -05:00
Lex Berezhny
fb25ecb4a1 unittest 2021-02-10 22:14:13 -05:00
Lex Berezhny
f1cb7d27ac lint 2021-02-10 22:14:13 -05:00
Lex Berezhny
dee494e12f converting from USD, BTC to LBC is now done via several exchange rate providers: Bittrex, Cryptonator, CoinEx, hotbit and UPbit 2021-02-10 22:14:13 -05:00
Lex Berezhny
b13a121915 v0.89.0 2021-02-09 10:12:05 -05:00
Lex Berezhny
7486ee9537 do not tx._reset() in generate_channel_private_key 2021-02-08 09:56:03 -05:00
Lex Berezhny
4a20ccc28e tx._reset() the transaction after more internal changes 2021-02-08 09:56:03 -05:00
Lex Berezhny
f80dd2b307 tx._reset() the transaction after internal changes 2021-02-08 09:56:03 -05:00
Lex Berezhny
b208cf6d32 significant performance improvement when creating transactions and with txo spend specifically 2021-02-08 09:56:03 -05:00
Lex Berezhny
39e78ff17e updated exchange rate manager to use v3 bittrex API (old one is deprecated) 2021-02-03 14:15:19 -05:00
Alex Grin
a8177ea7fe
Merge pull request #3171 from lbryio/fix-collectionChannel 2021-02-02 14:16:42 -05:00
Alex Grin
bedcfc154b
Merge branch 'master' into fix-collectionChannel 2021-02-02 11:25:52 -05:00
zeppi
4c38b4aa3c docs and reenable gitlab json generation 2021-02-01 19:19:46 -05:00
Victor Shyba
f6cfe266e0 specify Returns correctly for docs gen 2021-02-01 19:19:46 -05:00
zeppi
4905e65f14 fix comment tests and json generation 2021-02-01 19:19:46 -05:00
Lex Berezhny
ccb250b410 txo_list --resolve now works for signed supports by resolving the signing channel 2021-01-26 20:53:36 -05:00
zeppi
aca57ffc62 allows cli to accept channel_id for collections 2021-01-26 10:02:01 -05:00
Dispatch
7f375f42d8 Fix / Add correct mimetypes for HLS / DASH 2021-01-25 15:55:56 -05:00
Lex Berezhny
eedcc2034d fixes #3166 2021-01-25 10:56:48 -05:00
Jack Robison
24c9a167d7
dockerfile 2021-01-22 12:28:04 -05:00
Jack Robison
909df8ef1f
dockerfile 2021-01-22 11:54:56 -05:00
Jack Robison
3b27cb3671 v0.88.0 2021-01-22 10:41:06 -05:00
Lex Berezhny
3fe0db4a7d fix integration tests 2021-01-22 07:51:13 -05:00
Lex Berezhny
8b55814ab2 fixes #2943 bug where get forced user to purchase their own priced stream 2021-01-22 07:51:13 -05:00
Jack Robison
575e471553
Merge pull request #3161 from lbryio/ledger-compatibility
handle connections from ledger clients in spv server
2021-01-22 00:09:50 -05:00
Jack Robison
0f5f1aebed
accept connections from ledger clients in spv server 2021-01-21 21:37:39 -05:00
Jack Robison
50e17eb1ab
Merge pull request #3148 from lbryio/udp-wallet-server-status
UDP based spv server status, improved server selection
2021-01-21 20:55:16 -05:00
Jack Robison
158cc2f660
skip test_single_server_payment 2021-01-21 19:29:59 -05:00
Jack Robison
1066a31acd
fix test 2021-01-21 18:46:28 -05:00
Jack Robison
1f9d0f4582
pylint 2021-01-21 16:30:54 -05:00
Jack Robison
a6d65233f1
fallback to getting external ip from spv servers instead of internal apis 2021-01-21 16:20:53 -05:00
Jack Robison
eff2fe7a1b
update tests 2021-01-21 16:20:01 -05:00
Jack Robison
20efdc70b3
use UDP ping for wallet server selection
-only connect to one spv server at a time
-remove session pool
2021-01-21 16:15:42 -05:00
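A rough sketch of ping-based selection (the actual SPV status protocol has its own datagram format; this just times UDP round-trips and picks the fastest responder):

    import socket
    import time

    def ping_once(host, port, timeout=1.0):
        """Send one datagram and time the reply; None on timeout."""
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            sock.settimeout(timeout)
            start = time.perf_counter()
            sock.sendto(b"ping", (host, port))
            try:
                sock.recvfrom(1024)
            except socket.timeout:
                return None
            return time.perf_counter() - start

    def pick_fastest(servers):
        timed = [(ping_once(h, p), (h, p)) for h, p in servers]
        alive = [(rtt, s) for rtt, s in timed if rtt is not None]
        return min(alive)[1] if alive else None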
Jack Robison
f0d8fb8f1a
add UDP based ping protocol for spv servers 2021-01-21 16:08:33 -05:00
Jack Robison
f7a380e9b7
start prometheus before block processing 2021-01-21 16:01:33 -05:00
Jack Robison
e9c7cf6f63
logging 2021-01-21 14:51:59 -05:00
Jack Robison
68f1661452
add LRUCache with no prometheus metrics 2021-01-21 14:37:08 -05:00
Jack Robison
36fd1b91ae
cache server features 2021-01-21 14:37:08 -05:00
Jack Robison
a4ec430ac0
improve subscription performance 2021-01-21 14:37:08 -05:00
Jack Robison
519614b2fd
skip libtorrent component in tests 2021-01-21 14:37:08 -05:00
Lex Berezhny
bf0118c8ef added support for --not_channel_id to txo_list commands 2021-01-21 14:03:59 -05:00
Jack Robison
a4db0820bc
Merge pull request #3136 from lbryio/fix-reflector-loop
fix reflector loop crashing upon an unexpected error
2021-01-08 12:40:18 -05:00
Jack Robison
ee7528413e
update tests 2021-01-08 11:54:02 -05:00
Jack Robison
7952fc8324
fix reflector loop propagating cancelled error 2021-01-08 09:53:12 -05:00
Jack Robison
652773d2cf
Merge pull request #3129 from lbryio/signing_api
add `channel_sign` api for signing data with a channel identity
2021-01-05 14:13:09 -05:00
Victor Shyba
2a17787242 fix test 2021-01-04 18:04:06 -03:00
Victor Shyba
0a53ad5721 use the comment api signing implementation 2021-01-04 18:04:06 -03:00
Victor Shyba
6da6bdc863 adds jsonrpc_channel_sign signing api 2021-01-04 18:04:06 -03:00
Jack Robison
42ad2bb83f
Merge pull request #3135 from lbryio/loop-metrics
add prometheus metrics for asyncio loop
2021-01-02 11:54:21 -05:00
Jack Robison
f309a65cb4
fix 2021-01-01 15:25:31 -05:00
Jack Robison
77e19ab1a4
prometheus metrics for asyncio loop responsiveness 2021-01-01 15:25:31 -05:00
Jack Robison
1a996b6ef3
Merge pull request #3134 from lbryio/log_checkpoint
log checkpoint finishing so we can gather information on leftover files bug
2021-01-01 15:24:30 -05:00
Victor Shyba
b882f1a010 log checkpoint finishing for gathering information on wal/shm file bug 2020-12-31 16:11:22 -03:00
Jack Robison
82a030e6ff
Merge pull request #3130 from lbryio/block-cache-metrics
skip null args in `channel_ids` given to `claim_search`
2020-12-30 20:57:56 -05:00
Jack Robison
0758b85179
skip null args given to channel_ids in claim search 2020-12-29 21:41:07 -05:00
Jack Robison
ab3d9bd080
block cache metrics 2020-12-29 21:40:39 -05:00
Thomas Zarebczan
8ff813f689
Take 5!
Why you try to pass version param to other installs?
2020-12-26 12:31:51 -05:00
Thomas Zarebczan
88ae72c0d3
Take 3
Why you fail on uninstall?
2020-12-24 20:28:50 -05:00
Thomas Zarebczan
312aa4be26
Fix Windows build take 2 2020-12-24 04:41:21 -05:00
Thomas Zarebczan
cbb06fce9d
Fix windows build, Merry christmas! 2020-12-24 04:13:03 -05:00
Thomas Zarebczan
f259e497c4
Uninstall old python first 2020-12-24 03:48:17 -05:00
Thomas Zarebczan
dd4172ac66
64 bit windows 2020-12-24 03:16:07 -05:00
Jack Robison
66029e60d3 v0.87.0 2020-12-23 21:51:13 -05:00
Jack Robison
364f484f04
Merge pull request #3127 from lbryio/lru-cache-metrics
Add metrics for lru caches
2020-12-23 21:08:01 -05:00
Jack Robison
9dd5159414
increase server cache sizes 2020-12-23 20:47:05 -05:00
Jack Robison
13e38d6fd8
use LRUCache instead of pylru in wallet server 2020-12-23 20:47:05 -05:00
Jack Robison
10dcb64715
lru cache metrics 2020-12-23 20:47:03 -05:00
Jack Robison
7551b51e7d
Merge pull request #3128 from lbryio/sync_smaller_batches
Improve memory usage during wallet sync
2020-12-23 20:45:03 -05:00
Victor Shyba
adb418aafc group cache hits in a single dict 2020-12-23 21:37:20 -03:00
Victor Shyba
270da80d64 return whole batches 2020-12-23 19:10:19 -03:00
Victor Shyba
b2027cfd66 sync in batches of 10, clearing after 2020-12-23 18:30:52 -03:00
Jack Robison
7f1f4eeac6
Merge pull request #3125 from lbryio/fix_order42
fix wallet db integrity / sync bugs
2020-12-22 20:50:46 -05:00
Jack Robison
7a7446c8bd
force resync blockchain.db 2020-12-22 20:08:26 -05:00
Jack Robison
ddbae294e6
skip doc test on gitlab 2020-12-22 16:55:33 -05:00
Jack Robison
8c71b744f3
fix request_synced_transactions edge cases 2020-12-22 15:19:08 -05:00
Jack Robison
479b5d31a9
fix test 2020-12-22 14:06:53 -05:00
Jack Robison
4cbf4230e8
fix txi.txo_ref 2020-12-22 14:05:37 -05:00
Victor Shyba
6a610187e0 cache bypass 2020-12-22 13:01:30 -03:00
Victor Shyba
eb2a4aebba unrestricted and reusing verified cache hits 2020-12-21 16:49:08 -03:00
Victor Shyba
21a2e67755 fix rebase 2020-12-20 21:39:36 -03:00
Victor Shyba
3b9e312615 fix verification 2020-12-20 21:25:26 -03:00
Victor Shyba
26dab04c9e checkpoint 2020-12-20 21:25:26 -03:00
Victor Shyba
00713c0d11 asyncgens 2020-12-20 21:25:26 -03:00
Victor Shyba
751b5f3027 refactor duplicate code 2020-12-20 21:25:26 -03:00
Victor Shyba
e8261b000e wip, see jack 2020-12-20 21:23:58 -03:00
Victor Shyba
41ecb70297 join network can only happen after initial header sync returns 2020-12-20 21:15:11 -03:00
Jack Robison
09ee104b8c
Merge pull request #3120 from lbryio/fix-reorg-crash
Fix off by one in wallet server reorg
2020-12-16 02:36:11 -05:00
Jack Robison
e3a4964787
catch any remaining index errors in fs_tx_hash 2020-12-16 01:28:30 -05:00
Jack Robison
9bf72910a4
fix off by one error in reorg 2020-12-16 01:27:03 -05:00
Jack Robison
ee39e20e6d
logging 2020-12-16 01:26:19 -05:00
Jack Robison
399d6db6f6
Merge branch 'disable-refresh-histogram' 2020-12-15 20:13:20 -05:00
Jack Robison
0821ce44b5
remove unused heavy call 2020-12-15 19:22:12 -05:00
Jack Robison
ea279111c6
Merge pull request #3113 from lbryio/leveldb-performance
Improve performance of fetching transactions and sending address notifications
2020-12-14 16:39:50 -05:00
Jack Robison
674ce02e58
logging 2020-12-14 14:38:36 -05:00
Jack Robison
8dfa2767ec
new_touched 2020-12-14 13:52:26 -05:00
Jack Robison
20dad7f07f
only notify hashxs touched since last notification 2020-12-14 13:42:20 -05:00
Jack Robison
751cc4c44d
don't deserialize mempool in a thread 2020-12-13 13:48:50 -05:00
Jack Robison
2318e6d8e9
faster fs_transactions 2020-12-13 13:48:02 -05:00
Jack Robison
61b4a492c3 v0.86.1 2020-12-08 16:41:46 -05:00
Jack Robison
9db3d01e09
Merge pull request #3105 from lbryio/dont-rerequest-mempool
fix mempool txs being re-requested
2020-12-08 16:00:24 -05:00
Jack Robison
8da73ad3dd
improve hash_to_hex_str performance 2020-12-08 15:39:03 -05:00
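Bitcoin-style APIs display hashes byte-reversed; one plausible shape of this micro-optimization (illustrative, not necessarily the exact change) is leaning on bytes.hex() instead of hand-rolled formatting:

    def hash_to_hex_str(h: bytes) -> str:
        # display convention: reverse the raw bytes, then hex-encode
        return h[::-1].hex()

    assert hash_to_hex_str(b"\x00" * 31 + b"\x01").startswith("01")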
Jack Robison
b8c16d8ac5
fix mempool txs being re-requested with each address notification 2020-12-08 15:39:03 -05:00
Jack Robison
429c0951f3 v0.86.0 2020-12-07 21:19:24 -05:00
Jack Robison
74e103c791
version 2020-12-07 21:15:15 -05:00
Jack Robison
f941950ee2
fix comment_create docs 2020-12-07 21:13:43 -05:00
Jack Robison
846df2eef1 v0.86.0 2020-12-07 15:58:05 -05:00
Jack Robison
34ed058c97
Merge pull request #3103 from lbryio/fix-mempool-bloat
fix mempool notification bloat
2020-12-07 15:09:13 -05:00
Jack Robison
eae0290978
fix mempool notification bloat 2020-12-07 13:16:37 -05:00
Jack Robison
561368570e
Merge pull request #3102 from lbryio/faster-notifications
Improve performance of mempool processing loop
2020-12-03 22:43:11 -05:00
Jack Robison
3467d1fed0
fix tests 2020-12-03 22:16:28 -05:00
Jack Robison
d02ff232e5
_single_batch 2020-12-03 20:50:25 -05:00
Jack Robison
2d1c6a5402
fix 2020-12-03 19:15:15 -05:00
Jack Robison
eab3b65629
calculate notifications for all subscriptions instead of per session 2020-12-03 19:15:15 -05:00
Jack Robison
20b435732a
Merge pull request #3099 from lbryio/increase-tx-cache-size
Increase default `tx_cache_size`
2020-12-03 19:14:34 -05:00
Jack Robison
929617273d
resync blockchain.db 2020-12-03 15:56:35 -05:00
Jack Robison
2717bf7d49
increase default tx_cache_size setting 2020-12-03 15:56:35 -05:00
Jack Robison
5cd2ebc960
Merge pull request #3098 from lbryio/cached-txids
store txids in memory, faster address subscription/history
2020-12-02 14:08:07 -05:00
Jack Robison
9b4afe9816
store txids in memory, faster address subscription/history 2020-11-25 16:30:11 -05:00
Jack Robison
23bb5598d5
Merge pull request #3095 from lbryio/fix-transaction-getbatch-order
ensure transactions are returned in the order they're requested
2020-11-24 17:21:12 -05:00
Jack Robison
af1d7813e9
ensure transactions are returned in the order they're requested 2020-11-24 11:52:07 -05:00
Jack Robison
16c2e5a585
Merge pull request #3093 from lbryio/fix-merkle
fix off-by-one when getting block txs for tx_merkle
2020-11-23 19:08:11 -05:00
Jack Robison
c02750edbd
fix tx_merkle 2020-11-23 16:48:24 -05:00
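tx_merkle builds a proof over the exact ordered list of a block's transactions, so fetching one tx too few or too many (the off-by-one) corrupts every level above it. For reference, a self-contained root computation over a non-empty list of tx hashes:

    from hashlib import sha256

    def double_sha256(b: bytes) -> bytes:
        return sha256(sha256(b).digest()).digest()

    def merkle_root(tx_hashes):
        level = list(tx_hashes)  # must be non-empty and in block order
        while len(level) > 1:
            if len(level) % 2:           # odd count: duplicate the last hash
                level.append(level[-1])
            level = [double_sha256(level[i] + level[i + 1])
                     for i in range(0, len(level), 2)]
        return level[0]              # single-tx block: root == the txid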
Jack Robison
7204ddafec
Merge pull request #3092 from lbryio/faster-transaction-getbatch
faster `blockchain.transaction.get_batch`
2020-11-23 12:29:14 -05:00
Jack Robison
faeba9a7e4
faster blockchain.transaction.get_batch 2020-11-23 12:05:47 -05:00
Jack Robison
190d238a1f
faster read_headers 2020-11-23 12:03:11 -05:00
Jack Robison
715451b5fb v0.85.0 2020-11-21 00:14:20 -05:00
Jack Robison
87f1895405
Merge pull request #3090 from lbryio/fast-headers
store headers in memory on wallet server
2020-11-20 23:33:18 -05:00
Jack Robison
923d817751
store headers in memory 2020-11-20 22:37:42 -05:00
Jack Robison
0728209b66
Merge pull request #3089 from lbryio/force_checkpoint
Ensure no client .wal files remain on shutdown
2020-11-20 21:12:33 -05:00
Victor Shyba
b8b9dcc2ee WAL_CHECKPOINT on close 2020-11-20 20:16:03 -03:00
Jack Robison
f35e879852
Merge pull request #3088 from lbryio/batched-inflate-outputs
fix flooding with resolve/claim search
2020-11-20 17:43:56 -05:00
Jack Robison
34f4f12eb9
pylint 2020-11-20 16:15:57 -05:00
Jack Robison
fa63bf758d
delete single_call_context, use session pool 2020-11-20 15:52:11 -05:00
Jack Robison
f6b396ae64
Merge branch 'fix-reorg' 2020-11-20 13:32:59 -05:00
Jack Robison
2c7fd58e34
threaded compress headers 2020-11-20 13:09:54 -05:00
Jack Robison
982f2c9634
fix test 2020-11-20 10:57:28 -05:00
Jack Robison
f2fd42b47a
use write_batch 2020-11-19 18:37:49 -05:00
Jack Robison
1b4ccad938
allow settings TRENDING_ALGORITHMS to nothing 2020-11-19 18:36:34 -05:00
Jack Robison
a9de1ce8e0
fix fs_transactions 2020-11-19 12:40:46 -05:00
Jack Robison
ac752d5ec2
Merge pull request #3082 from lbryio/faster-leveldb
store transactions for wallet server in leveldb instead of requesting from lbrycrd
2020-11-17 17:02:11 -05:00
Jack Robison
632d8d02d2
remove dead code 2020-11-17 16:17:24 -05:00
Jack Robison
48aeb26e02
threaded_get_merkle
-remove dead code
2020-11-17 16:05:08 -05:00
Jack Robison
1694af8b5e
get transactions from leveldb instead of lbrycrd rpc 2020-11-17 15:30:48 -05:00
Jack Robison
83bcab9cd2
cleanup 2020-11-17 15:30:48 -05:00
Jack Robison
bdc7f4b3f5
combine tx_count_db and hashes_db, add tx db 2020-11-17 15:30:48 -05:00
Lex Berezhny
39202a3d79 old sequence resolution test 2020-11-17 11:24:29 -05:00
Lex Berezhny
912065a121 remove url sequence feature from code and tests 2020-11-17 11:24:29 -05:00
Thomas Zarebczan
c8466afac2
Remove * from invalid characters
This is already part of other URLs and channels; it will be removed from the spec as it won't be used for sequencing.
2020-11-16 14:09:04 -05:00
Victor Shyba
2619e162c1 recover invalid state described on #3026 2020-11-09 16:03:38 -05:00
Lex Berezhny
e1112e17f8 v0.84.0 2020-11-08 19:42:33 -05:00
Jack Robison
92b2ead74c
Merge pull request #3078 from lbryio/null-language
support `none` argument to `any_languages` filter in `claim_search`
2020-11-05 22:09:11 -05:00
Jack Robison
bbed9b94c1
null language filtering for claim search 2020-11-05 21:23:49 -05:00
Jack Robison
73d07311db
Merge branch 'brendon' 2020-11-05 21:21:24 -05:00
Brendon J. Brewer
1cdff47477
Rewrite of variable_decay.py for speed improvements 2020-11-05 21:15:55 -05:00
Jack Robison
511a5c3f82
Merge pull request #3058 from lbryio/faster-resolve
faster resolve and claim_search
2020-11-05 21:09:07 -05:00
Jack Robison
853885e2ff
debug 2020-11-05 19:25:34 -05:00
Jack Robison
d83936a66a
fix uncaught error 2020-11-04 22:21:35 -05:00
Jack Robison
5517d2bf56
fix new_sdk_server arg for resolve 2020-11-03 16:23:31 -05:00
Jack Robison
f21ab49ac5
bump aioupnp requirement 2020-11-03 10:39:38 -05:00
Jack Robison
925a458abe
tags 2020-11-03 10:39:08 -05:00
Jack Robison
76946c447f
use single_call_context for claim_search and resolve 2020-11-03 10:39:08 -05:00
Jack Robison
2faa29b1c4
fix dht_monitor script 2020-11-03 10:39:08 -05:00
Lex Berezhny
6826cc311d update test to use new url spec 2020-10-30 11:37:54 -04:00
Lex Berezhny
5e17ce0a0b minor fixup 2020-10-30 11:37:54 -04:00
Kevin Raoofi
e8d299d3b6 Allow : or # for claim_id
This removes the code for trying multiple patterns and the setup for it

Added a few unit tests to check that the parsed URL is as expected
2020-10-30 11:37:54 -04:00
Kevin Raoofi
7637aa2ab6 Added support for spec compliant URL parsing
Legacy URLs are preserved by attempting to parse the new URL format and,
on failing that, it'll attempt the legacy one.

Tests had to be updated such that the correct things are asserted
against each other.
2020-10-30 11:37:54 -04:00
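The compatibility strategy these two commits describe, as a sketch; parse_new and parse_legacy are stand-ins for the real URL grammar:

    def parse_new(url: str) -> dict:
        # stand-in: spec URLs use ':' between name and claim id
        name, sep, claim_id = url.partition(":")
        if not sep:
            raise ValueError("not a spec-compliant URL")
        return {"name": name, "claim_id": claim_id}

    def parse_legacy(url: str) -> dict:
        # stand-in: legacy URLs used '#'
        name, _, claim_id = url.partition("#")
        return {"name": name, "claim_id": claim_id}

    def parse_url(url: str) -> dict:
        try:
            return parse_new(url)      # try the new grammar first
        except ValueError:
            return parse_legacy(url)   # on failure, attempt the legacy one

    assert parse_url("lbry#abc")["claim_id"] == "abc"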
Jack Robison
ab067d1d3a fix unclean closing of blockchain.db 2020-10-26 16:23:28 -04:00
Lex Berezhny
e6f84666c7 added glorious data in dictionary doc for support_sum 2020-10-16 12:03:01 -04:00
Lex Berezhny
4c5429af15 fix lint 2020-10-16 12:03:01 -04:00
Alex Grintsvayg
0a0ac3b7c9 pass-through for new support_sum api 2020-10-16 12:03:01 -04:00
Lex Berezhny
24833ce9fb v0.83.0 2020-10-10 13:03:42 -04:00
Lex Berezhny
ec2c18dc87 regenerate docs 2020-10-10 13:02:38 -04:00
jessop
7384609e74 support comment pinning 2020-10-10 13:02:38 -04:00
Lex Berezhny
3047649650 v0.82.0 2020-09-29 08:49:16 -04:00
kodxana
d298dac3f3 Fixed docker-compose - no snapshot error
Currently new containers are not provided snapshots from LBRYinc (Wallet and CRD), which causes them to get into a restart loop.
2020-09-29 08:43:49 -04:00
Lex Berezhny
1574bca8a8
Merge pull request #3054 from lbryio/ios-support
iOS support
2020-09-29 08:42:17 -04:00
Lex Berezhny
ec2f6c6b80 lint 2020-09-29 07:52:33 -04:00
Akinwale Ariwodola
838cc60161 conditional import 2020-09-29 07:52:33 -04:00
Akinwale Ariwodola
310c61a5cc check KIVY_BUILD env variable 2020-09-29 07:52:33 -04:00
Lex Berezhny
318cc15323
Merge pull request #3045 from lbryio/bencode-byte-keys
add forward compatibility for byte datagram keys
2020-09-29 07:23:49 -04:00
Jack Robison
3a64ceb4d6
add forward compatibility for byte datagram keys 2020-09-28 15:56:13 -04:00
Lex Berezhny
d0f21c0095
Merge pull request #3050 from lbryio/language-indexes
add indexes for `any_languages` argument to `claim_search`
2020-09-28 15:52:41 -04:00
Jack Robison
46dc15dd29
use language indexes 2020-09-28 14:45:51 -04:00
Jack Robison
8dc654b513
add language table and indexes 2020-09-28 14:45:51 -04:00
Jack Robison
7000ac3f3f
update indexed tags 2020-09-28 14:45:51 -04:00
Lex Berezhny
43c2e8d8e9
Merge pull request #3051 from lbryio/comments-v2
Comments v2 with Reactions
2020-09-28 14:44:54 -04:00
jessop
0231139b01 support anonymous react_list 2020-09-27 16:36:51 -04:00
jessop
d6ee6446dd react list takes comment_ids 2020-09-26 15:21:26 -04:00
Lex Berezhny
7b666efcf8 lbrycrd download path 2020-09-24 20:11:40 -04:00
Lex Berezhny
eb5d2198fc lint 2020-09-24 20:09:46 -04:00
Lex Berezhny
34e44ebd1c minor cleanup and lint fixes 2020-09-24 20:03:22 -04:00
jessop
bf2f4bc040 wip 2020-09-24 17:47:59 -04:00
jessop
9dc4559aba new comment and reactions api 2020-09-23 16:43:28 -04:00
Lex Berezhny
eba8856261 v0.81.0 2020-08-19 15:47:53 -04:00
Lex Berezhny
a8c5aa471a update docs 2020-08-19 15:46:29 -04:00
Lex Berezhny
52f6dcf092
Merge pull request #3021 from lbryio/debug-reflector
Improve reflector upload cancellation handling
2020-08-19 15:44:27 -04:00
Jack Robison
dec79f3742 pylint 2020-08-19 15:24:17 -04:00
Jack Robison
8bdcac0f3e close reader before deleting blob 2020-08-19 15:24:17 -04:00
Jack Robison
8426b674a3 improve reflector upload cancellation handling 2020-08-19 15:24:17 -04:00
Lex Berezhny
da391bcc8d
Merge pull request #3028 from lbryio/limit_claims_per_channel
added `--limit_claims_per_channel` argument to `claim_search` to only return up to the specified number of claims per channel
2020-08-19 15:22:53 -04:00
Lex Berezhny
2d7443d454 slight cleanup 2020-08-19 13:59:26 -04:00
Lex Berezhny
991987ed76 docopt fix 2020-08-19 12:08:41 -04:00
Lex Berezhny
ec24ebf2cf added --limit_claims_per_channel argument to claim_search to only return up to the specified number of claims per channel 2020-08-19 10:51:31 -04:00
Lex Berezhny
6ba0976085
Merge pull request #3022 from btzr-io/patch-2
Add webvtt mime type ( subtitles, captions )
2020-08-12 18:58:07 -04:00
Baltazar Gomez
2b88d01a01
Add webvtt mime type ( subtitles, captions ) 2020-08-12 02:25:01 -05:00
Lex Berezhny
0c09f24cbf v0.80.0 2020-08-04 16:53:52 -04:00
Lex Berezhny
61d22afeba
Merge pull request #3016 from lbryio/release_all_outputs_on_start
release reserved outputs on startup
2020-08-04 16:53:15 -04:00
Lex Berezhny
9f1ed6e8c3 release_all_outputs on startup 2020-08-04 15:03:58 -04:00
Lex Berezhny
bbc4113cac
Merge pull request #3015 from lbryio/new_sdk_forwarding
added support for using the new SDK for `resolve` and `claim_search` by passing `--new_sdk_server`
2020-08-04 15:03:39 -04:00
Lex Berezhny
91194bf422 doc fix 2020-08-04 14:12:40 -04:00
Lex Berezhny
9c5f940b00 claim search forwarding to new sdk 2020-08-04 12:11:02 -04:00
Lex Berezhny
455b4043b8 new resolve 2020-08-04 11:33:39 -04:00
Lex Berezhny
bd83ee7931
Merge pull request #3010 from lbryio/feat-commentFaster
allow faster comment retrieval
2020-07-31 17:32:06 -04:00
jessop
f6bdf7c09a allow faster comment retrieval
switch default for include replies to false
2020-07-31 10:07:12 -04:00
Lex Berezhny
2db8afb8c2 v0.79.1 2020-07-21 21:59:15 -04:00
Lex Berezhny
e033129dd3
Merge pull request #3003 from lbryio/more_timestamp_fix
fix `modified_on` to always cast value to integer, also cast preferences timestamp to int
2020-07-21 21:58:48 -04:00
Lex Berezhny
f9dc590100 update tests 2020-07-21 20:56:50 -04:00
Lex Berezhny
8996aafe0d always cast modified_on to int() 2020-07-21 19:14:57 -04:00
Lex Berezhny
9dc25ef7af v0.79.0 2020-07-20 20:23:51 -04:00
Lex Berezhny
2b84f4d407
Merge pull request #3002 from lbryio/modified_on_int
fixed sync'ing issues related to `modified_on` timestamps having varying floating value between OSes
2020-07-20 20:22:43 -04:00
Lex Berezhny
097c8b674c fix modified_on when modifying account 2020-07-20 14:28:19 -04:00
Lex Berezhny
ba649d4b94 convert modified_on timestamp to int to avoid floating point conflicts when syncing 2020-07-20 14:24:53 -04:00
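The fix is as small as it sounds: floats from time.time() round-trip differently across OSes and serializers, so truncate once when storing. Sketch:

    import time

    modified_on = int(time.time())  # stable across platforms; floats were not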
Lex Berezhny
6ed1614db0
Merge pull request #2997 from lbryio/fix-reflector-lost-connection
Fix uncaught reflector connection errors
2020-07-20 13:48:26 -04:00
Jack Robison
df5b6a8380
feedback 2020-07-20 12:20:00 -04:00
Jack Robison
1f82a8b99e
catch reflector error if a blob is deleted while it's being sent 2020-07-16 12:15:51 -04:00
Jack Robison
0c95d96f32
update pyyaml requirement to 5.3.1 2020-07-16 12:15:51 -04:00
Jack Robison
c2f5f84118
fix uncaught errors from broken reflector connections 2020-07-16 12:15:47 -04:00
Jack Robison
b3b5e3d8f0
test reflector connection breaking mid-transfer 2020-07-15 16:42:55 -04:00
Lex Berezhny
506d3f3cd9 v0.78.0 2020-07-14 23:33:34 -04:00
Lex Berezhny
516a8c5ee5
Merge pull request #2995 from lbryio/batch-sync
Batched sync for wallet transactions
2020-07-14 23:33:13 -04:00
Lex Berezhny
2f81e9d374 pylint 2020-07-14 23:13:14 -04:00
Jack Robison
2d8703bb8d pylint 2020-07-14 23:13:14 -04:00
Jack Robison
76e60d9bc3 logging 2020-07-14 23:13:14 -04:00
Jack Robison
9d5370be5f fix 2020-07-14 23:13:14 -04:00
Jack Robison
fc1a06bc45 fix 2020-07-14 23:13:14 -04:00
Jack Robison
fce80374f4 batched sync 2020-07-14 23:13:14 -04:00
Jack Robison
420c9f10c2 remove _update_cache_item 2020-07-14 23:13:14 -04:00
Jack Robison
5a39681a2e log 2020-07-14 23:13:14 -04:00
Jack Robison
7a1b7db7c8 support multiple blocks with blockchain.transaction.get_batch 2020-07-14 23:13:14 -04:00
Jack Robison
03a643da52 use block cache 2020-07-14 23:13:14 -04:00
Lex Berezhny
383f0ce450
Merge pull request #2986 from Death916/master
check sample rate and lower if too high
2020-07-14 23:01:05 -04:00
Lex Berezhny
c6c668676c changed video_bitrate_maximum from 8.4MB to 5MB 2020-07-14 21:54:42 -04:00
Trent N
7b01dde063 convert sample_rate to integer and don't force a volume_filter
added volume_filter example

deleted _get_volume_filter
2020-07-14 21:49:25 -04:00
Trent
8c25f65024 check sample rate and lower if too high 2020-07-14 21:49:25 -04:00
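The two commits above describe the check in words: read the source sample rate as an integer and only resample when it exceeds a maximum. A sketch, where the 48 kHz ceiling is an assumption rather than the project's actual default:

    MAX_SAMPLE_RATE = 48000  # assumed ceiling, not necessarily the real value

    def target_sample_rate(reported) -> int:
        rate = int(reported)               # analysis tools report it as a string
        return min(rate, MAX_SAMPLE_RATE)  # lower only if too high

    assert target_sample_rate("96000") == 48000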
Lex Berezhny
1e478e3545
Merge pull request #2996 from DispatchCommit/patch-1
updates HLS .m3u8 media type and adds new DASH .mpd media type
2020-07-14 19:47:22 -04:00
Dispatch
6c75a8978b Fix bug in python CICD pipeline
implemented fix recommended by:
https://stackoverflow.com/a/61693590
https://github.com/pypa/setuptools/issues/1963
2020-07-14 19:24:48 -04:00
Lex Berezhny
640267fc2b
Merge pull request #2992 from lbryio/2987_debug
log task errors and add debug information for reflector task
2020-07-14 19:20:15 -04:00
Dispatch
33c7c3ee12
Fixes HLS .m3u8 and DASH .mpd extension
HLS uses a playlist .m3u8 file to provide video content as Transport Segment (.ts) files.
DASH uses a playlist .mpd file to provide video content in a few different containers.
2020-07-10 05:39:46 -07:00
Victor Shyba
f9b41d34ae expose reflector task errors, add debug statements for ongoing issue 2020-07-07 00:39:59 -03:00
Lex Berezhny
d4bec79451
Merge pull request #2981 from lbryio/wallet-server-meta-db
Move wallet server tx hashes and headers to leveldb to speed up the rate mempool notifications are sent
2020-07-06 10:50:24 -04:00
Jack Robison
ac1a8b4daf
metric for time to send notifications 2020-07-02 18:57:36 -04:00
Jack Robison
28838c1759
notifications_in_flight_metric 2020-07-02 18:57:36 -04:00
Jack Robison
50ecb0dac9
fix notify 2020-07-02 18:57:36 -04:00
Jack Robison
e22bc01cbd
re-add wakeup event, add address history metric 2020-07-02 18:57:36 -04:00
Jack Robison
6c28713a4c
read history in one loop 2020-07-02 18:57:36 -04:00
Jack Robison
fc9023386c
non-blocking history lookup in notify 2020-07-02 18:57:36 -04:00
Jack Robison
e6cae9bcc3
remove mempool wakeup event, lower refresh delay 2020-07-02 18:57:36 -04:00
Jack Robison
a9eeca1302
mempool processing time metric 2020-07-02 18:57:36 -04:00
Jack Robison
8c695e42ca
fix sqlite coin chooser floor 2020-07-02 18:57:36 -04:00
Jack Robison
0aa7fd47d5
combine loops 2020-07-02 18:57:36 -04:00
Jack Robison
70596042d6
mempool_process_time_metric 2020-07-02 18:57:36 -04:00
Jack Robison
caf616234b
flush databases during sync 2020-07-02 18:57:36 -04:00
Jack Robison
375187aa70
tx hashes db 2020-07-02 18:57:36 -04:00
Jack Robison
71eccdc0e3
hashes path 2020-07-02 18:57:36 -04:00
Jack Robison
639b1e48f5
blocks dir 2020-07-02 18:57:36 -04:00
Jack Robison
0bb4cb4472
tx count db 2020-07-02 18:57:36 -04:00
Jack Robison
cc51543851
headers db 2020-07-02 18:57:36 -04:00
Jack Robison
22540390e1
break the wallet server with chris45 test 2020-07-02 18:57:36 -04:00
Jack Robison
98565eb67c
run read_raw_block in executor 2020-07-02 18:57:36 -04:00
Lex Berezhny
cf6a47ecb7
Merge pull request #2989 from lbryio/blocking-wallet-send
add `--blocking` to `account_send` and `wallet_send`
2020-07-02 18:45:43 -04:00
Jack Robison
fa60b9f9d3
logging 2020-07-02 17:35:38 -04:00
Jack Robison
644120ca31
add --blocking to account_send 2020-07-02 17:32:32 -04:00
Jack Robison
a50a625b3b
add --blocking arg to wallet_send 2020-07-02 17:32:32 -04:00
Lex Berezhny
7297c13331 v0.77.0 2020-06-24 11:32:41 -04:00
Jack Robison
f73399bfac fix test 2020-06-24 11:13:29 -04:00
Jack Robison
a056cd78f7 remove loggly 2020-06-24 11:13:29 -04:00
Lex Berezhny
a30f3c86c2 v0.76.0 2020-06-08 23:11:41 -04:00
Lex Berezhny
e70bdd86a7
Merge pull request #2959 from lbryio/sqlite-coin-chooser
Add all sqlite coin chooser
2020-06-08 18:46:42 -04:00
Jack Robison
bc9f33c2e0
fix test 2020-06-08 18:06:58 -04:00
Jack Robison
872b89ee93
fix mempool conflicts following cancelled api calls that send transactions with the blocking flag 2020-06-05 20:06:42 -04:00
Jack Robison
ae53062518
integration test for sqlite coin chooser 2020-06-05 20:06:42 -04:00
Jack Robison
17f76c9cb3
leave inputs for rejected tx reserved 2020-06-05 20:06:42 -04:00
Jack Robison
5de944146a
logging 2020-06-05 20:06:42 -04:00
Jack Robison
9dc6092cb0
update tests 2020-06-05 20:06:42 -04:00
Jack Robison
a32a2ef04e
add sqlite coin chooser 2020-06-05 20:06:42 -04:00
Lex Berezhny
ecfa0ae3da
Merge pull request #2974 from lbryio/signed_supports
added signed supports
2020-06-05 16:28:15 -04:00
Lex Berezhny
03595052ce fix regular support create tests 2020-06-05 16:08:25 -04:00
Lex Berezhny
1f94c53dd2 added support protobuf 2020-06-05 15:50:00 -04:00
Lex Berezhny
9c426373f2 lint 2020-06-05 15:49:18 -04:00
Lex Berezhny
c03e30a01f added support for signed supports 2020-06-05 15:49:18 -04:00
Lex Berezhny
07f7a77ac0
Merge pull request #2971 from lbryio/faster-publish
Speed up publish response time by not blocking on updating the content claim db
2020-06-05 15:47:40 -04:00
Lex Berezhny
3b9ea2c9a4
Merge pull request #2972 from lbryio/history-cache
Improve wallet server address history cache and the rate of sent notifications
2020-06-05 15:47:07 -04:00
Jack Robison
1beb13dd80
fix attribute error 2020-06-04 09:31:37 -04:00
Jack Robison
ddae84abb3
fix pending_count 2020-06-04 09:31:37 -04:00
Jack Robison
863b9a2c98
don't block returning from publish on updating the content claim table 2020-06-04 09:31:37 -04:00
Jack Robison
9d44bbdb48
don't block the notification loop on sending the notifications 2020-06-04 09:25:41 -04:00
Jack Robison
8d93dd5adc
improve wallet server address history cache 2020-06-04 09:15:21 -04:00
Lex Berezhny
48502961cf v0.75.0 2020-06-03 14:44:02 -04:00
Lex Berezhny
3c8bec61d3
Merge pull request #2966 from lbryio/check-origin
add `allowed_origin` to config, by default no longer allow any requests which pass Origin in header
2020-06-03 14:39:57 -04:00
Lex Berezhny
7296c7df1a Origin: null no longer allowed 2020-06-03 14:19:16 -04:00
Lex Berezhny
f3ee6603de improve allowed_origin request handling 2020-06-03 13:55:20 -04:00
Lex Berezhny
ee0aabda1d backwards compatible allowed_origin, default browsers not allowed 2020-06-03 13:28:32 -04:00
Jack Robison
08d37a4b0f add allowed_origin to config
-raise 403 error if a request doesn't have a matching origin
2020-06-03 12:55:24 -04:00
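A minimal shape of that check (aiohttp-style, hypothetical names): reject any request whose Origin header is present but not whitelisted.

    from aiohttp import web

    ALLOWED_ORIGIN = "https://app.example"  # stand-in for the config value

    async def handle(request: web.Request) -> web.Response:
        origin = request.headers.get("Origin")
        if origin is not None and origin != ALLOWED_ORIGIN:
            raise web.HTTPForbidden(reason="disallowed origin")
        return web.json_response({"ok": True})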
Jack Robison
f975ea99cb
Merge branch 'endes123321-master' 2020-06-02 15:59:53 -04:00
Jack Robison
f030d41dc7
add test_is_valid_ipv4 2020-06-02 15:56:57 -04:00
Jack Robison
8d079bfcd1
Merge branch 'master' into master 2020-06-02 15:52:04 -04:00
Lex Berezhny
d39b8654a6
Merge pull request #2968 from lbryio/transcode-adjust
more aggressive video transcoding to reduce file sizes
2020-06-01 13:56:33 -04:00
Thomas Zarebczan
ce7816a968 more aggressive video transcoding
Have noticed the defaults aren't aggressive enough to stream smoothly (yet). Downgrade max rate to 5500K; a higher CRF means a smaller file size for now.
2020-06-01 13:11:56 -04:00
Jack Robison
2ee572e68f
Merge pull request #2967 from lbryio/transaction-cache-size
add `transaction_cache_size` to config
2020-05-27 00:57:48 -04:00
Jack Robison
4bbd850898
fix uncaught ValueError in hashX_unsubscribe 2020-05-25 10:25:04 -04:00
Jack Robison
34eae6e608
fix wallet server prometheus bucket sizes 2020-05-25 10:24:31 -04:00
Jack Robison
6a0302fec6
fix uncaught dht DecodeError 2020-05-25 10:23:11 -04:00
Jack Robison
c94cc293c2
fix uncaught errors in test_component_manager 2020-05-25 10:21:36 -04:00
Jack Robison
cae7792a1e
add transaction_cache_size to config 2020-05-25 10:16:18 -04:00
Lex Berezhny
7f6b2fe4f1 v0.74.0 2020-05-18 16:56:58 -04:00
Lex Berezhny
eba430bbc0
Merge pull request #2957 from lbryio/file-list-download-path
added `download_path` as a filter for `file_list` command
2020-05-18 14:15:44 -04:00
Akinwale Ariwodola
01280c8d04 update docstring for download_path 2020-05-18 18:52:13 +01:00
Lex Berezhny
ec141ac9c9
Merge pull request #2960 from lbryio/leaky_ship
fixed bug with leaky information between outputs
2020-05-18 13:21:08 -04:00
Lex Berezhny
590c892a6a re-set channel on txo 2020-05-18 12:27:22 -04:00
Lex Berezhny
ff8a50c366 fixed bug with leaky information between outputs 2020-05-18 11:16:01 -04:00
Lex Berezhny
46ef6c8ab7
Merge pull request #2947 from thebubbleindex/patch-1
properly handle integer environment values, fixes issue with specifying ports via environment vars
2020-05-18 11:11:10 -04:00
Lex Berezhny
b09eabc478 minor simplification 2020-05-18 08:53:34 -04:00
thebubbleindex
e49fcea6e3 fix issue with specifying ports via env vars
make sure tcp and udp port for dht are int type
2020-05-18 08:53:07 -04:00
Akinwale Ariwodola
68ed9f4ffc add download_path property to managed_stream 2020-05-17 12:12:31 +01:00
Akinwale Ariwodola
af94687d45 add download_path as a filter field for file_list 2020-05-17 10:32:26 +01:00
Jack Robison
26964ecf0f
fix download_blob_from_peer.py 2020-05-13 09:24:35 -04:00
Jack Robison
77d19af359 v0.73.1 2020-05-13 09:21:48 -04:00
Jack Robison
120b82f243
Merge pull request #2952 from lbryio/newwait
fix case where publish hangs until a new block
2020-05-12 23:10:03 -04:00
Victor Shyba
a0fea30a11
make wait check every second instead of once 2020-05-12 19:44:02 -04:00
Jack Robison
8bb3e0a64d
Merge pull request #2955 from lbryio/fix-set-node
fix node not being set on the downloader in some cases
2020-05-12 18:49:03 -04:00
Jack Robison
bbded12923
fix node not being set on the downloader in some cases 2020-05-12 17:50:20 -04:00
Lex Berezhny
e8ba5d7606 v0.73.0 2020-05-11 19:47:08 -04:00
Jack Robison
af66b31a44
Merge pull request #2954 from lbryio/file-list-reflector-args
add `uploading_to_reflector`,  `is_fully_reflected`, `completed`, and `status` filter arguments to `file_list`, update to match all filters rather than any filters
2020-05-11 19:45:51 -04:00
Jack Robison
b000a40f28
add completed filter arg to file_list 2020-05-11 19:22:53 -04:00
Jack Robison
3c85322523
add status arg to file_list cli 2020-05-11 19:16:08 -04:00
Jack Robison
a469b8bc04
return streams matching all file_list filters rather than those matching any
-fix filter fields when using sets
2020-05-11 19:11:41 -04:00
Jack Robison
78b8261a3a
cancel pending reflector request when connection is lost
-add 180s timeout
2020-05-11 16:08:48 -04:00
Jack Robison
f20ca70c01
add uploading_to_reflector and is_fully_reflected filter arguments to file_list 2020-05-11 15:48:34 -04:00
Jack Robison
4e4148fc1c
Merge pull request #2953 from lbryio/ecdsa-executor
fix channel key generation blocking globally
2020-05-11 15:16:17 -04:00
Jack Robison
c22482f907
channel private key generation in a thread pool 2020-05-11 14:54:31 -04:00
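ECDSA key generation is CPU-bound, so doing it inline stalls the event loop; the fix offloads it to a thread pool. A generic sketch with a stand-in for the actual key routine:

    import asyncio
    import os

    def generate_key() -> bytes:
        return os.urandom(32)  # stand-in for the real (CPU-bound) ECDSA derivation

    async def generate_channel_key() -> bytes:
        loop = asyncio.get_running_loop()
        # run_in_executor keeps the event loop responsive while we grind
        return await loop.run_in_executor(None, generate_key)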
Jack Robison
870e139fce
Merge pull request #2951 from lbryio/add-fixed-peers-conf
add `fixed_peers` setting to config
2020-05-11 14:54:01 -04:00
Jack Robison
4d58648c02
update default fixed peer to cdn.reflector.lbry.com 2020-05-11 14:52:31 -04:00
Jack Robison
ebbb182537
fix test 2020-05-11 14:06:23 -04:00
Jack Robison
1cd5377b45
split fixed peer setting out from reflector_servers 2020-05-11 13:43:13 -04:00
Lex Berezhny
1d1f0527ee
Merge pull request #2948 from lpessin/patch-1
fix duplicate line on api doc
2020-05-07 17:59:57 -04:00
Jack Robison
37a5f77415
Merge pull request #2930 from lbryio/source_manager_ 2020-05-07 16:10:01 -04:00
Luiz
ced3c7efe4
fix duplicate line on api doc
delete line 3616 (duplicate line 3618)
2020-05-07 13:37:46 -03:00
Victor Shyba
c3b8f366ed fixes from review 2020-05-07 04:04:55 -03:00
Victor Shyba
de78876b1a fix test purchase 2020-05-07 04:04:55 -03:00
Victor Shyba
64c25b049c fixup get_filtered from rebase 2020-05-07 04:04:55 -03:00
Victor Shyba
8a4fe4f3ad lbry-libtorrent is now on pypi 2020-05-07 04:04:55 -03:00
Victor Shyba
8811b8c1fd use ubuntu 18 on gitlab, temporarily 2020-05-07 04:04:55 -03:00
Victor Shyba
190b01fdf9 calculate total bytes outside of dict 2020-05-07 04:04:55 -03:00
Victor Shyba
f145d08c10 tell progress, stop trying to read first piece 2020-05-07 04:04:55 -03:00
Victor Shyba
53382b7e15 wait started event 2020-05-07 04:04:55 -03:00
Victor Shyba
6ad0242617 find and show largest file 2020-05-07 04:04:55 -03:00
Victor Shyba
a7c2408c0a fix and test delete with torrents 2020-05-07 04:04:55 -03:00
Victor Shyba
ce1eabaed6 fix moving to a new btih 2020-05-07 04:04:55 -03:00
Victor Shyba
f602541ede fix not knowing a torrent exists 2020-05-07 04:04:55 -03:00
Victor Shyba
3f718e6efc gitlab: use ubuntu on datanetwork tests 2020-05-07 04:04:54 -03:00
Victor Shyba
ce7a985df6 add boost on gitlab, fix failing test, add libtorrent to linux build 2020-05-07 04:04:54 -03:00
Victor Shyba
6d83f7e7bd correct wheel with boost 1.65 2020-05-07 04:04:54 -03:00
Victor Shyba
b73c00943c linting and minor refactor 2020-05-07 04:04:54 -03:00
Victor Shyba
abaac8ef48 fixes from rebase, install libtorrent from s3 2020-05-07 04:04:54 -03:00
Victor Shyba
a2f8e7068e pylint 2020-05-07 04:04:54 -03:00
Victor Shyba
4d47873219 working file list after torrent get 2020-05-07 04:04:54 -03:00
Victor Shyba
cf985486e5 torrent test and misc fixes 2020-05-07 04:04:54 -03:00
Victor Shyba
b930c3fc93 fix torrent and stream manager reference leftovers 2020-05-07 04:04:54 -03:00
Victor Shyba
dd26a96828 adds more torrent parts 2020-05-07 04:04:54 -03:00
Victor Shyba
6865ddfc12 torrent manager and torrent source 2020-05-07 04:04:54 -03:00
Victor Shyba
e888e69d4d fix unit tests 2020-05-07 04:04:54 -03:00
Victor Shyba
2089059792 pylint 2020-05-07 04:04:54 -03:00
Victor Shyba
27739e0364 fix save from resolve 2020-05-07 04:04:54 -03:00
Victor Shyba
698ee271d6 stream manager component becomes file manager component 2020-05-07 04:04:54 -03:00
Victor Shyba
543c75b293 wip 2020-05-07 04:04:54 -03:00
Victor Shyba
b09c46f6f7 add torrent component 2020-05-07 04:04:54 -03:00
Jack Robison
f2cc19e6aa add lbry.torrent 2020-05-07 04:04:54 -03:00
Jack Robison
814a0a123f file manager refactor 2020-05-07 04:04:54 -03:00
Jack Robison
179383540f ManagedDownloadSource and SourceManager refactor 2020-05-07 04:04:54 -03:00
Lex Berezhny
8be1c8310d v0.72.0 2020-05-04 13:53:13 -04:00
Jack Robison
ef02d776ca
Merge pull request #2937 from lbryio/daemon-prometheus
fix database lockup and add prometheus db metrics
2020-05-04 13:51:12 -04:00
Jack Robison
750ff448ad
comments 2020-05-04 13:47:37 -04:00
Jack Robison
e3abab6d4d
pylint 2020-05-04 12:15:48 -04:00
Jack Robison
d3ffae72fb
buckets 2020-05-02 22:30:25 -04:00
Jack Robison
87f751188e
cancelled and failed api request metrics 2020-05-02 21:58:41 -04:00
Jack Robison
3469abaefd
write lock metrics 2020-05-02 21:23:17 -04:00
Jack Robison
797364ee5c
refactor prometheus metrics 2020-05-02 15:01:07 -04:00
Jack Robison
36c05fc4b9
move wallet server prometheus
-only run wallet server metrics for the wallet server
2020-05-01 12:54:35 -04:00
Jack Robison
79624febc0
prevent pileup of writes blocking reads 2020-05-01 12:48:41 -04:00
Jack Robison
0a9d4de126
include write lock in try/finally 2020-05-01 11:40:57 -04:00
Jack Robison
b6f6994db4
Merge pull request #2934 from lbryio/fix-disconnect-on-cancelled-request
don't close the wallet server connection upon a cancelled request
2020-04-30 12:22:46 -04:00
Jack Robison
ff7bed720a
don't close the connection upon a cancelled request 2020-04-29 12:32:31 -04:00
Lex Berezhny
00c0f48b02 v0.71.0 2020-04-27 12:21:04 -04:00
Lex Berezhny
94c45cf2a0
Merge pull request #2922 from lbryio/estimate_on_demand
estimate block timestamps on client only when necessary (header hasn't been downloaded yet)
2020-04-27 12:17:54 -04:00
Victor Shyba
58f77b2a1c load/dump header file using executor 2020-04-27 10:34:49 -04:00
Victor Shyba
7170e69b22 test fixes + leave tx plot always on estimations 2020-04-27 10:34:49 -04:00
Victor Shyba
239ee2437c estimate only whats not downloaded 2020-04-27 10:34:49 -04:00
Victor Shyba
ced368db31 hold headers file in memory during runtime 2020-04-27 10:34:49 -04:00
Lex Berezhny
6a991e5c15
Merge pull request #2914 from StripedMonkey/SM-Config-patch
use home directory if xdg directory not found (Linux)
2020-04-27 10:33:57 -04:00
Lex Berezhny
523d22262b pin pylint to 2.4.4 2020-04-27 10:18:39 -04:00
Noah
08197a327e fix missing xdg download location
Fixes an error in detecting xdg config locations when XDG_DOWNLOAD_DIR is not present in `user-dirs.dirs`
2020-04-27 10:11:16 -04:00
Jack Robison
b7cb2a7aa5
Merge pull request #2926 from lbryio/blocking-file-io
make file i/o in stream and blob creation and reads non-blocking
2020-04-24 11:07:24 -04:00
Jack Robison
decc5c74ef
don't block when reading a file when creating a stream 2020-04-23 00:35:08 -04:00
Jack Robison
fbe0f886b6
non blocking blob creation 2020-04-23 00:35:08 -04:00
Jack Robison
7e23d6e2ef
Merge pull request #2925 from lbryio/fix-reposts-tag
fix: reposts being returned for single tags
2020-04-23 00:29:04 -04:00
Tom
49458d1085 fix: reposts being returned for single tags 2020-04-22 23:16:30 -04:00
Lex Berezhny
289c12bd9a
Merge pull request #2924 from lbryio/claim_list_is_spent_fix
fix for `claim_list` incorrectly handling `--is_spent` flag
2020-04-22 11:34:16 -04:00
Lex Berezhny
9a6326b027 fix for claim_list incorrectly handling --is_spent flag 2020-04-22 10:36:09 -04:00
Lex Berezhny
51f573f1ea v0.70.0 2020-04-20 13:37:40 -04:00
Jack Robison
c8b7cd8862
Merge pull request #2920 from lbryio/uploading-to-reflector-status
add `uploading_to_reflector` to `file_list` results
2020-04-20 13:35:02 -04:00
Jack Robison
21c112d059
lbrycrd url 2020-04-20 12:16:31 -04:00
Jack Robison
9432e1b5b2
add uploading_to_reflector to file_list results 2020-04-20 11:57:09 -04:00
endes
8269d2f83c better lint style 2020-04-19 20:20:19 +01:00
endes
5a4b6be974 fix 2020-04-19 20:14:21 +01:00
endes123321
35e8ce60a9
Merge branch 'master' into master 2020-04-19 19:54:24 +01:00
endes
8b6dd9f603 better lint style 2020-04-19 19:51:03 +01:00
Lex Berezhny
084f0ebdab v0.69.1 2020-04-17 12:55:07 -04:00
Lex Berezhny
58640c1521
Merge pull request #2917 from lbryio/faster-repost-search-query
improve how claim search query handles reposts
2020-04-17 12:53:50 -04:00
Jack Robison
7ffdfd12f8
faster not tags 2020-04-17 10:47:01 -04:00
Jack Robison
cb9a30f285
faster query 2020-04-17 10:47:01 -04:00
Lex Berezhny
e48bef809f
Merge pull request #2919 from lbryio/exclude_internal_transfers_fix
fix issue with `--exclude_internal_transfers` where it was filtering out sent payments
2020-04-16 20:26:37 -04:00
Lex Berezhny
f5d7570102 fix issue with --exclude_internal_transfers where it was filtering out sent payments 2020-04-16 17:55:49 -04:00
Lex Berezhny
a600c60cf8 v0.69.0 2020-04-13 15:36:27 -04:00
Jack Robison
695eabd026
Merge pull request #2911 from lbryio/file_list_claim_ids
support `claim_id`, `channel_claim_id`, and `outpoint` args given as lists of values to match in `file_list`
2020-04-13 14:58:13 -04:00
Jack Robison
e81b51a647
support claim_id, channel_claim_id, and outpoint args in file_list being lists 2020-04-13 13:19:25 -04:00
Jack Robison
b7e95ff090
Merge pull request #2912 from lbryio/reflector-progress
add `reflector_progress` to `file_list` results
2020-04-13 13:00:59 -04:00
Jack Robison
3ca41be686
add reflector_progress to file_list results 2020-04-13 12:08:22 -04:00
Lex Berezhny
3152046173
Merge pull request #2910 from osilkin98/comment-schema-update
Comment_hide update
2020-04-13 10:34:30 -04:00
Lex Berezhny
3a98dc8a95 run apt-get update before installing 2020-04-13 10:14:52 -04:00
Lex Berezhny
d737b28916 trying actions/checkout v2 2020-04-13 09:57:59 -04:00
Oleg Silkin
97c0dac876 linter 2020-04-07 19:28:26 -04:00
Oleg Silkin
006494b1fa hide_comments now returns lists for both hidden and visible comments 2020-04-07 19:17:27 -04:00
Alex Grintsvayg
149d343201
drop a few unused conf vars 2020-04-07 16:11:23 -04:00
Lex Berezhny
496cc79ba8 v0.68.0 2020-04-06 12:55:21 -04:00
Jack Robison
3591768745
Merge pull request #2908 from lbryio/batched-resolve
Fix connection to wallet server breaking upon giant resolve requests
2020-04-06 12:52:51 -04:00
Jack Robison
d615f6761a
automatically batch large resolve requests 2020-04-06 12:03:33 -04:00
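The batching here guards against oversized requests breaking the wallet-server connection. A hedged sketch of the idea, with `resolve` and the batch size as assumptions:

```python
async def resolve_all(resolve, urls, batch_size=500):
    # split one giant resolve into several smaller round trips so no single
    # request exceeds what the wallet server will accept
    results = {}
    for i in range(0, len(urls), batch_size):
        results.update(await resolve(urls[i:i + batch_size]))
    return results
```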
Jack Robison
5b29894048
add reset clients counter to prometheus 2020-04-06 12:03:33 -04:00
Lex Berezhny
d17f0cfa40
Merge pull request #2909 from lbryio/none_for_unconfirmed_time
do not estimate timestamp for unconfirmed txs
2020-04-06 11:41:17 -04:00
Victor Shyba
b2f70c7120 return none for unconfirmed time estimation 2020-04-06 06:03:35 -03:00
Jack Robison
238707bd93
Merge pull request #2897 from lbryio/reorg-claims
handle claim reorgs in the wallet server
2020-04-05 17:48:56 -04:00
Jack Robison
87cdf1e3a0
improve test_reorg_change_claim_height 2020-04-05 16:58:36 -04:00
Jack Robison
57fd47022e
test_reorg_change_claim_height 2020-04-03 13:39:44 -04:00
Jack Robison
5eafd3bf6b
feedback 2020-04-03 13:39:44 -04:00
Jack Robison
640b5b0ea9
delete_claims_above_height with thread lock 2020-04-03 13:39:44 -04:00
Jack Robison
e4fb2f4680
test_reorg_dropping_claim 2020-04-03 13:39:44 -04:00
Jack Robison
a4909f54e4
test reorg count metric 2020-04-03 13:39:44 -04:00
Jack Robison
f7065c6f0c
add reorg count metric to prometheus 2020-04-03 13:39:44 -04:00
Jack Robison
64f7f837e7
delete claims above reorg height from the database 2020-04-03 13:39:44 -04:00
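A sketch of the reorg cleanup these commits describe, assuming illustrative table and column names: claims committed above the new tip are deleted under a lock so readers never observe a half-rolled-back state.

```python
import sqlite3
import threading

db_lock = threading.Lock()  # serialize the reorg cleanup against writers

def delete_claims_above_height(db: sqlite3.Connection, height: int):
    # a reorg invalidated every block above `height`; drop the claim rows
    # from those blocks before re-processing the new chain
    with db_lock:
        db.execute("DELETE FROM claim WHERE height > ?", (height,))
        db.commit()
```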
Jack Robison
130d36acd9
Merge pull request #2905 from lbryio/testnet-headers
fix header checkpoints on testnet
2020-04-03 13:38:47 -04:00
Jack Robison
e94c28cfa2
fix header checkpoints on testnet 2020-04-03 12:21:17 -04:00
Jack Robison
a697dec30c
Merge pull request #2902 from lbryio/bump_checkpoints
bump checkpoints
2020-04-03 12:20:38 -04:00
Victor Shyba
bac09e9b9f bump checkpoints 2020-04-03 12:26:52 -03:00
Lex Berezhny
b179f8f86a
Merge pull request #2901 from lbryio/txo_is_spent_filter
replaced `--unspent` with `--is_not_spent` on several commands and added `--is_spent` to many claim/txo `*_list` commands, `support_list` got three new flags `--sent --received --staked`
2020-04-02 22:24:47 -04:00
Lex Berezhny
962d04ae17 fix txo_spend 2020-04-01 21:03:56 -04:00
Lex Berezhny
f28e3bfe37 lint 2020-04-01 20:53:09 -04:00
Lex Berezhny
6474c86d32 cleaned up *_list commands 2020-04-01 20:44:34 -04:00
Lex Berezhny
769ea8cdfe added --is_spent filter to txo list/sum commands 2020-03-31 23:08:51 -04:00
Lex Berezhny
052e77dd5a v0.67.2 2020-03-31 17:47:19 -04:00
Lex Berezhny
1a4b0cb12a
Merge pull request #2900 from lbryio/wallet_status_does_not_fail
`wallet_status` no longer fails if wallet component has not started
2020-03-31 17:45:51 -04:00
Lex Berezhny
f9aa95c987 default to None for all values in wallet_status when wallet_manager not started yet 2020-03-31 17:22:13 -04:00
Lex Berezhny
a5d06fb4a4 wallet_status no longer fails if wallet component has not started 2020-03-31 16:20:13 -04:00
Lex Berezhny
d5e5d90bdc v0.67.1 2020-03-31 12:38:38 -04:00
Lex Berezhny
e62678e4e6
Merge pull request #2898 from lbryio/duration_is_int
`ffmpeg` file analysis returns duration as integer now
2020-03-31 12:06:44 -04:00
Lex Berezhny
558ac24f7e fix test 2020-03-31 11:29:58 -04:00
Lex Berezhny
6f22f6a59f use ceil() on duration float() instead of int() directly 2020-03-31 10:57:37 -04:00
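Why ceil() instead of int(): int() truncates toward zero and under-reports the duration, while ceil() rounds a fractional second up. A one-line comparison:

```python
from math import ceil

duration = float("12.34")  # e.g. as parsed from ffprobe output
int(duration)    # 12 -- truncates, drops the fractional second
ceil(duration)   # 13 -- rounds up, never under-reports
```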
Lex Berezhny
5ec74f8abe ffmpeg file analysis returns duration as integer now 2020-03-31 10:28:04 -04:00
Lex Berezhny
16d7547e03 changed default txo_spend batch_size default to 500 from 1000 2020-03-30 22:48:05 -04:00
Lex Berezhny
767112dcda updated txo_spend docs 2020-03-30 21:45:58 -04:00
Lex Berezhny
81e23d1d8c v0.67.0 2020-03-30 19:43:45 -04:00
Lex Berezhny
ace90a4354
Merge pull request #2890 from lbryio/faster-status
improve speed of `status`
2020-03-30 19:42:57 -04:00
Jack Robison
267e7096cc add test for rechecking ffmpeg installation 2020-03-30 19:11:04 -04:00
Jack Robison
e7cded7511 check ffmpeg/ffprobe paths in a thread 2020-03-30 19:11:04 -04:00

Jack Robison
33fbd715c0 don't block status on connectivity check 2020-03-30 19:11:04 -04:00
Jack Robison
25ba5b867c dont recheck ffmpeg installation in status 2020-03-30 19:11:04 -04:00
Lex Berezhny
c8f431447c
Merge pull request #2885 from lbryio/repair_tip_on_open
repair headers tip on open and let new headers come in during background fetching
2020-03-30 19:03:09 -04:00
Victor Shyba
1b83a1d09a test and fix verifying from middle 2020-03-30 19:42:27 -03:00
Victor Shyba
d2fb7a7151 lock only when fetching, giving a chance for tip updates 2020-03-30 19:42:27 -03:00
Victor Shyba
7ad3447598 repair tip on open 2020-03-30 19:42:27 -03:00
Lex Berezhny
5e5bc8e705
Merge pull request #2887 from lbryio/txo_spend
added `txo_spend` command to support liquidating large number of txos (eg. tips) by batching them across several transactions
2020-03-30 18:41:05 -04:00
Lex Berezhny
886d1e8a19 added --include_full_tx option to txo_list 2020-03-30 18:15:13 -04:00
Lex Berezhny
6de7a035fa added preview/blocking back into doc string 2020-03-30 17:47:38 -04:00
Lex Berezhny
48d2497eb2 added txo_spend command to support liquidating large number of txos (eg. tips) 2020-03-30 17:47:38 -04:00
Lex Berezhny
6494754ab9
Merge pull request #2896 from lbryio/first_on_ready
fix for startup issues when connecting to wallet servers
2020-03-30 17:41:14 -04:00
Lex Berezhny
a8153627c6 move on_read.first to earlier 2020-03-30 17:02:08 -04:00
Lex Berezhny
ed38966edb add test to verify we listen to on_ready before it actually triggers 2020-03-30 15:26:04 -04:00
Lex Berezhny
85551d1e54 remove no-op test, segwit is always on now 2020-03-30 15:17:25 -04:00
Lex Berezhny
ca31363180 listen for on_read.first before it is triggered 2020-03-30 14:53:52 -04:00
Lex Berezhny
a8f11eb3c3
Merge pull request #2893 from lbryio/fix_float_timestamp
fix estimated timestamp to return integer instead of float
2020-03-30 10:58:32 -04:00
Lex Berezhny
151805121c increase wallet server payment test timeout 2020-03-29 20:00:23 -04:00
Lex Berezhny
0422d2a021 estimated timestamp should be integer 2020-03-29 19:39:37 -04:00
endes123321
8586762dde
fixed is_valid_public_ipv4 2020-03-29 16:12:36 +01:00
Lex Berezhny
ffdf70257b
Merge pull request #2888 from lbryio/m4a-content-type
add m4a content type
2020-03-27 17:54:45 -04:00
Thomas Zarebczan
6079d60aa1
add m4a content type 2020-03-26 23:08:52 -04:00
Jack Robison
d46d012f8c
Revert "delete live bittrex test"
This reverts commit f70343bb
2020-03-26 20:31:39 -04:00
Jack Robison
baf926b360 v0.66.0 2020-03-26 17:59:02 -04:00
Jack Robison
82339869dc
Revert "v0.66.0"
This reverts commit 4ab56ae3
2020-03-26 17:50:44 -04:00
Jack Robison
f70343bb63
delete live bittrex test 2020-03-26 17:50:31 -04:00
Jack Robison
4ab56ae3d1 v0.66.0 2020-03-26 16:49:54 -04:00
Jack Robison
39b4031684
Merge pull request #2880 from lbryio/handle_mkv_files
Handle MKV files
2020-03-26 16:45:20 -04:00
Brannon King
71f8965393
re-use ffprobe info in stream_type
avoid duplicate args
handle review comment
2020-03-26 16:20:12 -04:00
Brannon King
66857e72a4
ensure only webm-type MKV files 2020-03-26 16:20:12 -04:00
Jack Robison
15eb5d47eb
Merge pull request #2881 from lbryio/wallet_sync_status
add `wallet_syncing` to `status`, which is set to true when processing transaction updates
2020-03-26 15:28:20 -04:00
Jack Robison
86242139da
update tests 2020-03-26 13:56:51 -04:00
Victor Shyba
460bdc4148
move wallet_syncing to wallet status is_syncing 2020-03-26 11:43:48 -04:00
Victor Shyba
1052126522
add wallet_syncing status 2020-03-26 11:43:48 -04:00
Jack Robison
fdb42ac876
Merge pull request #2852 from lbryio/faster-transaction-sync
More efficient syncing of wallet transactions
2020-03-26 11:43:20 -04:00
Jack Robison
6486f986e8
bump min required server version 2020-03-26 11:41:49 -04:00
Jack Robison
bc4075e2ed
create_task instead of ensure_future 2020-03-26 11:41:49 -04:00
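Context for the create_task change: since Python 3.7, asyncio.create_task() is the preferred way to schedule a coroutine, and unlike ensure_future() it rejects anything that is not a coroutine up front. A small sketch:

```python
import asyncio

async def do_work():
    await asyncio.sleep(0)

async def main():
    task = asyncio.create_task(do_work())      # preferred on Python 3.7+
    # task = asyncio.ensure_future(do_work())  # older spelling, also accepts futures
    await task

asyncio.run(main())
```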
Jack Robison
11525c7d0d
fix caching transaction_show 2020-03-26 11:41:49 -04:00
Jack Robison
bb12ae9ce6
fix duplicate get_merkle requests 2020-03-26 11:41:49 -04:00
Jack Robison
66b4ad0c44
improve log 2020-03-26 11:41:49 -04:00
Jack Robison
4bff2d718e
clean up test 2020-03-26 11:41:49 -04:00
Jack Robison
af7a7b4dc6
fix maybe_migrate_certificates blocking the api while populating channel signing keys 2020-03-26 11:41:49 -04:00
Jack Robison
ec20d9a2a8
faster wallet sync with get_transaction_and_merkle 2020-03-26 11:41:49 -04:00
Lex Berezhny
b0aee3d335
Merge pull request #2884 from lbryio/txo_plot
added `txo_plot` command to allow plotting txo sums over time
2020-03-26 10:14:02 -04:00
Lex Berezhny
5e2ddbfd86 txo_plot returns lbc instead of dewies 2020-03-26 01:13:09 -04:00
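Dewies are LBC's smallest unit, 10^8 dewies per LBC (analogous to satoshis). A sketch of the conversion the txo_plot change implies; `dewies_to_lbc` here is illustrative:

```python
from decimal import Decimal

COIN = Decimal(100_000_000)  # 1 LBC == 10^8 dewies

def dewies_to_lbc(dewies: int) -> Decimal:
    return Decimal(dewies) / COIN

assert dewies_to_lbc(150_000_000) == Decimal('1.5')
```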
Lex Berezhny
f086ebbb8e lint 2020-03-26 00:42:11 -04:00
Lex Berezhny
e5bf6a5bfc added txo_plot command to allow plotting txo sums over time 2020-03-26 00:37:13 -04:00
Lex Berezhny
76fa86d54b v0.65.0 2020-03-23 14:56:45 -04:00
Jack Robison
60d89506a5
Merge pull request #2877 from lbryio/on_ready_fixup
wait for tx sync to finish but not header sync
2020-03-23 13:46:47 -04:00
Victor Shyba
2893f1eb9e add other taskgroup for non-start-important tasks 2020-03-23 12:36:05 -03:00
Victor Shyba
a08cbf412d on_ready.wait so we start after tx syncs 2020-03-23 12:26:07 -03:00
Lex Berezhny
0bd65356f9 increase timeout in usage payment test 2020-03-23 11:06:29 -04:00
Lex Berezhny
2e2b39455f
Merge pull request #2875 from lbryio/1080p
default to 1080P for anything higher than 1080P
2020-03-23 10:44:18 -04:00
Lex Berezhny
1f9fbe34e4
Merge pull request #2863 from lbryio/async_headers
fetch headers on demand
2020-03-23 10:41:31 -04:00
Thomas Zarebczan
506d7ae50e
default to 1080P for anything higher than 1080P
Previously would do 1440p
2020-03-23 10:20:13 -04:00
Victor Shyba
342cb00625 less concurrent repeated header checks 2020-03-23 01:19:34 -03:00
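A hedged sketch of de-duplicating concurrent checks: callers asking about the same height share a single in-flight future instead of each issuing their own request. `_fetch_and_verify` is a stand-in for the real check.

```python
import asyncio

_inflight = {}  # height -> future for the check already in progress

async def check_header(height: int):
    if height not in _inflight:
        fut = asyncio.ensure_future(_fetch_and_verify(height))
        fut.add_done_callback(lambda _: _inflight.pop(height, None))
        _inflight[height] = fut
    return await _inflight[height]

async def _fetch_and_verify(height: int):
    ...  # stand-in for the actual header fetch and verification
```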
Victor Shyba
952fc01efd add script that generates checkpoints 2020-03-23 00:05:36 -03:00
Victor Shyba
3eebe301fe move checkpoints out of folder into file 2020-03-22 23:45:14 -03:00
Victor Shyba
19c0a81c42 fix bad usages of hash and some tests 2020-03-22 23:31:47 -03:00
Victor Shyba
9fc7f9904b fix tests, delete old code 2020-03-22 23:31:47 -03:00
Victor Shyba
b04a516063 better locking, stop corrupting headers, fix some tests 2020-03-22 23:31:47 -03:00
Victor Shyba
241e946d91 first experimental version 2020-03-22 23:31:45 -03:00
Victor Shyba
e45375dc26 more async parts 2020-03-22 23:30:11 -03:00
Victor Shyba
ec8e243323 estimate timestamps instead of using block headers 2020-03-22 23:30:11 -03:00
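Estimating instead of reading headers works because LBRY targets a block every 2.5 minutes, so a display-quality timestamp falls out of arithmetic. A sketch, with the genesis timestamp as an assumed constant:

```python
FIRST_BLOCK_TIMESTAMP = 1466646588  # assumed constant, for illustration only
AVG_BLOCK_TIME = 150                # LBRY targets a block every 2.5 minutes

def estimated_timestamp(height: int) -> int:
    # no header download needed; accurate enough for display purposes
    return FIRST_BLOCK_TIMESTAMP + height * AVG_BLOCK_TIME
```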
Victor Shyba
af0e9368d4 headers get now async 2020-03-22 23:30:11 -03:00
Lex Berezhny
285483b81a
Merge pull request #2872 from lbryio/claim_list_include_received_tips
added `--include_received_tips` to `claim_list` and `txo_list` commands
2020-03-22 18:00:35 -04:00
Lex Berezhny
73a91d5569 added --include_received_tips to claim_list/txo_list 2020-03-22 17:22:15 -04:00
Lex Berezhny
e24c78be32
Merge pull request #2871 from lbryio/wallet_txo_data_leak
fix to prevent transaction cache from leaking some information about outputs between unrelated wallets
2020-03-22 13:16:03 -04:00
Lex Berezhny
b11184de68 fix purchase_create 2020-03-22 12:23:21 -04:00
Lex Berezhny
61d02fc5d7 fix to prevent transaction cache from leaking some information about outputs between unrelated wallets 2020-03-22 11:45:40 -04:00
Lex Berezhny
f0217f6821
Merge pull request #2870 from lbryio/resolve_includes
added new flags to `resolve` (and a few to `claim_search`) commands: `--include_purchase_receipt`, `--include_is_my_output`, `--include_sent_supports`, `--include_sent_tips` and `--include_received_tips`
2020-03-22 02:08:28 -04:00
Lex Berezhny
09644914a6 added --include_is_my_output to claim_search 2020-03-22 01:51:09 -04:00
Lex Berezhny
b5c24d6a48 fixing unit tests 2020-03-22 01:29:26 -04:00
Lex Berezhny
bdd2ac2c25 renamed some flags 2020-03-22 01:13:26 -04:00
Lex Berezhny
76376f0d33 lint 2020-03-22 00:24:38 -04:00
Lex Berezhny
9749da46ae added flags to resolve: --include_purchase_receipt, --include_is_my_output, --include_my_supports, --include_my_tips 2020-03-21 23:44:57 -04:00
Lex Berezhny
1c05295e89 updated doc 2020-03-21 20:15:21 -04:00
Lex Berezhny
4d0f28215a added release-text-lines: to release script to support multi-line release notes 2020-03-21 20:09:39 -04:00
Lex Berezhny
0bba72bc5a
Merge pull request #2862 from lbryio/txo_list_support_sent_outputs
`txo_list` adds many new ownership filters such as `--is_my_input`, `--is_my_output`, etc and some metadata filters such as `--channel_id`, `--reposted_claim_id`, etc and a new command `txo_sum` which takes the same arguments as `txo_list` and produces sum of outputs
2020-03-21 19:48:34 -04:00
Lex Berezhny
87089b8e83 fix support test 2020-03-21 18:48:06 -04:00
Lex Berezhny
6a58148a89 added support for --order_by=none 2020-03-21 18:16:25 -04:00
Lex Berezhny
15091052be added --no_totals to txo_list 2020-03-21 18:06:05 -04:00
Lex Berezhny
5cd7e9a9b8 increment schema version and force specific indexes to be used for get_txos() 2020-03-21 15:08:14 -04:00
Lex Berezhny
7cb530c334 added --channel_id and --order_by to txo_list 2020-03-20 23:19:26 -04:00
Lex Berezhny
5e0324cc91 added --reposted_claim_id to txo_list 2020-03-20 20:22:57 -04:00
Lex Berezhny
6293e227ea added txo_sum command 2020-03-20 19:08:47 -04:00
Lex Berezhny
d6d83a5c76 integration test fix 2020-03-20 19:08:47 -04:00
Lex Berezhny
93fc883b90 fixing unit tests 2020-03-20 19:08:47 -04:00
Lex Berezhny
dd21803598 working --is_my_input_or_output --is_my_input --is_my_output etc 2020-03-20 19:08:47 -04:00
Lex Berezhny
af2f2282c2 txo_list returns txo funded by my account but sent to external address 2020-03-20 19:08:47 -04:00
Jack Robison
869a76c9bb
Merge pull request #2851 from lbryio/transaction-info
Add `blockchain.transaction.info` and `blockchain.transaction.get_batch` to the wallet server for more efficient transaction fetching
2020-03-20 10:25:45 -04:00
Jack Robison
44b2964a6a
add blockchain.transaction.get_batch 2020-03-19 19:20:30 -04:00
Jack Robison
8fddb57e0a
reduce block lru cache sizes 2020-03-19 19:20:30 -04:00
Jack Robison
c7a5a0cab0
add blockchain.transaction.info 2020-03-19 19:20:30 -04:00
Jack Robison
c13aab3ffc
Merge pull request #2823 from lbryio/multiple-db-readers
Use multiple processes for querying the db for api calls
2020-03-19 19:19:53 -04:00
Jack Robison
95a9a76598
windows 2020-03-19 19:19:02 -04:00
Jack Robison
923cfa3d50
another 2020-03-19 19:19:02 -04:00
Jack Robison
2c489168c2
feedback 2020-03-19 19:19:02 -04:00
Jack Robison
f3292b4d34
feedback 2020-03-19 19:19:02 -04:00
Jack Robison
9769829b72
fix android executor 2020-03-19 19:19:02 -04:00
Jack Robison
36243d15cc
fix peer_list 2020-03-19 19:19:02 -04:00
Jack Robison
f4645f570c
more read only calls 2020-03-19 19:19:02 -04:00
Jack Robison
61603ccfce
write lock 2020-03-19 19:19:02 -04:00
Jack Robison
a26cfc639c
WAL 2020-03-19 19:19:02 -04:00
Jack Robison
90602931d8
multiple readers for transaction_list 2020-03-19 19:19:02 -04:00
Jack Robison
d1b330028c
multiple db reader processes 2020-03-19 19:19:02 -05:00
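How the multiple-reader setup hangs together: SQLite's WAL journal mode lets read-only connections in other processes run concurrently with the single writer. A minimal sketch:

```python
import sqlite3

# single writer connection: switch to WAL so readers never block the writer
writer = sqlite3.connect('lbrynet.sqlite')
writer.execute('pragma journal_mode=wal;')

def open_reader(path: str = 'lbrynet.sqlite') -> sqlite3.Connection:
    # each reader process opens the database read-only via a URI;
    # under WAL these run concurrently with the writer
    return sqlite3.connect(f'file:{path}?mode=ro', uri=True)
```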
Jack Robison
7a6b1930bf
rename executor -> writer_executor 2020-03-19 19:19:02 -04:00
Lex Berezhny
c271361552
Merge pull request #2865 from lbryio/lbrycrd-1744
upgrade lbrycrd to v0.17.4.4
2020-03-19 17:54:26 -04:00
Lex Berezhny
17789bc814
Merge pull request #2866 from lbryio/dont_shlex_windows
fixed issue with shell parsing on Windows
2020-03-19 17:53:47 -04:00
Brannon King
d7c16e161a fixed issue with shell parsing on Windows 2020-03-19 13:11:57 -06:00
Lex Berezhny
e71c17d7e7 upgrade lbrycrd to v0.17.4.4 2020-03-19 12:54:19 -04:00
Lex Berezhny
17d87eb157
Merge pull request #2861 from lbryio/ffmpeg_fixes
transcode in stream_update, re-enable transcode on Windows
2020-03-19 12:20:47 -04:00
Brannon King
bf11bcc084 fixed transcoding tests for non-async execute
status ordering broke path check
2020-03-18 12:43:27 -06:00
Brannon King
bd291109df addressing code review comments 2020-03-18 12:00:52 -06:00
Brannon King
ac89ba9b8d don't require ProactorEventLoop on Windows
fix linter errors
2020-03-18 12:00:52 -06:00
Brannon King
5ab634e375 support search path for ffmpeg 2020-03-18 12:00:52 -06:00
Brannon King
bb1978d976 ffmpeg now invoked via stream update 2020-03-18 12:00:52 -06:00
Lex Berezhny
bea94ce8ac
Merge pull request #2854 from lbryio/tracemalloc_api
Adds tracemalloc api for memory troubleshooting
2020-03-16 22:15:41 -04:00
Victor Shyba
9561f93594 disable tracemalloc after test finishes so it doesn't slow down everything else 2020-03-16 11:29:16 -03:00
Victor Shyba
d44d5c3304 enable/disable instead of set 2020-03-16 06:40:22 -03:00
Victor Shyba
ec541e2057 lint and improve tests 2020-03-11 19:56:37 -03:00
Victor Shyba
8169bf6b97 top objects api 2020-03-11 19:56:37 -03:00
Victor Shyba
56c8ad1221 start/stop tracemalloc over api 2020-03-11 19:56:37 -03:00
Lex Berezhny
6814f2e38c
Merge pull request #2857 from eggplantbren/master
Trending algorithm with time delay and variable decay rate
2020-03-11 18:34:52 -04:00
Brendon J. Brewer
5f043b9a78 variable decay 2020-03-12 10:33:15 +13:00
Lex Berezhny
c9092cd1c7 v0.64.0 2020-03-09 13:32:19 -04:00
Lex Berezhny
86cc65d894
Merge pull request #2848 from lbryio/claim_transaction_list_filtering
added new `txo_list` command with filtering for `--claim_id`, claim `--name` and `--is_received`/`--is_not_received`, also commands `claim_list`/`stream_list`/`channel_list`/`support_list` are based on `txo_list` and thus support most of the new filters
2020-03-08 23:33:38 -04:00
Lex Berezhny
8ef2647fa9 is_received 2020-03-08 23:11:03 -04:00
Lex Berezhny
3ff9e99416 added txo_list command 2020-03-08 16:22:49 -04:00
Lex Berezhny
1731046011
Merge pull request #2846 from lbryio/txo_is_spent
added `is_spent` attribute to transaction outputs
2020-03-06 21:10:30 -05:00
Lex Berezhny
c3d96184b6 fix test 2020-03-06 20:35:26 -05:00
Lex Berezhny
c255c606a7 added is_spent attribute to transaction outputs 2020-03-06 20:12:38 -05:00
Jack Robison
cc69faa1fd v0.63.0 2020-03-06 15:58:34 -05:00
Jack Robison
0f53cd86c8
Merge branch 'debug_find_ffmpeg' 2020-03-06 15:54:24 -05:00
Brannon King
212c8f188d
only change LD vars if running from bundle
missed a word
moved is_running_from_bundle
2020-03-06 15:53:09 -05:00
Brannon King
dec248adec
repair env modified by pyinstaller
see https://pyinstaller.readthedocs.io/en/stable/runtime-information.html#ld-library-path-libpath-considerations
2020-03-06 15:53:09 -05:00
Brannon King
98e6a066f4
add unit test and debug output 2020-03-06 15:53:09 -05:00
Jack Robison
c90db54a3d
Revert "v0.63.0"
This reverts commit 927d1569
2020-03-06 15:51:42 -05:00
Jack Robison
927d156933 v0.63.0 2020-03-06 14:49:53 -05:00
Jack Robison
c71f91073f
Merge pull request #2845 from lbryio/temp-disable-windows-ffmpeg
fix DHT setup and disable ffmpeg on windows temporarily
2020-03-06 14:20:18 -05:00
Jack Robison
ab28387692
remove ProactorEventLoop and disable ffmpeg on windows temporarily 2020-03-06 13:45:34 -05:00
Lex Berezhny
c4905d02b9
Merge pull request #2793 from lbryio/dont_validate_non_video
don't validate and repair files that aren't videos; add maximum video bit rate setting
2020-03-04 09:16:37 -05:00
Brannon King
ee39880fb5 fix items from review 2020-03-03 21:38:30 -07:00
Brannon King
926b3e56b9 take a hint, lint 2020-03-03 21:38:30 -07:00
Brannon King
e060df5367 hide ValueError 2020-03-03 21:38:30 -07:00
Brannon King
a3294d4a0d make bit_rate check support maximum 2020-03-03 21:38:30 -07:00
Brannon King
19ce0ab246 ogg -> ogv, ignore files that aren't video 2020-03-03 21:38:30 -07:00
Lex Berezhny
75a1cc0d33
Merge pull request #2840 from lbryio/status_returns_server_features
`status` command returns features available of currently connected wallet server, including trending algorithm
2020-03-03 20:47:04 -05:00
Lex Berezhny
05228529b0 fix tests 2020-03-03 20:26:07 -05:00
Lex Berezhny
3cbeadfbc3 lbrynet status command returns features available of currently connected wallet server, including the trending algorithm used 2020-03-03 20:04:57 -05:00
Lex Berezhny
3d4938c0e2
Merge pull request #2839 from lbryio/file_analysis_join_path
use `os.path.join()` instead of string addition when searching for file analysis binaries
2020-03-03 10:48:06 -05:00
Lex Berezhny
1b850b8a2b dont prepend path when doing which ffmpeg 2020-03-02 21:11:11 -05:00
Lex Berezhny
ebd33f1869 use os.path.join instead of string addition when searching for file analysis binaries 2020-03-02 20:34:54 -05:00
Jack Robison
ac846b4df3 v0.62.0 2020-03-01 20:29:32 -05:00
Jack Robison
0d427c9b90
Merge pull request #2836 from lbryio/wallet-server-lru-caches
Add LRU caches for blocks and block hashes in the wallet server
2020-03-01 15:19:28 -05:00
Jack Robison
dc6194f862
perf counter 2020-03-01 15:17:21 -05:00
Jack Robison
de50214e1f
block lru cache 2020-03-01 15:17:21 -05:00
Jack Robison
5ec4a88c35
block hash lru cache 2020-03-01 15:17:05 -05:00
Jack Robison
31c141e757
Merge pull request #2835 from lbryio/wallet-server-threadpools
Improve wallet server performance with separate thread pools for leveldb, the block processor, and mempool
2020-03-01 15:15:20 -05:00
Jack Robison
9d5760d899
fix test 2020-03-01 14:41:14 -05:00
Jack Robison
7945e1ea3c
mempool threadpool 2020-03-01 12:40:13 -05:00
Jack Robison
d94c40e371
leveldb threadpool 2020-03-01 12:40:13 -05:00
Jack Robison
31f22122e8
block processor threadpool 2020-03-01 12:40:13 -05:00
Lex Berezhny
506582aa2b
Merge pull request #2834 from lbryio/list_resolve_includes_local_data
when using `--resolve` with local claim `*_list` commands, update resolved result with local metadata (such as `is_mine`)
2020-02-29 15:48:49 -05:00
Lex Berezhny
a02b251c9b when using --resolve with local claim list commands, update resolved result with local metadata (such as is_mine) 2020-02-29 09:42:11 -05:00
Lex Berezhny
34cbb6fa79
Merge pull request #2831 from lbryio/claim_type_index_improvement
update `claim_type_*` db indexes to be more performant
2020-02-27 14:27:24 -05:00
Lex Berezhny
33679b56fd update claim_type_* indexes to be more performant 2020-02-27 12:07:38 -05:00
Lex Berezhny
8897c4d560
Merge pull request #2816 from osilkin98/removes-anonymous
removes ability to comment anonymously
2020-02-27 10:37:47 -05:00
Oleg Silkin
f0c18ccbe7 Removes testing for anonymous comments, fixes up existing asserts 2020-02-26 19:59:38 -05:00
Oleg Silkin
e560d83c51 drops support for commenting without a channel 2020-02-26 19:58:03 -05:00
Lex Berezhny
eca7addc67 v0.61.0 2020-02-24 10:01:03 -05:00
Lex Berezhny
263f3ba5c9
Merge pull request #2828 from lbryio/update_lbrycrd
update lbrycrd in tests to v0.17.4.3
2020-02-22 14:47:48 -05:00
Lex Berezhny
f514123ef0 update lbrycrd 2020-02-22 13:42:28 -05:00
Lex Berezhny
9f461db0d0
Merge pull request #2827 from lbryio/repost_inherit_tags
reposts inherit the tags of the original claim, filtered/blocked claims cause their reposts to also be filtered/blocked
2020-02-22 13:04:31 -05:00
Lex Berezhny
7de8670616 blocked original repost causes all reposts to also be blocked 2020-02-22 12:49:20 -05:00
Lex Berezhny
8c7908b200 reposts inherit the tags of the original claim 2020-02-22 12:23:11 -05:00
Lex Berezhny
e7722e039f
Merge pull request #2826 from lbryio/fix_not_channel_ids
`claim_search --not_channel_ids` excludes the channel itself and not just claims in the channel
2020-02-21 23:01:00 -05:00
Lex Berezhny
039bc0208a fix tests 2020-02-21 22:22:28 -05:00
Lex Berezhny
d57900a069 claim_search --not_channel_ids excludes the channel itself and not just claims in the channel 2020-02-21 21:59:46 -05:00
Jack Robison
c1153302aa v0.60.1 2020-02-21 16:13:30 -05:00
Victor Shyba
1086a3297f check its broadcasted and use a retriable call 2020-02-21 17:51:46 -03:00
Jack Robison
5c613934ca
Revert "v0.60.1"
This reverts commit 7f03b13579.
2020-02-21 15:05:33 -05:00
Jack Robison
6d7e9092f8
Merge pull request #2825 from lbryio/tx_from_stream_controller
use the stream controller tx instead of local history
2020-02-21 15:04:57 -05:00
Victor Shyba
8bb6f328dc use the stream controller tx instead of local history 2020-02-21 16:41:23 -03:00
Jack Robison
7f03b13579 v0.60.1 2020-02-21 14:14:15 -05:00
Jack Robison
09008ea991
Merge pull request #2824 from lbryio/skip_wallet_server_payment
max wallet server payment fee defaults to 0 and component doesn't start when it's not a positive value
2020-02-21 13:27:58 -05:00
Jack Robison
ee234212e6
Merge branch 'master' into skip_wallet_server_payment 2020-02-21 13:26:38 -05:00
Jack Robison
277b243f52
Merge pull request #2772 from lbryio/reflector-status
Add `is_fully_reflected` to `file_list` response
2020-02-21 13:21:36 -05:00
Jack Robison
0973ac753f
add is_fully_reflected to file_list response 2020-02-21 12:49:19 -05:00
Victor Shyba
f41cfbfb97 max wallet server payment fee defaults to 0 and component doesn't start in that case 2020-02-21 14:47:09 -03:00
Lex Berezhny
a70980c81a reverting v0.60.1 release 2020-02-21 12:43:43 -05:00
Lex Berezhny
12a962f656
Merge pull request #2822 from lbryio/stream_controller_payment
use stream controller and documented errors for wallet server payments
2020-02-21 12:34:07 -05:00
Lex Berezhny
5936444f3e more reliable wait for wallet server payment test 2020-02-21 12:18:44 -05:00
Lex Berezhny
c7d42f00c6 shorten error message for ServerPaymentFeeAboveMaxAllowed 2020-02-21 10:16:56 -05:00
Lex Berezhny
b1318a9958 minor cleanup 2020-02-21 10:05:46 -05:00
Victor Shyba
d2560d260c use stream controller and documented errors 2020-02-21 00:04:37 -03:00
Jack Robison
f40a61cf9a v0.60.1 2020-02-20 17:56:54 -05:00
Jack Robison
e0623578bf
Merge pull request #2821 from lbryio/ffmpeg-find
add `ffmpeg_find` api to check ffmpeg installation status, don't recheck for ffmpeg in `status`
2020-02-20 17:48:36 -05:00
Jack Robison
ead8daaa14
doc 2020-02-20 17:27:18 -05:00
Jack Robison
23b4b9e230
add ffmpeg_find api, don't recheck for it in status 2020-02-20 16:43:41 -05:00
Jack Robison
947017e334
Merge pull request #2820 from lbryio/disconnect-on-oversized
disconnect from client sending message larger than MAX_RECEIVE
2020-02-20 16:24:26 -05:00
Jack Robison
7fd0d6507f
disconnect from client sending message larger than MAX_RECEIVE 2020-02-20 16:08:21 -05:00
Jack Robison
ad1e9ef086
Merge pull request #2819 from lbryio/max-receive
add MAX_RECEIVE setting to wallet server
2020-02-20 14:38:11 -05:00
Jack Robison
2f1d08e417
add MAX_RECEIVE wallet server setting 2020-02-20 14:11:16 -05:00
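A sketch of the oversized-message guard these two PRs add, with an illustrative limit: once a client's buffered request exceeds MAX_RECEIVE, the server closes the connection rather than buffering without bound.

```python
import asyncio

MAX_RECEIVE = 10_000_000  # illustrative cap, in bytes

class Session(asyncio.Protocol):
    def __init__(self):
        self.buffer = b''
        self.transport = None

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data: bytes):
        self.buffer += data
        if len(self.buffer) > MAX_RECEIVE:
            # misbehaving (or malicious) client: disconnect instead of
            # letting the buffer grow without bound
            self.transport.close()
```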
Lex Berezhny
55f4eb80ba
Merge pull request #2815 from lbryio/apply_share_usage_data_live
apply share_usage_data as its set, without restarting
2020-02-20 13:35:47 -05:00
Victor Shyba
7c160ff65e remove unused parameters 2020-02-20 14:38:13 -03:00
Victor Shyba
fc5d5faaed use conf directly instead of lambda 2020-02-20 14:27:39 -03:00
Victor Shyba
5394f1763c simplify external ip logic 2020-02-20 12:12:40 -03:00
Victor Shyba
050b67c9d6 apply share_usage_data as its set 2020-02-20 12:12:40 -03:00
Lex Berezhny
4b87cb45ee
Merge pull request #2707 from lbryio/pay_wallet_servers
wallet servers requiring a daily fee will automatically get paid by client
2020-02-18 17:40:23 -05:00
Victor Shyba
f0e1db319c make wallet server payments a component 2020-02-18 19:18:30 -03:00
Victor Shyba
3950715237 add max_wallet_server_fee conf to limit daily wallet server payments 2020-02-18 18:10:58 -03:00
Victor Shyba
ae9ba14b59 use both donation and payment addresses separately 2020-02-18 18:10:58 -03:00
Victor Shyba
ad6c6fbe35 fixes from review and add analytics 2020-02-18 18:10:58 -03:00
Victor Shyba
ffa5c20c88 fix hanging test 2020-02-18 18:10:58 -03:00
Victor Shyba
5a6218eeca improve tests, fix types 2020-02-18 18:10:58 -03:00
Victor Shyba
200531dd96 wait for tx instead 2020-02-18 18:10:58 -03:00
Victor Shyba
15a2f048ac pay wallet server test and initial prototype 2020-02-18 18:10:58 -03:00
Victor Shyba
d317a4042c change daily_fee to be lbc instead of dewies 2020-02-18 18:10:58 -03:00
Victor Shyba
c2d717aba5 old code from #2683 2020-02-18 18:10:58 -03:00
Lex Berezhny
ef74777df1 v0.60.0 2020-02-17 17:52:11 -05:00
Lex Berezhny
f6dbf99fb5
Merge pull request #2811 from lbryio/index_fixes
wallet server indexes
2020-02-17 17:43:55 -05:00
Lex Berezhny
0b2d9e15b4 added order bys for fee_amount 2020-02-17 17:24:48 -05:00
Lex Berezhny
d14e5e75e8 all three order bys for duration filter 2020-02-17 17:24:48 -05:00
Lex Berezhny
df11ef34d2 index cleanup 2020-02-17 17:24:48 -05:00
Lex Berezhny
4fc619262c replacing indexes on trending_global, trending_mixed to be trending_group, trending_mixed 2020-02-17 17:24:48 -05:00
Lex Berezhny
2750eee86d
Merge pull request #2810 from lbryio/no_default_order_by
removed default `order_by` for `claim_search`
2020-02-17 17:04:41 -05:00
Lex Berezhny
71cb03345b removed default order_by for claim_search 2020-02-17 16:26:29 -05:00
Lex Berezhny
22c8ca6c40
Merge pull request #2806 from lbryio/privacyDefault
change default privacy setting
2020-02-17 16:21:34 -05:00
Victor Shyba
237a8965eb fix tests 2020-02-17 18:00:23 -03:00
jessop
7d872c7863 change default privacy setting 2020-02-13 18:56:11 -05:00
Jack Robison
0d08858dfb v0.59.2 2020-02-13 17:22:21 -05:00
Jack Robison
5c6d63c93d
Merge pull request #2803 from lbryio/fix-2802
fix claim apis failing with --resolve if no claims are present
2020-02-13 15:15:56 -05:00
Jack Robison
c8c10d2bb0
fix claim apis failing with --resolve if no claims are present
fixes https://github.com/lbryio/lbry-sdk/issues/2802
2020-02-13 14:58:51 -05:00
Jack Robison
f4d3e9ea6a
add development wallet server deploy script 2020-02-13 13:54:46 -05:00
Jack Robison
3713d3488d
Merge pull request #2800 from lbryio/fix-2746
Fix race condition setting/clearing the claim_search and resolve cache
2020-02-13 11:39:27 -05:00
Jack Robison
9b46d03c91
fix race between setting items in and clearing the resolve/claim_search cache 2020-02-13 11:17:21 -05:00
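One way to close a set/clear race like the one described here — a hedged sketch, not necessarily the SDK's actual fix: tag the cache with a generation counter and only store a fetched result if no clear happened while awaiting.

```python
class ResolveCache:
    def __init__(self):
        self._cache = {}
        self._generation = 0  # bumped on every clear

    def clear(self):
        self._generation += 1
        self._cache.clear()

    async def get_or_fetch(self, url, fetch):
        generation = self._generation
        result = await fetch(url)
        # only populate if the cache wasn't cleared mid-await, so a stale
        # result can never outlive the clear that should have evicted it
        if generation == self._generation:
            self._cache[url] = result
        return result
```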
Jack Robison
fe4e738580
Merge pull request #2798 from lbryio/repost-index
Add sqlite index for reposted claims
2020-02-13 10:33:36 -05:00
Jack Robison
205b0c4263
call get_platform only once 2020-02-13 10:04:50 -05:00
Jack Robison
78eef25f1a
add reposted_claim_hash index
fixes https://github.com/lbryio/lbry-sdk/issues/2782
2020-02-12 17:39:57 -05:00
Lex Berezhny
c9305fd070
Merge pull request #2796 from lbryio/xenial_build
linux build of LBRY SDK for Ubuntu 16.04 Xenial
2020-02-12 14:28:41 -05:00
Lex Berezhny
20774280b9
Merge pull request #2790 from lbryio/sql_in_for_single_value
SQL generation fix to handle IN operation for one value lists
2020-02-12 14:27:36 -05:00
Lex Berezhny
74bd2557af linux build of LBRY SDK for Ubuntu 16.04 Xenial 2020-02-12 14:20:06 -05:00
Lex Berezhny
605fc8ecd8 claim_search by --claim_type=repost support 2020-02-12 11:51:35 -05:00
Lex Berezhny
6b745c53dc make sure SQL IN works with python set 2020-02-12 11:41:32 -05:00
Lex Berezhny
98c60b38fc disable too-many-nested-blocks lint rule 2020-02-12 10:47:40 -05:00
Lex Berezhny
be4515ec9a use sets for __in constraint values 2020-02-12 10:39:25 -05:00
Lex Berezhny
dcb1b64696 SQL generation fix to handle IN operation for one value lists 2020-02-12 10:31:27 -05:00
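A sketch of the IN-generation fix, assuming a hypothetical `constrain_in` helper: coerce the values to a list first (so sets work too) and special-case a single-value list as plain equality.

```python
def constrain_in(column: str, values):
    values = list(values)  # works for lists *and* sets
    if len(values) == 1:
        return f"{column} = ?", values
    placeholders = ", ".join("?" for _ in values)
    return f"{column} IN ({placeholders})", values

constrain_in("claim_type", {1})        # ('claim_type = ?', [1])
constrain_in("claim_type", [1, 2, 3])  # ('claim_type IN (?, ?, ?)', [1, 2, 3])
```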
Jack Robison
1190a60c7a v0.59.1 2020-02-11 16:56:37 -05:00
Jack Robison
82ecf207c6
Merge pull request #2789 from lbryio/update-default-servers
update default wallet servers
2020-02-11 16:36:31 -05:00
Jack Robison
84c7f1bf58
update test 2020-02-11 16:13:55 -05:00
Jack Robison
5ac453fe07
update default wallet servers 2020-02-11 13:51:15 -05:00
Lex Berezhny
c2f39d653d v0.59.0 2020-02-10 12:08:24 -05:00
Lex Berezhny
748178f536 Merge pull request #2780 from lbryio/release_current
manually incremented version and added support to release script to r…
2020-02-09 20:41:00 -05:00
Lex Berezhny
ffbef69dc7 manually incremented version and added support to release script to release current version 2020-02-09 20:26:15 -05:00
Lex Berezhny
3757124323
Merge pull request #2771 from lbryio/update_url_regex
update url regex to include missing cases
2020-02-09 18:17:35 -05:00
Victor Shyba
e65f214b3c fix test using space in name 2020-02-09 17:55:29 -05:00
Victor Shyba
f14004e56b update url regex to cases tom reported 2020-02-09 17:55:29 -05:00
Lex Berezhny
dadc004dd6
Merge pull request #2765 from eggplantbren/master
fixed some issues with AR trending
2020-02-09 17:53:27 -05:00
Brendon J. Brewer
3697d9e1f0 Tweaked some parameters, back to 134 2020-02-09 17:38:34 -05:00
Brendon J. Brewer
94524f7330 lint 2020-02-09 17:38:34 -05:00
Brendon J. Brewer
17d8a3e5d6 Use claim_hash not claim_id 2020-02-09 17:38:24 -05:00
Brendon J. Brewer
39996d7612 Only create file during install if logging is turned on 2020-02-09 17:37:55 -05:00
Lex Berezhny
601ee4320f Merge pull request #2768 from mkroman/master
update cffi to v1.13.2
2020-02-09 17:35:06 -05:00
Mikkel Kroman
6e4f6fa92d Update cffi to v1.13.2
This fixes compilation issues for CPython 3.8+
2020-02-09 17:19:53 -05:00
Lex Berezhny
b7103c29dd Merge pull request #2773 from lbryio/fix_video_analysis_error
fix video analysis error
2020-02-09 17:19:23 -05:00
Brannon King
e53209e5ae handle early lbrycrd exit in tests 2020-02-09 17:02:16 -05:00
Brannon King
501fbd3114 handle strange file metadata 2020-02-09 17:02:16 -05:00
Lex Berezhny
5626f43e2b
Merge pull request #2779 from lbryio/github_actions
build community PRs using GitHub Actions
2020-02-09 16:49:40 -05:00
Lex Berezhny
0185cb48fb run lint and tests in parallel 2020-02-09 16:34:04 -05:00
Lex Berezhny
9a49eb06da fix import path in test_transcoding.py 2020-02-09 16:15:20 -05:00
Lex Berezhny
3fbc012231 moved blockchain/test_transcoding.py -> other/test_transcoding.py 2020-02-09 16:15:20 -05:00
Lex Berezhny
3938cf33d8 take github actions for a spin 2020-02-09 16:15:04 -05:00
Lex Berezhny
38b3d0c87c
Merge pull request #2777 from lbryio/claim_list_multi_type
`claim_list` argument `--claim_type` can now be repeated to pass a list of claim_types
2020-02-08 23:34:34 -05:00
Lex Berezhny
15a2fa6199 claim_list --claim_type argument can be repeated 2020-02-08 23:34:04 -05:00
Lex Berezhny
e834209d40 Merge pull request #2776 from lbryio/resolve_includes_censoring_channels_url
`resolve` results which are censored now include full details of channel which did the censoring
2020-02-08 23:32:44 -05:00
Lex Berezhny
9607d21828 censored searches/resolves include metadata of channel which did the censoring 2020-02-07 18:50:29 -05:00
Lex Berezhny
6fbbf36143 pylint 2020-02-07 18:44:30 -05:00
Lex Berezhny
db25f3282e lbry.error: save arguments on self and support for functions around arguments 2020-02-07 18:25:06 -05:00
Jack Robison
ceb92838e7 v0.58.2 2020-02-06 11:37:09 -05:00
Jack Robison
afb98f2cfc
Merge pull request #2770 from lbryio/disable-save-content-claim
Add `save_resolved_claims` config setting for better performance when not using file_x commands
2020-02-06 11:32:49 -05:00
Jack Robison
ed785fb087
add save_resolved_claims config setting to disable saving claims upon resolving them 2020-02-06 11:27:26 -05:00
Jack Robison
3921180d77 v0.58.1 2020-02-04 15:12:55 -05:00
Jack Robison
ef36e0311e
Merge pull request #2764 from lbryio/version-analytics
Add version tracking to prometheus metrics
2020-02-04 12:44:30 -05:00
Jack Robison
bf4cbe1204
add version to session count metric 2020-02-04 12:18:32 -05:00
Jack Robison
28eee4da25
add version metrics to prometheus 2020-02-04 10:52:08 -05:00
Jack Robison
933ccf6deb
add block updates to prometheus 2020-02-04 10:26:22 -05:00
Jack Robison
9ac41322e5
add cpu count to prometheus 2020-02-04 10:25:40 -05:00
Jack Robison
c3233e03ef
Merge pull request #2762 from lbryio/fix-2368
cancel reflector uploads upon file delete
2020-02-03 23:51:12 -05:00
Jack Robison
111871bb28
update test 2020-02-03 23:00:45 -05:00
Jack Robison
bf5b5f43e3
non blocking analytics 2020-02-03 23:00:45 -05:00
Jack Robison
2ed8ebff09
handle ConnectionError and ValueError in blob sendfile 2020-02-03 23:00:45 -05:00
Jack Robison
34eb856d09
cancel reflector uploads upon file delete
-remove unnecessary db call in stream_update
2020-02-03 23:00:45 -05:00
Lex Berezhny
2d644bdfb0 v0.58.0 2020-02-03 22:33:03 -05:00
Lex Berezhny
741e5cf103 regenerated api docs 2020-02-03 22:30:39 -05:00
Lex Berezhny
d745c04fe6
Merge pull request #2726 from lbryio/add_video_transcoding
added video file validation and optimization when publishing streams (using `--validate_file` and `--optimize_file` arguments)
2020-02-03 22:29:15 -05:00
Lex Berezhny
3a3c63956a removed unnecessary try/finally 2020-02-03 22:06:15 -05:00
Lex Berezhny
153bdf576a removed wheel from setup.py 2020-02-03 22:05:47 -05:00
Lex Berezhny
6525ee6510 moved __init__ to top of class 2020-02-03 22:05:23 -05:00
Brannon King
1780ddd329 added ffmpeg status, addressed items from code review
linter
2020-02-03 21:53:42 -05:00
Brannon King
85ad972ca8 return string instead of path object for compatibility with hachoir 2020-02-03 21:53:42 -05:00
Brannon King
a90b60799a Fixed check_video.py on Windows
using a cross-platform workaround
fixed proactor use in the SDK
fixed linter
2020-02-03 21:53:42 -05:00
Brannon King
47e8f74da9 changed to list append, relative claim test 2020-02-03 21:53:42 -05:00
Brannon King
fac28072ab added unit tests, other minor fixes
added universe
try again
try 4
2020-02-03 21:53:42 -05:00
Brannon King
ab77541f36 in progress on video transcoding
works
Fixing lint tests
remove eval
2020-02-03 21:53:42 -05:00
Lex Berezhny
9eb769f340
Merge pull request #2760 from lbryio/channel_hash_wallet_server_idx
added database index for several `channel_hash` based queries
2020-02-03 20:56:48 -05:00
Lex Berezhny
1e81d6f848 added database index for various channel_hash based queries 2020-02-03 20:28:39 -05:00
Jack Robison
dc393f4b77
Merge pull request #2761 from lbryio/catch-notification-timeout
catch TimeoutError when attempting to send notifications
2020-02-03 19:03:07 -05:00
Jack Robison
dd1de530c3
catch TimeoutError when attempting to send notifications 2020-02-03 18:46:17 -05:00
Lex Berezhny
74cd887249 v0.57.0 2020-02-03 13:41:33 -05:00
Lex Berezhny
b40bb35652 change MINIMUM_REQUIRED on client for servers it connects to 2020-02-03 13:41:05 -05:00
Lex Berezhny
dfe257af86 Revert "test to make sure next release tests pass"
This reverts commit ebb2a09107.
2020-02-03 13:39:36 -05:00
Lex Berezhny
ebb2a09107 test to make sure next release tests pass 2020-02-03 13:22:33 -05:00
Lex Berezhny
c157909b4e
Merge pull request #2758 from ykris45/patch-6
update copyright year in license
2020-02-02 20:19:33 -05:00
YULIUS KURNIAWAN KRISTIANTO
a744239b22
update year 2020-02-03 04:04:51 +07:00
Lex Berezhny
9b4417be87
Merge pull request #2757 from lbryio/resolve_for_local_list
added `--resolve` to local `claim_list`/`stream_list`/`channel_list` commands which returns more metadata for local claims
2020-02-01 18:15:35 -05:00
Lex Berezhny
84f807e278 regenerate docs 2020-02-01 17:59:57 -05:00
Lex Berezhny
448635a945 added --resolve to local *_list commands 2020-02-01 17:59:10 -05:00
Lex Berezhny
638e3e6b3d
Merge pull request #2756 from lbryio/blocked_resolve
resolve errors make distinction between truely not found claims and claims which were censored by wallet server
2020-02-01 14:00:37 -05:00
Lex Berezhny
bfe711bd42 fixing tests 2020-02-01 13:29:55 -05:00
Lex Berezhny
d5eed91e7f fix sql unit tests 2020-02-01 13:03:39 -05:00
Lex Berezhny
15abf49211 censored resolve responses return appropriate error 2020-02-01 12:53:39 -05:00
Lex Berezhny
b7eec0586c added ResolveCensoredError 2020-02-01 12:49:01 -05:00
Lex Berezhny
e639124a69
Merge pull request #2755 from eggplantbren/master
fixed initialization problem with ar trending algorithm
2020-01-30 13:44:25 -05:00
Brendon J. Brewer
22bca7a16e Length zero could also just mean there aren't any claims 2020-01-31 07:34:43 +13:00
Lex Berezhny
6f66b354e5
Merge pull request #2754 from lbryio/feat-claim_search_duration
add duration to claim search params
2020-01-29 16:43:34 -05:00
jessop
f98e3320ae make review changes 2020-01-29 16:25:04 -05:00
jessop
73c958222b add duration to claim search params 2020-01-29 15:46:47 -05:00
Lex Berezhny
e43230e46a v0.56.0 2020-01-28 19:58:28 -05:00
Lex Berezhny
dd07bc29eb
Merge pull request #2747 from eggplantbren/master
new trending algorithm contributed by Brendon Brewer, to enable on wallet server set `TRENDING_ALGORITHMS` environment variable on wallet server to `ar`
2020-01-28 19:36:52 -05:00
Lex Berezhny
790bbfcc99 integrate ar trending algorithm into configurable trending system 2020-01-28 18:11:32 -05:00
Brendon J. Brewer
994e70d43a ar trending algorithm 2020-01-28 18:11:32 -05:00
Jack Robison
7d5e19716a
Merge pull request #2749 from lbryio/prometheus-interrupts
add metrics to wallet server prometheus
2020-01-28 16:31:10 -05:00
Jack Robison
11530c675d
initialize_request_handlers, remove local rpc handlers 2020-01-28 15:54:22 -05:00
Jack Robison
1c474352fe
add wallet server prometheus metrics
-sessions gauge
-requests counter
-response times histogram
-notification counter
-request errors counter
-interrupt counter
-operational error counter
-internal error counter
-reader executor times histogram
-pending queries gauge
-lbrycrd request counter
-client versions counter
2020-01-28 15:54:22 -05:00
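A minimal prometheus_client sketch of the kinds of collectors listed above (names are illustrative, not the wallet server's actual metric names):

```python
from prometheus_client import Counter, Gauge, Histogram

SESSIONS = Gauge('sessions', 'connected client sessions')
REQUESTS = Counter('requests', 'rpc requests received', ['method'])
RESPONSE_TIMES = Histogram('response_time', 'rpc response times', ['method'])

def handle(method: str):
    REQUESTS.labels(method).inc()
    with RESPONSE_TIMES.labels(method).time():
        pass  # ... dispatch the actual rpc handler here
```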
Jack Robison
3b2d635390
Revert "Revert "configurable trending algorithms""
This reverts commit 55e08f96a6.
2020-01-28 15:53:57 -05:00
Jack Robison
055c532d31
Merge pull request #2752 from lbryio/revert-2750-configurable_trending
Revert "configurable trending algorithms"
2020-01-28 15:04:29 -05:00
Jack Robison
55e08f96a6
Revert "configurable trending algorithms" 2020-01-28 15:03:09 -05:00
Lex Berezhny
3e0a9180bc
Merge pull request #2750 from lbryio/configurable_trending
configurable trending algorithms
2020-01-28 12:30:26 -05:00
Lex Berezhny
01ed71b243 configurable trending 2020-01-28 11:58:27 -05:00
Lex Berezhny
595c058517 moved trending.py to trending/zscore.py 2020-01-28 11:58:27 -05:00
Alex Grintsvayg
e04b0a6995
add docker tag to prometheus version info 2020-01-27 14:16:18 -05:00
Jack Robison
8961848ab1
Merge pull request #2748 from lbryio/cached-wallet-id
faster wallet.id attribute, improves speed of selecting wallets during api calls
2020-01-27 13:58:23 -05:00
Jack Robison
6fb1c72b7d
cached wallet id 2020-01-27 13:57:21 -05:00
Alex Grintsvayg
400d79d6ab
prom port off by default 2020-01-27 13:55:44 -05:00
Lex Berezhny
ce6018f387 v0.55.0 2020-01-27 12:20:41 -05:00
Alex Grintsvayg
65d9dca917
Merge branch 'docker'
* docker:
  fix gitlab ci
  add min_version and method name to metrics
  include docker tag in build info
  build_type.py -> build_info.py
  add docker build info to wallet server image
2020-01-23 13:13:33 -05:00
Alex Grintsvayg
2232b08351
fix gitlab ci 2020-01-23 13:13:20 -05:00
Alex Grintsvayg
0732ab6ad5
add min_version and method name to metrics 2020-01-23 13:13:20 -05:00
Alex Grintsvayg
fb568768c5
include docker tag in build info 2020-01-23 13:13:19 -05:00
Alex Grintsvayg
7fd56e0add
build_type.py -> build_info.py 2020-01-23 13:13:19 -05:00
Alex Grintsvayg
38a3f8cf4c
add docker build info to wallet server image 2020-01-23 13:13:19 -05:00
Lex Berezhny
8d8a5b36b6
Merge pull request #2737 from lbryio/fix_repost_resolve_bug
fix bug where repost claims were not being fully resolved
2020-01-22 23:32:55 -05:00
Lex Berezhny
a70cc7beb1 fix bug where repost claims were not being fully resolved 2020-01-22 22:59:05 -05:00
Alex Grintsvayg
c5421699b1
add prometheus port to docker-compose.yml 2020-01-22 20:08:29 -05:00
Alex Grintsvayg
d9f809864f
include version and build info in prometheus output 2020-01-22 19:13:58 -05:00
Lex Berezhny
bb23f509d7 rename BLOCKING_CHANNELS_IDS/FILTERING_CHANNELS_IDS to remove double S 2020-01-22 11:02:01 -05:00
Alex Grin
254e184677
Merge pull request #2734 from lbryio/prom2
Add prometheus metrics collection to client and server (take two)
2020-01-22 10:28:54 -05:00
Alex Grintsvayg
6c07141abd
Add prometheus metrics collection to client and server 2020-01-22 10:28:17 -05:00
Lex Berezhny
5822fc1d5b v0.54.1 2020-01-22 08:40:35 -05:00
Lex Berezhny
2cd7ea257c Added support to differentiate between filtering and blocking for content censoring 2020-01-22 08:39:14 -05:00
Alex Grin
0a21b72f9c v0.54.0 2020-01-21 16:17:12 -05:00
Alex Grintsvayg
80280f6f4a
Revert "v0.54.0"
This reverts commit bd3b4906d1.
2020-01-21 15:34:35 -05:00
Alex Grintsvayg
810a8e76d1
Pin pip to 19.3.1 on Windows
pip 20 made the following change:

> Remove interpreter-specific major version tag e.g. cp3-none-any from consideration.
> https://github.com/pypa/pip/pull/7355

The coincurve pip package only has a major version tag for its windows
wheel. pip20 will not allow us to install that wheel, and compiling
coincurve from source causes errors. So we cannot use pip 20 or above
until coincurve fixes its compatibility tags, we can compile coincurve
from source, we stop using coincurve, or we figure out another way to
install it.

Note that on osx and linux, coincurve has the correct tags.
2020-01-21 15:24:29 -05:00
Lex Berezhny
bd3b4906d1 v0.54.0 2020-01-21 11:30:14 -05:00
Lex Berezhny
d9c740f014 update tests to use 0.54.0 minimum version 2020-01-21 11:28:56 -05:00
Alex Grintsvayg
ec63a18960
Revert "v0.54.0"
This reverts commit fab0618b6b.
2020-01-21 11:23:51 -05:00
Alex Grin
fab0618b6b v0.54.0 2020-01-21 11:00:38 -05:00
Alex Grintsvayg
13e57e7aa8
Revert "v0.54.0"
This reverts commit 836b66e110.
2020-01-21 10:43:21 -05:00
Alex Grintsvayg
1299c9162c
Revert "Add prometheus metrics collection to client and server"
This reverts commit 59a5bacb2e.
2020-01-21 10:30:49 -05:00
Alex Grintsvayg
a64f33dbcc
ignore coverage files 2020-01-21 10:30:48 -05:00
Alex Grin
836b66e110 v0.54.0 2020-01-20 14:26:36 -05:00
Alex Grintsvayg
802119d789
Revert "v0.54.0"
This reverts commit aec4d1dc60.
2020-01-20 14:24:30 -05:00
Alex Grintsvayg
f94135cadd
make pycharm happy about long lines 2020-01-20 14:18:00 -05:00
Alex Grintsvayg
474c0f980e
skip randomly-failing dht test 2020-01-20 14:16:52 -05:00
Alex Grintsvayg
2cf0f791f2
remove unused import 2020-01-20 13:44:49 -05:00
Alex Grin
aec4d1dc60 v0.54.0 2020-01-20 13:13:30 -05:00
Alex Grintsvayg
8d28bb5a66
bump min protocol version 2020-01-20 12:53:10 -05:00
Alex Grintsvayg
1d5e553f9c
Merge branch 'blocked_search_metadata'
* blocked_search_metadata:
  add index for claim_type and release_time
  fix test
  fix json api generator
  add pagination for claim_search
  regenerate protobufs
  using multiprocessing.Manager to keep blocked content synced between readers
2020-01-20 12:51:02 -05:00
Lex Berezhny
9d79c52d20
add index for claim_type and release_time 2020-01-20 12:50:20 -05:00
Lex Berezhny
1ac03e2f1d
fix test 2020-01-20 12:50:16 -05:00
Lex Berezhny
801f05f45e
fix json api generator 2020-01-20 12:50:16 -05:00
Lex Berezhny
345196aa3b
add pagination for claim_search 2020-01-20 12:49:42 -05:00
Lex Berezhny
a01b52421e
regenerate protobufs 2020-01-20 12:49:41 -05:00
Lex Berezhny
86cedfe8b2
using multiprocessing.Manager to keep blocked content synced between readers 2020-01-20 12:49:41 -05:00
Alex Grintsvayg
709128225b
Merge branch 'prometheus'
* prometheus:
  Add prometheus metrics collection to client and server
2020-01-20 12:45:11 -05:00
Alex Grintsvayg
59a5bacb2e
Add prometheus metrics collection to client and server 2020-01-20 12:43:49 -05:00
Jack Robison
3a21df31ee v0.53.3 2020-01-16 16:43:26 -05:00
Jack Robison
0e3407ec2f
Merge pull request #2730 from lbryio/bump-aioupnp2
update aioupnp requirement to 0.0.17
2020-01-16 16:00:42 -05:00
Jack Robison
fd6609e961
bump aioupnp requirement to 0.0.17 2020-01-16 15:40:29 -05:00
Jack Robison
b0b5e045ff
non blocking aioupnp success analytics 2020-01-16 15:40:24 -05:00
Jack Robison
f293d7cccc
pin windows pyinstaller 2020-01-16 15:02:15 -05:00
Jack Robison
974ed29c36
Merge pull request #2729 from lbryio/fix-headers-subscription-to-old
disconnect from server that returns rpc error for server.version
2020-01-15 17:59:17 -05:00
Jack Robison
53eb033034
disconnect from server that returns rpc error for server.version
-the server returns this error if it thinks we are incompatible with it
2020-01-15 17:06:13 -05:00
Jack Robison
f2545b98ab
Merge pull request #2728 from lbryio/check-server-version-from-client
check minimum server version from wallet client
2020-01-15 16:53:10 -05:00
Jack Robison
1bf51e855d
check minimum server version from wallet client 2020-01-15 15:55:39 -05:00
Jack Robison
e4da2a695b v0.53.2 2020-01-14 16:47:56 -05:00
Lex Berezhny
1a905d9a42 make --start-tag work again in release/script.py 2020-01-13 21:40:07 -05:00
Victor Shyba
7d9d0c15d3 fix test_direct_sync flakiness 2020-01-13 21:17:10 -05:00
Victor Shyba
d34f5c2712 fix None in history, check records after wait 2020-01-13 21:17:10 -05:00
Victor Shyba
a56dd66c98 separate cases and make it deterministic 2020-01-13 21:17:10 -05:00
Victor Shyba
3946bc6662 fix and document new edge case on tests 2020-01-13 21:17:10 -05:00
Victor Shyba
6647dd8f08 fix decrypting invalid bytes with valid padding 2020-01-13 21:17:10 -05:00
Jack Robison
9371122bed
Merge pull request #2725 from lbryio/stream-manager-startup
faster stream manager startup
2020-01-13 18:09:49 -05:00
Jack Robison
734cd8ee08
logging 2020-01-13 17:51:05 -05:00
Jack Robison
d85d9d05bb
fast get_all_lbry_files 2020-01-13 17:51:05 -05:00
Jack Robison
1d7b87b3a9
fix inefficient loop in update_manually_removed_files_since_last_run 2020-01-13 17:51:02 -05:00
Lex Berezhny
f0cfde36f2 v0.53.1 2020-01-13 14:12:30 -05:00
Lex Berezhny
7e220d2741 downgrade setuptools 2020-01-13 14:03:45 -05:00
Lex Berezhny
c4b1351a43 stop building on travis 2020-01-13 14:03:45 -05:00
Lex Berezhny
90d2597bc3 pin to pyinstaller==3.5 2020-01-13 14:03:45 -05:00
Lex Berezhny
c1508c94c2 v0.53.0 2020-01-13 12:23:41 -05:00
Jack Robison
1a0d805bad
Merge pull request #2720 from lbryio/loggly-database-error
fix database error caused by logging tracebacks from within the sqlite transaction runner
2020-01-12 16:23:26 -05:00
Jack Robison
1a802469f5
missing fetchall() 2020-01-12 16:16:48 -05:00
Jack Robison
646ae7e2d1
catch RuntimeError in loggly handler to handle cases where there is not an event loop 2020-01-12 16:16:48 -05:00
Jack Robison
563d76ff9f
Merge pull request #2719 from lbryio/refactor_header_progress
refactor header progress calculation, making it simpler to understand and not prone to division by zero
2020-01-12 16:16:23 -05:00
Victor Shyba
54d0473e85 refactor header progress 2020-01-12 00:56:43 -03:00
Jack Robison
ec8a61c3af
Merge pull request #2718 from lbryio/add-connected-to-wallet-status
add `connected` server to wallet status
2020-01-11 01:11:34 -05:00
Jack Robison
0315eaff8a
add connected server to wallet status 2020-01-11 00:33:52 -05:00
Jack Robison
b34f44c867
logging 2020-01-11 00:17:32 -05:00
Jack Robison
ea7056835f
Merge pull request #2716 from lbryio/batched-address-subscriptions
speed up wallet sync and startup by batching address history subscriptions
2020-01-10 21:32:12 -05:00
Jack Robison
0bb4cdadd9
use network.subscribe_address 2020-01-10 13:57:52 -05:00
Jack Robison
08f6520557
bare excepts 2020-01-10 12:27:56 -05:00
Jack Robison
38b108752e
batched blockchain.address.subscribe 2020-01-09 23:40:04 -05:00
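The batching idea in these commits, sketched with a hypothetical `send_batch` session method: subscribe addresses a thousand at a time instead of paying one round trip per address.

```python
async def subscribe_addresses(session, addresses, batch_size=1000):
    statuses = {}
    for i in range(0, len(addresses), batch_size):
        batch = addresses[i:i + batch_size]
        # one round trip per batch instead of one per address
        results = await session.send_batch(
            ('blockchain.address.subscribe', [address]) for address in batch
        )
        statuses.update(zip(batch, results))
    return statuses
```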
Jack Robison
0ee7870bdf
defaultdict 2020-01-09 23:06:29 -05:00
Jack Robison
37d46ecdb2
fix looping over same things in _transaction_io 2020-01-09 23:06:29 -05:00
Oleg Silkin
2a7911c7d6 rename jsonrpc_comment_edit -> jsonrpc_comment_update 2020-01-09 19:28:45 -05:00
Oleg Silkin
fc2d9b4fd2 restart travis build 2020-01-09 19:28:45 -05:00
Oleg Silkin
832020fa81 Makes claim_id and parent_id mutually exclusive for comment create 2020-01-09 19:28:45 -05:00
Oleg Silkin
78606ed4b8 Generates docs for comments 2020-01-09 19:28:45 -05:00
Oleg Silkin
a04ba606e6 Adds test for comment_edit 2020-01-09 19:28:45 -05:00
Oleg Silkin
ebf2e7ee70 Better validation logic; 2020-01-09 19:28:45 -05:00
Oleg Silkin
b31881f424 Removes possibility of no channel being passed in & propagates errors 2020-01-09 19:28:45 -05:00
Oleg Silkin
c0a6f6fd08 Adds validator methods 2020-01-09 19:28:45 -05:00
Oleg Silkin
7e1b1ca730 Adds comment_edit command 2020-01-09 19:28:45 -05:00
Jack Robison
163d176f0a
Merge pull request #2710 from lbryio/fix-version-and-stop
Fix `version` and `stop` rpcs
2020-01-08 17:42:10 -05:00
Jack Robison
0f7eab5100
pylint 2020-01-08 12:44:22 -05:00
Jack Robison
cbf78d474a
fix version and stop rpcs 2020-01-08 12:44:22 -05:00
Victor Shyba
3615e0de34 cleanup leftover comment from review 2020-01-07 19:57:34 -05:00
Victor Shyba
e73c081d36 remove bandwidth_limit env option 2020-01-07 19:57:34 -05:00
Victor Shyba
663bf6e3af remove bandwidth_limit from docker file 2020-01-07 19:57:34 -05:00
Victor Shyba
60194cbafd remove bw_limit on server 2020-01-07 19:57:34 -05:00
Lex Berezhny
7d2eb5faf7 v0.52.0 2020-01-06 12:21:49 -05:00
Lex Berezhny
5969f3d213 revert version 2020-01-06 12:20:05 -05:00
Lex Berezhny
6ddde88389 update release script with new directory structure 2020-01-06 12:18:26 -05:00
Lex Berezhny
e9992ed6b4 fix test 2020-01-06 12:17:09 -05:00
Lex Berezhny
4db823a3be set version to 0.52.0 2020-01-06 12:17:09 -05:00
Lex Berezhny
78732513c8 set wallet server min/max 0.52.0-0.99.0 2020-01-06 12:17:09 -05:00
Jack Robison
79557bb878
Merge pull request #2704 from lbryio/disable-cryptonator
disable cryptonator feed
2020-01-03 16:18:05 -05:00
Jack Robison
4094b02ae9
update test 2020-01-03 16:00:00 -05:00
Jack Robison
9cc7c118a8
disable cryptonator feed 2020-01-03 15:28:29 -05:00
Lex Berezhny
d3dad51c76 fix import error in cli.py 2020-01-03 03:08:15 -05:00
Lex Berezhny
5bf35de955 fix blockchain integration tests 2020-01-03 03:08:15 -05:00
Lex Berezhny
f8928c654b do not reset callable_methods on Daemon class 2020-01-03 03:08:15 -05:00
Lex Berezhny
b3903f4ffd fix import in unit test 2020-01-03 03:08:15 -05:00
Lex Berezhny
46c3f76edc run all integration tests on travis until we drop it 2020-01-03 03:08:15 -05:00
Victor Shyba
3488408b7a add jobs=8 back to pylint 2020-01-03 03:08:15 -05:00
Victor Shyba
b7b164b84c fix comment alignment 2020-01-03 03:08:15 -05:00
Victor Shyba
7c353b7d76 too many \ 2020-01-03 03:08:15 -05:00
Lex Berezhny
3d9841b61a fix unit wallet tests 2020-01-03 03:08:15 -05:00
Lex Berezhny
87e2d2c7b0 re-enable pylint on CI 2020-01-03 03:08:15 -05:00
Lex Berezhny
ff59619af4 more pylint 2020-01-03 03:08:15 -05:00
Lex Berezhny
b79f2b86b5 renamed Daemon.py to daemon.py per pylint 2020-01-03 03:08:15 -05:00
Lex Berezhny
386fd7a459 updated imports after renaming component files 2020-01-03 03:08:15 -05:00
Lex Berezhny
d4f41901ef lower cased component file names per pylint 2020-01-03 03:08:15 -05:00
Lex Berezhny
f170da3e78 pylint fixes in lbry/extras/daemon 2020-01-03 03:08:15 -05:00
Lex Berezhny
5dc15be98a pylint lbry/extras/daemon/exchange_rate_manager.py 2020-01-03 03:08:15 -05:00
Lex Berezhny
88c7cfc745 pylint in progress in lbry/extras/daemon 2020-01-03 03:08:15 -05:00
Victor Shyba
867478697d lint blob/* 2020-01-03 03:08:15 -05:00
Lex Berezhny
d27e8cf73a pylint fix in lbry/error/generate.py 2020-01-03 03:08:15 -05:00
Victor Shyba
4490caa4f7 lint storage 2020-01-03 03:08:15 -05:00
Lex Berezhny
1b25f2c531 pylint fix in lbry/utils.py 2020-01-03 03:08:15 -05:00
Lex Berezhny
9d911d1fa0 convince pylint about descriptor return type in lbry/conf.py 2020-01-03 03:08:15 -05:00
Victor Shyba
494feb9f6d lint: lbry/stream/* 2020-01-03 03:08:15 -05:00
Lex Berezhny
2a04943a67 add l variable for lists to allowed good-names in pylint 2020-01-03 03:08:15 -05:00
Lex Berezhny
731b29ce91 progress on pylint in lbry/conf.py 2020-01-03 03:08:15 -05:00
Victor Shyba
28fbb70858 blob_exchange lint 2020-01-03 03:08:15 -05:00
Victor Shyba
44f402c64e delete cryptoutils 2020-01-03 03:08:15 -05:00
Victor Shyba
c7f391ca44 lint: finish dht parts 2020-01-03 03:08:15 -05:00
Lex Berezhny
5ed9c5e168 ignore pylint for winpaths.py 2020-01-03 03:08:15 -05:00
Lex Berezhny
51b1a86d19 pylint for lbry/connection_manager.py 2020-01-03 03:08:15 -05:00
Victor Shyba
20c46677d0 lint dht: datastore protocol 2020-01-03 03:08:15 -05:00
Lex Berezhny
efb5f232f7 fixed pylint for lbry/utils.py 2020-01-03 03:08:15 -05:00
Lex Berezhny
9edf9561b2 pylint 2020-01-03 03:08:15 -05:00
Lex Berezhny
5d4eb018ee pylint for lbry/testcase.py 2020-01-03 03:08:15 -05:00
Victor Shyba
10fbce056b dht constants -> CONSTANTS (linting) 2020-01-03 03:08:15 -05:00
Lex Berezhny
cbc6d6a572 pylint passes for lbry/wallet 2020-01-03 03:08:15 -05:00
Lex Berezhny
fb1af9e3d2 update imports and more merging 2020-01-03 03:08:15 -05:00
Lex Berezhny
c9e410a6f4 merged torba base classes with lbry sub-classes 2020-01-03 03:08:15 -05:00
Lex Berezhny
c8d72b59c0 fix readme urls 2020-01-01 16:02:16 -05:00
Lex Berezhny
cd71d4a2f7 remove double install of coverage package 2020-01-01 16:02:11 -05:00
Lex Berezhny
ed0cc59e66 fix cyclic import error 2020-01-01 15:57:56 -05:00
Lex Berezhny
d4f1b48f52 renamed integration tests to be clearer in gitlab UI 2020-01-01 15:57:56 -05:00
Lex Berezhny
08bec02170 added __init__ 2020-01-01 15:57:56 -05:00
Lex Berezhny
c9f27b83e1 split integration tests into three jobs 2020-01-01 15:57:56 -05:00
Lex Berezhny
8327585b3b fix set_build.py and other things 2020-01-01 15:57:56 -05:00
Lex Berezhny
727815d6dd updating scripts after moving lbry up one level 2020-01-01 15:57:56 -05:00
Lex Berezhny
2968f74c6c moved lbry up one level 2020-01-01 15:57:56 -05:00
Lex Berezhny
431499f43f do not install torba 2020-01-01 15:57:56 -05:00
Lex Berezhny
d809283d23 fix integration test 2020-01-01 15:57:56 -05:00
Lex Berezhny
b49275fe6b added support for --no-logging argument to avoid leaking loggers in tests 2020-01-01 15:57:56 -05:00
Lex Berezhny
87c7ce588e disabled cryptonator.com in tests, removed references to torba 2020-01-01 15:57:56 -05:00
Lex Berezhny
26d0a7c742 fix generate_json_api 2020-01-01 15:57:56 -05:00
Lex Berezhny
b01887c7ec fix two unit tests 2020-01-01 15:57:56 -05:00
Lex Berezhny
745031c020 orchstr8 works again, for now... 2020-01-01 15:57:56 -05:00
Lex Berezhny
3ccfc09e01 consolidated testcase 2020-01-01 15:57:56 -05:00
Lex Berezhny
e8750275c9 skip code quality checks, will fix later 2020-01-01 15:57:56 -05:00
Lex Berezhny
f0d7ea4cc6 updated files and scripts post torba merge 2020-01-01 15:57:56 -05:00
Lex Berezhny
ea5322af82 removing bitcoin support 2020-01-01 15:57:56 -05:00
Lex Berezhny
0b23f68fb2 merged torba into lbry 2020-01-01 15:57:56 -05:00
Lex Berezhny
1c00129f76 v0.51.2 2019-12-30 16:34:52 -05:00
Lex Berezhny
454700af05 support json in setting_set 2019-12-30 16:31:14 -05:00
Jack Robison
d9b482d90a
Merge pull request #2699 from lbryio/faster-account-startup
don't log account details on startup if there are more than 10
2019-12-30 16:26:36 -05:00
Jack Robison
09c2c97069
only log account details on startup if there are not very many 2019-12-30 16:22:41 -05:00
Lex Berezhny
88faf0ce78 v0.51.1 2019-12-30 15:11:29 -05:00
Jack Robison
aef83eec31
Merge pull request #2695 from lbryio/fix-12-to-13-migrator
Fix 12 to 13 migrator
2019-12-28 15:57:41 -05:00
Jack Robison
5bd222c266
less verbose log 2019-12-28 13:17:19 -05:00
Jack Robison
d3e7f789b2
uncaught exception 2019-12-28 13:17:19 -05:00
Jack Robison
87c6e292f1
fix wallet server SessionManager._get_info 2019-12-28 13:17:19 -05:00
Jack Robison
b597ad0b3f
fix unique constraint failure in migrate12to13 2019-12-28 13:17:19 -05:00
Alex Grintsvayg
40dd99dea3
changelog note 2019-12-28 12:39:46 -05:00
Lex Berezhny
6ea2686219 v0.51.0 2019-12-23 12:39:48 -05:00
Alex Grintsvayg
6bca90c4f7
allow wallet snapshots using different compression algos 2019-12-21 19:18:20 -05:00
Alex Grintsvayg
20fa7bd852
better server range for boris 2019-12-21 13:48:07 -05:00
Alex Grintsvayg
6b80119eff
drop travis badge 2019-12-21 13:29:09 -05:00
Alex Grintsvayg
1fc5469eb1
add gitlab badge 2019-12-21 13:26:38 -05:00
Alex Grintsvayg
bf2769a0c2
disable travis docker builds. docker builds trigger from docker hub 2019-12-21 13:06:03 -05:00
Victor Shyba
325b601bea enable WAL 2019-12-21 07:49:21 -05:00
Lex Berezhny
126642912e lint 2019-12-20 16:52:56 -05:00
Lex Berezhny
57ee16d565 change returned error data structure to be JSONRPC standard compliant 2019-12-20 16:52:56 -05:00
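For context on the two commits above: JSON-RPC 2.0 prescribes an error object with `code`, `message`, and an optional free-form `data` member, so a compliant response looks roughly like the sketch below (the contents of `data` are assumed for illustration, not the SDK's exact fields):
```python
# Shape of a JSON-RPC 2.0 compliant error response. The "data" payload is
# hypothetical; the spec itself only requires "code" and "message".
error_response = {
    "jsonrpc": "2.0",
    "error": {
        "code": -32500,  # application-defined error code
        "message": "human-readable description",
        "data": {  # optional, free-form per the spec
            "name": "InvalidPasswordError",  # assumed metadata field
            "traceback": [],                 # assumed metadata field
        },
    },
    "id": 1,
}
```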
Alex Grintsvayg
9f0fba063d
ensure asset exists before upload 2019-12-20 13:01:51 -05:00
Victor Shyba
757fb51415 make block_expected public 2019-12-20 12:30:16 -05:00
Victor Shyba
b1c5655138 __height -> _block_expected 2019-12-20 12:30:16 -05:00
Alex Grintsvayg
4d97dcaa93
remove extraneous apostrophe 2019-12-20 10:26:41 -05:00
Jack Robison
96db1b4685 v0.50.1 2019-12-19 22:09:19 -05:00
Jack Robison
9e21c52d04
Merge pull request #2685 from lbryio/fast_broadcast_notify
Update balance after a broadcast transaction is accepted to mempool without a delay, fix bug in `status` header progress
2019-12-19 22:07:16 -05:00
Victor Shyba
5fa801f9f2 broadcast_or_release uses None as timeout, unlike in tests 2019-12-19 23:42:31 -03:00
Victor Shyba
0301768b79 lint on tests 2019-12-19 22:58:36 -03:00
Victor Shyba
4c6dedfa4f fix last test 2019-12-19 22:34:10 -03:00
Victor Shyba
6652d55455 wip 2019-12-19 21:46:14 -03:00
Victor Shyba
3e03dd3e80 apply timeout and check for expected height on wait 2019-12-19 20:20:42 -03:00
Victor Shyba
84b8a22423 comment the broadcast interruption case 2019-12-19 16:20:12 -03:00
Victor Shyba
ada03e12fc proper check for restrictions on gettx 2019-12-19 15:13:13 -03:00
Victor Shyba
d66c801350 wake up mempool on broadcast 2019-12-19 15:13:13 -03:00
Victor Shyba
91846939f6 organize logic for when it's downloading 2019-12-18 19:09:33 -03:00
Jack Robison
c85127d76b
fix division by zero error 2019-12-18 10:44:16 -05:00
Lex Berezhny
166bf65e88 torba test_database fix 2019-12-17 23:48:07 -05:00
Lex Berezhny
f68bdc406f fix bug when two updates for the same claim happened in the same block 2019-12-17 23:48:07 -05:00
Lex Berezhny
8390482d6c aws does not use memoryview 2019-12-17 23:48:07 -05:00
Alex Grintsvayg
783f454f8e
dont include dist dir in windows zip 2019-12-16 20:32:42 -05:00
Alex Grintsvayg
86a67bd245
autopublish release and notify in slack 2019-12-16 16:40:56 -05:00
Alex Grintsvayg
099b0b65c0
run torba unit tests the way lex runs them 2019-12-16 12:59:15 -05:00
Lex Berezhny
505652efa0 v0.50.0 2019-12-16 11:33:13 -05:00
Victor Shyba
a20088330f use buffer write return as written 2019-12-16 11:31:28 -05:00
Jack Robison
a20e2504bb
disable timing bug in test_ping_queue_discover 2019-12-16 10:10:58 -05:00
Lex Berezhny
b07bc150e1 do not register fake component in real component registry 2019-12-15 02:39:17 -05:00
Lex Berezhny
46c6588aa1 fixup fake exchange rate manager for integration tests 2019-12-15 02:39:17 -05:00
Lex Berezhny
e36c672c9a pylint 2019-12-15 02:39:17 -05:00
Lex Berezhny
c044e1ea8c further refactor and simplification 2019-12-15 02:39:17 -05:00
Lex Berezhny
25b6c1b6ca renamed test_ExchangeRateManager to test_exchange_rate_manager 2019-12-15 02:39:17 -05:00
Lex Berezhny
c25d72d911 minor error class and type checking fixes 2019-12-15 02:39:17 -05:00
Miroslav Kovar
73613d1583 Refactor exchange rate manager 2019-12-15 02:39:17 -05:00
Lex Berezhny
1e6542d12d add --blocklist_channel_ids to claim_search command 2019-12-14 19:19:33 -05:00
Lex Berezhny
3cffaa43f7 remove invalid claim_id from default channel filter list 2019-12-14 19:19:33 -05:00
Lex Berezhny
08792e794f creating and abandoning a support in the same block no longer inserts the support 2019-12-14 18:19:06 -05:00
Lex Berezhny
7fa26d59da fixes for various edge cases where claim creation, updating and abandoning are occurring in the same block 2019-12-14 18:19:06 -05:00
Lex Berezhny
74469829e5 pylint 2019-12-14 15:47:28 -05:00
Lex Berezhny
ba414742a6 improved the data structure returned from RPC for errors by adding an error name and other metadata 2019-12-14 15:47:28 -05:00
Lex Berezhny
f6fcfb6dde simplified and only log potentially transactional functions 2019-12-13 18:44:32 -05:00
Oleg Silkin
c5b90c0144 Removes unused debug flag 2019-12-13 18:44:32 -05:00
Oleg Silkin
f669881849 Adds function, param logging to all lbrynet handling 2019-12-13 18:44:32 -05:00
Alex Grintsvayg
d0b70ea6df upload assets from gitlab CI to github on a tagged release
also disables travis asset uploading
2019-12-13 13:35:40 -05:00
Lex Berezhny
51af43d492 move none_check after fee update since that can be None 2019-12-13 13:23:23 -05:00
Lex Berezhny
8a49ad4586 catch invalid None value for all cases when creating/updating claims 2019-12-13 13:23:23 -05:00
gpjacobs
2ae068c2d2 Improving error handling for null values 2019-12-13 13:23:23 -05:00
Alex Grintsvayg
5e07e8dbbd
copy some of lex's changes to boris 2019-12-13 10:12:47 -05:00
Lex Berezhny
8d2c68505e remove VERBOSITY from test 2019-12-12 13:25:27 -05:00
Lex Berezhny
44f1e7c401 test all possible P2SH and BECH32 payment combinations on wallet server and at least P2SH and BECH32 to P2PKH receipt on client address 2019-12-12 13:25:27 -05:00
Lex Berezhny
fd2945469e pylint 2019-12-12 13:25:27 -05:00
Lex Berezhny
b0f0827cff client side segwit support 2019-12-12 13:25:27 -05:00
Lex Berezhny
c1e2c415c4 TaskGroup cannot work without event loop 2019-12-11 22:48:39 -05:00
Victor Shyba
a258b96602 simplify test 2019-12-11 22:48:39 -05:00
Victor Shyba
82ef76b1cb set regardless, there is no need for the check 2019-12-11 22:48:39 -05:00
Victor Shyba
aaa9982932 missing test file 2019-12-11 22:48:39 -05:00
Victor Shyba
1cdb4bc9e7 task group: set done if empty 2019-12-11 22:48:39 -05:00
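The TaskGroup commits above deal with two edge cases: an empty group should already count as done, and loop-bound primitives must not be created before an event loop exists. A minimal sketch of that behavior (an illustration of the idea, not the SDK's actual TaskGroup):
```python
import asyncio

class TaskGroup:
    """Illustrative sketch: track tasks and signal `done` once the group is empty.
    Instantiate from within a running event loop."""

    def __init__(self):
        self._tasks = set()
        self.done = asyncio.Event()
        self.done.set()  # "set done if empty": a group holding no tasks is done

    def add(self, coro) -> asyncio.Task:
        task = asyncio.ensure_future(coro)
        self._tasks.add(task)
        self.done.clear()
        task.add_done_callback(self._on_task_done)
        return task

    def _on_task_done(self, task):
        self._tasks.discard(task)
        if not self._tasks:
            self.done.set()
```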
Alex Grintsvayg
fddb0c9e66
notify us when boris is killed with signals 2019-12-11 14:02:39 -05:00
Lex Berezhny
5c78d2b6d2 install wget in Dockerfile 2019-12-11 13:08:02 -05:00
Victor Shyba
a4113cbb52 add apsw to pylint conf 2019-12-11 12:38:00 -05:00
Victor Shyba
afa3bda3c8 update tests 2019-12-11 12:06:41 -05:00
Victor Shyba
61d7edd15f change api name to wallet reconnect 2019-12-11 12:06:41 -05:00
Victor Shyba
c2106de7e1 refactor + tests 2019-12-11 12:06:41 -05:00
Victor Shyba
0bf1be6198 add wallet restart API 2019-12-11 12:06:41 -05:00
Lex Berezhny
af7c20e440 downgrade multidict to prevent segfault 2019-12-11 09:44:37 -05:00
Lex Berezhny
17771a2b89 added --initial-headers argument to lbrynet start command 2019-12-11 09:44:37 -05:00
Lex Berezhny
8840097fe2 test that creator can spend purchases received 2019-12-09 22:32:14 -05:00
Lex Berezhny
7d333efd45 utxo list includes purchases of your claims 2019-12-09 22:32:14 -05:00
Lex Berezhny
ec65815c57 add parsing for segwit outpoint 2019-12-09 20:59:31 -05:00
Lex Berezhny
640b0c71e6 v0.49.0 2019-12-09 13:05:08 -05:00
Victor Shyba
4208876ef3 add header sync to update tasks so it can be cancelled 2019-12-09 13:04:23 -05:00
Victor Shyba
bff9117360 progress from 0 2019-12-09 13:04:23 -05:00
Victor Shyba
6b3c4c70d2 dont timeout if data being transferred 2019-12-09 13:04:23 -05:00
Lex Berezhny
0292fd8d91 fix lint error 2019-12-08 22:40:06 -05:00
Lex Berezhny
94a41270d0 add InvalidPasswordError handling to more places 2019-12-08 22:40:06 -05:00
Lex Berezhny
75d78bfa53 added InvalidPasswordError code when password is invalid 2019-12-08 22:40:06 -05:00
Victor Shyba
88263db831 moar variables limit 2019-12-08 17:00:41 -05:00
Lex Berezhny
3ef96404ee use make install in Dockerfile 2019-12-08 17:00:41 -05:00
Lex Berezhny
13f9370f8c fix wallet server db writer test 2019-12-08 17:00:41 -05:00
Lex Berezhny
1c349270bf switch to apsw 2019-12-08 17:00:41 -05:00
Lex Berezhny
c28f0f6286 added apsw installation to tox 2019-12-08 17:00:41 -05:00
Lex Berezhny
b20939d211 added apsw installation 2019-12-08 17:00:41 -05:00
Lex Berezhny
b76c45fc2b drop jonathonf/backports sqlite3 PPA from .travis.yml since the package was removed 2019-12-08 17:00:41 -05:00
Lex Berezhny
1bbfccd082 refactored error codes generate script and error hierarchy 2019-12-06 10:45:23 -05:00
Lex Berezhny
cee7e06832 add configurable log level to exceptions, join message and description columns in error markdown table 2019-12-06 10:45:23 -05:00
Jack Robison
03dff4b382
fix build 2019-12-05 20:56:57 -05:00
Jack Robison
f546b20a01 v0.48.2 2019-12-04 13:10:40 -05:00
Jack Robison
0e8d9a264f
Merge pull request #2601 from mirgee/fix-2490
Persist and restore routing table
2019-12-03 15:44:01 -05:00
Jack Robison
66a4c98bee
update test 2019-12-03 13:11:34 -05:00
Miroslav Kovar
36101db500
Wait for routing table to clear with timeout 2019-12-03 13:06:50 -05:00
Miroslav Kovar
6bff298d1e
Add migrator for the new peer table 2019-12-03 13:06:50 -05:00
Jack Robison
c832f8ffbb
fix mock_network_loop param 2019-12-03 13:06:50 -05:00
Jack Robison
d7fe46dbde
fix drop/reconnect in test_losing_connection 2019-12-03 13:06:50 -05:00
Miroslav Kovar
5951186463
Minor changes
Fix typos
2019-12-03 13:06:50 -05:00
Miroslav Kovar
7191042bb8
Add new test, remove old test, change error type 2019-12-03 13:06:50 -05:00
Miroslav Kovar
a80fbcc252
Catch resolve timeouts 2019-12-03 13:06:50 -05:00
Miroslav Kovar
c321758afd
Rename, fix tests / deduplicate set_joined, resolve conditionally 2019-12-03 13:06:50 -05:00
Miroslav Kovar
ca8f59a643
Persist and restore routing table 2019-12-03 13:06:50 -05:00
Jack Robison
8110882617
Merge pull request #2650 from lbryio/fix-blob-request-loop
Fix blob request loop
2019-12-03 12:58:27 -05:00
Jack Robison
a283b66541
add timeout to file_save attempt starting
-remove test_unban_recovers_stream
2019-12-03 11:56:59 -05:00
Jack Robison
569de37e16
fix sendfile exceptions 2019-12-03 11:56:55 -05:00
Jack Robison
669f3394c7
fix requesting same blob over and over if only peer(s) say they don't have it 2019-12-03 11:56:52 -05:00
Alex Grin
87ec2c7378 v0.48.1 2019-12-02 12:39:41 -05:00
Jack Robison
ea7a31cad1
Merge pull request #2649 from lbryio/invalid-ips
Fix handling reserved ips
2019-12-01 18:06:32 -05:00
Jack Robison
c894ade25e
fix integration tests 2019-12-01 17:28:51 -05:00
Jack Robison
a3fe127a92
fix and test reserved ips 2019-12-01 17:04:03 -05:00
Jack Robison
7f526be879
Merge pull request #2648 from lbryio/fix-blob-udp-ping
Fix UDP ping port in blob peer accumulator
2019-12-01 17:00:12 -05:00
Jack Robison
71ae174e29
fix predicting udp port in _accumulate_peers_for_value
-add comments
-rename functions to be more descriptive of what they do
2019-12-01 16:21:15 -05:00
Jack Robison
2277d134cc
Merge pull request #2647 from lbryio/drain-iterative-findvalue
Request all pages of peers for a blob
2019-12-01 15:52:28 -05:00
Jack Robison
880aa265f1
drain all pages of peers returned by find_value in IterativeValueFinder 2019-12-01 14:53:39 -05:00
Alex Grintsvayg
ea639e64a4
add docker-compose file for wallet server 2019-11-27 10:18:39 -05:00
Lex Berezhny
b60fdee5e2 upgrade SDK to use lbrycrd v0.17.3.2 2019-11-26 21:31:31 -05:00
Alex Grintsvayg
44f07fb81d move build-related files into top-level dir 2019-11-26 18:28:58 -05:00
Alex Grintsvayg
60a85fadd3 no more codecov 2019-11-26 18:28:58 -05:00
Alex Grintsvayg
68367125e1 gitlab builds on windows
what's left:
  - github build progress notifications (need to upgrade to Gitlab Silver for this)
  - github releases
  - uploading coverage results to codecov.io
  - what directories to cache between builds
2019-11-26 18:28:58 -05:00
Alex Grintsvayg
6112f60681 gitlab ci setup
what works:
- tests
- linux and mac builds
- uploading builds to s3

what's left to do:
- uploading coverage results to codecov.io
- windows build
- github releases
- what directories to cache between builds
2019-11-26 18:28:58 -05:00
Alex Grintsvayg
ecc74e2ae5 if user has no Downloads dir on windows, use cwd 2019-11-26 18:28:58 -05:00
jessop
c2dae43965 response 2019-11-26 14:45:41 -05:00
jessop
6e3a25d255 status: wallet returns all servers rather than just available 2019-11-26 14:45:41 -05:00
jessop
4e13cf1d85 adds wallet server counts on startup 2019-11-26 14:45:41 -05:00
Lex Berezhny
0fee1897eb v0.48.0 2019-11-25 20:39:36 -05:00
Victor Shyba
9dd51467fe move lbry specific version to its place 2019-11-25 20:21:13 -05:00
Victor Shyba
cec6dca72f bump protocol version 2019-11-25 20:21:13 -05:00
Victor Shyba
58df2b9bcf ignore cancelled error 2019-11-25 16:07:03 -05:00
Victor Shyba
cc6bdd6295 sync fts only once 2019-11-25 16:07:03 -05:00
Victor Shyba
7a044f9f78 opening dbs can raise too 2019-11-25 16:07:03 -05:00
Victor Shyba
e77acde005 wip 2019-11-25 16:07:03 -05:00
Victor Shyba
ece5082096 enable callback tracebacks 2019-11-25 16:07:03 -05:00
Alex Grintsvayg
0723026a56
drop codecov 2019-11-25 12:52:30 -05:00
Lex Berezhny
0b7346f4a1 dropped OldWalletServerTransaction class in wallet server unit tests 2019-11-25 10:22:34 -05:00
Lex Berezhny
a357db1ff6 use correct segwit deserializer 2019-11-25 10:22:34 -05:00
Jack Robison
a2287f8d0a
Merge pull request #2637 from lbryio/fix-unhandled-download-errors
don't log tracebacks for expected download errors
2019-11-25 10:11:37 -05:00
Jack Robison
076decd6e9
don't log tracebacks for expected download errors 2019-11-25 09:47:42 -05:00
jessop
d40461de94 review changes 2019-11-22 22:28:55 -05:00
jessop
4b1492be0d return value and more tests 2019-11-22 22:28:55 -05:00
jessop
1676246743 cleanup 2019-11-22 22:28:55 -05:00
jessop
f7c9f6d142 adds settings_clear method and integration test 2019-11-22 22:28:55 -05:00
Alex Grintsvayg
6022339091
Revert "dont show loop blocking warnings when running tests in CI"
This reverts commit 25d97afa60.
2019-11-22 20:27:10 -05:00
Alex Grintsvayg
25d97afa60
dont show loop blocking warnings when running tests in CI 2019-11-22 11:55:04 -05:00
Alex Grintsvayg
df635b963c
enable passing args to tests 2019-11-21 15:31:49 -05:00
Lex Berezhny
6d0a054170 regenerate latest docs with claim_id argument to stream_repost 2019-11-20 10:56:42 -05:00
Lex Berezhny
177d3af77f regenerate latest docs 2019-11-20 10:39:17 -05:00
Lex Berezhny
35f71c619b fix unit tests 2019-11-19 22:26:30 -05:00
Lex Berezhny
31a80ccce4 minor fixups 2019-11-19 22:26:30 -05:00
Lex Berezhny
3194cec8df added missing files 2019-11-19 22:26:30 -05:00
Lex Berezhny
57fd3c5801 updated code base to use generated errors 2019-11-19 22:26:30 -05:00
Lex Berezhny
86617f1bda added names to errors and script to generate error classes from README 2019-11-19 22:26:30 -05:00
Lex Berezhny
35f36c598c moved error.py to be a module 2019-11-19 22:26:30 -05:00
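The error-module commits above describe generating the exception hierarchy from the table in the errors README. A rough sketch of that approach (the table layout and emitted class shape here are assumptions, not the actual `lbry/error/generate.py`):
```python
# Hypothetical sketch: emit exception classes from markdown table rows such as
# | 401 | InvalidPassword | Password is invalid. |
def generate_error_classes(markdown: str) -> str:
    classes = []
    for line in markdown.splitlines():
        if not line.startswith('|') or set(line) <= {'|', '-', ' '}:
            continue  # skip prose and the |---|---| separator row
        cells = [cell.strip() for cell in line.strip('|').split('|')]
        if len(cells) < 3 or not cells[0].isdigit():
            continue  # skip the header row and malformed rows
        code, name, message = cells[0], cells[1], cells[2]
        classes.append(
            f"class {name}Error(BaseError):\n"
            f"    code = {code}\n\n"
            f"    def __init__(self):\n"
            f"        super().__init__(\"{message}\")\n"
        )
    return "\n\n".join(classes)
```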
Lex Berezhny
36bd7816fe add handling for invalid claims in calculate_reposts 2019-11-18 17:07:46 -05:00
Lex Berezhny
fd632392d4 full reposted_claim data returned for claim_search and resolve 2019-11-18 17:07:46 -05:00
Victor Shyba
423b48866f apply refactor suggested on review by removing .out calls 2019-11-18 17:07:46 -05:00
Victor Shyba
745bc24343 add a test for normal repost mixed with blocking repost, use inner queries 2019-11-18 17:07:46 -05:00
Victor Shyba
04a823c7d0 FILTERING_CHANNELS_IDS on claim search 2019-11-18 17:07:46 -05:00
Victor Shyba
56af4c2fcb minor fixes from rebase 2019-11-18 17:07:46 -05:00
Victor Shyba
77daf6812a add config option for filtering channels 2019-11-18 17:07:46 -05:00
Victor Shyba
c7e964ec42 add reposted_claim_hash column, writer and reader w/ tests 2019-11-18 17:07:46 -05:00
Victor Shyba
694e2c2a4f improve test cases for reposts, add a new one for reverse search 2019-11-18 17:07:46 -05:00
Victor Shyba
df6537fae8 add API for reposting 2019-11-18 17:07:46 -05:00
Lex Berezhny
71f6542571 v0.47.0 2019-11-18 10:57:58 -05:00
Victor Shyba
5a6294c08e test errors aren't trapped 2019-11-18 10:57:24 -05:00
Victor Shyba
f0390786d6 avoid trapping errors on client 2019-11-18 10:57:24 -05:00
Jack Robison
719d18c670
Merge pull request #2620 from lbryio/torrent-db-support
Add torrent support to SQLiteStorage
2019-11-15 18:34:00 -05:00
Jack Robison
a116dcd3bb
add SQLiteStorage.delete_torrent 2019-11-15 16:53:19 -05:00
Jack Robison
e2dd3dcf88
get_claims_from_torrent_info_hashes 2019-11-15 16:53:19 -05:00
Jack Robison
822f0c3cb2
add migrate12to13 script 2019-11-15 16:53:14 -05:00
Jack Robison
63fb39016b
add torrent db tables
-decouple file table from stream table
2019-11-15 15:25:33 -05:00
Jack Robison
e79f8d58ca
expose bt_infohash in Claim 2019-11-15 15:25:33 -05:00
Lex Berezhny
a469dfb583 stop signing support tx too many times 2019-11-15 14:16:54 -05:00
Victor Shyba
6cbc545f84 fix .connect injected method 2019-11-15 13:59:28 -05:00
Victor Shyba
ea1e24d8f9 refactor from review 2019-11-15 13:59:28 -05:00
Victor Shyba
6fcb4d93c9 header component now shows header synchronization progress 2019-11-15 13:59:28 -05:00
Victor Shyba
36031c9d0f fix test, off by 1 2019-11-15 13:59:28 -05:00
Victor Shyba
a70f5e2440 update block status during download 2019-11-15 13:59:28 -05:00
Victor Shyba
6c42aab567 stop spurious warning from wrong check 2019-11-15 13:59:28 -05:00
Victor Shyba
27631a1746 test initial header sync separately 2019-11-15 13:59:28 -05:00
Victor Shyba
320f5cebc7 cleanup 2019-11-15 13:59:28 -05:00
Victor Shyba
ea2a583803 wip 2019-11-15 13:59:28 -05:00
Victor Shyba
9965801258 base64 headers 2019-11-15 13:59:28 -05:00
Victor Shyba
24ed0521c7 checkpoints 2019-11-15 13:59:28 -05:00
Victor Shyba
d8fed79810 remove headers component 2019-11-15 13:59:28 -05:00
Victor Shyba
c30db15efa faster header download 2019-11-15 13:59:28 -05:00
Jack Robison
817869b915
Merge pull request #2623 from lbryio/aioupnp-0.0.16
bump aioupnp to 0.0.16, fixes UPnP support for TP-Link routers
2019-11-15 11:50:59 -05:00
Jack Robison
3038dd9440
bump aioupnp -> 0.0.16 2019-11-15 11:24:49 -05:00
Lex Berezhny
276b2c737a pylint 2019-11-14 18:53:20 -05:00
Lex Berezhny
62543b94c9 minor cleanup of collections implementation 2019-11-14 18:53:20 -05:00
jessop
4d607ed276 review response 2019-11-14 18:53:20 -05:00
jessop
26e2865e35 cleanup 2019-11-14 18:53:20 -05:00
jessop
f7c94c9eb5 collection resolve endpoint 2019-11-14 18:53:20 -05:00
jessop
25a91f89fd tests 2019-11-14 18:53:20 -05:00
jessop
246f055145 enables collections 2019-11-14 18:53:20 -05:00
Lex Berezhny
8c3bf0d4cb try ubuntu bionic on travis 2019-11-14 17:19:10 -05:00
Lex Berezhny
e49f24e95f added more tests to make sure stream update and abandon update the full text search indexes 2019-11-14 17:19:10 -05:00
Lex Berezhny
48a18ff771 full text search in wallet server 2019-11-14 17:19:10 -05:00
Alex Grintsvayg
37161057c6
plz explain errors 2019-11-14 09:52:30 -05:00
Alex Grintsvayg
68d1157495
catch the right error, boris 2019-11-13 17:58:47 -05:00
Alex Grintsvayg
19c3b2f4b6
add a latest-release tag to wallet server docker builds 2019-11-13 12:08:04 -05:00
Lex Berezhny
7051c332a1 v0.46.1 2019-11-12 11:26:24 -05:00
Lex Berezhny
0f2a0b7c97 added witness transaction parsing to torba parser 2019-11-12 11:24:42 -05:00
Lex Berezhny
bb6ef42d0c added support for lbrycrd v0.17.3.1 2019-11-12 11:24:42 -05:00
Jack Robison
3c591d4968
update time_to_first_bytes.py 2019-11-11 20:43:52 -05:00
Lex Berezhny
cf356a8e35 v0.46.0 2019-11-11 10:49:44 -05:00
Victor Shyba
3a0ce58cda send back correct server version 2019-11-11 10:14:00 -05:00
Lex Berezhny
2d7038dc18 fixed wallet balance for multiple accounts 2019-11-05 10:14:03 -05:00
Lex Berezhny
25a0e67841 added caching for account/wallet balance and removed --reserved_subtotals argument, instead always returning the subtotals 2019-11-05 10:14:03 -05:00
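After the change above, the balance call always includes the reserved breakdown; the response has roughly the shape below (values are made up, and the key set is recalled from the SDK docs rather than taken from this diff, so treat it as a sketch):
```python
# Approximate shape of a wallet/account balance response with subtotals.
balance = {
    "total": "10.0",
    "available": "8.0",
    "reserved": "2.0",
    "reserved_subtotals": {
        "claims": "1.0",    # LBC locked in claims
        "supports": "0.5",  # LBC locked in supports
        "tips": "0.5",      # LBC received as tips
    },
}
```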
Alex Grintsvayg
be64209292
less verbose snapshot download 2019-11-04 16:51:57 -05:00
Lex Berezhny
0006cb1c27 v0.45.0 2019-11-04 10:30:57 -05:00
Lex Berezhny
836d38ed2a added claim_trending_group_mixed_idx index to wallet server db 2019-11-04 10:28:22 -05:00
Thomas Zarebczan
ec71017785 update trending period
This gets us back to ~7 days, which was originally requested as part of this change.
2019-11-04 10:28:22 -05:00
Jack Robison
faf9449107
Merge pull request #2596 from lbryio/dont-log-incompatible-peer-error
don't log tracebacks for old peers failing to handle findValue requests
2019-11-01 13:13:38 -04:00
Jack Robison
f93bbd6bc3
improve reflector logging 2019-11-01 11:32:36 -04:00
Jack Robison
ca12d655ea
don't log tracebacks for old peers failing to handle findValue requests during blob announcement 2019-11-01 11:30:44 -04:00
Jack Robison
72e486791b
Merge pull request #2552 from lbryio/dht-extensions
Add optional extensions to DHT datagram format
2019-11-01 11:27:44 -04:00
Jack Robison
874c28bd88
add optional_fields to KademliaDatagramBase
-update KademliaDatagramBase.bencode and decode_datagram
2019-11-01 10:47:45 -04:00
Jack Robison
aa7c0a3544
test unused fields are backwards compatible 2019-11-01 10:45:12 -04:00
Lex Berezhny
3624a3b450 added --uri to purchase_list and file_list now includes purchase_receipt field 2019-10-30 23:00:45 -04:00
Lex Berezhny
c4c50699cc fix transaction_list for purchases correctly showing balance_delta 2019-10-30 23:00:45 -04:00
Lex Berezhny
6a3d760cfe fix for integration test 2019-10-30 23:00:45 -04:00
Lex Berezhny
8ceaf66a81 pylint fixes 2019-10-30 23:00:45 -04:00
Lex Berezhny
fdd2562f32 added purchase_list and purchase_create 2019-10-30 23:00:45 -04:00
Lex Berezhny
28457021f8 figure out why time to first byte duration went up 2019-10-30 23:00:45 -04:00
Lex Berezhny
459716bd6e initial RETURN_OP based purchase metadata 2019-10-30 23:00:45 -04:00
Lex Berezhny
41e0e6762c added wallet management tests and address unsubscribe feature on wallet server 2019-10-30 17:59:12 -04:00
Lex Berezhny
a5ee60c1c9 subscribe accounts on wallet_add 2019-10-30 17:59:12 -04:00
Victor Shyba
1391c1d607 remove bandwidth and subscriptions limits from wallet server 2019-10-29 19:51:26 -04:00
Lex Berezhny
8e5d47061f check txo.has_address before calling txo.get_address() 2019-10-29 11:53:34 -04:00
461 changed files with 37313 additions and 35424 deletions

206
.github/workflows/main.yml vendored Normal file

@ -0,0 +1,206 @@
name: ci
on: ["push", "pull_request", "workflow_dispatch"]

jobs:

  lint:
    name: lint
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - run: pip install --user --upgrade pip wheel
      - run: pip install -e .[lint]
      - run: make lint

  tests-unit:
    name: "tests / unit"
    strategy:
      matrix:
        os:
          - ubuntu-20.04
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: set pip cache dir
        shell: bash
        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - id: os-name
        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
      - run: python -m pip install --user --upgrade pip wheel
      - if: startsWith(runner.os, 'linux')
        run: pip install -e .[test]
      - if: startsWith(runner.os, 'linux')
        env:
          HOME: /tmp
        run: make test-unit-coverage
      - if: startsWith(runner.os, 'linux') != true
        run: pip install -e .[test]
      - if: startsWith(runner.os, 'linux') != true
        env:
          HOME: /tmp
        run: coverage run --source=lbry -m unittest tests/unit/test_conf.py
      - name: submit coverage report
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: tests-unit-${{ steps.os-name.outputs.lowercase }}
          COVERALLS_PARALLEL: true
        run: |
          pip install coveralls
          coveralls --service=github

  tests-integration:
    name: "tests / integration"
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        test:
          - datanetwork
          - blockchain
          - claims
          - takeovers
          - transactions
          - other
    steps:
      - name: Configure sysctl limits
        run: |
          sudo swapoff -a
          sudo sysctl -w vm.swappiness=1
          sudo sysctl -w fs.file-max=262144
          sudo sysctl -w vm.max_map_count=262144
      - name: Runs Elasticsearch
        uses: elastic/elastic-github-actions/elasticsearch@master
        with:
          stack-version: 7.12.1
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - if: matrix.test == 'other'
        run: |
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends ffmpeg
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ./.tox
          key: tox-integration-${{ matrix.test }}-${{ hashFiles('setup.py') }}
          restore-keys: tox-integration-${{ matrix.test }}-
      - run: pip install tox coverage coveralls
      - if: matrix.test == 'claims'
        run: rm -rf .tox
      - run: tox -e ${{ matrix.test }}
      - name: submit coverage report
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: tests-integration-${{ matrix.test }}
          COVERALLS_PARALLEL: true
        run: |
          coverage combine tests
          coveralls --service=github

  coverage:
    needs: ["tests-unit", "tests-integration"]
    runs-on: ubuntu-20.04
    steps:
      - name: finalize coverage report submission
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          pip install coveralls
          coveralls --service=github --finish

  build:
    needs: ["lint", "tests-unit", "tests-integration"]
    name: "build / binary"
    strategy:
      matrix:
        os:
          - ubuntu-20.04
          - macos-latest
          - windows-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - id: os-name
        uses: ASzc/change-string-case-action@v5
        with:
          string: ${{ runner.os }}
      - name: set pip cache dir
        shell: bash
        run: echo "PIP_CACHE_DIR=$(pip cache dir)" >> $GITHUB_ENV
      - name: extract pip cache
        uses: actions/cache@v3
        with:
          path: ${{ env.PIP_CACHE_DIR }}
          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
          restore-keys: ${{ runner.os }}-pip-
      - run: pip install pyinstaller==4.6
      - run: pip install -e .
      - if: startsWith(github.ref, 'refs/tags/v')
        run: python docker/set_build.py
      - if: startsWith(runner.os, 'linux') || startsWith(runner.os, 'mac')
        name: Build & Run (Unix)
        run: |
          pyinstaller --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet --version
      - if: startsWith(runner.os, 'windows')
        name: Build & Run (Windows)
        run: |
          pip install pywin32==301
          pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico --onefile --name lbrynet lbry/extras/cli.py
          dist/lbrynet.exe --version
      - uses: actions/upload-artifact@v3
        with:
          name: lbrynet-${{ steps.os-name.outputs.lowercase }}
          path: dist/

  release:
    name: "release"
    if: startsWith(github.ref, 'refs/tags/v')
    needs: ["build"]
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/download-artifact@v2
      - name: upload binaries
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_API_TOKEN }}
        run: |
          pip install githubrelease
          chmod +x lbrynet-macos/lbrynet
          chmod +x lbrynet-linux/lbrynet
          zip --junk-paths lbrynet-mac.zip lbrynet-macos/lbrynet
          zip --junk-paths lbrynet-linux.zip lbrynet-linux/lbrynet
          zip --junk-paths lbrynet-windows.zip lbrynet-windows/lbrynet.exe
          ls -lh
          githubrelease release lbryio/lbry-sdk info ${GITHUB_REF#refs/tags/}
          githubrelease asset lbryio/lbry-sdk upload ${GITHUB_REF#refs/tags/} \
            lbrynet-mac.zip lbrynet-linux.zip lbrynet-windows.zip
          githubrelease release lbryio/lbry-sdk publish ${GITHUB_REF#refs/tags/}

22
.github/workflows/release.yml vendored Normal file

@ -0,0 +1,22 @@
name: slack
on:
  release:
    types: [published]

jobs:
  release:
    name: "slack notification"
    runs-on: ubuntu-20.04
    steps:
      - uses: LoveToKnow/slackify-markdown-action@v1.0.0
        id: markdown
        with:
          text: "There is a new SDK release: ${{github.event.release.html_url}}\n${{ github.event.release.body }}"
      - uses: slackapi/slack-github-action@v1.14.0
        env:
          CHANGELOG: '<!channel> ${{ steps.markdown.outputs.text }}'
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_RELEASE_BOT_WEBHOOK }}
        with:
          payload: '{"type": "mrkdwn", "text": ${{ toJSON(env.CHANGELOG) }} }'

20
.gitignore vendored

@ -1,2 +1,22 @@
 /.idea
 /.DS_Store
+/build
+/dist
+/.tox
+/.coverage*
+/lbry-venv
+/venv
+/lbry/blockchain
+lbry.egg-info
+__pycache__
+_trial_temp/
+trending*.log
+/tests/integration/claims/files
+/tests/.coverage.*
+/lbry/wallet/bin
+/.vscode
+/.gitignore

.travis.yml

@ -1,146 +0,0 @@
dist: xenial
language: python
python: "3.7"

jobs:
  include:

    - stage: code quality
      name: "pylint & mypy"
      install:
        - make install
      script: make lint

    - stage: test
      name: "LBRY Unit Tests"
      install:
        - make install
      script:
        - cd lbry && HOME=/tmp coverage run -p --source=lbry -m unittest discover -vv tests.unit
      after_success:
        - coverage combine lbry/
        - bash <(curl -s https://codecov.io/bash)

    - name: "LBRY Integration Tests"
      install:
        - pip install coverage tox-travis
        - sudo mount -o mode=1777,nosuid,nodev -t tmpfs tmpfs /tmp
      script: cd lbry && tox
      after_success:
        - coverage combine lbry
        - bash <(curl -s https://codecov.io/bash)

    - &torba-tests
      name: "Torba Unit Tests"
      env: TESTTYPE=unit
      install:
        - pip install coverage tox-travis
      script: cd torba && tox
      after_success:
        - coverage combine torba/tests
        - bash <(curl -s https://codecov.io/bash)

    - <<: *torba-tests
      name: "Torba Integration Tests"
      env: TESTTYPE=integration

    - name: "Run Examples"
      install:
        - make install
      script:
        - cd lbry && HOME=/tmp coverage run -p --source=lbry scripts/generate_json_api.py
      after_success:
        - coverage combine lbry
        - bash <(curl -s https://codecov.io/bash)

    - &build
      stage: build
      name: "Linux"
      env: OS=linux
      install:
        - pip install pyinstaller awscli
        - cd torba && pip install -e . && cd ..
        - cd lbry
        - python scripts/set_build.py
        - pip install -e .
      script:
        - pyinstaller -F -n lbrynet lbry/extras/cli.py
        - cd dist
        - chmod +x lbrynet
        - zip -j lbrynet-${OS}.zip lbrynet
        - shasum -a 256 -b lbrynet-${OS}.zip
        - ./lbrynet --version
      after_success:
        - aws configure set aws_access_key_id $ARTIFACTS_KEY
        - aws configure set aws_secret_access_key $ARTIFACTS_SECRET
        - aws configure set region us-east-1
        - export S3_PATH="daemon/build-${TRAVIS_BUILD_NUMBER}_commit-${TRAVIS_COMMIT:0:7}_branch-${TRAVIS_BRANCH}$([ ! -z ${TRAVIS_TAG} ] && echo _tag-${TRAVIS_TAG})"
        - aws s3 cp lbrynet-${OS}.zip s3://build.lbry.io/${S3_PATH}/lbrynet-${OS}.zip
      deploy:
        provider: releases
        api_key:
          secure: "unnR+aSJ1937Cl1PyBBZzGuZvV5W5TGcXELhXTgyOeeI6FgO/j80qmbNxJDA7qdFH/hvVicQFWoflhZu2dxN5rYP5BQJW3q3XoOLY3XAc1s1vicFkwqn3TIfdFiJTz+/D9eBUBBhHKeYFxm3M+thvklTLgjKl6fflh14NfGuNTevK9yQke8wewW3f9UmFTo1qNOPF1OsTZRbwua6oQYa59P+KukoPt4Dsu1VtILtTkj7hfEsUL79cjotwO3gkhYftxbl/xeDSZWOt+9Nhb8ZKmQG/uDx4JiTMm5lWRk4QB7pUujZ1CftxCYWz/lJx9nuJpdCOgP624tcHymErNlD+vGLwMTNslcXGYkAJH6xvGyxBJ+Obc8vRVnZbRM26BfH34TcPK1ueRxHSrDUbzMIIUsgcoZAxBuim8uDPp+K7bGqiygzSs2vQfr9U5Jhe9/F8sPdtNctfJZEfgmthNTeVFjyNsGIfIt754uGSfACqM7wDLh6fbKx7M+FHlNyOdvYCrbKUOAYXmTikYIpVDvlaaeMO+N+uW8Rhvm1j+JU7CVwhMavLySaPVc6Dt5OxiMMmxw9mVrjW9bBPjS5AkrS5MOA13T5wapoLzH6+gE92U4HzA6ilMcwRaQPSFnK2JU7tzyt2Wy1PH4MjHowXI2WyICG1x510dD3tX1P/1px8ro="
        file: lbrynet-${OS}.zip
        skip_cleanup: true
        overwrite: true
        draft: true
        on:
          tags: true

    - <<: *build
      name: "Mac"
      os: osx
      osx_image: xcode8.3
      language: shell
      env: OS=mac
      before_install:
        - brew uninstall mercurial
        - brew upgrade python || true
        - pip3 install --user --upgrade pip virtualenv
        - /Users/travis/Library/Python/3.7/bin/virtualenv --clear $HOME/venv
        - source $HOME/venv/bin/activate
      before_cache:
        - brew cleanup

    - <<: *build
      name: "Windows"
      os: windows
      language: shell
      env:
        - OS=windows
        - PATH=/c/Python37:/c/Python37/Scripts:/C/Windows/System32/downlevel:$PATH
      before_install:
        - choco install python --version=3.7.4 --x86
        - python -m pip install --upgrade pip
        - pip install pywin32
      script:
        - pyinstaller --additional-hooks-dir=scripts/. --icon=icons/lbry256.ico -F -n lbrynet lbry/extras/cli.py
        - cd dist
        - 7z a -tzip lbrynet-windows.zip lbrynet.exe
        - sha256sum -b lbrynet-windows.zip
        - ./lbrynet.exe --version

    - if: tag IS present
      stage: build
      name: "Wallet Server Docker Image - Tagged Release"
      script:
        - set -e
        - echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
        - travis_retry docker build -t lbry/wallet-server:$TRAVIS_TAG -f lbry/scripts/Dockerfile.wallet_server .
        - docker push lbry/wallet-server:$TRAVIS_TAG

    - if: tag IS blank AND branch = master AND NOT type IN (pull_request)
      stage: build
      name: "Wallet Server Docker Image - Master"
      script:
        - set -e
        - echo "$DOCKER_PASSWORD" | docker login --username "$DOCKER_USERNAME" --password-stdin
        - travis_retry docker build -t lbry/wallet-server:master -f lbry/scripts/Dockerfile.wallet_server .
        - docker push lbry/wallet-server:master

cache:
  directories:
    - $HOME/venv
    - $HOME/.cache/pip
    - $HOME/Library/Caches/pip
    - $HOME/Library/Caches/Homebrew
    - $TRAVIS_BUILD_DIR/.tox

File diff suppressed because it is too large

INSTALL.md

@ -9,20 +9,29 @@ Here's a video walkthrough of this setup, which is itself hosted by the LBRY net
 ## Prerequisites
-Running `lbrynet` from source requires Python 3.7 or higher. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
+Running `lbrynet` from source requires Python 3.7. Get the installer for your OS [here](https://www.python.org/downloads/release/python-370/).
-After installing python 3, you'll need to install some additional libraries depending on your operating system.
+After installing Python 3.7, you'll need to install some additional libraries depending on your operating system.
+Because of [issue #2769](https://github.com/lbryio/lbry-sdk/issues/2769)
+at the moment the `lbrynet` daemon will only work correctly with Python 3.7.
+If Python 3.8+ is used, the daemon will start but the RPC server
+may not accept messages, returning the following:
+```
+Could not connect to daemon. Are you sure it's running?
+```
 ### macOS
 macOS users will need to install [xcode command line tools](https://developer.xamarin.com/guides/testcloud/calabash/configuring/osx/install-xcode-command-line-tools/) and [homebrew](http://brew.sh/).
 These environment variables also need to be set:
-1. PYTHONUNBUFFERED=1
-2. EVENT_NOKQUEUE=1
+```
+PYTHONUNBUFFERED=1
+EVENT_NOKQUEUE=1
+```
 Remaining dependencies can then be installed by running:
 ```
 brew install python protobuf
 ```
@ -31,14 +40,17 @@ Assistance installing Python3: https://docs.python-guide.org/starting/install3/o
 ### Linux
-On Ubuntu (16.04 minimum, we recommend 18.04), install the following:
+On Ubuntu (we recommend 18.04 or 20.04), install the following:
 ```
 sudo add-apt-repository ppa:deadsnakes/ppa
 sudo apt-get update
 sudo apt-get install build-essential python3.7 python3.7-dev git python3.7-venv libssl-dev python-protobuf
 ```
+The [deadsnakes PPA](https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa) provides Python 3.7
+for those Ubuntu distributions that no longer have it in their
+official repositories.
 On Raspbian, you will also need to install `python-pyparsing`.
 If you're running another Linux distro, install the equivalent of the above packages for your system.
@ -47,65 +59,119 @@ If you're running another Linux distro, install the equivalent of the above pack
 ### Linux/Mac
-To install on Linux/Mac:
-```
-Clone the repository:
-$ git clone https://github.com/lbryio/lbry-sdk.git
-$ cd lbry-sdk
-Create a Python virtual environment for lbry-sdk:
-$ python3.7 -m venv lbry-venv
-Activating lbry-sdk virtual environment:
-$ source lbry-venv/bin/activate
-Make sure you're on Python 3.7+ (as the default Python in virtual environment):
-$ python --version
-Install packages:
-$ make install
-If you are on Linux and using PyCharm, generates initial configs:
-$ make idea
-```
-To verify your installation, `which lbrynet` should return a path inside of the `lbry-venv` folder created by the `python3.7 -m venv lbry-venv` command.
+Clone the repository:
+```bash
+git clone https://github.com/lbryio/lbry-sdk.git
+cd lbry-sdk
+```
+Create a Python virtual environment for lbry-sdk:
+```bash
+python3.7 -m venv lbry-venv
+```
+Activate virtual environment:
+```bash
+source lbry-venv/bin/activate
+```
+Make sure you're on Python 3.7+ as default in the virtual environment:
+```bash
+python --version
+```
+Install packages:
+```bash
+make install
+```
+If you are on Linux and using PyCharm, generates initial configs:
+```bash
+make idea
+```
+To verify your installation, `which lbrynet` should return a path inside
+of the `lbry-venv` folder.
+```bash
+(lbry-venv) $ which lbrynet
+/opt/lbry-sdk/lbry-venv/bin/lbrynet
+```
+To exit the virtual environment simply use the command `deactivate`.
 ### Windows
-To install on Windows:
-```
-Clone the repository:
-> git clone https://github.com/lbryio/lbry-sdk.git
-> cd lbry-sdk
-Create a Python virtual environment for lbry-sdk:
-> python -m venv lbry-venv
-Activating lbry-sdk virtual environment:
-> lbry-venv\Scripts\activate
-Install packages:
-> cd torba
-> pip install -e .
-> cd ../lbry
-> pip install -e .
-```
+Clone the repository:
+```bash
+git clone https://github.com/lbryio/lbry-sdk.git
+cd lbry-sdk
+```
+Create a Python virtual environment for lbry-sdk:
+```bash
+python -m venv lbry-venv
+```
+Activate virtual environment:
+```bash
+lbry-venv\Scripts\activate
+```
+Install packages:
+```bash
+pip install -e .
+```
 ## Run the tests
+### Elasticsearch
-To run the unit tests from the repo directory:
-```
-python -m unittest discover -s lbry tests.unit
-```
+For running integration tests, Elasticsearch is required to be available at localhost:9200/
+The easiest way to start it is using docker with:
+```bash
+make elastic-docker
+```
+Alternative installation methods are available [at Elasticsearch website](https://www.elastic.co/guide/en/elasticsearch/reference/current/install-elasticsearch.html).
+To run the unit and integration tests from the repo directory:
+```
+python -m unittest discover tests.unit
+python -m unittest discover tests.integration
+```
 ## Usage
 To start the API server:
-`lbrynet start`
+```
+lbrynet start
+```
+Whenever the code inside [lbry-sdk/lbry](./lbry)
+is modified we should run `make install` to recompile the `lbrynet`
+executable with the newest code.
+## Development
+When developing, remember to enter the environment,
+and if you wish start the server interactively.
+```bash
+$ source lbry-venv/bin/activate
+(lbry-venv) $ python lbry/extras/cli.py start
+```
+Parameters can be passed in the same way.
+```bash
+(lbry-venv) $ python lbry/extras/cli.py wallet balance
+```
+If a Python debugger (`pdb` or `ipdb`) is installed we can also start it
+in this way, set up break points, and step through the code.
+```bash
+(lbry-venv) $ pip install ipdb
+(lbry-venv) $ ipdb lbry/extras/cli.py
+```
 Happy hacking!

LICENSE

@ -1,6 +1,6 @@
 The MIT License (MIT)
-Copyright (c) 2015-2019 LBRY Inc
+Copyright (c) 2015-2022 LBRY Inc
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,

MANIFEST.in

@ -1,4 +1,4 @@
 include README.md
 include CHANGELOG.md
 include LICENSE
-recursive-include torba *.txt *.py
+recursive-include lbry *.txt *.py

Makefile

@ -1,18 +1,26 @@
+.PHONY: install tools lint test test-unit test-unit-coverage test-integration idea
+
 install:
-	cd torba && pip install -e .
-	cd lbry && pip install -e .
-	pip install mypy==0.701
-	pip install coverage astroid pylint
+	pip install -e .

 lint:
-	cd lbry && pylint lbry
-	cd torba && pylint --rcfile=setup.cfg torba
-	cd torba && mypy --ignore-missing-imports torba
+	pylint --rcfile=setup.cfg lbry
+	#mypy --ignore-missing-imports lbry

-test:
-	cd lbry && tox
-	cd torba && tox
+test: test-unit test-integration
+
+test-unit:
+	python -m unittest discover tests.unit
+
+test-unit-coverage:
+	coverage run --source=lbry -m unittest discover -vv tests.unit
+
+test-integration:
+	tox

 idea:
 	mkdir -p .idea
-	cp -r lbry/scripts/idea/* .idea
+	cp -r scripts/idea/* .idea
+
+elastic-docker:
+	docker run -d -v lbryhub:/usr/share/elasticsearch/data -p 9200:9200 -p 9300:9300 -e"ES_JAVA_OPTS=-Xms512m -Xmx512m" -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.12.1

README.md

@ -1,15 +1,15 @@
-# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Build Status](https://travis-ci.org/lbryio/lbry-sdk.svg?branch=master)](https://travis-ci.org/lbryio/lbry-sdk) [![Test Coverage](https://codecov.io/gh/lbryio/lbry-sdk/branch/master/graph/badge.svg)](https://codecov.io/gh/lbryio/lbry-sdk)
+# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![build](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml/badge.svg)](https://github.com/lbryio/lbry-sdk/actions/workflows/main.yml) [![coverage](https://coveralls.io/repos/github/lbryio/lbry-sdk/badge.svg)](https://coveralls.io/github/lbryio/lbry-sdk)
 LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
 LBRY SDK for Python is currently the most fully featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components include:
-* Built on Python 3.7+ and `asyncio`.
+* Built on Python 3.7 and `asyncio`.
-* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/dht)).
+* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/dht)).
-* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/blob_exchange)).
+* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/blob_exchange)).
-* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/schema)).
+* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/schema)).
-* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/wallet)).
+* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/wallet)).
-* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/extras/daemon)).
+* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/extras/daemon)).
 ## Installation
@ -41,7 +41,7 @@ This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
 ## Security
-We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.com/faq/gpg-key) if you need it.
+We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
 ## Contact
@ -53,4 +53,4 @@ The documentation for the API can be found [here](https://lbry.tech/api/sdk).
 Daemon defaults, ports, and other settings are documented [here](https://lbry.tech/resources/daemon-settings).
-Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/lbry/example_daemon_settings.yml).
+Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/example_daemon_settings.yml).

9
SECURITY.md Normal file

@ -0,0 +1,9 @@
# Security Policy
## Supported Versions
While we are not at v1.0 yet, only the latest release will be supported.
## Reporting a Vulnerability
See https://lbry.com/faq/security


@ -0,0 +1,43 @@
FROM debian:10-slim
ARG user=lbry
ARG projects_dir=/home/$user
ARG db_dir=/database
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
USER $user
WORKDIR $projects_dir
RUN python3 -m pip install -U setuptools pip
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
VOLUME $db_dir
ENTRYPOINT ["python3", "scripts/dht_node.py"]


@ -0,0 +1,56 @@
FROM debian:10-slim
ARG user=lbry
ARG db_dir=/database
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
tar unzip \
build-essential \
automake libtool \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-cffi \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $db_dir
RUN chown -R $user:$user $db_dir
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
# entry point
ARG host=0.0.0.0
ARG tcp_port=50001
ARG daemon_url=http://lbry:lbry@localhost:9245/
VOLUME $db_dir
ENV TCP_PORT=$tcp_port
ENV HOST=$host
ENV DAEMON_URL=$daemon_url
ENV DB_DIRECTORY=$db_dir
ENV MAX_SESSIONS=1000000000
ENV MAX_SEND=1000000000000000000
ENV EVENT_LOOP_POLICY=uvloop
COPY ./docker/wallet_server_entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

45
docker/Dockerfile.web Normal file

@ -0,0 +1,45 @@
FROM debian:10-slim
ARG user=lbry
ARG downloads_dir=/database
ARG projects_dir=/home/$user
ARG DOCKER_TAG
ARG DOCKER_COMMIT=docker
ENV DOCKER_TAG=$DOCKER_TAG DOCKER_COMMIT=$DOCKER_COMMIT
RUN apt-get update && \
apt-get -y --no-install-recommends install \
wget \
automake libtool \
tar unzip \
build-essential \
pkg-config \
libleveldb-dev \
python3.7 \
python3-dev \
python3-pip \
python3-wheel \
python3-setuptools && \
update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 && \
rm -rf /var/lib/apt/lists/*
RUN groupadd -g 999 $user && useradd -m -u 999 -g $user $user
RUN mkdir -p $downloads_dir
RUN chown -R $user:$user $downloads_dir
COPY . $projects_dir
RUN chown -R $user:$user $projects_dir
USER $user
WORKDIR $projects_dir
RUN pip install uvloop
RUN make install
RUN python3 docker/set_build.py
RUN rm ~/.cache -rf
# entry point
VOLUME $downloads_dir
COPY ./docker/webconf.yaml /webconf.yaml
ENTRYPOINT ["/home/lbry/.local/bin/lbrynet", "start", "--config=/webconf.yaml"]

9
docker/README.md Normal file

@ -0,0 +1,9 @@
### How to run with docker-compose
1. Edit config file and after that fix permissions with
```
sudo chown -R 999:999 webconf.yaml
```
2. Start SDK with
```
docker-compose up -d
```


@ -0,0 +1,49 @@
version: "3"
volumes:
wallet_server:
es01:
services:
wallet_server:
depends_on:
- es01
image: lbry/wallet-server:${WALLET_SERVER_TAG:-latest-release}
restart: always
network_mode: host
ports:
- "50001:50001" # rpc port
- "2112:2112" # uncomment to enable prometheus
volumes:
- "wallet_server:/database"
environment:
- DAEMON_URL=http://lbry:lbry@127.0.0.1:9245
- MAX_QUERY_WORKERS=4
- CACHE_MB=1024
- CACHE_ALL_TX_HASHES=
- CACHE_ALL_CLAIM_TXOS=
- MAX_SEND=1000000000000000000
- MAX_RECEIVE=1000000000000000000
- MAX_SESSIONS=100000
- HOST=0.0.0.0
- TCP_PORT=50001
- PROMETHEUS_PORT=2112
- FILTERING_CHANNEL_IDS=770bd7ecba84fd2f7607fb15aedd2b172c2e153f 95e5db68a3101df19763f3a5182e4b12ba393ee8
- BLOCKING_CHANNEL_IDS=dd687b357950f6f271999971f43c785e8067c3a9 06871aa438032244202840ec59a469b303257cad b4a2528f436eca1bf3bf3e10ff3f98c57bd6c4c6
es01:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.0
container_name: es01
environment:
- node.name=es01
- discovery.type=single-node
- indices.query.bool.max_clause_count=8192
- bootstrap.memory_lock=true
- "ES_JAVA_OPTS=-Xms4g -Xmx4g" # no more than 32, remember to disable swap
ulimits:
memlock:
soft: -1
hard: -1
volumes:
- es01:/usr/share/elasticsearch/data
ports:
- 127.0.0.1:9200:9200


@ -0,0 +1,9 @@
version: '3'
services:
  websdk:
    image: vshyba/websdk
    ports:
      - '5279:5279'
      - '5280:5280'
    volumes:
      - ./webconf.yaml:/webconf.yaml

7
docker/hooks/build Normal file

@ -0,0 +1,7 @@
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/../.." ## make sure we're in the right place. Docker Hub screws this up sometimes
echo "docker build dir: $(pwd)"
docker build --build-arg DOCKER_TAG=$DOCKER_TAG --build-arg DOCKER_COMMIT=$SOURCE_COMMIT -f $DOCKERFILE_PATH -t $IMAGE_NAME .

11
docker/install_choco.ps1 Normal file

@ -0,0 +1,11 @@
# requires powershell and .NET 4+. see https://chocolatey.org/install for more info.
$chocoVersion = powershell choco -v
if(-not($chocoVersion)){
Write-Output "Chocolatey is not installed, installing now"
Write-Output "IF YOU KEEP GETTING THIS MESSAGE ON EVERY BUILD, TRY RESTARTING THE GITLAB RUNNER SO IT GETS CHOCO INTO IT'S ENV"
Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
}
else{
Write-Output "Chocolatey version $chocoVersion is already installed"
}

44
docker/set_build.py Normal file

@ -0,0 +1,44 @@
import sys
import os
import re
import logging

import lbry.build_info as build_info_mod

log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)


def _check_and_set(d: dict, key: str, value: str):
    try:
        d[key]
    except KeyError:
        raise Exception(f"{key} var does not exist in {build_info_mod.__file__}")
    d[key] = value


def main():
    build_info = {item: build_info_mod.__dict__[item] for item in dir(build_info_mod) if not item.startswith("__")}

    commit_hash = os.getenv('DOCKER_COMMIT', os.getenv('GITHUB_SHA'))
    if commit_hash is None:
        raise ValueError("Commit hash not found in env vars")
    _check_and_set(build_info, "COMMIT_HASH", commit_hash[:6])

    docker_tag = os.getenv('DOCKER_TAG')
    if docker_tag:
        _check_and_set(build_info, "DOCKER_TAG", docker_tag)
        _check_and_set(build_info, "BUILD", "docker")
    else:
        if re.match(r'refs/tags/v\d+\.\d+\.\d+$', str(os.getenv('GITHUB_REF'))):
            _check_and_set(build_info, "BUILD", "release")
        else:
            _check_and_set(build_info, "BUILD", "qa")

    log.debug("build info: %s", ", ".join([f"{k}={v}" for k, v in build_info.items()]))

    with open(build_info_mod.__file__, 'w') as f:
        f.write("\n".join([f"{k} = \"{v}\"" for k, v in build_info.items()]) + "\n")


if __name__ == '__main__':
    sys.exit(main())

docker/wallet_server_entrypoint.sh

@ -0,0 +1,25 @@
#!/bin/bash

# entrypoint for wallet server Docker image

set -euo pipefail

SNAPSHOT_URL="${SNAPSHOT_URL:-}" #off by default. latest snapshot at https://lbry.com/snapshot/wallet

if [[ -n "$SNAPSHOT_URL" ]] && [[ ! -f /database/lbry-leveldb ]]; then
  files="$(ls)"
  echo "Downloading wallet snapshot from $SNAPSHOT_URL"
  wget --no-verbose --trust-server-names --content-disposition "$SNAPSHOT_URL"
  echo "Extracting snapshot..."
  filename="$(grep -vf <(echo "$files") <(ls))" # finds the file that was not there before
  case "$filename" in
    *.tgz|*.tar.gz|*.tar.bz2 ) tar xvf "$filename" --directory /database ;;
    *.zip ) unzip "$filename" -d /database ;;
    * ) echo "Don't know how to extract ${filename}. SNAPSHOT COULD NOT BE LOADED" && exit 1 ;;
  esac
  rm "$filename"
fi

/home/lbry/.local/bin/lbry-hub-elastic-sync
echo 'starting server'
/home/lbry/.local/bin/lbry-hub "$@"

9
docker/webconf.yaml Normal file

@@ -0,0 +1,9 @@
allowed_origin: "*"
max_key_fee: "0.0 USD"
save_files: false
save_blobs: false
streaming_server: "0.0.0.0:5280"
api: "0.0.0.0:5279"
data_dir: /tmp
download_dir: /tmp
wallet_dir: /tmp

5163
docs/api.json Normal file

File diff suppressed because one or more lines are too long

(12 image files changed; binary image diffs are not shown. Sizes are identical before and after, ranging from 1.1 KiB to 361 KiB.)

1
lbry/.gitattributes vendored
View file

@@ -1 +0,0 @@
/CHANGELOG.md merge=union

13
lbry/.gitignore vendored
View file

@@ -1,13 +0,0 @@
.DS_Store
/build
/dist
/.tox
/.idea
/.coverage
/lbry-venv
lbry.egg-info
__pycache__
_trial_temp/
/tests/integration/files

View file

@@ -1,441 +0,0 @@
[MASTER]
# Specify a configuration file.
#rcfile=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS,schema
# Add files or directories matching the regex patterns to the
# blacklist. The regex matches against base names, not paths.
# `\.#.*` - add emacs tmp files to the blacklist
ignore-patterns=\.#.*
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=4
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
# extension-pkg-whitelist=
# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality.
optimize-ast=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=
anomalous-backslash-in-string,
arguments-differ,
attribute-defined-outside-init,
bad-continuation,
bare-except,
broad-except,
cell-var-from-loop,
consider-iterating-dictionary,
dangerous-default-value,
duplicate-code,
fixme,
invalid-name,
len-as-condition,
locally-disabled,
logging-not-lazy,
missing-docstring,
no-else-return,
no-init,
no-member,
no-self-use,
protected-access,
redefined-builtin,
redefined-outer-name,
redefined-variable-type,
relative-import,
signature-differs,
super-init-not-called,
too-few-public-methods,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-nested-blocks,
too-many-public-methods,
too-many-return-statements,
too-many-statements,
trailing-newlines,
undefined-loop-variable,
ungrouped-imports,
unnecessary-lambda,
unused-argument,
unused-variable,
wrong-import-order,
wrong-import-position,
deprecated-lambda,
simplifiable-if-statement,
unidiomatic-typecheck,
inconsistent-return-statements,
keyword-arg-before-vararg,
assignment-from-no-return,
useless-return,
assignment-from-none,
stop-iteration-return
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_$|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[BASIC]
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,input
# Good variable names which should always be accepted, separated by a comma
# allow `d` as its used frequently for deferred callback chains
good-names=i,j,k,ex,Run,_,d
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$
# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
[ELIF]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=leveldb,distutils
# Ignoring distutils because: https://github.com/PyCQA/pylint/issues/73
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
# with qualified names.
# ignored-classes=
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=lbrynet.lbrynet_daemon.LBRYDaemon.Parameters
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[DESIGN]
# Maximum number of arguments for function / method
max-args=10
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of statements in function / method body
max-statements=50
# Maximum number of parents for a class (see R0901).
max-parents=8
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

View file

@@ -1,56 +0,0 @@
# <img src="https://raw.githubusercontent.com/lbryio/lbry-sdk/master/lbry/lbry.png" alt="LBRY" width="48" height="36" /> LBRY SDK [![Build Status](https://travis-ci.org/lbryio/lbry-sdk.svg?branch=master)](https://travis-ci.org/lbryio/lbry-sdk) [![Test Coverage](https://codecov.io/gh/lbryio/lbry-sdk/branch/master/graph/badge.svg)](https://codecov.io/gh/lbryio/lbry-sdk)
LBRY is a decentralized peer-to-peer protocol for publishing and accessing digital content. It utilizes the [LBRY blockchain](https://github.com/lbryio/lbrycrd) as a global namespace and database of digital content. Blockchain entries contain searchable content metadata, identities, rights and access rules. LBRY also provides a data network that consists of peers (seeders) uploading and downloading data from other peers, possibly in exchange for payments, as well as a distributed hash table used by peers to discover other peers.
LBRY SDK for Python is currently the most full-featured implementation of the LBRY Network protocols and includes many useful components and tools for building decentralized applications. Primary features and components:
* Built on Python 3.7+ and `asyncio`.
* Kademlia DHT (Distributed Hash Table) implementation for finding peers to download from and announcing to peers what we have to host ([lbry.dht](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/dht)).
* Blob exchange protocol for transferring encrypted blobs of content and negotiating payments ([lbry.blob_exchange](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/blob_exchange)).
* Protobuf schema for encoding and decoding metadata stored on the blockchain ([lbry.schema](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/schema)).
* Wallet implementation for the LBRY blockchain ([lbry.wallet](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/wallet)).
* Daemon with a JSON-RPC API to ease building end user applications in any language and for automating various tasks ([lbry.extras.daemon](https://github.com/lbryio/lbry-sdk/tree/master/lbry/lbry/extras/daemon)).
## Installation
Our [releases page](https://github.com/lbryio/lbry-sdk/releases) contains pre-built binaries of the latest release, pre-releases, and past releases for macOS, Debian-based Linux, and Windows. [Automated travis builds](http://build.lbry.io/daemon/) are also available for testing.
## Usage
Run `lbrynet start` to launch the API server.
By default, `lbrynet` will provide a JSON-RPC server at `http://localhost:5279`. It is easy to interact with via cURL or sane programming languages.
Our [quickstart guide](https://lbry.tech/playground) provides a simple walkthrough and examples for learning.
With the daemon running, `lbrynet commands` will show you a list of commands.
The full API is documented [here](https://lbry.tech/api/sdk).
## Running from source
Installing from source is also relatively painless. Full instructions are in [INSTALL.md](INSTALL.md).
## Contributing
Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
## License
This project is MIT licensed. For the full license, see [LICENSE](LICENSE).
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our GPG key is here](https://lbry.com/faq/gpg-key) if you need it.
## Contact
The primary contact for this project is [@eukreign](mailto:lex@lbry.com).
## Additional information and links
The documentation for the API can be found [here](https://lbry.tech/api/sdk).
Daemon defaults, ports, and other settings are documented [here](https://lbry.tech/resources/daemon-settings).
Settings can be configured using a daemon-settings.yml file. An example can be found [here](https://github.com/lbryio/lbry-sdk/blob/master/lbry/example_daemon_settings.yml).
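
Since the daemon exposes a plain JSON-RPC endpoint on localhost:5279, a quick smoke test from Python might look like this (a sketch assuming the `requests` package is installed and the daemon is running with default settings; `status` is one of the commands listed by `lbrynet commands`):

import requests

# POST a JSON-RPC style body to the daemon and print its reply
response = requests.post('http://localhost:5279', json={'method': 'status', 'params': {}})
print(response.json())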

2
lbry/__init__.py Normal file
View file

@@ -0,0 +1,2 @@
__version__ = "0.113.0"
version = tuple(map(int, __version__.split('.'))) # pylint: disable=invalid-name
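
The tuple form makes programmatic version checks trivial; for example (illustrative, assuming the package is importable):

from lbry import version, __version__

assert version >= (0, 100, 0)                      # tuples compare element-wise
assert __version__ == '.'.join(map(str, version))  # round-trips back to the string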

6
lbry/blob/__init__.py Normal file
View file

@@ -0,0 +1,6 @@
from lbry.utils import get_lbry_hash_obj
MAX_BLOB_SIZE = 2 * 2 ** 20
# digest_size is in bytes, and blob hashes are hex encoded
BLOBHASH_LENGTH = get_lbry_hash_obj().digest_size * 2
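
A quick sanity check of that arithmetic, assuming get_lbry_hash_obj() returns a SHA-384 hash object (treat that as an assumption here, not a guarantee of the SDK's API):

import hashlib

# sha384 digests are 48 bytes; hex encoding doubles that to 96 characters
assert hashlib.sha384().digest_size * 2 == 96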

View file

@@ -1,5 +1,6 @@
 import os
 import re
+import time
 import asyncio
 import binascii
 import logging
@@ -9,18 +10,20 @@ from io import BytesIO
 from cryptography.hazmat.primitives.ciphers import Cipher, modes
 from cryptography.hazmat.primitives.ciphers.algorithms import AES
 from cryptography.hazmat.primitives.padding import PKCS7
+from cryptography.hazmat.backends import default_backend
-from lbry.cryptoutils import backend, get_lbry_hash_obj
+from lbry.utils import get_lbry_hash_obj
 from lbry.error import DownloadCancelledError, InvalidBlobHashError, InvalidDataError
-from lbry.blob import MAX_BLOB_SIZE, blobhash_length
+from lbry.blob import MAX_BLOB_SIZE, BLOBHASH_LENGTH
 from lbry.blob.blob_info import BlobInfo
 from lbry.blob.writer import HashBlobWriter

 log = logging.getLogger(__name__)

-_hexmatch = re.compile("^[a-f,0-9]+$")
+HEXMATCH = re.compile("^[a-f,0-9]+$")
+BACKEND = default_backend()


 def is_valid_blobhash(blobhash: str) -> bool:
@@ -31,11 +34,11 @@ def is_valid_blobhash(blobhash: str) -> bool:
     @return: True/False
     """
-    return len(blobhash) == blobhash_length and _hexmatch.match(blobhash)
+    return len(blobhash) == BLOBHASH_LENGTH and HEXMATCH.match(blobhash)


 def encrypt_blob_bytes(key: bytes, iv: bytes, unencrypted: bytes) -> typing.Tuple[bytes, str]:
-    cipher = Cipher(AES(key), modes.CBC(iv), backend=backend)
+    cipher = Cipher(AES(key), modes.CBC(iv), backend=BACKEND)
     padder = PKCS7(AES.block_size).padder()
     encryptor = cipher.encryptor()
     encrypted = encryptor.update(padder.update(unencrypted) + padder.finalize()) + encryptor.finalize()
@@ -47,7 +50,7 @@ def encrypt_blob_bytes(key: bytes, iv: bytes, unencrypted: bytes) -> typing.Tupl
 def decrypt_blob_bytes(data: bytes, length: int, key: bytes, iv: bytes) -> bytes:
     if len(data) != length:
         raise ValueError("unexpected length")
-    cipher = Cipher(AES(key), modes.CBC(iv), backend=backend)
+    cipher = Cipher(AES(key), modes.CBC(iv), backend=BACKEND)
     unpadder = PKCS7(AES.block_size).unpadder()
     decryptor = cipher.decryptor()
     return unpadder.update(decryptor.update(data) + decryptor.finalize()) + unpadder.finalize()
@@ -68,21 +71,27 @@ class AbstractBlob:
         'writers',
         'verified',
         'writing',
-        'readers'
+        'readers',
+        'added_on',
+        'is_mine',
     ]

-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
+    def __init__(
+        self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+        blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+        blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False,
+    ):
         self.loop = loop
         self.blob_hash = blob_hash
         self.length = length
         self.blob_completed_callback = blob_completed_callback
         self.blob_directory = blob_directory
         self.writers: typing.Dict[typing.Tuple[typing.Optional[str], typing.Optional[int]], HashBlobWriter] = {}
-        self.verified: asyncio.Event = asyncio.Event(loop=self.loop)
-        self.writing: asyncio.Event = asyncio.Event(loop=self.loop)
+        self.verified: asyncio.Event = asyncio.Event()
+        self.writing: asyncio.Event = asyncio.Event()
         self.readers: typing.List[typing.BinaryIO] = []
+        self.added_on = added_on or time.time()
+        self.is_mine = is_mine

         if not is_valid_blobhash(blob_hash):
             raise InvalidBlobHashError(blob_hash)
@@ -108,7 +117,7 @@ class AbstractBlob:
         if reader in self.readers:
             self.readers.remove(reader)

-    def _write_blob(self, blob_bytes: bytes):
+    def _write_blob(self, blob_bytes: bytes) -> asyncio.Task:
         raise NotImplementedError()

     def set_length(self, length) -> None:
@@ -142,7 +151,7 @@ class AbstractBlob:
     def close(self):
         while self.writers:
-            peer, writer = self.writers.popitem()
+            _, writer = self.writers.popitem()
             if writer and writer.finished and not writer.finished.done() and not self.loop.is_closed():
                 writer.finished.cancel()
         while self.readers:
@@ -163,7 +172,10 @@ class AbstractBlob:
         if not self.is_readable():
             raise OSError('blob files cannot be read')
         with self.reader_context() as handle:
-            return await self.loop.sendfile(writer.transport, handle, count=self.get_length())
+            try:
+                return await self.loop.sendfile(writer.transport, handle, count=self.get_length())
+            except (ConnectionError, BrokenPipeError, RuntimeError, OSError, AttributeError):
+                return -1

     def decrypt(self, key: bytes, iv: bytes) -> bytes:
         """
@@ -176,34 +188,41 @@ class AbstractBlob:
     @classmethod
     async def create_from_unencrypted(
             cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
-            unencrypted: bytes, blob_num: int,
-            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None) -> BlobInfo:
+            unencrypted: bytes, blob_num: int, added_on: int, is_mine: bool,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], None]] = None,
+    ) -> BlobInfo:
         """
         Create an encrypted BlobFile from plaintext bytes
         """
         blob_bytes, blob_hash = encrypt_blob_bytes(key, iv, unencrypted)
         length = len(blob_bytes)
-        blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir)
+        blob = cls(loop, blob_hash, length, blob_completed_callback, blob_dir, added_on, is_mine)
         writer = blob.get_blob_writer()
         writer.write(blob_bytes)
         await blob.verified.wait()
-        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), blob_hash)
+        return BlobInfo(blob_num, length, binascii.hexlify(iv).decode(), added_on, blob_hash, is_mine)

     def save_verified_blob(self, verified_bytes: bytes):
         if self.verified.is_set():
             return
-        if self.is_writeable():
-            self._write_blob(verified_bytes)
-            self.verified.set()
-            if self.blob_completed_callback:
-                self.blob_completed_callback(self)
+
+        def update_events(_):
+            self.verified.set()
+            self.writing.clear()
+
+        if self.is_writeable():
+            self.writing.set()
+            task = self._write_blob(verified_bytes)
+            task.add_done_callback(update_events)
+            if self.blob_completed_callback:
+                task.add_done_callback(lambda _: self.blob_completed_callback(self))

     def get_blob_writer(self, peer_address: typing.Optional[str] = None,
                         peer_port: typing.Optional[int] = None) -> HashBlobWriter:
         if (peer_address, peer_port) in self.writers and not self.writers[(peer_address, peer_port)].closed():
             raise OSError(f"attempted to download blob twice from {peer_address}:{peer_port}")
-        fut = asyncio.Future(loop=self.loop)
+        fut = asyncio.Future()
         writer = HashBlobWriter(self.blob_hash, self.get_length, fut)
         self.writers[(peer_address, peer_port)] = writer
@@ -237,11 +256,13 @@ class BlobBuffer(AbstractBlob):
     """
     An in-memory only blob
     """
-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
+    def __init__(
+        self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+        blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+        blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
+    ):
         self._verified_bytes: typing.Optional[BytesIO] = None
-        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
+        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)

     @contextlib.contextmanager
     def _reader_context(self) -> typing.ContextManager[typing.BinaryIO]:
@@ -256,9 +277,11 @@ class BlobBuffer(AbstractBlob):
         self.verified.clear()

     def _write_blob(self, blob_bytes: bytes):
-        if self._verified_bytes:
-            raise OSError("already have bytes for blob")
-        self._verified_bytes = BytesIO(blob_bytes)
+        async def write():
+            if self._verified_bytes:
+                raise OSError("already have bytes for blob")
+            self._verified_bytes = BytesIO(blob_bytes)
+        return self.loop.create_task(write())

     def delete(self):
         if self._verified_bytes:
@@ -276,10 +299,12 @@ class BlobFile(AbstractBlob):
     """
     A blob existing on the local file system
     """
-    def __init__(self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
-                 blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
-                 blob_directory: typing.Optional[str] = None):
-        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory)
+    def __init__(
+        self, loop: asyncio.AbstractEventLoop, blob_hash: str, length: typing.Optional[int] = None,
+        blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None,
+        blob_directory: typing.Optional[str] = None, added_on: typing.Optional[int] = None, is_mine: bool = False
+    ):
+        super().__init__(loop, blob_hash, length, blob_completed_callback, blob_directory, added_on, is_mine)
         if not blob_directory or not os.path.isdir(blob_directory):
             raise OSError(f"invalid blob directory '{blob_directory}'")
         self.file_path = os.path.join(self.blob_directory, self.blob_hash)
@@ -314,22 +339,28 @@ class BlobFile(AbstractBlob):
             handle.close()

     def _write_blob(self, blob_bytes: bytes):
-        with open(self.file_path, 'wb') as f:
-            f.write(blob_bytes)
+        def _write_blob():
+            with open(self.file_path, 'wb') as f:
+                f.write(blob_bytes)
+
+        async def write_blob():
+            await self.loop.run_in_executor(None, _write_blob)
+
+        return self.loop.create_task(write_blob())

     def delete(self):
+        super().delete()
         if os.path.isfile(self.file_path):
             os.remove(self.file_path)
-        return super().delete()

     @classmethod
     async def create_from_unencrypted(
             cls, loop: asyncio.AbstractEventLoop, blob_dir: typing.Optional[str], key: bytes, iv: bytes,
-            unencrypted: bytes, blob_num: int,
-            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'],
-                                                                     asyncio.Task]] = None) -> BlobInfo:
+            unencrypted: bytes, blob_num: int, added_on: float, is_mine: bool,
+            blob_completed_callback: typing.Optional[typing.Callable[['AbstractBlob'], asyncio.Task]] = None
+    ) -> BlobInfo:
         if not blob_dir or not os.path.isdir(blob_dir):
             raise OSError(f"cannot create blob in directory: '{blob_dir}'")
         return await super().create_from_unencrypted(
-            loop, blob_dir, key, iv, unencrypted, blob_num, blob_completed_callback
+            loop, blob_dir, key, iv, unencrypted, blob_num, added_on, is_mine, blob_completed_callback
         )
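
The key behavioral change above is that _write_blob() now returns an asyncio.Task, and save_verified_blob() chains completion work through add_done_callback instead of calling it inline. A minimal, self-contained sketch of that pattern (names here are illustrative, not the SDK's API):

import asyncio

async def main():
    loop = asyncio.get_running_loop()

    def blocking_write():                 # stands in for the file write
        print("blob bytes written")

    async def write_blob():
        await loop.run_in_executor(None, blocking_write)   # keep the event loop free

    task = loop.create_task(write_blob())
    task.add_done_callback(lambda _: print("completion callback fires here"))
    await task

asyncio.run(main())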

View file

@@ -7,13 +7,19 @@ class BlobInfo:
         'blob_num',
         'length',
         'iv',
+        'added_on',
+        'is_mine'
     ]

-    def __init__(self, blob_num: int, length: int, iv: str, blob_hash: typing.Optional[str] = None):
+    def __init__(
+            self, blob_num: int, length: int, iv: str, added_on,
+            blob_hash: typing.Optional[str] = None, is_mine=False):
         self.blob_hash = blob_hash
         self.blob_num = blob_num
         self.length = length
         self.iv = iv
+        self.added_on = added_on
+        self.is_mine = is_mine

     def as_dict(self) -> typing.Dict:
         d = {

View file

@@ -2,7 +2,7 @@ import os
 import typing
 import asyncio
 import logging
-from lbry.utils import LRUCache
+from lbry.utils import LRUCacheWithMetrics
 from lbry.blob.blob_file import is_valid_blobhash, BlobFile, BlobBuffer, AbstractBlob
 from lbry.stream.descriptor import StreamDescriptor
 from lbry.connection_manager import ConnectionManager
@@ -32,34 +32,34 @@ class BlobManager:
             else self._node_data_store.completed_blobs
         self.blobs: typing.Dict[str, AbstractBlob] = {}
         self.config = config
-        self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCache(
+        self.decrypted_blob_lru_cache = None if not self.config.blob_lru_cache_size else LRUCacheWithMetrics(
            self.config.blob_lru_cache_size)
         self.connection_manager = ConnectionManager(loop)

-    def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None):
+    def _get_blob(self, blob_hash: str, length: typing.Optional[int] = None, is_mine: bool = False):
         if self.config.save_blobs or (
                 is_valid_blobhash(blob_hash) and os.path.isfile(os.path.join(self.blob_dir, blob_hash))):
             return BlobFile(
-                self.loop, blob_hash, length, self.blob_completed, self.blob_dir
+                self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
             )
         return BlobBuffer(
-            self.loop, blob_hash, length, self.blob_completed, self.blob_dir
+            self.loop, blob_hash, length, self.blob_completed, self.blob_dir, is_mine=is_mine
         )

-    def get_blob(self, blob_hash, length: typing.Optional[int] = None):
+    def get_blob(self, blob_hash, length: typing.Optional[int] = None, is_mine: bool = False):
         if blob_hash in self.blobs:
             if self.config.save_blobs and isinstance(self.blobs[blob_hash], BlobBuffer):
                 buffer = self.blobs.pop(blob_hash)
                 if blob_hash in self.completed_blob_hashes:
                     self.completed_blob_hashes.remove(blob_hash)
-                self.blobs[blob_hash] = self._get_blob(blob_hash, length)
+                self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
                 if buffer.is_readable():
                     with buffer.reader_context() as reader:
                         self.blobs[blob_hash].write_blob(reader.read())
             if length and self.blobs[blob_hash].length is None:
                 self.blobs[blob_hash].set_length(length)
         else:
-            self.blobs[blob_hash] = self._get_blob(blob_hash, length)
+            self.blobs[blob_hash] = self._get_blob(blob_hash, length, is_mine)
         return self.blobs[blob_hash]

     def is_blob_verified(self, blob_hash: str, length: typing.Optional[int] = None) -> bool:
@@ -83,6 +83,8 @@ class BlobManager:
         to_add = await self.storage.sync_missing_blobs(in_blobfiles_dir)
         if to_add:
             self.completed_blob_hashes.update(to_add)
+        # check blobs that aren't set as finished but were seen on disk
+        await self.ensure_completed_blobs_status(in_blobfiles_dir - to_add)
         if self.config.track_bandwidth:
             self.connection_manager.start()
         return True
@@ -105,13 +107,26 @@ class BlobManager:
         if isinstance(blob, BlobFile):
             if blob.blob_hash not in self.completed_blob_hashes:
                 self.completed_blob_hashes.add(blob.blob_hash)
-            return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=True))
+            return self.loop.create_task(self.storage.add_blobs(
+                (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=True)
+            )
         else:
-            return self.loop.create_task(self.storage.add_blobs((blob.blob_hash, blob.length), finished=False))
+            return self.loop.create_task(self.storage.add_blobs(
+                (blob.blob_hash, blob.length, blob.added_on, blob.is_mine), finished=False)
+            )

-    def check_completed_blobs(self, blob_hashes: typing.List[str]) -> typing.List[str]:
-        """Returns of the blobhashes_to_check, which are valid"""
-        return [blob_hash for blob_hash in blob_hashes if self.is_blob_verified(blob_hash)]
+    async def ensure_completed_blobs_status(self, blob_hashes: typing.Iterable[str]):
+        """Ensures that completed blobs from a given list of blob hashes are set as 'finished' in the database."""
+        to_add = []
+        for blob_hash in blob_hashes:
+            if not self.is_blob_verified(blob_hash):
+                continue
+            blob = self.get_blob(blob_hash)
+            to_add.append((blob.blob_hash, blob.length, blob.added_on, blob.is_mine))
+            if len(to_add) > 500:
+                await self.storage.add_blobs(*to_add, finished=True)
+                to_add.clear()
+        return await self.storage.add_blobs(*to_add, finished=True)

     def delete_blob(self, blob_hash: str):
         if not is_valid_blobhash(blob_hash):
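
ensure_completed_blobs_status() flushes its pending rows whenever more than 500 accumulate, then writes the final partial batch. The same idea in isolation (a sketch; sink is a hypothetical async write function, not part of the SDK):

async def flush_in_batches(items, sink, batch_size=500):
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) > batch_size:   # flush a full batch
            await sink(*batch)
            batch.clear()
    return await sink(*batch)         # final, possibly smaller (or empty), batch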

View file

@@ -0,0 +1,77 @@
import asyncio
import logging

log = logging.getLogger(__name__)


class DiskSpaceManager:

    def __init__(self, config, db, blob_manager, cleaning_interval=30 * 60, analytics=None):
        self.config = config
        self.db = db
        self.blob_manager = blob_manager
        self.cleaning_interval = cleaning_interval
        self.running = False
        self.task = None
        self.analytics = analytics
        self._used_space_bytes = None

    async def get_free_space_mb(self, is_network_blob=False):
        limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
        space_used_mb = await self.get_space_used_mb()
        space_used_mb = space_used_mb['network_storage'] if is_network_blob else space_used_mb['content_storage']
        return max(0, limit_mb - space_used_mb)

    async def get_space_used_bytes(self):
        self._used_space_bytes = await self.db.get_stored_blob_disk_usage()
        return self._used_space_bytes

    async def get_space_used_mb(self, cached=True):
        cached = cached and self._used_space_bytes is not None
        space_used_bytes = self._used_space_bytes if cached else await self.get_space_used_bytes()
        return {key: int(value / 1024.0 / 1024.0) for key, value in space_used_bytes.items()}

    async def clean(self):
        await self._clean(False)
        await self._clean(True)

    async def _clean(self, is_network_blob=False):
        space_used_mb = await self.get_space_used_mb(cached=False)
        if is_network_blob:
            space_used_mb = space_used_mb['network_storage']
        else:
            space_used_mb = space_used_mb['content_storage'] + space_used_mb['private_storage']
        storage_limit_mb = self.config.network_storage_limit if is_network_blob else self.config.blob_storage_limit
        if self.analytics:
            asyncio.create_task(
                self.analytics.send_disk_space_used(space_used_mb, storage_limit_mb, is_network_blob)
            )
        delete = []
        available = storage_limit_mb - space_used_mb
        if storage_limit_mb == 0 if not is_network_blob else available >= 0:
            return 0
        for blob_hash, file_size, _ in await self.db.get_stored_blobs(is_mine=False, is_network_blob=is_network_blob):
            delete.append(blob_hash)
            available += int(file_size / 1024.0 / 1024.0)
            if available >= 0:
                break
        if delete:
            await self.db.stop_all_files()
            await self.blob_manager.delete_blobs(delete, delete_from_db=True)
        self._used_space_bytes = None
        return len(delete)

    async def cleaning_loop(self):
        while self.running:
            await asyncio.sleep(self.cleaning_interval)
            await self.clean()

    async def start(self):
        self.running = True
        self.task = asyncio.create_task(self.cleaning_loop())
        self.task.add_done_callback(lambda _: log.info("Stopping blob cleanup service."))

    async def stop(self):
        if self.running:
            self.running = False
            self.task.cancel()
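
Wiring the manager up might look roughly like the following (a sketch only; config, db, and blob_manager are pre-built dependencies whose construction is out of scope here):

import asyncio

async def run_cleanup(config, db, blob_manager):   # hypothetical driver
    manager = DiskSpaceManager(config, db, blob_manager, cleaning_interval=30 * 60)
    await manager.start()            # spawns the periodic cleaning_loop() task
    try:
        await asyncio.sleep(3600)    # stand-in for the daemon's lifetime
    finally:
        await manager.stop()         # flags the loop to stop and cancels the task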

View file

@@ -3,7 +3,7 @@ import logging
 import asyncio
 from io import BytesIO
 from lbry.error import InvalidBlobHashError, InvalidDataError
-from lbry.cryptoutils import get_lbry_hash_obj
+from lbry.utils import get_lbry_hash_obj
 log = logging.getLogger(__name__)

View file

@@ -32,7 +32,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.buf = b''
         # this is here to handle the race when the downloader is closed right as response_fut gets a result
-        self.closed = asyncio.Event(loop=self.loop)
+        self.closed = asyncio.Event()

     def data_received(self, data: bytes):
         if self.connection_manager:
@@ -95,7 +95,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         if self._response_fut and not self._response_fut.done():
             self._response_fut.set_exception(err)

-    async def _download_blob(self) -> typing.Tuple[int, Optional['BlobExchangeClientProtocol']]:
+    async def _download_blob(self) -> typing.Tuple[int, Optional['BlobExchangeClientProtocol']]:  # pylint: disable=too-many-return-statements
         """
         :return: download success (bool), connected protocol (BlobExchangeClientProtocol)
         """
@@ -111,7 +111,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         self.transport.write(msg)
         if self.connection_manager:
             self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
-        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout, loop=self.loop)
+        response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
         availability_response = response.get_availability_response()
         price_response = response.get_price_response()
         blob_response = response.get_blob_response()
@@ -151,7 +151,9 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
                   f" timeout in {self.peer_timeout}"
             log.debug(msg)
             msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
-            await asyncio.wait_for(self.writer.finished, self.peer_timeout, loop=self.loop)
+            await asyncio.wait_for(self.writer.finished, self.peer_timeout)
+            # wait for the io to finish
+            await self.blob.verified.wait()
             log.info("%s at %fMB/s", msg,
                      round((float(self._blob_bytes_received) /
                             float(time.perf_counter() - start_time)) / 1000000.0, 2))
@@ -185,7 +187,7 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
         try:
             self._blob_bytes_received = 0
             self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
-            self._response_fut = asyncio.Future(loop=self.loop)
+            self._response_fut = asyncio.Future()
             return await self._download_blob()
         except OSError:
             # i'm not sure how to fix this race condition - jack
@@ -213,11 +215,11 @@ class BlobExchangeClientProtocol(asyncio.Protocol):
             self.connection_manager.connection_made(f"{self.peer_address}:{self.peer_port}")
         log.debug("connection made to %s:%i", self.peer_address, self.peer_port)

-    def connection_lost(self, reason):
+    def connection_lost(self, exc):
         if self.connection_manager:
             self.connection_manager.outgoing_connection_lost(f"{self.peer_address}:{self.peer_port}")
-        log.debug("connection lost to %s:%i (reason: %s, %s)", self.peer_address, self.peer_port, str(reason),
-                  str(type(reason)))
+        log.debug("connection lost to %s:%i (reason: %s, %s)", self.peer_address, self.peer_port, str(exc),
+                  str(type(exc)))
         self.close()
@@ -242,7 +244,7 @@ async def request_blob(loop: asyncio.AbstractEventLoop, blob: Optional['Abstract
     try:
         if not connected_protocol:
             await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
-                                   peer_connect_timeout, loop=loop)
+                                   peer_connect_timeout)
             connected_protocol = protocol
         if blob is None or blob.get_is_verified() or not blob.is_writeable():
             # blob is None happens when we are just opening a connection

View file

@@ -3,6 +3,7 @@ import typing
 import logging
 from lbry.utils import cache_concurrent
 from lbry.blob_exchange.client import request_blob
+from lbry.dht.node import get_kademlia_peers_from_hosts
 if typing.TYPE_CHECKING:
     from lbry.conf import Config
     from lbry.dht.node import Node
@@ -27,9 +28,9 @@ class BlobDownloader:
         self.ignored: typing.Dict['KademliaPeer', int] = {}
         self.scores: typing.Dict['KademliaPeer', int] = {}
         self.failures: typing.Dict['KademliaPeer', int] = {}
-        self.connection_failures: typing.List['KademliaPeer'] = []
+        self.connection_failures: typing.Set['KademliaPeer'] = set()
         self.connections: typing.Dict['KademliaPeer', 'BlobExchangeClientProtocol'] = {}
-        self.is_running = asyncio.Event(loop=self.loop)
+        self.is_running = asyncio.Event()

     def should_race_continue(self, blob: 'AbstractBlob'):
         max_probes = self.config.max_connections_per_download * (1 if self.connections else 10)
@@ -48,7 +49,7 @@ class BlobDownloader:
             connection_id=connection_id, connection_manager=self.blob_manager.connection_manager
         )
         if not bytes_received and not protocol and peer not in self.connection_failures:
-            self.connection_failures.append(peer)
+            self.connection_failures.add(peer)
         if not protocol and peer not in self.ignored:
             self.ignored[peer] = self.loop.time()
             log.debug("drop peer %s:%i", peer.address, peer.tcp_port)
@@ -63,8 +64,8 @@ class BlobDownloader:
         self.scores[peer] = bytes_received / elapsed if bytes_received and elapsed else 1

     async def new_peer_or_finished(self):
-        active_tasks = list(self.active_connections.values()) + [asyncio.sleep(1)]
-        await asyncio.wait(active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
+        active_tasks = list(self.active_connections.values()) + [asyncio.create_task(asyncio.sleep(1))]
+        await asyncio.wait(active_tasks, return_when='FIRST_COMPLETED')

     def cleanup_active(self):
         if not self.active_connections and not self.connections:
@@ -89,30 +90,30 @@ class BlobDownloader:
         self.is_running.set()
         try:
             while not blob.get_is_verified() and self.is_running.is_set():
-                batch: typing.Set['KademliaPeer'] = set()
+                batch: typing.Set['KademliaPeer'] = set(self.connections.keys())
                 while not self.peer_queue.empty():
                     batch.update(self.peer_queue.get_nowait())
+                if batch:
+                    self.peer_queue.put_nowait(list(batch))
                 log.debug(
-                    "running, %d peers, %d ignored, %d active, %s connections",
+                    "%s running, %d peers, %d ignored, %d active, %s connections", blob_hash[:6],
                     len(batch), len(self.ignored), len(self.active_connections), len(self.connections)
                 )
                 for peer in sorted(batch, key=lambda peer: self.scores.get(peer, 0), reverse=True):
-                    if peer in self.ignored or peer in self.active_connections:
+                    if peer in self.ignored:
                         continue
-                    if not self.should_race_continue(blob):
-                        break
+                    if peer in self.active_connections or not self.should_race_continue(blob):
+                        continue
                     log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
-                    just_probe = len(self.connections) == 0
-                    t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id, just_probe))
+                    t = self.loop.create_task(self.request_blob_from_peer(blob, peer, connection_id))
                     self.active_connections[peer] = t
-                self.peer_queue.put_nowait(list(batch))
                 await self.new_peer_or_finished()
                 self.cleanup_active()
             log.debug("downloaded %s", blob_hash[:8])
             return blob
         finally:
             blob.close()
+            if self.loop.is_running():
+                self.loop.call_soon(self.cleanup_active)

     def close(self):
         self.connection_failures.clear()
@@ -123,11 +124,14 @@ class BlobDownloader:
             protocol.close()


-async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', node: 'Node',
+async def download_blob(loop, config: 'Config', blob_manager: 'BlobManager', dht_node: 'Node',
                         blob_hash: str) -> 'AbstractBlob':
-    search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
+    search_queue = asyncio.Queue(maxsize=config.max_connections_per_download)
     search_queue.put_nowait(blob_hash)
-    peer_queue, accumulate_task = node.accumulate_peers(search_queue)
+    peer_queue, accumulate_task = dht_node.accumulate_peers(search_queue)
+    fixed_peers = None if not config.fixed_peers else await get_kademlia_peers_from_hosts(config.fixed_peers)
+    if fixed_peers:
+        loop.call_later(config.fixed_peer_delay, peer_queue.put_nowait, fixed_peers)
     downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
     try:
         return await downloader.download_blob(blob_hash)
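
The fixed-peer fallback relies on loop.call_later(delay, callback, *args), which schedules a plain (non-async) callback; Queue.put_nowait qualifies. A minimal demonstration of the scheduling trick (peer names are made up):

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    peer_queue: asyncio.Queue = asyncio.Queue()
    fixed_peers = ["fallback-peer-1", "fallback-peer-2"]   # hypothetical
    # give DHT-discovered peers a head start before the fallbacks are queued
    loop.call_later(0.5, peer_queue.put_nowait, fixed_peers)
    print(await peer_queue.get())   # -> ['fallback-peer-1', 'fallback-peer-2']

asyncio.run(main())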

View file

@@ -46,7 +46,7 @@ class BlobAvailabilityRequest(BlobMessage):

     def __init__(self, requested_blobs: typing.List[str], lbrycrd_address: typing.Optional[bool] = True,
                  **kwargs) -> None:
-        assert len(requested_blobs)
+        assert len(requested_blobs) > 0
         self.requested_blobs = requested_blobs
         self.lbrycrd_address = lbrycrd_address
@@ -134,9 +134,9 @@ class BlobErrorResponse(BlobMessage):
     }

-blob_request_types = typing.Union[BlobPriceRequest, BlobAvailabilityRequest, BlobDownloadRequest,
+blob_request_types = typing.Union[BlobPriceRequest, BlobAvailabilityRequest, BlobDownloadRequest,  # pylint: disable=invalid-name
                                   BlobPaymentAddressRequest]
-blob_response_types = typing.Union[BlobPriceResponse, BlobAvailabilityResponse, BlobDownloadResponse,
+blob_response_types = typing.Union[BlobPriceResponse, BlobAvailabilityResponse, BlobDownloadResponse,  # pylint: disable=invalid-name
                                    BlobErrorResponse, BlobPaymentAddressResponse]
@@ -179,7 +179,7 @@ class BlobRequest:
         return d

     def _get_request(self, request_type: blob_request_types):
-        request = tuple(filter(lambda r: type(r) == request_type, self.requests))
+        request = tuple(filter(lambda r: type(r) == request_type, self.requests))  # pylint: disable=unidiomatic-typecheck
         if request:
             return request[0]
@@ -235,7 +235,7 @@ class BlobResponse:
         return d

     def _get_response(self, response_type: blob_response_types):
-        response = tuple(filter(lambda r: type(r) == response_type, self.responses))
+        response = tuple(filter(lambda r: type(r) == response_type, self.responses))  # pylint: disable=unidiomatic-typecheck
         if response:
             return response[0]
@@ -280,4 +280,3 @@ class BlobResponse:
             if response_type.key in response
         ])
         return cls(requests, extra)
-

View file

@@ -1,6 +1,7 @@
 import asyncio
 import binascii
 import logging
+import socket
 import typing
 from json.decoder import JSONDecodeError
 from lbry.blob_exchange.serialization import BlobResponse, BlobRequest, blob_response_types
@@ -24,19 +25,19 @@ class BlobServerProtocol(asyncio.Protocol):
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.buf = b''
         self.transport: typing.Optional[asyncio.Transport] = None
         self.lbrycrd_address = lbrycrd_address
         self.peer_address_and_port: typing.Optional[str] = None
-        self.started_transfer = asyncio.Event(loop=self.loop)
-        self.transfer_finished = asyncio.Event(loop=self.loop)
+        self.started_transfer = asyncio.Event()
+        self.transfer_finished = asyncio.Event()
         self.close_on_idle_task: typing.Optional[asyncio.Task] = None

     async def close_on_idle(self):
         while self.transport:
             try:
-                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout, loop=self.loop)
+                await asyncio.wait_for(self.started_transfer.wait(), self.idle_timeout)
             except asyncio.TimeoutError:
                 log.debug("closing idle connection from %s", self.peer_address_and_port)
                 return self.close()
@@ -96,22 +97,30 @@ class BlobServerProtocol(asyncio.Protocol):
             incoming_blob = {'blob_hash': blob.blob_hash, 'length': blob.length}
             responses.append(BlobDownloadResponse(incoming_blob=incoming_blob))
             self.send_response(responses)
-            bh = blob.blob_hash[:8]
-            log.debug("send %s to %s:%i", bh, peer_address, peer_port)
+            blob_hash = blob.blob_hash[:8]
+            log.debug("send %s to %s:%i", blob_hash, peer_address, peer_port)
             self.started_transfer.set()
             try:
-                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout, loop=self.loop)
-                self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
-                log.info("sent %s (%i bytes) to %s:%i", bh, sent, peer_address, peer_port)
-            except (ConnectionResetError, BrokenPipeError, RuntimeError, OSError, asyncio.TimeoutError) as err:
-                if isinstance(err, asyncio.TimeoutError):
-                    log.debug("timed out sending blob %s to %s", bh, peer_address)
-                else:
-                    log.debug("stopped sending %s to %s:%i", bh, peer_address, peer_port)
-                self.close()
+                sent = await asyncio.wait_for(blob.sendfile(self), self.transfer_timeout)
+                if sent and sent > 0:
+                    self.blob_manager.connection_manager.sent_data(self.peer_address_and_port, sent)
+                    log.info("sent %s (%i bytes) to %s:%i", blob_hash, sent, peer_address, peer_port)
+                else:
+                    self.close()
+                    log.debug("stopped sending %s to %s:%i", blob_hash, peer_address, peer_port)
+                    return
+            except (OSError, ValueError, asyncio.TimeoutError) as err:
+                if isinstance(err, asyncio.TimeoutError):
+                    log.debug("timed out sending blob %s to %s", blob_hash, peer_address)
+                else:
+                    log.warning("could not read blob %s to send %s:%i", blob_hash, peer_address, peer_port)
+                self.close()
+                return
             finally:
                 self.transfer_finished.set()
-        if responses:
+        else:
+            log.info("don't have %s to send %s:%i", blob.blob_hash[:8], peer_address, peer_port)
+        if responses and not self.transport.is_closing():
             self.send_response(responses)

     def data_received(self, data):
@@ -122,14 +131,14 @@ class BlobServerProtocol(asyncio.Protocol):
             return
         if data:
             self.blob_manager.connection_manager.received_data(self.peer_address_and_port, len(data))
-        message, separator, remainder = data.rpartition(b'}')
+        _, separator, remainder = data.rpartition(b'}')
         if not separator:
             self.buf += data
             return
         try:
             request = BlobRequest.deserialize(self.buf + data)
             self.buf = remainder
-        except JSONDecodeError:
+        except (UnicodeDecodeError, JSONDecodeError):
             log.error("request from %s is not valid json (%i bytes): %s", self.peer_address_and_port,
                       len(self.buf + data), '' if not data else binascii.hexlify(self.buf + data).decode())
             self.close()
@@ -148,7 +157,7 @@ class BlobServer:
         self.loop = loop
         self.blob_manager = blob_manager
         self.server_task: typing.Optional[asyncio.Task] = None
-        self.started_listening = asyncio.Event(loop=self.loop)
+        self.started_listening = asyncio.Event()
         self.lbrycrd_address = lbrycrd_address
         self.idle_timeout = idle_timeout
         self.transfer_timeout = transfer_timeout
@@ -159,6 +168,13 @@ class BlobServer:
             raise Exception("already running")

         async def _start_server():
+            # checking if the port is in use
+            # thx https://stackoverflow.com/a/52872579
+            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                if s.connect_ex(('localhost', port)) == 0:
+                    # the port is already in use!
+                    log.error("Failed to bind TCP %s:%d", interface, port)
             server = await self.loop.create_server(
                 lambda: self.server_protocol_class(self.loop, self.blob_manager, self.lbrycrd_address,
                                                    self.idle_timeout, self.transfer_timeout),
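
The _start_server() change probes the port with connect_ex() before binding. A standalone sketch of that probe (port_in_use is a hypothetical helper name; note this only detects an existing listener on localhost and is inherently racy, so the subsequent bind can still fail):

    import socket

    def port_in_use(port: int, host: str = "localhost") -> bool:
        # connect_ex() returns 0 when something is already accepting connections
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            return s.connect_ex((host, port)) == 0

    print(port_in_use(3333))
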

View file

@@ -1,3 +1,4 @@
 # don't touch this. CI server changes this during build/deployment
 BUILD = "dev"
-BUILD_COMMIT = "source installation"
+COMMIT_HASH = "none"
+DOCKER_TAG = "none"

View file

@@ -1,21 +1,21 @@
 import os
 import re
 import sys
-import typing
 import logging
-import yaml
+from typing import List, Dict, Tuple, Union, TypeVar, Generic, Optional
 from argparse import ArgumentParser
 from contextlib import contextmanager
 from appdirs import user_data_dir, user_config_dir
+import yaml
 from lbry.error import InvalidCurrencyError
 from lbry.dht import constants
-from torba.client.coinselection import STRATEGIES
+from lbry.wallet.coinselection import STRATEGIES

 log = logging.getLogger(__name__)

-NOT_SET = type('NOT_SET', (object,), {})
+NOT_SET = type('NOT_SET', (object,), {})  # pylint: disable=invalid-name

-T = typing.TypeVar('T')
+T = TypeVar('T')

 CURRENCIES = {
     'BTC': {'type': 'crypto'},
@@ -24,18 +24,18 @@ CURRENCIES = {
 }

-class Setting(typing.Generic[T]):
+class Setting(Generic[T]):

-    def __init__(self, doc: str, default: typing.Optional[T] = None,
-                 previous_names: typing.Optional[typing.List[str]] = None,
-                 metavar: typing.Optional[str] = None):
+    def __init__(self, doc: str, default: Optional[T] = None,
+                 previous_names: Optional[List[str]] = None,
+                 metavar: Optional[str] = None):
         self.doc = doc
         self.default = default
         self.previous_names = previous_names or []
         self.metavar = metavar

     def __set_name__(self, owner, name):
-        self.name = name
+        self.name = name  # pylint: disable=attribute-defined-outside-init

     @property
     def cli_name(self):
@@ -45,7 +45,7 @@ class Setting(typing.Generic[T]):
     def no_cli_name(self):
         return f"--no-{self.name.replace('_', '-')}"

-    def __get__(self, obj: typing.Optional['BaseConfig'], owner) -> T:
+    def __get__(self, obj: Optional['BaseConfig'], owner) -> T:
         if obj is None:
             return self
         for location in obj.search_order:
@@ -53,7 +53,7 @@ class Setting(typing.Generic[T]):
             return location[self.name]
         return self.default

-    def __set__(self, obj: 'BaseConfig', val: typing.Union[T, NOT_SET]):
+    def __set__(self, obj: 'BaseConfig', val: Union[T, NOT_SET]):
         if val == NOT_SET:
             for location in obj.modify_order:
                 if self.name in location:
@@ -63,13 +63,25 @@ class Setting(typing.Generic[T]):
         for location in obj.modify_order:
             location[self.name] = val

-    def validate(self, val):
+    def is_set(self, obj: 'BaseConfig') -> bool:
+        for location in obj.search_order:
+            if self.name in location:
+                return True
+        return False
+
+    def is_set_to_default(self, obj: 'BaseConfig') -> bool:
+        for location in obj.search_order:
+            if self.name in location:
+                return location[self.name] == self.default
+        return False
+
+    def validate(self, value):
         raise NotImplementedError()

-    def deserialize(self, value):
+    def deserialize(self, value):  # pylint: disable=no-self-use
         return value

-    def serialize(self, value):
+    def serialize(self, value):  # pylint: disable=no-self-use
         return value

     def contribute_to_argparse(self, parser: ArgumentParser):
@@ -82,14 +94,18 @@ class Setting(typing.Generic[T]):

 class String(Setting[str]):

-    def validate(self, val):
-        assert isinstance(val, str), \
+    def validate(self, value):
+        assert isinstance(value, str), \
             f"Setting '{self.name}' must be a string."

+    # TODO: removes this after pylint starts to understand generics
+    def __get__(self, obj: Optional['BaseConfig'], owner) -> str:  # pylint: disable=useless-super-delegation
+        return super().__get__(obj, owner)
+

 class Integer(Setting[int]):

-    def validate(self, val):
-        assert isinstance(val, int), \
+    def validate(self, value):
+        assert isinstance(value, int), \
             f"Setting '{self.name}' must be an integer."

     def deserialize(self, value):
@@ -97,8 +113,8 @@ class Integer(Setting[int]):

 class Float(Setting[float]):

-    def validate(self, val):
-        assert isinstance(val, float), \
+    def validate(self, value):
+        assert isinstance(value, float), \
             f"Setting '{self.name}' must be a decimal."

     def deserialize(self, value):
@@ -106,8 +122,8 @@ class Float(Setting[float]):

 class Toggle(Setting[bool]):

-    def validate(self, val):
-        assert isinstance(val, bool), \
+    def validate(self, value):
+        assert isinstance(value, bool), \
             f"Setting '{self.name}' must be a true/false value."

     def contribute_to_argparse(self, parser: ArgumentParser):
@@ -127,10 +143,10 @@ class Toggle(Setting[bool]):

 class Path(String):

-    def __init__(self, doc: str, default: str = '', *args, **kwargs):
+    def __init__(self, doc: str, *args, default: str = '', **kwargs):
         super().__init__(doc, default, *args, **kwargs)

-    def __get__(self, obj, owner):
+    def __get__(self, obj, owner) -> str:
         value = super().__get__(obj, owner)
         if isinstance(value, str):
             return os.path.expanduser(os.path.expandvars(value))
@@ -187,7 +203,7 @@ class MaxKeyFee(Setting[dict]):
         )
         parser.add_argument(
             self.no_cli_name,
-            help=f"Disable maximum key fee check.",
+            help="Disable maximum key fee check.",
             dest=self.name,
             const=None,
             action="store_const",
@@ -196,7 +212,7 @@ class MaxKeyFee(Setting[dict]):

 class StringChoice(String):

-    def __init__(self, doc: str, valid_values: typing.List[str], default: str, *args, **kwargs):
+    def __init__(self, doc: str, valid_values: List[str], default: str, *args, **kwargs):
         super().__init__(doc, default, *args, **kwargs)
         if not valid_values:
             raise ValueError("No valid values provided")
@@ -204,16 +220,16 @@ class StringChoice(String):
             raise ValueError(f"Default value must be one of: {', '.join(valid_values)}")
         self.valid_values = valid_values

-    def validate(self, val):
-        super().validate(val)
-        if val not in self.valid_values:
+    def validate(self, value):
+        super().validate(value)
+        if value not in self.valid_values:
             raise ValueError(f"Setting '{self.name}' value must be one of: {', '.join(self.valid_values)}")


 class ListSetting(Setting[list]):

-    def validate(self, val):
-        assert isinstance(val, (tuple, list)), \
+    def validate(self, value):
+        assert isinstance(value, (tuple, list)), \
             f"Setting '{self.name}' must be a tuple or list."

     def contribute_to_argparse(self, parser: ArgumentParser):
@@ -226,10 +242,10 @@ class ListSetting(Setting[list]):

 class Servers(ListSetting):

-    def validate(self, val):
-        assert isinstance(val, (tuple, list)), \
+    def validate(self, value):
+        assert isinstance(value, (tuple, list)), \
             f"Setting '{self.name}' must be a tuple or list of servers."
-        for idx, server in enumerate(val):
+        for idx, server in enumerate(value):
             assert isinstance(server, (tuple, list)) and len(server) == 2, \
                 f"Server defined '{server}' at index {idx} in setting " \
                 f"'{self.name}' must be a tuple or list of two items."
@@ -260,26 +276,104 @@ class Servers(ListSetting):

 class Strings(ListSetting):

-    def validate(self, val):
-        assert isinstance(val, (tuple, list)), \
+    def validate(self, value):
+        assert isinstance(value, (tuple, list)), \
             f"Setting '{self.name}' must be a tuple or list of strings."
-        for idx, string in enumerate(val):
+        for idx, string in enumerate(value):
             assert isinstance(string, str), \
                 f"Value of '{string}' at index {idx} in setting " \
                 f"'{self.name}' must be a string."

+
+class KnownHubsList:
+
+    def __init__(self, config: 'Config' = None, file_name: str = 'known_hubs.yml'):
+        self.file_name = file_name
+        self.path = os.path.join(config.wallet_dir, self.file_name) if config else None
+        self.hubs: Dict[Tuple[str, int], Dict] = {}
+        if self.exists:
+            self.load()
+
+    @property
+    def exists(self):
+        return self.path and os.path.exists(self.path)
+
+    @property
+    def serialized(self) -> Dict[str, Dict]:
+        return {f"{host}:{port}": details for (host, port), details in self.hubs.items()}
+
+    def filter(self, match_none=False, **kwargs):
+        if not kwargs:
+            return self.hubs
+        result = {}
+        for hub, details in self.hubs.items():
+            for key, constraint in kwargs.items():
+                value = details.get(key)
+                if value == constraint or (match_none and value is None):
+                    result[hub] = details
+                    break
+        return result
+
+    def load(self):
+        if self.path:
+            with open(self.path, 'r') as known_hubs_file:
+                raw = known_hubs_file.read()
+                for hub, details in yaml.safe_load(raw).items():
+                    self.set(hub, details)
+
+    def save(self):
+        if self.path:
+            with open(self.path, 'w') as known_hubs_file:
+                known_hubs_file.write(yaml.safe_dump(self.serialized, default_flow_style=False))
+
+    def set(self, hub: str, details: Dict):
+        if hub and hub.count(':') == 1:
+            host, port = hub.split(':')
+            hub_parts = (host, int(port))
+            if hub_parts not in self.hubs:
+                self.hubs[hub_parts] = details
+                return hub
+
+    def add_hubs(self, hubs: List[str]):
+        added = False
+        for hub in hubs:
+            if self.set(hub, {}) is not None:
+                added = True
+        return added
+
+    def items(self):
+        return self.hubs.items()
+
+    def __bool__(self):
+        return len(self) > 0
+
+    def __len__(self):
+        return self.hubs.__len__()
+
+    def __iter__(self):
+        return iter(self.hubs)
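
Hypothetical usage of the KnownHubsList added above, run in the context of this module (or via from lbry.conf import KnownHubsList) and constructed without a Config, so nothing is read from or written to disk:

    hubs = KnownHubsList()
    hubs.add_hubs(["spv11.lbry.com:50001", "spv12.lbry.com:50001"])
    hubs.hubs[("spv11.lbry.com", 50001)] = {"country": "US"}  # attach details later
    print(len(hubs))                  # 2
    print(hubs.filter(country="US"))  # only the hub tagged US
    print(hubs.serialized)            # {'spv11.lbry.com:50001': {...}, ...}
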

 class EnvironmentAccess:
     PREFIX = 'LBRY_'

-    def __init__(self, environ: dict):
-        self.environ = environ
+    def __init__(self, config: 'BaseConfig', environ: dict):
+        self.configuration = config
+        self.data = {}
+        if environ:
+            self.load(environ)
+
+    def load(self, environ):
+        for setting in self.configuration.get_settings():
+            value = environ.get(f'{self.PREFIX}{setting.name.upper()}', NOT_SET)
+            if value != NOT_SET and not (isinstance(setting, ListSetting) and value is None):
+                self.data[setting.name] = setting.deserialize(value)

     def __contains__(self, item: str):
-        return f'{self.PREFIX}{item.upper()}' in self.environ
+        return item in self.data

     def __getitem__(self, item: str):
-        return self.environ[f'{self.PREFIX}{item.upper()}']
+        return self.data[item]
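
With the reworked EnvironmentAccess, LBRY_-prefixed variables are read once and passed through each Setting's own deserialize(), so numeric settings arrive as numbers rather than strings. A sketch, assuming the lbry package is importable and a Config can be default-constructed as elsewhere in this module:

    import os
    from lbry.conf import Config  # assumption: lbry is installed

    os.environ["LBRY_TCP_PORT"] = "4445"
    config = Config()
    config.set_environment()  # now backed by the new EnvironmentAccess
    print(config.tcp_port)    # 4445 as an int, via Integer.deserialize()
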

 class ArgumentAccess:

@@ -320,7 +414,7 @@ class ConfigFileAccess:
         cls = type(self.configuration)
         with open(self.path, 'r') as config_file:
             raw = config_file.read()
-        serialized = yaml.load(raw) or {}
+        serialized = yaml.safe_load(raw) or {}
         for key, value in serialized.items():
             attr = getattr(cls, key, None)
             if attr is None:
@@ -364,7 +458,7 @@ class ConfigFileAccess:
         del self.data[key]

-TBC = typing.TypeVar('TBC', bound='BaseConfig')
+TBC = TypeVar('TBC', bound='BaseConfig')


 class BaseConfig:

@@ -438,7 +532,7 @@ class BaseConfig:
         self.arguments = ArgumentAccess(self, args)

     def set_environment(self, environ=None):
-        self.environment = EnvironmentAccess(environ or os.environ)
+        self.environment = EnvironmentAccess(self, environ or os.environ)

     def set_persisted(self, config_file_path=None):
         if config_file_path is None:
@@ -457,7 +551,27 @@ class BaseConfig:
         self.persisted.save()


-class CLIConfig(BaseConfig):
+class TranscodeConfig(BaseConfig):
+
+    ffmpeg_path = String('A list of places to check for ffmpeg and ffprobe. '
+                         f'$data_dir/ffmpeg/bin and $PATH are checked afterward. Separator: {os.pathsep}',
+                         '', previous_names=['ffmpeg_folder'])
+    video_encoder = String('FFmpeg codec and parameters for the video encoding. '
+                           'Example: libaom-av1 -crf 25 -b:v 0 -strict experimental',
+                           'libx264 -crf 24 -preset faster -pix_fmt yuv420p')
+    video_bitrate_maximum = Integer('Maximum bits per second allowed for video streams (0 to disable).', 5_000_000)
+    video_scaler = String('FFmpeg scaling parameters for reducing bitrate. '
+                          'Example: -vf "scale=-2:720,fps=24" -maxrate 5M -bufsize 3M',
+                          r'-vf "scale=if(gte(iw\,ih)\,min(1920\,iw)\,-2):if(lt(iw\,ih)\,min(1920\,ih)\,-2)" '
+                          r'-maxrate 5500K -bufsize 5000K')
+    audio_encoder = String('FFmpeg codec and parameters for the audio encoding. '
+                           'Example: libopus -b:a 128k',
+                           'aac -b:a 160k')
+    volume_filter = String('FFmpeg filter for audio normalization. Example: -af loudnorm', '')
+    volume_analysis_time = Integer('Maximum seconds into the file that we examine audio volume (0 to disable).', 240)
+
+
+class CLIConfig(TranscodeConfig):

     api = String('Host name and port for lbrynet daemon API.', 'localhost:5279', metavar='HOST:PORT')
@@ -475,6 +589,9 @@ class CLIConfig(BaseConfig):

 class Config(CLIConfig):
+
+    jurisdiction = String("Limit interactions to wallet server in this jurisdiction.")
+
     # directories
     data_dir = Path("Directory path to store blobs.", metavar='DIR')
     download_dir = Path(
@@ -496,7 +613,8 @@ class Config(CLIConfig):
         "ports or have firewall rules you likely want to disable this.", True
     )
     udp_port = Integer("UDP port for communicating on the LBRY DHT", 4444, previous_names=['dht_node_port'])
-    tcp_port = Integer("TCP port to listen for incoming blob requests", 3333, previous_names=['peer_port'])
+    tcp_port = Integer("TCP port to listen for incoming blob requests", 4444, previous_names=['peer_port'])
+    prometheus_port = Integer("Port to expose prometheus metrics (off by default)", 0)
     network_interface = String("Interface to use for the DHT and blob exchange", '0.0.0.0')

     # routing table
@@ -504,17 +622,24 @@
         "Routing table bucket index below which we always split the bucket if given a new key to add to it and "
         "the bucket is full. As this value is raised the depth of the routing table (and number of peers in it) "
         "will increase. This setting is used by seed nodes, you probably don't want to change it during normal "
-        "use.", 1
+        "use.", 2
+    )
+    is_bootstrap_node = Toggle(
+        "When running as a bootstrap node, disable all logic related to balancing the routing table, so we can "
+        "add as many peers as possible and better help first-runs.", False
     )

     # protocol timeouts
     download_timeout = Float("Cumulative timeout for a stream to begin downloading before giving up", 30.0)
     blob_download_timeout = Float("Timeout to download a blob from a peer", 30.0)
+    hub_timeout = Float("Timeout when making a hub request", 30.0)
     peer_connect_timeout = Float("Timeout to establish a TCP connection to a peer", 3.0)
-    node_rpc_timeout = Float("Timeout when making a DHT request", constants.rpc_timeout)
+    node_rpc_timeout = Float("Timeout when making a DHT request", constants.RPC_TIMEOUT)

     # blob announcement and download
     save_blobs = Toggle("Save encrypted blob files for hosting, otherwise download blobs to memory only.", True)
+    network_storage_limit = Integer("Disk space in MB to be allocated for helping the P2P network. 0 = disable", 0)
+    blob_storage_limit = Integer("Disk space in MB to be allocated for blob storage. 0 = no limit", 0)
     blob_lru_cache_size = Integer(
         "LRU cache size for decrypted downloaded blobs used to minimize re-downloading the same blobs when "
         "replying to a range request. Set to 0 to disable.", 32
@@ -531,6 +656,7 @@ class Config(CLIConfig):
         "Maximum number of peers to connect to while downloading a blob", 4,
         previous_names=['max_connections_per_stream']
     )
+    concurrent_hub_requests = Integer("Maximum number of concurrent hub requests", 32)
     fixed_peer_delay = Float(
         "Amount of seconds before adding the reflector servers as potential peers to download from in case dht"
         "peers are not found or are slow", 2.0
@@ -539,6 +665,7 @@ class Config(CLIConfig):
         "Don't download streams with fees exceeding this amount. When set to "
         "null, the amount is unbounded.", {'currency': 'USD', 'amount': 50.0}
     )
+    max_wallet_server_fee = String("Maximum daily LBC amount allowed as payment for wallet servers.", "0.0")

     # reflector settings
     reflect_streams = Toggle(
@@ -550,42 +677,62 @@ class Config(CLIConfig):
     )

     # servers
-    reflector_servers = Servers("Reflector re-hosting servers", [
+    reflector_servers = Servers("Reflector re-hosting servers for mirroring publishes", [
         ('reflector.lbry.com', 5566)
     ])
+
+    fixed_peers = Servers("Fixed peers to fall back to if none are found on P2P for a blob", [
+        ('cdn.reflector.lbry.com', 5567)
+    ])
+
+    tracker_servers = Servers("BitTorrent-compatible (BEP15) UDP trackers for helping P2P discovery", [
+        ('tracker.lbry.com', 9252),
+        ('tracker.lbry.grin.io', 9252),
+        ('tracker.lbry.pigg.es', 9252),
+        ('tracker.lizard.technology', 9252),
+        ('s1.lbry.network', 9252),
+    ])

     lbryum_servers = Servers("SPV wallet servers", [
-        ('spv1.lbry.com', 50001),
-        ('spv2.lbry.com', 50001),
-        ('spv3.lbry.com', 50001),
-        ('spv4.lbry.com', 50001),
-        ('spv5.lbry.com', 50001),
-        ('spv6.lbry.com', 50001),
-        ('spv7.lbry.com', 50001),
-        ('spv8.lbry.com', 50001),
-        ('spv9.lbry.com', 50001),
+        ('spv11.lbry.com', 50001),
+        ('spv12.lbry.com', 50001),
+        ('spv13.lbry.com', 50001),
+        ('spv14.lbry.com', 50001),
+        ('spv15.lbry.com', 50001),
+        ('spv16.lbry.com', 50001),
+        ('spv17.lbry.com', 50001),
+        ('spv18.lbry.com', 50001),
+        ('spv19.lbry.com', 50001),
+        ('hub.lbry.grin.io', 50001),
+        ('hub.lizard.technology', 50001),
+        ('s1.lbry.network', 50001),
     ])

     known_dht_nodes = Servers("Known nodes for bootstrapping connection to the DHT", [
+        ('dht.lbry.grin.io', 4444),  # Grin
+        ('dht.lbry.madiator.com', 4444),  # Madiator
+        ('dht.lbry.pigg.es', 4444),  # Pigges
         ('lbrynet1.lbry.com', 4444),  # US EAST
         ('lbrynet2.lbry.com', 4444),  # US WEST
         ('lbrynet3.lbry.com', 4444),  # EU
-        ('lbrynet4.lbry.com', 4444)  # ASIA
+        ('lbrynet4.lbry.com', 4444),  # ASIA
+        ('dht.lizard.technology', 4444),  # Jack
+        ('s2.lbry.network', 4444),
     ])

+    comment_server = String("Comment server API URL", "https://comments.lbry.com/api")
+
     # blockchain
     blockchain_name = String("Blockchain name - lbrycrd_main, lbrycrd_regtest, or lbrycrd_testnet", 'lbrycrd_main')
-    s3_headers_depth = Integer("download headers from s3 when the local height is more than 10 chunks behind", 96 * 10)
-    cache_time = Integer("Time to cache resolved claims", 150)  # TODO: use this

     # daemon
-    save_files = Toggle("Save downloaded files when calling `get` by default", True)
+    save_files = Toggle("Save downloaded files when calling `get` by default", False)
     components_to_skip = Strings("components which will be skipped during start-up of daemon", [])
     share_usage_data = Toggle(
-        "Whether to share usage stats and diagnostic info with LBRY.", True,
+        "Whether to share usage stats and diagnostic info with LBRY.", False,
         previous_names=['upload_log', 'upload_log', 'share_debug_info']
     )
     track_bandwidth = Toggle("Track bandwidth usage", True)
+    allowed_origin = String(
+        "Allowed `Origin` header value for API request (sent by browser), use * to allow "
+        "all hosts; default is to only allow API requests with no `Origin` value.", "")

     # media server
     streaming_server = String('Host name and port to serve streaming media over range requests',
@@ -595,7 +742,14 @@ class Config(CLIConfig):
     coin_selection_strategy = StringChoice(
         "Strategy to use when selecting UTXOs for a transaction",
-        STRATEGIES, "standard")
+        STRATEGIES, "prefer_confirmed"
+    )
+
+    transaction_cache_size = Integer("Transaction cache size", 2 ** 17)
+    save_resolved_claims = Toggle(
+        "Save content claims to the database when they are resolved to keep file_list up to date, "
+        "only disable this if file_x commands are not needed", True
+    )

     @property
     def streaming_host(self):
@@ -608,6 +762,7 @@ class Config(CLIConfig):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
         self.set_default_paths()
+        self.known_hubs = KnownHubsList(self)

     def set_default_paths(self):
         if 'darwin' in sys.platform.lower():
@@ -629,10 +784,14 @@ class Config(CLIConfig):
         return os.path.join(self.data_dir, 'lbrynet.log')


-def get_windows_directories() -> typing.Tuple[str, str, str]:
-    from lbry.winpaths import get_path, FOLDERID, UserHandle  # pylint: disable=import-outside-toplevel
+def get_windows_directories() -> Tuple[str, str, str]:
+    from lbry.winpaths import get_path, FOLDERID, UserHandle, \
+        PathNotFoundException  # pylint: disable=import-outside-toplevel

-    download_dir = get_path(FOLDERID.Downloads, UserHandle.current)
+    try:
+        download_dir = get_path(FOLDERID.Downloads, UserHandle.current)
+    except PathNotFoundException:
+        download_dir = os.getcwd()

     # old
     appdata = get_path(FOLDERID.RoamingAppData, UserHandle.current)
@@ -644,22 +803,22 @@ def get_windows_directories() -> typing.Tuple[str, str, str]:
     # new
     data_dir = user_data_dir('lbrynet', 'lbry')
     lbryum_dir = user_data_dir('lbryum', 'lbry')
-    download_dir = get_path(FOLDERID.Downloads, UserHandle.current)
     return data_dir, lbryum_dir, download_dir


-def get_darwin_directories() -> typing.Tuple[str, str, str]:
+def get_darwin_directories() -> Tuple[str, str, str]:
     data_dir = user_data_dir('LBRY')
     lbryum_dir = os.path.expanduser('~/.lbryum')
     download_dir = os.path.expanduser('~/Downloads')
     return data_dir, lbryum_dir, download_dir


-def get_linux_directories() -> typing.Tuple[str, str, str]:
+def get_linux_directories() -> Tuple[str, str, str]:
     try:
         with open(os.path.join(user_config_dir(), 'user-dirs.dirs'), 'r') as xdg:
-            down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read()).group(1)
-            down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir)
-            download_dir = re.sub('\"', '', down_dir)
+            down_dir = re.search(r'XDG_DOWNLOAD_DIR=(.+)', xdg.read())
+            if down_dir:
+                down_dir = re.sub(r'\$HOME', os.getenv('HOME') or os.path.expanduser("~/"), down_dir.group(1))
+                download_dir = re.sub('\"', '', down_dir)
     except OSError:
         download_dir = os.getenv('XDG_DOWNLOAD_DIR')

View file

@@ -67,18 +67,18 @@ class ConnectionManager:
         while True:
             last = time.perf_counter()
-            await asyncio.sleep(0.1, loop=self.loop)
+            await asyncio.sleep(0.1)
             self._status['incoming_bps'].clear()
             self._status['outgoing_bps'].clear()
             now = time.perf_counter()
             while self.outgoing:
-                k, v = self.outgoing.popitem()
-                self._status['total_sent'] += v
-                self._status['outgoing_bps'][k] = v / (now - last)
+                k, sent = self.outgoing.popitem()
+                self._status['total_sent'] += sent
+                self._status['outgoing_bps'][k] = sent / (now - last)
             while self.incoming:
-                k, v = self.incoming.popitem()
-                self._status['total_received'] += v
-                self._status['incoming_bps'][k] = v / (now - last)
+                k, received = self.incoming.popitem()
+                self._status['total_received'] += received
+                self._status['incoming_bps'][k] = received / (now - last)
             self._status['total_outgoing_mbs'] = int(sum(list(self._status['outgoing_bps'].values())
                                                          )) / 1000000.0
             self._status['total_incoming_mbs'] = int(sum(list(self._status['incoming_bps'].values())
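
A toy version of the bandwidth bookkeeping above: per-peer byte counters are drained each tick and converted to bytes-per-second over the elapsed window (all names here are local to the example):

    import time

    outgoing = {("1.2.3.4", 3333): 500_000}
    status = {"outgoing_bps": {}, "total_sent": 0}
    last = time.perf_counter() - 0.1   # pretend the previous tick was 100 ms ago
    now = time.perf_counter()
    while outgoing:
        k, sent = outgoing.popitem()
        status["total_sent"] += sent
        status["outgoing_bps"][k] = sent / (now - last)
    print(status)  # roughly 5 MB/s for the one peer
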

lbry/constants.py (new file, 2 additions)
View file

@@ -0,0 +1,2 @@
CENT = 1000000
COIN = 100*CENT

lbry/crypto/base58.py (new file, 86 additions)
View file

@@ -0,0 +1,86 @@
from lbry.crypto.hash import double_sha256
from lbry.crypto.util import bytes_to_int, int_to_bytes
class Base58Error(Exception):
""" Exception used for Base58 errors. """
class Base58:
""" Class providing base 58 functionality. """
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(chars) == 58
char_map = {c: n for n, c in enumerate(chars)}
@classmethod
def char_value(cls, c):
val = cls.char_map.get(c)
if val is None:
raise Base58Error(f'invalid base 58 character "{c}"')
return val
@classmethod
def decode(cls, txt):
""" Decodes txt into a big-endian bytearray. """
if isinstance(txt, memoryview):
txt = str(txt)
if isinstance(txt, bytes):
txt = txt.decode()
if not isinstance(txt, str):
raise TypeError('a string is required')
if not txt:
raise Base58Error('string cannot be empty')
value = 0
for c in txt:
value = value * 58 + cls.char_value(c)
result = int_to_bytes(value)
# Prepend leading zero bytes if necessary
count = 0
for c in txt:
if c != '1':
break
count += 1
if count:
result = bytes((0,)) * count + result
return result
@classmethod
def encode(cls, be_bytes):
"""Converts a big-endian bytearray into a base58 string."""
value = bytes_to_int(be_bytes)
txt = ''
while value:
value, mod = divmod(value, 58)
txt += cls.chars[mod]
for byte in be_bytes:
if byte != 0:
break
txt += '1'
return txt[::-1]
@classmethod
def decode_check(cls, txt, hash_fn=double_sha256):
""" Decodes a Base58Check-encoded string to a payload. The version prefixes it. """
be_bytes = cls.decode(txt)
result, check = be_bytes[:-4], be_bytes[-4:]
if check != hash_fn(result)[:4]:
raise Base58Error(f'invalid base 58 checksum for {txt}')
return result
@classmethod
def encode_check(cls, payload, hash_fn=double_sha256):
""" Encodes a payload bytearray (which includes the version byte(s))
into a Base58Check string."""
be_bytes = payload + hash_fn(payload)[:4]
return cls.encode(be_bytes)
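
A quick round-trip check for the Base58 helpers above (the payload is an arbitrary example, not a real address):

    payload = bytes.fromhex("00f54a5851e9372b87810a8e60cdd2e7cfd80b6e31")
    encoded = Base58.encode_check(payload)
    assert Base58.decode_check(encoded) == payload
    # leading zero bytes survive the round trip as leading '1' characters
    assert Base58.decode(Base58.encode(b"\x00\x01hi")) == b"\x00\x01hi"
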

lbry/crypto/crypt.py (new file, 71 additions)
View file

@@ -0,0 +1,71 @@
import os
import base64
import typing
from cryptography.hazmat.primitives.kdf.scrypt import Scrypt
from cryptography.hazmat.primitives.ciphers import Cipher, modes
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.padding import PKCS7
from cryptography.hazmat.backends import default_backend
from lbry.error import InvalidPasswordError
from lbry.crypto.hash import double_sha256
def aes_encrypt(secret: str, value: str, init_vector: bytes = None) -> str:
if init_vector is not None:
assert len(init_vector) == 16
else:
init_vector = os.urandom(16)
key = double_sha256(secret.encode())
encryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).encryptor()
padder = PKCS7(AES.block_size).padder()
padded_data = padder.update(value.encode()) + padder.finalize()
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
return base64.b64encode(init_vector + encrypted_data).decode()
def aes_decrypt(secret: str, value: str) -> typing.Tuple[str, bytes]:
try:
data = base64.b64decode(value.encode())
key = double_sha256(secret.encode())
init_vector, data = data[:16], data[16:]
decryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).decryptor()
unpadder = PKCS7(AES.block_size).unpadder()
result = unpadder.update(decryptor.update(data)) + unpadder.finalize()
return result.decode(), init_vector
except UnicodeDecodeError:
raise InvalidPasswordError()
except ValueError as e:
if e.args[0] == 'Invalid padding bytes.':
raise InvalidPasswordError()
raise
def better_aes_encrypt(secret: str, value: bytes) -> bytes:
init_vector = os.urandom(16)
key = scrypt(secret.encode(), salt=init_vector)
encryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).encryptor()
padder = PKCS7(AES.block_size).padder()
padded_data = padder.update(value) + padder.finalize()
encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
return base64.b64encode(b's:8192:16:1:' + init_vector + encrypted_data)
def better_aes_decrypt(secret: str, value: bytes) -> bytes:
try:
data = base64.b64decode(value)
_, scryp_n, scrypt_r, scrypt_p, data = data.split(b':', maxsplit=4)
init_vector, data = data[:16], data[16:]
key = scrypt(secret.encode(), init_vector, int(scryp_n), int(scrypt_r), int(scrypt_p))
decryptor = Cipher(AES(key), modes.CBC(init_vector), default_backend()).decryptor()
unpadder = PKCS7(AES.block_size).unpadder()
return unpadder.update(decryptor.update(data)) + unpadder.finalize()
except ValueError as e:
if e.args[0] == 'Invalid padding bytes.':
raise InvalidPasswordError()
raise
def scrypt(passphrase, salt, scrypt_n=1<<13, scrypt_r=16, scrypt_p=1):
kdf = Scrypt(salt, length=32, n=scrypt_n, r=scrypt_r, p=scrypt_p, backend=default_backend())
return kdf.derive(passphrase)
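
A round-trip check for the scrypt-based helpers above; the legacy aes_encrypt()/aes_decrypt() pair works the same way but derives the key with double_sha256 instead of scrypt:

    token = better_aes_encrypt("hunter2", b"wallet seed goes here")
    assert better_aes_decrypt("hunter2", token) == b"wallet seed goes here"
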

lbry/crypto/hash.py (new file, 47 additions)
View file

@@ -0,0 +1,47 @@
import hashlib
import hmac
from binascii import hexlify, unhexlify
def sha256(x):
""" Simple wrapper of hashlib sha256. """
return hashlib.sha256(x).digest()
def sha512(x):
""" Simple wrapper of hashlib sha512. """
return hashlib.sha512(x).digest()
def ripemd160(x):
""" Simple wrapper of hashlib ripemd160. """
h = hashlib.new('ripemd160')
h.update(x)
return h.digest()
def double_sha256(x):
""" SHA-256 of SHA-256, as used extensively in bitcoin. """
return sha256(sha256(x))
def hmac_sha512(key, msg):
""" Use SHA-512 to provide an HMAC. """
return hmac.new(key, msg, hashlib.sha512).digest()
def hash160(x):
""" RIPEMD-160 of SHA-256.
Used to make bitcoin addresses from pubkeys. """
return ripemd160(sha256(x))
def hash_to_hex_str(x):
    """ Convert a big-endian binary hash to displayed hex string.
    Display form of a binary hash is reversed and converted to hex. """
    return hexlify(x[::-1]).decode()
def hex_str_to_hash(x):
    """ Convert a displayed hex string to a binary hash. """
    return unhexlify(x)[::-1]
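
Example of the display convention these helpers implement (bitcoin-style, byte-reversed hex; note both functions are written here with x[::-1] rather than reversed(), since hexlify() and equality checks need bytes, not an iterator):

    h = double_sha256(b"lbry")
    print(hash_to_hex_str(h))  # reversed-byte-order hex, as block explorers show hashes
    assert hex_str_to_hash(hash_to_hex_str(h)) == h
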

lbry/crypto/util.py (new file, 13 additions)
View file

@@ -0,0 +1,13 @@
from binascii import unhexlify, hexlify
def bytes_to_int(be_bytes):
""" Interprets a big-endian sequence of bytes as an integer. """
return int(hexlify(be_bytes), 16)
def int_to_bytes(value):
""" Converts an integer to a big-endian sequence of bytes. """
length = (value.bit_length() + 7) // 8
s = '%x' % value
return unhexlify(('0' * (len(s) % 2) + s).zfill(length * 2))
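
Round-trip check: int_to_bytes() zero-pads to whole bytes, so the pair is lossless:

    assert bytes_to_int(b"\x01\x00") == 256
    assert int_to_bytes(256) == b"\x01\x00"
    assert bytes_to_int(int_to_bytes(123456789)) == 123456789
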

View file

@@ -0,0 +1,78 @@
import asyncio
import typing
import logging
from prometheus_client import Counter, Gauge
if typing.TYPE_CHECKING:
from lbry.dht.node import Node
from lbry.extras.daemon.storage import SQLiteStorage
log = logging.getLogger(__name__)
class BlobAnnouncer:
announcements_sent_metric = Counter(
"announcements_sent", "Number of announcements sent and their respective status.", namespace="dht_node",
labelnames=("peers", "error"),
)
announcement_queue_size_metric = Gauge(
"announcement_queue_size", "Number of hashes waiting to be announced.", namespace="dht_node",
labelnames=("scope",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, node: 'Node', storage: 'SQLiteStorage'):
self.loop = loop
self.node = node
self.storage = storage
self.announce_task: asyncio.Task = None
self.announce_queue: typing.List[str] = []
self._done = asyncio.Event()
self.announced = set()
async def _run_consumer(self):
while self.announce_queue:
try:
blob_hash = self.announce_queue.pop()
peers = len(await self.node.announce_blob(blob_hash))
self.announcements_sent_metric.labels(peers=peers, error=False).inc()
if peers > 4:
self.announced.add(blob_hash)
else:
log.debug("failed to announce %s, could only find %d peers, retrying soon.", blob_hash[:8], peers)
except Exception as err:
self.announcements_sent_metric.labels(peers=0, error=True).inc()
log.warning("error announcing %s: %s", blob_hash[:8], str(err))
async def _announce(self, batch_size: typing.Optional[int] = 10):
while batch_size:
if not self.node.joined.is_set():
await self.node.joined.wait()
await asyncio.sleep(60)
if not self.node.protocol.routing_table.get_peers():
log.warning("No peers in DHT, announce round skipped")
continue
self.announce_queue.extend(await self.storage.get_blobs_to_announce())
self.announcement_queue_size_metric.labels(scope="global").set(len(self.announce_queue))
log.debug("announcer task wake up, %d blobs to announce", len(self.announce_queue))
while len(self.announce_queue) > 0:
log.info("%i blobs to announce", len(self.announce_queue))
await asyncio.gather(*[self._run_consumer() for _ in range(batch_size)])
announced = list(filter(None, self.announced))
if announced:
await self.storage.update_last_announced_blobs(announced)
log.info("announced %i blobs", len(announced))
self.announced.clear()
self._done.set()
self._done.clear()
def start(self, batch_size: typing.Optional[int] = 10):
assert not self.announce_task or self.announce_task.done(), "already running"
self.announce_task = self.loop.create_task(self._announce(batch_size))
def stop(self):
if self.announce_task and not self.announce_task.done():
self.announce_task.cancel()
def wait(self):
return self._done.wait()
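
The announcer drains one shared queue with batch_size concurrent consumers (the asyncio.gather() call in _announce()). A generic, runnable sketch of that fan-out pattern, with sleep(0) standing in for node.announce_blob():

    import asyncio

    async def consume(queue: list, results: list):
        while queue:
            item = queue.pop()
            await asyncio.sleep(0)      # stands in for node.announce_blob(item)
            results.append(item)

    async def main(batch_size: int = 10):
        queue, results = [f"blob{i}" for i in range(25)], []
        await asyncio.gather(*(consume(queue, results) for _ in range(batch_size)))
        print(len(results), "announced")

    asyncio.run(main())
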

lbry/dht/constants.py (new file, 40 additions)
View file

@@ -0,0 +1,40 @@
import hashlib
import os
HASH_CLASS = hashlib.sha384 # pylint: disable=invalid-name
HASH_LENGTH = HASH_CLASS().digest_size
HASH_BITS = HASH_LENGTH * 8
ALPHA = 5
K = 8
SPLIT_BUCKETS_UNDER_INDEX = 1
REPLACEMENT_CACHE_SIZE = 8
RPC_TIMEOUT = 5.0
RPC_ATTEMPTS = 5
RPC_ATTEMPTS_PRUNING_WINDOW = 600
ITERATIVE_LOOKUP_DELAY = RPC_TIMEOUT / 2.0 # TODO: use config val / 2 if rpc timeout is provided
REFRESH_INTERVAL = 3600 # 1 hour
REPLICATE_INTERVAL = REFRESH_INTERVAL
DATA_EXPIRATION = 86400 # 24 hours
TOKEN_SECRET_REFRESH_INTERVAL = 300 # 5 minutes
MAYBE_PING_DELAY = 300 # 5 minutes
CHECK_REFRESH_INTERVAL = REFRESH_INTERVAL / 5
RPC_ID_LENGTH = 20
PROTOCOL_VERSION = 1
MSG_SIZE_LIMIT = 1400
def digest(data: bytes) -> bytes:
h = HASH_CLASS()
h.update(data)
return h.digest()
def generate_id(num=None) -> bytes:
if num is not None:
return digest(str(num).encode())
else:
return digest(os.urandom(32))
def generate_rpc_id(num=None) -> bytes:
return generate_id(num)[:RPC_ID_LENGTH]
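
DHT keys here are SHA-384 digests, so node ids and blob hashes are 48 bytes, and RPC ids are the first 20 bytes of such a digest:

    node_id = generate_id()
    assert len(node_id) == HASH_LENGTH == 48
    assert generate_rpc_id(7) == digest(b"7")[:RPC_ID_LENGTH]
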

lbry/dht/node.py (new file, 282 additions)
View file

@@ -0,0 +1,282 @@
import logging
import asyncio
import typing
import socket
from prometheus_client import Gauge
from lbry.utils import aclosing, resolve_host
from lbry.dht import constants
from lbry.dht.peer import make_kademlia_peer
from lbry.dht.protocol.distance import Distance
from lbry.dht.protocol.iterative_find import IterativeNodeFinder, IterativeValueFinder
from lbry.dht.protocol.protocol import KademliaProtocol
if typing.TYPE_CHECKING:
from lbry.dht.peer import PeerManager
from lbry.dht.peer import KademliaPeer
log = logging.getLogger(__name__)
class Node:
storing_peers_metric = Gauge(
"storing_peers", "Number of peers storing blobs announced to this node", namespace="dht_node",
labelnames=("scope",),
)
stored_blob_with_x_bytes_colliding = Gauge(
"stored_blobs_x_bytes_colliding", "Number of blobs with at least X bytes colliding with this node id prefix",
namespace="dht_node", labelnames=("amount",)
)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, udp_port: int,
internal_udp_port: int, peer_port: int, external_ip: str, rpc_timeout: float = constants.RPC_TIMEOUT,
split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False,
storage: typing.Optional['SQLiteStorage'] = None):
self.loop = loop
self.internal_udp_port = internal_udp_port
self.protocol = KademliaProtocol(loop, peer_manager, node_id, external_ip, udp_port, peer_port, rpc_timeout,
split_buckets_under_index, is_bootstrap_node)
self.listening_port: asyncio.DatagramTransport = None
self.joined = asyncio.Event()
self._join_task: asyncio.Task = None
self._refresh_task: asyncio.Task = None
self._storage = storage
@property
def stored_blob_hashes(self):
return self.protocol.data_store.keys()
async def refresh_node(self, force_once=False):
while True:
# remove peers with expired blob announcements from the datastore
self.protocol.data_store.removed_expired_peers()
total_peers: typing.List['KademliaPeer'] = []
# add all peers in the routing table
total_peers.extend(self.protocol.routing_table.get_peers())
# add all the peers who have announced blobs to us
storing_peers = self.protocol.data_store.get_storing_contacts()
self.storing_peers_metric.labels("global").set(len(storing_peers))
total_peers.extend(storing_peers)
counts = {0: 0, 1: 0, 2: 0}
node_id = self.protocol.node_id
for blob_hash in self.protocol.data_store.keys():
bytes_colliding = 0 if blob_hash[0] != node_id[0] else 2 if blob_hash[1] == node_id[1] else 1
counts[bytes_colliding] += 1
self.stored_blob_with_x_bytes_colliding.labels(amount=0).set(counts[0])
self.stored_blob_with_x_bytes_colliding.labels(amount=1).set(counts[1])
self.stored_blob_with_x_bytes_colliding.labels(amount=2).set(counts[2])
# get ids falling in the midpoint of each bucket that hasn't been recently updated
node_ids = self.protocol.routing_table.get_refresh_list(0, True)
if self.protocol.routing_table.get_peers():
# if we have node ids to look up, perform the iterative search until we have k results
while node_ids:
peers = await self.peer_search(node_ids.pop())
total_peers.extend(peers)
else:
if force_once:
break
fut = asyncio.Future()
self.loop.call_later(constants.REFRESH_INTERVAL // 4, fut.set_result, None)
await fut
continue
# ping the set of peers; upon success/failure the routing able and last replied/failed time will be updated
to_ping = [peer for peer in set(total_peers) if self.protocol.peer_manager.peer_is_good(peer) is not True]
if to_ping:
self.protocol.ping_queue.enqueue_maybe_ping(*to_ping, delay=0)
if self._storage:
await self._storage.save_kademlia_peers(self.protocol.routing_table.get_peers())
if force_once:
break
fut = asyncio.Future()
self.loop.call_later(constants.REFRESH_INTERVAL, fut.set_result, None)
await fut
async def announce_blob(self, blob_hash: str) -> typing.List[bytes]:
hash_value = bytes.fromhex(blob_hash)
assert len(hash_value) == constants.HASH_LENGTH
peers = await self.peer_search(hash_value)
if not self.protocol.external_ip:
raise Exception("Cannot determine external IP")
log.debug("Store to %i peers", len(peers))
for peer in peers:
log.debug("store to %s %s %s", peer.address, peer.udp_port, peer.tcp_port)
stored_to_tup = await asyncio.gather(
*(self.protocol.store_to_peer(hash_value, peer) for peer in peers)
)
stored_to = [node_id for node_id, contacted in stored_to_tup if contacted]
if stored_to:
log.debug(
"Stored %s to %i of %i attempted peers", hash_value.hex()[:8],
len(stored_to), len(peers)
)
else:
log.debug("Failed announcing %s, stored to 0 peers", blob_hash[:8])
return stored_to
def stop(self) -> None:
if self.joined.is_set():
self.joined.clear()
if self._join_task:
self._join_task.cancel()
if self._refresh_task and not (self._refresh_task.done() or self._refresh_task.cancelled()):
self._refresh_task.cancel()
if self.protocol and self.protocol.ping_queue.running:
self.protocol.ping_queue.stop()
self.protocol.stop()
if self.listening_port is not None:
self.listening_port.close()
self._join_task = None
self.listening_port = None
log.info("Stopped DHT node")
async def start_listening(self, interface: str = '0.0.0.0') -> None:
if not self.listening_port:
self.listening_port, _ = await self.loop.create_datagram_endpoint(
lambda: self.protocol, (interface, self.internal_udp_port)
)
log.info("DHT node listening on UDP %s:%i", interface, self.internal_udp_port)
self.protocol.start()
else:
log.warning("Already bound to port %s", self.listening_port)
async def join_network(self, interface: str = '0.0.0.0',
known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
def peers_from_urls(urls: typing.Optional[typing.List[typing.Tuple[bytes, str, int, int]]]):
peer_addresses = []
for node_id, address, udp_port, tcp_port in urls:
if (node_id, address, udp_port, tcp_port) not in peer_addresses and \
(address, udp_port) != (self.protocol.external_ip, self.protocol.udp_port):
peer_addresses.append((node_id, address, udp_port, tcp_port))
return [make_kademlia_peer(*peer_address) for peer_address in peer_addresses]
if not self.listening_port:
await self.start_listening(interface)
self.protocol.ping_queue.start()
self._refresh_task = self.loop.create_task(self.refresh_node())
while True:
if self.protocol.routing_table.get_peers():
if not self.joined.is_set():
self.joined.set()
log.info(
"joined dht, %i peers known in %i buckets", len(self.protocol.routing_table.get_peers()),
self.protocol.routing_table.buckets_with_contacts()
)
else:
if self.joined.is_set():
self.joined.clear()
seed_peers = peers_from_urls(
await self._storage.get_persisted_kademlia_peers()
) if self._storage else []
if not seed_peers:
try:
seed_peers.extend(peers_from_urls([
(None, await resolve_host(address, udp_port, 'udp'), udp_port, None)
for address, udp_port in known_node_urls or []
]))
except socket.gaierror:
await asyncio.sleep(30)
continue
self.protocol.peer_manager.reset()
self.protocol.ping_queue.enqueue_maybe_ping(*seed_peers, delay=0.0)
await self.peer_search(self.protocol.node_id, shortlist=seed_peers, count=32)
await asyncio.sleep(1)
def start(self, interface: str, known_node_urls: typing.Optional[typing.List[typing.Tuple[str, int]]] = None):
self._join_task = self.loop.create_task(self.join_network(interface, known_node_urls))
def get_iterative_node_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
max_results: int = constants.K) -> IterativeNodeFinder:
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
return IterativeNodeFinder(self.loop, self.protocol, key, max_results, shortlist)
def get_iterative_value_finder(self, key: bytes, shortlist: typing.Optional[typing.List['KademliaPeer']] = None,
max_results: int = -1) -> IterativeValueFinder:
shortlist = shortlist or self.protocol.routing_table.find_close_peers(key)
return IterativeValueFinder(self.loop, self.protocol, key, max_results, shortlist)
async def peer_search(self, node_id: bytes, count=constants.K, max_results=constants.K * 2,
shortlist: typing.Optional[typing.List['KademliaPeer']] = None
) -> typing.List['KademliaPeer']:
peers = []
async with aclosing(self.get_iterative_node_finder(
node_id, shortlist=shortlist, max_results=max_results)) as node_finder:
async for iteration_peers in node_finder:
peers.extend(iteration_peers)
distance = Distance(node_id)
peers.sort(key=lambda peer: distance(peer.node_id))
return peers[:count]
async def _accumulate_peers_for_value(self, search_queue: asyncio.Queue, result_queue: asyncio.Queue):
tasks = []
try:
while True:
blob_hash = await search_queue.get()
tasks.append(self.loop.create_task(self._peers_for_value_producer(blob_hash, result_queue)))
finally:
for task in tasks:
task.cancel()
async def _peers_for_value_producer(self, blob_hash: str, result_queue: asyncio.Queue):
async def put_into_result_queue_after_pong(_peer):
try:
await self.protocol.get_rpc_peer(_peer).ping()
result_queue.put_nowait([_peer])
log.debug("pong from %s:%i for %s", _peer.address, _peer.udp_port, blob_hash)
except asyncio.TimeoutError:
pass
# prioritize peers who reply to a dht ping first
# this minimizes attempting to make tcp connections that won't work later to dead or unreachable peers
async with aclosing(self.get_iterative_value_finder(bytes.fromhex(blob_hash))) as value_finder:
async for results in value_finder:
to_put = []
for peer in results:
if peer.address == self.protocol.external_ip and self.protocol.peer_port == peer.tcp_port:
continue
is_good = self.protocol.peer_manager.peer_is_good(peer)
if is_good:
# the peer has replied recently over UDP, it can probably be reached on the TCP port
to_put.append(peer)
elif is_good is None:
if not peer.udp_port:
# TODO: use the same port for TCP and UDP
# the udp port must be guessed
# default to the ports being the same. if the TCP port appears to be <=0.48.0 default,
# including on a network with several nodes, then assume the udp port is proportionately
# based on a starting port of 4444
udp_port_to_try = peer.tcp_port
if 3400 > peer.tcp_port > 3332:
udp_port_to_try = (peer.tcp_port - 3333) + 4444
self.loop.create_task(put_into_result_queue_after_pong(
make_kademlia_peer(peer.node_id, peer.address, udp_port_to_try, peer.tcp_port)
))
else:
self.loop.create_task(put_into_result_queue_after_pong(peer))
else:
# the peer is known to be bad/unreachable, skip trying to connect to it over TCP
log.debug("skip bad peer %s:%i for %s", peer.address, peer.tcp_port, blob_hash)
if to_put:
result_queue.put_nowait(to_put)
def accumulate_peers(self, search_queue: asyncio.Queue,
peer_queue: typing.Optional[asyncio.Queue] = None
) -> typing.Tuple[asyncio.Queue, asyncio.Task]:
queue = peer_queue or asyncio.Queue()
return queue, self.loop.create_task(self._accumulate_peers_for_value(search_queue, queue))
async def get_kademlia_peers_from_hosts(peer_list: typing.List[typing.Tuple[str, int]]) -> typing.List['KademliaPeer']:
peer_address_list = [(await resolve_host(url, port, proto='tcp'), port) for url, port in peer_list]
kademlia_peer_list = [make_kademlia_peer(None, address, None, tcp_port=port, allow_localhost=True)
for address, port in peer_address_list]
return kademlia_peer_list
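
One detail worth calling out from _peers_for_value_producer(): when a peer's UDP port is unknown, it is guessed from the TCP port by mapping the old default TCP range (3333..3399) onto the same offset from the new base port 4444. Extracted as a standalone function purely for illustration (guess_udp_port is not a name in this module):

    def guess_udp_port(tcp_port: int) -> int:
        if 3400 > tcp_port > 3332:
            return (tcp_port - 3333) + 4444
        return tcp_port

    assert guess_udp_port(3333) == 4444
    assert guess_udp_port(3345) == 4456
    assert guess_udp_port(4444) == 4444
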

View file

@@ -1,44 +1,54 @@
 import typing
 import asyncio
 import logging
-import ipaddress
-from binascii import hexlify
 from dataclasses import dataclass, field
 from functools import lru_cache
+from prometheus_client import Gauge
+from lbry.utils import is_valid_public_ipv4 as _is_valid_public_ipv4, LRUCache
 from lbry.dht import constants
 from lbry.dht.serialization.datagram import make_compact_address, make_compact_ip, decode_compact_address

+ALLOW_LOCALHOST = False
+CACHE_SIZE = 16384
 log = logging.getLogger(__name__)


-@lru_cache(1024)
+@lru_cache(CACHE_SIZE)
 def make_kademlia_peer(node_id: typing.Optional[bytes], address: typing.Optional[str],
                        udp_port: typing.Optional[int] = None,
-                       tcp_port: typing.Optional[int] = None) -> 'KademliaPeer':
-    return KademliaPeer(address, node_id, udp_port, tcp_port=tcp_port)
+                       tcp_port: typing.Optional[int] = None,
+                       allow_localhost: bool = False) -> 'KademliaPeer':
+    return KademliaPeer(address, node_id, udp_port, tcp_port=tcp_port, allow_localhost=allow_localhost)


-def is_valid_ipv4(address):
-    try:
-        ip = ipaddress.ip_address(address)
-        return ip.version == 4
-    except ipaddress.AddressValueError:
-        return False
+def is_valid_public_ipv4(address, allow_localhost: bool = False):
+    allow_localhost = bool(allow_localhost or ALLOW_LOCALHOST)
+    return _is_valid_public_ipv4(address, allow_localhost)


 class PeerManager:
+    peer_manager_keys_metric = Gauge(
+        "peer_manager_keys", "Number of keys tracked by PeerManager dicts (sum)", namespace="dht_node",
+        labelnames=("scope",)
+    )
     def __init__(self, loop: asyncio.AbstractEventLoop):
         self._loop = loop
         self._rpc_failures: typing.Dict[
             typing.Tuple[str, int], typing.Tuple[typing.Optional[float], typing.Optional[float]]
-        ] = {}
-        self._last_replied: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._last_sent: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._last_requested: typing.Dict[typing.Tuple[str, int], float] = {}
-        self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = {}
-        self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = {}
-        self._node_tokens: typing.Dict[bytes, (float, bytes)] = {}
+        ] = LRUCache(CACHE_SIZE)
+        self._last_replied: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._last_sent: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._last_requested: typing.Dict[typing.Tuple[str, int], float] = LRUCache(CACHE_SIZE)
+        self._node_id_mapping: typing.Dict[typing.Tuple[str, int], bytes] = LRUCache(CACHE_SIZE)
+        self._node_id_reverse_mapping: typing.Dict[bytes, typing.Tuple[str, int]] = LRUCache(CACHE_SIZE)
+        self._node_tokens: typing.Dict[bytes, (float, bytes)] = LRUCache(CACHE_SIZE)
+
+    def count_cache_keys(self):
+        return len(self._rpc_failures) + len(self._last_replied) + len(self._last_sent) + len(
+            self._last_requested) + len(self._node_id_mapping) + len(self._node_id_reverse_mapping) + len(
+            self._node_tokens)

     def reset(self):
         for statistic in (self._rpc_failures, self._last_replied, self._last_sent, self._last_requested):
@@ -70,7 +80,7 @@ class PeerManager:
     def get_node_token(self, node_id: bytes) -> typing.Optional[bytes]:
         ts, token = self._node_tokens.get(node_id, (0, None))
-        if ts and ts > self._loop.time() - constants.token_secret_refresh_interval:
+        if ts and ts > self._loop.time() - constants.TOKEN_SECRET_REFRESH_INTERVAL:
             return token

     def get_last_replied(self, address: str, udp_port: int) -> typing.Optional[float]:
@@ -88,28 +98,32 @@ class PeerManager:
             self._node_id_mapping.pop(self._node_id_reverse_mapping.pop(node_id))
         self._node_id_mapping[(address, udp_port)] = node_id
         self._node_id_reverse_mapping[node_id] = (address, udp_port)
+        self.peer_manager_keys_metric.labels("global").set(self.count_cache_keys())
+
+    def get_node_id_for_endpoint(self, address, port):
+        return self._node_id_mapping.get((address, port))

     def prune(self):  # TODO: periodically call this
         now = self._loop.time()
         to_pop = []
         for (address, udp_port), (_, last_failure) in self._rpc_failures.items():
-            if last_failure and last_failure < now - constants.rpc_attempts_pruning_window:
+            if last_failure and last_failure < now - constants.RPC_ATTEMPTS_PRUNING_WINDOW:
                 to_pop.append((address, udp_port))
         while to_pop:
             del self._rpc_failures[to_pop.pop()]
         to_pop = []
-        for node_id, (age, token) in self._node_tokens.items():
-            if age < now - constants.token_secret_refresh_interval:
+        for node_id, (age, token) in self._node_tokens.items():  # pylint: disable=unused-variable
+            if age < now - constants.TOKEN_SECRET_REFRESH_INTERVAL:
                 to_pop.append(node_id)
         while to_pop:
             del self._node_tokens[to_pop.pop()]

-    def contact_triple_is_good(self, node_id: bytes, address: str, udp_port: int):
+    def contact_triple_is_good(self, node_id: bytes, address: str, udp_port: int):  # pylint: disable=too-many-return-statements
         """
         :return: False if peer is bad, None if peer is unknown, or True if peer is good
         """
-        delay = self._loop.time() - constants.check_refresh_interval
+        delay = self._loop.time() - constants.CHECK_REFRESH_INTERVAL

         # fixme: find a way to re-enable that without breaking other parts
         # if node_id not in self._node_id_reverse_mapping or (address, udp_port) not in self._node_id_mapping:
@@ -139,7 +153,8 @@ class PeerManager:
     def peer_is_good(self, peer: 'KademliaPeer'):
         return self.contact_triple_is_good(peer.node_id, peer.address, peer.udp_port)

-    def decode_tcp_peer_from_compact_address(self, compact_address: bytes) -> 'KademliaPeer':
+
+def decode_tcp_peer_from_compact_address(compact_address: bytes) -> 'KademliaPeer':  # pylint: disable=no-self-use
     node_id, address, tcp_port = decode_compact_address(compact_address)
     return make_kademlia_peer(node_id, address, udp_port=None, tcp_port=tcp_port)
@@ -151,16 +166,17 @@ class KademliaPeer:
     udp_port: typing.Optional[int] = field(hash=True)
     tcp_port: typing.Optional[int] = field(compare=False, hash=False)
     protocol_version: typing.Optional[int] = field(default=1, compare=False, hash=False)
+    allow_localhost: bool = field(default=False, compare=False, hash=False)

     def __post_init__(self):
         if self._node_id is not None:
-            if not len(self._node_id) == constants.hash_length:
-                raise ValueError("invalid node_id: {}".format(hexlify(self._node_id).decode()))
-        if self.udp_port is not None and not 1 <= self.udp_port <= 65535:
-            raise ValueError("invalid udp port")
-        if self.tcp_port is not None and not 1 <= self.tcp_port <= 65535:
-            raise ValueError("invalid tcp port")
-        if not is_valid_ipv4(self.address):
+            if not len(self._node_id) == constants.HASH_LENGTH:
+                raise ValueError("invalid node_id: {}".format(self._node_id.hex()))
+        if self.udp_port is not None and not 1024 <= self.udp_port <= 65535:
+            raise ValueError(f"invalid udp port: {self.address}:{self.udp_port}")
+        if self.tcp_port is not None and not 1024 <= self.tcp_port <= 65535:
+            raise ValueError(f"invalid tcp port: {self.address}:{self.tcp_port}")
+        if not is_valid_public_ipv4(self.address, self.allow_localhost):
             raise ValueError(f"invalid ip address: '{self.address}'")

     def update_tcp_port(self, tcp_port: int):
@@ -178,3 +194,6 @@ class KademliaPeer:
     def compact_ip(self):
         return make_compact_ip(self.address)
+
+    def __str__(self):
+        return f"{self.__class__.__name__}({self.node_id.hex()[:8]}@{self.address}:{self.udp_port}-{self.tcp_port})"
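The peer.py changes above thread allow_localhost from the lru_cache'd factory down into KademliaPeer validation, and the cache now also deduplicates peer instances. A sketch of how the reworked factory behaves, assuming this import path; the addresses and ports below are illustrative:

from lbry.dht.peer import make_kademlia_peer

peer_a = make_kademlia_peer(None, '1.2.3.4', udp_port=4444, tcp_port=3333)
peer_b = make_kademlia_peer(None, '1.2.3.4', udp_port=4444, tcp_port=3333)
assert peer_a is peer_b  # identical arguments hit the lru_cache and share one instance

try:
    make_kademlia_peer(None, '127.0.0.1', udp_port=4445)
except ValueError:
    pass  # localhost is rejected unless allow_localhost=True is passed through
localhost_peer = make_kademlia_peer(None, '127.0.0.1', udp_port=4445, allow_localhost=True)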

View file

@@ -16,13 +16,19 @@ class DictDataStore:
         self._peer_manager = peer_manager
         self.completed_blobs: typing.Set[str] = set()

+    def keys(self):
+        return self._data_store.keys()
+
+    def __len__(self):
+        return self._data_store.__len__()
+
     def removed_expired_peers(self):
         now = self.loop.time()
         keys = list(self._data_store.keys())
         for key in keys:
             to_remove = []
             for (peer, ts) in self._data_store[key]:
-                if ts + constants.data_expiration < now or self._peer_manager.peer_is_good(peer) is False:
+                if ts + constants.DATA_EXPIRATION < now or self._peer_manager.peer_is_good(peer) is False:
                     to_remove.append((peer, ts))
             for item in to_remove:
                 self._data_store[key].remove(item)
@@ -43,7 +49,7 @@ class DictDataStore:
         """
         now = self.loop.time()
         for (peer, ts) in self._data_store.get(key, []):
-            if ts + constants.data_expiration > now:
+            if ts + constants.DATA_EXPIRATION > now:
                 yield peer

     def has_peers_for_blob(self, key: bytes) -> bool:
@@ -53,7 +59,7 @@ class DictDataStore:
         now = self.loop.time()
         if key in self._data_store:
             current = list(filter(lambda x: x[0] == contact, self._data_store[key]))
-            if len(current):
+            if len(current) > 0:
                 self._data_store[key][self._data_store[key].index(current[0])] = contact, now
             else:
                 self._data_store[key].append((contact, now))
@@ -65,6 +71,6 @@ class DictDataStore:
     def get_storing_contacts(self) -> typing.List['KademliaPeer']:
         peers = set()
-        for key, stored in self._data_store.items():
+        for _, stored in self._data_store.items():
             peers.update(set(map(lambda tup: tup[0], stored)))
         return list(peers)
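The DATA_EXPIRATION comparisons above implement a sliding freshness window over (peer, timestamp) entries. A self-contained sketch of that rule, assuming the 86400-second (24-hour) value from lbry.dht.constants; the peer names and times are made up:

DATA_EXPIRATION = 86400  # seconds; announcements older than this are dropped

def fresh_peers(entries, now):
    # mirrors get_peers_for_blob: keep only entries announced within the window
    return [peer for peer, ts in entries if ts + DATA_EXPIRATION > now]

entries = [("old_peer", 0.0), ("recent_peer", 50000.0)]
assert fresh_peers(entries, now=80000.0) == ["old_peer", "recent_peer"]
assert fresh_peers(entries, now=100000.0) == ["recent_peer"]  # 0 + 86400 < 100000, expired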

View file

@@ -9,17 +9,17 @@ class Distance:
     """
     def __init__(self, key: bytes):
-        if len(key) != constants.hash_length:
+        if len(key) != constants.HASH_LENGTH:
             raise ValueError(f"invalid key length: {len(key)}")
         self.key = key
         self.val_key_one = int.from_bytes(key, 'big')

     def __call__(self, key_two: bytes) -> int:
-        if len(key_two) != constants.hash_length:
+        if len(key_two) != constants.HASH_LENGTH:
             raise ValueError(f"invalid length of key to compare: {len(key_two)}")
         val_key_two = int.from_bytes(key_two, 'big')
         return self.val_key_one ^ val_key_two

-    def is_closer(self, a: bytes, b: bytes) -> bool:
-        """Returns true is `a` is closer to `key` than `b` is"""
-        return self(a) < self(b)
+    def is_closer(self, key_a: bytes, key_b: bytes) -> bool:
+        """Returns true is `key_a` is closer to `key` than `key_b` is"""
+        return self(key_a) < self(key_b)
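Distance implements the standard Kademlia XOR metric: both keys are read as big-endian integers and XORed, so a smaller result means a longer shared prefix with the target key. A worked example with short 4-byte keys for readability (real keys are constants.HASH_LENGTH bytes, so Distance itself would reject these; the arithmetic is identical):

key   = bytes.fromhex("00000000")
key_a = bytes.fromhex("00000001")  # differs from key only in the lowest bit
key_b = bytes.fromhex("80000000")  # differs from key in the highest bit

d_a = int.from_bytes(key, 'big') ^ int.from_bytes(key_a, 'big')  # == 1
d_b = int.from_bytes(key, 'big') ^ int.from_bytes(key_b, 'big')  # == 2 ** 31
assert d_a < d_b  # key_a is "closer" to key, so is_closer(key_a, key_b) is True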

View file

@@ -0,0 +1,361 @@
import asyncio
from itertools import chain
from collections import defaultdict, OrderedDict
from collections.abc import AsyncIterator
import typing
import logging
from typing import TYPE_CHECKING

from lbry.dht import constants
from lbry.dht.error import RemoteException, TransportNotConnected
from lbry.dht.protocol.distance import Distance
from lbry.dht.peer import make_kademlia_peer, decode_tcp_peer_from_compact_address
from lbry.dht.serialization.datagram import PAGE_KEY

if TYPE_CHECKING:
    from lbry.dht.protocol.protocol import KademliaProtocol
    from lbry.dht.peer import PeerManager, KademliaPeer

log = logging.getLogger(__name__)


class FindResponse:
    @property
    def found(self) -> bool:
        raise NotImplementedError()

    def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
        raise NotImplementedError()

    def get_close_kademlia_peers(self, peer_info) -> typing.Generator[typing.Iterator['KademliaPeer'], None, None]:
        for contact_triple in self.get_close_triples():
            node_id, address, udp_port = contact_triple
            try:
                yield make_kademlia_peer(node_id, address, udp_port)
            except ValueError:
                log.warning("misbehaving peer %s:%i returned peer with reserved ip %s:%i", peer_info.address,
                            peer_info.udp_port, address, udp_port)


class FindNodeResponse(FindResponse):
    def __init__(self, key: bytes, close_triples: typing.List[typing.Tuple[bytes, str, int]]):
        self.key = key
        self.close_triples = close_triples

    @property
    def found(self) -> bool:
        return self.key in [triple[0] for triple in self.close_triples]

    def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
        return self.close_triples


class FindValueResponse(FindResponse):
    def __init__(self, key: bytes, result_dict: typing.Dict):
        self.key = key
        self.token = result_dict[b'token']
        self.close_triples: typing.List[typing.Tuple[bytes, bytes, int]] = result_dict.get(b'contacts', [])
        self.found_compact_addresses = result_dict.get(key, [])
        self.pages = int(result_dict.get(PAGE_KEY, 0))

    @property
    def found(self) -> bool:
        return len(self.found_compact_addresses) > 0

    def get_close_triples(self) -> typing.List[typing.Tuple[bytes, str, int]]:
        return [(node_id, address.decode(), port) for node_id, address, port in self.close_triples]


class IterativeFinder(AsyncIterator):
    def __init__(self, loop: asyncio.AbstractEventLoop,
                 protocol: 'KademliaProtocol', key: bytes,
                 max_results: typing.Optional[int] = constants.K,
                 shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
        if len(key) != constants.HASH_LENGTH:
            raise ValueError("invalid key length: %i" % len(key))
        self.loop = loop
        self.peer_manager = protocol.peer_manager
        self.protocol = protocol

        self.key = key
        self.max_results = max(constants.K, max_results)

        self.active: typing.Dict['KademliaPeer', int] = OrderedDict()  # peer: distance, sorted
        self.contacted: typing.Set['KademliaPeer'] = set()
        self.distance = Distance(key)

        self.iteration_queue = asyncio.Queue()

        self.running_probes: typing.Dict['KademliaPeer', asyncio.Task] = {}
        self.iteration_count = 0
        self.running = False
        self.tasks: typing.List[asyncio.Task] = []
        for peer in shortlist:
            if peer.node_id:
                self._add_active(peer, force=True)
            else:
                # seed nodes
                self._schedule_probe(peer)

    async def send_probe(self, peer: 'KademliaPeer') -> FindResponse:
        """
        Send the rpc request to the peer and return an object with the FindResponse interface
        """
        raise NotImplementedError()

    def search_exhausted(self):
        """
        This method ends the iterator due no more peers to contact.
        Override to provide last time results.
        """
        self.iteration_queue.put_nowait(None)

    def check_result_ready(self, response: FindResponse):
        """
        Called after adding peers from an rpc result to the shortlist.
        This method is responsible for putting a result for the generator into the Queue
        """
        raise NotImplementedError()

    def get_initial_result(self) -> typing.List['KademliaPeer']:  # pylint: disable=no-self-use
        """
        Get an initial or cached result to be put into the Queue. Used for findValue requests where the blob
        has peers in the local data store of blobs announced to us
        """
        return []

    def _add_active(self, peer, force=False):
        if not force and self.peer_manager.peer_is_good(peer) is False:
            return
        if peer in self.contacted:
            return
        if peer not in self.active and peer.node_id and peer.node_id != self.protocol.node_id:
            self.active[peer] = self.distance(peer.node_id)
            self.active = OrderedDict(sorted(self.active.items(), key=lambda item: item[1]))

    async def _handle_probe_result(self, peer: 'KademliaPeer', response: FindResponse):
        self._add_active(peer)
        for new_peer in response.get_close_kademlia_peers(peer):
            self._add_active(new_peer)
        self.check_result_ready(response)
        self._log_state(reason="check result")

    def _reset_closest(self, peer):
        if peer in self.active:
            del self.active[peer]

    async def _send_probe(self, peer: 'KademliaPeer'):
        try:
            response = await self.send_probe(peer)
        except asyncio.TimeoutError:
            self._reset_closest(peer)
            return
        except asyncio.CancelledError:
            log.debug("%s[%x] cancelled probe",
                      type(self).__name__, id(self))
            raise
        except ValueError as err:
            log.warning(str(err))
            self._reset_closest(peer)
            return
        except TransportNotConnected:
            await self._aclose(reason="not connected")
            return
        except RemoteException:
            self._reset_closest(peer)
            return
        return await self._handle_probe_result(peer, response)

    def _search_round(self):
        """
        Send up to constants.alpha (5) probes to closest active peers
        """
        added = 0
        for index, peer in enumerate(self.active.keys()):
            if index == 0:
                log.debug("%s[%x] closest to probe: %s",
                          type(self).__name__, id(self),
                          peer.node_id.hex()[:8])
            if peer in self.contacted:
                continue
            if len(self.running_probes) >= constants.ALPHA:
                break
            if index > (constants.K + len(self.running_probes)):
                break
            origin_address = (peer.address, peer.udp_port)
            if peer.node_id == self.protocol.node_id:
                continue
            if origin_address == (self.protocol.external_ip, self.protocol.udp_port):
                continue
            self._schedule_probe(peer)
            added += 1
            log.debug("%s[%x] running %d probes for key %s",
                      type(self).__name__, id(self),
                      len(self.running_probes), self.key.hex()[:8])
        if not added and not self.running_probes:
            log.debug("%s[%x] search for %s exhausted",
                      type(self).__name__, id(self),
                      self.key.hex()[:8])
            self.search_exhausted()

    def _schedule_probe(self, peer: 'KademliaPeer'):
        self.contacted.add(peer)
        t = self.loop.create_task(self._send_probe(peer))

        def callback(_):
            self.running_probes.pop(peer, None)
            if self.running:
                self._search_round()

        t.add_done_callback(callback)
        self.running_probes[peer] = t

    def _log_state(self, reason="?"):
        log.debug("%s[%x] [%s] %s: %i active nodes %i contacted %i produced %i queued",
                  type(self).__name__, id(self), self.key.hex()[:8],
                  reason, len(self.active), len(self.contacted),
                  self.iteration_count, self.iteration_queue.qsize())

    def __aiter__(self):
        if self.running:
            raise Exception("already running")
        self.running = True
        self.loop.call_soon(self._search_round)
        return self

    async def __anext__(self) -> typing.List['KademliaPeer']:
        try:
            if self.iteration_count == 0:
                result = self.get_initial_result() or await self.iteration_queue.get()
            else:
                result = await self.iteration_queue.get()
            if not result:
                raise StopAsyncIteration
            self.iteration_count += 1
            return result
        except asyncio.CancelledError:
            await self._aclose(reason="cancelled")
            raise
        except StopAsyncIteration:
            await self._aclose(reason="no more results")
            raise

    async def _aclose(self, reason="?"):
        log.debug("%s[%x] [%s] shutdown because %s: %i active nodes %i contacted %i produced %i queued",
                  type(self).__name__, id(self), self.key.hex()[:8],
                  reason, len(self.active), len(self.contacted),
                  self.iteration_count, self.iteration_queue.qsize())
        self.running = False
        self.iteration_queue.put_nowait(None)
        for task in chain(self.tasks, self.running_probes.values()):
            task.cancel()
        self.tasks.clear()
        self.running_probes.clear()

    async def aclose(self):
        if self.running:
            await self._aclose(reason="aclose")
        log.debug("%s[%x] [%s] async close completed",
                  type(self).__name__, id(self), self.key.hex()[:8])


class IterativeNodeFinder(IterativeFinder):
    def __init__(self, loop: asyncio.AbstractEventLoop,
                 protocol: 'KademliaProtocol', key: bytes,
                 max_results: typing.Optional[int] = constants.K,
                 shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
        super().__init__(loop, protocol, key, max_results, shortlist)
        self.yielded_peers: typing.Set['KademliaPeer'] = set()

    async def send_probe(self, peer: 'KademliaPeer') -> FindNodeResponse:
        log.debug("probe %s:%d (%s) for NODE %s",
                  peer.address, peer.udp_port, peer.node_id.hex()[:8] if peer.node_id else '', self.key.hex()[:8])
        response = await self.protocol.get_rpc_peer(peer).find_node(self.key)
        return FindNodeResponse(self.key, response)

    def search_exhausted(self):
        self.put_result(self.active.keys(), finish=True)

    def put_result(self, from_iter: typing.Iterable['KademliaPeer'], finish=False):
        not_yet_yielded = [
            peer for peer in from_iter
            if peer not in self.yielded_peers
            and peer.node_id != self.protocol.node_id
            and self.peer_manager.peer_is_good(peer) is True  # return only peers who answered
        ]
        not_yet_yielded.sort(key=lambda peer: self.distance(peer.node_id))
        to_yield = not_yet_yielded[:max(constants.K, self.max_results)]
        if to_yield:
            self.yielded_peers.update(to_yield)
            self.iteration_queue.put_nowait(to_yield)
        if finish:
            self.iteration_queue.put_nowait(None)

    def check_result_ready(self, response: FindNodeResponse):
        found = response.found and self.key != self.protocol.node_id
        if found:
            log.debug("found")
            return self.put_result(self.active.keys(), finish=True)


class IterativeValueFinder(IterativeFinder):
    def __init__(self, loop: asyncio.AbstractEventLoop,
                 protocol: 'KademliaProtocol', key: bytes,
                 max_results: typing.Optional[int] = constants.K,
                 shortlist: typing.Optional[typing.List['KademliaPeer']] = None):
        super().__init__(loop, protocol, key, max_results, shortlist)
        self.blob_peers: typing.Set['KademliaPeer'] = set()
        # this tracks the index of the most recent page we requested from each peer
        self.peer_pages: typing.DefaultDict['KademliaPeer', int] = defaultdict(int)
        # this tracks the set of blob peers returned by each peer
        self.discovered_peers: typing.Dict['KademliaPeer', typing.Set['KademliaPeer']] = defaultdict(set)

    async def send_probe(self, peer: 'KademliaPeer') -> FindValueResponse:
        log.debug("probe %s:%d (%s) for VALUE %s",
                  peer.address, peer.udp_port, peer.node_id.hex()[:8], self.key.hex()[:8])
        page = self.peer_pages[peer]
        response = await self.protocol.get_rpc_peer(peer).find_value(self.key, page=page)
        parsed = FindValueResponse(self.key, response)
        if not parsed.found:
            return parsed
        already_known = len(self.discovered_peers[peer])
        decoded_peers = set()
        for compact_addr in parsed.found_compact_addresses:
            try:
                decoded_peers.add(decode_tcp_peer_from_compact_address(compact_addr))
            except ValueError:
                log.warning("misbehaving peer %s:%i returned invalid peer for blob",
                            peer.address, peer.udp_port)
                self.peer_manager.report_failure(peer.address, peer.udp_port)
                parsed.found_compact_addresses.clear()
                return parsed
        self.discovered_peers[peer].update(decoded_peers)
        log.debug("probed %s:%i page %i, %i known", peer.address, peer.udp_port, page,
                  already_known + len(parsed.found_compact_addresses))
        if len(self.discovered_peers[peer]) != already_known + len(parsed.found_compact_addresses):
            log.warning("misbehaving peer %s:%i returned duplicate peers for blob", peer.address, peer.udp_port)
        elif len(parsed.found_compact_addresses) >= constants.K and self.peer_pages[peer] < parsed.pages:
            # the peer returned a full page and indicates it has more
            self.peer_pages[peer] += 1
            if peer in self.contacted:
                # the peer must be removed from self.contacted so that it will be probed for the next page
                self.contacted.remove(peer)
        return parsed

    def check_result_ready(self, response: FindValueResponse):
        if response.found:
            blob_peers = [decode_tcp_peer_from_compact_address(compact_addr)
                          for compact_addr in response.found_compact_addresses]
            to_yield = []
            for blob_peer in blob_peers:
                if blob_peer not in self.blob_peers:
                    self.blob_peers.add(blob_peer)
                    to_yield.append(blob_peer)
            if to_yield:
                self.iteration_queue.put_nowait(to_yield)

    def get_initial_result(self) -> typing.List['KademliaPeer']:
        if self.protocol.data_store.has_peers_for_blob(self.key):
            return self.protocol.data_store.get_peers_for_blob(self.key)
        return []
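The new module above drives a search by pushing batches of peers into iteration_queue until a None sentinel ends the iteration. A usage sketch, not taken from this diff: protocol is assumed to be a started KademliaProtocol and target_id a constants.HASH_LENGTH-byte key, and aclosing (contextlib in Python 3.10+, or an equivalent shim, as used by the node code earlier) guarantees aclose() cancels outstanding probes if the consumer exits early:

import asyncio
from contextlib import aclosing  # assumes Python 3.10+, or a compatible shim

async def find_closest_peers(loop: asyncio.AbstractEventLoop, protocol, target_id: bytes):
    finder = IterativeNodeFinder(loop, protocol, target_id)
    closest = []
    async with aclosing(finder) as results:
        async for batch in results:  # each batch is a list of KademliaPeer, sorted by distance
            closest = batch
            if len(closest) >= constants.K:
                break  # aclosing() ensures the remaining probe tasks are cancelled
    return closest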

View file

@@ -3,13 +3,16 @@ import socket
 import functools
 import hashlib
 import asyncio
+import time
 import typing
-import binascii
 import random
 from asyncio.protocols import DatagramProtocol
 from asyncio.transports import DatagramTransport
+from prometheus_client import Gauge, Counter, Histogram

 from lbry.dht import constants
+from lbry.dht.serialization.bencoding import DecodeError
 from lbry.dht.serialization.datagram import decode_datagram, ErrorDatagram, ResponseDatagram, RequestDatagram
 from lbry.dht.serialization.datagram import RESPONSE_TYPE, ERROR_TYPE, PAGE_KEY
 from lbry.dht.error import RemoteException, TransportNotConnected
@@ -23,13 +26,18 @@ if typing.TYPE_CHECKING:
 log = logging.getLogger(__name__)

-old_protocol_errors = {
+OLD_PROTOCOL_ERRORS = {
     "findNode() takes exactly 2 arguments (5 given)": "0.19.1",
     "findValue() takes exactly 2 arguments (5 given)": "0.19.1"
 }


 class KademliaRPC:
+    stored_blob_metric = Gauge(
+        "stored_blobs", "Number of blobs announced by other peers", namespace="dht_node",
+        labelnames=("scope",),
+    )
+
     def __init__(self, protocol: 'KademliaProtocol', loop: asyncio.AbstractEventLoop, peer_port: int = 3333):
         self.protocol = protocol
         self.loop = loop
@@ -48,35 +56,36 @@ class KademliaRPC:
         return b'pong'

     def store(self, rpc_contact: 'KademliaPeer', blob_hash: bytes, token: bytes, port: int) -> bytes:
-        if len(blob_hash) != constants.hash_bits // 8:
+        if len(blob_hash) != constants.HASH_BITS // 8:
             raise ValueError(f"invalid length of blob hash: {len(blob_hash)}")
         if not 0 < port < 65535:
             raise ValueError(f"invalid tcp port: {port}")
         rpc_contact.update_tcp_port(port)
         if not self.verify_token(token, rpc_contact.compact_ip()):
-            if self.loop.time() - self.protocol.started_listening_time < constants.token_secret_refresh_interval:
+            if self.loop.time() - self.protocol.started_listening_time < constants.TOKEN_SECRET_REFRESH_INTERVAL:
                 pass
             else:
                 raise ValueError("Invalid token")
         self.protocol.data_store.add_peer_to_blob(
             rpc_contact, blob_hash
         )
+        self.stored_blob_metric.labels("global").set(len(self.protocol.data_store))
         return b'OK'

     def find_node(self, rpc_contact: 'KademliaPeer', key: bytes) -> typing.List[typing.Tuple[bytes, str, int]]:
-        if len(key) != constants.hash_length:
+        if len(key) != constants.HASH_LENGTH:
             raise ValueError("invalid contact node_id length: %i" % len(key))
         contacts = self.protocol.routing_table.find_close_peers(key, sender_node_id=rpc_contact.node_id)
         contact_triples = []
-        for contact in contacts[:constants.k * 2]:
+        for contact in contacts[:constants.K * 2]:
             contact_triples.append((contact.node_id, contact.address, contact.udp_port))
         return contact_triples

     def find_value(self, rpc_contact: 'KademliaPeer', key: bytes, page: int = 0):
         page = page if page > 0 else 0
-        if len(key) != constants.hash_length:
+        if len(key) != constants.HASH_LENGTH:
             raise ValueError("invalid blob_exchange hash length: %i" % len(key))
         response = {
@@ -84,7 +93,7 @@ class KademliaRPC:
         }
         if not page:
-            response[b'contacts'] = self.find_node(rpc_contact, key)[:constants.k]
+            response[b'contacts'] = self.find_node(rpc_contact, key)[:constants.K]
         if self.protocol.protocol_version:
             response[b'protocolVersion'] = self.protocol.protocol_version
@@ -96,16 +105,16 @@ class KademliaRPC:
             if not rpc_contact.tcp_port or peer.compact_address_tcp() != rpc_contact.compact_address_tcp()
         ]
         # if we don't have k storing peers to return and we have this hash locally, include our contact information
-        if len(peers) < constants.k and binascii.hexlify(key).decode() in self.protocol.data_store.completed_blobs:
+        if len(peers) < constants.K and key.hex() in self.protocol.data_store.completed_blobs:
             peers.append(self.compact_address())
         if not peers:
             response[PAGE_KEY] = 0
         else:
-            response[PAGE_KEY] = (len(peers) // (constants.k + 1)) + 1  # how many pages of peers we have for the blob
-            if len(peers) > constants.k:
+            response[PAGE_KEY] = (len(peers) // (constants.K + 1)) + 1  # how many pages of peers we have for the blob
+            if len(peers) > constants.K:
                 random.Random(self.protocol.node_id).shuffle(peers)
-            if page * constants.k < len(peers):
-                response[key] = peers[page * constants.k:page * constants.k + constants.k]
+            if page * constants.K < len(peers):
+                response[key] = peers[page * constants.K:page * constants.K + constants.K]
         return response

     def refresh_token(self):  # TODO: this needs to be called periodically
@@ -154,7 +163,7 @@ class RemoteKademliaRPC:
         :param blob_hash: blob hash as bytes
         :return: b'OK'
         """
-        if len(blob_hash) != constants.hash_bits // 8:
+        if len(blob_hash) != constants.HASH_BITS // 8:
             raise ValueError(f"invalid length of blob hash: {len(blob_hash)}")
         if not self.protocol.peer_port or not 0 < self.protocol.peer_port < 65535:
             raise ValueError(f"invalid tcp port: {self.protocol.peer_port}")
@@ -171,7 +180,7 @@ class RemoteKademliaRPC:
         """
         :return: [(node_id, address, udp_port), ...]
         """
-        if len(key) != constants.hash_bits // 8:
+        if len(key) != constants.HASH_BITS // 8:
             raise ValueError(f"invalid length of find node key: {len(key)}")
         response = await self.protocol.send_request(
             self.peer, RequestDatagram.make_find_node(self.protocol.node_id, key)
@@ -186,7 +195,7 @@ class RemoteKademliaRPC:
             <key bytes>: [<blob_peer_compact_address, ...]
         }
         """
-        if len(key) != constants.hash_bits // 8:
+        if len(key) != constants.HASH_BITS // 8:
             raise ValueError(f"invalid length of find value key: {len(key)}")
         response = await self.protocol.send_request(
             self.peer, RequestDatagram.make_find_value(self.protocol.node_id, key, page=page)
@@ -203,12 +212,16 @@ class PingQueue:
         self._process_task: asyncio.Task = None
         self._running = False
         self._running_pings: typing.Set[asyncio.Task] = set()
-        self._default_delay = constants.maybe_ping_delay
+        self._default_delay = constants.MAYBE_PING_DELAY

     @property
     def running(self):
         return self._running

+    @property
+    def busy(self):
+        return self._running and (any(self._running_pings) or any(self._pending_contacts))
+
     def enqueue_maybe_ping(self, *peers: 'KademliaPeer', delay: typing.Optional[float] = None):
         delay = delay if delay is not None else self._default_delay
         now = self._loop.time()
@@ -220,11 +233,11 @@ class PingQueue:
         async def ping_task():
             try:
                 if self._protocol.peer_manager.peer_is_good(peer):
-                    if peer not in self._protocol.routing_table.get_peers():
+                    if not self._protocol.routing_table.get_peer(peer.node_id):
                         self._protocol.add_peer(peer)
                     return
                 await self._protocol.get_rpc_peer(peer).ping()
-            except asyncio.TimeoutError:
+            except (asyncio.TimeoutError, RemoteException):
                 pass

         task = self._loop.create_task(ping_task())
@@ -240,7 +253,7 @@ class PingQueue:
                     del self._pending_contacts[peer]
                     self.maybe_ping(peer)
                     break
-            await asyncio.sleep(1, loop=self._loop)
+            await asyncio.sleep(1)

     def start(self):
         assert not self._running
@@ -259,9 +272,33 @@ class PingQueue:


 class KademliaProtocol(DatagramProtocol):
+    request_sent_metric = Counter(
+        "request_sent", "Number of requests send from DHT RPC protocol", namespace="dht_node",
+        labelnames=("method",),
+    )
+    request_success_metric = Counter(
+        "request_success", "Number of successful requests", namespace="dht_node",
+        labelnames=("method",),
+    )
+    request_error_metric = Counter(
+        "request_error", "Number of errors returned from request to other peers", namespace="dht_node",
+        labelnames=("method",),
+    )
+    HISTOGRAM_BUCKETS = (
+        .005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 3.0, 3.5, 4.0, 4.50, 5.0, 5.50, 6.0, float('inf')
+    )
+    response_time_metric = Histogram(
+        "response_time", "Response times of DHT RPC requests", namespace="dht_node", buckets=HISTOGRAM_BUCKETS,
+        labelnames=("method",)
+    )
+    received_request_metric = Counter(
+        "received_request", "Number of received DHT RPC requests", namespace="dht_node",
+        labelnames=("method",),
+    )
+
     def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', node_id: bytes, external_ip: str,
-                 udp_port: int, peer_port: int, rpc_timeout: float = constants.rpc_timeout,
-                 split_buckets_under_index: int = constants.split_buckets_under_index):
+                 udp_port: int, peer_port: int, rpc_timeout: float = constants.RPC_TIMEOUT,
+                 split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_boostrap_node: bool = False):
         self.peer_manager = peer_manager
         self.loop = loop
         self.node_id = node_id
@@ -271,20 +308,21 @@ class KademliaProtocol(DatagramProtocol):
         self.is_seed_node = False
         self.partial_messages: typing.Dict[bytes, typing.Dict[bytes, bytes]] = {}
         self.sent_messages: typing.Dict[bytes, typing.Tuple['KademliaPeer', asyncio.Future, RequestDatagram]] = {}
-        self.protocol_version = constants.protocol_version
+        self.protocol_version = constants.PROTOCOL_VERSION
         self.started_listening_time = 0
         self.transport: DatagramTransport = None
         self.old_token_secret = constants.generate_id()
         self.token_secret = constants.generate_id()
-        self.routing_table = TreeRoutingTable(self.loop, self.peer_manager, self.node_id, split_buckets_under_index)
+        self.routing_table = TreeRoutingTable(
+            self.loop, self.peer_manager, self.node_id, split_buckets_under_index, is_bootstrap_node=is_boostrap_node)
         self.data_store = DictDataStore(self.loop, self.peer_manager)
         self.ping_queue = PingQueue(self.loop, self)
         self.node_rpc = KademliaRPC(self, self.loop, self.peer_port)
         self.rpc_timeout = rpc_timeout
-        self._split_lock = asyncio.Lock(loop=self.loop)
+        self._split_lock = asyncio.Lock()
         self._to_remove: typing.Set['KademliaPeer'] = set()
         self._to_add: typing.Set['KademliaPeer'] = set()
-        self._wakeup_routing_task = asyncio.Event(loop=self.loop)
+        self._wakeup_routing_task = asyncio.Event()
         self.maintaing_routing_task: typing.Optional[asyncio.Task] = None

     @functools.lru_cache(128)
@@ -323,72 +361,10 @@ class KademliaProtocol(DatagramProtocol):
         return args, {}

     async def _add_peer(self, peer: 'KademliaPeer'):
-        if not peer.node_id:
-            log.warning("Tried adding a peer with no node id!")
-            return False
-        for p in self.routing_table.get_peers():
-            if (p.address, p.udp_port) == (peer.address, peer.udp_port) and p.node_id != peer.node_id:
-                self.routing_table.remove_peer(p)
-                self.routing_table.join_buckets()
-        bucket_index = self.routing_table.kbucket_index(peer.node_id)
-        if self.routing_table.buckets[bucket_index].add_peer(peer):
-            return True
-        # The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
-        if self.routing_table.should_split(bucket_index, peer.node_id):
-            self.routing_table.split_bucket(bucket_index)
-            # Retry the insertion attempt
-            result = await self._add_peer(peer)
-            self.routing_table.join_buckets()
-            return result
-        else:
-            # We can't split the k-bucket
-            #
-            # The 13 page kademlia paper specifies that the least recently contacted node in the bucket
-            # shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
-            # the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
-            #
-            # A reasonable extension to this is BEP 0005, which extends the above:
-            #
-            #    Not all nodes that we learn about are equal. Some are "good" and some are not.
-            #    Many nodes using the DHT are able to send queries and receive responses,
-            #    but are not able to respond to queries from other nodes. It is important that
-            #    each node's routing table must contain only known good nodes. A good node is
-            #    a node has responded to one of our queries within the last 15 minutes. A node
-            #    is also good if it has ever responded to one of our queries and has sent us a
-            #    query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
-            #    questionable. Nodes become bad when they fail to respond to multiple queries
-            #    in a row. Nodes that we know are good are given priority over nodes with unknown status.
-            #
-            # When there are bad or questionable nodes in the bucket, the least recent is selected for
-            # potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
-            # contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
-            # is ignored if the pinged node replies.
-            not_good_contacts = self.routing_table.buckets[bucket_index].get_bad_or_unknown_peers()
-            not_recently_replied = []
-            for p in not_good_contacts:
-                last_replied = self.peer_manager.get_last_replied(p.address, p.udp_port)
-                if not last_replied or last_replied + 60 < self.loop.time():
-                    not_recently_replied.append(p)
-            if not_recently_replied:
-                to_replace = not_recently_replied[0]
-            else:
-                to_replace = self.routing_table.buckets[bucket_index].peers[0]
-                last_replied = self.peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
-                if last_replied and last_replied + 60 > self.loop.time():
-                    return False
-            log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
-            try:
-                to_replace_rpc = self.get_rpc_peer(to_replace)
-                await to_replace_rpc.ping()
-                return False
-            except asyncio.TimeoutError:
-                log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
-                          to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
-                if to_replace in self.routing_table.buckets[bucket_index]:
-                    self.routing_table.buckets[bucket_index].remove_peer(to_replace)
-                return await self._add_peer(peer)
+        async def probe(some_peer: 'KademliaPeer'):
+            rpc_peer = self.get_rpc_peer(some_peer)
+            await rpc_peer.ping()
+        return await self.routing_table.add_peer(peer, probe)

     def add_peer(self, peer: 'KademliaPeer'):
         if peer.node_id == self.node_id:
@@ -406,35 +382,34 @@ class KademliaProtocol(DatagramProtocol):
             async with self._split_lock:
                 peer = self._to_remove.pop()
                 self.routing_table.remove_peer(peer)
-                self.routing_table.join_buckets()
             while self._to_add:
                 async with self._split_lock:
                     await self._add_peer(self._to_add.pop())
-            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1, loop=self.loop), loop=self.loop)
+            await asyncio.gather(self._wakeup_routing_task.wait(), asyncio.sleep(.1))
             self._wakeup_routing_task.clear()

     def _handle_rpc(self, sender_contact: 'KademliaPeer', message: RequestDatagram):
-        assert sender_contact.node_id != self.node_id, (binascii.hexlify(sender_contact.node_id)[:8].decode(),
-                                                        binascii.hexlify(self.node_id)[:8].decode())
+        assert sender_contact.node_id != self.node_id, (sender_contact.node_id.hex()[:8],
+                                                        self.node_id.hex()[:8])
         method = message.method
         if method not in [b'ping', b'store', b'findNode', b'findValue']:
             raise AttributeError('Invalid method: %s' % message.method.decode())
         if message.args and isinstance(message.args[-1], dict) and b'protocolVersion' in message.args[-1]:
             # args don't need reformatting
-            args, kw = tuple(message.args[:-1]), message.args[-1]
+            args, kwargs = tuple(message.args[:-1]), message.args[-1]
         else:
-            args, kw = self._migrate_incoming_rpc_args(sender_contact, message.method, *message.args)
+            args, kwargs = self._migrate_incoming_rpc_args(sender_contact, message.method, *message.args)
         log.debug("%s:%i RECV CALL %s %s:%i", self.external_ip, self.udp_port, message.method.decode(),
                   sender_contact.address, sender_contact.udp_port)

         if method == b'ping':
             result = self.node_rpc.ping()
         elif method == b'store':
-            blob_hash, token, port, original_publisher_id, age = args[:5]
+            blob_hash, token, port, original_publisher_id, age = args[:5]  # pylint: disable=unused-variable
             result = self.node_rpc.store(sender_contact, blob_hash, token, port)
         else:
             key = args[0]
-            page = kw.get(PAGE_KEY, 0)
+            page = kwargs.get(PAGE_KEY, 0)
             if method == b'findNode':
                 result = self.node_rpc.find_node(sender_contact, key)
             else:
@@ -447,11 +422,15 @@ class KademliaProtocol(DatagramProtocol):
     def handle_request_datagram(self, address: typing.Tuple[str, int], request_datagram: RequestDatagram):
         # This is an RPC method request
+        self.received_request_metric.labels(method=request_datagram.method).inc()
         self.peer_manager.report_last_requested(address[0], address[1])
-        try:
-            peer = self.routing_table.get_peer(request_datagram.node_id)
-        except IndexError:
-            peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
+        peer = self.routing_table.get_peer(request_datagram.node_id)
+        if not peer:
+            try:
+                peer = make_kademlia_peer(request_datagram.node_id, address[0], address[1])
+            except ValueError as err:
+                log.warning("error replying to %s: %s", address[0], str(err))
+                return
         try:
             self._handle_rpc(peer, request_datagram)
             # if the contact is not known to be bad (yet) and we haven't yet queried it, send it a ping so that it
@@ -484,25 +463,25 @@ class KademliaProtocol(DatagramProtocol):
     def handle_response_datagram(self, address: typing.Tuple[str, int], response_datagram: ResponseDatagram):
         # Find the message that triggered this response
         if response_datagram.rpc_id in self.sent_messages:
-            peer, df, request = self.sent_messages[response_datagram.rpc_id]
+            peer, future, _ = self.sent_messages[response_datagram.rpc_id]
            if peer.address != address[0]:
-                df.set_exception(RemoteException(
-                    f"response from {address[0]}, expected {peer.address}")
+                future.set_exception(
+                    RemoteException(f"response from {address[0]}, expected {peer.address}")
                 )
                 return

             # We got a result from the RPC
             if peer.node_id == self.node_id:
-                df.set_exception(RemoteException("node has our node id"))
+                future.set_exception(RemoteException("node has our node id"))
                 return
             elif response_datagram.node_id == self.node_id:
-                df.set_exception(RemoteException("incoming message is from our node id"))
+                future.set_exception(RemoteException("incoming message is from our node id"))
                 return
             peer = make_kademlia_peer(response_datagram.node_id, address[0], address[1])
             self.peer_manager.report_last_replied(address[0], address[1])
             self.peer_manager.update_contact_triple(peer.node_id, address[0], address[1])
-            if not df.cancelled():
-                df.set_result(response_datagram)
+            if not future.cancelled():
+                future.set_result(response_datagram)
                 self.add_peer(peer)
             else:
                 log.warning("%s:%i replied, but after we cancelled the request attempt",
@@ -516,11 +495,13 @@ class KademliaProtocol(DatagramProtocol):
         # The RPC request raised a remote exception; raise it locally
         remote_exception = RemoteException(f"{error_datagram.exception_type}({error_datagram.response})")
         if error_datagram.rpc_id in self.sent_messages:
-            peer, df, request = self.sent_messages.pop(error_datagram.rpc_id)
+            peer, future, request = self.sent_messages.pop(error_datagram.rpc_id)
             if (peer.address, peer.udp_port) != address:
-                df.set_exception(RemoteException(
-                    f"response from {address[0]}:{address[1]}, "
-                    f"expected {peer.address}:{peer.udp_port}")
-                )
+                future.set_exception(
+                    RemoteException(
+                        f"response from {address[0]}:{address[1]}, "
+                        f"expected {peer.address}:{peer.udp_port}"
+                    )
+                )
                 return
             error_msg = f"" \
@@ -529,28 +510,32 @@ class KademliaProtocol(DatagramProtocol):
                         f"Raised: {str(remote_exception)}"
             if 'Invalid token' in error_msg:
                 log.debug(error_msg)
-            elif error_datagram.response not in old_protocol_errors:
+            elif error_datagram.response not in OLD_PROTOCOL_ERRORS:
                 log.warning(error_msg)
             else:
-                log.debug("known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
-                          peer.address, peer.udp_port, old_protocol_errors[error_datagram.response])
-            df.set_exception(remote_exception)
+                log.debug(
+                    "known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
+                    peer.address, peer.udp_port, OLD_PROTOCOL_ERRORS[error_datagram.response]
+                )
+            future.set_exception(remote_exception)
             return
         else:
-            if error_datagram.response not in old_protocol_errors:
+            if error_datagram.response not in OLD_PROTOCOL_ERRORS:
                 msg = f"Received error from {address[0]}:{address[1]}, but it isn't in response to a " \
                       f"pending request: {str(remote_exception)}"
                 log.warning(msg)
             else:
-                log.debug("known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
-                          address[0], address[1], old_protocol_errors[error_datagram.response])
+                log.debug(
+                    "known dht protocol backwards compatibility error with %s:%i (lbrynet v%s)",
+                    address[0], address[1], OLD_PROTOCOL_ERRORS[error_datagram.response]
+                )

-    def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None:
+    def datagram_received(self, datagram: bytes, address: typing.Tuple[str, int]) -> None:  # pylint: disable=arguments-renamed
         try:
             message = decode_datagram(datagram)
-        except (ValueError, TypeError):
+        except (ValueError, TypeError, DecodeError):
             self.peer_manager.report_failure(address[0], address[1])
-            log.warning("Couldn't decode dht datagram from %s: %s", address, binascii.hexlify(datagram).decode())
+            log.warning("Couldn't decode dht datagram from %s: %s", address, datagram.hex())
             return

         if isinstance(message, RequestDatagram):
@@ -565,14 +550,19 @@ class KademliaProtocol(DatagramProtocol):
         self._send(peer, request)
         response_fut = self.sent_messages[request.rpc_id][1]
         try:
+            self.request_sent_metric.labels(method=request.method).inc()
+            start = time.perf_counter()
             response = await asyncio.wait_for(response_fut, self.rpc_timeout)
+            self.response_time_metric.labels(method=request.method).observe(time.perf_counter() - start)
             self.peer_manager.report_last_replied(peer.address, peer.udp_port)
+            self.request_success_metric.labels(method=request.method).inc()
             return response
         except asyncio.CancelledError:
             if not response_fut.done():
                 response_fut.cancel()
             raise
         except (asyncio.TimeoutError, RemoteException):
+            self.request_error_metric.labels(method=request.method).inc()
             self.peer_manager.report_failure(peer.address, peer.udp_port)
             if self.peer_manager.peer_is_good(peer) is False:
                 self.remove_peer(peer)
@@ -589,12 +579,12 @@ class KademliaProtocol(DatagramProtocol):
             raise TransportNotConnected()

         data = message.bencode()
-        if len(data) > constants.msg_size_limit:
+        if len(data) > constants.MSG_SIZE_LIMIT:
             log.warning("cannot send datagram larger than %i bytes (packet is %i bytes)",
-                        constants.msg_size_limit, len(data))
-            log.debug("Packet is too large to send: %s", binascii.hexlify(data[:3500]).decode())
+                        constants.MSG_SIZE_LIMIT, len(data))
+            log.debug("Packet is too large to send: %s", data[:3500].hex())
             raise ValueError(
-                f"cannot send datagram larger than {constants.msg_size_limit} bytes (packet is {len(data)} bytes)"
+                f"cannot send datagram larger than {constants.MSG_SIZE_LIMIT} bytes (packet is {len(data)} bytes)"
             )
         if isinstance(message, (RequestDatagram, ResponseDatagram)):
             assert message.node_id == self.node_id, message
@@ -637,35 +627,38 @@ class KademliaProtocol(DatagramProtocol):
         return constants.digest(self.token_secret + compact_ip)

     def verify_token(self, token, compact_ip):
-        h = constants.hash_class()
+        h = constants.HASH_CLASS()
         h.update(self.token_secret + compact_ip)
         if self.old_token_secret and not token == h.digest():  # TODO: why should we be accepting the previous token?
-            h = constants.hash_class()
+            h = constants.HASH_CLASS()
             h.update(self.old_token_secret + compact_ip)
             if not token == h.digest():
                 return False
         return True

-    async def store_to_peer(self, hash_value: bytes, peer: 'KademliaPeer',
+    async def store_to_peer(self, hash_value: bytes, peer: 'KademliaPeer',  # pylint: disable=too-many-return-statements
                             retry: bool = True) -> typing.Tuple[bytes, bool]:
         async def __store():
             res = await self.get_rpc_peer(peer).store(hash_value)
             if res != b"OK":
                 raise ValueError(res)
-            log.debug("Stored %s to %s", binascii.hexlify(hash_value).decode()[:8], peer)
+            log.debug("Stored %s to %s", hash_value.hex()[:8], peer)
             return peer.node_id, True

         try:
             return await __store()
         except asyncio.TimeoutError:
-            log.debug("Timeout while storing blob_hash %s at %s", binascii.hexlify(hash_value).decode()[:8], peer)
+            log.debug("Timeout while storing blob_hash %s at %s", hash_value.hex()[:8], peer)
             return peer.node_id, False
         except ValueError as err:
-            log.error("Unexpected response: %s" % err)
+            log.error("Unexpected response: %s", err)
             return peer.node_id, False
         except RemoteException as err:
+            if 'findValue() takes exactly 2 arguments (5 given)' in str(err):
+                log.debug("peer %s:%i is running an incompatible version of lbrynet", peer.address, peer.udp_port)
+                return peer.node_id, False
             if 'Invalid token' not in str(err):
-                log.exception("Unexpected error while storing blob_hash")
+                log.warning("Unexpected error while storing blob_hash: %s", err)
                 return peer.node_id, False
         self.peer_manager.clear_token(peer.node_id)
         if not retry:


@@ -4,7 +4,11 @@ import logging
import typing
import itertools
+from prometheus_client import Gauge
+from lbry import utils
from lbry.dht import constants
+from lbry.dht.error import RemoteException
from lbry.dht.protocol.distance import Distance
if typing.TYPE_CHECKING:
from lbry.dht.peer import KademliaPeer, PeerManager
@@ -13,10 +17,20 @@ log = logging.getLogger(__name__)
class KBucket:
-""" Description - later
-"""
+"""
+Kademlia K-bucket implementation.
+"""
+peer_in_routing_table_metric = Gauge(
+"peers_in_routing_table", "Number of peers on routing table", namespace="dht_node",
+labelnames=("scope",)
+)
+peer_with_x_bit_colliding_metric = Gauge(
+"peer_x_bit_colliding", "Number of peers with at least X bits colliding with this node id",
+namespace="dht_node", labelnames=("amount",)
+)
-def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int, node_id: bytes):
+def __init__(self, peer_manager: 'PeerManager', range_min: int, range_max: int,
+node_id: bytes, capacity: int = constants.K):
"""
@param range_min: The lower boundary for the range in the n-bit ID
space covered by this k-bucket
@@ -24,12 +38,12 @@ class KBucket:
covered by this k-bucket
"""
self._peer_manager = peer_manager
-self.last_accessed = 0
self.range_min = range_min
self.range_max = range_max
self.peers: typing.List['KademliaPeer'] = []
self._node_id = node_id
self._distance_to_self = Distance(node_id)
+self.capacity = capacity
def add_peer(self, peer: 'KademliaPeer') -> bool:
""" Add contact to _contact list in the right order. This will move the
@@ -50,24 +64,25 @@ class KBucket:
self.peers.append(peer)
return True
else:
-for i in range(len(self.peers)):
+for i, _ in enumerate(self.peers):
-p = self.peers[i]
+local_peer = self.peers[i]
-if p.node_id == peer.node_id:
+if local_peer.node_id == peer.node_id:
-self.peers.remove(p)
+self.peers.remove(local_peer)
self.peers.append(peer)
return True
-if len(self.peers) < constants.k:
+if len(self.peers) < self.capacity:
self.peers.append(peer)
+self.peer_in_routing_table_metric.labels("global").inc()
+bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
+self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).inc()
return True
else:
return False
-# raise BucketFull("No space in bucket to insert contact")
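The new gauge buckets peers by how many leading bits of their node id match ours. `lbry.utils.get_colliding_prefix_bits` is not shown in this diff; a plausible sketch of what such a helper computes (an assumption, not the repository's exact implementation):

```python
def get_colliding_prefix_bits(node_id_a: bytes, node_id_b: bytes) -> int:
    """Count the leading bits shared by two equal-length ids.

    The XOR of the two ids has a 1 at the first differing bit, so the
    answer is the total bit length minus the XOR's bit length.
    """
    length = min(len(node_id_a), len(node_id_b)) * 8
    xored = int.from_bytes(node_id_a, 'big') ^ int.from_bytes(node_id_b, 'big')
    return length - xored.bit_length()

# Example: 48-byte ids differing only in the last bit share 8*48 - 1 = 383 bits.
a = b'\x00' * 48
b = b'\x00' * 47 + b'\x01'
assert get_colliding_prefix_bits(a, b) == 383
```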
def get_peer(self, node_id: bytes) -> 'KademliaPeer':
for peer in self.peers:
if peer.node_id == node_id:
return peer
+raise IndexError(node_id)
def get_peers(self, count=-1, exclude_contact=None, sort_distance_to=None) -> typing.List['KademliaPeer']:
""" Returns a list containing up to the first count number of contacts
@@ -101,8 +116,8 @@ class KBucket:
current_len = len(peers)
# If count greater than k - return only k contacts
-if count > constants.k:
+if count > constants.K:
-count = constants.k
+count = constants.K
if not current_len:
return peers
@@ -124,6 +139,9 @@ class KBucket:
def remove_peer(self, peer: 'KademliaPeer') -> None:
self.peers.remove(peer)
+self.peer_in_routing_table_metric.labels("global").dec()
+bits_colliding = utils.get_colliding_prefix_bits(peer.node_id, self._node_id)
+self.peer_with_x_bit_colliding_metric.labels(amount=bits_colliding).dec()
def key_in_range(self, key: bytes) -> bool:
""" Tests whether the specified key (i.e. node ID) is in the range
@@ -161,31 +179,43 @@ class TreeRoutingTable:
version of the Kademlia paper, in section 2.4. It does, however, use the
ping RPC-based k-bucket eviction algorithm described in section 2.2 of
that paper.
+BOOTSTRAP MODE: if set to True, we always add all peers. This is so a
+bootstrap node does not get a bias towards its own node id and replies are
+the best it can provide (joining peer knows its neighbors immediately).
+Over time, this will need to be optimized so we use the disk as holding
+everything in memory won't be feasible anymore.
+See: https://github.com/bittorrent/bootstrap-dht
"""
+bucket_in_routing_table_metric = Gauge(
+"buckets_in_routing_table", "Number of buckets on routing table", namespace="dht_node",
+labelnames=("scope",)
+)
def __init__(self, loop: asyncio.AbstractEventLoop, peer_manager: 'PeerManager', parent_node_id: bytes,
-split_buckets_under_index: int = constants.split_buckets_under_index):
+split_buckets_under_index: int = constants.SPLIT_BUCKETS_UNDER_INDEX, is_bootstrap_node: bool = False):
self._loop = loop
self._peer_manager = peer_manager
self._parent_node_id = parent_node_id
self._split_buckets_under_index = split_buckets_under_index
self.buckets: typing.List[KBucket] = [
KBucket(
-self._peer_manager, range_min=0, range_max=2 ** constants.hash_bits, node_id=self._parent_node_id
+self._peer_manager, range_min=0, range_max=2 ** constants.HASH_BITS, node_id=self._parent_node_id,
+capacity=1 << 32 if is_bootstrap_node else constants.K
)
]
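In bootstrap mode the single initial bucket is created with an effectively unlimited capacity (`1 << 32`) instead of `constants.K`, so the node keeps every peer it hears about. A hedged sketch of how the flag might be wired up (the `PeerManager(loop)` construction is simplified and assumed):

```python
import asyncio

loop = asyncio.get_event_loop()
peer_manager = PeerManager(loop)        # hypothetical wiring
node_id = constants.generate_id()

# A regular node: buckets hold at most K peers and split as needed.
table = TreeRoutingTable(loop, peer_manager, node_id)

# A bootstrap node: the first bucket accepts (practically) everyone,
# avoiding the bias a size-K bucket has towards ids near our own.
bootstrap_table = TreeRoutingTable(loop, peer_manager, node_id, is_bootstrap_node=True)
```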
def get_peers(self) -> typing.List['KademliaPeer']:
return list(itertools.chain.from_iterable(map(lambda bucket: bucket.peers, self.buckets)))
-def should_split(self, bucket_index: int, to_add: bytes) -> bool:
+def _should_split(self, bucket_index: int, to_add: bytes) -> bool:
# https://stackoverflow.com/questions/32129978/highly-unbalanced-kademlia-routing-table/32187456#32187456
if bucket_index < self._split_buckets_under_index:
return True
contacts = self.get_peers()
distance = Distance(self._parent_node_id)
contacts.sort(key=lambda c: distance(c.node_id))
-kth_contact = contacts[-1] if len(contacts) < constants.k else contacts[constants.k - 1]
+kth_contact = contacts[-1] if len(contacts) < constants.K else contacts[constants.K - 1]
return distance(to_add) < distance(kth_contact.node_id)
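Beyond the always-split prefix, a full bucket is split only when the candidate is closer to our id (by XOR distance) than the K-th closest contact we already know. A toy illustration of that rule with small integers (`xor_distance` stands in for `lbry.dht.protocol.distance.Distance`):

```python
K = 8  # bucket size; lbry uses constants.K

def xor_distance(a: int, b: int) -> int:
    # Kademlia's metric: bitwise XOR of the two ids.
    return a ^ b

our_id = 0b0000
contacts = [0b1000, 0b1100, 0b0110, 0b0011, 0b0001]  # known node ids

contacts.sort(key=lambda c: xor_distance(our_id, c))
kth = contacts[-1] if len(contacts) < K else contacts[K - 1]

def should_split(candidate: int) -> bool:
    # Split only if the newcomer would rank among our K closest.
    return xor_distance(our_id, candidate) < xor_distance(our_id, kth)

assert should_split(0b0010)      # closer than the current K-th contact
assert not should_split(0b1111)  # farther away; no split
```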
def find_close_peers(self, key: bytes, count: typing.Optional[int] = None,
@@ -193,7 +223,7 @@ class TreeRoutingTable:
exclude = [self._parent_node_id]
if sender_node_id:
exclude.append(sender_node_id)
-count = count or constants.k
+count = count or constants.K
distance = Distance(key)
contacts = self.get_peers()
contacts = [c for c in contacts if c.node_id not in exclude]
@@ -203,39 +233,32 @@ class TreeRoutingTable:
return []
def get_peer(self, contact_id: bytes) -> 'KademliaPeer':
-"""
-@raise IndexError: No contact with the specified contact ID is known
-by this node
-"""
-return self.buckets[self.kbucket_index(contact_id)].get_peer(contact_id)
+return self.buckets[self._kbucket_index(contact_id)].get_peer(contact_id)
def get_refresh_list(self, start_index: int = 0, force: bool = False) -> typing.List[bytes]:
-bucket_index = start_index
refresh_ids = []
-now = int(self._loop.time())
-for bucket in self.buckets[start_index:]:
-if force or now - bucket.last_accessed >= constants.refresh_interval:
-to_search = self.midpoint_id_in_bucket_range(bucket_index)
-refresh_ids.append(to_search)
-bucket_index += 1
+for offset, _ in enumerate(self.buckets[start_index:]):
+refresh_ids.append(self._midpoint_id_in_bucket_range(start_index + offset))
+# if we have 3 or fewer populated buckets get two random ids in the range of each to try and
+# populate/split the buckets further
+buckets_with_contacts = self.buckets_with_contacts()
+if buckets_with_contacts <= 3:
+for i in range(buckets_with_contacts):
+refresh_ids.append(self._random_id_in_bucket_range(i))
+refresh_ids.append(self._random_id_in_bucket_range(i))
return refresh_ids
def remove_peer(self, peer: 'KademliaPeer') -> None:
if not peer.node_id:
return
-bucket_index = self.kbucket_index(peer.node_id)
+bucket_index = self._kbucket_index(peer.node_id)
try:
self.buckets[bucket_index].remove_peer(peer)
+self._join_buckets()
except ValueError:
return
-def touch_kbucket(self, key: bytes) -> None:
-self.touch_kbucket_by_index(self.kbucket_index(key))
-def touch_kbucket_by_index(self, bucket_index: int):
-self.buckets[bucket_index].last_accessed = int(self._loop.time())
-def kbucket_index(self, key: bytes) -> int:
+def _kbucket_index(self, key: bytes) -> int:
i = 0
for bucket in self.buckets:
if bucket.key_in_range(key):
@@ -244,19 +267,19 @@ class TreeRoutingTable:
i += 1
return i
-def random_id_in_bucket_range(self, bucket_index: int) -> bytes:
+def _random_id_in_bucket_range(self, bucket_index: int) -> bytes:
random_id = int(random.randrange(self.buckets[bucket_index].range_min, self.buckets[bucket_index].range_max))
return Distance(
self._parent_node_id
-)(random_id.to_bytes(constants.hash_length, 'big')).to_bytes(constants.hash_length, 'big')
+)(random_id.to_bytes(constants.HASH_LENGTH, 'big')).to_bytes(constants.HASH_LENGTH, 'big')
-def midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
+def _midpoint_id_in_bucket_range(self, bucket_index: int) -> bytes:
half = int((self.buckets[bucket_index].range_max - self.buckets[bucket_index].range_min) // 2)
return Distance(self._parent_node_id)(
-int(self.buckets[bucket_index].range_min + half).to_bytes(constants.hash_length, 'big')
+int(self.buckets[bucket_index].range_min + half).to_bytes(constants.HASH_LENGTH, 'big')
-).to_bytes(constants.hash_length, 'big')
+).to_bytes(constants.HASH_LENGTH, 'big')
-def split_bucket(self, old_bucket_index: int) -> None:
+def _split_bucket(self, old_bucket_index: int) -> None:
""" Splits the specified k-bucket into two new buckets which together
cover the same range in the key/ID space
@@ -279,11 +302,12 @@ class TreeRoutingTable:
# ...and remove them from the old bucket
for contact in new_bucket.peers:
old_bucket.remove_peer(contact)
+self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
-def join_buckets(self):
+def _join_buckets(self):
if len(self.buckets) == 1:
return
-to_pop = [i for i, bucket in enumerate(self.buckets) if not len(bucket)]
+to_pop = [i for i, bucket in enumerate(self.buckets) if len(bucket) == 0]
if not to_pop:
return
log.info("join buckets %i", len(to_pop))
@@ -302,18 +326,79 @@ class TreeRoutingTable:
elif can_go_higher:
self.buckets[bucket_index_to_pop + 1].range_min = bucket.range_min
self.buckets.remove(bucket)
-return self.join_buckets()
+self.bucket_in_routing_table_metric.labels("global").set(len(self.buckets))
+return self._join_buckets()
-def contact_in_routing_table(self, address_tuple: typing.Tuple[str, int]) -> bool:
-for bucket in self.buckets:
-for contact in bucket.get_peers(sort_distance_to=False):
-if address_tuple[0] == contact.address and address_tuple[1] == contact.udp_port:
-return True
-return False
def buckets_with_contacts(self) -> int:
count = 0
for bucket in self.buckets:
-if len(bucket):
+if len(bucket) > 0:
count += 1
return count
async def add_peer(self, peer: 'KademliaPeer', probe: typing.Callable[['KademliaPeer'], typing.Awaitable]):
if not peer.node_id:
log.warning("Tried adding a peer with no node id!")
return False
for my_peer in self.get_peers():
if (my_peer.address, my_peer.udp_port) == (peer.address, peer.udp_port) and my_peer.node_id != peer.node_id:
self.remove_peer(my_peer)
self._join_buckets()
bucket_index = self._kbucket_index(peer.node_id)
if self.buckets[bucket_index].add_peer(peer):
return True
# The bucket is full; see if it can be split (by checking if its range includes the host node's node_id)
if self._should_split(bucket_index, peer.node_id):
self._split_bucket(bucket_index)
# Retry the insertion attempt
result = await self.add_peer(peer, probe)
self._join_buckets()
return result
else:
# We can't split the k-bucket
#
# The 13 page kademlia paper specifies that the least recently contacted node in the bucket
# shall be pinged. If it fails to reply it is replaced with the new contact. If the ping is successful
# the new contact is ignored and not added to the bucket (sections 2.2 and 2.4).
#
# A reasonable extension to this is BEP 0005, which extends the above:
#
# Not all nodes that we learn about are equal. Some are "good" and some are not.
# Many nodes using the DHT are able to send queries and receive responses,
# but are not able to respond to queries from other nodes. It is important that
# each node's routing table must contain only known good nodes. A good node is
# a node has responded to one of our queries within the last 15 minutes. A node
# is also good if it has ever responded to one of our queries and has sent us a
# query within the last 15 minutes. After 15 minutes of inactivity, a node becomes
# questionable. Nodes become bad when they fail to respond to multiple queries
# in a row. Nodes that we know are good are given priority over nodes with unknown status.
#
# When there are bad or questionable nodes in the bucket, the least recent is selected for
# potential replacement (BEP 0005). When all nodes in the bucket are fresh, the head (least recent)
# contact is selected as described in section 2.2 of the kademlia paper. In both cases the new contact
# is ignored if the pinged node replies.
not_good_contacts = self.buckets[bucket_index].get_bad_or_unknown_peers()
not_recently_replied = []
for my_peer in not_good_contacts:
last_replied = self._peer_manager.get_last_replied(my_peer.address, my_peer.udp_port)
if not last_replied or last_replied + 60 < self._loop.time():
not_recently_replied.append(my_peer)
if not_recently_replied:
to_replace = not_recently_replied[0]
else:
to_replace = self.buckets[bucket_index].peers[0]
last_replied = self._peer_manager.get_last_replied(to_replace.address, to_replace.udp_port)
if last_replied and last_replied + 60 > self._loop.time():
return False
log.debug("pinging %s:%s", to_replace.address, to_replace.udp_port)
try:
await probe(to_replace)
return False
except (asyncio.TimeoutError, RemoteException):
log.debug("Replacing dead contact in bucket %i: %s:%i with %s:%i ", bucket_index,
to_replace.address, to_replace.udp_port, peer.address, peer.udp_port)
if to_replace in self.buckets[bucket_index]:
self.buckets[bucket_index].remove_peer(to_replace)
return await self.add_peer(peer, probe)
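`add_peer` now lives on the routing table and takes a `probe` callable, so the eviction ping described in the comments above stays decoupled from the transport. A hedged sketch of how a protocol layer might call it (the real KademliaProtocol wiring differs in detail):

```python
async def on_peer_seen(routing_table, protocol, peer):
    # The probe is just "ping this peer and raise on failure"; add_peer
    # uses it to test the eviction candidate when a bucket is full.
    async def probe(candidate):
        await protocol.get_rpc_peer(candidate).ping()

    added = await routing_table.add_peer(peer, probe)
    if not added:
        # Bucket was full and the pinged contact replied, so the
        # newcomer was dropped, per section 2.2 of the paper.
        pass
```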


@@ -52,8 +52,7 @@ def _bdecode(data: bytes, start_index: int = 0) -> typing.Tuple[typing.Union[int
raise DecodeError(err)
start_index = split_pos + 1
end_pos = start_index + length
-b = data[start_index:end_pos]
-return b, end_pos
+return data[start_index:end_pos], end_pos
def bencode(data: typing.Dict) -> bytes:


@@ -7,9 +7,13 @@ REQUEST_TYPE = 0
RESPONSE_TYPE = 1
ERROR_TYPE = 2
+OPTIONAL_ARG_OFFSET = 100
# bencode representation of argument keys
PAGE_KEY = b'p'
+OPTIONAL_FIELDS = ()
class KademliaDatagramBase:
"""
@@ -18,7 +22,7 @@ class KademliaDatagramBase:
these correspond to the packet_type, rpc_id, and node_id args
"""
-fields = [
+required_fields = [
'packet_type',
'rpc_id',
'node_id'
@@ -30,21 +34,26 @@ class KademliaDatagramBase:
self.packet_type = packet_type
if self.expected_packet_type != packet_type:
raise ValueError(f"invalid packet type: {packet_type}, expected {self.expected_packet_type}")
-if len(rpc_id) != constants.rpc_id_length:
+if len(rpc_id) != constants.RPC_ID_LENGTH:
raise ValueError(f"invalid rpc node_id: {len(rpc_id)} bytes (expected 20)")
-if not len(node_id) == constants.hash_length:
+if not len(node_id) == constants.HASH_LENGTH:
raise ValueError(f"invalid node node_id: {len(node_id)} bytes (expected 48)")
self.rpc_id = rpc_id
self.node_id = node_id
def bencode(self) -> bytes:
-return bencode({
-i: getattr(self, k) for i, k in enumerate(self.fields)
-})
+datagram = {
+i: getattr(self, k) for i, k in enumerate(self.required_fields)
+}
+for i, k in enumerate(OPTIONAL_FIELDS):
+value = getattr(self, k, None)
+if value is not None:
+datagram[i + OPTIONAL_ARG_OFFSET] = value
+return bencode(datagram)
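Required fields keep their positional integer keys, while any optional field is written at its index plus `OPTIONAL_ARG_OFFSET` (100), so older decoders that only look at keys 0..n simply ignore it. `OPTIONAL_FIELDS` is empty in this diff; a hedged sketch of the key layout if it later held, say, a `page` field:

```python
# Hypothetical: OPTIONAL_FIELDS = ('page',) and a datagram whose
# required fields are packet_type, rpc_id and node_id.
OPTIONAL_ARG_OFFSET = 100
required_fields = ('packet_type', 'rpc_id', 'node_id')

values = {'packet_type': 0, 'rpc_id': b'\x01' * 20, 'node_id': b'\x02' * 48, 'page': 3}

datagram = {i: values[k] for i, k in enumerate(required_fields)}
datagram[0 + OPTIONAL_ARG_OFFSET] = values['page']  # first optional field

# Required args sit at 0..2, the optional one is parked at 100:
assert sorted(datagram) == [0, 1, 2, 100]
```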
class RequestDatagram(KademliaDatagramBase):
-fields = [
+required_fields = [
'packet_type',
'rpc_id',
'node_id',
@@ -68,18 +77,18 @@ class RequestDatagram(KademliaDatagramBase):
@classmethod
def make_ping(cls, from_node_id: bytes, rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
-rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
+rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'ping')
@classmethod
def make_store(cls, from_node_id: bytes, blob_hash: bytes, token: bytes, port: int,
rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
-rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
+rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
-if len(blob_hash) != constants.hash_bits // 8:
+if len(blob_hash) != constants.HASH_BITS // 8:
raise ValueError(f"invalid blob hash length: {len(blob_hash)}")
if not 0 < port < 65536:
raise ValueError(f"invalid port: {port}")
-if len(token) != constants.hash_bits // 8:
+if len(token) != constants.HASH_BITS // 8:
raise ValueError(f"invalid token length: {len(token)}")
store_args = [blob_hash, token, port, from_node_id, 0]
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'store', store_args)
@@ -87,16 +96,16 @@ class RequestDatagram(KademliaDatagramBase):
@classmethod
def make_find_node(cls, from_node_id: bytes, key: bytes,
rpc_id: typing.Optional[bytes] = None) -> 'RequestDatagram':
-rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
+rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
-if len(key) != constants.hash_bits // 8:
+if len(key) != constants.HASH_BITS // 8:
raise ValueError(f"invalid key length: {len(key)}")
return cls(REQUEST_TYPE, rpc_id, from_node_id, b'findNode', [key])
@classmethod
def make_find_value(cls, from_node_id: bytes, key: bytes,
rpc_id: typing.Optional[bytes] = None, page: int = 0) -> 'RequestDatagram':
-rpc_id = rpc_id or constants.generate_id()[:constants.rpc_id_length]
+rpc_id = rpc_id or constants.generate_id()[:constants.RPC_ID_LENGTH]
-if len(key) != constants.hash_bits // 8:
+if len(key) != constants.HASH_BITS // 8:
raise ValueError(f"invalid key length: {len(key)}")
if page < 0:
raise ValueError(f"cannot request a negative page ({page})")
@@ -104,7 +113,7 @@ class RequestDatagram(KademliaDatagramBase):
class ResponseDatagram(KademliaDatagramBase):
-fields = [
+required_fields = [
'packet_type',
'rpc_id',
'node_id',
@@ -119,7 +128,7 @@ class ResponseDatagram(KademliaDatagramBase):
class ErrorDatagram(KademliaDatagramBase):
-fields = [
+required_fields = [
'packet_type',
'rpc_id',
'node_id',
@@ -135,7 +144,7 @@ class ErrorDatagram(KademliaDatagramBase):
self.response = response.decode()
-def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
+def _decode_datagram(datagram: bytes):
msg_types = {
REQUEST_TYPE: RequestDatagram,
RESPONSE_TYPE: ResponseDatagram,
@@ -143,23 +152,36 @@ def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDa
}
primitive: typing.Dict = bdecode(datagram)
-if primitive[0] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]:  # pylint: disable=unsubscriptable-object
-datagram_type = primitive[0]  # pylint: disable=unsubscriptable-object
+converted = {
+str(k).encode() if not isinstance(k, bytes) else k: v for k, v in primitive.items()
+}
+if converted[b'0'] in [REQUEST_TYPE, ERROR_TYPE, RESPONSE_TYPE]:  # pylint: disable=unsubscriptable-object
+datagram_type = converted[b'0']  # pylint: disable=unsubscriptable-object
else:
raise ValueError("invalid datagram type")
datagram_class = msg_types[datagram_type]
-return datagram_class(**{
-k: primitive[i]  # pylint: disable=unsubscriptable-object
-for i, k in enumerate(datagram_class.fields)
-if i in primitive  # pylint: disable=unsupported-membership-test
-}
-)
+decoded = {
+k: converted[str(i).encode()]  # pylint: disable=unsubscriptable-object
+for i, k in enumerate(datagram_class.required_fields)
+if str(i).encode() in converted  # pylint: disable=unsupported-membership-test
+}
+for i, _ in enumerate(OPTIONAL_FIELDS):
+if str(i + OPTIONAL_ARG_OFFSET).encode() in converted:
+decoded[i + OPTIONAL_ARG_OFFSET] = converted[str(i + OPTIONAL_ARG_OFFSET).encode()]
+return decoded, datagram_class
+def decode_datagram(datagram: bytes) -> typing.Union[RequestDatagram, ResponseDatagram, ErrorDatagram]:
+decoded, datagram_class = _decode_datagram(datagram)
+return datagram_class(**decoded)
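The split into `_decode_datagram` plus a thin `decode_datagram` wrapper keeps the wire format round-trippable. A quick sketch of the expected round trip (module path assumed from the repository layout, node id sized for lbry's 48-byte hashes, and the usual `method` field assumed on RequestDatagram):

```python
from lbry.dht.serialization.datagram import RequestDatagram, decode_datagram

node_id = b'\x01' * 48  # constants.HASH_LENGTH bytes

ping = RequestDatagram.make_ping(node_id)
wire = ping.bencode()           # bytes on the UDP socket
parsed = decode_datagram(wire)  # back to a RequestDatagram

assert isinstance(parsed, RequestDatagram)
assert parsed.node_id == node_id and parsed.method == b'ping'
```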
def make_compact_ip(address: str) -> bytearray:
compact_ip = reduce(lambda buff, x: buff + bytearray([int(x)]), address.split('.'), bytearray())
if len(compact_ip) != 4:
-raise ValueError(f"invalid IPv4 length")
+raise ValueError("invalid IPv4 length")
return compact_ip
@@ -167,8 +189,8 @@ def make_compact_address(node_id: bytes, address: str, port: int) -> bytearray:
compact_ip = make_compact_ip(address)
if not 0 < port < 65536:
raise ValueError(f'Invalid port: {port}')
-if len(node_id) != constants.hash_bits // 8:
+if len(node_id) != constants.HASH_BITS // 8:
-raise ValueError(f"invalid node node_id length")
+raise ValueError("invalid node node_id length")
return compact_ip + port.to_bytes(2, 'big') + node_id
@@ -178,6 +200,6 @@ def decode_compact_address(compact_address: bytes) -> typing.Tuple[bytes, str, i
node_id = compact_address[6:]
if not 0 < port < 65536:
raise ValueError(f'Invalid port: {port}')
-if len(node_id) != constants.hash_bits // 8:
+if len(node_id) != constants.HASH_BITS // 8:
-raise ValueError(f"invalid node node_id length")
+raise ValueError("invalid node node_id length")
return node_id, address, port
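A compact address is 4 bytes of IPv4, 2 bytes of big-endian port, then the 48-byte node id, 54 bytes in total. A worked example of the round trip using the two functions above:

```python
node_id = b'\xaa' * 48
compact = make_compact_address(node_id, '127.0.0.1', 4444)

assert len(compact) == 4 + 2 + 48
assert bytes(compact[:4]) == b'\x7f\x00\x00\x01'      # 127.0.0.1
assert int.from_bytes(compact[4:6], 'big') == 4444    # port

decoded_id, address, port = decode_compact_address(bytes(compact))
assert (decoded_id, address, port) == (node_id, '127.0.0.1', 4444)
```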

File diff suppressed because one or more lines are too long

lbry/error/Makefile Normal file

@@ -0,0 +1,5 @@
generate:
python generate.py generate > __init__.py
analyze:
python generate.py analyze

lbry/error/README.md Normal file

@@ -0,0 +1,95 @@
# Exceptions
Exceptions in LBRY are defined and generated from the Markdown table at the end of this README.
## Guidelines
When possible, use [built-in Python exceptions](https://docs.python.org/3/library/exceptions.html) or `aiohttp` [general client](https://docs.aiohttp.org/en/latest/client_reference.html#client-exceptions) / [HTTP](https://docs.aiohttp.org/en/latest/web_exceptions.html) exceptions, unless:
1. You want to provide a better error message (extend the closest built-in/`aiohttp` exception in this case).
2. You need to represent a new situation.
When defining your own exceptions, consider:
1. Extending a built-in Python or `aiohttp` exception.
2. Using contextual variables in the error message.
## Table Column Definitions
Column | Meaning
---|---
Code | Codes are used only to define the hierarchy of exceptions and do not end up in the generated output; it is okay to re-number things as necessary at any time to achieve the desired hierarchy.
Name | Becomes the class name of the exception with "Error" appended to the end. Changing names of existing exceptions makes the API backwards incompatible. When extending other exceptions you must specify the full class name, manually adding "Error" as necessary (if extending another SDK exception).
Message | User friendly error message explaining the exceptional event. Supports Python formatted strings: any variables used in the string will be generated as arguments in the `__init__` method. Use `--` to provide a doc string after the error message to be added to the class definition.
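Each row is turned into a class by `generate.py` (shown later in this diff): `{variables}` in the Message column become `__init__` arguments, and text after `--` becomes the docstring. For example, row 111 of the table below generates:

```python
class GenericInputValueError(InputValueError):
    def __init__(self, value, argument):
        self.value = value
        self.argument = argument
        super().__init__(f"The value '{value}' for argument '{argument}' is not valid.")
```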
## Exceptions Table
Code | Name | Message
---:|---|---
**1xx** | UserInput | User input errors.
**10x** | Command | Errors preparing to execute commands.
101 | CommandDoesNotExist | Command '{command}' does not exist.
102 | CommandDeprecated | Command '{command}' is deprecated.
103 | CommandInvalidArgument | Invalid argument '{argument}' to command '{command}'.
104 | CommandTemporarilyUnavailable | Command '{command}' is temporarily unavailable. -- Such as waiting for required components to start.
105 | CommandPermanentlyUnavailable | Command '{command}' is permanently unavailable. -- such as when required component was intentionally configured not to start.
**11x** | InputValue(ValueError) | Invalid argument value provided to command.
111 | GenericInputValue | The value '{value}' for argument '{argument}' is not valid.
112 | InputValueIsNone | None or null is not valid value for argument '{argument}'.
113 | ConflictingInputValue | Only '{first_argument}' or '{second_argument}' is allowed, not both.
114 | InputStringIsBlank | {argument} cannot be blank.
115 | EmptyPublishedFile | Cannot publish empty file: {file_path}
116 | MissingPublishedFile | File does not exist: {file_path}
117 | InvalidStreamURL | Invalid LBRY stream URL: '{url}' -- When an URL cannot be downloaded, such as '@Channel/' or a collection
**2xx** | Configuration | Configuration errors.
201 | ConfigWrite | Cannot write configuration file '{path}'. -- When writing the default config fails on startup, such as due to permission issues.
202 | ConfigRead | Cannot find provided configuration file '{path}'. -- Can't open the config file user provided via command line args.
203 | ConfigParse | Failed to parse the configuration file '{path}'. -- Includes the syntax error / line number to help user fix it.
204 | ConfigMissing | Configuration file '{path}' is missing setting that has no default / fallback.
205 | ConfigInvalid | Configuration file '{path}' has setting with invalid value.
**3xx** | Network | **Networking**
301 | NoInternet | No internet connection.
302 | NoUPnPSupport | Router does not support UPnP.
**4xx** | Wallet | **Wallet Errors**
401 | TransactionRejected | Transaction rejected, unknown reason.
402 | TransactionFeeTooLow | Fee too low.
403 | TransactionInvalidSignature | Invalid signature.
404 | InsufficientFunds | Not enough funds to cover this transaction. -- determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX being created and sent but then rejected by lbrycrd for unspendable utxos.
405 | ChannelKeyNotFound | Channel signing key not found.
406 | ChannelKeyInvalid | Channel signing key is out of date. -- For example, channel was updated but you don't have the updated key.
407 | DataDownload | Failed to download blob. *generic*
408 | PrivateKeyNotFound | Couldn't find private key for {key} '{value}'.
410 | Resolve | Failed to resolve '{url}'.
411 | ResolveTimeout | Failed to resolve '{url}' within the timeout.
411 | ResolveCensored | Resolve of '{url}' was censored by channel with claim id '{censor_id}'.
420 | KeyFeeAboveMaxAllowed | {message}
421 | InvalidPassword | Password is invalid.
422 | IncompatibleWalletServer | '{server}:{port}' has an incompatibly old version.
423 | TooManyClaimSearchParameters | {key} cant have more than {limit} items.
424 | AlreadyPurchased | You already have a purchase for claim_id '{claim_id_hex}'. Use --allow-duplicate-purchase flag to override.
431 | ServerPaymentInvalidAddress | Invalid address from wallet server: '{address}' - skipping payment round.
432 | ServerPaymentWalletLocked | Cannot spend funds with locked wallet, skipping payment round.
433 | ServerPaymentFeeAboveMaxAllowed | Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.
434 | WalletNotLoaded | Wallet {wallet_id} is not loaded.
435 | WalletAlreadyLoaded | Wallet {wallet_path} is already loaded.
436 | WalletNotFound | Wallet not found at {wallet_path}.
437 | WalletAlreadyExists | Wallet {wallet_path} already exists, use `wallet_add` to load it.
**5xx** | Blob | **Blobs**
500 | BlobNotFound | Blob not found.
501 | BlobPermissionDenied | Permission denied to read blob.
502 | BlobTooBig | Blob is too big.
503 | BlobEmpty | Blob is empty.
510 | BlobFailedDecryption | Failed to decrypt blob.
511 | CorruptBlob | Blobs is corrupted.
520 | BlobFailedEncryption | Failed to encrypt blob.
531 | DownloadCancelled | Download was canceled.
532 | DownloadSDTimeout | Failed to download sd blob {download} within timeout.
533 | DownloadDataTimeout | Failed to download data blobs for sd hash {download} within timeout.
534 | InvalidStreamDescriptor | {message}
535 | InvalidData | {message}
536 | InvalidBlobHash | {message}
**6xx** | Component | **Components**
601 | ComponentStartConditionNotMet | Unresolved dependencies for: {components}
602 | ComponentsNotStarted | {message}
**7xx** | CurrencyExchange | **Currency Exchange**
701 | InvalidExchangeRateResponse | Failed to get exchange rate from {source}: {reason}
702 | CurrencyConversion | {message}
703 | InvalidCurrency | Invalid currency: {currency} is not a supported currency.

lbry/error/__init__.py Normal file

@@ -0,0 +1,494 @@
from .base import BaseError, claim_id
class UserInputError(BaseError):
"""
User input errors.
"""
class CommandError(UserInputError):
"""
Errors preparing to execute commands.
"""
class CommandDoesNotExistError(CommandError):
def __init__(self, command):
self.command = command
super().__init__(f"Command '{command}' does not exist.")
class CommandDeprecatedError(CommandError):
def __init__(self, command):
self.command = command
super().__init__(f"Command '{command}' is deprecated.")
class CommandInvalidArgumentError(CommandError):
def __init__(self, argument, command):
self.argument = argument
self.command = command
super().__init__(f"Invalid argument '{argument}' to command '{command}'.")
class CommandTemporarilyUnavailableError(CommandError):
"""
Such as waiting for required components to start.
"""
def __init__(self, command):
self.command = command
super().__init__(f"Command '{command}' is temporarily unavailable.")
class CommandPermanentlyUnavailableError(CommandError):
"""
such as when required component was intentionally configured not to start.
"""
def __init__(self, command):
self.command = command
super().__init__(f"Command '{command}' is permanently unavailable.")
class InputValueError(UserInputError, ValueError):
"""
Invalid argument value provided to command.
"""
class GenericInputValueError(InputValueError):
def __init__(self, value, argument):
self.value = value
self.argument = argument
super().__init__(f"The value '{value}' for argument '{argument}' is not valid.")
class InputValueIsNoneError(InputValueError):
def __init__(self, argument):
self.argument = argument
super().__init__(f"None or null is not valid value for argument '{argument}'.")
class ConflictingInputValueError(InputValueError):
def __init__(self, first_argument, second_argument):
self.first_argument = first_argument
self.second_argument = second_argument
super().__init__(f"Only '{first_argument}' or '{second_argument}' is allowed, not both.")
class InputStringIsBlankError(InputValueError):
def __init__(self, argument):
self.argument = argument
super().__init__(f"{argument} cannot be blank.")
class EmptyPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"Cannot publish empty file: {file_path}")
class MissingPublishedFileError(InputValueError):
def __init__(self, file_path):
self.file_path = file_path
super().__init__(f"File does not exist: {file_path}")
class InvalidStreamURLError(InputValueError):
"""
When an URL cannot be downloaded, such as '@Channel/' or a collection
"""
def __init__(self, url):
self.url = url
super().__init__(f"Invalid LBRY stream URL: '{url}'")
class ConfigurationError(BaseError):
"""
Configuration errors.
"""
class ConfigWriteError(ConfigurationError):
"""
When writing the default config fails on startup, such as due to permission issues.
"""
def __init__(self, path):
self.path = path
super().__init__(f"Cannot write configuration file '{path}'.")
class ConfigReadError(ConfigurationError):
"""
Can't open the config file user provided via command line args.
"""
def __init__(self, path):
self.path = path
super().__init__(f"Cannot find provided configuration file '{path}'.")
class ConfigParseError(ConfigurationError):
"""
Includes the syntax error / line number to help user fix it.
"""
def __init__(self, path):
self.path = path
super().__init__(f"Failed to parse the configuration file '{path}'.")
class ConfigMissingError(ConfigurationError):
def __init__(self, path):
self.path = path
super().__init__(f"Configuration file '{path}' is missing setting that has no default / fallback.")
class ConfigInvalidError(ConfigurationError):
def __init__(self, path):
self.path = path
super().__init__(f"Configuration file '{path}' has setting with invalid value.")
class NetworkError(BaseError):
"""
**Networking**
"""
class NoInternetError(NetworkError):
def __init__(self):
super().__init__("No internet connection.")
class NoUPnPSupportError(NetworkError):
def __init__(self):
super().__init__("Router does not support UPnP.")
class WalletError(BaseError):
"""
**Wallet Errors**
"""
class TransactionRejectedError(WalletError):
def __init__(self):
super().__init__("Transaction rejected, unknown reason.")
class TransactionFeeTooLowError(WalletError):
def __init__(self):
super().__init__("Fee too low.")
class TransactionInvalidSignatureError(WalletError):
def __init__(self):
super().__init__("Invalid signature.")
class InsufficientFundsError(WalletError):
"""
determined by wallet prior to attempting to broadcast a tx; this is different for example from a TX
being created and sent but then rejected by lbrycrd for unspendable utxos.
"""
def __init__(self):
super().__init__("Not enough funds to cover this transaction.")
class ChannelKeyNotFoundError(WalletError):
def __init__(self):
super().__init__("Channel signing key not found.")
class ChannelKeyInvalidError(WalletError):
"""
For example, channel was updated but you don't have the updated key.
"""
def __init__(self):
super().__init__("Channel signing key is out of date.")
class DataDownloadError(WalletError):
def __init__(self):
super().__init__("Failed to download blob. *generic*")
class PrivateKeyNotFoundError(WalletError):
def __init__(self, key, value):
self.key = key
self.value = value
super().__init__(f"Couldn't find private key for {key} '{value}'.")
class ResolveError(WalletError):
def __init__(self, url):
self.url = url
super().__init__(f"Failed to resolve '{url}'.")
class ResolveTimeoutError(WalletError):
def __init__(self, url):
self.url = url
super().__init__(f"Failed to resolve '{url}' within the timeout.")
class ResolveCensoredError(WalletError):
def __init__(self, url, censor_id, censor_row):
self.url = url
self.censor_id = censor_id
self.censor_row = censor_row
super().__init__(f"Resolve of '{url}' was censored by channel with claim id '{censor_id}'.")
class KeyFeeAboveMaxAllowedError(WalletError):
def __init__(self, message):
self.message = message
super().__init__(f"{message}")
class InvalidPasswordError(WalletError):
def __init__(self):
super().__init__("Password is invalid.")
class IncompatibleWalletServerError(WalletError):
def __init__(self, server, port):
self.server = server
self.port = port
super().__init__(f"'{server}:{port}' has an incompatibly old version.")
class TooManyClaimSearchParametersError(WalletError):
def __init__(self, key, limit):
self.key = key
self.limit = limit
super().__init__(f"{key} cant have more than {limit} items.")
class AlreadyPurchasedError(WalletError):
"""
allow-duplicate-purchase flag to override.
"""
def __init__(self, claim_id_hex):
self.claim_id_hex = claim_id_hex
super().__init__(f"You already have a purchase for claim_id '{claim_id_hex}'. Use")
class ServerPaymentInvalidAddressError(WalletError):
def __init__(self, address):
self.address = address
super().__init__(f"Invalid address from wallet server: '{address}' - skipping payment round.")
class ServerPaymentWalletLockedError(WalletError):
def __init__(self):
super().__init__("Cannot spend funds with locked wallet, skipping payment round.")
class ServerPaymentFeeAboveMaxAllowedError(WalletError):
def __init__(self, daily_fee, max_fee):
self.daily_fee = daily_fee
self.max_fee = max_fee
super().__init__(f"Daily server fee of {daily_fee} exceeds maximum configured of {max_fee} LBC.")
class WalletNotLoadedError(WalletError):
def __init__(self, wallet_id):
self.wallet_id = wallet_id
super().__init__(f"Wallet {wallet_id} is not loaded.")
class WalletAlreadyLoadedError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet {wallet_path} is already loaded.")
class WalletNotFoundError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet not found at {wallet_path}.")
class WalletAlreadyExistsError(WalletError):
def __init__(self, wallet_path):
self.wallet_path = wallet_path
super().__init__(f"Wallet {wallet_path} already exists, use `wallet_add` to load it.")
class BlobError(BaseError):
"""
**Blobs**
"""
class BlobNotFoundError(BlobError):
def __init__(self):
super().__init__("Blob not found.")
class BlobPermissionDeniedError(BlobError):
def __init__(self):
super().__init__("Permission denied to read blob.")
class BlobTooBigError(BlobError):
def __init__(self):
super().__init__("Blob is too big.")
class BlobEmptyError(BlobError):
def __init__(self):
super().__init__("Blob is empty.")
class BlobFailedDecryptionError(BlobError):
def __init__(self):
super().__init__("Failed to decrypt blob.")
class CorruptBlobError(BlobError):
def __init__(self):
super().__init__("Blobs is corrupted.")
class BlobFailedEncryptionError(BlobError):
def __init__(self):
super().__init__("Failed to encrypt blob.")
class DownloadCancelledError(BlobError):
def __init__(self):
super().__init__("Download was canceled.")
class DownloadSDTimeoutError(BlobError):
def __init__(self, download):
self.download = download
super().__init__(f"Failed to download sd blob {download} within timeout.")
class DownloadDataTimeoutError(BlobError):
def __init__(self, download):
self.download = download
super().__init__(f"Failed to download data blobs for sd hash {download} within timeout.")
class InvalidStreamDescriptorError(BlobError):
def __init__(self, message):
self.message = message
super().__init__(f"{message}")
class InvalidDataError(BlobError):
def __init__(self, message):
self.message = message
super().__init__(f"{message}")
class InvalidBlobHashError(BlobError):
def __init__(self, message):
self.message = message
super().__init__(f"{message}")
class ComponentError(BaseError):
"""
**Components**
"""
class ComponentStartConditionNotMetError(ComponentError):
def __init__(self, components):
self.components = components
super().__init__(f"Unresolved dependencies for: {components}")
class ComponentsNotStartedError(ComponentError):
def __init__(self, message):
self.message = message
super().__init__(f"{message}")
class CurrencyExchangeError(BaseError):
"""
**Currency Exchange**
"""
class InvalidExchangeRateResponseError(CurrencyExchangeError):
def __init__(self, source, reason):
self.source = source
self.reason = reason
super().__init__(f"Failed to get exchange rate from {source}: {reason}")
class CurrencyConversionError(CurrencyExchangeError):
def __init__(self, message):
self.message = message
super().__init__(f"{message}")
class InvalidCurrencyError(CurrencyExchangeError):
def __init__(self, currency):
self.currency = currency
super().__init__(f"Invalid currency: {currency} is not a supported currency.")

lbry/error/base.py Normal file

@@ -0,0 +1,9 @@
from binascii import hexlify
def claim_id(claim_hash):
return hexlify(claim_hash[::-1]).decode()
class BaseError(Exception):
pass
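`base.py` also defines `claim_id`, which renders a little-endian claim hash as the conventional big-endian hex claim id. For example:

```python
from binascii import hexlify

def claim_id(claim_hash):
    # Reverse the byte order, then hex-encode.
    return hexlify(claim_hash[::-1]).decode()

assert claim_id(b'\x01\x02\x03') == '030201'
```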

lbry/error/generate.py Normal file

@@ -0,0 +1,167 @@
import re
import sys
import argparse
from pathlib import Path
from textwrap import fill, indent
INDENT = ' ' * 4
CLASS = """
class {name}({parents}):{doc}
"""
INIT = """
def __init__({args}):{fields}
super().__init__({format}"{message}")
"""
FUNCTIONS = ['claim_id']
class ErrorClass:
def __init__(self, hierarchy, name, message):
self.hierarchy = hierarchy.replace('**', '')
self.other_parents = []
if '(' in name:
assert ')' in name, f"Missing closing parenthesis in '{name}'."
self.other_parents = name[name.find('(')+1:name.find(')')].split(',')
name = name[:name.find('(')]
self.name = name
self.class_name = name+'Error'
self.message = message
self.comment = ""
if '--' in message:
self.message, self.comment = message.split('--')
self.message = self.message.strip()
self.comment = self.comment.strip()
@property
def is_leaf(self):
return 'x' not in self.hierarchy
@property
def code(self):
return self.hierarchy.replace('x', '')
@property
def parent_codes(self):
return self.hierarchy[0:2], self.hierarchy[0]
def get_arguments(self):
args = ['self']
for arg in re.findall('{([a-z0-1_()]+)}', self.message):
for func in FUNCTIONS:
if arg.startswith(f'{func}('):
arg = arg[len(f'{func}('):-1]
break
args.append(arg)
return args
@staticmethod
def get_fields(args):
if len(args) > 1:
return ''.join(f'\n{INDENT*2}self.{field} = {field}' for field in args[1:])
return ''
@staticmethod
def get_doc_string(doc):
if doc:
return f'\n{INDENT}"""\n{indent(fill(doc, 100), INDENT)}\n{INDENT}"""'
return ""
def render(self, out, parent):
if not parent:
parents = ['BaseError']
else:
parents = [parent.class_name]
parents += self.other_parents
args = self.get_arguments()
if self.is_leaf:
out.write((CLASS + INIT).format(
name=self.class_name, parents=', '.join(parents),
args=', '.join(args), fields=self.get_fields(args),
message=self.message, doc=self.get_doc_string(self.comment), format='f' if len(args) > 1 else ''
))
else:
out.write(CLASS.format(
name=self.class_name, parents=', '.join(parents),
doc=self.get_doc_string(self.comment or self.message)
))
def get_errors():
with open('README.md', 'r') as readme:
lines = iter(readme.readlines())
for line in lines:
if line.startswith('## Exceptions Table'):
break
for line in lines:
if line.startswith('---:|'):
break
for line in lines:
if not line:
break
yield ErrorClass(*[c.strip() for c in line.split('|')])
def find_parent(stack, child):
for parent_code in child.parent_codes:
parent = stack.get(parent_code)
if parent:
return parent
def generate(out):
out.write(f"from .base import BaseError, {', '.join(FUNCTIONS)}\n")
stack = {}
for error in get_errors():
error.render(out, find_parent(stack, error))
if not error.is_leaf:
assert error.code not in stack, f"Duplicate code: {error.code}"
stack[error.code] = error
def analyze():
errors = {e.class_name: [] for e in get_errors() if e.is_leaf}
here = Path(__file__).absolute().parents[0]
module = here.parent
for file_path in module.glob('**/*.py'):
if here in file_path.parents:
continue
with open(file_path) as src_file:
src = src_file.read()
for error in errors.keys():
found = src.count(error)
if found > 0:
errors[error].append((file_path, found))
print('Used Errors:\n')
for error, used in errors.items():
if used:
print(f' - {error}')
for use in used:
print(f' {use[0].relative_to(module.parent)} {use[1]}')
print('')
print('')
print('Unused Errors:')
for error, used in errors.items():
if not used:
print(f' - {error}')
def main():
parser = argparse.ArgumentParser()
parser.add_argument("action", choices=['generate', 'analyze'])
args = parser.parse_args()
if args.action == "analyze":
analyze()
elif args.action == "generate":
generate(sys.stdout)
if __name__ == "__main__":
main()


@@ -1,5 +1,6 @@
import os
import sys
+import shutil
import signal
import pathlib
import json
@@ -13,9 +14,8 @@ from aiohttp.web import GracefulExit
from docopt import docopt
from lbry import __version__ as lbrynet_version
-from lbry.extras.daemon.loggly_handler import get_loggly_handler
+from lbry.extras.daemon.daemon import Daemon
from lbry.conf import Config, CLIConfig
-from lbry.extras.daemon.Daemon import Daemon
log = logging.getLogger('lbry')
@@ -101,7 +101,7 @@ class ArgumentParser(argparse.ArgumentParser):
self._optionals.title = 'Options'
if group_name is None:
self.epilog = (
-f"Run 'lbrynet COMMAND --help' for more information on a command or group."
+"Run 'lbrynet COMMAND --help' for more information on a command or group."
)
else:
self.epilog = (
@@ -167,16 +167,16 @@ def add_command_parser(parent, command):
def get_argument_parser():
-main = ArgumentParser(
+root = ArgumentParser(
'lbrynet', description='An interface to the LBRY Network.', allow_abbrev=False,
)
-main.add_argument(
+root.add_argument(
'-v', '--version', dest='cli_version', action="store_true",
help='Show lbrynet CLI version and exit.'
)
-main.set_defaults(group=None, command=None)
+root.set_defaults(group=None, command=None)
-CLIConfig.contribute_to_argparse(main)
+CLIConfig.contribute_to_argparse(root)
-sub = main.add_subparsers(metavar='COMMAND')
+sub = root.add_subparsers(metavar='COMMAND')
start = sub.add_parser(
'start',
usage='lbrynet start [--config FILE] [--data-dir DIR] [--wallet-dir DIR] [--download-dir DIR] ...',
@@ -186,11 +186,19 @@ def get_argument_parser():
'--quiet', dest='quiet', action="store_true",
help='Disable all console output.'
)
+start.add_argument(
+'--no-logging', dest='no_logging', action="store_true",
+help='Disable all logging of any kind.'
+)
start.add_argument(
'--verbose', nargs="*",
help=('Enable debug output for lbry logger and event loop. Optionally specify loggers for which debug output '
'should selectively be applied.')
)
+start.add_argument(
+'--initial-headers', dest='initial_headers',
+help='Specify path to initial blockchain headers, faster than downloading them on first run.'
+)
Config.contribute_to_argparse(start)
start.set_defaults(command='start', start_parser=start, doc=start.format_help())
@@ -212,15 +220,18 @@ def get_argument_parser():
else:
add_command_parser(groups[command['group']], command)
-return main
+return root
def ensure_directory_exists(path: str):
if not os.path.isdir(path):
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
+use_effective_ids = os.access in os.supports_effective_ids
+if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
+raise PermissionError(f"The following directory is not writable: {path}")
-LOG_MODULES = ('lbry', 'torba', 'aioupnp')
+LOG_MODULES = 'lbry', 'aioupnp'
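`os.access` with `effective_ids=True` checks permissions against the effective rather than the real uid/gid, but only on platforms that support it; hence the feature test added above. The same guard in isolation:

```python
import os

def assert_writable(path: str) -> None:
    # effective_ids is only honored where os.access appears in
    # os.supports_effective_ids (mostly Unix).
    use_effective_ids = os.access in os.supports_effective_ids
    if not os.access(path, os.W_OK, effective_ids=use_effective_ids):
        raise PermissionError(f"The following directory is not writable: {path}")
```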
def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config):
@@ -236,7 +247,6 @@ def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config
logger.getChild(module_name).addHandler(handler)
logger.getChild('lbry').setLevel(logging.INFO)
-logger.getChild('torba').setLevel(logging.INFO)
logger.getChild('aioupnp').setLevel(logging.WARNING)
logger.getChild('aiohttp').setLevel(logging.CRITICAL)
@@ -247,16 +257,12 @@ def setup_logging(logger: logging.Logger, args: argparse.Namespace, conf: Config
else:
logger.getChild('lbry').setLevel(logging.DEBUG)
-if conf.share_usage_data:
-loggly_handler = get_loggly_handler()
-loggly_handler.setLevel(logging.ERROR)
-logger.getChild('lbry').addHandler(loggly_handler)
def run_daemon(args: argparse.Namespace, conf: Config):
loop = asyncio.get_event_loop()
if args.verbose is not None:
loop.set_debug(True)
+if not args.no_logging:
setup_logging(logging.getLogger(), args, conf)
daemon = Daemon(conf)
@@ -297,6 +303,16 @@ def main(argv=None):
if args.help:
args.start_parser.print_help()
else:
+if args.initial_headers:
+ledger_path = os.path.join(conf.wallet_dir, 'lbc_mainnet')
+ensure_directory_exists(ledger_path)
+current_size = 0
+headers_path = os.path.join(ledger_path, 'headers')
+if os.path.exists(headers_path):
+current_size = os.stat(headers_path).st_size
+if os.stat(args.initial_headers).st_size > current_size:
+log.info('Copying header from %s to %s', args.initial_headers, headers_path)
+shutil.copy(args.initial_headers, headers_path)
run_daemon(args, conf)
elif args.command is not None:
doc = args.doc


@@ -1,8 +1,8 @@
import asyncio
import collections
import logging
-import aiohttp
import typing
+import aiohttp
from lbry import utils
from lbry.conf import Config
from lbry.extras import system_info
@@ -18,6 +18,7 @@ DOWNLOAD_STARTED = 'Download Started'
DOWNLOAD_ERRORED = 'Download Errored'
DOWNLOAD_FINISHED = 'Download Finished'
HEARTBEAT = 'Heartbeat'
+DISK_SPACE = 'Disk Space'
CLAIM_ACTION = 'Claim Action'  # publish/create/update/abandon
NEW_CHANNEL = 'New Channel'
CREDITS_SENT = 'Credits Sent'
@@ -66,7 +67,7 @@ def _download_properties(conf: Config, external_ip: str, resolve_duration: float
"node_rpc_timeout": conf.node_rpc_timeout,
"peer_connect_timeout": conf.peer_connect_timeout,
"blob_download_timeout": conf.blob_download_timeout,
-"use_fixed_peers": len(conf.reflector_servers) > 0,
+"use_fixed_peers": len(conf.fixed_peers) > 0,
"fixed_peer_delay": fixed_peer_delay,
"added_fixed_peers": added_fixed_peers,
"active_peer_count": active_peer_count,
@ -110,7 +111,6 @@ class AnalyticsManager:
self.cookies = {} self.cookies = {}
self.url = ANALYTICS_ENDPOINT self.url = ANALYTICS_ENDPOINT
self._write_key = utils.deobfuscate(ANALYTICS_TOKEN) self._write_key = utils.deobfuscate(ANALYTICS_TOKEN)
self._enabled = conf.share_usage_data
self._tracked_data = collections.defaultdict(list) self._tracked_data = collections.defaultdict(list)
self.context = _make_context(system_info.get_platform()) self.context = _make_context(system_info.get_platform())
self.installation_id = installation_id self.installation_id = installation_id
@ -118,20 +118,24 @@ class AnalyticsManager:
self.task: typing.Optional[asyncio.Task] = None self.task: typing.Optional[asyncio.Task] = None
self.external_ip: typing.Optional[str] = None self.external_ip: typing.Optional[str] = None
@property
def enabled(self):
return self.conf.share_usage_data
@property @property
def is_started(self): def is_started(self):
return self.task is not None return self.task is not None
async def start(self): async def start(self):
if self._enabled and self.task is None: if self.task is None:
self.external_ip = await utils.get_external_ip()
self.task = asyncio.create_task(self.run()) self.task = asyncio.create_task(self.run())
async def run(self): async def run(self):
while True: while True:
if self.enabled:
self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
await self._send_heartbeat() await self._send_heartbeat()
await asyncio.sleep(1800) await asyncio.sleep(1800)
self.external_ip = await utils.get_external_ip()
def stop(self): def stop(self):
if self.task is not None and not self.task.done(): if self.task is not None and not self.task.done():
@ -154,7 +158,7 @@ class AnalyticsManager:
async def track(self, event: typing.Dict): async def track(self, event: typing.Dict):
"""Send a single tracking event""" """Send a single tracking event"""
if self._enabled: if self.enabled:
log.debug('Sending track event: %s', event) log.debug('Sending track event: %s', event)
await self._post(event) await self._post(event)
@ -166,6 +170,15 @@ class AnalyticsManager:
}) })
) )
async def send_disk_space_used(self, storage_used, storage_limit, is_from_network_quota):
await self.track(
self._event(DISK_SPACE, {
'used': storage_used,
'limit': storage_limit,
'from_network_quota': is_from_network_quota
})
)
async def send_server_startup(self): async def send_server_startup(self):
await self.track(self._event(SERVER_STARTUP)) await self.track(self._event(SERVER_STARTUP))
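The pattern worth noting in this diff: the `_enabled` value cached in `__init__` becomes an `enabled` property that re-reads `conf.share_usage_data` on every access, and `run()` now checks it on each heartbeat cycle instead of refusing to start the task at all, so flipping the preference at runtime takes effect without a daemon restart. A reduced sketch of that live-config pattern (the `Config` shape is invented for illustration):

import asyncio

class Config:
    share_usage_data = False  # flipped at runtime by a settings call

class Reporter:
    def __init__(self, conf: Config):
        self.conf = conf

    @property
    def enabled(self) -> bool:
        # read the config on every access instead of caching it in __init__
        return self.conf.share_usage_data

    async def run(self):
        while True:
            if self.enabled:  # honored on every cycle
                print("heartbeat")
            await asyncio.sleep(1800)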

View file

@@ -1,5 +1,5 @@
-from lbry.conf import Config
 from lbry.extras.cli import execute_command
+from lbry.conf import Config
 def daemon_rpc(conf: Config, method: str, **kwargs):

View file

@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from lbry.conf import Config
-from lbry.extras.daemon.ComponentManager import ComponentManager
+from lbry.extras.daemon.componentmanager import ComponentManager
 log = logging.getLogger(__name__)
@@ -9,7 +9,7 @@ log = logging.getLogger(__name__)
 class ComponentType(type):
     def __new__(mcs, name, bases, newattrs):
         klass = type.__new__(mcs, name, bases, newattrs)
-        if name != "Component":
+        if name != "Component" and newattrs['__module__'] != 'lbry.testcase':
             ComponentManager.default_component_classes[klass.component_name] = klass
         return klass
@@ -37,7 +37,7 @@ class Component(metaclass=ComponentType):
     def running(self):
         return self._running
-    async def get_status(self):
+    async def get_status(self):  # pylint: disable=no-self-use
         return
     async def start(self):
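The one-line metaclass change above keeps test doubles out of the global registry: every concrete `Component` subclass is registered the moment its class statement executes, and the added `__module__` check now skips classes defined in `lbry.testcase`. A generic sketch of metaclass auto-registration with such an exclusion (names here are illustrative, not lbry's):

registry = {}

class PluginType(type):
    def __new__(mcs, name, bases, newattrs):
        klass = super().__new__(mcs, name, bases, newattrs)
        # skip the abstract base and anything defined by the test suite
        if name != "Plugin" and newattrs['__module__'] != 'myapp.testcase':
            registry[newattrs.get('plugin_name', name)] = klass
        return klass

class Plugin(metaclass=PluginType):
    plugin_name = None

class WalletPlugin(Plugin):
    plugin_name = "wallet"

assert registry == {"wallet": WalletPlugin}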

View file

@@ -1,7 +1,7 @@
 import logging
 import asyncio
 from lbry.conf import Config
-from lbry.error import ComponentStartConditionNotMet
+from lbry.error import ComponentStartConditionNotMetError
 from lbry.dht.peer import PeerManager
 log = logging.getLogger(__name__)
@@ -42,7 +42,7 @@ class ComponentManager:
         self.analytics_manager = analytics_manager
         self.component_classes = {}
         self.components = set()
-        self.started = asyncio.Event(loop=self.loop)
+        self.started = asyncio.Event()
         self.peer_manager = peer_manager or PeerManager(asyncio.get_event_loop_policy().get_event_loop())
         for component_name, component_class in self.default_component_classes.items():
@@ -106,7 +106,7 @@ class ComponentManager:
             staged.update(to_stage)
             steps.append(step)
         elif components:
-            raise ComponentStartConditionNotMet("Unresolved dependencies for: %s" % components)
+            raise ComponentStartConditionNotMetError(components)
         if reverse:
             steps.reverse()
         return steps
@@ -118,7 +118,7 @@ class ComponentManager:
                 component._setup() for component in stage if not component.running
             ]
             if needing_start:
-                await asyncio.wait(needing_start)
+                await asyncio.wait(map(asyncio.create_task, needing_start))
         self.started.set()
     async def stop(self):
@@ -131,7 +131,7 @@ class ComponentManager:
                 component._stop() for component in stage if component.running
             ]
             if needing_stop:
-                await asyncio.wait(needing_stop)
+                await asyncio.wait(map(asyncio.create_task, needing_stop))
     def all_components_running(self, *component_names):
         """
@@ -158,11 +158,14 @@ class ComponentManager:
             for component in self.components
         }
-    def get_component(self, component_name):
+    def get_actual_component(self, component_name):
         for component in self.components:
             if component.component_name == component_name:
-                return component.component
+                return component
         raise NameError(component_name)
+    def get_component(self, component_name):
+        return self.get_actual_component(component_name).component
     def has_component(self, component_name):
         return any(component for component in self.components if component_name == component.component_name)
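Two asyncio deprecations drive most of this file's changes: `asyncio.Event()` lost its `loop` argument (deprecated in Python 3.8, removed in 3.10), and `asyncio.wait()` stopped accepting bare coroutines (deprecated in 3.8, a TypeError since 3.11), which is why each `_setup()`/`_stop()` coroutine is now wrapped with `asyncio.create_task`. A minimal sketch of the required wrapping:

import asyncio

async def job(n):
    await asyncio.sleep(0)
    return n

async def main():
    coros = [job(1), job(2)]
    # await asyncio.wait(coros)  # deprecated in 3.8, TypeError since 3.11
    done, _pending = await asyncio.wait([asyncio.create_task(c) for c in coros])
    print(sorted(t.result() for t in done))  # [1, 2]

asyncio.run(main())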

View file

@@ -1,10 +1,10 @@
-import hashlib
+import math
 import os
 import asyncio
 import logging
-import math
 import binascii
 import typing
 import base58
 from aioupnp import __version__ as aioupnp_version
@@ -13,15 +13,22 @@ from aioupnp.fault import UPnPError
 from lbry import utils
 from lbry.dht.node import Node
+from lbry.dht.peer import is_valid_public_ipv4
 from lbry.dht.blob_announcer import BlobAnnouncer
 from lbry.blob.blob_manager import BlobManager
+from lbry.blob.disk_space_manager import DiskSpaceManager
 from lbry.blob_exchange.server import BlobServer
+from lbry.stream.background_downloader import BackgroundDownloader
 from lbry.stream.stream_manager import StreamManager
-from lbry.extras.daemon.Component import Component
+from lbry.file.file_manager import FileManager
+from lbry.extras.daemon.component import Component
 from lbry.extras.daemon.exchange_rate_manager import ExchangeRateManager
 from lbry.extras.daemon.storage import SQLiteStorage
-from lbry.wallet import LbryWalletManager
-from lbry.wallet.header import Headers
+from lbry.torrent.torrent_manager import TorrentManager
+from lbry.wallet import WalletManager
+from lbry.wallet.usage_payment import WalletServerPayer
+from lbry.torrent.tracker import TrackerClient
+from lbry.torrent.session import TorrentSession
 log = logging.getLogger(__name__)
@@ -29,14 +36,18 @@ log = logging.getLogger(__name__)
 DATABASE_COMPONENT = "database"
 BLOB_COMPONENT = "blob_manager"
-HEADERS_COMPONENT = "blockchain_headers"
 WALLET_COMPONENT = "wallet"
+WALLET_SERVER_PAYMENTS_COMPONENT = "wallet_server_payments"
 DHT_COMPONENT = "dht"
 HASH_ANNOUNCER_COMPONENT = "hash_announcer"
-STREAM_MANAGER_COMPONENT = "stream_manager"
+FILE_MANAGER_COMPONENT = "file_manager"
+DISK_SPACE_COMPONENT = "disk_space"
+BACKGROUND_DOWNLOADER_COMPONENT = "background_downloader"
 PEER_PROTOCOL_SERVER_COMPONENT = "peer_protocol_server"
 UPNP_COMPONENT = "upnp"
 EXCHANGE_RATE_MANAGER_COMPONENT = "exchange_rate_manager"
+TRACKER_ANNOUNCER_COMPONENT = "tracker_announcer_component"
+LIBTORRENT_COMPONENT = "libtorrent_component"
 class DatabaseComponent(Component):
@@ -52,7 +63,7 @@ class DatabaseComponent(Component):
     @staticmethod
     def get_current_db_revision():
-        return 12
+        return 15
     @property
     def revision_filename(self):
@@ -97,135 +108,9 @@ class DatabaseComponent(Component):
         self.storage = None
-class HeadersComponent(Component):
-    component_name = HEADERS_COMPONENT
-    HEADERS_URL = "https://headers.lbry.io/blockchain_headers_latest"
-    CHECKPOINT = ('100b33ca3d0b86a48f0d6d6f30458a130ecb89d5affefe4afccb134d5a40f4c2', 600_000)
-    def __init__(self, component_manager):
-        super().__init__(component_manager)
-        self.headers_dir = os.path.join(self.conf.wallet_dir, 'lbc_mainnet')
-        self.headers_file = os.path.join(self.headers_dir, 'headers')
-        self.old_file = os.path.join(self.conf.wallet_dir, 'blockchain_headers')
-        self.headers = Headers(self.headers_file)
-        self.is_downloading_headers = False
-        self._headers_progress_percent = 0
-    @property
-    def component(self):
-        return self
-    def _round_progress(self, local_height, remote_height):
-        return min(max(math.ceil(float(local_height) / float(remote_height) * 100), 0), 100)
-    async def get_status(self) -> dict:
-        progress = None
-        if self.is_downloading_headers:
-            progress = self._headers_progress_percent
-        elif self.component_manager.has_component(WALLET_COMPONENT):
-            wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
-            if wallet_manager and wallet_manager.ledger.network.remote_height > 0:
-                local_height = wallet_manager.ledger.headers.height
-                remote_height = wallet_manager.ledger.network.remote_height
-                progress = self._round_progress(local_height, remote_height)
-        return {
-            'downloading_headers': True,
-            'download_progress': progress
-        } if progress is not None and progress < 100 else {}
-    async def fetch_headers_from_s3(self):
-        local_header_size = self.local_header_file_size()
-        resume_header = {"Range": f"bytes={local_header_size}-"}
-        async with utils.aiohttp_request('get', self.HEADERS_URL, headers=resume_header) as response:
-            if response.status == 406 or response.content_length < self.headers.header_size:  # our file is bigger
-                log.warning("s3 is more out of date than we are")
-                return
-            final_size_after_download = response.content_length + local_header_size
-            if final_size_after_download % self.headers.header_size != 0:
-                log.warning("s3 appears to have corrupted header")
-                return
-            write_mode = "wb"
-            if local_header_size > 0:
-                log.info("Resuming download of %i bytes from s3", response.content_length)
-                write_mode = "a+b"
-            with open(self.headers_file, write_mode) as fd:
-                while not response.content.at_eof():
-                    local_header_size += fd.write(await response.content.readany())
-                    self._headers_progress_percent = self._round_progress(
-                        local_header_size, final_size_after_download
-                    )
-    def local_header_file_size(self) -> int:
-        if os.path.isfile(self.headers_file):
-            return os.stat(self.headers_file).st_size
-        return 0
-    async def get_downloadable_header_height(self) -> typing.Optional[int]:
-        async with utils.aiohttp_request('HEAD', self.HEADERS_URL) as response:
-            if response.status != 200:
-                log.warning("Header download error, unexpected response code: %s", response.status)
-                return -1
-            return response.content_length // self.headers.header_size
-    async def should_download_headers_from_s3(self) -> bool:
-        if self.conf.blockchain_name != "lbrycrd_main":
-            return False
-        s3_headers_depth = self.conf.s3_headers_depth
-        if not s3_headers_depth:
-            return False
-        local_height = self.local_header_file_size() // self.headers.header_size
-        remote_height = await self.get_downloadable_header_height()
-        if remote_height is not None:
-            log.info("remote height: %i, local height: %i", remote_height, local_height)
-            if remote_height > (local_height + s3_headers_depth):
-                return True
-        return False
-    def verify_checkpoint(self):
-        expected_hash, at_height = self.CHECKPOINT
-        if self.local_header_file_size() // self.headers.header_size < at_height:
-            return False
-        hash = hashlib.sha256()
-        chunk_size = self.headers.header_size * 1000
-        with open(self.headers_file, 'rb') as header_file:
-            data = header_file.read(chunk_size)
-            while data and header_file.tell() <= at_height * self.headers.header_size:
-                hash.update(data)
-                data = header_file.read(chunk_size)
-        return hash.hexdigest() == expected_hash
-    async def start(self):
-        if not os.path.exists(self.headers_dir):
-            os.mkdir(self.headers_dir)
-        if os.path.exists(self.old_file):
-            log.warning("Moving old headers from %s to %s.", self.old_file, self.headers_file)
-            os.rename(self.old_file, self.headers_file)
-        try:
-            if await self.should_download_headers_from_s3():
-                self.is_downloading_headers = True
-                await self.fetch_headers_from_s3()
-        except Exception as err:
-            log.error("failed to fetch headers from s3: %s", err)
-        finally:
-            self.is_downloading_headers = False
-            # fixme: workaround, this should happen before download but happens after because headers.connect fail
-            if not self.verify_checkpoint():
-                log.info("Checkpoint failed, verifying headers using slower method.")
-                await self.headers.open()
-                await self.headers.repair()
-                await self.headers.close()
-            else:
-                log.info("Header checkpoint verified.")
-    async def stop(self):
-        pass
 class WalletComponent(Component):
     component_name = WALLET_COMPONENT
-    depends_on = [DATABASE_COMPONENT, HEADERS_COMPONENT]
+    depends_on = [DATABASE_COMPONENT]
     def __init__(self, component_manager):
         super().__init__(component_manager)
@@ -236,27 +121,53 @@ class WalletComponent(Component):
         return self.wallet_manager
     async def get_status(self):
-        if self.wallet_manager and self.wallet_manager.ledger.network.remote_height:
-            local_height = self.wallet_manager.ledger.headers.height
-            remote_height = self.wallet_manager.ledger.network.remote_height
-            best_hash = self.wallet_manager.get_best_blockhash()
-            return {
-                'connected_servers': [
+        if self.wallet_manager is None:
+            return
+        is_connected = self.wallet_manager.ledger.network.is_connected
+        sessions = []
+        connected = None
+        if is_connected:
+            addr, port = self.wallet_manager.ledger.network.client.server
+            connected = f"{addr}:{port}"
+            sessions.append(self.wallet_manager.ledger.network.client)
+        result = {
+            'connected': connected,
+            'connected_features': self.wallet_manager.ledger.network.server_features,
+            'servers': [
                 {
                     'host': session.server[0],
                     'port': session.server[1],
-                    'latency': round(session.connection_latency, 2),
-                } for session in self.wallet_manager.ledger.network.session_pool.sessions
-                if session and session.available
+                    'latency': session.connection_latency,
+                    'availability': session.available,
+                } for session in sessions
             ],
+            'known_servers': len(self.wallet_manager.ledger.network.known_hubs),
+            'available_servers': 1 if is_connected else 0
+        }
+        if self.wallet_manager.ledger.network.remote_height:
+            local_height = self.wallet_manager.ledger.local_height_including_downloaded_height
+            disk_height = len(self.wallet_manager.ledger.headers)
+            remote_height = self.wallet_manager.ledger.network.remote_height
+            download_height, target_height = local_height - disk_height, remote_height - disk_height
+            if target_height > 0:
+                progress = min(max(math.ceil(float(download_height) / float(target_height) * 100), 0), 100)
+            else:
+                progress = 100
+            best_hash = await self.wallet_manager.get_best_blockhash()
+            result.update({
+                'headers_synchronization_progress': progress,
                 'blocks': max(local_height, 0),
                 'blocks_behind': max(remote_height - local_height, 0),
                 'best_blockhash': best_hash,
-            }
+            })
+        return result
     async def start(self):
-        log.info("Starting torba wallet")
-        self.wallet_manager = await LbryWalletManager.from_lbrynet_config(self.conf)
+        log.info("Starting wallet")
+        self.wallet_manager = await WalletManager.from_lbrynet_config(self.conf)
         await self.wallet_manager.start()
     async def stop(self):
@@ -264,6 +175,34 @@ class WalletComponent(Component):
         self.wallet_manager = None
+class WalletServerPaymentsComponent(Component):
+    component_name = WALLET_SERVER_PAYMENTS_COMPONENT
+    depends_on = [WALLET_COMPONENT]
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.usage_payment_service = WalletServerPayer(
+            max_fee=self.conf.max_wallet_server_fee, analytics_manager=self.component_manager.analytics_manager,
+        )
+    @property
+    def component(self) -> typing.Optional[WalletServerPayer]:
+        return self.usage_payment_service
+    async def start(self):
+        wallet_manager = self.component_manager.get_component(WALLET_COMPONENT)
+        await self.usage_payment_service.start(wallet_manager.ledger, wallet_manager.default_wallet)
+    async def stop(self):
+        await self.usage_payment_service.stop()
+    async def get_status(self):
+        return {
+            'max_fee': self.usage_payment_service.max_fee,
+            'running': self.usage_payment_service.running
+        }
 class BlobComponent(Component):
     component_name = BLOB_COMPONENT
     depends_on = [DATABASE_COMPONENT]
@@ -304,7 +243,7 @@ class BlobComponent(Component):
 class DHTComponent(Component):
     component_name = DHT_COMPONENT
-    depends_on = [UPNP_COMPONENT]
+    depends_on = [UPNP_COMPONENT, DATABASE_COMPONENT]
     def __init__(self, component_manager):
         super().__init__(component_manager)
@@ -338,8 +277,9 @@ class DHTComponent(Component):
         self.external_peer_port = upnp_component.upnp_redirects.get("TCP", self.conf.tcp_port)
         self.external_udp_port = upnp_component.upnp_redirects.get("UDP", self.conf.udp_port)
         external_ip = upnp_component.external_ip
+        storage = self.component_manager.get_component(DATABASE_COMPONENT)
         if not external_ip:
-            external_ip = await utils.get_external_ip()
+            external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
             if not external_ip:
                 log.warning("failed to get external ip")
@@ -352,11 +292,11 @@ class DHTComponent(Component):
             external_ip=external_ip,
             peer_port=self.external_peer_port,
             rpc_timeout=self.conf.node_rpc_timeout,
-            split_buckets_under_index=self.conf.split_buckets_under_index
-        )
-        self.dht_node.start(
-            interface=self.conf.network_interface, known_node_urls=self.conf.known_dht_nodes
-        )
+            split_buckets_under_index=self.conf.split_buckets_under_index,
+            is_bootstrap_node=self.conf.is_bootstrap_node,
+            storage=storage
+        )
+        self.dht_node.start(self.conf.network_interface, self.conf.known_dht_nodes)
         log.info("Started the dht")
     async def stop(self):
@@ -392,41 +332,175 @@ class HashAnnouncerComponent(Component):
         }
-class StreamManagerComponent(Component):
-    component_name = STREAM_MANAGER_COMPONENT
+class FileManagerComponent(Component):
+    component_name = FILE_MANAGER_COMPONENT
     depends_on = [BLOB_COMPONENT, DATABASE_COMPONENT, WALLET_COMPONENT]
     def __init__(self, component_manager):
         super().__init__(component_manager)
-        self.stream_manager: typing.Optional[StreamManager] = None
+        self.file_manager: typing.Optional[FileManager] = None
     @property
-    def component(self) -> typing.Optional[StreamManager]:
-        return self.stream_manager
+    def component(self) -> typing.Optional[FileManager]:
+        return self.file_manager
     async def get_status(self):
-        if not self.stream_manager:
+        if not self.file_manager:
             return
         return {
-            'managed_files': len(self.stream_manager.streams),
+            'managed_files': len(self.file_manager.get_filtered()),
         }
     async def start(self):
         blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
         storage = self.component_manager.get_component(DATABASE_COMPONENT)
         wallet = self.component_manager.get_component(WALLET_COMPONENT)
-        node = self.component_manager.get_component(DHT_COMPONENT)\
+        node = self.component_manager.get_component(DHT_COMPONENT) \
             if self.component_manager.has_component(DHT_COMPONENT) else None
         log.info('Starting the file manager')
         loop = asyncio.get_event_loop()
-        self.stream_manager = StreamManager(
-            loop, self.conf, blob_manager, wallet, storage, node, self.component_manager.analytics_manager
-        )
-        await self.stream_manager.start()
+        self.file_manager = FileManager(
+            loop, self.conf, wallet, storage, self.component_manager.analytics_manager
+        )
+        self.file_manager.source_managers['stream'] = StreamManager(
+            loop, self.conf, blob_manager, wallet, storage, node,
+        )
+        if self.component_manager.has_component(LIBTORRENT_COMPONENT):
+            torrent = self.component_manager.get_component(LIBTORRENT_COMPONENT)
+            self.file_manager.source_managers['torrent'] = TorrentManager(
+                loop, self.conf, torrent, storage, self.component_manager.analytics_manager
+            )
+        await self.file_manager.start()
         log.info('Done setting up file manager')
     async def stop(self):
-        self.stream_manager.stop()
+        await self.file_manager.stop()
+class BackgroundDownloaderComponent(Component):
+    MIN_PREFIX_COLLIDING_BITS = 8
+    component_name = BACKGROUND_DOWNLOADER_COMPONENT
+    depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT, DISK_SPACE_COMPONENT]
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.background_task: typing.Optional[asyncio.Task] = None
+        self.download_loop_delay_seconds = 60
+        self.ongoing_download: typing.Optional[asyncio.Task] = None
+        self.space_manager: typing.Optional[DiskSpaceManager] = None
+        self.blob_manager: typing.Optional[BlobManager] = None
+        self.background_downloader: typing.Optional[BackgroundDownloader] = None
+        self.dht_node: typing.Optional[Node] = None
+        self.space_available: typing.Optional[int] = None
+    @property
+    def is_busy(self):
+        return bool(self.ongoing_download and not self.ongoing_download.done())
+    @property
+    def component(self) -> 'BackgroundDownloaderComponent':
+        return self
+    async def get_status(self):
+        return {'running': self.background_task is not None and not self.background_task.done(),
+                'available_free_space_mb': self.space_available,
+                'ongoing_download': self.is_busy}
+    async def download_blobs_in_background(self):
+        while True:
+            self.space_available = await self.space_manager.get_free_space_mb(True)
+            if not self.is_busy and self.space_available > 10:
+                self._download_next_close_blob_hash()
+            await asyncio.sleep(self.download_loop_delay_seconds)
+    def _download_next_close_blob_hash(self):
+        node_id = self.dht_node.protocol.node_id
+        for blob_hash in self.dht_node.stored_blob_hashes:
+            if blob_hash.hex() in self.blob_manager.completed_blob_hashes:
+                continue
+            if utils.get_colliding_prefix_bits(node_id, blob_hash) >= self.MIN_PREFIX_COLLIDING_BITS:
+                self.ongoing_download = asyncio.create_task(self.background_downloader.download_blobs(blob_hash.hex()))
+                return
+    async def start(self):
+        self.space_manager: DiskSpaceManager = self.component_manager.get_component(DISK_SPACE_COMPONENT)
+        if not self.component_manager.has_component(DHT_COMPONENT):
+            return
+        self.dht_node = self.component_manager.get_component(DHT_COMPONENT)
+        self.blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
+        storage = self.component_manager.get_component(DATABASE_COMPONENT)
+        self.background_downloader = BackgroundDownloader(self.conf, storage, self.blob_manager, self.dht_node)
+        self.background_task = asyncio.create_task(self.download_blobs_in_background())
+    async def stop(self):
+        if self.ongoing_download and not self.ongoing_download.done():
+            self.ongoing_download.cancel()
+        if self.background_task:
+            self.background_task.cancel()
+class DiskSpaceComponent(Component):
+    component_name = DISK_SPACE_COMPONENT
+    depends_on = [DATABASE_COMPONENT, BLOB_COMPONENT]
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.disk_space_manager: typing.Optional[DiskSpaceManager] = None
+    @property
+    def component(self) -> typing.Optional[DiskSpaceManager]:
+        return self.disk_space_manager
+    async def get_status(self):
+        if self.disk_space_manager:
+            space_used = await self.disk_space_manager.get_space_used_mb(cached=True)
+            return {
+                'total_used_mb': space_used['total'],
+                'published_blobs_storage_used_mb': space_used['private_storage'],
+                'content_blobs_storage_used_mb': space_used['content_storage'],
+                'seed_blobs_storage_used_mb': space_used['network_storage'],
+                'running': self.disk_space_manager.running,
+            }
+        return {'space_used': '0', 'network_seeding_space_used': '0', 'running': False}
+    async def start(self):
+        db = self.component_manager.get_component(DATABASE_COMPONENT)
+        blob_manager = self.component_manager.get_component(BLOB_COMPONENT)
+        self.disk_space_manager = DiskSpaceManager(
+            self.conf, db, blob_manager,
+            analytics=self.component_manager.analytics_manager
+        )
+        await self.disk_space_manager.start()
+    async def stop(self):
+        await self.disk_space_manager.stop()
+class TorrentComponent(Component):
+    component_name = LIBTORRENT_COMPONENT
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.torrent_session = None
+    @property
+    def component(self) -> typing.Optional[TorrentSession]:
+        return self.torrent_session
+    async def get_status(self):
+        if not self.torrent_session:
+            return
+        return {
+            'running': True,  # TODO: what to return here?
+        }
+    async def start(self):
+        self.torrent_session = TorrentSession(asyncio.get_event_loop(), None)
+        await self.torrent_session.bind()  # TODO: specify host/port
+    async def stop(self):
+        if self.torrent_session:
+            await self.torrent_session.pause()
 class PeerProtocolServerComponent(Component):
@@ -443,9 +517,8 @@ class PeerProtocolServerComponent(Component):
     async def start(self):
         log.info("start blob server")
-        upnp = self.component_manager.get_component(UPNP_COMPONENT)
         blob_manager: BlobManager = self.component_manager.get_component(BLOB_COMPONENT)
-        wallet: LbryWalletManager = self.component_manager.get_component(WALLET_COMPONENT)
+        wallet: WalletManager = self.component_manager.get_component(WALLET_COMPONENT)
         peer_port = self.conf.tcp_port
         address = await wallet.get_unused_address()
         self.blob_server = BlobServer(asyncio.get_event_loop(), blob_manager, address)
@@ -478,7 +551,7 @@ class UPnPComponent(Component):
         while True:
             if now:
                 await self._maintain_redirects()
-            await asyncio.sleep(360, loop=self.component_manager.loop)
+            await asyncio.sleep(360)
     async def _maintain_redirects(self):
         # setup the gateway if necessary
@@ -487,8 +560,6 @@ class UPnPComponent(Component):
                 self.upnp = await UPnP.discover(loop=self.component_manager.loop)
                 log.info("found upnp gateway: %s", self.upnp.gateway.manufacturer_string)
             except Exception as err:
-                if isinstance(err, asyncio.CancelledError):
-                    raise
                 log.warning("upnp discovery failed: %s", err)
                 self.upnp = None
@@ -501,14 +572,17 @@ class UPnPComponent(Component):
                 log.info("got external ip from UPnP: %s", external_ip)
             except (asyncio.TimeoutError, UPnPError, NotImplementedError):
                 pass
-        if external_ip == "0.0.0.0" or (external_ip and external_ip.startswith("192.")):
-            log.warning("unable to get external ip from UPnP, checking lbry.com fallback")
-            external_ip = await utils.get_external_ip()
+        if external_ip and not is_valid_public_ipv4(external_ip):
+            log.warning("UPnP returned a private/reserved ip - %s, checking lbry.com fallback", external_ip)
+            external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
         if self.external_ip and self.external_ip != external_ip:
            log.info("external ip changed from %s to %s", self.external_ip, external_ip)
         if external_ip:
             self.external_ip = external_ip
+            dht_component = self.component_manager.get_component(DHT_COMPONENT)
+            if dht_component:
+                dht_node = dht_component.component
+                dht_node.protocol.external_ip = external_ip
         # assert self.external_ip is not None  # TODO: handle going/starting offline
         if not self.upnp_redirects and self.upnp:  # setup missing redirects
@@ -553,22 +627,24 @@ class UPnPComponent(Component):
                     log.info("refreshed upnp redirect for peer port: %i", tcp_port)
                 except (asyncio.TimeoutError, UPnPError, NotImplementedError):
                     del self.upnp_redirects['TCP']
-            if ('TCP' in self.upnp_redirects
-                    and PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and (
-                    'UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
+            if ('TCP' in self.upnp_redirects and
+                    PEER_PROTOCOL_SERVER_COMPONENT not in self.component_manager.skip_components) and \
+                    ('UDP' in self.upnp_redirects and DHT_COMPONENT not in self.component_manager.skip_components):
                 if self.upnp_redirects:
                     log.debug("upnp redirects are still active")
     async def start(self):
         log.info("detecting external ip")
         if not self.use_upnp:
-            self.external_ip = await utils.get_external_ip()
+            self.external_ip, _ = await utils.get_external_ip(self.conf.lbryum_servers)
             return
         success = False
         await self._maintain_redirects()
         if self.upnp:
-            if not self.upnp_redirects and not all([x in self.component_manager.skip_components for x in
-                                                    (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)]):
+            if not self.upnp_redirects and not all(
+                    x in self.component_manager.skip_components
+                    for x in (DHT_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT)
+            ):
                 log.error("failed to setup upnp")
             else:
                 success = True
@@ -577,13 +653,15 @@ class UPnPComponent(Component):
         else:
             log.error("failed to setup upnp")
         if not self.external_ip:
-            self.external_ip = await utils.get_external_ip()
+            self.external_ip, probed_url = await utils.get_external_ip(self.conf.lbryum_servers)
             if self.external_ip:
-                log.info("detected external ip using lbry.com fallback")
+                log.info("detected external ip using %s fallback", probed_url)
         if self.component_manager.analytics_manager:
-            await self.component_manager.analytics_manager.send_upnp_setup_success_fail(
-                success, await self.get_status()
-            )
+            self.component_manager.loop.create_task(
+                self.component_manager.analytics_manager.send_upnp_setup_success_fail(
+                    success, await self.get_status()
+                )
+            )
         self._maintain_redirects_task = self.component_manager.loop.create_task(
             self._repeatedly_maintain_redirects(now=False)
         )
@@ -593,7 +671,7 @@ class UPnPComponent(Component):
             log.info("Removing upnp redirects: %s", self.upnp_redirects)
             await asyncio.wait([
                 self.upnp.delete_port_mapping(port, protocol) for protocol, port in self.upnp_redirects.items()
-            ], loop=self.component_manager.loop)
+            ])
         if self._maintain_redirects_task and not self._maintain_redirects_task.done():
             self._maintain_redirects_task.cancel()
@@ -624,3 +702,49 @@ class ExchangeRateManagerComponent(Component):
     async def stop(self):
         self.exchange_rate_manager.stop()
+class TrackerAnnouncerComponent(Component):
+    component_name = TRACKER_ANNOUNCER_COMPONENT
+    depends_on = [FILE_MANAGER_COMPONENT]
+    def __init__(self, component_manager):
+        super().__init__(component_manager)
+        self.file_manager = None
+        self.announce_task = None
+        self.tracker_client: typing.Optional[TrackerClient] = None
+    @property
+    def component(self):
+        return self.tracker_client
+    @property
+    def running(self):
+        return self._running and self.announce_task and not self.announce_task.done()
+    async def announce_forever(self):
+        while True:
+            sleep_seconds = 60.0
+            announce_sd_hashes = []
+            for file in self.file_manager.get_filtered():
+                if not file.downloader:
+                    continue
+                announce_sd_hashes.append(bytes.fromhex(file.sd_hash))
+            await self.tracker_client.announce_many(*announce_sd_hashes)
+            await asyncio.sleep(sleep_seconds)
+    async def start(self):
+        node = self.component_manager.get_component(DHT_COMPONENT) \
+            if self.component_manager.has_component(DHT_COMPONENT) else None
+        node_id = node.protocol.node_id if node else None
+        self.tracker_client = TrackerClient(node_id, self.conf.tcp_port, lambda: self.conf.tracker_servers)
+        await self.tracker_client.start()
+        self.file_manager = self.component_manager.get_component(FILE_MANAGER_COMPONENT)
+        self.announce_task = asyncio.create_task(self.announce_forever())
+    async def stop(self):
+        self.file_manager = None
+        if self.announce_task and not self.announce_task.done():
+            self.announce_task.cancel()
+        self.announce_task = None
+        self.tracker_client.stop()
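Among the components added above, `BackgroundDownloaderComponent` only volunteers to fetch blobs whose hashes share at least `MIN_PREFIX_COLLIDING_BITS` (8) leading bits with the node's own DHT id, biasing every node toward seeding content it would be asked about anyway. The diff leans on `utils.get_colliding_prefix_bits`; a plausible reading of that helper, reconstructed from its usage here rather than copied from the real implementation:

def get_colliding_prefix_bits(a: bytes, b: bytes) -> int:
    # count how many leading bits two equal-length byte strings share
    xored = int.from_bytes(a, "big") ^ int.from_bytes(b, "big")
    total_bits = 8 * len(a)
    return total_bits if xored == 0 else total_bits - xored.bit_length()

# a node prefers blobs "close" to its own id:
assert get_colliding_prefix_bits(b"\x00\xff", b"\x00\xfe") == 15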

View file

@@ -0,0 +1,248 @@
import json
import time
import asyncio
import logging
from statistics import median
from decimal import Decimal
from typing import Optional, Iterable, Type
from aiohttp.client_exceptions import ContentTypeError, ClientConnectionError
from lbry.error import InvalidExchangeRateResponseError, CurrencyConversionError
from lbry.utils import aiohttp_request
from lbry.wallet.dewies import lbc_to_dewies
log = logging.getLogger(__name__)
class ExchangeRate:
def __init__(self, market, spot, ts):
if not int(time.time()) - ts < 600:
raise ValueError('The timestamp is too dated.')
if not spot > 0:
raise ValueError('Spot must be greater than 0.')
self.currency_pair = (market[0:3], market[3:6])
self.spot = spot
self.ts = ts
def __repr__(self):
return f"Currency pair:{self.currency_pair}, spot:{self.spot}, ts:{self.ts}"
def as_dict(self):
return {'spot': self.spot, 'ts': self.ts}
class MarketFeed:
name: str = ""
market: str = ""
url: str = ""
params = {}
fee = 0
update_interval = 300
request_timeout = 50
def __init__(self):
self.rate: Optional[float] = None
self.last_check = 0
self._last_response = None
self._task: Optional[asyncio.Task] = None
self.event = asyncio.Event()
@property
def has_rate(self):
return self.rate is not None
@property
def is_online(self):
return self.last_check+self.update_interval+self.request_timeout > time.time()
def get_rate_from_response(self, json_response):
raise NotImplementedError()
async def get_response(self):
async with aiohttp_request(
'get', self.url, params=self.params,
timeout=self.request_timeout, headers={"User-Agent": "lbrynet"}
) as response:
try:
self._last_response = await response.json(content_type=None)
except ContentTypeError as e:
self._last_response = {}
log.warning("Could not parse exchange rate response from %s: %s", self.name, e.message)
log.debug(await response.text())
return self._last_response
async def get_rate(self):
try:
data = await self.get_response()
rate = self.get_rate_from_response(data)
rate = rate / (1.0 - self.fee)
log.debug("Saving rate update %f for %s from %s", rate, self.market, self.name)
self.rate = ExchangeRate(self.market, rate, int(time.time()))
self.last_check = time.time()
return self.rate
except asyncio.TimeoutError:
log.warning("Timed out fetching exchange rate from %s.", self.name)
except json.JSONDecodeError as e:
msg = e.doc if '<html>' not in e.doc else 'unexpected content type.'
log.warning("Could not parse exchange rate response from %s: %s", self.name, msg)
log.debug(e.doc)
except InvalidExchangeRateResponseError as e:
log.warning(str(e))
except ClientConnectionError as e:
log.warning("Error trying to connect to exchange rate %s: %s", self.name, str(e))
except Exception as e:
log.exception("Exchange rate error (%s from %s):", self.market, self.name)
finally:
self.event.set()
async def keep_updated(self):
while True:
await self.get_rate()
await asyncio.sleep(self.update_interval)
def start(self):
if not self._task:
self._task = asyncio.create_task(self.keep_updated())
def stop(self):
if self._task and not self._task.done():
self._task.cancel()
self._task = None
self.event.clear()
class BaseBittrexFeed(MarketFeed):
name = "Bittrex"
market = None
url = None
fee = 0.0025
def get_rate_from_response(self, json_response):
if 'lastTradeRate' not in json_response:
raise InvalidExchangeRateResponseError(self.name, 'result not found')
return 1.0 / float(json_response['lastTradeRate'])
class BittrexBTCFeed(BaseBittrexFeed):
market = "BTCLBC"
url = "https://api.bittrex.com/v3/markets/LBC-BTC/ticker"
class BittrexUSDFeed(BaseBittrexFeed):
market = "USDLBC"
url = "https://api.bittrex.com/v3/markets/LBC-USD/ticker"
class BaseCoinExFeed(MarketFeed):
name = "CoinEx"
market = None
url = None
def get_rate_from_response(self, json_response):
if 'data' not in json_response or \
'ticker' not in json_response['data'] or \
'last' not in json_response['data']['ticker']:
raise InvalidExchangeRateResponseError(self.name, 'result not found')
return 1.0 / float(json_response['data']['ticker']['last'])
class CoinExBTCFeed(BaseCoinExFeed):
market = "BTCLBC"
url = "https://api.coinex.com/v1/market/ticker?market=LBCBTC"
class CoinExUSDFeed(BaseCoinExFeed):
market = "USDLBC"
url = "https://api.coinex.com/v1/market/ticker?market=LBCUSDT"
class BaseHotbitFeed(MarketFeed):
name = "hotbit"
market = None
url = "https://api.hotbit.io/api/v1/market.last"
def get_rate_from_response(self, json_response):
if 'result' not in json_response:
raise InvalidExchangeRateResponseError(self.name, 'result not found')
return 1.0 / float(json_response['result'])
class HotbitBTCFeed(BaseHotbitFeed):
market = "BTCLBC"
params = {"market": "LBC/BTC"}
class HotbitUSDFeed(BaseHotbitFeed):
market = "USDLBC"
params = {"market": "LBC/USDT"}
class UPbitBTCFeed(MarketFeed):
name = "UPbit"
market = "BTCLBC"
url = "https://api.upbit.com/v1/ticker"
params = {"markets": "BTC-LBC"}
def get_rate_from_response(self, json_response):
if "error" in json_response or len(json_response) != 1 or 'trade_price' not in json_response[0]:
raise InvalidExchangeRateResponseError(self.name, 'result not found')
return 1.0 / float(json_response[0]['trade_price'])
FEEDS: Iterable[Type[MarketFeed]] = (
BittrexBTCFeed,
BittrexUSDFeed,
CoinExBTCFeed,
CoinExUSDFeed,
# HotbitBTCFeed,
# HotbitUSDFeed,
# UPbitBTCFeed,
)
class ExchangeRateManager:
def __init__(self, feeds=FEEDS):
self.market_feeds = [Feed() for Feed in feeds]
def wait(self):
return asyncio.wait(
[feed.event.wait() for feed in self.market_feeds],
)
def start(self):
log.info("Starting exchange rate manager")
for feed in self.market_feeds:
feed.start()
def stop(self):
log.info("Stopping exchange rate manager")
for source in self.market_feeds:
source.stop()
def convert_currency(self, from_currency, to_currency, amount):
log.debug(
"Converting %f %s to %s, rates: %s",
amount, from_currency, to_currency,
[market.rate for market in self.market_feeds]
)
if from_currency == to_currency:
return round(amount, 8)
rates = []
for market in self.market_feeds:
if (market.has_rate and market.is_online and
market.rate.currency_pair == (from_currency, to_currency)):
rates.append(market.rate.spot)
if rates:
return round(amount * Decimal(median(rates)), 8)
raise CurrencyConversionError(
f'Unable to convert {amount} from {from_currency} to {to_currency}')
def to_dewies(self, currency, amount) -> int:
converted = self.convert_currency(currency, "LBC", amount)
return lbc_to_dewies(str(converted))
def fee_dict(self):
return {market: market.rate.as_dict() for market in self.market_feeds}
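Design note on the new `ExchangeRateManager`: `convert_currency` takes the median spot price across whichever feeds are currently online rather than trusting any single exchange, then rounds to 8 decimal places before `to_dewies` converts the result (dewies being the daemon's 10^-8 LBC unit). A toy walk-through of that aggregation, with invented feed values:

from decimal import Decimal
from statistics import median

rates = [260.0, 265.5, 1000.0]   # LBC-per-USD spots; one feed is wildly off
amount = Decimal("1.99")         # a USD content price
lbc = round(amount * Decimal(median(rates)), 8)
print(lbc)                       # 528.34500000 -- the outlier is ignored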

View file

@ -6,11 +6,11 @@ from json import JSONEncoder
from google.protobuf.message import DecodeError from google.protobuf.message import DecodeError
from torba.client.wallet import Wallet
from torba.client.bip32 import PubKey
from lbry.schema.claim import Claim from lbry.schema.claim import Claim
from lbry.wallet.ledger import MainNetLedger, Account from lbry.schema.support import Support
from lbry.wallet.transaction import Transaction, Output from lbry.torrent.torrent_manager import TorrentSource
from lbry.wallet import Wallet, Ledger, Account, Transaction, Output
from lbry.wallet.bip32 import PublicKey
from lbry.wallet.dewies import dewies_to_lbc from lbry.wallet.dewies import dewies_to_lbc
from lbry.stream.managed_stream import ManagedStream from lbry.stream.managed_stream import ManagedStream
@ -27,17 +27,22 @@ def encode_txo_doc():
'address': "address of who can spend the txo", 'address': "address of who can spend the txo",
'confirmations': "number of confirmed blocks", 'confirmations': "number of confirmed blocks",
'is_change': "payment to change address, only available when it can be determined", 'is_change': "payment to change address, only available when it can be determined",
'is_received': "true if txo was sent from external account to this account",
'is_spent': "true if txo is spent",
'is_mine': "payment to one of your accounts, only available when it can be determined", 'is_mine': "payment to one of your accounts, only available when it can be determined",
'type': "one of 'claim', 'support' or 'payment'", 'type': "one of 'claim', 'support' or 'purchase'",
'name': "when type is 'claim' or 'support', this is the claim name", 'name': "when type is 'claim' or 'support', this is the claim name",
'claim_id': "when type is 'claim' or 'support', this is the claim id", 'claim_id': "when type is 'claim', 'support' or 'purchase', this is the claim id",
'claim_op': "when type is 'claim', this determines if it is 'create' or 'update'", 'claim_op': "when type is 'claim', this determines if it is 'create' or 'update'",
'value': "when type is 'claim' or 'support' with payload, this is the decoded protobuf payload", 'value': "when type is 'claim' or 'support' with payload, this is the decoded protobuf payload",
'value_type': "determines the type of the 'value' field: 'channel', 'stream', etc", 'value_type': "determines the type of the 'value' field: 'channel', 'stream', etc",
'protobuf': "hex encoded raw protobuf version of 'value' field", 'protobuf': "hex encoded raw protobuf version of 'value' field",
'permanent_url': "when type is 'claim' or 'support', this is the long permanent claim URL", 'permanent_url': "when type is 'claim' or 'support', this is the long permanent claim URL",
'claim': "for purchase outputs only, metadata of purchased claim",
'reposted_claim': "for repost claims only, metadata of claim being reposted",
'signing_channel': "for signed claims only, metadata of signing channel", 'signing_channel': "for signed claims only, metadata of signing channel",
'is_channel_signature_valid': "for signed claims only, whether signature is valid", 'is_channel_signature_valid': "for signed claims only, whether signature is valid",
'purchase_receipt': "metadata for the purchase transaction associated with this claim"
} }
@ -105,23 +110,25 @@ def encode_file_doc():
'metadata': '(dict) None if claim is not found else the claim metadata', 'metadata': '(dict) None if claim is not found else the claim metadata',
'channel_claim_id': '(str) None if claim is not found or not signed', 'channel_claim_id': '(str) None if claim is not found or not signed',
'channel_name': '(str) None if claim is not found or not signed', 'channel_name': '(str) None if claim is not found or not signed',
'claim_name': '(str) None if claim is not found else the claim name' 'claim_name': '(str) None if claim is not found else the claim name',
'reflector_progress': '(int) reflector upload progress, 0 to 100',
'uploading_to_reflector': '(bool) set to True when currently uploading to reflector'
} }
class JSONResponseEncoder(JSONEncoder): class JSONResponseEncoder(JSONEncoder):
def __init__(self, *args, ledger: MainNetLedger, include_protobuf=False, **kwargs): def __init__(self, *args, ledger: Ledger, include_protobuf=False, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.ledger = ledger self.ledger = ledger
self.include_protobuf = include_protobuf self.include_protobuf = include_protobuf
def default(self, obj): # pylint: disable=method-hidden def default(self, obj): # pylint: disable=method-hidden,arguments-renamed,too-many-return-statements
if isinstance(obj, Account): if isinstance(obj, Account):
return self.encode_account(obj) return self.encode_account(obj)
if isinstance(obj, Wallet): if isinstance(obj, Wallet):
return self.encode_wallet(obj) return self.encode_wallet(obj)
if isinstance(obj, ManagedStream): if isinstance(obj, (ManagedStream, TorrentSource)):
return self.encode_file(obj) return self.encode_file(obj)
if isinstance(obj, Transaction): if isinstance(obj, Transaction):
return self.encode_transaction(obj) return self.encode_transaction(obj)
@ -129,7 +136,9 @@ class JSONResponseEncoder(JSONEncoder):
return self.encode_output(obj) return self.encode_output(obj)
if isinstance(obj, Claim): if isinstance(obj, Claim):
return self.encode_claim(obj) return self.encode_claim(obj)
if isinstance(obj, PubKey): if isinstance(obj, Support):
return obj.to_dict()
if isinstance(obj, PublicKey):
return obj.extended_key_string() return obj.extended_key_string()
if isinstance(obj, datetime): if isinstance(obj, datetime):
return obj.strftime("%Y%m%dT%H:%M:%S") return obj.strftime("%Y%m%dT%H:%M:%S")
@ -152,6 +161,8 @@ class JSONResponseEncoder(JSONEncoder):
} }
def encode_output(self, txo, check_signature=True): def encode_output(self, txo, check_signature=True):
if not txo:
return
tx_height = txo.tx_ref.height tx_height = txo.tx_ref.height
best_height = self.ledger.headers.height best_height = self.ledger.headers.height
output = { output = {
@ -159,14 +170,24 @@ class JSONResponseEncoder(JSONEncoder):
'nout': txo.position, 'nout': txo.position,
'height': tx_height, 'height': tx_height,
'amount': dewies_to_lbc(txo.amount), 'amount': dewies_to_lbc(txo.amount),
'address': txo.get_address(self.ledger), 'address': txo.get_address(self.ledger) if txo.has_address else None,
'confirmations': (best_height+1) - tx_height if tx_height > 0 else tx_height, 'confirmations': (best_height+1) - tx_height if tx_height > 0 else tx_height,
'timestamp': self.ledger.headers[tx_height]['timestamp'] if 0 < tx_height <= best_height else None 'timestamp': self.ledger.headers.estimated_timestamp(tx_height)
} }
if txo.is_change is not None: if txo.is_spent is not None:
output['is_change'] = txo.is_change output['is_spent'] = txo.is_spent
if txo.is_my_account is not None: if txo.is_my_output is not None:
output['is_mine'] = txo.is_my_account output['is_my_output'] = txo.is_my_output
if txo.is_my_input is not None:
output['is_my_input'] = txo.is_my_input
if txo.sent_supports is not None:
output['sent_supports'] = dewies_to_lbc(txo.sent_supports)
if txo.sent_tips is not None:
output['sent_tips'] = dewies_to_lbc(txo.sent_tips)
if txo.received_tips is not None:
output['received_tips'] = dewies_to_lbc(txo.received_tips)
if txo.is_internal_transfer is not None:
output['is_internal_transfer'] = txo.is_internal_transfer
if txo.script.is_claim_name: if txo.script.is_claim_name:
output['type'] = 'claim' output['type'] = 'claim'
@ -176,6 +197,13 @@ class JSONResponseEncoder(JSONEncoder):
output['claim_op'] = 'update' output['claim_op'] = 'update'
elif txo.script.is_support_claim: elif txo.script.is_support_claim:
output['type'] = 'support' output['type'] = 'support'
elif txo.script.is_return_data:
output['type'] = 'data'
elif txo.purchase is not None:
output['type'] = 'purchase'
output['claim_id'] = txo.purchased_claim_id
if txo.purchased_claim is not None:
output['claim'] = self.encode_output(txo.purchased_claim)
else: else:
output['type'] = 'payment' output['type'] = 'payment'
@ -191,20 +219,27 @@ class JSONResponseEncoder(JSONEncoder):
output['short_url'] = output['meta'].pop('short_url') output['short_url'] = output['meta'].pop('short_url')
if 'canonical_url' in output['meta']: if 'canonical_url' in output['meta']:
output['canonical_url'] = output['meta'].pop('canonical_url') output['canonical_url'] = output['meta'].pop('canonical_url')
if txo.script.is_claim_name or txo.script.is_update_claim: if txo.claims is not None:
output['claims'] = [self.encode_output(o) for o in txo.claims]
if txo.reposted_claim is not None:
output['reposted_claim'] = self.encode_output(txo.reposted_claim)
if txo.script.is_claim_name or txo.script.is_update_claim or txo.script.is_support_claim_data:
try: try:
output['value'] = txo.claim output['value'] = txo.signable
output['value_type'] = txo.claim.claim_type
if self.include_protobuf: if self.include_protobuf:
output['protobuf'] = hexlify(txo.claim.to_bytes()) output['protobuf'] = hexlify(txo.signable.to_bytes())
if txo.purchase_receipt is not None:
output['purchase_receipt'] = self.encode_output(txo.purchase_receipt)
if txo.script.is_claim_name or txo.script.is_update_claim:
output['value_type'] = txo.claim.claim_type
if txo.claim.is_channel: if txo.claim.is_channel:
output['has_signing_key'] = txo.has_private_key output['has_signing_key'] = txo.has_private_key
if check_signature and txo.claim.is_signed: if check_signature and txo.signable.is_signed:
if txo.channel is not None: if txo.channel is not None:
output['signing_channel'] = self.encode_output(txo.channel) output['signing_channel'] = self.encode_output(txo.channel)
output['is_channel_signature_valid'] = txo.is_signed_by(txo.channel, self.ledger) output['is_channel_signature_valid'] = txo.is_signed_by(txo.channel, self.ledger)
else: else:
output['signing_channel'] = {'channel_id': txo.claim.signing_channel_id} output['signing_channel'] = {'channel_id': txo.signable.signing_channel_id}
output['is_channel_signature_valid'] = False output['is_channel_signature_valid'] = False
except DecodeError: except DecodeError:
pass pass
@ -216,7 +251,7 @@ class JSONResponseEncoder(JSONEncoder):
if isinstance(value, int): if isinstance(value, int):
meta[key] = dewies_to_lbc(value) meta[key] = dewies_to_lbc(value)
if 0 < meta.get('creation_height', 0) <= self.ledger.headers.height: if 0 < meta.get('creation_height', 0) <= self.ledger.headers.height:
meta['creation_timestamp'] = self.ledger.headers[meta['creation_height']]['timestamp'] meta['creation_timestamp'] = self.ledger.headers.estimated_timestamp(meta['creation_height'])
return meta return meta
def encode_input(self, txi): def encode_input(self, txi):
@ -232,7 +267,8 @@ class JSONResponseEncoder(JSONEncoder):
result['is_default'] = self.ledger.accounts[0] == account result['is_default'] = self.ledger.accounts[0] == account
return result return result
def encode_wallet(self, wallet): @staticmethod
def encode_wallet(wallet):
return { return {
'id': wallet.id, 'id': wallet.id,
'name': wallet.name 'name': wallet.name
@@ -242,26 +278,32 @@ class JSONResponseEncoder(JSONEncoder):
         output_exists = managed_stream.output_file_exists
         tx_height = managed_stream.stream_claim_info.height
         best_height = self.ledger.headers.height
-        return {
-            'streaming_url': managed_stream.stream_url,
+        is_stream = hasattr(managed_stream, 'stream_hash')
+        if is_stream:
+            total_bytes_lower_bound = managed_stream.descriptor.lower_bound_decrypted_length()
+            total_bytes = managed_stream.descriptor.upper_bound_decrypted_length()
+        else:
+            total_bytes_lower_bound = total_bytes = managed_stream.torrent_length
+        result = {
+            'streaming_url': None,
             'completed': managed_stream.completed,
-            'file_name': managed_stream.file_name if output_exists else None,
-            'download_directory': managed_stream.download_directory if output_exists else None,
-            'download_path': managed_stream.full_path if output_exists else None,
+            'file_name': None,
+            'download_directory': None,
+            'download_path': None,
             'points_paid': 0.0,
             'stopped': not managed_stream.running,
-            'stream_hash': managed_stream.stream_hash,
-            'stream_name': managed_stream.descriptor.stream_name,
-            'suggested_file_name': managed_stream.descriptor.suggested_file_name,
-            'sd_hash': managed_stream.descriptor.sd_hash,
-            'mime_type': managed_stream.mime_type,
-            'key': managed_stream.descriptor.key,
-            'total_bytes_lower_bound': managed_stream.descriptor.lower_bound_decrypted_length(),
-            'total_bytes': managed_stream.descriptor.upper_bound_decrypted_length(),
+            'stream_hash': None,
+            'stream_name': None,
+            'suggested_file_name': None,
+            'sd_hash': None,
+            'mime_type': None,
+            'key': None,
+            'total_bytes_lower_bound': total_bytes_lower_bound,
+            'total_bytes': total_bytes,
             'written_bytes': managed_stream.written_bytes,
-            'blobs_completed': managed_stream.blobs_completed,
-            'blobs_in_stream': managed_stream.blobs_in_stream,
-            'blobs_remaining': managed_stream.blobs_remaining,
+            'blobs_completed': None,
+            'blobs_in_stream': None,
+            'blobs_remaining': None,
             'status': managed_stream.status,
             'claim_id': managed_stream.claim_id,
             'txid': managed_stream.txid,
@@ -273,11 +315,42 @@ class JSONResponseEncoder(JSONEncoder):
             'channel_name': managed_stream.channel_name,
             'claim_name': managed_stream.claim_name,
             'content_fee': managed_stream.content_fee,
-            'purchase_receipt': self.encode_output(managed_stream.purchase_receipt),
             'added_on': managed_stream.added_on,
             'height': tx_height,
             'confirmations': (best_height + 1) - tx_height if tx_height > 0 else tx_height,
-            'timestamp': self.ledger.headers[tx_height]['timestamp'] if 0 < tx_height <= best_height else None
+            'timestamp': self.ledger.headers.estimated_timestamp(tx_height),
+            'is_fully_reflected': False,
+            'reflector_progress': False,
+            'uploading_to_reflector': False
         }
+        if is_stream:
+            result.update({
+                'streaming_url': managed_stream.stream_url,
+                'stream_hash': managed_stream.stream_hash,
+                'stream_name': managed_stream.stream_name,
+                'suggested_file_name': managed_stream.suggested_file_name,
+                'sd_hash': managed_stream.descriptor.sd_hash,
+                'mime_type': managed_stream.mime_type,
+                'key': managed_stream.descriptor.key,
+                'blobs_completed': managed_stream.blobs_completed,
+                'blobs_in_stream': managed_stream.blobs_in_stream,
+                'blobs_remaining': managed_stream.blobs_remaining,
+                'is_fully_reflected': managed_stream.is_fully_reflected,
+                'reflector_progress': managed_stream.reflector_progress,
+                'uploading_to_reflector': managed_stream.uploading_to_reflector
+            })
+        else:
+            result.update({
+                'streaming_url': f'file://{managed_stream.full_path}',
+            })
+        if output_exists:
+            result.update({
+                'file_name': managed_stream.file_name,
+                'download_directory': managed_stream.download_directory,
+                'download_path': managed_stream.full_path,
+            })
+        return result
     def encode_claim(self, claim):
         encoded = getattr(claim, claim.claim_type).to_dict()
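
The net effect of the encode_file rewrite above: every stream-specific key is always present (defaulted to None), and the concrete source type then overlays whichever fields it actually supports, so file_list responses keep one schema whether the source is a blob stream or a torrent. A condensed sketch of the pattern, with the field set trimmed (attribute names follow the diff above):

from types import SimpleNamespace

def encode_file_sketch(source):
    # a source is treated as a stream iff it has a stream_hash, as above
    is_stream = hasattr(source, 'stream_hash')
    result = {
        'streaming_url': None,  # stable schema: keys exist even when inapplicable
        'stream_hash': None,
        'sd_hash': None,
    }
    if is_stream:
        result.update({
            'streaming_url': source.stream_url,
            'stream_hash': source.stream_hash,
            'sd_hash': source.descriptor.sd_hash,
        })
    else:
        # torrents are served straight from the completed file on disk
        result.update({'streaming_url': f'file://{source.full_path}'})
    return result

torrent = SimpleNamespace(full_path='/downloads/example.iso')  # no stream_hash attribute
print(encode_file_sketch(torrent))
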


@@ -31,6 +31,14 @@ def migrate_db(conf, start, end):
         from .migrate10to11 import do_migration
     elif current == 11:
         from .migrate11to12 import do_migration
+    elif current == 12:
+        from .migrate12to13 import do_migration
+    elif current == 13:
+        from .migrate13to14 import do_migration
+    elif current == 14:
+        from .migrate14to15 import do_migration
+    elif current == 15:
+        from .migrate15to16 import do_migration
     else:
         raise Exception(f"DB migration of version {current} to {current+1} is not available")
     try:
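
Each migrateNto(N+1) module moves the schema forward exactly one version, so the dispatcher above gets invoked once per missing version. A minimal sketch of such a driver loop, with a migrations dict standing in for the elif chain (the dict and its keys are illustrative, not the real call site):

def run_migrations(conf, start, end, migrations):
    # migrations maps a schema version N to the do_migration(conf)
    # callable that upgrades the database to version N + 1
    for current in range(start, end):
        if current not in migrations:
            raise Exception(f"DB migration of version {current} to {current + 1} is not available")
        migrations[current](conf)

# usage sketch: run_migrations(conf, 12, 16, {12: migrate12to13.do_migration, ...})
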


@@ -0,0 +1,80 @@
+import os
+import sqlite3
+
+
+def do_migration(conf):
+    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
+    connection = sqlite3.connect(db_path)
+    cursor = connection.cursor()
+
+    current_columns = []
+    for col_info in cursor.execute("pragma table_info('file');").fetchall():
+        current_columns.append(col_info[1])
+    if 'bt_infohash' in current_columns:
+        connection.close()
+        print("already migrated")
+        return
+
+    cursor.executescript("""
+        pragma foreign_keys=off;
+
+        create table if not exists torrent (
+            bt_infohash char(20) not null primary key,
+            tracker text,
+            length integer not null,
+            name text not null
+        );
+
+        create table if not exists torrent_node ( -- BEP-0005
+            bt_infohash char(20) not null references torrent,
+            host text not null,
+            port integer not null
+        );
+
+        create table if not exists torrent_tracker ( -- BEP-0012
+            bt_infohash char(20) not null references torrent,
+            tracker text not null
+        );
+
+        create table if not exists torrent_http_seed ( -- BEP-0017
+            bt_infohash char(20) not null references torrent,
+            http_seed text not null
+        );
+
+        create table if not exists new_file (
+            stream_hash char(96) references stream,
+            bt_infohash char(20) references torrent,
+            file_name text,
+            download_directory text,
+            blob_data_rate real not null,
+            status text not null,
+            saved_file integer not null,
+            content_fee text,
+            added_on integer not null
+        );
+
+        create table if not exists new_content_claim (
+            stream_hash char(96) references stream,
+            bt_infohash char(20) references torrent,
+            claim_outpoint text unique not null references claim
+        );
+
+        insert into new_file (stream_hash, bt_infohash, file_name, download_directory, blob_data_rate, status,
+                              saved_file, content_fee, added_on) select
+            stream_hash, NULL, file_name, download_directory, blob_data_rate, status, saved_file, content_fee,
+            added_on
+        from file;
+
+        insert or ignore into new_content_claim (stream_hash, bt_infohash, claim_outpoint)
+            select stream_hash, NULL, claim_outpoint from content_claim;
+
+        drop table file;
+        drop table content_claim;
+        alter table new_file rename to file;
+        alter table new_content_claim rename to content_claim;
+
+        pragma foreign_keys=on;
+    """)
+
+    connection.commit()
+    connection.close()
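
The create-copy-drop-rename dance above is the standard SQLite rebuild pattern: ALTER TABLE cannot change an existing column's constraints (here, file rows must become valid without a stream_hash once torrent-only entries exist), so a replacement table is built and swapped in with foreign_keys switched off. A self-contained toy run of the same pattern, using simplified tables rather than the real schema:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
    pragma foreign_keys=off;
    create table file (file_name text);
    insert into file values ('a'), ('b');
    create table torrent (bt_infohash char(20) primary key);
    create table new_file (
        bt_infohash char(20) references torrent,  -- new optional FK column
        file_name text
    );
    insert into new_file (bt_infohash, file_name)
        select NULL, file_name from file;         -- backfill existing rows
    drop table file;
    alter table new_file rename to file;
    pragma foreign_keys=on;
""")
print(conn.execute("select * from file").fetchall())  # [(None, 'a'), (None, 'b')]
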


@@ -0,0 +1,21 @@
+import os
+import sqlite3
+
+
+def do_migration(conf):
+    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
+    connection = sqlite3.connect(db_path)
+    cursor = connection.cursor()
+
+    cursor.executescript("""
+        create table if not exists peer (
+            node_id char(96) not null primary key,
+            address text not null,
+            udp_port integer not null,
+            tcp_port integer,
+            unique (address, udp_port)
+        );
+    """)
+
+    connection.commit()
+    connection.close()
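
The unique (address, udp_port) constraint means a peer row is keyed by its UDP endpoint as well as by node_id. A quick check of how that constraint behaves; the insert-or-replace usage is illustrative, not necessarily how the DHT store writes rows:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("""
    create table peer (
        node_id char(96) not null primary key,
        address text not null,
        udp_port integer not null,
        tcp_port integer,
        unique (address, udp_port)
    )
""")
# a second node_id claiming the same endpoint replaces the old row,
# because 'insert or replace' resolves the unique(address, udp_port) conflict
conn.execute("insert or replace into peer values ('aa', '1.2.3.4', 4444, NULL)")
conn.execute("insert or replace into peer values ('bb', '1.2.3.4', 4444, 4445)")
print(conn.execute("select * from peer").fetchall())  # [('bb', '1.2.3.4', 4444, 4445)]
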


@@ -0,0 +1,16 @@
+import os
+import sqlite3
+
+
+def do_migration(conf):
+    db_path = os.path.join(conf.data_dir, "lbrynet.sqlite")
+    connection = sqlite3.connect(db_path)
+    cursor = connection.cursor()
+
+    cursor.executescript("""
+        alter table blob add column added_on integer not null default 0;
+        alter table blob add column is_mine integer not null default 1;
+    """)
+
+    connection.commit()
+    connection.close()
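
SQLite only allows adding a NOT NULL column when a DEFAULT is supplied, and that default also backfills rows that already exist: pre-migration blobs end up with added_on=0 (unknown) and is_mine=1. A quick illustration with a minimal blob table:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("create table blob (blob_hash char(96) primary key)")
conn.execute("insert into blob values ('abc')")
# 'not null' additions require a default, which backfills existing rows
conn.executescript("""
    alter table blob add column added_on integer not null default 0;
    alter table blob add column is_mine integer not null default 1;
""")
print(conn.execute("select * from blob").fetchall())  # [('abc', 0, 1)]
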

Some files were not shown because too many files have changed in this diff.